author     bugaevskiy <bugaevskiy@yandex-team.ru>          2022-02-10 16:46:17 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:46:17 +0300
commit     c7f68570483e493f4ddaf946de7b3a420ee621b0 (patch)
tree       9c0071ecb0669ad439755fd802425230a52673ce
parent     1312621288956f199a5bd5342b0133d4395fa725 (diff)
download   ydb-c7f68570483e493f4ddaf946de7b3a420ee621b0.tar.gz
Restoring authorship annotation for <bugaevskiy@yandex-team.ru>. Commit 1 of 2.
-rw-r--r--  build/plugins/ytest.py  2
-rw-r--r--  build/rules/taxi.policy  6
-rw-r--r--  build/sysincl/misc.yml  8
-rw-r--r--  contrib/libs/jemalloc/COPYING  4
-rw-r--r--  contrib/libs/jemalloc/ChangeLog  3036
-rw-r--r--  contrib/libs/jemalloc/INSTALL.md  842
-rw-r--r--  contrib/libs/jemalloc/README  14
-rw-r--r--  contrib/libs/jemalloc/TUNING.md  258
-rw-r--r--  contrib/libs/jemalloc/VERSION  2
-rw-r--r--  contrib/libs/jemalloc/dynamic/ya.make  8
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/arena_externs.h  208
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/arena_inlines_a.h  114
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/arena_inlines_b.h  854
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/arena_stats.h  542
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/arena_structs_a.h  22
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/arena_structs_b.h  464
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/arena_types.h  102
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/assert.h  112
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/atomic.h  136
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h  258
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/atomic_msvc.h  316
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/background_thread_externs.h  64
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/background_thread_inlines.h  124
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/background_thread_structs.h  108
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/base_externs.h  44
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/base_inlines.h  26
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/base_structs.h  118
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/base_types.h  66
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/bin.h  246
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/bin_stats.h  108
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/bin_types.h  34
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/bit_util.h  478
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/bitmap.h  544
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/cache_bin.h  262
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/ckh.h  128
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/ctl.h  204
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/div.h  82
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/emitter.h  970
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/extent_dss.h  52
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/extent_externs.h  166
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/extent_inlines.h  1002
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/extent_mmap.h  20
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/extent_structs.h  512
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/extent_types.h  46
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/hash.h  188
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/hook.h  326
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h  196
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-linux.h  518
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-osx-arm64.h  12
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-osx.h  300
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-win.h  432
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h  12
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h  114
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h  188
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h  348
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h  174
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h  444
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h  206
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h  228
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_preamble.h  426
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/large_externs.h  64
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/log.h  230
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/malloc_io.h  204
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/mutex.h  506
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/mutex_pool.h  188
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/mutex_prof.h  216
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/nstime.h  68
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/pages.h  176
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/ph.h  782
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/private_namespace-linux.h  844
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/private_namespace-osx.h  22
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/private_namespace.h  14
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/prng.h  326
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/prof_externs.h  210
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/prof_inlines_a.h  170
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/prof_inlines_b.h  500
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/prof_structs.h  400
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/prof_types.h  112
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/ql.h  52
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/qr.h  46
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/quantum.h  154
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/rb.h  382
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/rtree.h  1012
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/rtree_tsd.h  100
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/safety_check.h  52
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/sc.h  666
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/seq.h  110
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/smoothstep.h  464
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/spin.h  80
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/stats.h  48
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/sz.h  636
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/tcache_externs.h  106
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/tcache_inlines.h  454
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/tcache_structs.h  140
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/tcache_types.h  118
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/test_hooks.h  38
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/ticker.h  180
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/tsd.h  794
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/tsd_generic.h  326
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/tsd_tls.h  120
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/tsd_types.h  20
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/tsd_win.h  278
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/util.h  72
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/internal/witness.h  694
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/jemalloc-linux.h  868
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/jemalloc-osx.h  862
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/jemalloc-win.h  856
-rw-r--r--  contrib/libs/jemalloc/include/jemalloc/jemalloc.h  12
-rw-r--r--  contrib/libs/jemalloc/include/msvc_compat/strings.h  92
-rw-r--r--  contrib/libs/jemalloc/include/msvc_compat/windows_extra.h  12
-rw-r--r--  contrib/libs/jemalloc/reg_zone.cpp  10
-rw-r--r--  contrib/libs/jemalloc/src/arena.c  4062
-rw-r--r--  contrib/libs/jemalloc/src/background_thread.c  1878
-rw-r--r--  contrib/libs/jemalloc/src/base.c  960
-rw-r--r--  contrib/libs/jemalloc/src/bin.c  190
-rw-r--r--  contrib/libs/jemalloc/src/bitmap.c  144
-rw-r--r--  contrib/libs/jemalloc/src/ckh.c  264
-rw-r--r--  contrib/libs/jemalloc/src/ctl.c  5276
-rw-r--r--  contrib/libs/jemalloc/src/div.c  110
-rw-r--r--  contrib/libs/jemalloc/src/extent.c  4804
-rw-r--r--  contrib/libs/jemalloc/src/extent_dss.c  542
-rw-r--r--  contrib/libs/jemalloc/src/extent_mmap.c  84
-rw-r--r--  contrib/libs/jemalloc/src/hash.c  6
-rw-r--r--  contrib/libs/jemalloc/src/hook.c  390
-rw-r--r--  contrib/libs/jemalloc/src/jemalloc.c  6552
-rw-r--r--  contrib/libs/jemalloc/src/jemalloc_cpp.cpp  282
-rw-r--r--  contrib/libs/jemalloc/src/large.c  790
-rw-r--r--  contrib/libs/jemalloc/src/log.c  156
-rw-r--r--  contrib/libs/jemalloc/src/malloc_io.c  1350
-rw-r--r--  contrib/libs/jemalloc/src/mutex.c  284
-rw-r--r--  contrib/libs/jemalloc/src/mutex_pool.c  36
-rw-r--r--  contrib/libs/jemalloc/src/nstime.c  340
-rw-r--r--  contrib/libs/jemalloc/src/pages.c  1296
-rw-r--r--  contrib/libs/jemalloc/src/prng.c  6
-rw-r--r--  contrib/libs/jemalloc/src/prof.c  5022
-rw-r--r--  contrib/libs/jemalloc/src/rtree.c  582
-rw-r--r--  contrib/libs/jemalloc/src/safety_check.c  48
-rw-r--r--  contrib/libs/jemalloc/src/sc.c  624
-rw-r--r--  contrib/libs/jemalloc/src/stats.c  2782
-rw-r--r--  contrib/libs/jemalloc/src/sz.c  128
-rw-r--r--  contrib/libs/jemalloc/src/tcache.c  1226
-rw-r--r--  contrib/libs/jemalloc/src/test_hooks.c  24
-rw-r--r--  contrib/libs/jemalloc/src/ticker.c  6
-rw-r--r--  contrib/libs/jemalloc/src/tsd.c  874
-rw-r--r--  contrib/libs/jemalloc/src/witness.c  200
-rw-r--r--  contrib/libs/jemalloc/src/zone.c  650
-rw-r--r--  contrib/libs/jemalloc/ya.make  80
-rw-r--r--  contrib/libs/ya.make  2
-rw-r--r--  contrib/restricted/boost/boost/asio/detail/fenced_block.hpp  2
-rw-r--r--  contrib/restricted/boost/boost/filesystem/operations.hpp  4
-rw-r--r--  contrib/restricted/boost/libs/filesystem/src/operations.cpp  6
-rw-r--r--  contrib/restricted/boost/libs/regex/ya.make  8
152 files changed, 36890 insertions, 36890 deletions
diff --git a/build/plugins/ytest.py b/build/plugins/ytest.py
index 8970837f0f..8aae1d5f03 100644
--- a/build/plugins/ytest.py
+++ b/build/plugins/ytest.py
@@ -135,7 +135,7 @@ def validate_test(unit, kw):
errors.append("BOOSTTEST is not allowed here")
elif valid_kw.get('SCRIPT-REL-PATH') == 'gtest':
project_path = valid_kw.get('BUILD-FOLDER-PATH', "")
- if not project_path.startswith(("contrib", "devtools", "mail", "mds", "taxi")):
+ if not project_path.startswith(("contrib", "devtools", "mail", "mds", "taxi")):
errors.append("GTEST_UGLY is not allowed here, use GTEST instead")
size_timeout = collections.OrderedDict(sorted(consts.TestSize.DefaultTimeouts.items(), key=lambda t: t[1]))
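
The removed and re-added startswith() line in the hunk above are textually identical in this rendering, since the commit restores authorship annotations rather than changing logic; the check itself is a plain prefix allowlist on the project path. A minimal standalone Python sketch of that logic, with illustrative names (ALLOWED_PREFIXES and validate_gtest_ugly_path are not part of the real plugin, which builds its error list while validating its own unit/kw structures):

    # Sketch of the prefix-allowlist validation seen in build/plugins/ytest.py.
    # Names are illustrative; only the startswith() check mirrors the plugin.
    ALLOWED_PREFIXES = ("contrib", "devtools", "mail", "mds", "taxi")

    def validate_gtest_ugly_path(project_path):
        """Return validation errors for a project that uses GTEST_UGLY."""
        errors = []
        if not project_path.startswith(ALLOWED_PREFIXES):
            errors.append("GTEST_UGLY is not allowed here, use GTEST instead")
        return errors

    # Usage: an allowed prefix yields no errors, anything else is rejected.
    assert validate_gtest_ugly_path("contrib/libs/foo") == []
    assert validate_gtest_ugly_path("some/other/project") == [
        "GTEST_UGLY is not allowed here, use GTEST instead"
    ]
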
diff --git a/build/rules/taxi.policy b/build/rules/taxi.policy
index 76cc083ef2..b70c1367c2 100644
--- a/build/rules/taxi.policy
+++ b/build/rules/taxi.policy
@@ -1,3 +1,3 @@
-# userver-specific libc overrides, pending removal
-ALLOW taxi/uservices/userver -> contrib/libs/libc_userver_workarounds
-DENY .* -> contrib/libs/libc_userver_workarounds
+# userver-specific libc overrides, pending removal
+ALLOW taxi/uservices/userver -> contrib/libs/libc_userver_workarounds
+DENY .* -> contrib/libs/libc_userver_workarounds
diff --git a/build/sysincl/misc.yml b/build/sysincl/misc.yml
index e9e6095888..a11fae7672 100644
--- a/build/sysincl/misc.yml
+++ b/build/sysincl/misc.yml
@@ -235,10 +235,10 @@
- fstream.h
- ../include/fenv.h
-- source_filter: "^contrib/restricted/boost/boost/stacktrace/detail"
- includes:
- - backtrace.h: contrib/libs/backtrace/backtrace.h
-
+- source_filter: "^contrib/restricted/boost/boost/stacktrace/detail"
+ includes:
+ - backtrace.h: contrib/libs/backtrace/backtrace.h
+
# windows sdk includes that we do not want to apply for whole arcadia
- source_filter: "^contrib"
includes:
diff --git a/contrib/libs/jemalloc/COPYING b/contrib/libs/jemalloc/COPYING
index 3b7fd3585d..9d07a6d039 100644
--- a/contrib/libs/jemalloc/COPYING
+++ b/contrib/libs/jemalloc/COPYING
@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
-Copyright (C) 2002-present Jason Evans <jasone@canonware.com>.
+Copyright (C) 2002-present Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
-Copyright (C) 2009-present Facebook, Inc. All rights reserved.
+Copyright (C) 2009-present Facebook, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/jemalloc/ChangeLog b/contrib/libs/jemalloc/ChangeLog
index e55813b7be..e8cb9eec50 100644
--- a/contrib/libs/jemalloc/ChangeLog
+++ b/contrib/libs/jemalloc/ChangeLog
@@ -1,1518 +1,1518 @@
-Following are change highlights associated with official releases. Important
-bug fixes are all mentioned, but some internal enhancements are omitted here for
-brevity. Much more detail can be found in the git revision history:
-
- https://github.com/jemalloc/jemalloc
-
-* 5.2.1 (August 5, 2019)
-
- This release is primarily about Windows. A critical virtual memory leak is
- resolved on all Windows platforms. The regression was present in all releases
- since 5.0.0.
-
- Bug fixes:
- - Fix a severe virtual memory leak on Windows. This regression was first
- released in 5.0.0. (@Ignition, @j0t, @frederik-h, @davidtgoldblatt,
- @interwq)
- - Fix size 0 handling in posix_memalign(). This regression was first released
- in 5.2.0. (@interwq)
- - Fix the prof_log unit test which may observe unexpected backtraces from
- compiler optimizations. The test was first added in 5.2.0. (@marxin,
- @gnzlbg, @interwq)
- - Fix the declaration of the extent_avail tree. This regression was first
- released in 5.1.0. (@zoulasc)
- - Fix an incorrect reference in jeprof. This functionality was first released
- in 3.0.0. (@prehistoric-penguin)
- - Fix an assertion on the deallocation fast-path. This regression was first
- released in 5.2.0. (@yinan1048576)
- - Fix the TLS_MODEL attribute in headers. This regression was first released
- in 5.0.0. (@zoulasc, @interwq)
-
- Optimizations and refactors:
- - Implement opt.retain on Windows and enable by default on 64-bit. (@interwq,
- @davidtgoldblatt)
- - Optimize away a branch on the operator delete[] path. (@mgrice)
- - Add format annotation to the format generator function. (@zoulasc)
- - Refactor and improve the size class header generation. (@yinan1048576)
- - Remove best fit. (@djwatson)
- - Avoid blocking on background thread locks for stats. (@oranagra, @interwq)
-
-* 5.2.0 (April 2, 2019)
-
- This release includes a few notable improvements, which are summarized below:
- 1) improved fast-path performance from the optimizations by @djwatson; 2)
- reduced virtual memory fragmentation and metadata usage; and 3) bug fixes on
- setting the number of background threads. In addition, peak / spike memory
- usage is improved with certain allocation patterns. As usual, the release and
- prior dev versions have gone through large-scale production testing.
-
- New features:
- - Implement oversize_threshold, which uses a dedicated arena for allocations
- crossing the specified threshold to reduce fragmentation. (@interwq)
- - Add extents usage information to stats. (@tyleretzel)
- - Log time information for sampled allocations. (@tyleretzel)
- - Support 0 size in sdallocx. (@djwatson)
- - Output rate for certain counters in malloc_stats. (@zinoale)
- - Add configure option --enable-readlinkat, which allows the use of readlinkat
- over readlink. (@davidtgoldblatt)
- - Add configure options --{enable,disable}-{static,shared} to allow not
- building unwanted libraries. (@Ericson2314)
- - Add configure option --disable-libdl to enable fully static builds.
- (@interwq)
- - Add mallctl interfaces:
- + opt.oversize_threshold (@interwq)
- + stats.arenas.<i>.extent_avail (@tyleretzel)
- + stats.arenas.<i>.extents.<j>.n{dirty,muzzy,retained} (@tyleretzel)
- + stats.arenas.<i>.extents.<j>.{dirty,muzzy,retained}_bytes
- (@tyleretzel)
-
- Portability improvements:
- - Update MSVC builds. (@maksqwe, @rustyx)
- - Workaround a compiler optimizer bug on s390x. (@rkmisra)
- - Make use of pthread_set_name_np(3) on FreeBSD. (@trasz)
- - Implement malloc_getcpu() to enable percpu_arena for windows. (@santagada)
- - Link against -pthread instead of -lpthread. (@paravoid)
- - Make background_thread not dependent on libdl. (@interwq)
- - Add stringify to fix a linker directive issue on MSVC. (@daverigby)
- - Detect and fall back when 8-bit atomics are unavailable. (@interwq)
- - Fall back to the default pthread_create if dlsym(3) fails. (@interwq)
-
- Optimizations and refactors:
- - Refactor the TSD module. (@davidtgoldblatt)
- - Avoid taking extents_muzzy mutex when muzzy is disabled. (@interwq)
- - Avoid taking large_mtx for auto arenas on the tcache flush path. (@interwq)
- - Optimize ixalloc by avoiding a size lookup. (@interwq)
- - Implement opt.oversize_threshold which uses a dedicated arena for requests
- crossing the threshold, also eagerly purges the oversize extents. Default
- the threshold to 8 MiB. (@interwq)
- - Clean compilation with -Wextra. (@gnzlbg, @jasone)
- - Refactor the size class module. (@davidtgoldblatt)
- - Refactor the stats emitter. (@tyleretzel)
- - Optimize pow2_ceil. (@rkmisra)
- - Avoid runtime detection of lazy purging on FreeBSD. (@trasz)
- - Optimize mmap(2) alignment handling on FreeBSD. (@trasz)
- - Improve error handling for THP state initialization. (@jsteemann)
- - Rework the malloc() fast path. (@djwatson)
- - Rework the free() fast path. (@djwatson)
- - Refactor and optimize the tcache fill / flush paths. (@djwatson)
- - Optimize sync / lwsync on PowerPC. (@chmeeedalf)
- - Bypass extent_dalloc() when retain is enabled. (@interwq)
- - Optimize the locking on large deallocation. (@interwq)
- - Reduce the number of pages committed from sanity checking in debug build.
- (@trasz, @interwq)
- - Deprecate OSSpinLock. (@interwq)
- - Lower the default number of background threads to 4 (when the feature
- is enabled). (@interwq)
- - Optimize the trylock spin wait. (@djwatson)
- - Use arena index for arena-matching checks. (@interwq)
- - Avoid forced decay on thread termination when using background threads.
- (@interwq)
- - Disable muzzy decay by default. (@djwatson, @interwq)
- - Only initialize libgcc unwinder when profiling is enabled. (@paravoid,
- @interwq)
-
- Bug fixes (all only relevant to jemalloc 5.x):
- - Fix background thread index issues with max_background_threads. (@djwatson,
- @interwq)
- - Fix stats output for opt.lg_extent_max_active_fit. (@interwq)
- - Fix opt.prof_prefix initialization. (@davidtgoldblatt)
- - Properly trigger decay on tcache destroy. (@interwq, @amosbird)
- - Fix tcache.flush. (@interwq)
- - Detect whether explicit extent zero out is necessary with huge pages or
- custom extent hooks, which may change the purge semantics. (@interwq)
- - Fix a side effect caused by extent_max_active_fit combined with decay-based
- purging, where freed extents can accumulate and not be reused for an
- extended period of time. (@interwq, @mpghf)
- - Fix a missing unlock on extent register error handling. (@zoulasc)
-
- Testing:
- - Simplify the Travis script output. (@gnzlbg)
- - Update the test scripts for FreeBSD. (@devnexen)
- - Add unit tests for the producer-consumer pattern. (@interwq)
- - Add Cirrus-CI config for FreeBSD builds. (@jasone)
- - Add size-matching sanity checks on tcache flush. (@davidtgoldblatt,
- @interwq)
-
- Incompatible changes:
- - Remove --with-lg-page-sizes. (@davidtgoldblatt)
-
- Documentation:
- - Attempt to build docs by default, however skip doc building when xsltproc
- is missing. (@interwq, @cmuellner)
-
-* 5.1.0 (May 4, 2018)
-
- This release is primarily about fine-tuning, ranging from several new features
- to numerous notable performance and portability enhancements. The release and
- prior dev versions have been running in multiple large scale applications for
- months, and the cumulative improvements are substantial in many cases.
-
- Given the long and successful production runs, this release is likely a good
- candidate for applications to upgrade, from both jemalloc 5.0 and before. For
- performance-critical applications, the newly added TUNING.md provides
- guidelines on jemalloc tuning.
-
- New features:
- - Implement transparent huge page support for internal metadata. (@interwq)
- - Add opt.thp to allow enabling / disabling transparent huge pages for all
- mappings. (@interwq)
- - Add maximum background thread count option. (@djwatson)
- - Allow prof_active to control opt.lg_prof_interval and prof.gdump.
- (@interwq)
- - Allow arena index lookup based on allocation addresses via mallctl.
- (@lionkov)
- - Allow disabling initial-exec TLS model. (@davidtgoldblatt, @KenMacD)
- - Add opt.lg_extent_max_active_fit to set the max ratio between the size of
- the active extent selected (to split off from) and the size of the requested
- allocation. (@interwq, @davidtgoldblatt)
- - Add retain_grow_limit to set the max size when growing virtual address
- space. (@interwq)
- - Add mallctl interfaces:
- + arena.<i>.retain_grow_limit (@interwq)
- + arenas.lookup (@lionkov)
- + max_background_threads (@djwatson)
- + opt.lg_extent_max_active_fit (@interwq)
- + opt.max_background_threads (@djwatson)
- + opt.metadata_thp (@interwq)
- + opt.thp (@interwq)
- + stats.metadata_thp (@interwq)
-
- Portability improvements:
- - Support GNU/kFreeBSD configuration. (@paravoid)
- - Support m68k, nios2 and SH3 architectures. (@paravoid)
- - Fall back to FD_CLOEXEC when O_CLOEXEC is unavailable. (@zonyitoo)
- - Fix symbol listing for cross-compiling. (@tamird)
- - Fix high bits computation on ARM. (@davidtgoldblatt, @paravoid)
- - Disable the CPU_SPINWAIT macro for Power. (@davidtgoldblatt, @marxin)
- - Fix MSVC 2015 & 2017 builds. (@rustyx)
- - Improve RISC-V support. (@EdSchouten)
- - Set name mangling script in strict mode. (@nicolov)
- - Avoid MADV_HUGEPAGE on ARM. (@marxin)
- - Modify configure to determine return value of strerror_r.
- (@davidtgoldblatt, @cferris1000)
- - Make sure CXXFLAGS is tested with CPP compiler. (@nehaljwani)
- - Fix 32-bit build on MSVC. (@rustyx)
- - Fix external symbol on MSVC. (@maksqwe)
- - Avoid a printf format specifier warning. (@jasone)
- - Add configure option --disable-initial-exec-tls which can allow jemalloc to
- be dynamically loaded after program startup. (@davidtgoldblatt, @KenMacD)
- - AArch64: Add ILP32 support. (@cmuellner)
- - Add --with-lg-vaddr configure option to support cross compiling.
- (@cmuellner, @davidtgoldblatt)
-
- Optimizations and refactors:
- - Improve active extent fit with extent_max_active_fit. This considerably
- reduces fragmentation over time and improves virtual memory and metadata
- usage. (@davidtgoldblatt, @interwq)
- - Eagerly coalesce large extents to reduce fragmentation. (@interwq)
- - sdallocx: only read size info when page aligned (i.e. possibly sampled),
- which speeds up the sized deallocation path significantly. (@interwq)
- - Avoid attempting new mappings for in place expansion with retain, since
- it rarely succeeds in practice and causes high overhead. (@interwq)
- - Refactor OOM handling in newImpl. (@wqfish)
- - Add internal fine-grained logging functionality for debugging use.
- (@davidtgoldblatt)
- - Refactor arena / tcache interactions. (@davidtgoldblatt)
- - Refactor extent management with dumpable flag. (@davidtgoldblatt)
- - Add runtime detection of lazy purging. (@interwq)
- - Use pairing heap instead of red-black tree for extents_avail. (@djwatson)
- - Use sysctl on startup in FreeBSD. (@trasz)
- - Use thread local prng state instead of atomic. (@djwatson)
- - Make decay to always purge one more extent than before, because in
- practice large extents are usually the ones that cross the decay threshold.
- Purging the additional extent helps save memory as well as reduce VM
- fragmentation. (@interwq)
- - Fast division by dynamic values. (@davidtgoldblatt)
- - Improve the fit for aligned allocation. (@interwq, @edwinsmith)
- - Refactor extent_t bitpacking. (@rkmisra)
- - Optimize the generated assembly for ticker operations. (@davidtgoldblatt)
- - Convert stats printing to use a structured text emitter. (@davidtgoldblatt)
- - Remove preserve_lru feature for extents management. (@djwatson)
- - Consolidate two memory loads into one on the fast deallocation path.
- (@davidtgoldblatt, @interwq)
-
- Bug fixes (most of the issues are only relevant to jemalloc 5.0):
- - Fix deadlock with multithreaded fork in OS X. (@davidtgoldblatt)
- - Validate returned file descriptor before use. (@zonyitoo)
- - Fix a few background thread initialization and shutdown issues. (@interwq)
- - Fix an extent coalesce + decay race by taking both coalescing extents off
- the LRU list. (@interwq)
- - Fix potentially unbound increase during decay, caused by one thread keep
- stashing memory to purge while other threads generating new pages. The
- number of pages to purge is checked to prevent this. (@interwq)
- - Fix a FreeBSD bootstrap assertion. (@strejda, @interwq)
- - Handle 32 bit mutex counters. (@rkmisra)
- - Fix a indexing bug when creating background threads. (@davidtgoldblatt,
- @binliu19)
- - Fix arguments passed to extent_init. (@yuleniwo, @interwq)
- - Fix addresses used for ordering mutexes. (@rkmisra)
- - Fix abort_conf processing during bootstrap. (@interwq)
- - Fix include path order for out-of-tree builds. (@cmuellner)
-
- Incompatible changes:
- - Remove --disable-thp. (@interwq)
- - Remove mallctl interfaces:
- + config.thp (@interwq)
-
- Documentation:
- - Add TUNING.md. (@interwq, @davidtgoldblatt, @djwatson)
-
-* 5.0.1 (July 1, 2017)
-
- This bugfix release fixes several issues, most of which are obscure enough
- that typical applications are not impacted.
-
- Bug fixes:
- - Update decay->nunpurged before purging, in order to avoid potential update
- races and subsequent incorrect purging volume. (@interwq)
- - Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy
- locking and/or background threads). This mitigates an initialization
- failure bug for which we still do not have a clear reproduction test case.
- (@interwq)
- - Modify tsd management so that it neither crashes nor leaks if a thread's
- only allocation activity is to call free() after TLS destructors have been
- executed. This behavior was observed when operating with GNU libc, and is
- unlikely to be an issue with other libc implementations. (@interwq)
- - Mask signals during background thread creation. This prevents signals from
- being inadvertently delivered to background threads. (@jasone,
- @davidtgoldblatt, @interwq)
- - Avoid inactivity checks within background threads, in order to prevent
- recursive mutex acquisition. (@interwq)
- - Fix extent_grow_retained() to use the specified hooks when the
- arena.<i>.extent_hooks mallctl is used to override the default hooks.
- (@interwq)
- - Add missing reentrancy support for custom extent hooks which allocate.
- (@interwq)
- - Post-fork(2), re-initialize the list of tcaches associated with each arena
- to contain no tcaches except the forking thread's. (@interwq)
- - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx. This
- fixes potential deadlocks after fork(2). (@interwq)
- - Enforce minimum autoconf version (currently 2.68), since 2.63 is known to
- generate corrupt configure scripts. (@jasone)
- - Ensure that the configured page size (--with-lg-page) is no larger than the
- configured huge page size (--with-lg-hugepage). (@jasone)
-
-* 5.0.0 (June 13, 2017)
-
- Unlike all previous jemalloc releases, this release does not use naturally
- aligned "chunks" for virtual memory management, and instead uses page-aligned
- "extents". This change has few externally visible effects, but the internal
- impacts are... extensive. Many other internal changes combine to make this
- the most cohesively designed version of jemalloc so far, with ample
- opportunity for further enhancements.
-
- Continuous integration is now an integral aspect of development thanks to the
- efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably
- stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a
- side effect the official release frequency may decrease over time.
-
- New features:
- - Implement optional per-CPU arena support; threads choose which arena to use
- based on current CPU rather than on fixed thread-->arena associations.
- (@interwq)
- - Implement two-phase decay of unused dirty pages. Pages transition from
- dirty-->muzzy-->clean, where the first phase transition relies on
- madvise(... MADV_FREE) semantics, and the second phase transition discards
- pages such that they are replaced with demand-zeroed pages on next access.
- (@jasone)
- - Increase decay time resolution from seconds to milliseconds. (@jasone)
- - Implement opt-in per CPU background threads, and use them for asynchronous
- decay-driven unused dirty page purging. (@interwq)
- - Add mutex profiling, which collects a variety of statistics useful for
- diagnosing overhead/contention issues. (@interwq)
- - Add C++ new/delete operator bindings. (@djwatson)
- - Support manually created arena destruction, such that all data and metadata
- are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats
- associated with destroyed arenas. (@jasone)
- - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing
- merged/destroyed arena statistics via mallctl. (@jasone)
- - Add opt.abort_conf to optionally abort if invalid configuration options are
- detected during initialization. (@interwq)
- - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the
- stats dumped during exit if opt.stats_print is true. (@jasone)
- - Add --with-version=VERSION for use when embedding jemalloc into another
- project's git repository. (@jasone)
- - Add --disable-thp to support cross compiling. (@jasone)
- - Add --with-lg-hugepage to support cross compiling. (@jasone)
- - Add mallctl interfaces (various authors):
- + background_thread
- + opt.abort_conf
- + opt.retain
- + opt.percpu_arena
- + opt.background_thread
- + opt.{dirty,muzzy}_decay_ms
- + opt.stats_print_opts
- + arena.<i>.initialized
- + arena.<i>.destroy
- + arena.<i>.{dirty,muzzy}_decay_ms
- + arena.<i>.extent_hooks
- + arenas.{dirty,muzzy}_decay_ms
- + arenas.bin.<i>.slab_size
- + arenas.nlextents
- + arenas.lextent.<i>.size
- + arenas.create
- + stats.background_thread.{num_threads,num_runs,run_interval}
- + stats.mutexes.{ctl,background_thread,prof,reset}.
- {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
- num_owner_switch}
- + stats.arenas.<i>.{dirty,muzzy}_decay_ms
- + stats.arenas.<i>.uptime
- + stats.arenas.<i>.{pmuzzy,base,internal,resident}
- + stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
- + stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs}
- + stats.arenas.<i>.bins.<j>.mutex.
- {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
- num_owner_switch}
- + stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents}
- + stats.arenas.i.mutexes.{large,extent_avail,extents_dirty,extents_muzzy,
- extents_retained,decay_dirty,decay_muzzy,base,tcache_list}.
- {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
- num_owner_switch}
-
- Portability improvements:
- - Improve reentrant allocation support, such that deadlock is less likely if
- e.g. a system library call in turn allocates memory. (@davidtgoldblatt,
- @interwq)
- - Support static linking of jemalloc with glibc. (@djwatson)
-
- Optimizations and refactors:
- - Organize virtual memory as "extents" of virtual memory pages, rather than as
- naturally aligned "chunks", and store all metadata in arbitrarily distant
- locations. This reduces virtual memory external fragmentation, and will
- interact better with huge pages (not yet explicitly supported). (@jasone)
- - Fold large and huge size classes together; only small and large size classes
- remain. (@jasone)
- - Unify the allocation paths, and merge most fast-path branching decisions.
- (@davidtgoldblatt, @interwq)
- - Embed per thread automatic tcache into thread-specific data, which reduces
- conditional branches and dereferences. Also reorganize tcache to increase
- fast-path data locality. (@interwq)
- - Rewrite atomics to closely model the C11 API, convert various
- synchronization from mutex-based to atomic, and use the explicit memory
- ordering control to resolve various hypothetical races without increasing
- synchronization overhead. (@davidtgoldblatt)
- - Extensively optimize rtree via various methods:
- + Add multiple layers of rtree lookup caching, since rtree lookups are now
- part of fast-path deallocation. (@interwq)
- + Determine rtree layout at compile time. (@jasone)
- + Make the tree shallower for common configurations. (@jasone)
- + Embed the root node in the top-level rtree data structure, thus avoiding
- one level of indirection. (@jasone)
- + Further specialize leaf elements as compared to internal node elements,
- and directly embed extent metadata needed for fast-path deallocation.
- (@jasone)
- + Ignore leading always-zero address bits (architecture-specific).
- (@jasone)
- - Reorganize headers (ongoing work) to make them hermetic, and disentangle
- various module dependencies. (@davidtgoldblatt)
- - Convert various internal data structures such as size class metadata from
- boot-time-initialized to compile-time-initialized. Propagate resulting data
- structure simplifications, such as making arena metadata fixed-size.
- (@jasone)
- - Simplify size class lookups when constrained to size classes that are
- multiples of the page size. This speeds lookups, but the primary benefit is
- complexity reduction in code that was the source of numerous regressions.
- (@jasone)
- - Lock individual extents when possible for localized extent operations,
- rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone)
- - Use first fit layout policy instead of best fit, in order to improve
- packing. (@jasone)
- - If munmap(2) is not in use, use an exponential series to grow each arena's
- virtual memory, so that the number of disjoint virtual memory mappings
- remains low. (@jasone)
- - Implement per arena base allocators, so that arenas never share any virtual
- memory pages. (@jasone)
- - Automatically generate private symbol name mangling macros. (@jasone)
-
- Incompatible changes:
- - Replace chunk hooks with an expanded/normalized set of extent hooks.
- (@jasone)
- - Remove ratio-based purging. (@jasone)
- - Remove --disable-tcache. (@jasone)
- - Remove --disable-tls. (@jasone)
- - Remove --enable-ivsalloc. (@jasone)
- - Remove --with-lg-size-class-group. (@jasone)
- - Remove --with-lg-tiny-min. (@jasone)
- - Remove --disable-cc-silence. (@jasone)
- - Remove --enable-code-coverage. (@jasone)
- - Remove --disable-munmap (replaced by opt.retain). (@jasone)
- - Remove Valgrind support. (@jasone)
- - Remove quarantine support. (@jasone)
- - Remove redzone support. (@jasone)
- - Remove mallctl interfaces (various authors):
- + config.munmap
- + config.tcache
- + config.tls
- + config.valgrind
- + opt.lg_chunk
- + opt.purge
- + opt.lg_dirty_mult
- + opt.decay_time
- + opt.quarantine
- + opt.redzone
- + opt.thp
- + arena.<i>.lg_dirty_mult
- + arena.<i>.decay_time
- + arena.<i>.chunk_hooks
- + arenas.initialized
- + arenas.lg_dirty_mult
- + arenas.decay_time
- + arenas.bin.<i>.run_size
- + arenas.nlruns
- + arenas.lrun.<i>.size
- + arenas.nhchunks
- + arenas.hchunk.<i>.size
- + arenas.extend
- + stats.cactive
- + stats.arenas.<i>.lg_dirty_mult
- + stats.arenas.<i>.decay_time
- + stats.arenas.<i>.metadata.{mapped,allocated}
- + stats.arenas.<i>.{npurge,nmadvise,purged}
- + stats.arenas.<i>.huge.{allocated,nmalloc,ndalloc,nrequests}
- + stats.arenas.<i>.bins.<j>.{nruns,reruns,curruns}
- + stats.arenas.<i>.lruns.<j>.{nmalloc,ndalloc,nrequests,curruns}
- + stats.arenas.<i>.hchunks.<j>.{nmalloc,ndalloc,nrequests,curhchunks}
-
- Bug fixes:
- - Improve interval-based profile dump triggering to dump only one profile when
- a single allocation's size exceeds the interval. (@jasone)
- - Use prefixed function names (as controlled by --with-jemalloc-prefix) when
- pruning backtrace frames in jeprof. (@jasone)
-
-* 4.5.0 (February 28, 2017)
-
- This is the first release to benefit from much broader continuous integration
- testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure
- in place for prior releases, it would have caught all of the most serious
- regressions fixed by this release.
-
- New features:
- - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for
- transparent huge page integration. (@jasone)
- - Update zone allocator integration to work with macOS 10.12. (@glandium)
- - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and
- EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not
- during configuration. (@jasone, @ronawho)
-
- Bug fixes:
- - Fix DSS (sbrk(2)-based) allocation. This regression was first released in
- 4.3.0. (@jasone)
- - Handle race in per size class utilization computation. This functionality
- was first released in 4.0.0. (@interwq)
- - Fix lock order reversal during gdump. (@jasone)
- - Fix/refactor tcache synchronization. This regression was first released in
- 4.0.0. (@jasone)
- - Fix various JSON-formatted malloc_stats_print() bugs. This functionality
- was first released in 4.3.0. (@jasone)
- - Fix huge-aligned allocation. This regression was first released in 4.4.0.
- (@jasone)
- - When transparent huge page integration is enabled, detect what state pages
- start in according to the kernel's current operating mode, and only convert
- arena chunks to non-huge during purging if that is not their initial state.
- This functionality was first released in 4.4.0. (@jasone)
- - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case.
- This regression was first released in 4.0.0. (@jasone, @428desmo)
- - Properly detect sparc64 when building for Linux. (@glaubitz)
-
-* 4.4.0 (December 3, 2016)
-
- New features:
- - Add configure support for *-*-linux-android. (@cferris1000, @jasone)
- - Add the --disable-syscall configure option, for use on systems that place
- security-motivated limitations on syscall(2). (@jasone)
- - Add support for Debian GNU/kFreeBSD. (@thesam)
-
- Optimizations:
- - Add extent serial numbers and use them where appropriate as a sort key that
- is higher priority than address, so that the allocation policy prefers older
- extents. This tends to improve locality (decrease fragmentation) when
- memory grows downward. (@jasone)
- - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized
- on Linux 4.5 and newer. (@jasone)
- - Mark partially purged arena chunks as non-huge-page. This improves
- interaction with Linux's transparent huge page functionality. (@jasone)
-
- Bug fixes:
- - Fix size class computations for edge conditions involving extremely large
- allocations. This regression was first released in 4.0.0. (@jasone,
- @ingvarha)
- - Remove overly restrictive assertions related to the cactive statistic. This
- regression was first released in 4.1.0. (@jasone)
- - Implement a more reliable detection scheme for os_unfair_lock on macOS.
- (@jszakmeister)
-
-* 4.3.1 (November 7, 2016)
-
- Bug fixes:
- - Fix a severe virtual memory leak. This regression was first released in
- 4.3.0. (@interwq, @jasone)
- - Refactor atomic and prng APIs to restore support for 32-bit platforms that
- use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone)
-
-* 4.3.0 (November 4, 2016)
-
- This is the first release that passes the test suite for multiple Windows
- configurations, thanks in large part to @glandium setting up continuous
- integration via AppVeyor (and Travis CI for Linux and OS X).
-
- New features:
- - Add "J" (JSON) support to malloc_stats_print(). (@jasone)
- - Add Cray compiler support. (@ronawho)
-
- Optimizations:
- - Add/use adaptive spinning for bootstrapping and radix tree node
- initialization. (@jasone)
-
- Bug fixes:
- - Fix large allocation to search starting in the optimal size class heap,
- which can substantially reduce virtual memory churn and fragmentation. This
- regression was first released in 4.0.0. (@mjp41, @jasone)
- - Fix stats.arenas.<i>.nthreads accounting. (@interwq)
- - Fix and simplify decay-based purging. (@jasone)
- - Make DSS (sbrk(2)-related) operations lockless, which resolves potential
- deadlocks during thread exit. (@jasone)
- - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun,
- @jasone)
- - Fix over-sized allocation of arena_t (plus associated stats) data
- structures. (@jasone, @interwq)
- - Fix EXTRA_CFLAGS to not affect configuration. (@jasone)
- - Fix a Valgrind integration bug. (@ronawho)
- - Disallow 0x5a junk filling when running in Valgrind. (@jasone)
- - Fix a file descriptor leak on Linux. This regression was first released in
- 4.2.0. (@vsarunas, @jasone)
- - Fix static linking of jemalloc with glibc. (@djwatson)
- - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This
- works around other libraries' system call wrappers performing reentrant
- allocation. (@kspinka, @Whissi, @jasone)
- - Fix OS X default zone replacement to work with OS X 10.12. (@glandium,
- @jasone)
- - Fix cached memory management to avoid needless commit/decommit operations
- during purging, which resolves permanent virtual memory map fragmentation
- issues on Windows. (@mjp41, @jasone)
- - Fix TSD fetches to avoid (recursive) allocation. This is relevant to
- non-TLS and Windows configurations. (@jasone)
- - Fix malloc_conf overriding to work on Windows. (@jasone)
- - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone)
-
-* 4.2.1 (June 8, 2016)
-
- Bug fixes:
- - Fix bootstrapping issues for configurations that require allocation during
- tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone)
- - Fix gettimeofday() version of nstime_update(). (@ronawho)
- - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho)
- - Fix potential VM map fragmentation regression. (@jasone)
- - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone)
- - Fix heap profiling context leaks in reallocation edge cases. (@jasone)
-
-* 4.2.0 (May 12, 2016)
-
- New features:
- - Add the arena.<i>.reset mallctl, which makes it possible to discard all of
- an arena's allocations in a single operation. (@jasone)
- - Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone)
- - Add the --with-version configure option. (@jasone)
- - Support --with-lg-page values larger than actual page size. (@jasone)
-
- Optimizations:
- - Use pairing heaps rather than red-black trees for various hot data
- structures. (@djwatson, @jasone)
- - Streamline fast paths of rtree operations. (@jasone)
- - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone)
- - Decommit unused virtual memory if the OS does not overcommit. (@jasone)
- - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
- to avoid unfortunate interactions during fork(2). (@jasone)
-
- Bug fixes:
- - Fix chunk accounting related to triggering gdump profiles. (@jasone)
- - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone)
- - Scale leak report summary according to sampling probability. (@jasone)
-
-* 4.1.1 (May 3, 2016)
-
- This bugfix release resolves a variety of mostly minor issues, though the
- bitmap fix is critical for 64-bit Windows.
-
- Bug fixes:
- - Fix the linear scan version of bitmap_sfu() to shift by the proper amount
- even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
- Windows. (@jasone)
- - Fix hashing functions to avoid unaligned memory accesses (and resulting
- crashes). This is relevant at least to some ARM-based platforms.
- (@rkmisra)
- - Fix fork()-related lock rank ordering reversals. These reversals were
- unlikely to cause deadlocks in practice except when heap profiling was
- enabled and active. (@jasone)
- - Fix various chunk leaks in OOM code paths. (@jasone)
- - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone)
- - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin)
- - Fix a variety of test failures that were due to test fragility rather than
- core bugs. (@jasone)
-
-* 4.1.0 (February 28, 2016)
-
- This release is primarily about optimizations, but it also incorporates a lot
- of portability-motivated refactoring and enhancements. Many people worked on
- this release, to an extent that even with the omission here of minor changes
- (see git revision history), and of the people who reported and diagnosed
- issues, so much of the work was contributed that starting with this release,
- changes are annotated with author credits to help reflect the collaborative
- effort involved.
-
- New features:
- - Implement decay-based unused dirty page purging, a major optimization with
- mallctl API impact. This is an alternative to the existing ratio-based
- unused dirty page purging, and is intended to eventually become the sole
- purging mechanism. New mallctls:
- + opt.purge
- + opt.decay_time
- + arena.<i>.decay
- + arena.<i>.decay_time
- + arenas.decay_time
- + stats.arenas.<i>.decay_time
- (@jasone, @cevans87)
- - Add --with-malloc-conf, which makes it possible to embed a default
- options string during configuration. This was motivated by the desire to
- specify --with-malloc-conf=purge:decay , since the default must remain
- purge:ratio until the 5.0.0 release. (@jasone)
- - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin)
- - Make *allocx() size class overflow behavior defined. The maximum
- size class is now less than PTRDIFF_MAX to protect applications against
- numerical overflow, and all allocation functions are guaranteed to indicate
- errors rather than potentially crashing if the request size exceeds the
- maximum size class. (@jasone)
- - jeprof:
- + Add raw heap profile support. (@jasone)
- + Add --retain and --exclude for backtrace symbol filtering. (@jasone)
-
- Optimizations:
- - Optimize the fast path to combine various bootstrapping and configuration
- checks and execute more streamlined code in the common case. (@interwq)
- - Use linear scan for small bitmaps (used for small object tracking). In
- addition to speeding up bitmap operations on 64-bit systems, this reduces
- allocator metadata overhead by approximately 0.2%. (@djwatson)
- - Separate arena_avail trees, which substantially speeds up run tree
- operations. (@djwatson)
- - Use memoization (boot-time-computed table) for run quantization. Separate
- arena_avail trees reduced the importance of this optimization. (@jasone)
- - Attempt mmap-based in-place huge reallocation. This can dramatically speed
- up incremental huge reallocation. (@jasone)
-
- Incompatible changes:
- - Make opt.narenas unsigned rather than size_t. (@jasone)
-
- Bug fixes:
- - Fix stats.cactive accounting regression. (@rustyx, @jasone)
- - Handle unaligned keys in hash(). This caused problems for some ARM systems.
- (@jasone, @cferris1000)
- - Refactor arenas array. In addition to fixing a fork-related deadlock, this
- makes arena lookups faster and simpler. (@jasone)
- - Move retained memory allocation out of the default chunk allocation
- function, to a location that gets executed even if the application installs
- a custom chunk allocation function. This resolves a virtual memory leak.
- (@buchgr)
- - Fix a potential tsd cleanup leak. (@cferris1000, @jasone)
- - Fix run quantization. In practice this bug had no impact unless
- applications requested memory with alignment exceeding one page.
- (@jasone, @djwatson)
- - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv)
- - jeprof:
- + Don't discard curl options if timeout is not defined. (@djwatson)
- + Detect failed profile fetches. (@djwatson)
- - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
- --disable-stats case. (@jasone)
-
-* 4.0.4 (October 24, 2015)
-
- This bugfix release fixes another xallocx() regression. No other regressions
- have come to light in over a month, so this is likely a good starting point
- for people who prefer to wait for "dot one" releases with all the major issues
- shaken out.
-
- Bug fixes:
- - Fix xallocx(..., MALLOCX_ZERO to zero the last full trailing page of large
- allocations that have been randomly assigned an offset of 0 when
- --enable-cache-oblivious configure option is enabled.
-
-* 4.0.3 (September 24, 2015)
-
- This bugfix release continues the trend of xallocx() and heap profiling fixes.
-
- Bug fixes:
- - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large
- allocations when --enable-cache-oblivious configure option is enabled.
- - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations
- when resizing from/to a size class that is not a multiple of the chunk size.
- - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap
- profile dumping started.
- - Work around a potentially bad thread-specific data initialization
- interaction with NPTL (glibc's pthreads implementation).
-
-* 4.0.2 (September 21, 2015)
-
- This bugfix release addresses a few bugs specific to heap profiling.
-
- Bug fixes:
- - Fix ixallocx_prof_sample() to never modify nor create sampled small
- allocations. xallocx() is in general incapable of moving small allocations,
- so this fix removes buggy code without loss of generality.
- - Fix irallocx_prof_sample() to always allocate large regions, even when
- alignment is non-zero.
- - Fix prof_alloc_rollback() to read tdata from thread-specific data rather
- than dereferencing a potentially invalid tctx.
-
-* 4.0.1 (September 15, 2015)
-
- This is a bugfix release that is somewhat high risk due to the amount of
- refactoring required to address deep xallocx() problems. As a side effect of
- these fixes, xallocx() now tries harder to partially fulfill requests for
- optional extra space. Note that a couple of minor heap profiling
- optimizations are included, but these are better thought of as performance
- fixes that were integral to discovering most of the other bugs.
-
- Optimizations:
- - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
- fast path when heap profiling is enabled. Additionally, split a special
- case out into arena_prof_tctx_reset(), which also avoids chunk metadata
- reads.
- - Optimize irallocx_prof() to optimistically update the sampler state. The
- prior implementation appears to have been a holdover from when
- rallocx()/xallocx() functionality was combined as rallocm().
-
- Bug fixes:
- - Fix TLS configuration such that it is enabled by default for platforms on
- which it works correctly.
- - Fix arenas_cache_cleanup() and arena_get_hard() to handle
- allocation/deallocation within the application's thread-specific data
- cleanup functions even after arenas_cache is torn down.
- - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
- - Fix chunk purge hook calls for in-place huge shrinking reallocation to
- specify the old chunk size rather than the new chunk size. This bug caused
- no correctness issues for the default chunk purge function, but was
- visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
- - Fix heap profiling bugs:
- + Fix heap profiling to distinguish among otherwise identical sample sites
- with interposed resets (triggered via the "prof.reset" mallctl). This bug
- could cause data structure corruption that would most likely result in a
- segfault.
- + Fix irealloc_prof() to prof_alloc_rollback() on OOM.
- + Make one call to prof_active_get_unlocked() per allocation event, and use
- the result throughout the relevant functions that handle an allocation
- event. Also add a missing check in prof_realloc(). These fixes protect
- allocation events against concurrent prof_active changes.
- + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
- in the correct order.
- + Fix prof_realloc() to call prof_free_sampled_object() after calling
- prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were
- the same, the tctx could have been prematurely destroyed.
- - Fix portability bugs:
- + Don't bitshift by negative amounts when encoding/decoding run sizes in
- chunk header maps. This affected systems with page sizes greater than 8
- KiB.
- + Rename index_t to szind_t to avoid an existing type on Solaris.
- + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
- match glibc and avoid compilation errors when including both
- jemalloc/jemalloc.h and malloc.h in C++ code.
- + Don't assume that /bin/sh is appropriate when running size_classes.sh
- during configuration.
- + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
- + Link tests to librt if it contains clock_gettime(2).
-
-* 4.0.0 (August 17, 2015)
-
- This version contains many speed and space optimizations, both minor and
- major. The major themes are generalization, unification, and simplification.
- Although many of these optimizations cause no visible behavior change, their
- cumulative effect is substantial.
-
- New features:
- - Normalize size class spacing to be consistent across the complete size
- range. By default there are four size classes per size doubling, but this
- is now configurable via the --with-lg-size-class-group option. Also add the
- --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
- --with-lg-tiny-min options, which can be used to tweak page and size class
- settings. Impacts:
- + Worst case performance for incrementally growing/shrinking reallocation
- is improved because there are far fewer size classes, and therefore
- copying happens less often.
- + Internal fragmentation is limited to 20% for all but the smallest size
- classes (those less than four times the quantum). (1B + 4 KiB)
- and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
- + Chunk fragmentation tends to be lower because there are fewer distinct run
- sizes to pack.
- - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and
- "tcache.destroy" mallctls control tcache lifetime and flushing, and the
- MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
- control which tcache is used for each operation.
- - Implement per thread heap profiling, as well as the ability to
- enable/disable heap profiling on a per thread basis. Add the "prof.reset",
- "prof.lg_sample", "thread.prof.name", "thread.prof.active",
- "opt.prof_thread_active_init", "prof.thread_active_init", and
- "thread.prof.active" mallctls.
- - Add support for per arena application-specified chunk allocators, configured
- via the "arena.<i>.chunk_hooks" mallctl.
- - Refactor huge allocation to be managed by arenas, so that arenas now
- function as general purpose independent allocators. This is important in
- the context of user-specified chunk allocators, aside from the scalability
- benefits. Related new statistics:
- + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
- "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
- mallctls provide high level per arena huge allocation statistics.
- + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
- "stats.arenas.<i>.hchunks.<j>.nmalloc",
- "stats.arenas.<i>.hchunks.<j>.ndalloc",
- "stats.arenas.<i>.hchunks.<j>.nrequests", and
- "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
- statistics.
- - Add the 'util' column to malloc_stats_print() output, which reports the
- proportion of available regions that are currently in use for each small
- size class.
- - Add "alloc" and "free" modes for for junk filling (see the "opt.junk"
- mallctl), so that it is possible to separately enable junk filling for
- allocation versus deallocation.
- - Add the jemalloc-config script, which provides information about how
- jemalloc was configured, and how to integrate it into application builds.
- - Add metadata statistics, which are accessible via the "stats.metadata",
- "stats.arenas.<i>.metadata.mapped", and
- "stats.arenas.<i>.metadata.allocated" mallctls.
- - Add the "stats.resident" mallctl, which reports the upper limit of
- physically resident memory mapped by the allocator.
- - Add per arena control over unused dirty page purging, via the
- "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
- "stats.arenas.<i>.lg_dirty_mult" mallctls.
- - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
- feature on/off during program execution.
- - Add sdallocx(), which implements sized deallocation. The primary
- optimization over dallocx() is the removal of a metadata read, which often
- suffers an L1 cache miss.
- - Add missing header includes in jemalloc/jemalloc.h, so that applications
- only have to #include <jemalloc/jemalloc.h>.
- - Add support for additional platforms:
- + Bitrig
- + Cygwin
- + DragonFlyBSD
- + iOS
- + OpenBSD
- + OpenRISC/or1k
-
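- As a brief illustration of the explicit tcache and sized deallocation
- interfaces described above, consider the following sketch (not part of the
- release notes; the wrapper function name is ours, and it assumes the
- unprefixed public API from <jemalloc/jemalloc.h>):
-
-   #include <stddef.h>
-   #include <jemalloc/jemalloc.h>
-
-   void tcache_example(void) {
-     unsigned tc;
-     size_t sz = sizeof(tc);
-     /* Create a private tcache and route an allocation through it. */
-     if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
-       return;
-     void *p = mallocx(4096, MALLOCX_TCACHE(tc));
-     /* sdallocx() avoids the metadata read on the deallocation path. */
-     if (p != NULL)
-       sdallocx(p, 4096, MALLOCX_TCACHE(tc));
-     mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
-   }
-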
- Optimizations:
- - Maintain dirty runs in per arena LRUs rather than in per arena trees of
- dirty-run-containing chunks. In practice this change significantly reduces
- dirty page purging volume.
- - Integrate whole chunks into the unused dirty page purging machinery. This
- reduces the cost of repeated huge allocation/deallocation, because it
- effectively introduces a cache of chunks.
- - Split the arena chunk map into two separate arrays, in order to increase
- cache locality for the frequently accessed bits.
- - Move small run metadata out of runs, into arena chunk headers. This reduces
- run fragmentation, smaller runs reduce external fragmentation for small size
- classes, and packed (less uniformly aligned) metadata layout improves CPU
- cache set distribution.
- - Randomly distribute large allocation base pointer alignment relative to page
- boundaries in order to more uniformly utilize CPU cache sets. This can be
- disabled via the --disable-cache-oblivious configure option, and queried via
- the "config.cache_oblivious" mallctl.
- - Micro-optimize the fast paths for the public API functions.
- - Refactor thread-specific data to reside in a single structure. This assures
- that only a single TLS read is necessary per call into the public API.
- - Implement in-place huge allocation growing and shrinking.
- - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
- additional optimizations that reduce maximum lookup depth to one or two
- levels. This resolves what was a concurrency bottleneck for per arena huge
- allocation, because a global data structure is critical for determining
- which arenas own which huge allocations.
-
- Incompatible changes:
- - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
- warnings by default.
- - Assure that the constness of malloc_usable_size()'s return type matches that
- of the system implementation.
- - Change the heap profile dump format to support per thread heap profiling,
- rename pprof to jeprof, and enhance it with the --thread=<n> option. As a
- result, the bundled jeprof must now be used rather than the upstream
- (gperftools) pprof.
- - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
- internally deadlock on some platforms.
- - Change the "arenas.nlruns" mallctl type from size_t to unsigned.
- - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
- "stats.arenas.<i>.bins.<j>.curregs".
- - Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
- - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
- MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.
-
- Removed features:
- - Remove the *allocm() API, which is superseded by the *allocx() API.
- Remove the --enable-dss option, and make dss non-optional on all platforms
- which support sbrk(2).
- - Remove the "arenas.purge" mallctl, which was obsoleted by the
- "arena.<i>.purge" mallctl in 3.1.0.
- - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
- detects whether it is running inside Valgrind.
- - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
- "stats.huge.ndalloc" mallctls.
- - Remove the --enable-mremap option.
- - Remove the "stats.chunks.current", "stats.chunks.total", and
- "stats.chunks.high" mallctls.
-
- Bug fixes:
- - Fix the cactive statistic to decrease (rather than increase) when active
- memory decreases. This regression was first released in 3.5.0.
- - Fix OOM handling in memalign() and valloc(). A variant of this bug existed
- in all releases since 2.0.0, which introduced these functions.
- - Fix an OOM-related regression in arena_tcache_fill_small(), which could
- cause cache corruption on OOM. This regression was present in all releases
- from 2.2.0 through 3.6.0.
- - Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
- calloc(), and realloc() when profiling is enabled.
- - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
- "secondary" precedence is specified, but sbrk(2) is not supported.
- - Fix fallback lg_floor() implementations to handle extremely large inputs.
- - Ensure the default purgeable zone is after the default zone on OS X.
- - Fix latent bugs in atomic_*().
- - Fix the "arena.<i>.dss" mallctl to handle read-only calls.
- - Fix tls_model configuration to enable the initial-exec model when possible.
- - Mark malloc_conf as a weak symbol so that the application can override it.
- - Correctly detect glibc's adaptive pthread mutexes.
- - Fix the --without-export configure option.
-
-* 3.6.0 (March 31, 2014)
-
- This version contains a critical bug fix for a regression present in 3.5.0 and
- 3.5.1.
-
- Bug fixes:
- - Fix a regression in arena_chunk_alloc() that caused crashes during
- small/large allocation if chunk allocation failed. In the absence of this
- bug, chunk allocation failure would result in allocation failure, e.g. NULL
- return from malloc(). This regression was introduced in 3.5.0.
- Fix gcc intrinsics-based backtracing by specifying
- -fno-omit-frame-pointer to gcc. Note that the application (and all the
- libraries it links to) must also be compiled with this option for
- backtracing to be reliable.
- - Use dss allocation precedence for huge allocations as well as small/large
- allocations.
- - Fix test assertion failure message formatting. This bug did not manifest on
- x86_64 systems because of implementation subtleties in va_list.
- - Fix inconsequential test failures for hash and SFMT code.
-
- New features:
- - Support heap profiling on FreeBSD. This feature depends on the proc
- filesystem being mounted during heap profile dumping.
-
-* 3.5.1 (February 25, 2014)
-
- This version primarily addresses minor bugs in test code.
-
- Bug fixes:
- - Configure Solaris/Illumos to use MADV_FREE.
- - Fix junk filling for mremap(2)-based huge reallocation. This is only
- relevant if configuring with the --enable-mremap option specified.
- - Avoid compilation failure if 'restrict' C99 keyword is not supported by the
- compiler.
- - Add a configure test for SSE2 rather than assuming it is usable on i686
- systems. This fixes test compilation errors, especially on 32-bit Linux
- systems.
- - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit
- test.
- - Fix/remove flawed alignment-related overflow tests.
- - Prevent compiler optimizations that could change backtraces in the
- prof_accum unit test.
-
-* 3.5.0 (January 22, 2014)
-
- This version focuses on refactoring and automated testing, though it also
- includes some non-trivial heap profiling optimizations not mentioned below.
-
- New features:
- - Add the *allocx() API, which is a successor to the experimental *allocm()
- API. The *allocx() functions are slightly simpler to use because they have
- fewer parameters, they directly return the results of primary interest, and
- mallocx()/rallocx() avoid the strict aliasing pitfall that
- allocm()/rallocm() share with posix_memalign(). Note that *allocm() is
- slated for removal in the next non-bugfix release.
- - Add support for LinuxThreads.
-
- Bug fixes:
- - Unless heap profiling is enabled, disable floating point code and don't link
- with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64
- systems, makes it possible to completely disable floating point register
- use. Some versions of glibc neglect to save/restore caller-saved floating
- point registers during dynamic lazy symbol loading, and the symbol loading
- code uses whatever malloc the application happens to have linked/loaded
- with, the result being potential floating point register corruption.
- - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling
- backtrace creation in imemalign(). This bug impacted posix_memalign() and
- aligned_alloc().
- - Fix a file descriptor leak in a prof_dump_maps() error path.
- - Fix prof_dump() to close the dump file descriptor for all relevant error
- paths.
- - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for
- allocation, not just deallocation.
- - Fix a data race for large allocation stats counters.
- - Fix a potential infinite loop during thread exit. This bug occurred on
- Solaris, and could affect other platforms with similar pthreads TSD
- implementations.
- - Don't junk-fill reallocations unless usable size changes. This fixes a
- violation of the *allocx()/*allocm() semantics.
- - Fix growing large reallocation to junk fill new space.
- - Fix huge deallocation to junk fill when munmap is disabled.
- - Change the default private namespace prefix from empty to je_, and change
- --with-private-namespace-prefix so that it prepends an additional prefix
- rather than replacing je_. This reduces the likelihood of applications
- which statically link jemalloc experiencing symbol name collisions.
- - Add missing private namespace mangling (relevant when
- --with-private-namespace is specified).
- - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as
- static even for debug builds.
- - Add a missing mutex unlock in a malloc_init_hard() error path. In practice
- this error path is never executed.
- Fix numerous bugs in malloc_strtoumax() error handling/reporting. These
- bugs had no impact except for malformed inputs.
- - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by
- existing calls, so they had no impact.
-
-* 3.4.1 (October 20, 2013)
-
- Bug fixes:
- - Fix a race in the "arenas.extend" mallctl that could cause memory corruption
- of internal data structures and subsequent crashes.
- - Fix Valgrind integration flaws that caused Valgrind warnings about reads of
- uninitialized memory in:
- + arena chunk headers
- + internal zero-initialized data structures (relevant to tcache and prof
- code)
- - Preserve errno during the first allocation. A readlink(2) call during
- initialization fails unless /etc/malloc.conf exists, so errno was typically
- set during the first allocation prior to this fix.
- - Fix compilation warnings reported by gcc 4.8.1.
-
-* 3.4.0 (June 2, 2013)
-
- This version is essentially a small bugfix release, but the addition of
- aarch64 support requires that the minor version be incremented.
-
- Bug fixes:
- - Fix race-triggered deadlocks in chunk_record(). These deadlocks were
- typically triggered by multiple threads concurrently deallocating huge
- objects.
-
- New features:
- - Add support for the aarch64 architecture.
-
-* 3.3.1 (March 6, 2013)
-
- This version fixes bugs that are typically encountered only when utilizing
- custom run-time options.
-
- Bug fixes:
- - Fix a locking order bug that could cause deadlock during fork if heap
- profiling were enabled.
- - Fix a chunk recycling bug that could cause the allocator to lose track of
- whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause
- corruption if allocating via sbrk(2) (unlikely unless running with the
- "dss:primary" option specified). This was completely harmless on Linux
- unless using mlockall(2) (and unlikely even then, unless the
- --disable-munmap configure option or the "dss:primary" option was
- specified). This regression was introduced in 3.1.0 by the
- mlockall(2)/madvise(2) interaction fix.
- - Fix TLS-related memory corruption that could occur during thread exit if the
- thread never allocated memory. Only the quarantine and prof facilities were
- susceptible.
- - Fix two quarantine bugs:
- + Internal reallocation of the quarantined object array leaked the old
- array.
- + Reallocation failure for internal reallocation of the quarantined object
- array (very unlikely) resulted in memory corruption.
- - Fix Valgrind integration to annotate all internally allocated memory in a
- way that keeps Valgrind happy about internal data structure access.
- - Fix building for s390 systems.
-
-* 3.3.0 (January 23, 2013)
-
- This version includes a few minor performance improvements in addition to the
- listed new features and bug fixes.
-
- New features:
- - Add clipping support to lg_chunk option processing.
- - Add the --enable-ivsalloc option.
- - Add the --without-export option.
- - Add the --disable-zone-allocator option.
-
- Bug fixes:
- - Fix "arenas.extend" mallctl to output the number of arenas.
- - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory
- is undefined.
- - Fix build break on FreeBSD related to alloca.h.
-
-* 3.2.0 (November 9, 2012)
-
- In addition to a couple of bug fixes, this version modifies page run
- allocation and dirty page purging algorithms in order to better control
- page-level virtual memory fragmentation.
-
- Incompatible changes:
- - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1).
-
- Bug fixes:
- - Fix dss/mmap allocation precedence code to use recyclable mmap memory only
- after primary dss allocation fails.
- - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced
- in 3.1.0 by the addition of the "arena.<i>.purge" mallctl.
-
-* 3.1.0 (October 16, 2012)
-
- New features:
- - Auto-detect whether running inside Valgrind, thus removing the need to
- manually specify MALLOC_CONF=valgrind:true.
- - Add the "arenas.extend" mallctl, which allows applications to create
- manually managed arenas.
- - Add the ALLOCM_ARENA() flag for {,r,d}allocm().
- - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls,
- which provide control over dss/mmap precedence.
- - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
- - Define LG_QUANTUM for hppa.
-
- Incompatible changes:
- - Disable tcache by default if running inside Valgrind, in order to avoid
- making unallocated objects appear reachable to Valgrind.
- - Drop const from malloc_usable_size() argument on Linux.
-
- Bug fixes:
- - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
- - Remove const from __*_hook variable declarations, so that glibc can modify
- them during process forking.
- - Fix mlockall(2)/madvise(2) interaction.
- - Fix fork(2)-related deadlocks.
- - Fix error return value for "thread.tcache.enabled" mallctl.
-
-* 3.0.0 (May 11, 2012)
-
- Although this version adds some major new features, the primary focus is on
- internal code cleanup that facilitates maintainability and portability, most
- of which is not reflected in the ChangeLog. This is the first release to
- incorporate substantial contributions from numerous other developers, and the
- result is a more broadly useful allocator (see the git revision history for
- contribution details). Note that the license has been unified, thanks to
- Facebook granting a license under the same terms as the other copyright
- holders (see COPYING).
-
- New features:
- - Implement Valgrind support, redzones, and quarantine.
- - Add support for additional platforms:
- + FreeBSD
- + Mac OS X Lion
- + MinGW
- + Windows (no support yet for replacing the system malloc)
- - Add support for additional architectures:
- + MIPS
- + SH4
- + Tilera
- - Add support for cross compiling.
- - Add nallocm(), which rounds a request size up to the nearest size class
- without actually allocating.
- - Implement aligned_alloc() (blame C11).
- Add the "thread.tcache.enabled" mallctl (see the sketch after this list).
- - Add the "opt.prof_final" mallctl.
- - Update pprof (from gperftools 2.0).
- - Add the --with-mangling option.
- - Add the --disable-experimental option.
- - Add the --disable-munmap option, and make it the default on Linux.
- - Add the --enable-mremap option, which disables use of mremap(2) by default.
-
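- For example, the new "thread.tcache.enabled" mallctl can be toggled per
- thread roughly as follows (a minimal sketch, not from the release notes;
- the helper name is ours):
-
-   #include <stdbool.h>
-   #include <stddef.h>
-   #include <jemalloc/jemalloc.h>
-
-   /* Disable the calling thread's tcache; returns the previous setting. */
-   bool thread_tcache_disable(void) {
-     bool old = true, enabled = false;
-     size_t sz = sizeof(old);
-     mallctl("thread.tcache.enabled", &old, &sz, &enabled, sizeof(enabled));
-     return old;
-   }
-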
- Incompatible changes:
- - Enable stats by default.
- - Enable fill by default.
- - Disable lazy locking by default.
- - Rename the "tcache.flush" mallctl to "thread.tcache.flush".
- - Rename the "arenas.pagesize" mallctl to "arenas.page".
- - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB).
- - Change the "opt.prof_accum" default from true to false.
-
- Removed features:
- - Remove the swap feature, including the "config.swap", "swap.avail",
- "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls.
- - Remove highruns statistics, including the
- "stats.arenas.<i>.bins.<j>.highruns" and
- "stats.arenas.<i>.lruns.<j>.highruns" mallctls.
- - As part of small size class refactoring, remove the "opt.lg_[qc]space_max",
- "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and
- "arenas.[tqcs]bins" mallctls.
- - Remove the "arenas.chunksize" mallctl.
- - Remove the "opt.lg_prof_tcmax" option.
- - Remove the "opt.lg_prof_bt_max" option.
- - Remove the "opt.lg_tcache_gc_sweep" option.
- - Remove the --disable-tiny option, including the "config.tiny" mallctl.
- - Remove the --enable-dynamic-page-shift configure option.
- - Remove the --enable-sysv configure option.
-
- Bug fixes:
- - Fix a statistics-related bug in the "thread.arena" mallctl that could cause
- invalid statistics and crashes.
- - Work around TLS deallocation via free() on Linux. This bug could cause
- write-after-free memory corruption.
- - Fix a potential deadlock that could occur during interval- and
- growth-triggered heap profile dumps.
- - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags.
- - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could
- cause memory corruption and crashes with --enable-dss specified.
- - Fix fork-related bugs that could cause deadlock in children between fork
- and exec.
- - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
- - Fix realloc(p, 0) to act like free(p).
- - Do not enforce minimum alignment in memalign().
- - Check for NULL pointer in malloc_usable_size().
- - Fix an off-by-one heap profile statistics bug that could be observed in
- interval- and growth-triggered heap profiles.
- - Fix the "epoch" mallctl to update cached stats even if the passed in epoch
- is 0.
- - Fix bin->runcur management to fix a layout policy bug. This bug did not
- affect correctness.
- - Fix a bug in choose_arena_hard() that potentially caused more arenas to be
- initialized than necessary.
- - Add missing "opt.lg_tcache_max" mallctl implementation.
- - Use glibc allocator hooks to make mixed allocator usage less likely.
- - Fix build issues for --disable-tcache.
- - Don't mangle pthread_create() when --with-private-namespace is specified.
-
-* 2.2.5 (November 14, 2011)
-
- Bug fixes:
- - Fix huge_ralloc() race when using mremap(2). This is a serious bug that
- could cause memory corruption and/or crashes.
- - Fix huge_ralloc() to maintain chunk statistics.
- - Fix malloc_stats_print(..., "a") output.
-
-* 2.2.4 (November 5, 2011)
-
- Bug fixes:
- - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as
- well as for --disable-tls builds in earlier releases.
- - Do not assume a 4 KiB page size in test/rallocm.c.
-
-* 2.2.3 (August 31, 2011)
-
- This version fixes numerous bugs related to heap profiling.
-
- Bug fixes:
- - Fix a prof-related race condition. This bug could cause memory corruption,
- but only occurred in non-default configurations (prof_accum:false).
- - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is
- excluded from backtraces).
- - Fix a prof-related bug in realloc() (only triggered by OOM errors).
- - Fix prof-related bugs in allocm() and rallocm().
- - Fix prof_tdata_cleanup() for --disable-tls builds.
- - Fix a relative include path, to fix objdir builds.
-
-* 2.2.2 (July 30, 2011)
-
- Bug fixes:
- - Fix a build error for --disable-tcache.
- - Fix assertions in arena_purge() (for real this time).
- - Add the --with-private-namespace option. This is a workaround for symbol
- conflicts that can inadvertently arise when using static libraries.
-
-* 2.2.1 (March 30, 2011)
-
- Bug fixes:
- - Implement atomic operations for x86/x64. This fixes compilation failures
- for versions of gcc that are still in wide use.
- - Fix an assertion in arena_purge().
-
-* 2.2.0 (March 22, 2011)
-
- This version incorporates several improvements to algorithms and data
- structures that tend to reduce fragmentation and increase speed.
-
- New features:
- - Add the "stats.cactive" mallctl.
- - Update pprof (from google-perftools 1.7).
- - Improve backtracing-related configuration logic, and add the
- --disable-prof-libgcc option.
-
- Bug fixes:
- Change default symbol visibility from "internal" to "hidden", which
- decreases the overhead of library-internal function calls.
- - Fix symbol visibility so that it is also set on OS X.
- - Fix a build dependency regression caused by the introduction of the .pic.o
- suffix for PIC object files.
- - Add missing checks for mutex initialization failures.
- - Don't use libgcc-based backtracing except on x64, where it is known to work.
- - Fix deadlocks on OS X that were due to memory allocation in
- pthread_mutex_lock().
- - Heap profiling-specific fixes:
- + Fix memory corruption due to integer overflow in small region index
- computation, when using a small enough sample interval that profiling
- context pointers are stored in small run headers.
- + Fix a bootstrap ordering bug that only occurred with TLS disabled.
- + Fix a rallocm() rsize bug.
- + Fix error detection bugs for aligned memory allocation.
-
-* 2.1.3 (March 14, 2011)
-
- Bug fixes:
- - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix
- for OS X in 2.1.2).
- - Fix a "thread.arena" mallctl bug.
- - Fix a thread cache stats merging bug.
-
-* 2.1.2 (March 2, 2011)
-
- Bug fixes:
- - Fix "thread.{de,}allocatedp" mallctl for OS X.
- - Add missing jemalloc.a to build system.
-
-* 2.1.1 (January 31, 2011)
-
- Bug fixes:
- - Fix aligned huge reallocation (affected allocm()).
- - Fix the ALLOCM_LG_ALIGN macro definition.
- - Fix a heap dumping deadlock.
- - Fix a "thread.arena" mallctl bug.
-
-* 2.1.0 (December 3, 2010)
-
- This version incorporates some optimizations that can't quite be considered
- bug fixes.
-
- New features:
- - Use Linux's mremap(2) for huge object reallocation when possible.
- - Avoid locking in mallctl*() when possible.
- Add the "thread.[de]allocatedp" mallctls.
- - Convert the manual page source from roff to DocBook, and generate both roff
- and HTML manuals.
-
- Bug fixes:
- - Fix a crash due to incorrect bootstrap ordering. This only impacted
- --enable-debug --enable-dss configurations.
- - Fix a minor statistics bug for mallctl("swap.avail", ...).
-
-* 2.0.1 (October 29, 2010)
-
- Bug fixes:
- - Fix a race condition in heap profiling that could cause undefined behavior
- if "opt.prof_accum" were disabled.
- - Add missing mutex unlocks for some OOM error paths in the heap profiling
- code.
- - Fix a compilation error for non-C99 builds.
-
-* 2.0.0 (October 24, 2010)
-
- This version focuses on the experimental *allocm() API, and on improved
- run-time configuration/introspection. Nonetheless, numerous performance
- improvements are also included.
-
- New features:
- - Implement the experimental {,r,s,d}allocm() API, which provides a superset
- of the functionality available via malloc(), calloc(), posix_memalign(),
- realloc(), malloc_usable_size(), and free(). These functions can be used to
- allocate/reallocate aligned zeroed memory, ask for optional extra memory
- during reallocation, prevent object movement during reallocation, etc.
- - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is
- more human-readable, and more flexible. For example:
- JEMALLOC_OPTIONS=AJP
- is now:
- MALLOC_CONF=abort:true,fill:true,stats_print:true
- - Port to Apple OS X. Sponsored by Mozilla.
- - Make it possible for the application to control thread-->arena mappings via
- the "thread.arena" mallctl.
- - Add compile-time support for all TLS-related functionality via pthreads TSD.
- This is mainly of interest for OS X, which does not support TLS, but has a
- TSD implementation with similar performance.
- - Override memalign() and valloc() if they are provided by the system.
- - Add the "arenas.purge" mallctl, which can be used to synchronously purge all
- dirty unused pages.
- - Make cumulative heap profiling data optional, so that it is possible to
- limit the amount of memory consumed by heap profiling data structures.
- Add per thread allocation counters that can be accessed via the
- "thread.allocated" and "thread.deallocated" mallctls (see the sketch after
- this list).
-
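- A minimal sketch of reading the per thread counters mentioned above
- (written against the modern public API; the helper name is ours):
-
-   #include <stdint.h>
-   #include <stdio.h>
-   #include <jemalloc/jemalloc.h>
-
-   void print_thread_counters(void) {
-     uint64_t alloc = 0, dealloc = 0;
-     size_t sz = sizeof(uint64_t);
-     /* Cumulative byte counters for the calling thread. */
-     if (mallctl("thread.allocated", &alloc, &sz, NULL, 0) == 0 &&
-         mallctl("thread.deallocated", &dealloc, &sz, NULL, 0) == 0) {
-       printf("allocated=%llu deallocated=%llu\n",
-           (unsigned long long)alloc, (unsigned long long)dealloc);
-     }
-   }
-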
- Incompatible changes:
- - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above).
- - Increase default backtrace depth from 4 to 128 for heap profiling.
- - Disable interval-based profile dumps by default.
-
- Bug fixes:
- - Remove bad assertions in fork handler functions. These assertions could
- cause aborts for some combinations of configure settings.
- - Fix strerror_r() usage to deal with non-standard semantics in GNU libc.
- - Fix leak context reporting. This bug tended to cause the number of contexts
- to be underreported (though the reported numbers of objects and bytes were
- correct).
- - Fix a realloc() bug for large in-place growing reallocation. This bug could
- cause memory corruption, but it was hard to trigger.
- - Fix an allocation bug for small allocations that could be triggered if
- multiple threads raced to create a new run of backing pages.
- - Enhance the heap profiler to trigger samples based on usable size, rather
- than request size.
- - Fix a heap profiling bug due to sometimes losing track of requested object
- size for sampled objects.
-
-* 1.0.3 (August 12, 2010)
-
- Bug fixes:
- - Fix the libunwind-based implementation of stack backtracing (used for heap
- profiling). This bug could cause zero-length backtraces to be reported.
- - Add a missing mutex unlock in library initialization code. If multiple
- threads raced to initialize malloc, some of them could end up permanently
- blocked.
-
-* 1.0.2 (May 11, 2010)
-
- Bug fixes:
- - Fix junk filling of large objects, which could cause memory corruption.
- - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual
- memory limits could cause swap file configuration to fail. Contributed by
- Jordan DeLong.
-
-* 1.0.1 (April 14, 2010)
-
- Bug fixes:
- - Fix compilation when --enable-fill is specified.
- - Fix threads-related profiling bugs that affected accuracy and caused memory
- to be leaked during thread exit.
- - Fix dirty page purging race conditions that could cause crashes.
- - Fix crash in tcache flushing code during thread destruction.
-
-* 1.0.0 (April 11, 2010)
-
- This release focuses on speed and run-time introspection. Numerous
- algorithmic improvements make this release substantially faster than its
- predecessors.
-
- New features:
- - Implement autoconf-based configuration system.
- - Add mallctl*(), for the purposes of introspection and run-time
- configuration.
- - Make it possible for the application to manually flush a thread's cache, via
- the "tcache.flush" mallctl.
- - Base maximum dirty page count on proportion of active memory.
- - Compute various additional run-time statistics, including per size class
- statistics for large objects.
- Expose malloc_stats_print(), which can be called repeatedly by the
- application (see the sketch after this list).
- - Simplify the malloc_message() signature to only take one string argument,
- and incorporate an opaque data pointer argument for use by the application
- in combination with malloc_stats_print().
- - Add support for allocation backed by one or more swap files, and allow the
- application to disable over-commit if swap files are in use.
- - Implement allocation profiling and leak checking.
-
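- A minimal sketch of malloc_stats_print() with an application-supplied write
- callback and opaque pointer, as described above (the callback and helper
- names are ours; not part of the release notes):
-
-   #include <stdio.h>
-   #include <jemalloc/jemalloc.h>
-
-   /* Receives chunks of statistics text; opaque is passed through verbatim. */
-   static void write_cb(void *opaque, const char *s) {
-     fputs(s, (FILE *)opaque);
-   }
-
-   void dump_stats(FILE *out) {
-     malloc_stats_print(write_cb, out, NULL);
-   }
-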
- Removed features:
- - Remove the dynamic arena rebalancing code, since thread-specific caching
- reduces its utility.
-
- Bug fixes:
- - Modify chunk allocation to work when address space layout randomization
- (ASLR) is in use.
- - Fix thread cleanup bugs related to TLS destruction.
- - Handle 0-size allocation requests in posix_memalign().
- - Fix a chunk leak. The leaked chunks were never touched, so this impacted
- virtual memory usage, but not physical memory usage.
-
-* linux_2008082[78]a (August 27/28, 2008)
-
- These snapshot releases are the simple result of incorporating Linux-specific
- support into the FreeBSD malloc sources.
-
---------------------------------------------------------------------------------
-vim:filetype=text:textwidth=80
+Following are change highlights associated with official releases. Important
+bug fixes are all mentioned, but some internal enhancements are omitted here for
+brevity. Much more detail can be found in the git revision history:
+
+ https://github.com/jemalloc/jemalloc
+
+* 5.2.1 (August 5, 2019)
+
+ This release is primarily about Windows. A critical virtual memory leak is
+ resolved on all Windows platforms. The regression was present in all releases
+ since 5.0.0.
+
+ Bug fixes:
+ - Fix a severe virtual memory leak on Windows. This regression was first
+ released in 5.0.0. (@Ignition, @j0t, @frederik-h, @davidtgoldblatt,
+ @interwq)
+ - Fix size 0 handling in posix_memalign(). This regression was first released
+ in 5.2.0. (@interwq)
+ - Fix the prof_log unit test which may observe unexpected backtraces from
+ compiler optimizations. The test was first added in 5.2.0. (@marxin,
+ @gnzlbg, @interwq)
+ - Fix the declaration of the extent_avail tree. This regression was first
+ released in 5.1.0. (@zoulasc)
+ - Fix an incorrect reference in jeprof. This functionality was first released
+ in 3.0.0. (@prehistoric-penguin)
+ - Fix an assertion on the deallocation fast-path. This regression was first
+ released in 5.2.0. (@yinan1048576)
+ - Fix the TLS_MODEL attribute in headers. This regression was first released
+ in 5.0.0. (@zoulasc, @interwq)
+
+ Optimizations and refactors:
+ - Implement opt.retain on Windows and enable by default on 64-bit. (@interwq,
+ @davidtgoldblatt)
+ - Optimize away a branch on the operator delete[] path. (@mgrice)
+ - Add format annotation to the format generator function. (@zoulasc)
+ - Refactor and improve the size class header generation. (@yinan1048576)
+ - Remove best fit. (@djwatson)
+ - Avoid blocking on background thread locks for stats. (@oranagra, @interwq)
+
+* 5.2.0 (April 2, 2019)
+
+ This release includes a few notable improvements, which are summarized below:
+ 1) improved fast-path performance from the optimizations by @djwatson; 2)
+ reduced virtual memory fragmentation and metadata usage; and 3) bug fixes on
+ setting the number of background threads. In addition, peak / spike memory
+ usage is improved with certain allocation patterns. As usual, the release and
+ prior dev versions have gone through large-scale production testing.
+
+ New features:
+ - Implement oversize_threshold, which uses a dedicated arena for allocations
+ crossing the specified threshold to reduce fragmentation; see the sketch
+ after this list. (@interwq)
+ - Add extents usage information to stats. (@tyleretzel)
+ - Log time information for sampled allocations. (@tyleretzel)
+ - Support 0 size in sdallocx. (@djwatson)
+ - Output rate for certain counters in malloc_stats. (@zinoale)
+ - Add configure option --enable-readlinkat, which allows the use of readlinkat
+ over readlink. (@davidtgoldblatt)
+ - Add configure options --{enable,disable}-{static,shared} to allow not
+ building unwanted libraries. (@Ericson2314)
+ - Add configure option --disable-libdl to enable fully static builds.
+ (@interwq)
+ - Add mallctl interfaces:
+ + opt.oversize_threshold (@interwq)
+ + stats.arenas.<i>.extent_avail (@tyleretzel)
+ + stats.arenas.<i>.extents.<j>.n{dirty,muzzy,retained} (@tyleretzel)
+ + stats.arenas.<i>.extents.<j>.{dirty,muzzy,retained}_bytes
+ (@tyleretzel)
+
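+ For example, the configured oversize threshold can be inspected at run time
+ via the new mallctl (a minimal sketch, not from the release notes; the
+ helper name is ours):
+
+   #include <stddef.h>
+   #include <jemalloc/jemalloc.h>
+
+   /* Returns the active oversize threshold in bytes, or 0 on error. */
+   size_t get_oversize_threshold(void) {
+     size_t v = 0, sz = sizeof(v);
+     if (mallctl("opt.oversize_threshold", &v, &sz, NULL, 0) != 0)
+       return 0;
+     return v;
+   }
+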
+ Portability improvements:
+ - Update MSVC builds. (@maksqwe, @rustyx)
+ - Workaround a compiler optimizer bug on s390x. (@rkmisra)
+ - Make use of pthread_set_name_np(3) on FreeBSD. (@trasz)
+ - Implement malloc_getcpu() to enable percpu_arena for Windows. (@santagada)
+ - Link against -pthread instead of -lpthread. (@paravoid)
+ - Make background_thread not dependent on libdl. (@interwq)
+ - Add stringify to fix a linker directive issue on MSVC. (@daverigby)
+ - Detect and fall back when 8-bit atomics are unavailable. (@interwq)
+ - Fall back to the default pthread_create if dlsym(3) fails. (@interwq)
+
+ Optimizations and refactors:
+ - Refactor the TSD module. (@davidtgoldblatt)
+ - Avoid taking extents_muzzy mutex when muzzy is disabled. (@interwq)
+ - Avoid taking large_mtx for auto arenas on the tcache flush path. (@interwq)
+ - Optimize ixalloc by avoiding a size lookup. (@interwq)
+ - Implement opt.oversize_threshold, which uses a dedicated arena for requests
+ crossing the threshold and eagerly purges the oversize extents. The
+ threshold defaults to 8 MiB. (@interwq)
+ - Clean compilation with -Wextra. (@gnzlbg, @jasone)
+ - Refactor the size class module. (@davidtgoldblatt)
+ - Refactor the stats emitter. (@tyleretzel)
+ - Optimize pow2_ceil. (@rkmisra)
+ - Avoid runtime detection of lazy purging on FreeBSD. (@trasz)
+ - Optimize mmap(2) alignment handling on FreeBSD. (@trasz)
+ - Improve error handling for THP state initialization. (@jsteemann)
+ - Rework the malloc() fast path. (@djwatson)
+ - Rework the free() fast path. (@djwatson)
+ - Refactor and optimize the tcache fill / flush paths. (@djwatson)
+ - Optimize sync / lwsync on PowerPC. (@chmeeedalf)
+ - Bypass extent_dalloc() when retain is enabled. (@interwq)
+ - Optimize the locking on large deallocation. (@interwq)
+ - Reduce the number of pages committed from sanity checking in debug build.
+ (@trasz, @interwq)
+ - Deprecate OSSpinLock. (@interwq)
+ - Lower the default number of background threads to 4 (when the feature
+ is enabled). (@interwq)
+ - Optimize the trylock spin wait. (@djwatson)
+ - Use arena index for arena-matching checks. (@interwq)
+ - Avoid forced decay on thread termination when using background threads.
+ (@interwq)
+ - Disable muzzy decay by default. (@djwatson, @interwq)
+ - Only initialize libgcc unwinder when profiling is enabled. (@paravoid,
+ @interwq)
+
+ Bug fixes (all only relevant to jemalloc 5.x):
+ - Fix background thread index issues with max_background_threads. (@djwatson,
+ @interwq)
+ - Fix stats output for opt.lg_extent_max_active_fit. (@interwq)
+ - Fix opt.prof_prefix initialization. (@davidtgoldblatt)
+ - Properly trigger decay on tcache destroy. (@interwq, @amosbird)
+ - Fix tcache.flush. (@interwq)
+ - Detect whether explicit extent zero out is necessary with huge pages or
+ custom extent hooks, which may change the purge semantics. (@interwq)
+ - Fix a side effect caused by extent_max_active_fit combined with decay-based
+ purging, where freed extents can accumulate and not be reused for an
+ extended period of time. (@interwq, @mpghf)
+ - Fix a missing unlock on extent register error handling. (@zoulasc)
+
+ Testing:
+ - Simplify the Travis script output. (@gnzlbg)
+ - Update the test scripts for FreeBSD. (@devnexen)
+ - Add unit tests for the producer-consumer pattern. (@interwq)
+ - Add Cirrus-CI config for FreeBSD builds. (@jasone)
+ - Add size-matching sanity checks on tcache flush. (@davidtgoldblatt,
+ @interwq)
+
+ Incompatible changes:
+ - Remove --with-lg-page-sizes. (@davidtgoldblatt)
+
+ Documentation:
+ - Attempt to build docs by default, but skip doc building when xsltproc is
+ missing. (@interwq, @cmuellner)
+
+* 5.1.0 (May 4, 2018)
+
+ This release is primarily about fine-tuning, ranging from several new features
+ to numerous notable performance and portability enhancements. The release and
+ prior dev versions have been running in multiple large scale applications for
+ months, and the cumulative improvements are substantial in many cases.
+
+ Given the long and successful production runs, this release is likely a good
+ candidate for applications to upgrade, from both jemalloc 5.0 and before. For
+ performance-critical applications, the newly added TUNING.md provides
+ guidelines on jemalloc tuning.
+
+ New features:
+ - Implement transparent huge page support for internal metadata. (@interwq)
+ - Add opt.thp to allow enabling / disabling transparent huge pages for all
+ mappings. (@interwq)
+ - Add maximum background thread count option. (@djwatson)
+ - Allow prof_active to control opt.lg_prof_interval and prof.gdump.
+ (@interwq)
+ - Allow arena index lookup based on allocation addresses via mallctl; see
+ the sketch after this list. (@lionkov)
+ - Allow disabling initial-exec TLS model. (@davidtgoldblatt, @KenMacD)
+ - Add opt.lg_extent_max_active_fit to set the max ratio between the size of
+ the active extent selected (to split off from) and the size of the requested
+ allocation. (@interwq, @davidtgoldblatt)
+ - Add retain_grow_limit to set the max size when growing virtual address
+ space. (@interwq)
+ - Add mallctl interfaces:
+ + arena.<i>.retain_grow_limit (@interwq)
+ + arenas.lookup (@lionkov)
+ + max_background_threads (@djwatson)
+ + opt.lg_extent_max_active_fit (@interwq)
+ + opt.max_background_threads (@djwatson)
+ + opt.metadata_thp (@interwq)
+ + opt.thp (@interwq)
+ + stats.metadata_thp (@interwq)
+
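+ For example, arenas.lookup can map an allocation back to its owning arena
+ (a minimal sketch, not from the release notes; the helper name is ours):
+
+   #include <stddef.h>
+   #include <jemalloc/jemalloc.h>
+
+   /* Returns the index of the arena owning ptr, or (unsigned)-1 on error. */
+   unsigned arena_of(void *ptr) {
+     unsigned arena_ind;
+     size_t sz = sizeof(arena_ind);
+     if (mallctl("arenas.lookup", &arena_ind, &sz, &ptr, sizeof(ptr)) != 0)
+       return (unsigned)-1;
+     return arena_ind;
+   }
+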
+ Portability improvements:
+ - Support GNU/kFreeBSD configuration. (@paravoid)
+ - Support m68k, nios2 and SH3 architectures. (@paravoid)
+ - Fall back to FD_CLOEXEC when O_CLOEXEC is unavailable. (@zonyitoo)
+ - Fix symbol listing for cross-compiling. (@tamird)
+ - Fix high bits computation on ARM. (@davidtgoldblatt, @paravoid)
+ - Disable the CPU_SPINWAIT macro for Power. (@davidtgoldblatt, @marxin)
+ - Fix MSVC 2015 & 2017 builds. (@rustyx)
+ - Improve RISC-V support. (@EdSchouten)
+ - Set name mangling script in strict mode. (@nicolov)
+ - Avoid MADV_HUGEPAGE on ARM. (@marxin)
+ - Modify configure to determine return value of strerror_r.
+ (@davidtgoldblatt, @cferris1000)
+ - Make sure CXXFLAGS is tested with CPP compiler. (@nehaljwani)
+ - Fix 32-bit build on MSVC. (@rustyx)
+ - Fix external symbol on MSVC. (@maksqwe)
+ - Avoid a printf format specifier warning. (@jasone)
+ - Add configure option --disable-initial-exec-tls which can allow jemalloc to
+ be dynamically loaded after program startup. (@davidtgoldblatt, @KenMacD)
+ - AArch64: Add ILP32 support. (@cmuellner)
+ - Add --with-lg-vaddr configure option to support cross compiling.
+ (@cmuellner, @davidtgoldblatt)
+
+ Optimizations and refactors:
+ - Improve active extent fit with extent_max_active_fit. This considerably
+ reduces fragmentation over time and improves virtual memory and metadata
+ usage. (@davidtgoldblatt, @interwq)
+ - Eagerly coalesce large extents to reduce fragmentation. (@interwq)
+ - sdallocx: only read size info when page aligned (i.e. possibly sampled),
+ which speeds up the sized deallocation path significantly. (@interwq)
+ - Avoid attempting new mappings for in place expansion with retain, since
+ it rarely succeeds in practice and causes high overhead. (@interwq)
+ - Refactor OOM handling in newImpl. (@wqfish)
+ - Add internal fine-grained logging functionality for debugging use.
+ (@davidtgoldblatt)
+ - Refactor arena / tcache interactions. (@davidtgoldblatt)
+ - Refactor extent management with dumpable flag. (@davidtgoldblatt)
+ - Add runtime detection of lazy purging. (@interwq)
+ - Use pairing heap instead of red-black tree for extents_avail. (@djwatson)
+ - Use sysctl on startup in FreeBSD. (@trasz)
+ - Use thread local prng state instead of atomic. (@djwatson)
+ - Make decay always purge one more extent than before, because in
+ practice large extents are usually the ones that cross the decay threshold.
+ Purging the additional extent helps save memory as well as reduce VM
+ fragmentation. (@interwq)
+ - Fast division by dynamic values. (@davidtgoldblatt)
+ - Improve the fit for aligned allocation. (@interwq, @edwinsmith)
+ - Refactor extent_t bitpacking. (@rkmisra)
+ - Optimize the generated assembly for ticker operations. (@davidtgoldblatt)
+ - Convert stats printing to use a structured text emitter. (@davidtgoldblatt)
+ - Remove preserve_lru feature for extents management. (@djwatson)
+ - Consolidate two memory loads into one on the fast deallocation path.
+ (@davidtgoldblatt, @interwq)
+
+ Bug fixes (most of the issues are only relevant to jemalloc 5.0):
+ - Fix deadlock with multithreaded fork in OS X. (@davidtgoldblatt)
+ - Validate returned file descriptor before use. (@zonyitoo)
+ - Fix a few background thread initialization and shutdown issues. (@interwq)
+ - Fix an extent coalesce + decay race by taking both coalescing extents off
+ the LRU list. (@interwq)
+ - Fix a potentially unbounded increase during decay, caused by one thread
+ continually stashing memory to purge while other threads generate new
+ pages. The number of pages to purge is now checked to prevent this.
+ (@interwq)
+ - Fix a FreeBSD bootstrap assertion. (@strejda, @interwq)
+ - Handle 32 bit mutex counters. (@rkmisra)
+ - Fix an indexing bug when creating background threads. (@davidtgoldblatt,
+ @binliu19)
+ - Fix arguments passed to extent_init. (@yuleniwo, @interwq)
+ - Fix addresses used for ordering mutexes. (@rkmisra)
+ - Fix abort_conf processing during bootstrap. (@interwq)
+ - Fix include path order for out-of-tree builds. (@cmuellner)
+
+ Incompatible changes:
+ - Remove --disable-thp. (@interwq)
+ - Remove mallctl interfaces:
+ + config.thp (@interwq)
+
+ Documentation:
+ - Add TUNING.md. (@interwq, @davidtgoldblatt, @djwatson)
+
+* 5.0.1 (July 1, 2017)
+
+ This bugfix release fixes several issues, most of which are obscure enough
+ that typical applications are not impacted.
+
+ Bug fixes:
+ - Update decay->nunpurged before purging, in order to avoid potential update
+ races and subsequent incorrect purging volume. (@interwq)
+ - Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy
+ locking and/or background threads). This mitigates an initialization
+ failure bug for which we still do not have a clear reproduction test case.
+ (@interwq)
+ - Modify tsd management so that it neither crashes nor leaks if a thread's
+ only allocation activity is to call free() after TLS destructors have been
+ executed. This behavior was observed when operating with GNU libc, and is
+ unlikely to be an issue with other libc implementations. (@interwq)
+ - Mask signals during background thread creation. This prevents signals from
+ being inadvertently delivered to background threads. (@jasone,
+ @davidtgoldblatt, @interwq)
+ - Avoid inactivity checks within background threads, in order to prevent
+ recursive mutex acquisition. (@interwq)
+ - Fix extent_grow_retained() to use the specified hooks when the
+ arena.<i>.extent_hooks mallctl is used to override the default hooks.
+ (@interwq)
+ - Add missing reentrancy support for custom extent hooks which allocate.
+ (@interwq)
+ - Post-fork(2), re-initialize the list of tcaches associated with each arena
+ to contain no tcaches except the forking thread's. (@interwq)
+ - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx. This
+ fixes potential deadlocks after fork(2). (@interwq)
+ - Enforce minimum autoconf version (currently 2.68), since 2.63 is known to
+ generate corrupt configure scripts. (@jasone)
+ - Ensure that the configured page size (--with-lg-page) is no larger than the
+ configured huge page size (--with-lg-hugepage). (@jasone)
+
+* 5.0.0 (June 13, 2017)
+
+ Unlike all previous jemalloc releases, this release does not use naturally
+ aligned "chunks" for virtual memory management, and instead uses page-aligned
+ "extents". This change has few externally visible effects, but the internal
+ impacts are... extensive. Many other internal changes combine to make this
+ the most cohesively designed version of jemalloc so far, with ample
+ opportunity for further enhancements.
+
+ Continuous integration is now an integral aspect of development thanks to the
+ efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably
+ stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a
+ side effect the official release frequency may decrease over time.
+
+ New features:
+ - Implement optional per-CPU arena support; threads choose which arena to use
+ based on current CPU rather than on fixed thread-->arena associations.
+ (@interwq)
+ - Implement two-phase decay of unused dirty pages. Pages transition from
+ dirty-->muzzy-->clean, where the first phase transition relies on
+ madvise(... MADV_FREE) semantics, and the second phase transition discards
+ pages such that they are replaced with demand-zeroed pages on next access.
+ (@jasone)
+ - Increase decay time resolution from seconds to milliseconds. (@jasone)
+ - Implement opt-in per CPU background threads, and use them for asynchronous
+ decay-driven unused dirty page purging. (@interwq)
+ - Add mutex profiling, which collects a variety of statistics useful for
+ diagnosing overhead/contention issues. (@interwq)
+ - Add C++ new/delete operator bindings. (@djwatson)
+ - Support manually created arena destruction, such that all data and metadata
+ are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats
+ associated with destroyed arenas; see the sketch after this list. (@jasone)
+ - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing
+ merged/destroyed arena statistics via mallctl. (@jasone)
+ - Add opt.abort_conf to optionally abort if invalid configuration options are
+ detected during initialization. (@interwq)
+ - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the
+ stats dumped during exit if opt.stats_print is true. (@jasone)
+ - Add --with-version=VERSION for use when embedding jemalloc into another
+ project's git repository. (@jasone)
+ - Add --disable-thp to support cross compiling. (@jasone)
+ - Add --with-lg-hugepage to support cross compiling. (@jasone)
+ - Add mallctl interfaces (various authors):
+ + background_thread
+ + opt.abort_conf
+ + opt.retain
+ + opt.percpu_arena
+ + opt.background_thread
+ + opt.{dirty,muzzy}_decay_ms
+ + opt.stats_print_opts
+ + arena.<i>.initialized
+ + arena.<i>.destroy
+ + arena.<i>.{dirty,muzzy}_decay_ms
+ + arena.<i>.extent_hooks
+ + arenas.{dirty,muzzy}_decay_ms
+ + arenas.bin.<i>.slab_size
+ + arenas.nlextents
+ + arenas.lextent.<i>.size
+ + arenas.create
+ + stats.background_thread.{num_threads,num_runs,run_interval}
+ + stats.mutexes.{ctl,background_thread,prof,reset}.
+ {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
+ num_owner_switch}
+ + stats.arenas.<i>.{dirty,muzzy}_decay_ms
+ + stats.arenas.<i>.uptime
+ + stats.arenas.<i>.{pmuzzy,base,internal,resident}
+ + stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
+ + stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs}
+ + stats.arenas.<i>.bins.<j>.mutex.
+ {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
+ num_owner_switch}
+ + stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents}
+ + stats.arenas.<i>.mutexes.{large,extent_avail,extents_dirty,extents_muzzy,
+ extents_retained,decay_dirty,decay_muzzy,base,tcache_list}.
+ {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
+ num_owner_switch}
+
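+ A minimal sketch of the manual arena lifecycle described above, combining
+ arenas.create, MALLOCX_ARENA(), and arena.<i>.destroy (not from the release
+ notes; the helper name is ours):
+
+   #include <stdio.h>
+   #include <stddef.h>
+   #include <jemalloc/jemalloc.h>
+
+   void arena_lifecycle(void) {
+     unsigned arena;
+     size_t sz = sizeof(arena);
+     char cmd[64];
+     if (mallctl("arenas.create", &arena, &sz, NULL, 0) != 0)
+       return;
+     /* Bypass the tcache so the allocation is served directly by the arena. */
+     void *p = mallocx(1 << 20, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
+     if (p != NULL)
+       dallocx(p, MALLOCX_TCACHE_NONE);
+     /* Destroying the arena discards all of its data and metadata. */
+     snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena);
+     mallctl(cmd, NULL, NULL, NULL, 0);
+   }
+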
+ Portability improvements:
+ - Improve reentrant allocation support, such that deadlock is less likely if
+ e.g. a system library call in turn allocates memory. (@davidtgoldblatt,
+ @interwq)
+ - Support static linking of jemalloc with glibc. (@djwatson)
+
+ Optimizations and refactors:
+ - Organize virtual memory as "extents" of virtual memory pages, rather than as
+ naturally aligned "chunks", and store all metadata in arbitrarily distant
+ locations. This reduces virtual memory external fragmentation, and will
+ interact better with huge pages (not yet explicitly supported). (@jasone)
+ - Fold large and huge size classes together; only small and large size classes
+ remain. (@jasone)
+ - Unify the allocation paths, and merge most fast-path branching decisions.
+ (@davidtgoldblatt, @interwq)
+ - Embed per thread automatic tcache into thread-specific data, which reduces
+ conditional branches and dereferences. Also reorganize tcache to increase
+ fast-path data locality. (@interwq)
+ - Rewrite atomics to closely model the C11 API, convert various
+ synchronization from mutex-based to atomic, and use the explicit memory
+ ordering control to resolve various hypothetical races without increasing
+ synchronization overhead. (@davidtgoldblatt)
+ - Extensively optimize rtree via various methods:
+ + Add multiple layers of rtree lookup caching, since rtree lookups are now
+ part of fast-path deallocation. (@interwq)
+ + Determine rtree layout at compile time. (@jasone)
+ + Make the tree shallower for common configurations. (@jasone)
+ + Embed the root node in the top-level rtree data structure, thus avoiding
+ one level of indirection. (@jasone)
+ + Further specialize leaf elements as compared to internal node elements,
+ and directly embed extent metadata needed for fast-path deallocation.
+ (@jasone)
+ + Ignore leading always-zero address bits (architecture-specific).
+ (@jasone)
+ - Reorganize headers (ongoing work) to make them hermetic, and disentangle
+ various module dependencies. (@davidtgoldblatt)
+ - Convert various internal data structures such as size class metadata from
+ boot-time-initialized to compile-time-initialized. Propagate resulting data
+ structure simplifications, such as making arena metadata fixed-size.
+ (@jasone)
+ - Simplify size class lookups when constrained to size classes that are
+ multiples of the page size. This speeds lookups, but the primary benefit is
+ complexity reduction in code that was the source of numerous regressions.
+ (@jasone)
+ - Lock individual extents when possible for localized extent operations,
+ rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone)
+ - Use first fit layout policy instead of best fit, in order to improve
+ packing. (@jasone)
+ - If munmap(2) is not in use, use an exponential series to grow each arena's
+ virtual memory, so that the number of disjoint virtual memory mappings
+ remains low. (@jasone)
+ - Implement per arena base allocators, so that arenas never share any virtual
+ memory pages. (@jasone)
+ - Automatically generate private symbol name mangling macros. (@jasone)
+
+ Incompatible changes:
+ - Replace chunk hooks with an expanded/normalized set of extent hooks.
+ (@jasone)
+ - Remove ratio-based purging. (@jasone)
+ - Remove --disable-tcache. (@jasone)
+ - Remove --disable-tls. (@jasone)
+ - Remove --enable-ivsalloc. (@jasone)
+ - Remove --with-lg-size-class-group. (@jasone)
+ - Remove --with-lg-tiny-min. (@jasone)
+ - Remove --disable-cc-silence. (@jasone)
+ - Remove --enable-code-coverage. (@jasone)
+ - Remove --disable-munmap (replaced by opt.retain). (@jasone)
+ - Remove Valgrind support. (@jasone)
+ - Remove quarantine support. (@jasone)
+ - Remove redzone support. (@jasone)
+ - Remove mallctl interfaces (various authors):
+ + config.munmap
+ + config.tcache
+ + config.tls
+ + config.valgrind
+ + opt.lg_chunk
+ + opt.purge
+ + opt.lg_dirty_mult
+ + opt.decay_time
+ + opt.quarantine
+ + opt.redzone
+ + opt.thp
+ + arena.<i>.lg_dirty_mult
+ + arena.<i>.decay_time
+ + arena.<i>.chunk_hooks
+ + arenas.initialized
+ + arenas.lg_dirty_mult
+ + arenas.decay_time
+ + arenas.bin.<i>.run_size
+ + arenas.nlruns
+ + arenas.lrun.<i>.size
+ + arenas.nhchunks
+ + arenas.hchunk.<i>.size
+ + arenas.extend
+ + stats.cactive
+ + stats.arenas.<i>.lg_dirty_mult
+ + stats.arenas.<i>.decay_time
+ + stats.arenas.<i>.metadata.{mapped,allocated}
+ + stats.arenas.<i>.{npurge,nmadvise,purged}
+ + stats.arenas.<i>.huge.{allocated,nmalloc,ndalloc,nrequests}
+ + stats.arenas.<i>.bins.<j>.{nruns,reruns,curruns}
+ + stats.arenas.<i>.lruns.<j>.{nmalloc,ndalloc,nrequests,curruns}
+ + stats.arenas.<i>.hchunks.<j>.{nmalloc,ndalloc,nrequests,curhchunks}
+
+ Bug fixes:
+ - Improve interval-based profile dump triggering to dump only one profile when
+ a single allocation's size exceeds the interval. (@jasone)
+ - Use prefixed function names (as controlled by --with-jemalloc-prefix) when
+ pruning backtrace frames in jeprof. (@jasone)
+
+* 4.5.0 (February 28, 2017)
+
+ This is the first release to benefit from much broader continuous integration
+ testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure
+ in place for prior releases, it would have caught all of the most serious
+ regressions fixed by this release.
+
+ New features:
+ - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for
+ transparent huge page integration. (@jasone)
+ - Update zone allocator integration to work with macOS 10.12. (@glandium)
+ - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and
+ EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not
+ during configuration. (@jasone, @ronawho)
+
+ Bug fixes:
+ - Fix DSS (sbrk(2)-based) allocation. This regression was first released in
+ 4.3.0. (@jasone)
+ - Handle race in per size class utilization computation. This functionality
+ was first released in 4.0.0. (@interwq)
+ - Fix lock order reversal during gdump. (@jasone)
+ - Fix/refactor tcache synchronization. This regression was first released in
+ 4.0.0. (@jasone)
+ - Fix various JSON-formatted malloc_stats_print() bugs. This functionality
+ was first released in 4.3.0. (@jasone)
+ - Fix huge-aligned allocation. This regression was first released in 4.4.0.
+ (@jasone)
+ - When transparent huge page integration is enabled, detect what state pages
+ start in according to the kernel's current operating mode, and only convert
+ arena chunks to non-huge during purging if that is not their initial state.
+ This functionality was first released in 4.4.0. (@jasone)
+ - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case.
+ This regression was first released in 4.0.0. (@jasone, @428desmo)
+ - Properly detect sparc64 when building for Linux. (@glaubitz)
+
+* 4.4.0 (December 3, 2016)
+
+ New features:
+ - Add configure support for *-*-linux-android. (@cferris1000, @jasone)
+ - Add the --disable-syscall configure option, for use on systems that place
+ security-motivated limitations on syscall(2). (@jasone)
+ - Add support for Debian GNU/kFreeBSD. (@thesam)
+
+ Optimizations:
+ - Add extent serial numbers and use them where appropriate as a sort key that
+ is higher priority than address, so that the allocation policy prefers older
+ extents. This tends to improve locality (decrease fragmentation) when
+ memory grows downward. (@jasone)
+ - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized
+ on Linux 4.5 and newer. (@jasone)
+ - Mark partially purged arena chunks as non-huge-page. This improves
+ interaction with Linux's transparent huge page functionality. (@jasone)
+
+ Bug fixes:
+ - Fix size class computations for edge conditions involving extremely large
+ allocations. This regression was first released in 4.0.0. (@jasone,
+ @ingvarha)
+ - Remove overly restrictive assertions related to the cactive statistic. This
+ regression was first released in 4.1.0. (@jasone)
+ - Implement a more reliable detection scheme for os_unfair_lock on macOS.
+ (@jszakmeister)
+
+* 4.3.1 (November 7, 2016)
+
+ Bug fixes:
+ - Fix a severe virtual memory leak. This regression was first released in
+ 4.3.0. (@interwq, @jasone)
+ - Refactor atomic and prng APIs to restore support for 32-bit platforms that
+ use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone)
+
+* 4.3.0 (November 4, 2016)
+
+ This is the first release that passes the test suite for multiple Windows
+ configurations, thanks in large part to @glandium setting up continuous
+ integration via AppVeyor (and Travis CI for Linux and OS X).
+
+ New features:
+ - Add "J" (JSON) support to malloc_stats_print(). (@jasone)
+ - Add Cray compiler support. (@ronawho)
+
+ Optimizations:
+ - Add/use adaptive spinning for bootstrapping and radix tree node
+ initialization. (@jasone)
+
+ Bug fixes:
+ - Fix large allocation to search starting in the optimal size class heap,
+ which can substantially reduce virtual memory churn and fragmentation. This
+ regression was first released in 4.0.0. (@mjp41, @jasone)
+ - Fix stats.arenas.<i>.nthreads accounting. (@interwq)
+ - Fix and simplify decay-based purging. (@jasone)
+ - Make DSS (sbrk(2)-related) operations lockless, which resolves potential
+ deadlocks during thread exit. (@jasone)
+ - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun,
+ @jasone)
+ - Fix over-sized allocation of arena_t (plus associated stats) data
+ structures. (@jasone, @interwq)
+ - Fix EXTRA_CFLAGS to not affect configuration. (@jasone)
+ - Fix a Valgrind integration bug. (@ronawho)
+ - Disallow 0x5a junk filling when running in Valgrind. (@jasone)
+ - Fix a file descriptor leak on Linux. This regression was first released in
+ 4.2.0. (@vsarunas, @jasone)
+ - Fix static linking of jemalloc with glibc. (@djwatson)
+ - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This
+ works around other libraries' system call wrappers performing reentrant
+ allocation. (@kspinka, @Whissi, @jasone)
+ - Fix OS X default zone replacement to work with OS X 10.12. (@glandium,
+ @jasone)
+ - Fix cached memory management to avoid needless commit/decommit operations
+ during purging, which resolves permanent virtual memory map fragmentation
+ issues on Windows. (@mjp41, @jasone)
+ - Fix TSD fetches to avoid (recursive) allocation. This is relevant to
+ non-TLS and Windows configurations. (@jasone)
+ - Fix malloc_conf overriding to work on Windows. (@jasone)
+ - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone)
+
+* 4.2.1 (June 8, 2016)
+
+ Bug fixes:
+ - Fix bootstrapping issues for configurations that require allocation during
+ tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone)
+ - Fix gettimeofday() version of nstime_update(). (@ronawho)
+ - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho)
+ - Fix potential VM map fragmentation regression. (@jasone)
+ - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone)
+ - Fix heap profiling context leaks in reallocation edge cases. (@jasone)
+
+* 4.2.0 (May 12, 2016)
+
+ New features:
+ - Add the arena.<i>.reset mallctl, which makes it possible to discard all of
+ an arena's allocations in a single operation (a usage sketch follows this
+ list). (@jasone)
+ - Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone)
+ - Add the --with-version configure option. (@jasone)
+ - Support --with-lg-page values larger than actual page size. (@jasone)
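+
+ A minimal usage sketch for the reset mallctl, assuming an unprefixed build
+ that exposes mallctl() via <jemalloc/jemalloc.h> (error handling elided):
+
+     #include <stdio.h>
+     #include <jemalloc/jemalloc.h>
+
+     /* Discard every allocation owned by the arena with index ind. All
+      * pointers into that arena become invalid, so this is only safe for
+      * arenas the application uses in isolation. */
+     void reset_arena(unsigned ind) {
+         char name[32];
+         snprintf(name, sizeof(name), "arena.%u.reset", ind);
+         mallctl(name, NULL, NULL, NULL, 0);
+     }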
+
+ Optimizations:
+ - Use pairing heaps rather than red-black trees for various hot data
+ structures. (@djwatson, @jasone)
+ - Streamline fast paths of rtree operations. (@jasone)
+ - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone)
+ - Decommit unused virtual memory if the OS does not overcommit. (@jasone)
+ - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
+ to avoid unfortunate interactions during fork(2). (@jasone)
+
+ Bug fixes:
+ - Fix chunk accounting related to triggering gdump profiles. (@jasone)
+ - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone)
+ - Scale leak report summary according to sampling probability. (@jasone)
+
+* 4.1.1 (May 3, 2016)
+
+ This bugfix release resolves a variety of mostly minor issues, though the
+ bitmap fix is critical for 64-bit Windows.
+
+ Bug fixes:
+ - Fix the linear scan version of bitmap_sfu() to shift by the proper amount
+ even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
+ Windows. (@jasone)
+ - Fix hashing functions to avoid unaligned memory accesses (and resulting
+ crashes). This is relevant at least to some ARM-based platforms.
+ (@rkmisra)
+ - Fix fork()-related lock rank ordering reversals. These reversals were
+ unlikely to cause deadlocks in practice except when heap profiling was
+ enabled and active. (@jasone)
+ - Fix various chunk leaks in OOM code paths. (@jasone)
+ - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone)
+ - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin)
+ - Fix a variety of test failures that were due to test fragility rather than
+ core bugs. (@jasone)
+
+* 4.1.0 (February 28, 2016)
+
+ This release is primarily about optimizations, but it also incorporates a lot
+ of portability-motivated refactoring and enhancements. So much of the work
+ was contributed by so many people (even setting aside minor changes, which
+ are visible in the git revision history, and the people who reported and
+ diagnosed issues) that starting with this release, changes are annotated with
+ author credits to help reflect the collaborative effort involved.
+
+ New features:
+ - Implement decay-based unused dirty page purging, a major optimization with
+ mallctl API impact. This is an alternative to the existing ratio-based
+ unused dirty page purging, and is intended to eventually become the sole
+ purging mechanism (a usage sketch follows this list). New mallctls:
+ + opt.purge
+ + opt.decay_time
+ + arena.<i>.decay
+ + arena.<i>.decay_time
+ + arenas.decay_time
+ + stats.arenas.<i>.decay_time
+ (@jasone, @cevans87)
+ - Add --with-malloc-conf, which makes it possible to embed a default
+ options string during configuration. This was motivated by the desire to
+ specify --with-malloc-conf=purge:decay , since the default must remain
+ purge:ratio until the 5.0.0 release. (@jasone)
+ - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin)
+ - Make *allocx() size class overflow behavior defined. The maximum
+ size class is now less than PTRDIFF_MAX to protect applications against
+ numerical overflow, and all allocation functions are guaranteed to indicate
+ errors rather than potentially crashing if the request size exceeds the
+ maximum size class. (@jasone)
+ - jeprof:
+ + Add raw heap profile support. (@jasone)
+ + Add --retain and --exclude for backtrace symbol filtering. (@jasone)
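+
+ A minimal sketch of the decay mallctls, assuming decay-based purging has been
+ selected (e.g. via malloc_conf="purge:decay"), an unprefixed build, and decay
+ times expressed in seconds:
+
+     #include <sys/types.h>
+     #include <jemalloc/jemalloc.h>
+
+     void tune_decay(void) {
+         /* Default decay time applied to arenas (seconds). */
+         ssize_t decay_time = 30;
+         mallctl("arenas.decay_time", NULL, NULL, &decay_time,
+             sizeof(decay_time));
+         /* Trigger decay-driven purging of arena 0's unused dirty pages. */
+         mallctl("arena.0.decay", NULL, NULL, NULL, 0);
+     }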
+
+ Optimizations:
+ - Optimize the fast path to combine various bootstrapping and configuration
+ checks and execute more streamlined code in the common case. (@interwq)
+ - Use linear scan for small bitmaps (used for small object tracking). In
+ addition to speeding up bitmap operations on 64-bit systems, this reduces
+ allocator metadata overhead by approximately 0.2%. (@djwatson)
+ - Separate arena_avail trees, which substantially speeds up run tree
+ operations. (@djwatson)
+ - Use memoization (boot-time-computed table) for run quantization. Separate
+ arena_avail trees reduced the importance of this optimization. (@jasone)
+ - Attempt mmap-based in-place huge reallocation. This can dramatically speed
+ up incremental huge reallocation. (@jasone)
+
+ Incompatible changes:
+ - Make opt.narenas unsigned rather than size_t. (@jasone)
+
+ Bug fixes:
+ - Fix stats.cactive accounting regression. (@rustyx, @jasone)
+ - Handle unaligned keys in hash(). This caused problems for some ARM systems.
+ (@jasone, @cferris1000)
+ - Refactor arenas array. In addition to fixing a fork-related deadlock, this
+ makes arena lookups faster and simpler. (@jasone)
+ - Move retained memory allocation out of the default chunk allocation
+ function, to a location that gets executed even if the application installs
+ a custom chunk allocation function. This resolves a virtual memory leak.
+ (@buchgr)
+ - Fix a potential tsd cleanup leak. (@cferris1000, @jasone)
+ - Fix run quantization. In practice this bug had no impact unless
+ applications requested memory with alignment exceeding one page.
+ (@jasone, @djwatson)
+ - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv)
+ - jeprof:
+ + Don't discard curl options if timeout is not defined. (@djwatson)
+ + Detect failed profile fetches. (@djwatson)
+ - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
+ --disable-stats case. (@jasone)
+
+* 4.0.4 (October 24, 2015)
+
+ This bugfix release fixes another xallocx() regression. No other regressions
+ have come to light in over a month, so this is likely a good starting point
+ for people who prefer to wait for "dot one" releases with all the major issues
+ shaken out.
+
+ Bug fixes:
+ - Fix xallocx(..., MALLOCX_ZERO) to zero the last full trailing page of large
+ allocations that have been randomly assigned an offset of 0 when the
+ --enable-cache-oblivious configure option is enabled.
+
+* 4.0.3 (September 24, 2015)
+
+ This bugfix release continues the trend of xallocx() and heap profiling fixes.
+
+ Bug fixes:
+ - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large
+ allocations when the --enable-cache-oblivious configure option is enabled.
+ - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations
+ when resizing from/to a size class that is not a multiple of the chunk size.
+ - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap
+ profile dumping started.
+ - Work around a potentially bad thread-specific data initialization
+ interaction with NPTL (glibc's pthreads implementation).
+
+* 4.0.2 (September 21, 2015)
+
+ This bugfix release addresses a few bugs specific to heap profiling.
+
+ Bug fixes:
+ - Fix ixallocx_prof_sample() to never modify nor create sampled small
+ allocations. xallocx() is in general incapable of moving small allocations,
+ so this fix removes buggy code without loss of generality.
+ - Fix irallocx_prof_sample() to always allocate large regions, even when
+ alignment is non-zero.
+ - Fix prof_alloc_rollback() to read tdata from thread-specific data rather
+ than dereferencing a potentially invalid tctx.
+
+* 4.0.1 (September 15, 2015)
+
+ This is a bugfix release that is somewhat high risk due to the amount of
+ refactoring required to address deep xallocx() problems. As a side effect of
+ these fixes, xallocx() now tries harder to partially fulfill requests for
+ optional extra space. Note that a couple of minor heap profiling
+ optimizations are included, but these are better thought of as performance
+ fixes that were integral to discovering most of the other bugs.
+
+ Optimizations:
+ - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
+ fast path when heap profiling is enabled. Additionally, split a special
+ case out into arena_prof_tctx_reset(), which also avoids chunk metadata
+ reads.
+ - Optimize irallocx_prof() to optimistically update the sampler state. The
+ prior implementation appears to have been a holdover from when
+ rallocx()/xallocx() functionality was combined as rallocm().
+
+ Bug fixes:
+ - Fix TLS configuration such that it is enabled by default for platforms on
+ which it works correctly.
+ - Fix arenas_cache_cleanup() and arena_get_hard() to handle
+ allocation/deallocation within the application's thread-specific data
+ cleanup functions even after arenas_cache is torn down.
+ - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
+ - Fix chunk purge hook calls for in-place huge shrinking reallocation to
+ specify the old chunk size rather than the new chunk size. This bug caused
+ no correctness issues for the default chunk purge function, but was
+ visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
+ - Fix heap profiling bugs:
+ + Fix heap profiling to distinguish among otherwise identical sample sites
+ with interposed resets (triggered via the "prof.reset" mallctl). This bug
+ could cause data structure corruption that would most likely result in a
+ segfault.
+ + Fix irealloc_prof() to prof_alloc_rollback() on OOM.
+ + Make one call to prof_active_get_unlocked() per allocation event, and use
+ the result throughout the relevant functions that handle an allocation
+ event. Also add a missing check in prof_realloc(). These fixes protect
+ allocation events against concurrent prof_active changes.
+ + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
+ in the correct order.
+ + Fix prof_realloc() to call prof_free_sampled_object() after calling
+ prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were
+ the same, the tctx could have been prematurely destroyed.
+ - Fix portability bugs:
+ + Don't bitshift by negative amounts when encoding/decoding run sizes in
+ chunk header maps. This affected systems with page sizes greater than 8
+ KiB.
+ + Rename index_t to szind_t to avoid an existing type on Solaris.
+ + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
+ match glibc and avoid compilation errors when including both
+ jemalloc/jemalloc.h and malloc.h in C++ code.
+ + Don't assume that /bin/sh is appropriate when running size_classes.sh
+ during configuration.
+ + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
+ + Link tests to librt if it contains clock_gettime(2).
+
+* 4.0.0 (August 17, 2015)
+
+ This version contains many speed and space optimizations, both minor and
+ major. The major themes are generalization, unification, and simplification.
+ Although many of these optimizations cause no visible behavior change, their
+ cumulative effect is substantial.
+
+ New features:
+ - Normalize size class spacing to be consistent across the complete size
+ range. By default there are four size classes per size doubling, but this
+ is now configurable via the --with-lg-size-class-group option. Also add the
+ --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
+ --with-lg-tiny-min options, which can be used to tweak page and size class
+ settings. Impacts:
+ + Worst case performance for incrementally growing/shrinking reallocation
+ is improved because there are far fewer size classes, and therefore
+ copying happens less often.
+ + Internal fragmentation is limited to 20% for all but the smallest size
+ classes (those less than four times the quantum). Requests of (1 B + 4 KiB)
+ and (1 B + 4 MiB) previously suffered nearly 50% internal fragmentation.
+ + Chunk fragmentation tends to be lower because there are fewer distinct run
+ sizes to pack.
+ - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and
+ "tcache.destroy" mallctls control tcache lifetime and flushing, and the
+ MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
+ control which tcache is used for each operation (a usage sketch follows
+ this list).
+ - Implement per thread heap profiling, as well as the ability to
+ enable/disable heap profiling on a per thread basis. Add the "prof.reset",
+ "prof.lg_sample", "thread.prof.name", "thread.prof.active",
+ "opt.prof_thread_active_init", "prof.thread_active_init", and
+ "thread.prof.active" mallctls.
+ - Add support for per arena application-specified chunk allocators, configured
+ via the "arena.<i>.chunk_hooks" mallctl.
+ - Refactor huge allocation to be managed by arenas, so that arenas now
+ function as general purpose independent allocators. This is important in
+ the context of user-specified chunk allocators, aside from the scalability
+ benefits. Related new statistics:
+ + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
+ "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
+ mallctls provide high level per arena huge allocation statistics.
+ + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
+ "stats.arenas.<i>.hchunks.<j>.nmalloc",
+ "stats.arenas.<i>.hchunks.<j>.ndalloc",
+ "stats.arenas.<i>.hchunks.<j>.nrequests", and
+ "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
+ statistics.
+ - Add the 'util' column to malloc_stats_print() output, which reports the
+ proportion of available regions that are currently in use for each small
+ size class.
+ - Add "alloc" and "free" modes for for junk filling (see the "opt.junk"
+ mallctl), so that it is possible to separately enable junk filling for
+ allocation versus deallocation.
+ - Add the jemalloc-config script, which provides information about how
+ jemalloc was configured, and how to integrate it into application builds.
+ - Add metadata statistics, which are accessible via the "stats.metadata",
+ "stats.arenas.<i>.metadata.mapped", and
+ "stats.arenas.<i>.metadata.allocated" mallctls.
+ - Add the "stats.resident" mallctl, which reports the upper limit of
+ physically resident memory mapped by the allocator.
+ - Add per arena control over unused dirty page purging, via the
+ "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
+ "stats.arenas.<i>.lg_dirty_mult" mallctls.
+ - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
+ feature on/off during program execution.
+ - Add sdallocx(), which implements sized deallocation. The primary
+ optimization over dallocx() is the removal of a metadata read, which often
+ suffers an L1 cache miss.
+ - Add missing header includes in jemalloc/jemalloc.h, so that applications
+ only have to #include <jemalloc/jemalloc.h>.
+ - Add support for additional platforms:
+ + Bitrig
+ + Cygwin
+ + DragonFlyBSD
+ + iOS
+ + OpenBSD
+ + OpenRISC/or1k
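+
+ A minimal sketch of the explicit tcache and sized-deallocation interfaces,
+ assuming an unprefixed build (error handling abbreviated):
+
+     #include <jemalloc/jemalloc.h>
+
+     void tcache_demo(void) {
+         unsigned tc;
+         size_t sz = sizeof(tc);
+         /* Create an explicit thread cache; its index is returned via oldp. */
+         if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
+             return;
+         /* Route allocation and sized deallocation through that cache. */
+         void *p = mallocx(64, MALLOCX_TCACHE(tc));
+         if (p != NULL)
+             sdallocx(p, 64, MALLOCX_TCACHE(tc));
+         /* Flush and destroy the cache once it is no longer needed. */
+         mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
+     }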
+
+ Optimizations:
+ - Maintain dirty runs in per arena LRUs rather than in per arena trees of
+ dirty-run-containing chunks. In practice this change significantly reduces
+ dirty page purging volume.
+ - Integrate whole chunks into the unused dirty page purging machinery. This
+ reduces the cost of repeated huge allocation/deallocation, because it
+ effectively introduces a cache of chunks.
+ - Split the arena chunk map into two separate arrays, in order to increase
+ cache locality for the frequently accessed bits.
+ - Move small run metadata out of runs, into arena chunk headers. This reduces
+ run fragmentation, smaller runs reduce external fragmentation for small size
+ classes, and packed (less uniformly aligned) metadata layout improves CPU
+ cache set distribution.
+ - Randomly distribute large allocation base pointer alignment relative to page
+ boundaries in order to more uniformly utilize CPU cache sets. This can be
+ disabled via the --disable-cache-oblivious configure option, and queried via
+ the "config.cache_oblivious" mallctl.
+ - Micro-optimize the fast paths for the public API functions.
+ - Refactor thread-specific data to reside in a single structure. This assures
+ that only a single TLS read is necessary per call into the public API.
+ - Implement in-place huge allocation growing and shrinking.
+ - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
+ additional optimizations that reduce maximum lookup depth to one or two
+ levels. This resolves what was a concurrency bottleneck for per arena huge
+ allocation, because a global data structure is critical for determining
+ which arenas own which huge allocations.
+
+ Incompatible changes:
+ - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
+ warnings by default.
+ - Assure that the constness of malloc_usable_size()'s return type matches that
+ of the system implementation.
+ - Change the heap profile dump format to support per thread heap profiling,
+ rename pprof to jeprof, and enhance it with the --thread=<n> option. As a
+ result, the bundled jeprof must now be used rather than the upstream
+ (gperftools) pprof.
+ - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
+ internally deadlock on some platforms.
+ - Change the "arenas.nlruns" mallctl type from size_t to unsigned.
+ - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
+ "stats.arenas.<i>.bins.<j>.curregs".
+ - Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
+ - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
+ MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.
+
+ Removed features:
+ - Remove the *allocm() API, which is superseded by the *allocx() API.
+ - Remove the --enable-dss option, and make dss non-optional on all platforms
+ which support sbrk(2).
+ - Remove the "arenas.purge" mallctl, which was obsoleted by the
+ "arena.<i>.purge" mallctl in 3.1.0.
+ - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
+ detects whether it is running inside Valgrind.
+ - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
+ "stats.huge.ndalloc" mallctls.
+ - Remove the --enable-mremap option.
+ - Remove the "stats.chunks.current", "stats.chunks.total", and
+ "stats.chunks.high" mallctls.
+
+ Bug fixes:
+ - Fix the cactive statistic to decrease (rather than increase) when active
+ memory decreases. This regression was first released in 3.5.0.
+ - Fix OOM handling in memalign() and valloc(). A variant of this bug existed
+ in all releases since 2.0.0, which introduced these functions.
+ - Fix an OOM-related regression in arena_tcache_fill_small(), which could
+ cause cache corruption on OOM. This regression was present in all releases
+ from 2.2.0 through 3.6.0.
+ - Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
+ calloc(), and realloc() when profiling is enabled.
+ - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
+ "secondary" precedence is specified, but sbrk(2) is not supported.
+ - Fix fallback lg_floor() implementations to handle extremely large inputs.
+ - Ensure the default purgeable zone is after the default zone on OS X.
+ - Fix latent bugs in atomic_*().
+ - Fix the "arena.<i>.dss" mallctl to handle read-only calls.
+ - Fix tls_model configuration to enable the initial-exec model when possible.
+ - Mark malloc_conf as a weak symbol so that the application can override it.
+ - Correctly detect glibc's adaptive pthread mutexes.
+ - Fix the --without-export configure option.
+
+* 3.6.0 (March 31, 2014)
+
+ This version contains a critical bug fix for a regression present in 3.5.0 and
+ 3.5.1.
+
+ Bug fixes:
+ - Fix a regression in arena_chunk_alloc() that caused crashes during
+ small/large allocation if chunk allocation failed. In the absence of this
+ bug, chunk allocation failure would result in allocation failure, e.g. NULL
+ return from malloc(). This regression was introduced in 3.5.0.
+ - Fix backtracing for gcc intrinsics-based backtracing by specifying
+ -fno-omit-frame-pointer to gcc. Note that the application (and all the
+ libraries it links to) must also be compiled with this option for
+ backtracing to be reliable.
+ - Use dss allocation precedence for huge allocations as well as small/large
+ allocations.
+ - Fix test assertion failure message formatting. This bug did not manifest on
+ x86_64 systems because of implementation subtleties in va_list.
+ - Fix inconsequential test failures for hash and SFMT code.
+
+ New features:
+ - Support heap profiling on FreeBSD. This feature depends on the proc
+ filesystem being mounted during heap profile dumping.
+
+* 3.5.1 (February 25, 2014)
+
+ This version primarily addresses minor bugs in test code.
+
+ Bug fixes:
+ - Configure Solaris/Illumos to use MADV_FREE.
+ - Fix junk filling for mremap(2)-based huge reallocation. This is only
+ relevant if configuring with the --enable-mremap option specified.
+ - Avoid compilation failure if 'restrict' C99 keyword is not supported by the
+ compiler.
+ - Add a configure test for SSE2 rather than assuming it is usable on i686
+ systems. This fixes test compilation errors, especially on 32-bit Linux
+ systems.
+ - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit
+ test.
+ - Fix/remove flawed alignment-related overflow tests.
+ - Prevent compiler optimizations that could change backtraces in the
+ prof_accum unit test.
+
+* 3.5.0 (January 22, 2014)
+
+ This version focuses on refactoring and automated testing, though it also
+ includes some non-trivial heap profiling optimizations not mentioned below.
+
+ New features:
+ - Add the *allocx() API, which is a successor to the experimental *allocm()
+ API. The *allocx() functions are slightly simpler to use because they have
+ fewer parameters, they directly return the results of primary interest, and
+ mallocx()/rallocx() avoid the strict aliasing pitfall that
+ allocm()/rallocm() share with posix_memalign(). Note that *allocm() is
+ slated for removal in the next non-bugfix release.
+ - Add support for LinuxThreads.
+
+ Bug fixes:
+ - Unless heap profiling is enabled, disable floating point code and don't link
+ with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64
+ systems, makes it possible to completely disable floating point register
+ use. Some versions of glibc neglect to save/restore caller-saved floating
+ point registers during dynamic lazy symbol loading, and the symbol loading
+ code uses whatever malloc the application happens to have linked/loaded
+ with, the result being potential floating point register corruption.
+ - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling
+ backtrace creation in imemalign(). This bug impacted posix_memalign() and
+ aligned_alloc().
+ - Fix a file descriptor leak in a prof_dump_maps() error path.
+ - Fix prof_dump() to close the dump file descriptor for all relevant error
+ paths.
+ - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for
+ allocation, not just deallocation.
+ - Fix a data race for large allocation stats counters.
+ - Fix a potential infinite loop during thread exit. This bug occurred on
+ Solaris, and could affect other platforms with similar pthreads TSD
+ implementations.
+ - Don't junk-fill reallocations unless usable size changes. This fixes a
+ violation of the *allocx()/*allocm() semantics.
+ - Fix growing large reallocation to junk fill new space.
+ - Fix huge deallocation to junk fill when munmap is disabled.
+ - Change the default private namespace prefix from empty to je_, and change
+ --with-private-namespace-prefix so that it prepends an additional prefix
+ rather than replacing je_. This reduces the likelihood of applications
+ which statically link jemalloc experiencing symbol name collisions.
+ - Add missing private namespace mangling (relevant when
+ --with-private-namespace is specified).
+ - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as
+ static even for debug builds.
+ - Add a missing mutex unlock in a malloc_init_hard() error path. In practice
+ this error path is never executed.
+ - Fix numerous bugs in malloc_strtoumax() error handling/reporting. These
+ bugs had no impact except for malformed inputs.
+ - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by
+ existing calls, so they had no impact.
+
+* 3.4.1 (October 20, 2013)
+
+ Bug fixes:
+ - Fix a race in the "arenas.extend" mallctl that could cause memory corruption
+ of internal data structures and subsequent crashes.
+ - Fix Valgrind integration flaws that caused Valgrind warnings about reads of
+ uninitialized memory in:
+ + arena chunk headers
+ + internal zero-initialized data structures (relevant to tcache and prof
+ code)
+ - Preserve errno during the first allocation. A readlink(2) call during
+ initialization fails unless /etc/malloc.conf exists, so errno was typically
+ set during the first allocation prior to this fix.
+ - Fix compilation warnings reported by gcc 4.8.1.
+
+* 3.4.0 (June 2, 2013)
+
+ This version is essentially a small bugfix release, but the addition of
+ aarch64 support requires that the minor version be incremented.
+
+ Bug fixes:
+ - Fix race-triggered deadlocks in chunk_record(). These deadlocks were
+ typically triggered by multiple threads concurrently deallocating huge
+ objects.
+
+ New features:
+ - Add support for the aarch64 architecture.
+
+* 3.3.1 (March 6, 2013)
+
+ This version fixes bugs that are typically encountered only when utilizing
+ custom run-time options.
+
+ Bug fixes:
+ - Fix a locking order bug that could cause deadlock during fork if heap
+ profiling were enabled.
+ - Fix a chunk recycling bug that could cause the allocator to lose track of
+ whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause
+ corruption if allocating via sbrk(2) (unlikely unless running with the
+ "dss:primary" option specified). This was completely harmless on Linux
+ unless using mlockall(2) (and unlikely even then, unless the
+ --disable-munmap configure option or the "dss:primary" option was
+ specified). This regression was introduced in 3.1.0 by the
+ mlockall(2)/madvise(2) interaction fix.
+ - Fix TLS-related memory corruption that could occur during thread exit if the
+ thread never allocated memory. Only the quarantine and prof facilities were
+ susceptible.
+ - Fix two quarantine bugs:
+ + Internal reallocation of the quarantined object array leaked the old
+ array.
+ + Reallocation failure for internal reallocation of the quarantined object
+ array (very unlikely) resulted in memory corruption.
+ - Fix Valgrind integration to annotate all internally allocated memory in a
+ way that keeps Valgrind happy about internal data structure access.
+ - Fix building for s390 systems.
+
+* 3.3.0 (January 23, 2013)
+
+ This version includes a few minor performance improvements in addition to the
+ listed new features and bug fixes.
+
+ New features:
+ - Add clipping support to lg_chunk option processing.
+ - Add the --enable-ivsalloc option.
+ - Add the --without-export option.
+ - Add the --disable-zone-allocator option.
+
+ Bug fixes:
+ - Fix "arenas.extend" mallctl to output the number of arenas.
+ - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory
+ is undefined.
+ - Fix build break on FreeBSD related to alloca.h.
+
+* 3.2.0 (November 9, 2012)
+
+ In addition to a couple of bug fixes, this version modifies page run
+ allocation and dirty page purging algorithms in order to better control
+ page-level virtual memory fragmentation.
+
+ Incompatible changes:
+ - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1).
+
+ Bug fixes:
+ - Fix dss/mmap allocation precedence code to use recyclable mmap memory only
+ after primary dss allocation fails.
+ - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced
+ in 3.1.0 by the addition of the "arena.<i>.purge" mallctl.
+
+* 3.1.0 (October 16, 2012)
+
+ New features:
+ - Auto-detect whether running inside Valgrind, thus removing the need to
+ manually specify MALLOC_CONF=valgrind:true.
+ - Add the "arenas.extend" mallctl, which allows applications to create
+ manually managed arenas (a usage sketch follows this list).
+ - Add the ALLOCM_ARENA() flag for {,r,d}allocm().
+ - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls,
+ which provide control over dss/mmap precedence.
+ - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
+ - Define LG_QUANTUM for hppa.
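+
+ A minimal sketch of combining "arenas.extend" with the existing
+ "thread.arena" mallctl, assuming an unprefixed build:
+
+     #include <jemalloc/jemalloc.h>
+
+     /* Create a manually managed arena and bind the calling thread to it,
+      * so subsequent malloc()/free() calls in this thread use that arena. */
+     void use_private_arena(void) {
+         unsigned ind;
+         size_t sz = sizeof(ind);
+         if (mallctl("arenas.extend", &ind, &sz, NULL, 0) != 0)
+             return;
+         mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind));
+     }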
+
+ Incompatible changes:
+ - Disable tcache by default if running inside Valgrind, in order to avoid
+ making unallocated objects appear reachable to Valgrind.
+ - Drop const from malloc_usable_size() argument on Linux.
+
+ Bug fixes:
+ - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
+ - Remove const from __*_hook variable declarations, so that glibc can modify
+ them during process forking.
+ - Fix mlockall(2)/madvise(2) interaction.
+ - Fix fork(2)-related deadlocks.
+ - Fix error return value for "thread.tcache.enabled" mallctl.
+
+* 3.0.0 (May 11, 2012)
+
+ Although this version adds some major new features, the primary focus is on
+ internal code cleanup that facilitates maintainability and portability, most
+ of which is not reflected in the ChangeLog. This is the first release to
+ incorporate substantial contributions from numerous other developers, and the
+ result is a more broadly useful allocator (see the git revision history for
+ contribution details). Note that the license has been unified, thanks to
+ Facebook granting a license under the same terms as the other copyright
+ holders (see COPYING).
+
+ New features:
+ - Implement Valgrind support, redzones, and quarantine.
+ - Add support for additional platforms:
+ + FreeBSD
+ + Mac OS X Lion
+ + MinGW
+ + Windows (no support yet for replacing the system malloc)
+ - Add support for additional architectures:
+ + MIPS
+ + SH4
+ + Tilera
+ - Add support for cross compiling.
+ - Add nallocm(), which rounds a request size up to the nearest size class
+ without actually allocating.
+ - Implement aligned_alloc() (blame C11).
+ - Add the "thread.tcache.enabled" mallctl.
+ - Add the "opt.prof_final" mallctl.
+ - Update pprof (from gperftools 2.0).
+ - Add the --with-mangling option.
+ - Add the --disable-experimental option.
+ - Add the --disable-munmap option, and make it the default on Linux.
+ - Add the --enable-mremap option; use of mremap(2) is now disabled by
+ default.
+
+ Incompatible changes:
+ - Enable stats by default.
+ - Enable fill by default.
+ - Disable lazy locking by default.
+ - Rename the "tcache.flush" mallctl to "thread.tcache.flush".
+ - Rename the "arenas.pagesize" mallctl to "arenas.page".
+ - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB).
+ - Change the "opt.prof_accum" default from true to false.
+
+ Removed features:
+ - Remove the swap feature, including the "config.swap", "swap.avail",
+ "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls.
+ - Remove highruns statistics, including the
+ "stats.arenas.<i>.bins.<j>.highruns" and
+ "stats.arenas.<i>.lruns.<j>.highruns" mallctls.
+ - As part of small size class refactoring, remove the "opt.lg_[qc]space_max",
+ "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and
+ "arenas.[tqcs]bins" mallctls.
+ - Remove the "arenas.chunksize" mallctl.
+ - Remove the "opt.lg_prof_tcmax" option.
+ - Remove the "opt.lg_prof_bt_max" option.
+ - Remove the "opt.lg_tcache_gc_sweep" option.
+ - Remove the --disable-tiny option, including the "config.tiny" mallctl.
+ - Remove the --enable-dynamic-page-shift configure option.
+ - Remove the --enable-sysv configure option.
+
+ Bug fixes:
+ - Fix a statistics-related bug in the "thread.arena" mallctl that could cause
+ invalid statistics and crashes.
+ - Work around TLS deallocation via free() on Linux. This bug could cause
+ write-after-free memory corruption.
+ - Fix a potential deadlock that could occur during interval- and
+ growth-triggered heap profile dumps.
+ - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags.
+ - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could
+ cause memory corruption and crashes with --enable-dss specified.
+ - Fix fork-related bugs that could cause deadlock in children between fork
+ and exec.
+ - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
+ - Fix realloc(p, 0) to act like free(p).
+ - Do not enforce minimum alignment in memalign().
+ - Check for NULL pointer in malloc_usable_size().
+ - Fix an off-by-one heap profile statistics bug that could be observed in
+ interval- and growth-triggered heap profiles.
+ - Fix the "epoch" mallctl to update cached stats even if the passed in epoch
+ is 0.
+ - Fix bin->runcur management to fix a layout policy bug. This bug did not
+ affect correctness.
+ - Fix a bug in choose_arena_hard() that potentially caused more arenas to be
+ initialized than necessary.
+ - Add missing "opt.lg_tcache_max" mallctl implementation.
+ - Use glibc allocator hooks to make mixed allocator usage less likely.
+ - Fix build issues for --disable-tcache.
+ - Don't mangle pthread_create() when --with-private-namespace is specified.
+
+* 2.2.5 (November 14, 2011)
+
+ Bug fixes:
+ - Fix huge_ralloc() race when using mremap(2). This is a serious bug that
+ could cause memory corruption and/or crashes.
+ - Fix huge_ralloc() to maintain chunk statistics.
+ - Fix malloc_stats_print(..., "a") output.
+
+* 2.2.4 (November 5, 2011)
+
+ Bug fixes:
+ - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as
+ well as for --disable-tls builds in earlier releases.
+ - Do not assume a 4 KiB page size in test/rallocm.c.
+
+* 2.2.3 (August 31, 2011)
+
+ This version fixes numerous bugs related to heap profiling.
+
+ Bug fixes:
+ - Fix a prof-related race condition. This bug could cause memory corruption,
+ but only occurred in non-default configurations (prof_accum:false).
+ - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is
+ excluded from backtraces).
+ - Fix a prof-related bug in realloc() (only triggered by OOM errors).
+ - Fix prof-related bugs in allocm() and rallocm().
+ - Fix prof_tdata_cleanup() for --disable-tls builds.
+ - Fix a relative include path, to fix objdir builds.
+
+* 2.2.2 (July 30, 2011)
+
+ Bug fixes:
+ - Fix a build error for --disable-tcache.
+ - Fix assertions in arena_purge() (for real this time).
+ - Add the --with-private-namespace option. This is a workaround for symbol
+ conflicts that can inadvertently arise when using static libraries.
+
+* 2.2.1 (March 30, 2011)
+
+ Bug fixes:
+ - Implement atomic operations for x86/x64. This fixes compilation failures
+ for versions of gcc that are still in wide use.
+ - Fix an assertion in arena_purge().
+
+* 2.2.0 (March 22, 2011)
+
+ This version incorporates several improvements to algorithms and data
+ structures that tend to reduce fragmentation and increase speed.
+
+ New features:
+ - Add the "stats.cactive" mallctl.
+ - Update pprof (from google-perftools 1.7).
+ - Improve backtracing-related configuration logic, and add the
+ --disable-prof-libgcc option.
+
+ Bug fixes:
+ - Change default symbol visibility from "internal" to "hidden", which
+ decreases the overhead of library-internal function calls.
+ - Fix symbol visibility so that it is also set on OS X.
+ - Fix a build dependency regression caused by the introduction of the .pic.o
+ suffix for PIC object files.
+ - Add missing checks for mutex initialization failures.
+ - Don't use libgcc-based backtracing except on x64, where it is known to work.
+ - Fix deadlocks on OS X that were due to memory allocation in
+ pthread_mutex_lock().
+ - Heap profiling-specific fixes:
+ + Fix memory corruption due to integer overflow in small region index
+ computation, when using a small enough sample interval that profiling
+ context pointers are stored in small run headers.
+ + Fix a bootstrap ordering bug that only occurred with TLS disabled.
+ + Fix a rallocm() rsize bug.
+ + Fix error detection bugs for aligned memory allocation.
+
+* 2.1.3 (March 14, 2011)
+
+ Bug fixes:
+ - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix
+ for OS X in 2.1.2).
+ - Fix a "thread.arena" mallctl bug.
+ - Fix a thread cache stats merging bug.
+
+* 2.1.2 (March 2, 2011)
+
+ Bug fixes:
+ - Fix "thread.{de,}allocatedp" mallctl for OS X.
+ - Add missing jemalloc.a to build system.
+
+* 2.1.1 (January 31, 2011)
+
+ Bug fixes:
+ - Fix aligned huge reallocation (affected allocm()).
+ - Fix the ALLOCM_LG_ALIGN macro definition.
+ - Fix a heap dumping deadlock.
+ - Fix a "thread.arena" mallctl bug.
+
+* 2.1.0 (December 3, 2010)
+
+ This version incorporates some optimizations that can't quite be considered
+ bug fixes.
+
+ New features:
+ - Use Linux's mremap(2) for huge object reallocation when possible.
+ - Avoid locking in mallctl*() when possible.
+ - Add the "thread.[de]allocatedp" mallctl's.
+ - Convert the manual page source from roff to DocBook, and generate both roff
+ and HTML manuals.
+
+ Bug fixes:
+ - Fix a crash due to incorrect bootstrap ordering. This only impacted
+ --enable-debug --enable-dss configurations.
+ - Fix a minor statistics bug for mallctl("swap.avail", ...).
+
+* 2.0.1 (October 29, 2010)
+
+ Bug fixes:
+ - Fix a race condition in heap profiling that could cause undefined behavior
+ if "opt.prof_accum" were disabled.
+ - Add missing mutex unlocks for some OOM error paths in the heap profiling
+ code.
+ - Fix a compilation error for non-C99 builds.
+
+* 2.0.0 (October 24, 2010)
+
+ This version focuses on the experimental *allocm() API, and on improved
+ run-time configuration/introspection. Nonetheless, numerous performance
+ improvements are also included.
+
+ New features:
+ - Implement the experimental {,r,s,d}allocm() API, which provides a superset
+ of the functionality available via malloc(), calloc(), posix_memalign(),
+ realloc(), malloc_usable_size(), and free(). These functions can be used to
+ allocate/reallocate aligned zeroed memory, ask for optional extra memory
+ during reallocation, prevent object movement during reallocation, etc.
+ - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is
+ more human-readable, and more flexible. For example:
+ JEMALLOC_OPTIONS=AJP
+ is now:
+ MALLOC_CONF=abort:true,fill:true,stats_print:true
+ - Port to Apple OS X. Sponsored by Mozilla.
+ - Make it possible for the application to control thread-->arena mappings via
+ the "thread.arena" mallctl.
+ - Add compile-time support for all TLS-related functionality via pthreads TSD.
+ This is mainly of interest for OS X, which does not support TLS, but has a
+ TSD implementation with similar performance.
+ - Override memalign() and valloc() if they are provided by the system.
+ - Add the "arenas.purge" mallctl, which can be used to synchronously purge all
+ dirty unused pages.
+ - Make cumulative heap profiling data optional, so that it is possible to
+ limit the amount of memory consumed by heap profiling data structures.
+ - Add per thread allocation counters that can be accessed via the
+ "thread.allocated" and "thread.deallocated" mallctls.
+
+ Incompatible changes:
+ - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above).
+ - Increase default backtrace depth from 4 to 128 for heap profiling.
+ - Disable interval-based profile dumps by default.
+
+ Bug fixes:
+ - Remove bad assertions in fork handler functions. These assertions could
+ cause aborts for some combinations of configure settings.
+ - Fix strerror_r() usage to deal with non-standard semantics in GNU libc.
+ - Fix leak context reporting. This bug tended to cause the number of contexts
+ to be underreported (though the reported number of objects and bytes were
+ correct).
+ - Fix a realloc() bug for large in-place growing reallocation. This bug could
+ cause memory corruption, but it was hard to trigger.
+ - Fix an allocation bug for small allocations that could be triggered if
+ multiple threads raced to create a new run of backing pages.
+ - Enhance the heap profiler to trigger samples based on usable size, rather
+ than request size.
+ - Fix a heap profiling bug due to sometimes losing track of requested object
+ size for sampled objects.
+
+* 1.0.3 (August 12, 2010)
+
+ Bug fixes:
+ - Fix the libunwind-based implementation of stack backtracing (used for heap
+ profiling). This bug could cause zero-length backtraces to be reported.
+ - Add a missing mutex unlock in library initialization code. If multiple
+ threads raced to initialize malloc, some of them could end up permanently
+ blocked.
+
+* 1.0.2 (May 11, 2010)
+
+ Bug fixes:
+ - Fix junk filling of large objects, which could cause memory corruption.
+ - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual
+ memory limits could cause swap file configuration to fail. Contributed by
+ Jordan DeLong.
+
+* 1.0.1 (April 14, 2010)
+
+ Bug fixes:
+ - Fix compilation when --enable-fill is specified.
+ - Fix threads-related profiling bugs that affected accuracy and caused memory
+ to be leaked during thread exit.
+ - Fix dirty page purging race conditions that could cause crashes.
+ - Fix crash in tcache flushing code during thread destruction.
+
+* 1.0.0 (April 11, 2010)
+
+ This release focuses on speed and run-time introspection. Numerous
+ algorithmic improvements make this release substantially faster than its
+ predecessors.
+
+ New features:
+ - Implement autoconf-based configuration system.
+ - Add mallctl*(), for the purposes of introspection and run-time
+ configuration (a usage sketch follows this list).
+ - Make it possible for the application to manually flush a thread's cache, via
+ the "tcache.flush" mallctl.
+ - Base maximum dirty page count on proportion of active memory.
+ - Compute various additional run-time statistics, including per size class
+ statistics for large objects.
+ - Expose malloc_stats_print(), which can be called repeatedly by the
+ application.
+ - Simplify the malloc_message() signature to only take one string argument,
+ and incorporate an opaque data pointer argument for use by the application
+ in combination with malloc_stats_print().
+ - Add support for allocation backed by one or more swap files, and allow the
+ application to disable over-commit if swap files are in use.
+ - Implement allocation profiling and leak checking.
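+
+ A minimal sketch of the introspection interfaces, assuming an unprefixed
+ build with statistics enabled:
+
+     #include <stdio.h>
+     #include <jemalloc/jemalloc.h>
+
+     void show_usage(void) {
+         size_t allocated, sz = sizeof(allocated);
+         /* Query a single statistic by name. */
+         if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
+             printf("allocated: %zu bytes\n", allocated);
+         /* Emit the full report via the default write callback (stderr). */
+         malloc_stats_print(NULL, NULL, NULL);
+     }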
+
+ Removed features:
+ - Remove the dynamic arena rebalancing code, since thread-specific caching
+ reduces its utility.
+
+ Bug fixes:
+ - Modify chunk allocation to work when address space layout randomization
+ (ASLR) is in use.
+ - Fix thread cleanup bugs related to TLS destruction.
+ - Handle 0-size allocation requests in posix_memalign().
+ - Fix a chunk leak. The leaked chunks were never touched, so this impacted
+ virtual memory usage, but not physical memory usage.
+
+* linux_2008082[78]a (August 27/28, 2008)
+
+ These snapshot releases are the simple result of incorporating Linux-specific
+ support into the FreeBSD malloc sources.
+
+--------------------------------------------------------------------------------
+vim:filetype=text:textwidth=80
diff --git a/contrib/libs/jemalloc/INSTALL.md b/contrib/libs/jemalloc/INSTALL.md
index b8f729b0d7..f91b9fe3b9 100644
--- a/contrib/libs/jemalloc/INSTALL.md
+++ b/contrib/libs/jemalloc/INSTALL.md
@@ -1,421 +1,421 @@
-Building and installing a packaged release of jemalloc can be as simple as
-typing the following while in the root directory of the source tree:
-
- ./configure
- make
- make install
-
-If building from unpackaged developer sources, the simplest command sequence
-that might work is:
-
- ./autogen.sh
- make dist
- make
- make install
-
-Note that documentation is not built by the default target because doing so
-would create a dependency on xsltproc in packaged releases, hence the
-requirement to either run 'make dist' or avoid installing docs via the various
-install_* targets documented below.
-
-
-## Advanced configuration
-
-The 'configure' script supports numerous options that allow control of which
-functionality is enabled, where jemalloc is installed, etc. Optionally, pass
-any of the following arguments (not a definitive list) to 'configure':
-
-* `--help`
-
- Print a definitive list of options.
-
-* `--prefix=<install-root-dir>`
-
- Set the base directory in which to install. For example:
-
- ./configure --prefix=/usr/local
-
- will cause files to be installed into /usr/local/include, /usr/local/lib,
- and /usr/local/man.
-
-* `--with-version=(<major>.<minor>.<bugfix>-<nrev>-g<gid>|VERSION)`
-
- The VERSION file is mandatory for successful configuration, and the
- following steps are taken to assure its presence:
- 1) If --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> is specified,
- generate VERSION using the specified value.
- 2) If --with-version is not specified in either form and the source
- directory is inside a git repository, try to generate VERSION via 'git
- describe' invocations that pattern-match release tags.
- 3) If VERSION is missing, generate it with a bogus version:
- 0.0.0-0-g0000000000000000000000000000000000000000
-
- Note that --with-version=VERSION bypasses (1) and (2), which simplifies
- VERSION configuration when embedding a jemalloc release into another
- project's git repository.
-
-* `--with-rpath=<colon-separated-rpath>`
-
- Embed one or more library paths, so that libjemalloc can find the libraries
- it is linked to. This works only on ELF-based systems.
-
-* `--with-mangling=<map>`
-
- Mangle public symbols specified in <map>, which is a comma-separated list of
- name:mangled pairs.
-
- For example, to use ld's --wrap option as an alternative method for
- overriding libc's malloc implementation, specify something like:
-
- --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...]
-
- Note that mangling happens prior to application of the prefix specified by
- --with-jemalloc-prefix, and mangled symbols are then ignored when applying
- the prefix.
-
-* `--with-jemalloc-prefix=<prefix>`
-
- Prefix all public APIs with <prefix>. For example, if <prefix> is
- "prefix_", API changes like the following occur:
-
- malloc() --> prefix_malloc()
- malloc_conf --> prefix_malloc_conf
- /etc/malloc.conf --> /etc/prefix_malloc.conf
- MALLOC_CONF --> PREFIX_MALLOC_CONF
-
- This makes it possible to use jemalloc at the same time as the system
- allocator, or even to use multiple copies of jemalloc simultaneously.
-
- By default, the prefix is "", except on OS X, where it is "je_". On OS X,
- jemalloc overlays the default malloc zone, but makes no attempt to actually
- replace the "malloc", "calloc", etc. symbols.
-
-* `--without-export`
-
- Don't export public APIs. This can be useful when building jemalloc as a
- static library, or to avoid exporting public APIs when using the zone
- allocator on OSX.
-
-* `--with-private-namespace=<prefix>`
-
- Prefix all library-private APIs with <prefix>je_. For shared libraries,
- symbol visibility mechanisms prevent these symbols from being exported, but
- for static libraries, naming collisions are a real possibility. By
- default, <prefix> is empty, which results in a symbol prefix of je_ .
-
-* `--with-install-suffix=<suffix>`
-
- Append <suffix> to the base name of all installed files, such that multiple
- versions of jemalloc can coexist in the same installation directory. For
- example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
-
-* `--with-malloc-conf=<malloc_conf>`
-
- Embed `<malloc_conf>` as a run-time options string that is processed prior to
- the malloc_conf global variable, the /etc/malloc.conf symlink, and the
- MALLOC_CONF environment variable. For example, to change the default decay
- time to 30 seconds:
-
- --with-malloc-conf=decay_ms:30000
-
-* `--enable-debug`
-
- Enable assertions and validation code. This incurs a substantial
- performance hit, but is very useful during application development.
-
-* `--disable-stats`
-
- Disable statistics gathering functionality. See the "opt.stats_print"
- option documentation for usage details.
-
-* `--enable-prof`
-
- Enable heap profiling and leak detection functionality. See the "opt.prof"
- option documentation for usage details. When enabled, there are several
- approaches to backtracing, and the configure script chooses the first one
- in the following list that appears to function correctly:
-
- + libunwind (requires --enable-prof-libunwind)
- + libgcc (unless --disable-prof-libgcc)
- + gcc intrinsics (unless --disable-prof-gcc)
-
-* `--enable-prof-libunwind`
-
- Use the libunwind library (http://www.nongnu.org/libunwind/) for stack
- backtracing.
-
-* `--disable-prof-libgcc`
-
- Disable the use of libgcc's backtracing functionality.
-
-* `--disable-prof-gcc`
-
- Disable the use of gcc intrinsics for backtracing.
-
-* `--with-static-libunwind=<libunwind.a>`
-
- Statically link against the specified libunwind.a rather than dynamically
- linking with -lunwind.
-
-* `--disable-fill`
-
- Disable support for junk/zero filling of memory. See the "opt.junk" and
- "opt.zero" option documentation for usage details.
-
-* `--disable-zone-allocator`
-
- Disable zone allocator for Darwin. This means jemalloc won't be hooked as
- the default allocator on OSX/iOS.
-
-* `--enable-utrace`
-
- Enable utrace(2)-based allocation tracing. This feature is not broadly
- portable (FreeBSD has it, but Linux and OS X do not).
-
-* `--enable-xmalloc`
-
- Enable support for optional immediate termination due to out-of-memory
- errors, as is commonly implemented by an "xmalloc" wrapper function for
- malloc. See the "opt.xmalloc" option documentation for usage details.
-
-* `--enable-lazy-lock`
-
- Enable code that wraps pthread_create() to detect when an application
- switches from single-threaded to multi-threaded mode, so that it can avoid
- mutex locking/unlocking operations while in single-threaded mode. In
- practice, this feature usually has little impact on performance unless
- thread-specific caching is disabled.
-
-* `--disable-cache-oblivious`
-
- Disable cache-oblivious large allocation alignment for large allocation
- requests with no alignment constraints. If this feature is disabled, all
- large allocations are page-aligned as an implementation artifact, which can
- severely harm CPU cache utilization. However, the cache-oblivious layout
- comes at the cost of one extra page per large allocation, which in the
- most extreme case increases physical memory usage for the 16 KiB size class
- to 20 KiB.
-
-* `--disable-syscall`
-
- Disable use of syscall(2) rather than {open,read,write,close}(2). This is
- intended as a workaround for systems that place security limitations on
- syscall(2).
-
-* `--disable-cxx`
-
- Disable C++ integration. This will cause new and delete operator
- implementations to be omitted.
-
-* `--with-xslroot=<path>`
-
- Specify where to find DocBook XSL stylesheets when building the
- documentation.
-
-* `--with-lg-page=<lg-page>`
-
- Specify the base 2 log of the allocator page size, which must in turn be at
- least as large as the system page size. By default the configure script
- determines the host's page size and sets the allocator page size equal to
- the system page size, so this option need not be specified unless the
- system page size may change between configuration and execution, e.g. when
- cross compiling.
-
-* `--with-lg-hugepage=<lg-hugepage>`
-
- Specify the base 2 log of the system huge page size. This option is useful
- when cross compiling, or when overriding the default for systems that do
- not explicitly support huge pages.
-
-* `--with-lg-quantum=<lg-quantum>`
-
- Specify the base 2 log of the minimum allocation alignment. jemalloc needs
- to know the minimum alignment that meets the following C standard
- requirement (quoted from the April 12, 2011 draft of the C11 standard):
-
- > The pointer returned if the allocation succeeds is suitably aligned so
- that it may be assigned to a pointer to any type of object with a
- fundamental alignment requirement and then used to access such an object
- or an array of such objects in the space allocated [...]
-
- This setting is architecture-specific, and although jemalloc includes known
- safe values for the most commonly used modern architectures, there is a
- wrinkle related to GNU libc (glibc) that may impact your choice of
- <lg-quantum>. On most modern architectures, this mandates 16-byte
- alignment (<lg-quantum>=4), but the glibc developers chose not to meet this
- requirement for performance reasons. An old discussion can be found at
- <https://sourceware.org/bugzilla/show_bug.cgi?id=206> . Unlike glibc,
- jemalloc does follow the C standard by default (caveat: jemalloc
- technically cheats for size classes smaller than the quantum), but the fact
- that Linux systems already work around this allocator noncompliance means
- that it is generally safe in practice to let jemalloc's minimum alignment
- follow glibc's lead. If you specify `--with-lg-quantum=3` during
- configuration, jemalloc will provide additional size classes that are not
- 16-byte-aligned (24, 40, and 56).
-
-* `--with-lg-vaddr=<lg-vaddr>`
-
- Specify the number of significant virtual address bits. By default, the
- configure script attempts to detect virtual address size on those platforms
- where it knows how, and picks a default otherwise. This option may be
- useful when cross-compiling.
-
-* `--disable-initial-exec-tls`
-
- Disable the initial-exec TLS model for jemalloc's internal thread-local
- storage (on those platforms that support explicit settings). This can allow
- jemalloc to be dynamically loaded after program startup (e.g. using dlopen).
- Note that in this case, there will be two malloc implementations operating
- in the same process, which will almost certainly result in confusing runtime
- crashes if pointers leak from one implementation to the other.
-
-* `--disable-libdl`
-
- Disable the usage of libdl, namely dlsym(3), which is required by the lazy
- lock option. This can allow building static binaries.
-
-The following environment variables (not a definitive list) impact configure's
-behavior:
-
-* `CFLAGS="?"`
-* `CXXFLAGS="?"`
-
- Pass these flags to the C/C++ compiler. Any flags set by the configure
- script are prepended, which means explicitly set flags generally take
- precedence. Take care when specifying flags such as -Werror, because
- configure tests may be affected in undesirable ways.
-
-* `EXTRA_CFLAGS="?"`
-* `EXTRA_CXXFLAGS="?"`
-
- Append these flags to CFLAGS/CXXFLAGS, without passing them to the
- compiler(s) during configuration. This makes it possible to add flags such
- as -Werror, while allowing the configure script to determine what other
- flags are appropriate for the specified configuration.
-
-* `CPPFLAGS="?"`
-
- Pass these flags to the C preprocessor. Note that CFLAGS is not passed to
- 'cpp' when 'configure' is looking for include files, so you must use
- CPPFLAGS instead if you need to help 'configure' find header files.
-
-* `LD_LIBRARY_PATH="?"`
-
- 'ld' uses this colon-separated list to find libraries.
-
-* `LDFLAGS="?"`
-
- Pass these flags when linking.
-
-* `PATH="?"`
-
- 'configure' uses this to find programs.
-
-In some cases it may be necessary to work around configuration results that do
-not match reality. For example, Linux 4.5 added support for the MADV_FREE flag
-to madvise(2), which can cause problems if building on a host with MADV_FREE
-support and deploying to a target without. To work around this, use a cache
-file to override the relevant configuration variable defined in configure.ac,
-e.g.:
-
- echo "je_cv_madv_free=no" > config.cache && ./configure -C
-
-
-## Advanced compilation
-
-To build only parts of jemalloc, use the following targets:
-
- build_lib_shared
- build_lib_static
- build_lib
- build_doc_html
- build_doc_man
- build_doc
-
-To install only parts of jemalloc, use the following targets:
-
- install_bin
- install_include
- install_lib_shared
- install_lib_static
- install_lib_pc
- install_lib
- install_doc_html
- install_doc_man
- install_doc
-
-To clean up build results to varying degrees, use the following make targets:
-
- clean
- distclean
- relclean
-
-
-## Advanced installation
-
-Optionally, define make variables when invoking make, including (not
-exclusively):
-
-* `INCLUDEDIR="?"`
-
- Use this as the installation prefix for header files.
-
-* `LIBDIR="?"`
-
- Use this as the installation prefix for libraries.
-
-* `MANDIR="?"`
-
- Use this as the installation prefix for man pages.
-
-* `DESTDIR="?"`
-
- Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful
- when installing to a different path than was specified via --prefix.
-
-* `CC="?"`
-
- Use this to invoke the C compiler.
-
-* `CFLAGS="?"`
-
- Pass these flags to the compiler.
-
-* `CPPFLAGS="?"`
-
- Pass these flags to the C preprocessor.
-
-* `LDFLAGS="?"`
-
- Pass these flags when linking.
-
-* `PATH="?"`
-
- Use this to search for programs used during configuration and building.
-
-
-## Development
-
-If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh'
-script rather than 'configure'. This re-generates 'configure', enables
-configuration dependency rules, and enables re-generation of automatically
-generated source files.
-
-The build system supports using an object directory separate from the source
-tree. For example, you can create an 'obj' directory, and from within that
-directory, issue configuration and build commands:
-
- autoconf
- mkdir obj
- cd obj
- ../configure --enable-autogen
- make
-
-
-## Documentation
-
-The manual page is generated in both html and roff formats. Any web browser
-can be used to view the html manual. The roff manual page can be formatted
-prior to installation via the following command:
-
- nroff -man -t doc/jemalloc.3
+Building and installing a packaged release of jemalloc can be as simple as
+typing the following while in the root directory of the source tree:
+
+ ./configure
+ make
+ make install
+
+If building from unpackaged developer sources, the simplest command sequence
+that might work is:
+
+ ./autogen.sh
+ make dist
+ make
+ make install
+
+Note that documentation is not built by the default target because doing so
+would create a dependency on xsltproc in packaged releases, hence the
+requirement to either run 'make dist' or avoid installing docs via the various
+install_* targets documented below.
+
+
+## Advanced configuration
+
+The 'configure' script supports numerous options that allow control of which
+functionality is enabled, where jemalloc is installed, etc. Optionally, pass
+any of the following arguments (not a definitive list) to 'configure':
+
+* `--help`
+
+ Print a definitive list of options.
+
+* `--prefix=<install-root-dir>`
+
+ Set the base directory in which to install. For example:
+
+ ./configure --prefix=/usr/local
+
+ will cause files to be installed into /usr/local/include, /usr/local/lib,
+ and /usr/local/man.
+
+* `--with-version=(<major>.<minor>.<bugfix>-<nrev>-g<gid>|VERSION)`
+
+ The VERSION file is mandatory for successful configuration, and the
+    following steps are taken to ensure its presence:
+ 1) If --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> is specified,
+ generate VERSION using the specified value.
+ 2) If --with-version is not specified in either form and the source
+ directory is inside a git repository, try to generate VERSION via 'git
+ describe' invocations that pattern-match release tags.
+ 3) If VERSION is missing, generate it with a bogus version:
+ 0.0.0-0-g0000000000000000000000000000000000000000
+
+ Note that --with-version=VERSION bypasses (1) and (2), which simplifies
+ VERSION configuration when embedding a jemalloc release into another
+ project's git repository.
+
+* `--with-rpath=<colon-separated-rpath>`
+
+ Embed one or more library paths, so that libjemalloc can find the libraries
+ it is linked to. This works only on ELF-based systems.
+
+* `--with-mangling=<map>`
+
+    Mangle public symbols specified in <map>, which is a comma-separated list
+    of name:mangled pairs.
+
+ For example, to use ld's --wrap option as an alternative method for
+ overriding libc's malloc implementation, specify something like:
+
+ --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...]
+
+ Note that mangling happens prior to application of the prefix specified by
+ --with-jemalloc-prefix, and mangled symbols are then ignored when applying
+ the prefix.
+
+* `--with-jemalloc-prefix=<prefix>`
+
+ Prefix all public APIs with <prefix>. For example, if <prefix> is
+ "prefix_", API changes like the following occur:
+
+ malloc() --> prefix_malloc()
+ malloc_conf --> prefix_malloc_conf
+ /etc/malloc.conf --> /etc/prefix_malloc.conf
+ MALLOC_CONF --> PREFIX_MALLOC_CONF
+
+    This makes it possible to use jemalloc at the same time as the system
+    allocator, or even to use multiple copies of jemalloc simultaneously (a
+    short C sketch after this option list illustrates calling the prefixed
+    API).
+
+ By default, the prefix is "", except on OS X, where it is "je_". On OS X,
+ jemalloc overlays the default malloc zone, but makes no attempt to actually
+ replace the "malloc", "calloc", etc. symbols.
+
+* `--without-export`
+
+ Don't export public APIs. This can be useful when building jemalloc as a
+ static library, or to avoid exporting public APIs when using the zone
+ allocator on OSX.
+
+* `--with-private-namespace=<prefix>`
+
+ Prefix all library-private APIs with <prefix>je_. For shared libraries,
+ symbol visibility mechanisms prevent these symbols from being exported, but
+ for static libraries, naming collisions are a real possibility. By
+ default, <prefix> is empty, which results in a symbol prefix of je_ .
+
+* `--with-install-suffix=<suffix>`
+
+ Append <suffix> to the base name of all installed files, such that multiple
+ versions of jemalloc can coexist in the same installation directory. For
+ example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
+
+* `--with-malloc-conf=<malloc_conf>`
+
+ Embed `<malloc_conf>` as a run-time options string that is processed prior to
+ the malloc_conf global variable, the /etc/malloc.conf symlink, and the
+    MALLOC_CONF environment variable.  For example, to change the default
+    dirty decay time to 30 seconds:
+
+        --with-malloc-conf=dirty_decay_ms:30000
+
+* `--enable-debug`
+
+ Enable assertions and validation code. This incurs a substantial
+ performance hit, but is very useful during application development.
+
+* `--disable-stats`
+
+ Disable statistics gathering functionality. See the "opt.stats_print"
+ option documentation for usage details.
+
+* `--enable-prof`
+
+ Enable heap profiling and leak detection functionality. See the "opt.prof"
+ option documentation for usage details. When enabled, there are several
+ approaches to backtracing, and the configure script chooses the first one
+ in the following list that appears to function correctly:
+
+ + libunwind (requires --enable-prof-libunwind)
+ + libgcc (unless --disable-prof-libgcc)
+ + gcc intrinsics (unless --disable-prof-gcc)
+
+* `--enable-prof-libunwind`
+
+ Use the libunwind library (http://www.nongnu.org/libunwind/) for stack
+ backtracing.
+
+* `--disable-prof-libgcc`
+
+ Disable the use of libgcc's backtracing functionality.
+
+* `--disable-prof-gcc`
+
+ Disable the use of gcc intrinsics for backtracing.
+
+* `--with-static-libunwind=<libunwind.a>`
+
+ Statically link against the specified libunwind.a rather than dynamically
+ linking with -lunwind.
+
+* `--disable-fill`
+
+ Disable support for junk/zero filling of memory. See the "opt.junk" and
+ "opt.zero" option documentation for usage details.
+
+* `--disable-zone-allocator`
+
+ Disable zone allocator for Darwin. This means jemalloc won't be hooked as
+ the default allocator on OSX/iOS.
+
+* `--enable-utrace`
+
+ Enable utrace(2)-based allocation tracing. This feature is not broadly
+ portable (FreeBSD has it, but Linux and OS X do not).
+
+* `--enable-xmalloc`
+
+ Enable support for optional immediate termination due to out-of-memory
+    errors, as is commonly implemented by the "xmalloc" wrapper function for
+    malloc.
+ See the "opt.xmalloc" option documentation for usage details.
+
+* `--enable-lazy-lock`
+
+ Enable code that wraps pthread_create() to detect when an application
+ switches from single-threaded to multi-threaded mode, so that it can avoid
+ mutex locking/unlocking operations while in single-threaded mode. In
+ practice, this feature usually has little impact on performance unless
+ thread-specific caching is disabled.
+
+* `--disable-cache-oblivious`
+
+ Disable cache-oblivious large allocation alignment for large allocation
+ requests with no alignment constraints. If this feature is disabled, all
+ large allocations are page-aligned as an implementation artifact, which can
+ severely harm CPU cache utilization. However, the cache-oblivious layout
+ comes at the cost of one extra page per large allocation, which in the
+ most extreme case increases physical memory usage for the 16 KiB size class
+ to 20 KiB.
+
+* `--disable-syscall`
+
+    Disable use of syscall(2), falling back to {open,read,write,close}(2)
+    instead.  This is intended as a workaround for systems that place security
+    limitations on syscall(2).
+
+* `--disable-cxx`
+
+ Disable C++ integration. This will cause new and delete operator
+ implementations to be omitted.
+
+* `--with-xslroot=<path>`
+
+ Specify where to find DocBook XSL stylesheets when building the
+ documentation.
+
+* `--with-lg-page=<lg-page>`
+
+ Specify the base 2 log of the allocator page size, which must in turn be at
+ least as large as the system page size. By default the configure script
+ determines the host's page size and sets the allocator page size equal to
+ the system page size, so this option need not be specified unless the
+ system page size may change between configuration and execution, e.g. when
+ cross compiling.
+
+* `--with-lg-hugepage=<lg-hugepage>`
+
+ Specify the base 2 log of the system huge page size. This option is useful
+ when cross compiling, or when overriding the default for systems that do
+ not explicitly support huge pages.
+
+* `--with-lg-quantum=<lg-quantum>`
+
+ Specify the base 2 log of the minimum allocation alignment. jemalloc needs
+ to know the minimum alignment that meets the following C standard
+ requirement (quoted from the April 12, 2011 draft of the C11 standard):
+
+ > The pointer returned if the allocation succeeds is suitably aligned so
+ that it may be assigned to a pointer to any type of object with a
+ fundamental alignment requirement and then used to access such an object
+ or an array of such objects in the space allocated [...]
+
+ This setting is architecture-specific, and although jemalloc includes known
+ safe values for the most commonly used modern architectures, there is a
+ wrinkle related to GNU libc (glibc) that may impact your choice of
+ <lg-quantum>. On most modern architectures, this mandates 16-byte
+ alignment (<lg-quantum>=4), but the glibc developers chose not to meet this
+ requirement for performance reasons. An old discussion can be found at
+ <https://sourceware.org/bugzilla/show_bug.cgi?id=206> . Unlike glibc,
+ jemalloc does follow the C standard by default (caveat: jemalloc
+ technically cheats for size classes smaller than the quantum), but the fact
+ that Linux systems already work around this allocator noncompliance means
+ that it is generally safe in practice to let jemalloc's minimum alignment
+ follow glibc's lead. If you specify `--with-lg-quantum=3` during
+ configuration, jemalloc will provide additional size classes that are not
+ 16-byte-aligned (24, 40, and 56).
+
+* `--with-lg-vaddr=<lg-vaddr>`
+
+ Specify the number of significant virtual address bits. By default, the
+ configure script attempts to detect virtual address size on those platforms
+ where it knows how, and picks a default otherwise. This option may be
+ useful when cross-compiling.
+
+* `--disable-initial-exec-tls`
+
+ Disable the initial-exec TLS model for jemalloc's internal thread-local
+ storage (on those platforms that support explicit settings). This can allow
+ jemalloc to be dynamically loaded after program startup (e.g. using dlopen).
+ Note that in this case, there will be two malloc implementations operating
+ in the same process, which will almost certainly result in confusing runtime
+ crashes if pointers leak from one implementation to the other.
+
+* `--disable-libdl`
+
+ Disable the usage of libdl, namely dlsym(3) which is required by the lazy
+ lock option. This can allow building static binaries.
+
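+As a concrete illustration of the `--with-jemalloc-prefix` option above, the
+following minimal C sketch assumes a build configured with
+`--with-jemalloc-prefix=je_` and linked with `-ljemalloc`; it is an
+illustrative sketch, not part of the official build instructions:
+
+    #include <jemalloc/jemalloc.h>
+
+    /* je_malloc_conf is malloc_conf with the je_ prefix applied; the options
+     * string here is only an example. */
+    const char *je_malloc_conf = "abort_conf:true";
+
+    int main(void) {
+        void *p = je_malloc(64);        /* prefixed malloc() */
+        if (p == NULL) {
+            return 1;
+        }
+        je_malloc_stats_print(NULL, NULL, NULL);    /* prefixed stats dump */
+        je_free(p);                     /* prefixed free() */
+        return 0;
+    }
+
+Without a prefix (the default on systems other than OS X), the same program
+would simply call malloc(), free(), and malloc_stats_print().
+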
+The following environment variables (not a definitive list) impact configure's
+behavior:
+
+* `CFLAGS="?"`
+* `CXXFLAGS="?"`
+
+ Pass these flags to the C/C++ compiler. Any flags set by the configure
+ script are prepended, which means explicitly set flags generally take
+ precedence. Take care when specifying flags such as -Werror, because
+ configure tests may be affected in undesirable ways.
+
+* `EXTRA_CFLAGS="?"`
+* `EXTRA_CXXFLAGS="?"`
+
+ Append these flags to CFLAGS/CXXFLAGS, without passing them to the
+ compiler(s) during configuration. This makes it possible to add flags such
+ as -Werror, while allowing the configure script to determine what other
+ flags are appropriate for the specified configuration.
+
+* `CPPFLAGS="?"`
+
+ Pass these flags to the C preprocessor. Note that CFLAGS is not passed to
+ 'cpp' when 'configure' is looking for include files, so you must use
+ CPPFLAGS instead if you need to help 'configure' find header files.
+
+* `LD_LIBRARY_PATH="?"`
+
+ 'ld' uses this colon-separated list to find libraries.
+
+* `LDFLAGS="?"`
+
+ Pass these flags when linking.
+
+* `PATH="?"`
+
+ 'configure' uses this to find programs.
+
+In some cases it may be necessary to work around configuration results that do
+not match reality. For example, Linux 4.5 added support for the MADV_FREE flag
+to madvise(2), which can cause problems if building on a host with MADV_FREE
+support and deploying to a target without. To work around this, use a cache
+file to override the relevant configuration variable defined in configure.ac,
+e.g.:
+
+ echo "je_cv_madv_free=no" > config.cache && ./configure -C
+
+
+## Advanced compilation
+
+To build only parts of jemalloc, use the following targets:
+
+ build_lib_shared
+ build_lib_static
+ build_lib
+ build_doc_html
+ build_doc_man
+ build_doc
+
+To install only parts of jemalloc, use the following targets:
+
+ install_bin
+ install_include
+ install_lib_shared
+ install_lib_static
+ install_lib_pc
+ install_lib
+ install_doc_html
+ install_doc_man
+ install_doc
+
+To clean up build results to varying degrees, use the following make targets:
+
+ clean
+ distclean
+ relclean
+
+
+## Advanced installation
+
+Optionally, define make variables when invoking make, including (not
+exclusively):
+
+* `INCLUDEDIR="?"`
+
+ Use this as the installation prefix for header files.
+
+* `LIBDIR="?"`
+
+ Use this as the installation prefix for libraries.
+
+* `MANDIR="?"`
+
+ Use this as the installation prefix for man pages.
+
+* `DESTDIR="?"`
+
+ Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful
+ when installing to a different path than was specified via --prefix.
+
+* `CC="?"`
+
+ Use this to invoke the C compiler.
+
+* `CFLAGS="?"`
+
+ Pass these flags to the compiler.
+
+* `CPPFLAGS="?"`
+
+ Pass these flags to the C preprocessor.
+
+* `LDFLAGS="?"`
+
+ Pass these flags when linking.
+
+* `PATH="?"`
+
+ Use this to search for programs used during configuration and building.
+
+
+## Development
+
+If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh'
+script rather than 'configure'. This re-generates 'configure', enables
+configuration dependency rules, and enables re-generation of automatically
+generated source files.
+
+The build system supports using an object directory separate from the source
+tree. For example, you can create an 'obj' directory, and from within that
+directory, issue configuration and build commands:
+
+ autoconf
+ mkdir obj
+ cd obj
+ ../configure --enable-autogen
+ make
+
+
+## Documentation
+
+The manual page is generated in both html and roff formats. Any web browser
+can be used to view the html manual. The roff manual page can be formatted
+prior to installation via the following command:
+
+ nroff -man -t doc/jemalloc.3
diff --git a/contrib/libs/jemalloc/README b/contrib/libs/jemalloc/README
index 3a6e0d2725..4d82e79c72 100644
--- a/contrib/libs/jemalloc/README
+++ b/contrib/libs/jemalloc/README
@@ -3,12 +3,12 @@ fragmentation avoidance and scalable concurrency support. jemalloc first came
into use as the FreeBSD libc allocator in 2005, and since then it has found its
way into numerous applications that rely on its predictable behavior. In 2010
jemalloc development efforts broadened to include developer support features
-such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc
-releases continue to be integrated back into FreeBSD, and therefore versatility
-remains critical. Ongoing development efforts trend toward making jemalloc
-among the best allocators for a broad range of demanding applications, and
-eliminating/mitigating weaknesses that have practical repercussions for real
-world applications.
+such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc
+releases continue to be integrated back into FreeBSD, and therefore versatility
+remains critical. Ongoing development efforts trend toward making jemalloc
+among the best allocators for a broad range of demanding applications, and
+eliminating/mitigating weaknesses that have practical repercussions for real
+world applications.
The COPYING file contains copyright and licensing information.
@@ -17,4 +17,4 @@ jemalloc.
The ChangeLog file contains a brief summary of changes for each release.
-URL: http://jemalloc.net/
+URL: http://jemalloc.net/
diff --git a/contrib/libs/jemalloc/TUNING.md b/contrib/libs/jemalloc/TUNING.md
index 34fca05b43..13f4c57ac7 100644
--- a/contrib/libs/jemalloc/TUNING.md
+++ b/contrib/libs/jemalloc/TUNING.md
@@ -1,129 +1,129 @@
-This document summarizes the common approaches for performance fine tuning with
-jemalloc (as of 5.1.0). The default configuration of jemalloc tends to work
-reasonably well in practice, and most applications should not have to tune any
-options. However, in order to cover a wide range of applications and avoid
-pathological cases, the default setting is sometimes kept conservative and
-suboptimal, even for many common workloads. When jemalloc is properly tuned for
-a specific application / workload, it is common to improve system level metrics
-by a few percent, or make favorable trade-offs.
-
-
-## Notable runtime options for performance tuning
-
-Runtime options can be set via
-[malloc_conf](http://jemalloc.net/jemalloc.3.html#tuning).
-
-* [background_thread](http://jemalloc.net/jemalloc.3.html#background_thread)
-
- Enabling jemalloc background threads generally improves the tail latency for
- application threads, since unused memory purging is shifted to the dedicated
- background threads. In addition, unintended purging delay caused by
- application inactivity is avoided with background threads.
-
- Suggested: `background_thread:true` when jemalloc managed threads can be
- allowed.
-
-* [metadata_thp](http://jemalloc.net/jemalloc.3.html#opt.metadata_thp)
-
- Allowing jemalloc to utilize transparent huge pages for its internal
- metadata usually reduces TLB misses significantly, especially for programs
- with large memory footprint and frequent allocation / deallocation
- activities. Metadata memory usage may increase due to the use of huge
- pages.
-
- Suggested for allocation intensive programs: `metadata_thp:auto` or
- `metadata_thp:always`, which is expected to improve CPU utilization at a
- small memory cost.
-
-* [dirty_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.dirty_decay_ms) and
- [muzzy_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.muzzy_decay_ms)
-
- Decay time determines how fast jemalloc returns unused pages back to the
- operating system, and therefore provides a fairly straightforward trade-off
- between CPU and memory usage. Shorter decay time purges unused pages faster
-  to reduce memory usage (usually at the cost of more CPU cycles spent on
- purging), and vice versa.
-
- Suggested: tune the values based on the desired trade-offs.
-
-* [narenas](http://jemalloc.net/jemalloc.3.html#opt.narenas)
-
- By default jemalloc uses multiple arenas to reduce internal lock contention.
-  However, a high arena count may also increase overall memory fragmentation,
-  since arenas manage memory independently.  When a high degree of parallelism
-  is not expected at the allocator level, a lower number of arenas often
-  improves memory usage.
-
- Suggested: if low parallelism is expected, try lower arena count while
- monitoring CPU and memory usage.
-
-* [percpu_arena](http://jemalloc.net/jemalloc.3.html#opt.percpu_arena)
-
- Enable dynamic thread to arena association based on running CPU. This has
- the potential to improve locality, e.g. when thread to CPU affinity is
- present.
-
- Suggested: try `percpu_arena:percpu` or `percpu_arena:phycpu` if
- thread migration between processors is expected to be infrequent.
-
-Examples:
-
-* High resource consumption application, prioritizing CPU utilization:
-
- `background_thread:true,metadata_thp:auto` combined with relaxed decay time
- (increased `dirty_decay_ms` and / or `muzzy_decay_ms`,
- e.g. `dirty_decay_ms:30000,muzzy_decay_ms:30000`).
-
-* High resource consumption application, prioritizing memory usage:
-
- `background_thread:true` combined with shorter decay time (decreased
- `dirty_decay_ms` and / or `muzzy_decay_ms`,
- e.g. `dirty_decay_ms:5000,muzzy_decay_ms:5000`), and lower arena count
- (e.g. number of CPUs).
-
-* Low resource consumption application:
-
- `narenas:1,lg_tcache_max:13` combined with shorter decay time (decreased
-  `dirty_decay_ms` and / or `muzzy_decay_ms`, e.g.
- `dirty_decay_ms:1000,muzzy_decay_ms:0`).
-
-* Extremely conservative -- minimize memory usage at all costs, only suitable
-  when allocation activity is very rare:
-
- `narenas:1,tcache:false,dirty_decay_ms:0,muzzy_decay_ms:0`
-
-Note that it is recommended to combine the options with `abort_conf:true`,
-which aborts immediately on invalid options.
-
-## Beyond runtime options
-
-In addition to the runtime options, there are a number of programmatic ways to
-improve application performance with jemalloc.
-
-* [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create)
-
- Manually created arenas can help performance in various ways, e.g. by
- managing locality and contention for specific usages. For example,
- applications can explicitly allocate frequently accessed objects from a
- dedicated arena with
- [mallocx()](http://jemalloc.net/jemalloc.3.html#MALLOCX_ARENA) to improve
- locality. In addition, explicit arenas often benefit from individually
- tuned options, e.g. relaxed [decay
- time](http://jemalloc.net/jemalloc.3.html#arena.i.dirty_decay_ms) if
- frequent reuse is expected.
-
-* [Extent hooks](http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks)
-
- Extent hooks allow customization for managing underlying memory. One use
-  case for performance purposes is to utilize huge pages -- for example,
- [HHVM](https://github.com/facebook/hhvm/blob/master/hphp/util/alloc.cpp)
- uses explicit arenas with customized extent hooks to manage 1GB huge pages
- for frequently accessed data, which reduces TLB misses significantly.
-
-* [Explicit thread-to-arena
- binding](http://jemalloc.net/jemalloc.3.html#thread.arena)
-
- It is common for some threads in an application to have different memory
- access / allocation patterns. Threads with heavy workloads often benefit
- from explicit binding, e.g. binding very active threads to dedicated arenas
- may reduce contention at the allocator level.
+This document summarizes the common approaches for performance fine tuning with
+jemalloc (as of 5.1.0). The default configuration of jemalloc tends to work
+reasonably well in practice, and most applications should not have to tune any
+options. However, in order to cover a wide range of applications and avoid
+pathological cases, the default setting is sometimes kept conservative and
+suboptimal, even for many common workloads. When jemalloc is properly tuned for
+a specific application / workload, it is common to improve system level metrics
+by a few percent, or make favorable trade-offs.
+
+
+## Notable runtime options for performance tuning
+
+Runtime options can be set via
+[malloc_conf](http://jemalloc.net/jemalloc.3.html#tuning).
+
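+For example, an application can embed its tuning string at build time by
+defining the `malloc_conf` global variable (assuming an unprefixed build), as
+in the minimal C sketch below; the option values are illustrative, not
+recommendations, and the same string can be supplied without recompiling via
+the `MALLOC_CONF` environment variable:
+
+    #include <jemalloc/jemalloc.h>
+
+    /* Read once by jemalloc during initialization; the values shown only
+     * illustrate options discussed below. */
+    const char *malloc_conf = "background_thread:true,metadata_thp:auto,"
+        "dirty_decay_ms:30000,muzzy_decay_ms:30000";
+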
+* [background_thread](http://jemalloc.net/jemalloc.3.html#background_thread)
+
+ Enabling jemalloc background threads generally improves the tail latency for
+ application threads, since unused memory purging is shifted to the dedicated
+ background threads. In addition, unintended purging delay caused by
+ application inactivity is avoided with background threads.
+
+ Suggested: `background_thread:true` when jemalloc managed threads can be
+ allowed.
+
+* [metadata_thp](http://jemalloc.net/jemalloc.3.html#opt.metadata_thp)
+
+ Allowing jemalloc to utilize transparent huge pages for its internal
+ metadata usually reduces TLB misses significantly, especially for programs
+ with large memory footprint and frequent allocation / deallocation
+ activities. Metadata memory usage may increase due to the use of huge
+ pages.
+
+ Suggested for allocation intensive programs: `metadata_thp:auto` or
+ `metadata_thp:always`, which is expected to improve CPU utilization at a
+ small memory cost.
+
+* [dirty_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.dirty_decay_ms) and
+ [muzzy_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.muzzy_decay_ms)
+
+ Decay time determines how fast jemalloc returns unused pages back to the
+ operating system, and therefore provides a fairly straightforward trade-off
+ between CPU and memory usage. Shorter decay time purges unused pages faster
+  to reduce memory usage (usually at the cost of more CPU cycles spent on
+ purging), and vice versa.
+
+  Suggested: tune the values based on the desired trade-offs.  A short
+  `mallctl()` sketch after this list shows how to adjust them at run time.
+
+* [narenas](http://jemalloc.net/jemalloc.3.html#opt.narenas)
+
+ By default jemalloc uses multiple arenas to reduce internal lock contention.
+  However, a high arena count may also increase overall memory fragmentation,
+  since arenas manage memory independently.  When a high degree of parallelism
+  is not expected at the allocator level, a lower number of arenas often
+  improves memory usage.
+
+ Suggested: if low parallelism is expected, try lower arena count while
+ monitoring CPU and memory usage.
+
+* [percpu_arena](http://jemalloc.net/jemalloc.3.html#opt.percpu_arena)
+
+ Enable dynamic thread to arena association based on running CPU. This has
+ the potential to improve locality, e.g. when thread to CPU affinity is
+ present.
+
+ Suggested: try `percpu_arena:percpu` or `percpu_arena:phycpu` if
+ thread migration between processors is expected to be infrequent.
+
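+The decay times and the background thread state can also be inspected and
+adjusted at run time through `mallctl()`, as mentioned above.  The following
+minimal C sketch assumes an unprefixed build; the function name and values are
+illustrative only:
+
+    #include <stdbool.h>
+    #include <stddef.h>
+    #include <sys/types.h>
+    #include <jemalloc/jemalloc.h>
+
+    /* Illustrative runtime tuning; returns 0 on success. */
+    static int tune_at_runtime(void) {
+        /* Enable background purging threads. */
+        bool bg = true;
+        if (mallctl("background_thread", NULL, NULL, &bg, sizeof(bg)) != 0) {
+            return -1;  /* e.g. background threads not supported */
+        }
+        /* Relax the default dirty decay time used for newly created arenas. */
+        ssize_t decay_ms = 30000;
+        if (mallctl("arenas.dirty_decay_ms", NULL, NULL, &decay_ms,
+            sizeof(decay_ms)) != 0) {
+            return -1;
+        }
+        return 0;
+    }
+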
+Examples:
+
+* High resource consumption application, prioritizing CPU utilization:
+
+ `background_thread:true,metadata_thp:auto` combined with relaxed decay time
+ (increased `dirty_decay_ms` and / or `muzzy_decay_ms`,
+ e.g. `dirty_decay_ms:30000,muzzy_decay_ms:30000`).
+
+* High resource consumption application, prioritizing memory usage:
+
+ `background_thread:true` combined with shorter decay time (decreased
+ `dirty_decay_ms` and / or `muzzy_decay_ms`,
+ e.g. `dirty_decay_ms:5000,muzzy_decay_ms:5000`), and lower arena count
+ (e.g. number of CPUs).
+
+* Low resource consumption application:
+
+ `narenas:1,lg_tcache_max:13` combined with shorter decay time (decreased
+  `dirty_decay_ms` and / or `muzzy_decay_ms`, e.g.
+ `dirty_decay_ms:1000,muzzy_decay_ms:0`).
+
+* Extremely conservative -- minimize memory usage at all costs, only suitable
+  when allocation activity is very rare:
+
+ `narenas:1,tcache:false,dirty_decay_ms:0,muzzy_decay_ms:0`
+
+Note that it is recommended to combine the options with `abort_conf:true`,
+which aborts immediately on invalid options.
+
+## Beyond runtime options
+
+In addition to the runtime options, there are a number of programmatic ways to
+improve application performance with jemalloc.
+
+* [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create)
+
+ Manually created arenas can help performance in various ways, e.g. by
+ managing locality and contention for specific usages. For example,
+ applications can explicitly allocate frequently accessed objects from a
+ dedicated arena with
+ [mallocx()](http://jemalloc.net/jemalloc.3.html#MALLOCX_ARENA) to improve
+ locality. In addition, explicit arenas often benefit from individually
+ tuned options, e.g. relaxed [decay
+ time](http://jemalloc.net/jemalloc.3.html#arena.i.dirty_decay_ms) if
+ frequent reuse is expected.
+
+* [Extent hooks](http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks)
+
+ Extent hooks allow customization for managing underlying memory. One use
+  case for performance purposes is to utilize huge pages -- for example,
+ [HHVM](https://github.com/facebook/hhvm/blob/master/hphp/util/alloc.cpp)
+ uses explicit arenas with customized extent hooks to manage 1GB huge pages
+ for frequently accessed data, which reduces TLB misses significantly.
+
+* [Explicit thread-to-arena
+ binding](http://jemalloc.net/jemalloc.3.html#thread.arena)
+
+ It is common for some threads in an application to have different memory
+ access / allocation patterns. Threads with heavy workloads often benefit
+  from explicit binding, e.g. binding very active threads to dedicated arenas
+  may reduce contention at the allocator level.  A combined C sketch using an
+  explicitly created arena and thread binding follows below.
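+
+As a sketch of combining an explicitly created arena with thread binding
+(assuming an unprefixed build and default extent hooks; the helper name is
+hypothetical and error handling is abbreviated):
+
+    #include <stddef.h>
+    #include <jemalloc/jemalloc.h>
+
+    /* Create a dedicated arena, bind the calling thread to it, and allocate
+     * from it explicitly, bypassing the thread cache.  size must be nonzero. */
+    static void *alloc_from_private_arena(size_t size) {
+        unsigned arena_ind;
+        size_t sz = sizeof(arena_ind);
+        if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
+            return NULL;
+        }
+        /* Optional: route this thread's regular allocations to the new arena. */
+        (void)mallctl("thread.arena", NULL, NULL, &arena_ind, sizeof(arena_ind));
+        /* Allocate explicitly from the dedicated arena. */
+        return mallocx(size, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
+    }
+
+Memory obtained this way can be released with `dallocx(ptr,
+MALLOCX_TCACHE_NONE)`; custom extent hooks, as in the HHVM example above, can
+then be layered on such an arena.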
diff --git a/contrib/libs/jemalloc/VERSION b/contrib/libs/jemalloc/VERSION
index 428ded8c07..6dccd965a8 100644
--- a/contrib/libs/jemalloc/VERSION
+++ b/contrib/libs/jemalloc/VERSION
@@ -1 +1 @@
-5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756
+5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756
diff --git a/contrib/libs/jemalloc/dynamic/ya.make b/contrib/libs/jemalloc/dynamic/ya.make
index 44abbefda0..dac999d441 100644
--- a/contrib/libs/jemalloc/dynamic/ya.make
+++ b/contrib/libs/jemalloc/dynamic/ya.make
@@ -1,5 +1,5 @@
-# Generated by devtools/yamaker.
-
-OWNER(g:contrib g:cpp-contrib)
-
+# Generated by devtools/yamaker.
+
+OWNER(g:contrib g:cpp-contrib)
+
DLL_FOR(contrib/libs/jemalloc jemalloc)
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/arena_externs.h b/contrib/libs/jemalloc/include/jemalloc/internal/arena_externs.h
index a4523ae0c4..6f1b3872b6 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/arena_externs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/arena_externs.h
@@ -1,104 +1,104 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
-#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
-
-#include "jemalloc/internal/bin.h"
-#include "jemalloc/internal/extent_dss.h"
-#include "jemalloc/internal/hook.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/stats.h"
-
-extern ssize_t opt_dirty_decay_ms;
-extern ssize_t opt_muzzy_decay_ms;
-
-extern percpu_arena_mode_t opt_percpu_arena;
-extern const char *percpu_arena_mode_names[];
-
-extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
-extern malloc_mutex_t arenas_lock;
-
-extern size_t opt_oversize_threshold;
-extern size_t oversize_threshold;
-
-void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
- unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
- ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
-void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
- size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
- bin_stats_t *bstats, arena_stats_large_t *lstats,
- arena_stats_extents_t *estats);
-void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent);
-#ifdef JEMALLOC_JET
-size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
-#endif
-extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
- size_t usize, size_t alignment, bool *zero);
-void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent);
-void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent, size_t oldsize);
-void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent, size_t oldsize);
-ssize_t arena_dirty_decay_ms_get(arena_t *arena);
-bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
-ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
-bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
-void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
- bool all);
-void arena_reset(tsd_t *tsd, arena_t *arena);
-void arena_destroy(tsd_t *tsd, arena_t *arena);
-void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
-void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
- bool zero);
-
-typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
-extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
-
-void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
- szind_t ind, bool zero);
-void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
-void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
- bool slow_path);
-void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *extent, void *ptr);
-void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
-bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero, size_t *newsize);
-void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
- size_t size, size_t alignment, bool zero, tcache_t *tcache,
- hook_ralloc_args_t *hook_args);
-dss_prec_t arena_dss_prec_get(arena_t *arena);
-bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
-ssize_t arena_dirty_decay_ms_default_get(void);
-bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
-ssize_t arena_muzzy_decay_ms_default_get(void);
-bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
-bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
- size_t *old_limit, size_t *new_limit);
-unsigned arena_nthreads_get(arena_t *arena, bool internal);
-void arena_nthreads_inc(arena_t *arena, bool internal);
-void arena_nthreads_dec(arena_t *arena, bool internal);
-size_t arena_extent_sn_next(arena_t *arena);
-arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-bool arena_init_huge(void);
-bool arena_is_huge(unsigned arena_ind);
-arena_t *arena_choose_huge(tsd_t *tsd);
-bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
- unsigned *binshard);
-void arena_boot(sc_data_t *sc_data);
-void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
-void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
-void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
-
-#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */
+#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
+#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
+
+#include "jemalloc/internal/bin.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/hook.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/stats.h"
+
+extern ssize_t opt_dirty_decay_ms;
+extern ssize_t opt_muzzy_decay_ms;
+
+extern percpu_arena_mode_t opt_percpu_arena;
+extern const char *percpu_arena_mode_names[];
+
+extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
+extern malloc_mutex_t arenas_lock;
+
+extern size_t opt_oversize_threshold;
+extern size_t oversize_threshold;
+
+void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
+ unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
+ ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
+void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+ size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
+ bin_stats_t *bstats, arena_stats_large_t *lstats,
+ arena_stats_extents_t *estats);
+void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent);
+#ifdef JEMALLOC_JET
+size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
+#endif
+extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
+ size_t usize, size_t alignment, bool *zero);
+void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
+ extent_t *extent);
+void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
+ extent_t *extent, size_t oldsize);
+void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
+ extent_t *extent, size_t oldsize);
+ssize_t arena_dirty_decay_ms_get(arena_t *arena);
+bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
+ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
+bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
+void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+ bool all);
+void arena_reset(tsd_t *tsd, arena_t *arena);
+void arena_destroy(tsd_t *tsd, arena_t *arena);
+void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
+void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
+ bool zero);
+
+typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
+extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
+
+void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
+ szind_t ind, bool zero);
+void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache);
+void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
+void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+ bool slow_path);
+void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind, extent_t *extent, void *ptr);
+void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
+bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+ size_t extra, bool zero, size_t *newsize);
+void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
+ size_t size, size_t alignment, bool zero, tcache_t *tcache,
+ hook_ralloc_args_t *hook_args);
+dss_prec_t arena_dss_prec_get(arena_t *arena);
+bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+ssize_t arena_dirty_decay_ms_default_get(void);
+bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
+ssize_t arena_muzzy_decay_ms_default_get(void);
+bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
+bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
+ size_t *old_limit, size_t *new_limit);
+unsigned arena_nthreads_get(arena_t *arena, bool internal);
+void arena_nthreads_inc(arena_t *arena, bool internal);
+void arena_nthreads_dec(arena_t *arena, bool internal);
+size_t arena_extent_sn_next(arena_t *arena);
+arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+bool arena_init_huge(void);
+bool arena_is_huge(unsigned arena_ind);
+arena_t *arena_choose_huge(tsd_t *tsd);
+bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ unsigned *binshard);
+void arena_boot(sc_data_t *sc_data);
+void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
+
+#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/arena_inlines_a.h b/contrib/libs/jemalloc/include/jemalloc/internal/arena_inlines_a.h
index 9abf7f6ac7..82add969a2 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/arena_inlines_a.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/arena_inlines_a.h
@@ -1,57 +1,57 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
-#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
-
-static inline unsigned
-arena_ind_get(const arena_t *arena) {
- return base_ind_get(arena->base);
-}
-
-static inline void
-arena_internal_add(arena_t *arena, size_t size) {
- atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
-}
-
-static inline void
-arena_internal_sub(arena_t *arena, size_t size) {
- atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
-}
-
-static inline size_t
-arena_internal_get(arena_t *arena) {
- return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
-}
-
-static inline bool
-arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
- cassert(config_prof);
-
- if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
- return false;
- }
-
- return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
-}
-
-static inline void
-percpu_arena_update(tsd_t *tsd, unsigned cpu) {
- assert(have_percpu_arena);
- arena_t *oldarena = tsd_arena_get(tsd);
- assert(oldarena != NULL);
- unsigned oldind = arena_ind_get(oldarena);
-
- if (oldind != cpu) {
- unsigned newind = cpu;
- arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
- assert(newarena != NULL);
-
- /* Set new arena/tcache associations. */
- arena_migrate(tsd, oldind, newind);
- tcache_t *tcache = tcache_get(tsd);
- if (tcache != NULL) {
- tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
- newarena);
- }
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
+#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
+#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
+
+static inline unsigned
+arena_ind_get(const arena_t *arena) {
+ return base_ind_get(arena->base);
+}
+
+static inline void
+arena_internal_add(arena_t *arena, size_t size) {
+ atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
+}
+
+static inline void
+arena_internal_sub(arena_t *arena, size_t size) {
+ atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
+}
+
+static inline size_t
+arena_internal_get(arena_t *arena) {
+ return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
+}
+
+static inline bool
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
+ cassert(config_prof);
+
+ if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
+ return false;
+ }
+
+ return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
+}
+
+static inline void
+percpu_arena_update(tsd_t *tsd, unsigned cpu) {
+ assert(have_percpu_arena);
+ arena_t *oldarena = tsd_arena_get(tsd);
+ assert(oldarena != NULL);
+ unsigned oldind = arena_ind_get(oldarena);
+
+ if (oldind != cpu) {
+ unsigned newind = cpu;
+ arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
+ assert(newarena != NULL);
+
+ /* Set new arena/tcache associations. */
+ arena_migrate(tsd, oldind, newind);
+ tcache_t *tcache = tcache_get(tsd);
+ if (tcache != NULL) {
+ tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
+ newarena);
+ }
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/contrib/libs/jemalloc/include/jemalloc/internal/arena_inlines_b.h
index dd926575fc..38a677c6ad 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/arena_inlines_b.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/arena_inlines_b.h
@@ -1,427 +1,427 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
-#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
-
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/ticker.h"
-
-JEMALLOC_ALWAYS_INLINE bool
-arena_has_default_hooks(arena_t *arena) {
- return (extent_hooks_get(arena) == &extent_hooks_default);
-}
-
-JEMALLOC_ALWAYS_INLINE arena_t *
-arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
- if (arena != NULL) {
- return arena;
- }
-
- /*
- * For huge allocations, use the dedicated huge arena if both are true:
- * 1) is using auto arena selection (i.e. arena == NULL), and 2) the
- * thread is not assigned to a manual arena.
- */
- if (unlikely(size >= oversize_threshold)) {
- arena_t *tsd_arena = tsd_arena_get(tsd);
- if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
- return arena_choose_huge(tsd);
- }
- }
-
- return arena_choose(tsd, NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- /* Static check. */
- if (alloc_ctx == NULL) {
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(!extent_slab_get(extent))) {
- return large_prof_tctx_get(tsdn, extent);
- }
- } else {
- if (unlikely(!alloc_ctx->slab)) {
- return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
- }
- }
- return (prof_tctx_t *)(uintptr_t)1U;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- /* Static check. */
- if (alloc_ctx == NULL) {
- extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(!extent_slab_get(extent))) {
- large_prof_tctx_set(tsdn, extent, tctx);
- }
- } else {
- if (unlikely(!alloc_ctx->slab)) {
- large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
- }
- }
-}
-
-static inline void
-arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- extent_t *extent = iealloc(tsdn, ptr);
- assert(!extent_slab_get(extent));
-
- large_prof_tctx_reset(tsdn, extent);
-}
-
-JEMALLOC_ALWAYS_INLINE nstime_t
-arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr,
- alloc_ctx_t *alloc_ctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- extent_t *extent = iealloc(tsdn, ptr);
- /*
- * Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're
- * sure we have a sampled allocation.
- */
- assert(!extent_slab_get(extent));
- return large_prof_alloc_time_get(extent);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
- nstime_t t) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- extent_t *extent = iealloc(tsdn, ptr);
- assert(!extent_slab_get(extent));
- large_prof_alloc_time_set(extent, t);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
- tsd_t *tsd;
- ticker_t *decay_ticker;
-
- if (unlikely(tsdn_null(tsdn))) {
- return;
- }
- tsd = tsdn_tsd(tsdn);
- decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
- if (unlikely(decay_ticker == NULL)) {
- return;
- }
- if (unlikely(ticker_ticks(decay_ticker, nticks))) {
- arena_decay(tsdn, arena, false, false);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
-
- arena_decay_ticks(tsdn, arena, 1);
-}
-
-/* Purge a single extent to retained / unmapped directly. */
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_extent(tsdn_t *tsdn,arena_t *arena, extent_hooks_t **r_extent_hooks,
- extent_t *extent) {
- size_t extent_size = extent_size_get(extent);
- extent_dalloc_wrapper(tsdn, arena,
- r_extent_hooks, extent);
- if (config_stats) {
- /* Update stats accordingly. */
- arena_stats_lock(tsdn, &arena->stats);
- arena_stats_add_u64(tsdn, &arena->stats,
- &arena->decay_dirty.stats->nmadvise, 1);
- arena_stats_add_u64(tsdn, &arena->stats,
- &arena->decay_dirty.stats->purged, extent_size >> LG_PAGE);
- arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
- extent_size);
- arena_stats_unlock(tsdn, &arena->stats);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
- tcache_t *tcache, bool slow_path) {
- assert(!tsdn_null(tsdn) || tcache == NULL);
-
- if (likely(tcache != NULL)) {
- if (likely(size <= SC_SMALL_MAXCLASS)) {
- return tcache_alloc_small(tsdn_tsd(tsdn), arena,
- tcache, size, ind, zero, slow_path);
- }
- if (likely(size <= tcache_maxclass)) {
- return tcache_alloc_large(tsdn_tsd(tsdn), arena,
- tcache, size, ind, zero, slow_path);
- }
- /* (size > tcache_maxclass) case falls through. */
- assert(size > tcache_maxclass);
- }
-
- return arena_malloc_hard(tsdn, arena, size, ind, zero);
-}
-
-JEMALLOC_ALWAYS_INLINE arena_t *
-arena_aalloc(tsdn_t *tsdn, const void *ptr) {
- return extent_arena_get(iealloc(tsdn, ptr));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const void *ptr) {
- assert(ptr != NULL);
-
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true);
- assert(szind != SC_NSIZES);
-
- return sz_index2size(szind);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
- /*
- * Return 0 if ptr is not within an extent managed by jemalloc. This
- * function has two extra costs relative to isalloc():
- * - The rtree calls cannot claim to be dependent lookups, which induces
- * rtree lookup load dependencies.
- * - The lookup may fail, so there is an extra branch to check for
- * failure.
- */
-
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- extent_t *extent;
- szind_t szind;
- if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, false, &extent, &szind)) {
- return 0;
- }
-
- if (extent == NULL) {
- return 0;
- }
- assert(extent_state_get(extent) == extent_state_active);
- /* Only slab members should be looked up via interior pointers. */
- assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
-
- assert(szind != SC_NSIZES);
-
- return sz_index2size(szind);
-}
-
-static inline void
-arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
- if (config_prof && unlikely(szind < SC_NBINS)) {
- arena_dalloc_promoted(tsdn, ptr, NULL, true);
- } else {
- extent_t *extent = iealloc(tsdn, ptr);
- large_dalloc(tsdn, extent);
- }
-}
-
-static inline void
-arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
- assert(ptr != NULL);
-
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- szind_t szind;
- bool slab;
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
- true, &szind, &slab);
-
- if (config_debug) {
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(szind < SC_NSIZES);
- assert(slab == extent_slab_get(extent));
- }
-
- if (likely(slab)) {
- /* Small allocation. */
- arena_dalloc_small(tsdn, ptr);
- } else {
- arena_dalloc_large_no_tcache(tsdn, ptr, szind);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
- bool slow_path) {
- if (szind < nhbins) {
- if (config_prof && unlikely(szind < SC_NBINS)) {
- arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
- } else {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
- slow_path);
- }
- } else {
- extent_t *extent = iealloc(tsdn, ptr);
- large_dalloc(tsdn, extent);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
- alloc_ctx_t *alloc_ctx, bool slow_path) {
- assert(!tsdn_null(tsdn) || tcache == NULL);
- assert(ptr != NULL);
-
- if (unlikely(tcache == NULL)) {
- arena_dalloc_no_tcache(tsdn, ptr);
- return;
- }
-
- szind_t szind;
- bool slab;
- rtree_ctx_t *rtree_ctx;
- if (alloc_ctx != NULL) {
- szind = alloc_ctx->szind;
- slab = alloc_ctx->slab;
- assert(szind != SC_NSIZES);
- } else {
- rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &szind, &slab);
- }
-
- if (config_debug) {
- rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(szind < SC_NSIZES);
- assert(slab == extent_slab_get(extent));
- }
-
- if (likely(slab)) {
- /* Small allocation. */
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
- slow_path);
- } else {
- arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
- }
-}
-
-static inline void
-arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
- assert(ptr != NULL);
- assert(size <= SC_LARGE_MAXCLASS);
-
- szind_t szind;
- bool slab;
- if (!config_prof || !opt_prof) {
- /*
- * There is no risk of being confused by a promoted sampled
- * object, so base szind and slab on the given size.
- */
- szind = sz_size2index(size);
- slab = (szind < SC_NBINS);
- }
-
- if ((config_prof && opt_prof) || config_debug) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
- &rtree_ctx_fallback);
-
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &szind, &slab);
-
- assert(szind == sz_size2index(size));
- assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
-
- if (config_debug) {
- extent_t *extent = rtree_extent_read(tsdn,
- &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(slab == extent_slab_get(extent));
- }
- }
-
- if (likely(slab)) {
- /* Small allocation. */
- arena_dalloc_small(tsdn, ptr);
- } else {
- arena_dalloc_large_no_tcache(tsdn, ptr, szind);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- alloc_ctx_t *alloc_ctx, bool slow_path) {
- assert(!tsdn_null(tsdn) || tcache == NULL);
- assert(ptr != NULL);
- assert(size <= SC_LARGE_MAXCLASS);
-
- if (unlikely(tcache == NULL)) {
- arena_sdalloc_no_tcache(tsdn, ptr, size);
- return;
- }
-
- szind_t szind;
- bool slab;
- alloc_ctx_t local_ctx;
- if (config_prof && opt_prof) {
- if (alloc_ctx == NULL) {
- /* Uncommon case and should be a static check. */
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
- &rtree_ctx_fallback);
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &local_ctx.szind,
- &local_ctx.slab);
- assert(local_ctx.szind == sz_size2index(size));
- alloc_ctx = &local_ctx;
- }
- slab = alloc_ctx->slab;
- szind = alloc_ctx->szind;
- } else {
- /*
- * There is no risk of being confused by a promoted sampled
- * object, so base szind and slab on the given size.
- */
- szind = sz_size2index(size);
- slab = (szind < SC_NBINS);
- }
-
- if (config_debug) {
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &szind, &slab);
- extent_t *extent = rtree_extent_read(tsdn,
- &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(slab == extent_slab_get(extent));
- }
-
- if (likely(slab)) {
- /* Small allocation. */
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
- slow_path);
- } else {
- arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
+#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
+#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/ticker.h"
+
+JEMALLOC_ALWAYS_INLINE bool
+arena_has_default_hooks(arena_t *arena) {
+ return (extent_hooks_get(arena) == &extent_hooks_default);
+}
+
+JEMALLOC_ALWAYS_INLINE arena_t *
+arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
+ if (arena != NULL) {
+ return arena;
+ }
+
+ /*
+	 * For huge allocations, use the dedicated huge arena if both are true:
+	 * 1) the caller requested automatic arena selection (arena == NULL),
+	 * and 2) the thread is not assigned to a manual arena.
+ */
+ if (unlikely(size >= oversize_threshold)) {
+ arena_t *tsd_arena = tsd_arena_get(tsd);
+ if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
+ return arena_choose_huge(tsd);
+ }
+ }
+
+ return arena_choose(tsd, NULL);
+}
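A minimal caller-side sketch of the routing above, assuming a live tsd and a usable size computed elsewhere (tsd and usize are placeholders): passing arena == NULL opts into automatic selection, so requests at or above oversize_threshold can be redirected to the dedicated huge arena.

/* Sketch only: tsd and usize are assumed to exist in the caller. */
arena_t *chosen = arena_choose_maybe_huge(tsd, NULL, usize);
if (unlikely(chosen == NULL)) {
	/* Arena selection can fail (e.g. during bootstrap). */
	return NULL;
}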
+
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ /* Static check. */
+ if (alloc_ctx == NULL) {
+ const extent_t *extent = iealloc(tsdn, ptr);
+ if (unlikely(!extent_slab_get(extent))) {
+ return large_prof_tctx_get(tsdn, extent);
+ }
+ } else {
+ if (unlikely(!alloc_ctx->slab)) {
+ return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
+ }
+ }
+ return (prof_tctx_t *)(uintptr_t)1U;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ /* Static check. */
+ if (alloc_ctx == NULL) {
+ extent_t *extent = iealloc(tsdn, ptr);
+ if (unlikely(!extent_slab_get(extent))) {
+ large_prof_tctx_set(tsdn, extent, tctx);
+ }
+ } else {
+ if (unlikely(!alloc_ctx->slab)) {
+ large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
+ }
+ }
+}
+
+static inline void
+arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ extent_t *extent = iealloc(tsdn, ptr);
+ assert(!extent_slab_get(extent));
+
+ large_prof_tctx_reset(tsdn, extent);
+}
+
+JEMALLOC_ALWAYS_INLINE nstime_t
+arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr,
+ alloc_ctx_t *alloc_ctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ extent_t *extent = iealloc(tsdn, ptr);
+ /*
+	 * Unlike arena_prof_tctx_{get, set}, we only call this once we're
+ * sure we have a sampled allocation.
+ */
+ assert(!extent_slab_get(extent));
+ return large_prof_alloc_time_get(extent);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
+ nstime_t t) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ extent_t *extent = iealloc(tsdn, ptr);
+ assert(!extent_slab_get(extent));
+ large_prof_alloc_time_set(extent, t);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
+ tsd_t *tsd;
+ ticker_t *decay_ticker;
+
+ if (unlikely(tsdn_null(tsdn))) {
+ return;
+ }
+ tsd = tsdn_tsd(tsdn);
+ decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
+ if (unlikely(decay_ticker == NULL)) {
+ return;
+ }
+ if (unlikely(ticker_ticks(decay_ticker, nticks))) {
+ arena_decay(tsdn, arena, false, false);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
+
+ arena_decay_ticks(tsdn, arena, 1);
+}
+
+/* Purge a single extent to retained / unmapped directly. */
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_extent(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ size_t extent_size = extent_size_get(extent);
+ extent_dalloc_wrapper(tsdn, arena,
+ r_extent_hooks, extent);
+ if (config_stats) {
+ /* Update stats accordingly. */
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &arena->decay_dirty.stats->nmadvise, 1);
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &arena->decay_dirty.stats->purged, extent_size >> LG_PAGE);
+ arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
+ extent_size);
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
+ tcache_t *tcache, bool slow_path) {
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+
+ if (likely(tcache != NULL)) {
+ if (likely(size <= SC_SMALL_MAXCLASS)) {
+ return tcache_alloc_small(tsdn_tsd(tsdn), arena,
+ tcache, size, ind, zero, slow_path);
+ }
+ if (likely(size <= tcache_maxclass)) {
+ return tcache_alloc_large(tsdn_tsd(tsdn), arena,
+ tcache, size, ind, zero, slow_path);
+ }
+ /* (size > tcache_maxclass) case falls through. */
+ assert(size > tcache_maxclass);
+ }
+
+ return arena_malloc_hard(tsdn, arena, size, ind, zero);
+}
+
+JEMALLOC_ALWAYS_INLINE arena_t *
+arena_aalloc(tsdn_t *tsdn, const void *ptr) {
+ return extent_arena_get(iealloc(tsdn, ptr));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_salloc(tsdn_t *tsdn, const void *ptr) {
+ assert(ptr != NULL);
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true);
+ assert(szind != SC_NSIZES);
+
+ return sz_index2size(szind);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
+ /*
+ * Return 0 if ptr is not within an extent managed by jemalloc. This
+ * function has two extra costs relative to isalloc():
+ * - The rtree calls cannot claim to be dependent lookups, which induces
+ * rtree lookup load dependencies.
+ * - The lookup may fail, so there is an extra branch to check for
+ * failure.
+ */
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ extent_t *extent;
+ szind_t szind;
+ if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, false, &extent, &szind)) {
+ return 0;
+ }
+
+ if (extent == NULL) {
+ return 0;
+ }
+ assert(extent_state_get(extent) == extent_state_active);
+ /* Only slab members should be looked up via interior pointers. */
+ assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
+
+ assert(szind != SC_NSIZES);
+
+ return sz_index2size(szind);
+}
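A short usage sketch of the distinction drawn in the comment above, with maybe_ptr standing in for an arbitrary, possibly foreign pointer: arena_vsalloc() is the safe probe, while arena_salloc() may only be applied to pointers known to be jemalloc-managed.

/* Sketch: maybe_ptr may or may not have come from jemalloc. */
size_t probed = arena_vsalloc(tsdn, maybe_ptr);
if (probed == 0) {
	/* Not managed by jemalloc; arena_salloc() would be invalid here. */
} else {
	assert(probed == arena_salloc(tsdn, maybe_ptr));
}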
+
+static inline void
+arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
+ if (config_prof && unlikely(szind < SC_NBINS)) {
+ arena_dalloc_promoted(tsdn, ptr, NULL, true);
+ } else {
+ extent_t *extent = iealloc(tsdn, ptr);
+ large_dalloc(tsdn, extent);
+ }
+}
+
+static inline void
+arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
+ assert(ptr != NULL);
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ szind_t szind;
+ bool slab;
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+ true, &szind, &slab);
+
+ if (config_debug) {
+ extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
+ rtree_ctx, (uintptr_t)ptr, true);
+ assert(szind == extent_szind_get(extent));
+ assert(szind < SC_NSIZES);
+ assert(slab == extent_slab_get(extent));
+ }
+
+ if (likely(slab)) {
+ /* Small allocation. */
+ arena_dalloc_small(tsdn, ptr);
+ } else {
+ arena_dalloc_large_no_tcache(tsdn, ptr, szind);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
+ bool slow_path) {
+ if (szind < nhbins) {
+ if (config_prof && unlikely(szind < SC_NBINS)) {
+ arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
+ } else {
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
+ slow_path);
+ }
+ } else {
+ extent_t *extent = iealloc(tsdn, ptr);
+ large_dalloc(tsdn, extent);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+ alloc_ctx_t *alloc_ctx, bool slow_path) {
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+ assert(ptr != NULL);
+
+ if (unlikely(tcache == NULL)) {
+ arena_dalloc_no_tcache(tsdn, ptr);
+ return;
+ }
+
+ szind_t szind;
+ bool slab;
+ rtree_ctx_t *rtree_ctx;
+ if (alloc_ctx != NULL) {
+ szind = alloc_ctx->szind;
+ slab = alloc_ctx->slab;
+ assert(szind != SC_NSIZES);
+ } else {
+ rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &szind, &slab);
+ }
+
+ if (config_debug) {
+ rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
+ extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
+ rtree_ctx, (uintptr_t)ptr, true);
+ assert(szind == extent_szind_get(extent));
+ assert(szind < SC_NSIZES);
+ assert(slab == extent_slab_get(extent));
+ }
+
+ if (likely(slab)) {
+ /* Small allocation. */
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
+ slow_path);
+ } else {
+ arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
+ }
+}
+
+static inline void
+arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
+ assert(ptr != NULL);
+ assert(size <= SC_LARGE_MAXCLASS);
+
+ szind_t szind;
+ bool slab;
+ if (!config_prof || !opt_prof) {
+ /*
+ * There is no risk of being confused by a promoted sampled
+ * object, so base szind and slab on the given size.
+ */
+ szind = sz_size2index(size);
+ slab = (szind < SC_NBINS);
+ }
+
+ if ((config_prof && opt_prof) || config_debug) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+ &rtree_ctx_fallback);
+
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &szind, &slab);
+
+ assert(szind == sz_size2index(size));
+ assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
+
+ if (config_debug) {
+ extent_t *extent = rtree_extent_read(tsdn,
+ &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
+ assert(szind == extent_szind_get(extent));
+ assert(slab == extent_slab_get(extent));
+ }
+ }
+
+ if (likely(slab)) {
+ /* Small allocation. */
+ arena_dalloc_small(tsdn, ptr);
+ } else {
+ arena_dalloc_large_no_tcache(tsdn, ptr, szind);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ alloc_ctx_t *alloc_ctx, bool slow_path) {
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+ assert(ptr != NULL);
+ assert(size <= SC_LARGE_MAXCLASS);
+
+ if (unlikely(tcache == NULL)) {
+ arena_sdalloc_no_tcache(tsdn, ptr, size);
+ return;
+ }
+
+ szind_t szind;
+ bool slab;
+ alloc_ctx_t local_ctx;
+ if (config_prof && opt_prof) {
+ if (alloc_ctx == NULL) {
+ /* Uncommon case and should be a static check. */
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+ &rtree_ctx_fallback);
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &local_ctx.szind,
+ &local_ctx.slab);
+ assert(local_ctx.szind == sz_size2index(size));
+ alloc_ctx = &local_ctx;
+ }
+ slab = alloc_ctx->slab;
+ szind = alloc_ctx->szind;
+ } else {
+ /*
+ * There is no risk of being confused by a promoted sampled
+ * object, so base szind and slab on the given size.
+ */
+ szind = sz_size2index(size);
+ slab = (szind < SC_NBINS);
+ }
+
+ if (config_debug) {
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &szind, &slab);
+ extent_t *extent = rtree_extent_read(tsdn,
+ &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
+ assert(szind == extent_szind_get(extent));
+ assert(slab == extent_slab_get(extent));
+ }
+
+ if (likely(slab)) {
+ /* Small allocation. */
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
+ slow_path);
+ } else {
+ arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/arena_stats.h b/contrib/libs/jemalloc/include/jemalloc/internal/arena_stats.h
index 23949ed926..ef6955a2f6 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/arena_stats.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/arena_stats.h
@@ -1,271 +1,271 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
-#define JEMALLOC_INTERNAL_ARENA_STATS_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_prof.h"
-#include "jemalloc/internal/sc.h"
-
-JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-
-/*
- * In those architectures that support 64-bit atomics, we use atomic updates for
- * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
- * externally.
- */
-#ifdef JEMALLOC_ATOMIC_U64
-typedef atomic_u64_t arena_stats_u64_t;
-#else
-/* Must hold the arena stats mutex while reading atomically. */
-typedef uint64_t arena_stats_u64_t;
-#endif
-
-typedef struct arena_stats_large_s arena_stats_large_t;
-struct arena_stats_large_s {
- /*
- * Total number of allocation/deallocation requests served directly by
- * the arena.
- */
- arena_stats_u64_t nmalloc;
- arena_stats_u64_t ndalloc;
-
- /*
- * Number of allocation requests that correspond to this size class.
- * This includes requests served by tcache, though tcache only
- * periodically merges into this counter.
- */
- arena_stats_u64_t nrequests; /* Partially derived. */
- /*
- * Number of tcache fills / flushes for large (similarly, periodically
- * merged). Note that there is no large tcache batch-fill currently
- * (i.e. only fill 1 at a time); however flush may be batched.
- */
- arena_stats_u64_t nfills; /* Partially derived. */
- arena_stats_u64_t nflushes; /* Partially derived. */
-
- /* Current number of allocations of this size class. */
- size_t curlextents; /* Derived. */
-};
-
-typedef struct arena_stats_decay_s arena_stats_decay_t;
-struct arena_stats_decay_s {
- /* Total number of purge sweeps. */
- arena_stats_u64_t npurge;
- /* Total number of madvise calls made. */
- arena_stats_u64_t nmadvise;
- /* Total number of pages purged. */
- arena_stats_u64_t purged;
-};
-
-typedef struct arena_stats_extents_s arena_stats_extents_t;
-struct arena_stats_extents_s {
- /*
- * Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
- * We track both bytes and # of extents: two extents in the same bucket
- * may have different sizes if adjacent size classes differ by more than
- * a page, so bytes cannot always be derived from # of extents.
- */
- atomic_zu_t ndirty;
- atomic_zu_t dirty_bytes;
- atomic_zu_t nmuzzy;
- atomic_zu_t muzzy_bytes;
- atomic_zu_t nretained;
- atomic_zu_t retained_bytes;
-};
-
-/*
- * Arena stats. Note that fields marked "derived" are not directly maintained
- * within the arena code; rather their values are derived during stats merge
- * requests.
- */
-typedef struct arena_stats_s arena_stats_t;
-struct arena_stats_s {
-#ifndef JEMALLOC_ATOMIC_U64
- malloc_mutex_t mtx;
-#endif
-
- /* Number of bytes currently mapped, excluding retained memory. */
- atomic_zu_t mapped; /* Partially derived. */
-
- /*
- * Number of unused virtual memory bytes currently retained. Retained
- * bytes are technically mapped (though always decommitted or purged),
- * but they are excluded from the mapped statistic (above).
- */
- atomic_zu_t retained; /* Derived. */
-
- /* Number of extent_t structs allocated by base, but not being used. */
- atomic_zu_t extent_avail;
-
- arena_stats_decay_t decay_dirty;
- arena_stats_decay_t decay_muzzy;
-
- atomic_zu_t base; /* Derived. */
- atomic_zu_t internal;
- atomic_zu_t resident; /* Derived. */
- atomic_zu_t metadata_thp;
-
- atomic_zu_t allocated_large; /* Derived. */
- arena_stats_u64_t nmalloc_large; /* Derived. */
- arena_stats_u64_t ndalloc_large; /* Derived. */
- arena_stats_u64_t nfills_large; /* Derived. */
- arena_stats_u64_t nflushes_large; /* Derived. */
- arena_stats_u64_t nrequests_large; /* Derived. */
-
- /* VM space had to be leaked (undocumented). Normally 0. */
- atomic_zu_t abandoned_vm;
-
- /* Number of bytes cached in tcache associated with this arena. */
- atomic_zu_t tcache_bytes; /* Derived. */
-
- mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
-
- /* One element for each large size class. */
- arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
-
- /* Arena uptime. */
- nstime_t uptime;
-};
-
-static inline bool
-arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
- if (config_debug) {
- for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
- assert(((char *)arena_stats)[i] == 0);
- }
- }
-#ifndef JEMALLOC_ATOMIC_U64
- if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
- WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
- return true;
- }
-#endif
- /* Memory is zeroed, so there is no need to clear stats. */
- return false;
-}
-
-static inline void
-arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
-#ifndef JEMALLOC_ATOMIC_U64
- malloc_mutex_lock(tsdn, &arena_stats->mtx);
-#endif
-}
-
-static inline void
-arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
-#ifndef JEMALLOC_ATOMIC_U64
- malloc_mutex_unlock(tsdn, &arena_stats->mtx);
-#endif
-}
-
-static inline uint64_t
-arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
- arena_stats_u64_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
- return atomic_load_u64(p, ATOMIC_RELAXED);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- return *p;
-#endif
-}
-
-static inline void
-arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
- arena_stats_u64_t *p, uint64_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
- atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- *p += x;
-#endif
-}
-
-static inline void
-arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
- arena_stats_u64_t *p, uint64_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
- uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
- assert(r - x <= r);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- *p -= x;
- assert(*p + x >= *p);
-#endif
-}
-
-/*
- * Non-atomically sets *dst += src. *dst needs external synchronization.
- * This lets us avoid the cost of a fetch_add when it is unnecessary (note
- * that the types here are atomic).
- */
-static inline void
-arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
-#ifdef JEMALLOC_ATOMIC_U64
- uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
- atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
-#else
- *dst += src;
-#endif
-}
-
-static inline size_t
-arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
- atomic_zu_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
- return atomic_load_zu(p, ATOMIC_RELAXED);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- return atomic_load_zu(p, ATOMIC_RELAXED);
-#endif
-}
-
-static inline void
-arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
- atomic_zu_t *p, size_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
- atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
- atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
-#endif
-}
-
-static inline void
-arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
- atomic_zu_t *p, size_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
- size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
- assert(r - x <= r);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
- atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
-#endif
-}
-
-/* Like the _u64 variant, needs an externally synchronized *dst. */
-static inline void
-arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
- size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
- atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
-}
-
-static inline void
-arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
- szind_t szind, uint64_t nrequests) {
- arena_stats_lock(tsdn, arena_stats);
- arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
- arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
- arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
- arena_stats_unlock(tsdn, arena_stats);
-}
-
-static inline void
-arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
- arena_stats_lock(tsdn, arena_stats);
- arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
- arena_stats_unlock(tsdn, arena_stats);
-}
-
-#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
+#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
+#define JEMALLOC_INTERNAL_ARENA_STATS_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/sc.h"
+
+JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
+
+/*
+ * In those architectures that support 64-bit atomics, we use atomic updates for
+ * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
+ * externally.
+ */
+#ifdef JEMALLOC_ATOMIC_U64
+typedef atomic_u64_t arena_stats_u64_t;
+#else
+/* Must hold the arena stats mutex while reading atomically. */
+typedef uint64_t arena_stats_u64_t;
+#endif
+
+typedef struct arena_stats_large_s arena_stats_large_t;
+struct arena_stats_large_s {
+ /*
+ * Total number of allocation/deallocation requests served directly by
+ * the arena.
+ */
+ arena_stats_u64_t nmalloc;
+ arena_stats_u64_t ndalloc;
+
+ /*
+ * Number of allocation requests that correspond to this size class.
+ * This includes requests served by tcache, though tcache only
+ * periodically merges into this counter.
+ */
+ arena_stats_u64_t nrequests; /* Partially derived. */
+ /*
+ * Number of tcache fills / flushes for large (similarly, periodically
+ * merged). Note that there is no large tcache batch-fill currently
+ * (i.e. only fill 1 at a time); however flush may be batched.
+ */
+ arena_stats_u64_t nfills; /* Partially derived. */
+ arena_stats_u64_t nflushes; /* Partially derived. */
+
+ /* Current number of allocations of this size class. */
+ size_t curlextents; /* Derived. */
+};
+
+typedef struct arena_stats_decay_s arena_stats_decay_t;
+struct arena_stats_decay_s {
+ /* Total number of purge sweeps. */
+ arena_stats_u64_t npurge;
+ /* Total number of madvise calls made. */
+ arena_stats_u64_t nmadvise;
+ /* Total number of pages purged. */
+ arena_stats_u64_t purged;
+};
+
+typedef struct arena_stats_extents_s arena_stats_extents_t;
+struct arena_stats_extents_s {
+ /*
+ * Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
+ * We track both bytes and # of extents: two extents in the same bucket
+ * may have different sizes if adjacent size classes differ by more than
+ * a page, so bytes cannot always be derived from # of extents.
+ */
+ atomic_zu_t ndirty;
+ atomic_zu_t dirty_bytes;
+ atomic_zu_t nmuzzy;
+ atomic_zu_t muzzy_bytes;
+ atomic_zu_t nretained;
+ atomic_zu_t retained_bytes;
+};
+
+/*
+ * Arena stats. Note that fields marked "derived" are not directly maintained
+ * within the arena code; rather their values are derived during stats merge
+ * requests.
+ */
+typedef struct arena_stats_s arena_stats_t;
+struct arena_stats_s {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_t mtx;
+#endif
+
+ /* Number of bytes currently mapped, excluding retained memory. */
+ atomic_zu_t mapped; /* Partially derived. */
+
+ /*
+ * Number of unused virtual memory bytes currently retained. Retained
+ * bytes are technically mapped (though always decommitted or purged),
+ * but they are excluded from the mapped statistic (above).
+ */
+ atomic_zu_t retained; /* Derived. */
+
+ /* Number of extent_t structs allocated by base, but not being used. */
+ atomic_zu_t extent_avail;
+
+ arena_stats_decay_t decay_dirty;
+ arena_stats_decay_t decay_muzzy;
+
+ atomic_zu_t base; /* Derived. */
+ atomic_zu_t internal;
+ atomic_zu_t resident; /* Derived. */
+ atomic_zu_t metadata_thp;
+
+ atomic_zu_t allocated_large; /* Derived. */
+ arena_stats_u64_t nmalloc_large; /* Derived. */
+ arena_stats_u64_t ndalloc_large; /* Derived. */
+ arena_stats_u64_t nfills_large; /* Derived. */
+ arena_stats_u64_t nflushes_large; /* Derived. */
+ arena_stats_u64_t nrequests_large; /* Derived. */
+
+ /* VM space had to be leaked (undocumented). Normally 0. */
+ atomic_zu_t abandoned_vm;
+
+ /* Number of bytes cached in tcache associated with this arena. */
+ atomic_zu_t tcache_bytes; /* Derived. */
+
+ mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
+
+ /* One element for each large size class. */
+ arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
+
+ /* Arena uptime. */
+ nstime_t uptime;
+};
+
+static inline bool
+arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+ if (config_debug) {
+ for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
+ assert(((char *)arena_stats)[i] == 0);
+ }
+ }
+#ifndef JEMALLOC_ATOMIC_U64
+ if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
+ WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+#endif
+ /* Memory is zeroed, so there is no need to clear stats. */
+ return false;
+}
+
+static inline void
+arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_lock(tsdn, &arena_stats->mtx);
+#endif
+}
+
+static inline void
+arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_unlock(tsdn, &arena_stats->mtx);
+#endif
+}
+
+static inline uint64_t
+arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_u64(p, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ return *p;
+#endif
+}
+
+static inline void
+arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ *p += x;
+#endif
+}
+
+static inline void
+arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ *p -= x;
+ assert(*p + x >= *p);
+#endif
+}
+
+/*
+ * Non-atomically sets *dst += src. *dst needs external synchronization.
+ * This lets us avoid the cost of a fetch_add when it is unnecessary (note
+ * that the types here are atomic).
+ */
+static inline void
+arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+ atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
+#else
+ *dst += src;
+#endif
+}
+
+static inline size_t
+arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ atomic_zu_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_zu(p, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ return atomic_load_zu(p, ATOMIC_RELAXED);
+#endif
+}
+
+static inline void
+arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ atomic_zu_t *p, size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+ atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
+#endif
+}
+
+static inline void
+arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ atomic_zu_t *p, size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+ atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
+#endif
+}
+
+/* Like the _u64 variant, needs an externally synchronized *dst. */
+static inline void
+arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
+ size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
+ atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
+}
+
+static inline void
+arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ szind_t szind, uint64_t nrequests) {
+ arena_stats_lock(tsdn, arena_stats);
+ arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
+ arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
+ arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
+ arena_stats_unlock(tsdn, arena_stats);
+}
+
+static inline void
+arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
+ arena_stats_lock(tsdn, arena_stats);
+ arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
+ arena_stats_unlock(tsdn, arena_stats);
+}
+
+#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
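A hedged sketch of how these helpers compose when merging one counter into a caller-owned snapshot (merged is a local, externally synchronized destination; tsdn and arena are assumed): the lock/unlock pair is a no-op on platforms with native 64-bit atomics and a real mutex otherwise.

/* Sketch: read one live counter and fold it into a private snapshot. */
arena_stats_u64_t merged = ATOMIC_INIT(0);
arena_stats_lock(tsdn, &arena->stats);
uint64_t live = arena_stats_read_u64(tsdn, &arena->stats,
    &arena->stats.nmalloc_large);
arena_stats_unlock(tsdn, &arena->stats);
arena_stats_accum_u64(&merged, live);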
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/arena_structs_a.h b/contrib/libs/jemalloc/include/jemalloc/internal/arena_structs_a.h
index 46aa77c884..d19bb116f4 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/arena_structs_a.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/arena_structs_a.h
@@ -1,11 +1,11 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
-#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
-
-#include "jemalloc/internal/bitmap.h"
-
-struct arena_slab_data_s {
- /* Per region allocated/deallocated bitmap. */
- bitmap_t bitmap[BITMAP_GROUPS_MAX];
-};
-
-#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
+#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
+#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
+
+#include "jemalloc/internal/bitmap.h"
+
+struct arena_slab_data_s {
+ /* Per region allocated/deallocated bitmap. */
+ bitmap_t bitmap[BITMAP_GROUPS_MAX];
+};
+
+#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/arena_structs_b.h b/contrib/libs/jemalloc/include/jemalloc/internal/arena_structs_b.h
index eeab57fd6e..6b5f4ef7e9 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/arena_structs_b.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/arena_structs_b.h
@@ -1,232 +1,232 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
-#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
-
-#include "jemalloc/internal/arena_stats.h"
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/bin.h"
-#include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/extent_dss.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/smoothstep.h"
-#include "jemalloc/internal/ticker.h"
-
-struct arena_decay_s {
- /* Synchronizes all non-atomic fields. */
- malloc_mutex_t mtx;
- /*
- * True if a thread is currently purging the extents associated with
- * this decay structure.
- */
- bool purging;
- /*
- * Approximate time in milliseconds from the creation of a set of unused
- * dirty pages until an equivalent set of unused dirty pages is purged
- * and/or reused.
- */
- atomic_zd_t time_ms;
- /* time / SMOOTHSTEP_NSTEPS. */
- nstime_t interval;
- /*
- * Time at which the current decay interval logically started. We do
- * not actually advance to a new epoch until sometime after it starts
- * because of scheduling and computation delays, and it is even possible
- * to completely skip epochs. In all cases, during epoch advancement we
- * merge all relevant activity into the most recently recorded epoch.
- */
- nstime_t epoch;
- /* Deadline randomness generator. */
- uint64_t jitter_state;
- /*
- * Deadline for current epoch. This is the sum of interval and per
- * epoch jitter which is a uniform random variable in [0..interval).
- * Epochs always advance by precise multiples of interval, but we
- * randomize the deadline to reduce the likelihood of arenas purging in
- * lockstep.
- */
- nstime_t deadline;
- /*
- * Number of unpurged pages at beginning of current epoch. During epoch
- * advancement we use the delta between arena->decay_*.nunpurged and
- * extents_npages_get(&arena->extents_*) to determine how many dirty
- * pages, if any, were generated.
- */
- size_t nunpurged;
- /*
- * Trailing log of how many unused dirty pages were generated during
- * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
- * element is the most recent epoch. Corresponding epoch times are
- * relative to epoch.
- */
- size_t backlog[SMOOTHSTEP_NSTEPS];
-
- /*
- * Pointer to associated stats. These stats are embedded directly in
- * the arena's stats due to how stats structures are shared between the
- * arena and ctl code.
- *
- * Synchronization: Same as associated arena's stats field. */
- arena_stats_decay_t *stats;
- /* Peak number of pages in associated extents. Used for debug only. */
- uint64_t ceil_npages;
-};
-
-struct arena_s {
- /*
- * Number of threads currently assigned to this arena. Each thread has
- * two distinct assignments, one for application-serving allocation, and
- * the other for internal metadata allocation. Internal metadata must
- * not be allocated from arenas explicitly created via the arenas.create
- * mallctl, because the arena.<i>.reset mallctl indiscriminately
- * discards all allocations for the affected arena.
- *
- * 0: Application allocation.
- * 1: Internal metadata allocation.
- *
- * Synchronization: atomic.
- */
- atomic_u_t nthreads[2];
-
- /* Next bin shard for binding new threads. Synchronization: atomic. */
- atomic_u_t binshard_next;
-
- /*
- * When percpu_arena is enabled, to amortize the cost of reading /
- * updating the current CPU id, track the most recent thread accessing
- * this arena, and only read CPU if there is a mismatch.
- */
- tsdn_t *last_thd;
-
- /* Synchronization: internal. */
- arena_stats_t stats;
-
- /*
- * Lists of tcaches and cache_bin_array_descriptors for extant threads
- * associated with this arena. Stats from these are merged
- * incrementally, and at exit if opt_stats_print is enabled.
- *
- * Synchronization: tcache_ql_mtx.
- */
- ql_head(tcache_t) tcache_ql;
- ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
- malloc_mutex_t tcache_ql_mtx;
-
- /* Synchronization: internal. */
- prof_accum_t prof_accum;
-
- /*
- * PRNG state for cache index randomization of large allocation base
- * pointers.
- *
- * Synchronization: atomic.
- */
- atomic_zu_t offset_state;
-
- /*
- * Extent serial number generator state.
- *
- * Synchronization: atomic.
- */
- atomic_zu_t extent_sn_next;
-
- /*
- * Represents a dss_prec_t, but atomically.
- *
- * Synchronization: atomic.
- */
- atomic_u_t dss_prec;
-
- /*
- * Number of pages in active extents.
- *
- * Synchronization: atomic.
- */
- atomic_zu_t nactive;
-
- /*
- * Extant large allocations.
- *
- * Synchronization: large_mtx.
- */
- extent_list_t large;
- /* Synchronizes all large allocation/update/deallocation. */
- malloc_mutex_t large_mtx;
-
- /*
- * Collections of extents that were previously allocated. These are
- * used when allocating extents, in an attempt to re-use address space.
- *
- * Synchronization: internal.
- */
- extents_t extents_dirty;
- extents_t extents_muzzy;
- extents_t extents_retained;
-
- /*
- * Decay-based purging state, responsible for scheduling extent state
- * transitions.
- *
- * Synchronization: internal.
- */
- arena_decay_t decay_dirty; /* dirty --> muzzy */
- arena_decay_t decay_muzzy; /* muzzy --> retained */
-
- /*
- * Next extent size class in a growing series to use when satisfying a
- * request via the extent hooks (only if opt_retain). This limits the
- * number of disjoint virtual memory ranges so that extent merging can
- * be effective even if multiple arenas' extent allocation requests are
- * highly interleaved.
- *
- * retain_grow_limit is the max allowed size ind to expand (unless the
- * required size is greater). Default is no limit, and controlled
- * through mallctl only.
- *
- * Synchronization: extent_grow_mtx
- */
- pszind_t extent_grow_next;
- pszind_t retain_grow_limit;
- malloc_mutex_t extent_grow_mtx;
-
- /*
- * Available extent structures that were allocated via
- * base_alloc_extent().
- *
- * Synchronization: extent_avail_mtx.
- */
- extent_tree_t extent_avail;
- atomic_zu_t extent_avail_cnt;
- malloc_mutex_t extent_avail_mtx;
-
- /*
- * bins is used to store heaps of free regions.
- *
- * Synchronization: internal.
- */
- bins_t bins[SC_NBINS];
-
- /*
- * Base allocator, from which arena metadata are allocated.
- *
- * Synchronization: internal.
- */
- base_t *base;
- /* Used to determine uptime. Read-only after initialization. */
- nstime_t create_time;
-};
-
-/* Used in conjunction with tsd for fast arena-related context lookup. */
-struct arena_tdata_s {
- ticker_t decay_ticker;
-};
-
-/* Used to pass rtree lookup context down the path. */
-struct alloc_ctx_s {
- szind_t szind;
- bool slab;
-};
-
-#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
+#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
+#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
+
+#include "jemalloc/internal/arena_stats.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bin.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/ticker.h"
+
+struct arena_decay_s {
+ /* Synchronizes all non-atomic fields. */
+ malloc_mutex_t mtx;
+ /*
+ * True if a thread is currently purging the extents associated with
+ * this decay structure.
+ */
+ bool purging;
+ /*
+ * Approximate time in milliseconds from the creation of a set of unused
+ * dirty pages until an equivalent set of unused dirty pages is purged
+ * and/or reused.
+ */
+ atomic_zd_t time_ms;
+ /* time / SMOOTHSTEP_NSTEPS. */
+ nstime_t interval;
+ /*
+ * Time at which the current decay interval logically started. We do
+ * not actually advance to a new epoch until sometime after it starts
+ * because of scheduling and computation delays, and it is even possible
+ * to completely skip epochs. In all cases, during epoch advancement we
+ * merge all relevant activity into the most recently recorded epoch.
+ */
+ nstime_t epoch;
+ /* Deadline randomness generator. */
+ uint64_t jitter_state;
+ /*
+ * Deadline for current epoch. This is the sum of interval and per
+ * epoch jitter which is a uniform random variable in [0..interval).
+ * Epochs always advance by precise multiples of interval, but we
+ * randomize the deadline to reduce the likelihood of arenas purging in
+ * lockstep.
+ */
+ nstime_t deadline;
+ /*
+ * Number of unpurged pages at beginning of current epoch. During epoch
+ * advancement we use the delta between arena->decay_*.nunpurged and
+ * extents_npages_get(&arena->extents_*) to determine how many dirty
+ * pages, if any, were generated.
+ */
+ size_t nunpurged;
+ /*
+ * Trailing log of how many unused dirty pages were generated during
+ * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+ * element is the most recent epoch. Corresponding epoch times are
+ * relative to epoch.
+ */
+ size_t backlog[SMOOTHSTEP_NSTEPS];
+
+ /*
+ * Pointer to associated stats. These stats are embedded directly in
+ * the arena's stats due to how stats structures are shared between the
+ * arena and ctl code.
+ *
+ * Synchronization: Same as associated arena's stats field. */
+ arena_stats_decay_t *stats;
+ /* Peak number of pages in associated extents. Used for debug only. */
+ uint64_t ceil_npages;
+};
+
+struct arena_s {
+ /*
+ * Number of threads currently assigned to this arena. Each thread has
+ * two distinct assignments, one for application-serving allocation, and
+ * the other for internal metadata allocation. Internal metadata must
+ * not be allocated from arenas explicitly created via the arenas.create
+ * mallctl, because the arena.<i>.reset mallctl indiscriminately
+ * discards all allocations for the affected arena.
+ *
+ * 0: Application allocation.
+ * 1: Internal metadata allocation.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_u_t nthreads[2];
+
+ /* Next bin shard for binding new threads. Synchronization: atomic. */
+ atomic_u_t binshard_next;
+
+ /*
+ * When percpu_arena is enabled, to amortize the cost of reading /
+ * updating the current CPU id, track the most recent thread accessing
+ * this arena, and only read CPU if there is a mismatch.
+ */
+ tsdn_t *last_thd;
+
+ /* Synchronization: internal. */
+ arena_stats_t stats;
+
+ /*
+ * Lists of tcaches and cache_bin_array_descriptors for extant threads
+ * associated with this arena. Stats from these are merged
+ * incrementally, and at exit if opt_stats_print is enabled.
+ *
+ * Synchronization: tcache_ql_mtx.
+ */
+ ql_head(tcache_t) tcache_ql;
+ ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
+ malloc_mutex_t tcache_ql_mtx;
+
+ /* Synchronization: internal. */
+ prof_accum_t prof_accum;
+
+ /*
+ * PRNG state for cache index randomization of large allocation base
+ * pointers.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_zu_t offset_state;
+
+ /*
+ * Extent serial number generator state.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_zu_t extent_sn_next;
+
+ /*
+ * Represents a dss_prec_t, but atomically.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_u_t dss_prec;
+
+ /*
+ * Number of pages in active extents.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_zu_t nactive;
+
+ /*
+ * Extant large allocations.
+ *
+ * Synchronization: large_mtx.
+ */
+ extent_list_t large;
+ /* Synchronizes all large allocation/update/deallocation. */
+ malloc_mutex_t large_mtx;
+
+ /*
+ * Collections of extents that were previously allocated. These are
+ * used when allocating extents, in an attempt to re-use address space.
+ *
+ * Synchronization: internal.
+ */
+ extents_t extents_dirty;
+ extents_t extents_muzzy;
+ extents_t extents_retained;
+
+ /*
+ * Decay-based purging state, responsible for scheduling extent state
+ * transitions.
+ *
+ * Synchronization: internal.
+ */
+ arena_decay_t decay_dirty; /* dirty --> muzzy */
+ arena_decay_t decay_muzzy; /* muzzy --> retained */
+
+ /*
+ * Next extent size class in a growing series to use when satisfying a
+ * request via the extent hooks (only if opt_retain). This limits the
+ * number of disjoint virtual memory ranges so that extent merging can
+ * be effective even if multiple arenas' extent allocation requests are
+ * highly interleaved.
+ *
+ * retain_grow_limit is the max allowed size ind to expand (unless the
+ * required size is greater). Default is no limit, and controlled
+ * through mallctl only.
+ *
+ * Synchronization: extent_grow_mtx
+ */
+ pszind_t extent_grow_next;
+ pszind_t retain_grow_limit;
+ malloc_mutex_t extent_grow_mtx;
+
+ /*
+ * Available extent structures that were allocated via
+ * base_alloc_extent().
+ *
+ * Synchronization: extent_avail_mtx.
+ */
+ extent_tree_t extent_avail;
+ atomic_zu_t extent_avail_cnt;
+ malloc_mutex_t extent_avail_mtx;
+
+ /*
+ * bins is used to store heaps of free regions.
+ *
+ * Synchronization: internal.
+ */
+ bins_t bins[SC_NBINS];
+
+ /*
+ * Base allocator, from which arena metadata are allocated.
+ *
+ * Synchronization: internal.
+ */
+ base_t *base;
+ /* Used to determine uptime. Read-only after initialization. */
+ nstime_t create_time;
+};
+
+/* Used in conjunction with tsd for fast arena-related context lookup. */
+struct arena_tdata_s {
+ ticker_t decay_ticker;
+};
+
+/* Used to pass rtree lookup context down the path. */
+struct alloc_ctx_s {
+ szind_t szind;
+ bool slab;
+};
+
+#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
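A hedged sketch of how alloc_ctx_t is passed "down the path" in practice (ptr and tsd are assumed, and tcache_get()/tsd_tsdn() come from other internal headers): one rtree lookup fills the context, and arena_dalloc() then skips its own lookup.

/* Sketch: one szind/slab lookup, reused by the deallocation fast path. */
alloc_ctx_t alloc_ctx;
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
arena_dalloc(tsd_tsdn(tsd), ptr, tcache_get(tsd), &alloc_ctx, false);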
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/arena_types.h b/contrib/libs/jemalloc/include/jemalloc/internal/arena_types.h
index 624937e4f5..095f17d258 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/arena_types.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/arena_types.h
@@ -1,51 +1,51 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
-#define JEMALLOC_INTERNAL_ARENA_TYPES_H
-
-#include "jemalloc/internal/sc.h"
-
-/* Maximum number of regions in one slab. */
-#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
-#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
-
-/* Default decay times in milliseconds. */
-#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
-#define MUZZY_DECAY_MS_DEFAULT (0)
-/* Number of event ticks between time checks. */
-#define DECAY_NTICKS_PER_UPDATE 1000
-
-typedef struct arena_slab_data_s arena_slab_data_t;
-typedef struct arena_decay_s arena_decay_t;
-typedef struct arena_s arena_t;
-typedef struct arena_tdata_s arena_tdata_t;
-typedef struct alloc_ctx_s alloc_ctx_t;
-
-typedef enum {
- percpu_arena_mode_names_base = 0, /* Used for options processing. */
-
- /*
- * *_uninit are used only during bootstrapping, and must correspond
- * to initialized variant plus percpu_arena_mode_enabled_base.
- */
- percpu_arena_uninit = 0,
- per_phycpu_arena_uninit = 1,
-
- /* All non-disabled modes must come after percpu_arena_disabled. */
- percpu_arena_disabled = 2,
-
- percpu_arena_mode_names_limit = 3, /* Used for options processing. */
- percpu_arena_mode_enabled_base = 3,
-
- percpu_arena = 3,
- per_phycpu_arena = 4 /* Hyper threads share arena. */
-} percpu_arena_mode_t;
-
-#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
-#define PERCPU_ARENA_DEFAULT percpu_arena_disabled
-
-/*
- * When allocation_size >= oversize_threshold, use the dedicated huge arena
- * (unless an arena index was explicitly specified). 0 disables the feature.
- */
-#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)
-
-#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
+#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
+#define JEMALLOC_INTERNAL_ARENA_TYPES_H
+
+#include "jemalloc/internal/sc.h"
+
+/* Maximum number of regions in one slab. */
+#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
+#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
+
+/* Default decay times in milliseconds. */
+#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
+#define MUZZY_DECAY_MS_DEFAULT (0)
+/* Number of event ticks between time checks. */
+#define DECAY_NTICKS_PER_UPDATE 1000
+
+typedef struct arena_slab_data_s arena_slab_data_t;
+typedef struct arena_decay_s arena_decay_t;
+typedef struct arena_s arena_t;
+typedef struct arena_tdata_s arena_tdata_t;
+typedef struct alloc_ctx_s alloc_ctx_t;
+
+typedef enum {
+ percpu_arena_mode_names_base = 0, /* Used for options processing. */
+
+ /*
+ * *_uninit are used only during bootstrapping, and must correspond
+ * to initialized variant plus percpu_arena_mode_enabled_base.
+ */
+ percpu_arena_uninit = 0,
+ per_phycpu_arena_uninit = 1,
+
+ /* All non-disabled modes must come after percpu_arena_disabled. */
+ percpu_arena_disabled = 2,
+
+ percpu_arena_mode_names_limit = 3, /* Used for options processing. */
+ percpu_arena_mode_enabled_base = 3,
+
+ percpu_arena = 3,
+ per_phycpu_arena = 4 /* Hyper threads share arena. */
+} percpu_arena_mode_t;
+
+#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
+#define PERCPU_ARENA_DEFAULT percpu_arena_disabled
+
+/*
+ * When allocation_size >= oversize_threshold, use the dedicated huge arena
+ * (unless an arena index was explicitly specified). 0 disables the feature.
+ */
+#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)
+
+#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
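Two small illustrations of the macros above, with alloc_size as a placeholder: the default oversize threshold works out to 8 << 20 == 8,388,608 bytes (8 MiB), and PERCPU_ARENA_ENABLED() is true exactly for the modes at or beyond percpu_arena_mode_enabled_base.

/* Sketch: 8 << 20 == 8388608 bytes == 8 MiB. */
size_t default_threshold = OVERSIZE_THRESHOLD_DEFAULT;
bool routed_to_huge_arena = (alloc_size >= default_threshold);
bool enabled = PERCPU_ARENA_ENABLED(percpu_arena);             /* true  (3 >= 3) */
bool disabled = PERCPU_ARENA_ENABLED(percpu_arena_disabled);   /* false (2 <  3) */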
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/assert.h b/contrib/libs/jemalloc/include/jemalloc/internal/assert.h
index be4d45b321..9bdcc0408e 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/assert.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/assert.h
@@ -1,56 +1,56 @@
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/util.h"
-
-/*
- * Define a custom assert() in order to reduce the chances of deadlock during
- * assertion failure.
- */
-#ifndef assert
-#define assert(e) do { \
- if (unlikely(config_debug && !(e))) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
- __FILE__, __LINE__, #e); \
- abort(); \
- } \
-} while (0)
-#endif
-
-#ifndef not_reached
-#define not_reached() do { \
- if (config_debug) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Unreachable code reached\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
- unreachable(); \
-} while (0)
-#endif
-
-#ifndef not_implemented
-#define not_implemented() do { \
- if (config_debug) { \
- malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
-} while (0)
-#endif
-
-#ifndef assert_not_implemented
-#define assert_not_implemented(e) do { \
- if (unlikely(config_debug && !(e))) { \
- not_implemented(); \
- } \
-} while (0)
-#endif
-
-/* Use to assert a particular configuration, e.g., cassert(config_debug). */
-#ifndef cassert
-#define cassert(c) do { \
- if (unlikely(!(c))) { \
- not_reached(); \
- } \
-} while (0)
-#endif
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/util.h"
+
+/*
+ * Define a custom assert() in order to reduce the chances of deadlock during
+ * assertion failure.
+ */
+#ifndef assert
+#define assert(e) do { \
+ if (unlikely(config_debug && !(e))) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
+} while (0)
+#endif
+
+#ifndef not_reached
+#define not_reached() do { \
+ if (config_debug) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+ unreachable(); \
+} while (0)
+#endif
+
+#ifndef not_implemented
+#define not_implemented() do { \
+ if (config_debug) { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+#endif
+
+#ifndef assert_not_implemented
+#define assert_not_implemented(e) do { \
+ if (unlikely(config_debug && !(e))) { \
+ not_implemented(); \
+ } \
+} while (0)
+#endif
+
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#ifndef cassert
+#define cassert(c) do { \
+ if (unlikely(!(c))) { \
+ not_reached(); \
+ } \
+} while (0)
+#endif
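A brief sketch of the intended division of labor, using a hypothetical helper: cassert() guards build-time configuration (as the profiling inlines earlier in this diff do with cassert(config_prof)), while assert() checks runtime invariants and is compiled out unless config_debug.

/* Sketch: hypothetical_prof_helper is not a real jemalloc function. */
static void
hypothetical_prof_helper(const void *ptr) {
	cassert(config_prof);	/* Must not be called without profiling support. */
	assert(ptr != NULL);	/* Evaluated only when config_debug. */
}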
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/atomic.h b/contrib/libs/jemalloc/include/jemalloc/internal/atomic.h
index cc254c56ac..46bfd3cd06 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/atomic.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/atomic.h
@@ -1,86 +1,86 @@
-#ifndef JEMALLOC_INTERNAL_ATOMIC_H
-#define JEMALLOC_INTERNAL_ATOMIC_H
+#ifndef JEMALLOC_INTERNAL_ATOMIC_H
+#define JEMALLOC_INTERNAL_ATOMIC_H
-#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
+#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
-#define JEMALLOC_U8_ATOMICS
-#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
-# include "jemalloc/internal/atomic_gcc_atomic.h"
-# if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS)
-# undef JEMALLOC_U8_ATOMICS
-# endif
-#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
-# error #include "jemalloc/internal/atomic_gcc_sync.h"
-# if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS)
-# undef JEMALLOC_U8_ATOMICS
-# endif
-#elif defined(_MSC_VER)
-# include "jemalloc/internal/atomic_msvc.h"
-#elif defined(JEMALLOC_C11_ATOMICS)
-# error #include "jemalloc/internal/atomic_c11.h"
-#else
-# error "Don't have atomics implemented on this platform."
-#endif
+#define JEMALLOC_U8_ATOMICS
+#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
+# include "jemalloc/internal/atomic_gcc_atomic.h"
+# if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS)
+# undef JEMALLOC_U8_ATOMICS
+# endif
+#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
+# error #include "jemalloc/internal/atomic_gcc_sync.h"
+# if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS)
+# undef JEMALLOC_U8_ATOMICS
+# endif
+#elif defined(_MSC_VER)
+# include "jemalloc/internal/atomic_msvc.h"
+#elif defined(JEMALLOC_C11_ATOMICS)
+# error #include "jemalloc/internal/atomic_c11.h"
+#else
+# error "Don't have atomics implemented on this platform."
+#endif
-/*
- * This header gives more or less a backport of C11 atomics. The user can write
- * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
- * counterparts of the C11 atomic functions for type, as so:
- * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
- * and then write things like:
- * int *some_ptr;
- * atomic_pi_t atomic_ptr_to_int;
- * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
- * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
- * assert(some_ptr == prev_value);
- * and expect things to work in the obvious way.
- *
- * Also included (with naming differences to avoid conflicts with the standard
- * library):
- * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
- * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
- */
+/*
+ * This header gives more or less a backport of C11 atomics. The user can write
+ * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
+ * counterparts of the C11 atomic functions for type, as so:
+ * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
+ * and then write things like:
+ * int *some_ptr;
+ * atomic_pi_t atomic_ptr_to_int;
+ * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
+ * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
+ * assert(some_ptr == prev_value);
+ * and expect things to work in the obvious way.
+ *
+ * Also included (with naming differences to avoid conflicts with the standard
+ * library):
+ * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
+ * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
+ */
-/*
- * Pure convenience, so that we don't have to type "atomic_memory_order_"
- * quite so often.
- */
-#define ATOMIC_RELAXED atomic_memory_order_relaxed
-#define ATOMIC_ACQUIRE atomic_memory_order_acquire
-#define ATOMIC_RELEASE atomic_memory_order_release
-#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
-#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
+/*
+ * Pure convenience, so that we don't have to type "atomic_memory_order_"
+ * quite so often.
+ */
+#define ATOMIC_RELAXED atomic_memory_order_relaxed
+#define ATOMIC_ACQUIRE atomic_memory_order_acquire
+#define ATOMIC_RELEASE atomic_memory_order_release
+#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
+#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
-/*
- * Not all platforms have 64-bit atomics. If we do, this #define exposes that
- * fact.
- */
+/*
+ * Not all platforms have 64-bit atomics. If we do, this #define exposes that
+ * fact.
+ */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-# define JEMALLOC_ATOMIC_U64
+# define JEMALLOC_ATOMIC_U64
#endif
-JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
+JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
-/*
- * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
- * platform that actually needs to know the size, MSVC.
- */
-JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
+/*
+ * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
+ * platform that actually needs to know the size, MSVC.
+ */
+JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
-JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
+JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
-JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
+JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
-JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
+JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
-JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0)
+JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0)
-JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
+JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
-#ifdef JEMALLOC_ATOMIC_U64
-JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
+#ifdef JEMALLOC_ATOMIC_U64
+JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
#endif
-#undef ATOMIC_INLINE
+#undef ATOMIC_INLINE
-#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
+#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
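For reference, the two generator macros above produce a C11-like API that is used with an explicit jemalloc memory-order argument. Below is a minimal usage sketch for the u32 variant generated by JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2); it assumes compilation inside jemalloc's internal build (where the backing atomic implementation has already been selected), and the refcount name and helper functions are illustrative, not taken from this patch.

	/* Illustrative caller; not part of the patch above. */
	static atomic_u32_t refcount = ATOMIC_INIT(0);

	static void
	refcount_acquire(void) {
		/* A plain counter bump only needs relaxed ordering. */
		atomic_fetch_add_u32(&refcount, 1, ATOMIC_RELAXED);
	}

	static bool
	refcount_release(void) {
		/* Acquire/release so the final decrement synchronizes with teardown. */
		uint32_t prev = atomic_fetch_sub_u32(&refcount, 1, ATOMIC_ACQ_REL);
		return prev == 1;
	}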
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h b/contrib/libs/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h
index 471515e82f..5019e6accc 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h
@@ -1,129 +1,129 @@
-#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
-#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
-
-#include "jemalloc/internal/assert.h"
-
-#define ATOMIC_INIT(...) {__VA_ARGS__}
-
-typedef enum {
- atomic_memory_order_relaxed,
- atomic_memory_order_acquire,
- atomic_memory_order_release,
- atomic_memory_order_acq_rel,
- atomic_memory_order_seq_cst
-} atomic_memory_order_t;
-
-ATOMIC_INLINE int
-atomic_enum_to_builtin(atomic_memory_order_t mo) {
- switch (mo) {
- case atomic_memory_order_relaxed:
- return __ATOMIC_RELAXED;
- case atomic_memory_order_acquire:
- return __ATOMIC_ACQUIRE;
- case atomic_memory_order_release:
- return __ATOMIC_RELEASE;
- case atomic_memory_order_acq_rel:
- return __ATOMIC_ACQ_REL;
- case atomic_memory_order_seq_cst:
- return __ATOMIC_SEQ_CST;
- }
- /* Can't happen; the switch is exhaustive. */
- not_reached();
-}
-
-ATOMIC_INLINE void
-atomic_fence(atomic_memory_order_t mo) {
- __atomic_thread_fence(atomic_enum_to_builtin(mo));
-}
-
-#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
- /* unused */ lg_size) \
-typedef struct { \
- type repr; \
-} atomic_##short_type##_t; \
- \
-ATOMIC_INLINE type \
-atomic_load_##short_type(const atomic_##short_type##_t *a, \
- atomic_memory_order_t mo) { \
- type result; \
- __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
- return result; \
-} \
- \
-ATOMIC_INLINE void \
-atomic_store_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- type result; \
- __atomic_exchange(&a->repr, &val, &result, \
- atomic_enum_to_builtin(mo)); \
- return result; \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
- UNUSED type *expected, type desired, \
- atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- return __atomic_compare_exchange(&a->repr, expected, &desired, \
- true, atomic_enum_to_builtin(success_mo), \
- atomic_enum_to_builtin(failure_mo)); \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
- UNUSED type *expected, type desired, \
- atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- return __atomic_compare_exchange(&a->repr, expected, &desired, \
- false, \
- atomic_enum_to_builtin(success_mo), \
- atomic_enum_to_builtin(failure_mo)); \
-}
-
-
-#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
- /* unused */ lg_size) \
-JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
- \
-ATOMIC_INLINE type \
-atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __atomic_fetch_add(&a->repr, val, \
- atomic_enum_to_builtin(mo)); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __atomic_fetch_sub(&a->repr, val, \
- atomic_enum_to_builtin(mo)); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __atomic_fetch_and(&a->repr, val, \
- atomic_enum_to_builtin(mo)); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __atomic_fetch_or(&a->repr, val, \
- atomic_enum_to_builtin(mo)); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __atomic_fetch_xor(&a->repr, val, \
- atomic_enum_to_builtin(mo)); \
-}
-
-#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */
+#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
+#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
+
+#include "jemalloc/internal/assert.h"
+
+#define ATOMIC_INIT(...) {__VA_ARGS__}
+
+typedef enum {
+ atomic_memory_order_relaxed,
+ atomic_memory_order_acquire,
+ atomic_memory_order_release,
+ atomic_memory_order_acq_rel,
+ atomic_memory_order_seq_cst
+} atomic_memory_order_t;
+
+ATOMIC_INLINE int
+atomic_enum_to_builtin(atomic_memory_order_t mo) {
+ switch (mo) {
+ case atomic_memory_order_relaxed:
+ return __ATOMIC_RELAXED;
+ case atomic_memory_order_acquire:
+ return __ATOMIC_ACQUIRE;
+ case atomic_memory_order_release:
+ return __ATOMIC_RELEASE;
+ case atomic_memory_order_acq_rel:
+ return __ATOMIC_ACQ_REL;
+ case atomic_memory_order_seq_cst:
+ return __ATOMIC_SEQ_CST;
+ }
+ /* Can't happen; the switch is exhaustive. */
+ not_reached();
+}
+
+ATOMIC_INLINE void
+atomic_fence(atomic_memory_order_t mo) {
+ __atomic_thread_fence(atomic_enum_to_builtin(mo));
+}
+
+#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
+ /* unused */ lg_size) \
+typedef struct { \
+ type repr; \
+} atomic_##short_type##_t; \
+ \
+ATOMIC_INLINE type \
+atomic_load_##short_type(const atomic_##short_type##_t *a, \
+ atomic_memory_order_t mo) { \
+ type result; \
+ __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
+ return result; \
+} \
+ \
+ATOMIC_INLINE void \
+atomic_store_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ type result; \
+ __atomic_exchange(&a->repr, &val, &result, \
+ atomic_enum_to_builtin(mo)); \
+ return result; \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
+ UNUSED type *expected, type desired, \
+ atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ return __atomic_compare_exchange(&a->repr, expected, &desired, \
+ true, atomic_enum_to_builtin(success_mo), \
+ atomic_enum_to_builtin(failure_mo)); \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
+ UNUSED type *expected, type desired, \
+ atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ return __atomic_compare_exchange(&a->repr, expected, &desired, \
+ false, \
+ atomic_enum_to_builtin(success_mo), \
+ atomic_enum_to_builtin(failure_mo)); \
+}
+
+
+#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
+ /* unused */ lg_size) \
+JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __atomic_fetch_add(&a->repr, val, \
+ atomic_enum_to_builtin(mo)); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __atomic_fetch_sub(&a->repr, val, \
+ atomic_enum_to_builtin(mo)); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __atomic_fetch_and(&a->repr, val, \
+ atomic_enum_to_builtin(mo)); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __atomic_fetch_or(&a->repr, val, \
+ atomic_enum_to_builtin(mo)); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __atomic_fetch_xor(&a->repr, val, \
+ atomic_enum_to_builtin(mo)); \
+}
+
+#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */
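To make the generator easier to follow, here is a hand-expanded sketch of two of the functions that JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) yields on this GCC/Clang path (the lg_size argument is unused here). It is written out for illustration and is not copied from the actual preprocessor output.

	typedef struct {
		uint32_t repr;
	} atomic_u32_t;

	ATOMIC_INLINE uint32_t
	atomic_load_u32(const atomic_u32_t *a, atomic_memory_order_t mo) {
		uint32_t result;
		/* Generic builtin: copies a->repr into result with the given order. */
		__atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo));
		return result;
	}

	ATOMIC_INLINE uint32_t
	atomic_fetch_add_u32(atomic_u32_t *a, uint32_t val, atomic_memory_order_t mo) {
		/* Returns the value held immediately before the addition. */
		return __atomic_fetch_add(&a->repr, val, atomic_enum_to_builtin(mo));
	}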
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/atomic_msvc.h b/contrib/libs/jemalloc/include/jemalloc/internal/atomic_msvc.h
index 67057ce508..d8d680a7f1 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/atomic_msvc.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/atomic_msvc.h
@@ -1,158 +1,158 @@
-#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
-#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H
-
-#define ATOMIC_INIT(...) {__VA_ARGS__}
-
-typedef enum {
- atomic_memory_order_relaxed,
- atomic_memory_order_acquire,
- atomic_memory_order_release,
- atomic_memory_order_acq_rel,
- atomic_memory_order_seq_cst
-} atomic_memory_order_t;
-
-typedef char atomic_repr_0_t;
-typedef short atomic_repr_1_t;
-typedef long atomic_repr_2_t;
-typedef __int64 atomic_repr_3_t;
-
-ATOMIC_INLINE void
-atomic_fence(atomic_memory_order_t mo) {
- _ReadWriteBarrier();
-# if defined(_M_ARM) || defined(_M_ARM64)
- /* ARM needs a barrier for everything but relaxed. */
- if (mo != atomic_memory_order_relaxed) {
- MemoryBarrier();
- }
-# elif defined(_M_IX86) || defined (_M_X64)
- /* x86 needs a barrier only for seq_cst. */
- if (mo == atomic_memory_order_seq_cst) {
- MemoryBarrier();
- }
-# else
-# error "Don't know how to create atomics for this platform for MSVC."
-# endif
- _ReadWriteBarrier();
-}
-
-#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t
-
-#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
-#define ATOMIC_RAW_CONCAT(a, b) a ## b
-
-#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \
- base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
-
-#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
- ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
-
-#define ATOMIC_INTERLOCKED_SUFFIX_0 8
-#define ATOMIC_INTERLOCKED_SUFFIX_1 16
-#define ATOMIC_INTERLOCKED_SUFFIX_2
-#define ATOMIC_INTERLOCKED_SUFFIX_3 64
-
-#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
-typedef struct { \
- ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
-} atomic_##short_type##_t; \
- \
-ATOMIC_INLINE type \
-atomic_load_##short_type(const atomic_##short_type##_t *a, \
- atomic_memory_order_t mo) { \
- ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
- if (mo != atomic_memory_order_relaxed) { \
- atomic_fence(atomic_memory_order_acquire); \
- } \
- return (type) ret; \
-} \
- \
-ATOMIC_INLINE void \
-atomic_store_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- if (mo != atomic_memory_order_relaxed) { \
- atomic_fence(atomic_memory_order_release); \
- } \
- a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \
- if (mo == atomic_memory_order_seq_cst) { \
- atomic_fence(atomic_memory_order_seq_cst); \
- } \
-} \
- \
-ATOMIC_INLINE type \
-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
- lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
- type *expected, type desired, atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- ATOMIC_INTERLOCKED_REPR(lg_size) e = \
- (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \
- ATOMIC_INTERLOCKED_REPR(lg_size) d = \
- (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
- ATOMIC_INTERLOCKED_REPR(lg_size) old = \
- ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \
- lg_size)(&a->repr, d, e); \
- if (old == e) { \
- return true; \
- } else { \
- *expected = (type)old; \
- return false; \
- } \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
- type *expected, type desired, atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- /* We implement the weak version with strong semantics. */ \
- return atomic_compare_exchange_weak_##short_type(a, expected, \
- desired, success_mo, failure_mo); \
-}
-
-
-#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
-JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
- \
-ATOMIC_INLINE type \
-atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \
- lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- /* \
- * MSVC warns on negation of unsigned operands, but for us it \
- * gives exactly the right semantics (MAX_TYPE + 1 - operand). \
- */ \
- __pragma(warning(push)) \
- __pragma(warning(disable: 4146)) \
- return atomic_fetch_add_##short_type(a, -val, mo); \
- __pragma(warning(pop)) \
-} \
-ATOMIC_INLINE type \
-atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \
- &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
-} \
-ATOMIC_INLINE type \
-atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \
- &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
-} \
-ATOMIC_INLINE type \
-atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \
- &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
-}
-
-#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */
+#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
+#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H
+
+#define ATOMIC_INIT(...) {__VA_ARGS__}
+
+typedef enum {
+ atomic_memory_order_relaxed,
+ atomic_memory_order_acquire,
+ atomic_memory_order_release,
+ atomic_memory_order_acq_rel,
+ atomic_memory_order_seq_cst
+} atomic_memory_order_t;
+
+typedef char atomic_repr_0_t;
+typedef short atomic_repr_1_t;
+typedef long atomic_repr_2_t;
+typedef __int64 atomic_repr_3_t;
+
+ATOMIC_INLINE void
+atomic_fence(atomic_memory_order_t mo) {
+ _ReadWriteBarrier();
+# if defined(_M_ARM) || defined(_M_ARM64)
+ /* ARM needs a barrier for everything but relaxed. */
+ if (mo != atomic_memory_order_relaxed) {
+ MemoryBarrier();
+ }
+# elif defined(_M_IX86) || defined (_M_X64)
+ /* x86 needs a barrier only for seq_cst. */
+ if (mo == atomic_memory_order_seq_cst) {
+ MemoryBarrier();
+ }
+# else
+# error "Don't know how to create atomics for this platform for MSVC."
+# endif
+ _ReadWriteBarrier();
+}
+
+#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t
+
+#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
+#define ATOMIC_RAW_CONCAT(a, b) a ## b
+
+#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \
+ base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
+
+#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
+ ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
+
+#define ATOMIC_INTERLOCKED_SUFFIX_0 8
+#define ATOMIC_INTERLOCKED_SUFFIX_1 16
+#define ATOMIC_INTERLOCKED_SUFFIX_2
+#define ATOMIC_INTERLOCKED_SUFFIX_3 64
+
+#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
+typedef struct { \
+ ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
+} atomic_##short_type##_t; \
+ \
+ATOMIC_INLINE type \
+atomic_load_##short_type(const atomic_##short_type##_t *a, \
+ atomic_memory_order_t mo) { \
+ ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
+ if (mo != atomic_memory_order_relaxed) { \
+ atomic_fence(atomic_memory_order_acquire); \
+ } \
+ return (type) ret; \
+} \
+ \
+ATOMIC_INLINE void \
+atomic_store_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ if (mo != atomic_memory_order_relaxed) { \
+ atomic_fence(atomic_memory_order_release); \
+ } \
+ a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \
+ if (mo == atomic_memory_order_seq_cst) { \
+ atomic_fence(atomic_memory_order_seq_cst); \
+ } \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
+ lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
+ type *expected, type desired, atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ ATOMIC_INTERLOCKED_REPR(lg_size) e = \
+ (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \
+ ATOMIC_INTERLOCKED_REPR(lg_size) d = \
+ (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
+ ATOMIC_INTERLOCKED_REPR(lg_size) old = \
+ ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \
+ lg_size)(&a->repr, d, e); \
+ if (old == e) { \
+ return true; \
+ } else { \
+ *expected = (type)old; \
+ return false; \
+ } \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
+ type *expected, type desired, atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ /* We implement the weak version with strong semantics. */ \
+ return atomic_compare_exchange_weak_##short_type(a, expected, \
+ desired, success_mo, failure_mo); \
+}
+
+
+#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
+JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \
+ lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ /* \
+ * MSVC warns on negation of unsigned operands, but for us it \
+ * gives exactly the right semantics (MAX_TYPE + 1 - operand). \
+ */ \
+ __pragma(warning(push)) \
+ __pragma(warning(disable: 4146)) \
+ return atomic_fetch_add_##short_type(a, -val, mo); \
+ __pragma(warning(pop)) \
+} \
+ATOMIC_INLINE type \
+atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \
+ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
+} \
+ATOMIC_INLINE type \
+atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \
+ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
+} \
+ATOMIC_INLINE type \
+atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \
+ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
+}
+
+#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */
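The token-pasting helpers above pick the Interlocked intrinsic by operand width: lg_size values 0, 1 and 3 append the 8, 16 and 64 suffixes, while lg_size 2 appends nothing because the unsuffixed intrinsics are the 32-bit ones. A worked expansion, for illustration:

	ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, 2)
	  -> ATOMIC_CONCAT(_InterlockedExchangeAdd, ATOMIC_INTERLOCKED_SUFFIX_2)
	  -> _InterlockedExchangeAdd        /* plain 32-bit intrinsic */

	ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, 3)
	  -> _InterlockedExchangeAdd64      /* 64-bit intrinsic */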
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_externs.h b/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_externs.h
index 0f997e18be..6bc48c18a4 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_externs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_externs.h
@@ -1,32 +1,32 @@
-#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
-#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
-
-extern bool opt_background_thread;
-extern size_t opt_max_background_threads;
-extern malloc_mutex_t background_thread_lock;
-extern atomic_b_t background_thread_enabled_state;
-extern size_t n_background_threads;
-extern size_t max_background_threads;
-extern background_thread_info_t *background_thread_info;
-
-bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
-bool background_threads_enable(tsd_t *tsd);
-bool background_threads_disable(tsd_t *tsd);
-void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, size_t npages_new);
-void background_thread_prefork0(tsdn_t *tsdn);
-void background_thread_prefork1(tsdn_t *tsdn);
-void background_thread_postfork_parent(tsdn_t *tsdn);
-void background_thread_postfork_child(tsdn_t *tsdn);
-bool background_thread_stats_read(tsdn_t *tsdn,
- background_thread_stats_t *stats);
-void background_thread_ctl_init(tsdn_t *tsdn);
-
-#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
-extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
- void *(*)(void *), void *__restrict);
-#endif
-bool background_thread_boot0(void);
-bool background_thread_boot1(tsdn_t *tsdn);
-
-#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
+#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
+#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
+
+extern bool opt_background_thread;
+extern size_t opt_max_background_threads;
+extern malloc_mutex_t background_thread_lock;
+extern atomic_b_t background_thread_enabled_state;
+extern size_t n_background_threads;
+extern size_t max_background_threads;
+extern background_thread_info_t *background_thread_info;
+
+bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
+bool background_threads_enable(tsd_t *tsd);
+bool background_threads_disable(tsd_t *tsd);
+void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
+ arena_decay_t *decay, size_t npages_new);
+void background_thread_prefork0(tsdn_t *tsdn);
+void background_thread_prefork1(tsdn_t *tsdn);
+void background_thread_postfork_parent(tsdn_t *tsdn);
+void background_thread_postfork_child(tsdn_t *tsdn);
+bool background_thread_stats_read(tsdn_t *tsdn,
+ background_thread_stats_t *stats);
+void background_thread_ctl_init(tsdn_t *tsdn);
+
+#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
+extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
+ void *(*)(void *), void *__restrict);
+#endif
+bool background_thread_boot0(void);
+bool background_thread_boot1(tsdn_t *tsdn);
+
+#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_inlines.h b/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_inlines.h
index f85e86fa37..20f73a5525 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_inlines.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_inlines.h
@@ -1,62 +1,62 @@
-#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
-#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
-
-JEMALLOC_ALWAYS_INLINE bool
-background_thread_enabled(void) {
- return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-background_thread_enabled_set(tsdn_t *tsdn, bool state) {
- malloc_mutex_assert_owner(tsdn, &background_thread_lock);
- atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
-}
-
-JEMALLOC_ALWAYS_INLINE background_thread_info_t *
-arena_background_thread_info_get(arena_t *arena) {
- unsigned arena_ind = arena_ind_get(arena);
- return &background_thread_info[arena_ind % max_background_threads];
-}
-
-JEMALLOC_ALWAYS_INLINE background_thread_info_t *
-background_thread_info_get(size_t ind) {
- return &background_thread_info[ind % max_background_threads];
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-background_thread_wakeup_time_get(background_thread_info_t *info) {
- uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
- assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
- (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
- return next_wakeup;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
- uint64_t wakeup_time) {
- malloc_mutex_assert_owner(tsdn, &info->mtx);
- atomic_store_b(&info->indefinite_sleep,
- wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
- nstime_init(&info->next_wakeup, wakeup_time);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-background_thread_indefinite_sleep(background_thread_info_t *info) {
- return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
- bool is_background_thread) {
- if (!background_thread_enabled() || is_background_thread) {
- return;
- }
- background_thread_info_t *info =
- arena_background_thread_info_get(arena);
- if (background_thread_indefinite_sleep(info)) {
- background_thread_interval_check(tsdn, arena,
- &arena->decay_dirty, 0);
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
+#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
+#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
+
+JEMALLOC_ALWAYS_INLINE bool
+background_thread_enabled(void) {
+ return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+background_thread_enabled_set(tsdn_t *tsdn, bool state) {
+ malloc_mutex_assert_owner(tsdn, &background_thread_lock);
+ atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
+}
+
+JEMALLOC_ALWAYS_INLINE background_thread_info_t *
+arena_background_thread_info_get(arena_t *arena) {
+ unsigned arena_ind = arena_ind_get(arena);
+ return &background_thread_info[arena_ind % max_background_threads];
+}
+
+JEMALLOC_ALWAYS_INLINE background_thread_info_t *
+background_thread_info_get(size_t ind) {
+ return &background_thread_info[ind % max_background_threads];
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+background_thread_wakeup_time_get(background_thread_info_t *info) {
+ uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
+ assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
+ (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
+ return next_wakeup;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
+ uint64_t wakeup_time) {
+ malloc_mutex_assert_owner(tsdn, &info->mtx);
+ atomic_store_b(&info->indefinite_sleep,
+ wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
+ nstime_init(&info->next_wakeup, wakeup_time);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+background_thread_indefinite_sleep(background_thread_info_t *info) {
+ return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
+ bool is_background_thread) {
+ if (!background_thread_enabled() || is_background_thread) {
+ return;
+ }
+ background_thread_info_t *info =
+ arena_background_thread_info_get(arena);
+ if (background_thread_indefinite_sleep(info)) {
+ background_thread_interval_check(tsdn, arena,
+ &arena->decay_dirty, 0);
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
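Note that both lookup helpers above fold the index with a modulo, so multiple arenas can share a single background_thread_info_t slot when there are more arenas than background threads. For example, if max_background_threads is 4 (DEFAULT_NUM_BACKGROUND_THREAD in the structs header below), arenas 1, 5, 9, ... all resolve to background_thread_info[1] and are serviced by the same background thread.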
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_structs.h b/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_structs.h
index c02aa434c7..8b4dc0711d 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_structs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/background_thread_structs.h
@@ -1,54 +1,54 @@
-#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
-#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
-
-/* This file really combines "structs" and "types", but only transitionally. */
-
-#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
-# define JEMALLOC_PTHREAD_CREATE_WRAPPER
-#endif
-
-#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
-#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
-#define DEFAULT_NUM_BACKGROUND_THREAD 4
-
-typedef enum {
- background_thread_stopped,
- background_thread_started,
- /* Thread waits on the global lock when paused (for arena_reset). */
- background_thread_paused,
-} background_thread_state_t;
-
-struct background_thread_info_s {
-#ifdef JEMALLOC_BACKGROUND_THREAD
- /* Background thread is pthread specific. */
- pthread_t thread;
- pthread_cond_t cond;
-#endif
- malloc_mutex_t mtx;
- background_thread_state_t state;
- /* When true, it means no wakeup scheduled. */
- atomic_b_t indefinite_sleep;
- /* Next scheduled wakeup time (absolute time in ns). */
- nstime_t next_wakeup;
- /*
- * Since the last background thread run, newly added number of pages
- * that need to be purged by the next wakeup. This is adjusted on
- * epoch advance, and is used to determine whether we should signal the
- * background thread to wake up earlier.
- */
- size_t npages_to_purge_new;
- /* Stats: total number of runs since started. */
- uint64_t tot_n_runs;
- /* Stats: total sleep time since started. */
- nstime_t tot_sleep_time;
-};
-typedef struct background_thread_info_s background_thread_info_t;
-
-struct background_thread_stats_s {
- size_t num_threads;
- uint64_t num_runs;
- nstime_t run_interval;
-};
-typedef struct background_thread_stats_s background_thread_stats_t;
-
-#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */
+#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
+#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
+
+/* This file really combines "structs" and "types", but only transitionally. */
+
+#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
+# define JEMALLOC_PTHREAD_CREATE_WRAPPER
+#endif
+
+#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
+#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
+#define DEFAULT_NUM_BACKGROUND_THREAD 4
+
+typedef enum {
+ background_thread_stopped,
+ background_thread_started,
+ /* Thread waits on the global lock when paused (for arena_reset). */
+ background_thread_paused,
+} background_thread_state_t;
+
+struct background_thread_info_s {
+#ifdef JEMALLOC_BACKGROUND_THREAD
+ /* Background thread is pthread specific. */
+ pthread_t thread;
+ pthread_cond_t cond;
+#endif
+ malloc_mutex_t mtx;
+ background_thread_state_t state;
+ /* When true, it means no wakeup scheduled. */
+ atomic_b_t indefinite_sleep;
+ /* Next scheduled wakeup time (absolute time in ns). */
+ nstime_t next_wakeup;
+ /*
+ * Since the last background thread run, newly added number of pages
+ * that need to be purged by the next wakeup. This is adjusted on
+ * epoch advance, and is used to determine whether we should signal the
+ * background thread to wake up earlier.
+ */
+ size_t npages_to_purge_new;
+ /* Stats: total number of runs since started. */
+ uint64_t tot_n_runs;
+ /* Stats: total sleep time since started. */
+ nstime_t tot_sleep_time;
+};
+typedef struct background_thread_info_s background_thread_info_t;
+
+struct background_thread_stats_s {
+ size_t num_threads;
+ uint64_t num_runs;
+ nstime_t run_interval;
+};
+typedef struct background_thread_stats_s background_thread_stats_t;
+
+#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/base_externs.h b/contrib/libs/jemalloc/include/jemalloc/internal/base_externs.h
index 7b705c9b4d..1b289373f4 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/base_externs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/base_externs.h
@@ -1,22 +1,22 @@
-#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
-#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
-
-extern metadata_thp_mode_t opt_metadata_thp;
-extern const char *metadata_thp_mode_names[];
-
-base_t *b0get(void);
-base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-void base_delete(tsdn_t *tsdn, base_t *base);
-extent_hooks_t *base_extent_hooks_get(base_t *base);
-extent_hooks_t *base_extent_hooks_set(base_t *base,
- extent_hooks_t *extent_hooks);
-void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
-extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
-void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
- size_t *resident, size_t *mapped, size_t *n_thp);
-void base_prefork(tsdn_t *tsdn, base_t *base);
-void base_postfork_parent(tsdn_t *tsdn, base_t *base);
-void base_postfork_child(tsdn_t *tsdn, base_t *base);
-bool base_boot(tsdn_t *tsdn);
-
-#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
+#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
+#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
+
+extern metadata_thp_mode_t opt_metadata_thp;
+extern const char *metadata_thp_mode_names[];
+
+base_t *b0get(void);
+base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+void base_delete(tsdn_t *tsdn, base_t *base);
+extent_hooks_t *base_extent_hooks_get(base_t *base);
+extent_hooks_t *base_extent_hooks_set(base_t *base,
+ extent_hooks_t *extent_hooks);
+void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
+extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
+void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
+ size_t *resident, size_t *mapped, size_t *n_thp);
+void base_prefork(tsdn_t *tsdn, base_t *base);
+void base_postfork_parent(tsdn_t *tsdn, base_t *base);
+void base_postfork_child(tsdn_t *tsdn, base_t *base);
+bool base_boot(tsdn_t *tsdn);
+
+#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/base_inlines.h b/contrib/libs/jemalloc/include/jemalloc/internal/base_inlines.h
index aec0e2e1e1..2c6763ada9 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/base_inlines.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/base_inlines.h
@@ -1,13 +1,13 @@
-#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
-#define JEMALLOC_INTERNAL_BASE_INLINES_H
-
-static inline unsigned
-base_ind_get(const base_t *base) {
- return base->ind;
-}
-
-static inline bool
-metadata_thp_enabled(void) {
- return (opt_metadata_thp != metadata_thp_disabled);
-}
-#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
+#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
+#define JEMALLOC_INTERNAL_BASE_INLINES_H
+
+static inline unsigned
+base_ind_get(const base_t *base) {
+ return base->ind;
+}
+
+static inline bool
+metadata_thp_enabled(void) {
+ return (opt_metadata_thp != metadata_thp_disabled);
+}
+#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/base_structs.h b/contrib/libs/jemalloc/include/jemalloc/internal/base_structs.h
index 07f214eb2f..c1bed8812c 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/base_structs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/base_structs.h
@@ -1,59 +1,59 @@
-#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
-#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
-
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/sc.h"
-
-/* Embedded at the beginning of every block of base-managed virtual memory. */
-struct base_block_s {
- /* Total size of block's virtual memory mapping. */
- size_t size;
-
- /* Next block in list of base's blocks. */
- base_block_t *next;
-
- /* Tracks unused trailing space. */
- extent_t extent;
-};
-
-struct base_s {
- /* Associated arena's index within the arenas array. */
- unsigned ind;
-
- /*
- * User-configurable extent hook functions. Points to an
- * extent_hooks_t.
- */
- atomic_p_t extent_hooks;
-
- /* Protects base_alloc() and base_stats_get() operations. */
- malloc_mutex_t mtx;
-
- /* Using THP when true (metadata_thp auto mode). */
- bool auto_thp_switched;
- /*
- * Most recent size class in the series of increasingly large base
- * extents. Logarithmic spacing between subsequent allocations ensures
- * that the total number of distinct mappings remains small.
- */
- pszind_t pind_last;
-
- /* Serial number generation state. */
- size_t extent_sn_next;
-
- /* Chain of all blocks associated with base. */
- base_block_t *blocks;
-
- /* Heap of extents that track unused trailing space within blocks. */
- extent_heap_t avail[SC_NSIZES];
-
- /* Stats, only maintained if config_stats. */
- size_t allocated;
- size_t resident;
- size_t mapped;
- /* Number of THP regions touched. */
- size_t n_thp;
-};
-
-#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
+#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
+#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/sc.h"
+
+/* Embedded at the beginning of every block of base-managed virtual memory. */
+struct base_block_s {
+ /* Total size of block's virtual memory mapping. */
+ size_t size;
+
+ /* Next block in list of base's blocks. */
+ base_block_t *next;
+
+ /* Tracks unused trailing space. */
+ extent_t extent;
+};
+
+struct base_s {
+ /* Associated arena's index within the arenas array. */
+ unsigned ind;
+
+ /*
+ * User-configurable extent hook functions. Points to an
+ * extent_hooks_t.
+ */
+ atomic_p_t extent_hooks;
+
+ /* Protects base_alloc() and base_stats_get() operations. */
+ malloc_mutex_t mtx;
+
+ /* Using THP when true (metadata_thp auto mode). */
+ bool auto_thp_switched;
+ /*
+ * Most recent size class in the series of increasingly large base
+ * extents. Logarithmic spacing between subsequent allocations ensures
+ * that the total number of distinct mappings remains small.
+ */
+ pszind_t pind_last;
+
+ /* Serial number generation state. */
+ size_t extent_sn_next;
+
+ /* Chain of all blocks associated with base. */
+ base_block_t *blocks;
+
+ /* Heap of extents that track unused trailing space within blocks. */
+ extent_heap_t avail[SC_NSIZES];
+
+ /* Stats, only maintained if config_stats. */
+ size_t allocated;
+ size_t resident;
+ size_t mapped;
+ /* Number of THP regions touched. */
+ size_t n_thp;
+};
+
+#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
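Because extent_hooks is declared as an atomic_p_t rather than a plain pointer, readers and writers go through the void * atomics generated earlier in this patch. The sketch below shows how such a field would typically be accessed; the _peek/_publish names are hypothetical (the real accessors are the base_extent_hooks_get/set functions declared in base_externs.h above), and it assumes the surrounding jemalloc internal headers.

	static inline extent_hooks_t *
	base_extent_hooks_peek(base_t *base) {
		/* Acquire: hook contents written before the pointer publish are visible. */
		return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, ATOMIC_ACQUIRE);
	}

	static inline void
	base_extent_hooks_publish(base_t *base, extent_hooks_t *hooks) {
		/* Release pairs with the acquire load above. */
		atomic_store_p(&base->extent_hooks, hooks, ATOMIC_RELEASE);
	}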
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/base_types.h b/contrib/libs/jemalloc/include/jemalloc/internal/base_types.h
index b6db77df7c..251a67ceff 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/base_types.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/base_types.h
@@ -1,33 +1,33 @@
-#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
-#define JEMALLOC_INTERNAL_BASE_TYPES_H
-
-typedef struct base_block_s base_block_t;
-typedef struct base_s base_t;
-
-#define METADATA_THP_DEFAULT metadata_thp_disabled
-
-/*
- * In auto mode, arenas switch to huge pages for the base allocator on the
- * second base block. a0 switches to thp on the 5th block (after 20 megabytes
- * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
- */
-
-#define BASE_AUTO_THP_THRESHOLD 2
-#define BASE_AUTO_THP_THRESHOLD_A0 5
-
-typedef enum {
- metadata_thp_disabled = 0,
- /*
- * Lazily enable hugepage for metadata. To avoid high RSS caused by THP
- * + low usage arena (i.e. THP becomes a significant percentage), the
- * "auto" option only starts using THP after a base allocator used up
- * the first THP region. Starting from the second hugepage (in a single
- * arena), "auto" behaves the same as "always", i.e. madvise hugepage
- * right away.
- */
- metadata_thp_auto = 1,
- metadata_thp_always = 2,
- metadata_thp_mode_limit = 3
-} metadata_thp_mode_t;
-
-#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
+#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
+#define JEMALLOC_INTERNAL_BASE_TYPES_H
+
+typedef struct base_block_s base_block_t;
+typedef struct base_s base_t;
+
+#define METADATA_THP_DEFAULT metadata_thp_disabled
+
+/*
+ * In auto mode, arenas switch to huge pages for the base allocator on the
+ * second base block. a0 switches to thp on the 5th block (after 20 megabytes
+ * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
+ */
+
+#define BASE_AUTO_THP_THRESHOLD 2
+#define BASE_AUTO_THP_THRESHOLD_A0 5
+
+typedef enum {
+ metadata_thp_disabled = 0,
+ /*
+ * Lazily enable hugepage for metadata. To avoid high RSS caused by THP
+ * + low usage arena (i.e. THP becomes a significant percentage), the
+ * "auto" option only starts using THP after a base allocator used up
+ * the first THP region. Starting from the second hugepage (in a single
+ * arena), "auto" behaves the same as "always", i.e. madvise hugepage
+ * right away.
+ */
+ metadata_thp_auto = 1,
+ metadata_thp_always = 2,
+ metadata_thp_mode_limit = 3
+} metadata_thp_mode_t;
+
+#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/bin.h b/contrib/libs/jemalloc/include/jemalloc/internal/bin.h
index 8547e89309..6142beb60c 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/bin.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/bin.h
@@ -1,123 +1,123 @@
-#ifndef JEMALLOC_INTERNAL_BIN_H
-#define JEMALLOC_INTERNAL_BIN_H
-
-#include "jemalloc/internal/bin_stats.h"
-#include "jemalloc/internal/bin_types.h"
-#include "jemalloc/internal/extent_types.h"
-#include "jemalloc/internal/extent_structs.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/sc.h"
-
-/*
- * A bin contains a set of extents that are currently being used for slab
- * allocations.
- */
-
-/*
- * Read-only information associated with each element of arena_t's bins array
- * is stored separately, partly to reduce memory usage (only one copy, rather
- * than one per arena), but mainly to avoid false cacheline sharing.
- *
- * Each slab has the following layout:
- *
- * /--------------------\
- * | region 0 |
- * |--------------------|
- * | region 1 |
- * |--------------------|
- * | ... |
- * | ... |
- * | ... |
- * |--------------------|
- * | region nregs-1 |
- * \--------------------/
- */
-typedef struct bin_info_s bin_info_t;
-struct bin_info_s {
- /* Size of regions in a slab for this bin's size class. */
- size_t reg_size;
-
- /* Total size of a slab for this bin's size class. */
- size_t slab_size;
-
- /* Total number of regions in a slab for this bin's size class. */
- uint32_t nregs;
-
- /* Number of sharded bins in each arena for this size class. */
- uint32_t n_shards;
-
- /*
- * Metadata used to manipulate bitmaps for slabs associated with this
- * bin.
- */
- bitmap_info_t bitmap_info;
-};
-
-extern bin_info_t bin_infos[SC_NBINS];
-
-typedef struct bin_s bin_t;
-struct bin_s {
- /* All operations on bin_t fields require lock ownership. */
- malloc_mutex_t lock;
-
- /*
- * Current slab being used to service allocations of this bin's size
- * class. slabcur is independent of slabs_{nonfull,full}; whenever
- * slabcur is reassigned, the previous slab must be deallocated or
- * inserted into slabs_{nonfull,full}.
- */
- extent_t *slabcur;
-
- /*
- * Heap of non-full slabs. This heap is used to assure that new
- * allocations come from the non-full slab that is oldest/lowest in
- * memory.
- */
- extent_heap_t slabs_nonfull;
-
- /* List used to track full slabs. */
- extent_list_t slabs_full;
-
- /* Bin statistics. */
- bin_stats_t stats;
-};
-
-/* A set of sharded bins of the same size class. */
-typedef struct bins_s bins_t;
-struct bins_s {
- /* Sharded bins. Dynamically sized. */
- bin_t *bin_shards;
-};
-
-void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
-bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
- size_t end_size, size_t nshards);
-void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
-
-/* Initializes a bin to empty. Returns true on error. */
-bool bin_init(bin_t *bin);
-
-/* Forking. */
-void bin_prefork(tsdn_t *tsdn, bin_t *bin);
-void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
-void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
-
-/* Stats. */
-static inline void
-bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
- malloc_mutex_lock(tsdn, &bin->lock);
- malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
- dst_bin_stats->nmalloc += bin->stats.nmalloc;
- dst_bin_stats->ndalloc += bin->stats.ndalloc;
- dst_bin_stats->nrequests += bin->stats.nrequests;
- dst_bin_stats->curregs += bin->stats.curregs;
- dst_bin_stats->nfills += bin->stats.nfills;
- dst_bin_stats->nflushes += bin->stats.nflushes;
- dst_bin_stats->nslabs += bin->stats.nslabs;
- dst_bin_stats->reslabs += bin->stats.reslabs;
- dst_bin_stats->curslabs += bin->stats.curslabs;
- dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs;
- malloc_mutex_unlock(tsdn, &bin->lock);
-}
-
-#endif /* JEMALLOC_INTERNAL_BIN_H */
+#ifndef JEMALLOC_INTERNAL_BIN_H
+#define JEMALLOC_INTERNAL_BIN_H
+
+#include "jemalloc/internal/bin_stats.h"
+#include "jemalloc/internal/bin_types.h"
+#include "jemalloc/internal/extent_types.h"
+#include "jemalloc/internal/extent_structs.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/sc.h"
+
+/*
+ * A bin contains a set of extents that are currently being used for slab
+ * allocations.
+ */
+
+/*
+ * Read-only information associated with each element of arena_t's bins array
+ * is stored separately, partly to reduce memory usage (only one copy, rather
+ * than one per arena), but mainly to avoid false cacheline sharing.
+ *
+ * Each slab has the following layout:
+ *
+ * /--------------------\
+ * | region 0 |
+ * |--------------------|
+ * | region 1 |
+ * |--------------------|
+ * | ... |
+ * | ... |
+ * | ... |
+ * |--------------------|
+ * | region nregs-1 |
+ * \--------------------/
+ */
+typedef struct bin_info_s bin_info_t;
+struct bin_info_s {
+ /* Size of regions in a slab for this bin's size class. */
+ size_t reg_size;
+
+ /* Total size of a slab for this bin's size class. */
+ size_t slab_size;
+
+ /* Total number of regions in a slab for this bin's size class. */
+ uint32_t nregs;
+
+ /* Number of sharded bins in each arena for this size class. */
+ uint32_t n_shards;
+
+ /*
+ * Metadata used to manipulate bitmaps for slabs associated with this
+ * bin.
+ */
+ bitmap_info_t bitmap_info;
+};
+
+extern bin_info_t bin_infos[SC_NBINS];
+
+typedef struct bin_s bin_t;
+struct bin_s {
+ /* All operations on bin_t fields require lock ownership. */
+ malloc_mutex_t lock;
+
+ /*
+ * Current slab being used to service allocations of this bin's size
+ * class. slabcur is independent of slabs_{nonfull,full}; whenever
+ * slabcur is reassigned, the previous slab must be deallocated or
+ * inserted into slabs_{nonfull,full}.
+ */
+ extent_t *slabcur;
+
+ /*
+ * Heap of non-full slabs. This heap is used to assure that new
+ * allocations come from the non-full slab that is oldest/lowest in
+ * memory.
+ */
+ extent_heap_t slabs_nonfull;
+
+ /* List used to track full slabs. */
+ extent_list_t slabs_full;
+
+ /* Bin statistics. */
+ bin_stats_t stats;
+};
+
+/* A set of sharded bins of the same size class. */
+typedef struct bins_s bins_t;
+struct bins_s {
+ /* Sharded bins. Dynamically sized. */
+ bin_t *bin_shards;
+};
+
+void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
+bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
+ size_t end_size, size_t nshards);
+void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
+
+/* Initializes a bin to empty. Returns true on error. */
+bool bin_init(bin_t *bin);
+
+/* Forking. */
+void bin_prefork(tsdn_t *tsdn, bin_t *bin);
+void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
+void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
+
+/* Stats. */
+static inline void
+bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
+ malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
+ dst_bin_stats->nmalloc += bin->stats.nmalloc;
+ dst_bin_stats->ndalloc += bin->stats.ndalloc;
+ dst_bin_stats->nrequests += bin->stats.nrequests;
+ dst_bin_stats->curregs += bin->stats.curregs;
+ dst_bin_stats->nfills += bin->stats.nfills;
+ dst_bin_stats->nflushes += bin->stats.nflushes;
+ dst_bin_stats->nslabs += bin->stats.nslabs;
+ dst_bin_stats->reslabs += bin->stats.reslabs;
+ dst_bin_stats->curslabs += bin->stats.curslabs;
+ dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+}
+
+#endif /* JEMALLOC_INTERNAL_BIN_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/bin_stats.h b/contrib/libs/jemalloc/include/jemalloc/internal/bin_stats.h
index d04519c824..f457e810dc 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/bin_stats.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/bin_stats.h
@@ -1,54 +1,54 @@
-#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
-#define JEMALLOC_INTERNAL_BIN_STATS_H
-
-#include "jemalloc/internal/mutex_prof.h"
-
-typedef struct bin_stats_s bin_stats_t;
-struct bin_stats_s {
- /*
- * Total number of allocation/deallocation requests served directly by
- * the bin. Note that tcache may allocate an object, then recycle it
- * many times, resulting in many increments to nrequests, but only one
- * each to nmalloc and ndalloc.
- */
- uint64_t nmalloc;
- uint64_t ndalloc;
-
- /*
- * Number of allocation requests that correspond to the size of this
- * bin. This includes requests served by tcache, though tcache only
- * periodically merges into this counter.
- */
- uint64_t nrequests;
-
- /*
- * Current number of regions of this size class, including regions
- * currently cached by tcache.
- */
- size_t curregs;
-
- /* Number of tcache fills from this bin. */
- uint64_t nfills;
-
- /* Number of tcache flushes to this bin. */
- uint64_t nflushes;
-
- /* Total number of slabs created for this bin's size class. */
- uint64_t nslabs;
-
- /*
- * Total number of slabs reused by extracting them from the slabs heap
- * for this bin's size class.
- */
- uint64_t reslabs;
-
- /* Current number of slabs in this bin. */
- size_t curslabs;
-
- /* Current size of nonfull slabs heap in this bin. */
- size_t nonfull_slabs;
-
- mutex_prof_data_t mutex_data;
-};
-
-#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
+#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
+#define JEMALLOC_INTERNAL_BIN_STATS_H
+
+#include "jemalloc/internal/mutex_prof.h"
+
+typedef struct bin_stats_s bin_stats_t;
+struct bin_stats_s {
+ /*
+ * Total number of allocation/deallocation requests served directly by
+ * the bin. Note that tcache may allocate an object, then recycle it
+ * many times, resulting in many increments to nrequests, but only one
+ * each to nmalloc and ndalloc.
+ */
+ uint64_t nmalloc;
+ uint64_t ndalloc;
+
+ /*
+ * Number of allocation requests that correspond to the size of this
+ * bin. This includes requests served by tcache, though tcache only
+ * periodically merges into this counter.
+ */
+ uint64_t nrequests;
+
+ /*
+ * Current number of regions of this size class, including regions
+ * currently cached by tcache.
+ */
+ size_t curregs;
+
+ /* Number of tcache fills from this bin. */
+ uint64_t nfills;
+
+ /* Number of tcache flushes to this bin. */
+ uint64_t nflushes;
+
+ /* Total number of slabs created for this bin's size class. */
+ uint64_t nslabs;
+
+ /*
+ * Total number of slabs reused by extracting them from the slabs heap
+ * for this bin's size class.
+ */
+ uint64_t reslabs;
+
+ /* Current number of slabs in this bin. */
+ size_t curslabs;
+
+ /* Current size of nonfull slabs heap in this bin. */
+ size_t nonfull_slabs;
+
+ mutex_prof_data_t mutex_data;
+};
+
+#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/bin_types.h b/contrib/libs/jemalloc/include/jemalloc/internal/bin_types.h
index 3533606b90..463632d79c 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/bin_types.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/bin_types.h
@@ -1,17 +1,17 @@
-#ifndef JEMALLOC_INTERNAL_BIN_TYPES_H
-#define JEMALLOC_INTERNAL_BIN_TYPES_H
-
-#include "jemalloc/internal/sc.h"
-
-#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
-#define N_BIN_SHARDS_DEFAULT 1
-
-/* Used in TSD static initializer only. Real init in arena_bind(). */
-#define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}}
-
-typedef struct tsd_binshards_s tsd_binshards_t;
-struct tsd_binshards_s {
- uint8_t binshard[SC_NBINS];
-};
-
-#endif /* JEMALLOC_INTERNAL_BIN_TYPES_H */
+#ifndef JEMALLOC_INTERNAL_BIN_TYPES_H
+#define JEMALLOC_INTERNAL_BIN_TYPES_H
+
+#include "jemalloc/internal/sc.h"
+
+#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
+#define N_BIN_SHARDS_DEFAULT 1
+
+/* Used in TSD static initializer only. Real init in arena_bind(). */
+#define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}}
+
+typedef struct tsd_binshards_s tsd_binshards_t;
+struct tsd_binshards_s {
+ uint8_t binshard[SC_NBINS];
+};
+
+#endif /* JEMALLOC_INTERNAL_BIN_TYPES_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/bit_util.h b/contrib/libs/jemalloc/include/jemalloc/internal/bit_util.h
index c045eb8687..29df995310 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/bit_util.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/bit_util.h
@@ -1,239 +1,239 @@
-#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
-#define JEMALLOC_INTERNAL_BIT_UTIL_H
-
-#include "jemalloc/internal/assert.h"
-
-#define BIT_UTIL_INLINE static inline
-
-/* Sanity check. */
-#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
- || !defined(JEMALLOC_INTERNAL_FFS)
-# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
-#endif
-
-
-BIT_UTIL_INLINE unsigned
-ffs_llu(unsigned long long bitmap) {
- return JEMALLOC_INTERNAL_FFSLL(bitmap);
-}
-
-BIT_UTIL_INLINE unsigned
-ffs_lu(unsigned long bitmap) {
- return JEMALLOC_INTERNAL_FFSL(bitmap);
-}
-
-BIT_UTIL_INLINE unsigned
-ffs_u(unsigned bitmap) {
- return JEMALLOC_INTERNAL_FFS(bitmap);
-}
-
-#ifdef JEMALLOC_INTERNAL_POPCOUNTL
-BIT_UTIL_INLINE unsigned
-popcount_lu(unsigned long bitmap) {
- return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
-}
-#endif
-
-/*
- * Clears first set bit in bitmap, and returns
- * place of bit. bitmap *must not* be 0.
- */
-
-BIT_UTIL_INLINE size_t
-cfs_lu(unsigned long* bitmap) {
- size_t bit = ffs_lu(*bitmap) - 1;
- *bitmap ^= ZU(1) << bit;
- return bit;
-}
-
-BIT_UTIL_INLINE unsigned
-ffs_zu(size_t bitmap) {
-#if LG_SIZEOF_PTR == LG_SIZEOF_INT
- return ffs_u(bitmap);
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
- return ffs_lu(bitmap);
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
- return ffs_llu(bitmap);
-#else
-#error No implementation for size_t ffs()
-#endif
-}
-
-BIT_UTIL_INLINE unsigned
-ffs_u64(uint64_t bitmap) {
-#if LG_SIZEOF_LONG == 3
- return ffs_lu(bitmap);
-#elif LG_SIZEOF_LONG_LONG == 3
- return ffs_llu(bitmap);
-#else
-#error No implementation for 64-bit ffs()
-#endif
-}
-
-BIT_UTIL_INLINE unsigned
-ffs_u32(uint32_t bitmap) {
-#if LG_SIZEOF_INT == 2
- return ffs_u(bitmap);
-#else
-#error No implementation for 32-bit ffs()
-#endif
- return ffs_u(bitmap);
-}
-
-BIT_UTIL_INLINE uint64_t
-pow2_ceil_u64(uint64_t x) {
-#if (defined(__amd64__) || defined(__x86_64__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ))
- if(unlikely(x <= 1)) {
- return x;
- }
- size_t msb_on_index;
-#if (defined(__amd64__) || defined(__x86_64__))
- asm ("bsrq %1, %0"
- : "=r"(msb_on_index) // Outputs.
- : "r"(x-1) // Inputs.
- );
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
- msb_on_index = (63 ^ __builtin_clzll(x - 1));
-#endif
- assert(msb_on_index < 63);
- return 1ULL << (msb_on_index + 1);
-#else
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- x |= x >> 32;
- x++;
- return x;
-#endif
-}
-
-BIT_UTIL_INLINE uint32_t
-pow2_ceil_u32(uint32_t x) {
-#if ((defined(__i386__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) && (!defined(__s390__)))
- if(unlikely(x <= 1)) {
- return x;
- }
- size_t msb_on_index;
-#if (defined(__i386__))
- asm ("bsr %1, %0"
- : "=r"(msb_on_index) // Outputs.
- : "r"(x-1) // Inputs.
- );
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
- msb_on_index = (31 ^ __builtin_clz(x - 1));
-#endif
- assert(msb_on_index < 31);
- return 1U << (msb_on_index + 1);
-#else
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- x++;
- return x;
-#endif
-}
-
-/* Compute the smallest power of 2 that is >= x. */
-BIT_UTIL_INLINE size_t
-pow2_ceil_zu(size_t x) {
-#if (LG_SIZEOF_PTR == 3)
- return pow2_ceil_u64(x);
-#else
- return pow2_ceil_u32(x);
-#endif
-}
-
-#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
- size_t ret;
- assert(x != 0);
-
- asm ("bsr %1, %0"
- : "=r"(ret) // Outputs.
- : "r"(x) // Inputs.
- );
- assert(ret < UINT_MAX);
- return (unsigned)ret;
-}
-#elif (defined(_MSC_VER))
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
- unsigned long ret;
-
- assert(x != 0);
-
-#if (LG_SIZEOF_PTR == 3)
- _BitScanReverse64(&ret, x);
-#elif (LG_SIZEOF_PTR == 2)
- _BitScanReverse(&ret, x);
-#else
-# error "Unsupported type size for lg_floor()"
-#endif
- assert(ret < UINT_MAX);
- return (unsigned)ret;
-}
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
- assert(x != 0);
-
-#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
- return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
-#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
- return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
-#else
-# error "Unsupported type size for lg_floor()"
-#endif
-}
-#else
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
- assert(x != 0);
-
- x |= (x >> 1);
- x |= (x >> 2);
- x |= (x >> 4);
- x |= (x >> 8);
- x |= (x >> 16);
-#if (LG_SIZEOF_PTR == 3)
- x |= (x >> 32);
-#endif
- if (x == SIZE_T_MAX) {
- return (8 << LG_SIZEOF_PTR) - 1;
- }
- x++;
- return ffs_zu(x) - 2;
-}
-#endif
-
-BIT_UTIL_INLINE unsigned
-lg_ceil(size_t x) {
- return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
-}
-
-#undef BIT_UTIL_INLINE
-
-/* A compile-time version of lg_floor and lg_ceil. */
-#define LG_FLOOR_1(x) 0
-#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
-#define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2))
-#define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4))
-#define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8))
-#define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16))
-#define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32))
-#if LG_SIZEOF_PTR == 2
-# define LG_FLOOR(x) LG_FLOOR_32((x))
-#else
-# define LG_FLOOR(x) LG_FLOOR_64((x))
-#endif
-
-#define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1))
-
-#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */
+#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
+#define JEMALLOC_INTERNAL_BIT_UTIL_H
+
+#include "jemalloc/internal/assert.h"
+
+#define BIT_UTIL_INLINE static inline
+
+/* Sanity check. */
+#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
+ || !defined(JEMALLOC_INTERNAL_FFS)
+# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
+#endif
+
+
+BIT_UTIL_INLINE unsigned
+ffs_llu(unsigned long long bitmap) {
+ return JEMALLOC_INTERNAL_FFSLL(bitmap);
+}
+
+BIT_UTIL_INLINE unsigned
+ffs_lu(unsigned long bitmap) {
+ return JEMALLOC_INTERNAL_FFSL(bitmap);
+}
+
+BIT_UTIL_INLINE unsigned
+ffs_u(unsigned bitmap) {
+ return JEMALLOC_INTERNAL_FFS(bitmap);
+}
+
+#ifdef JEMALLOC_INTERNAL_POPCOUNTL
+BIT_UTIL_INLINE unsigned
+popcount_lu(unsigned long bitmap) {
+ return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
+}
+#endif
+
+/*
+ * Clears first unset bit in bitmap, and returns
+ * place of bit. bitmap *must not* be 0.
+ */
+
+BIT_UTIL_INLINE size_t
+cfs_lu(unsigned long* bitmap) {
+ size_t bit = ffs_lu(*bitmap) - 1;
+ *bitmap ^= ZU(1) << bit;
+ return bit;
+}
+
+BIT_UTIL_INLINE unsigned
+ffs_zu(size_t bitmap) {
+#if LG_SIZEOF_PTR == LG_SIZEOF_INT
+ return ffs_u(bitmap);
+#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
+ return ffs_lu(bitmap);
+#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
+ return ffs_llu(bitmap);
+#else
+#error No implementation for size_t ffs()
+#endif
+}
+
+BIT_UTIL_INLINE unsigned
+ffs_u64(uint64_t bitmap) {
+#if LG_SIZEOF_LONG == 3
+ return ffs_lu(bitmap);
+#elif LG_SIZEOF_LONG_LONG == 3
+ return ffs_llu(bitmap);
+#else
+#error No implementation for 64-bit ffs()
+#endif
+}
+
+BIT_UTIL_INLINE unsigned
+ffs_u32(uint32_t bitmap) {
+#if LG_SIZEOF_INT == 2
+ return ffs_u(bitmap);
+#else
+#error No implementation for 32-bit ffs()
+#endif
+ return ffs_u(bitmap);
+}
+
+BIT_UTIL_INLINE uint64_t
+pow2_ceil_u64(uint64_t x) {
+#if (defined(__amd64__) || defined(__x86_64__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ))
+ if(unlikely(x <= 1)) {
+ return x;
+ }
+ size_t msb_on_index;
+#if (defined(__amd64__) || defined(__x86_64__))
+ asm ("bsrq %1, %0"
+ : "=r"(msb_on_index) // Outputs.
+ : "r"(x-1) // Inputs.
+ );
+#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
+ msb_on_index = (63 ^ __builtin_clzll(x - 1));
+#endif
+ assert(msb_on_index < 63);
+ return 1ULL << (msb_on_index + 1);
+#else
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x |= x >> 32;
+ x++;
+ return x;
+#endif
+}
+
+BIT_UTIL_INLINE uint32_t
+pow2_ceil_u32(uint32_t x) {
+#if ((defined(__i386__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) && (!defined(__s390__)))
+ if(unlikely(x <= 1)) {
+ return x;
+ }
+ size_t msb_on_index;
+#if (defined(__i386__))
+ asm ("bsr %1, %0"
+ : "=r"(msb_on_index) // Outputs.
+ : "r"(x-1) // Inputs.
+ );
+#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
+ msb_on_index = (31 ^ __builtin_clz(x - 1));
+#endif
+ assert(msb_on_index < 31);
+ return 1U << (msb_on_index + 1);
+#else
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x++;
+ return x;
+#endif
+}
+
+/* Compute the smallest power of 2 that is >= x. */
+BIT_UTIL_INLINE size_t
+pow2_ceil_zu(size_t x) {
+#if (LG_SIZEOF_PTR == 3)
+ return pow2_ceil_u64(x);
+#else
+ return pow2_ceil_u32(x);
+#endif
+}
+
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+BIT_UTIL_INLINE unsigned
+lg_floor(size_t x) {
+ size_t ret;
+ assert(x != 0);
+
+ asm ("bsr %1, %0"
+ : "=r"(ret) // Outputs.
+ : "r"(x) // Inputs.
+ );
+ assert(ret < UINT_MAX);
+ return (unsigned)ret;
+}
+#elif (defined(_MSC_VER))
+BIT_UTIL_INLINE unsigned
+lg_floor(size_t x) {
+ unsigned long ret;
+
+ assert(x != 0);
+
+#if (LG_SIZEOF_PTR == 3)
+ _BitScanReverse64(&ret, x);
+#elif (LG_SIZEOF_PTR == 2)
+ _BitScanReverse(&ret, x);
+#else
+# error "Unsupported type size for lg_floor()"
+#endif
+ assert(ret < UINT_MAX);
+ return (unsigned)ret;
+}
+#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
+BIT_UTIL_INLINE unsigned
+lg_floor(size_t x) {
+ assert(x != 0);
+
+#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
+ return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
+#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
+ return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
+#else
+# error "Unsupported type size for lg_floor()"
+#endif
+}
+#else
+BIT_UTIL_INLINE unsigned
+lg_floor(size_t x) {
+ assert(x != 0);
+
+ x |= (x >> 1);
+ x |= (x >> 2);
+ x |= (x >> 4);
+ x |= (x >> 8);
+ x |= (x >> 16);
+#if (LG_SIZEOF_PTR == 3)
+ x |= (x >> 32);
+#endif
+ if (x == SIZE_T_MAX) {
+ return (8 << LG_SIZEOF_PTR) - 1;
+ }
+ x++;
+ return ffs_zu(x) - 2;
+}
+#endif
+
+BIT_UTIL_INLINE unsigned
+lg_ceil(size_t x) {
+ return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
+}
+
+#undef BIT_UTIL_INLINE
+
+/* A compile-time version of lg_floor and lg_ceil. */
+#define LG_FLOOR_1(x) 0
+#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
+#define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2))
+#define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4))
+#define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8))
+#define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16))
+#define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32))
+#if LG_SIZEOF_PTR == 2
+# define LG_FLOOR(x) LG_FLOOR_32((x))
+#else
+# define LG_FLOOR(x) LG_FLOOR_64((x))
+#endif
+
+#define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1))
+
+#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */
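For reference, the portable #else branches of pow2_ceil_u64() and pow2_ceil_u32() above work by smearing the highest set bit of x-1 into every lower position and then adding one. A minimal standalone sketch of the same arithmetic (plain C, no JEMALLOC_* macros; the helper names pow2_ceil_portable and pow2_ceil_naive are illustrative only) checks it against a doubling loop:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same bit-smearing trick as the non-intrinsic branch of pow2_ceil_u64(). */
static uint64_t
pow2_ceil_portable(uint64_t x) {
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x |= x >> 32;
    return x + 1;
}

/* Reference implementation: double until >= x. */
static uint64_t
pow2_ceil_naive(uint64_t x) {
    uint64_t p = 1;
    while (p < x) {
        p <<= 1;
    }
    return p;
}

int
main(void) {
    for (uint64_t x = 1; x < (1ULL << 20); x++) {
        assert(pow2_ceil_portable(x) == pow2_ceil_naive(x));
    }
    printf("pow2_ceil(1000) = %llu\n",
        (unsigned long long)pow2_ceil_portable(1000));
    return 0;
}

The intrinsic branches (bsrq, __builtin_clzll) obtain the index of the most significant bit directly instead of smearing it.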
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/bitmap.h b/contrib/libs/jemalloc/include/jemalloc/internal/bitmap.h
index c3f9cb490f..d826a72d54 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/bitmap.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/bitmap.h
@@ -1,159 +1,159 @@
-#ifndef JEMALLOC_INTERNAL_BITMAP_H
-#define JEMALLOC_INTERNAL_BITMAP_H
+#ifndef JEMALLOC_INTERNAL_BITMAP_H
+#define JEMALLOC_INTERNAL_BITMAP_H
-#include "jemalloc/internal/arena_types.h"
-#include "jemalloc/internal/bit_util.h"
-#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/arena_types.h"
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/sc.h"
typedef unsigned long bitmap_t;
-#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
-
-/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
-#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
-/* Maximum bitmap bit count is determined by maximum regions per slab. */
-# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
-#else
-/* Maximum bitmap bit count is determined by number of extent size classes. */
-# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
-#endif
-#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
-
+#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
+
+/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
+#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
+/* Maximum bitmap bit count is determined by maximum regions per slab. */
+# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
+#else
+/* Maximum bitmap bit count is determined by number of extent size classes. */
+# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
+#endif
+#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
+
/* Number of bits per group. */
-#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
-#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
-#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
-
-/*
- * Do some analysis on how big the bitmap is before we use a tree. For a brute
- * force linear search, if we would have to call ffs_lu() more than 2^3 times,
- * use a tree instead.
- */
-#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
-# define BITMAP_USE_TREE
-#endif
-
-/* Number of groups required to store a given number of bits. */
-#define BITMAP_BITS2GROUPS(nbits) \
- (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
-
-/*
- * Number of groups required at a particular level for a given number of bits.
- */
-#define BITMAP_GROUPS_L0(nbits) \
- BITMAP_BITS2GROUPS(nbits)
-#define BITMAP_GROUPS_L1(nbits) \
- BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
-#define BITMAP_GROUPS_L2(nbits) \
- BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
-#define BITMAP_GROUPS_L3(nbits) \
- BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
- BITMAP_BITS2GROUPS((nbits)))))
-#define BITMAP_GROUPS_L4(nbits) \
- BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
- BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
-
-/*
- * Assuming the number of levels, number of groups required for a given number
- * of bits.
- */
-#define BITMAP_GROUPS_1_LEVEL(nbits) \
- BITMAP_GROUPS_L0(nbits)
-#define BITMAP_GROUPS_2_LEVEL(nbits) \
- (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
-#define BITMAP_GROUPS_3_LEVEL(nbits) \
- (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
-#define BITMAP_GROUPS_4_LEVEL(nbits) \
- (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
-#define BITMAP_GROUPS_5_LEVEL(nbits) \
- (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
-
-/*
- * Maximum number of groups required to support LG_BITMAP_MAXBITS.
- */
-#ifdef BITMAP_USE_TREE
-
-#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
-# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
-# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
-# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
-# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
-# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
-# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
-# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
-# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
-# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
-# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
-#else
-# error "Unsupported bitmap size"
-#endif
-
-/*
- * Maximum number of levels possible. This could be statically computed based
- * on LG_BITMAP_MAXBITS:
- *
- * #define BITMAP_MAX_LEVELS \
- * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
- * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
- *
- * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
- * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
- * various cascading macros. The only additional cost this incurs is some
- * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
- * are not impacted.
- */
-#define BITMAP_MAX_LEVELS 5
-
-#define BITMAP_INFO_INITIALIZER(nbits) { \
- /* nbits. */ \
- nbits, \
- /* nlevels. */ \
- (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
- (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
- (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
- (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
- /* levels. */ \
- { \
- {0}, \
- {BITMAP_GROUPS_L0(nbits)}, \
- {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
- {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
- BITMAP_GROUPS_L0(nbits)}, \
- {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
- BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
- {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
- BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
- + BITMAP_GROUPS_L0(nbits)} \
- } \
-}
-
-#else /* BITMAP_USE_TREE */
-
-#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
-#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
-
-#define BITMAP_INFO_INITIALIZER(nbits) { \
- /* nbits. */ \
- nbits, \
- /* ngroups. */ \
- BITMAP_BITS2GROUPS(nbits) \
-}
-
-#endif /* BITMAP_USE_TREE */
-
-typedef struct bitmap_level_s {
+#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
+#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
+#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
+
+/*
+ * Do some analysis on how big the bitmap is before we use a tree. For a brute
+ * force linear search, if we would have to call ffs_lu() more than 2^3 times,
+ * use a tree instead.
+ */
+#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
+# define BITMAP_USE_TREE
+#endif
+
+/* Number of groups required to store a given number of bits. */
+#define BITMAP_BITS2GROUPS(nbits) \
+ (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
+
+/*
+ * Number of groups required at a particular level for a given number of bits.
+ */
+#define BITMAP_GROUPS_L0(nbits) \
+ BITMAP_BITS2GROUPS(nbits)
+#define BITMAP_GROUPS_L1(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
+#define BITMAP_GROUPS_L2(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
+#define BITMAP_GROUPS_L3(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
+ BITMAP_BITS2GROUPS((nbits)))))
+#define BITMAP_GROUPS_L4(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
+
+/*
+ * Assuming the number of levels, number of groups required for a given number
+ * of bits.
+ */
+#define BITMAP_GROUPS_1_LEVEL(nbits) \
+ BITMAP_GROUPS_L0(nbits)
+#define BITMAP_GROUPS_2_LEVEL(nbits) \
+ (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
+#define BITMAP_GROUPS_3_LEVEL(nbits) \
+ (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
+#define BITMAP_GROUPS_4_LEVEL(nbits) \
+ (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
+#define BITMAP_GROUPS_5_LEVEL(nbits) \
+ (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
+
+/*
+ * Maximum number of groups required to support LG_BITMAP_MAXBITS.
+ */
+#ifdef BITMAP_USE_TREE
+
+#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
+#else
+# error "Unsupported bitmap size"
+#endif
+
+/*
+ * Maximum number of levels possible. This could be statically computed based
+ * on LG_BITMAP_MAXBITS:
+ *
+ * #define BITMAP_MAX_LEVELS \
+ * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
+ *
+ * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
+ * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
+ * various cascading macros. The only additional cost this incurs is some
+ * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
+ * are not impacted.
+ */
+#define BITMAP_MAX_LEVELS 5
+
+#define BITMAP_INFO_INITIALIZER(nbits) { \
+ /* nbits. */ \
+ nbits, \
+ /* nlevels. */ \
+ (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
+ (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
+ (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
+ (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
+ /* levels. */ \
+ { \
+ {0}, \
+ {BITMAP_GROUPS_L0(nbits)}, \
+ {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
+ {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
+ BITMAP_GROUPS_L0(nbits)}, \
+ {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
+ BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
+ {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
+ BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
+ + BITMAP_GROUPS_L0(nbits)} \
+ } \
+}
+
+#else /* BITMAP_USE_TREE */
+
+#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
+#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
+
+#define BITMAP_INFO_INITIALIZER(nbits) { \
+ /* nbits. */ \
+ nbits, \
+ /* ngroups. */ \
+ BITMAP_BITS2GROUPS(nbits) \
+}
+
+#endif /* BITMAP_USE_TREE */
+
+typedef struct bitmap_level_s {
/* Offset of this level's groups within the array of groups. */
size_t group_offset;
-} bitmap_level_t;
+} bitmap_level_t;
-typedef struct bitmap_info_s {
+typedef struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
-#ifdef BITMAP_USE_TREE
+#ifdef BITMAP_USE_TREE
/* Number of levels necessary for nbits. */
unsigned nlevels;
@@ -162,62 +162,62 @@ typedef struct bitmap_info_s {
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
-#else /* BITMAP_USE_TREE */
- /* Number of groups necessary for nbits. */
- size_t ngroups;
-#endif /* BITMAP_USE_TREE */
-} bitmap_info_t;
-
-void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
-void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
-size_t bitmap_size(const bitmap_info_t *binfo);
-
-static inline bool
-bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
-#ifdef BITMAP_USE_TREE
- size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
+#else /* BITMAP_USE_TREE */
+ /* Number of groups necessary for nbits. */
+ size_t ngroups;
+#endif /* BITMAP_USE_TREE */
+} bitmap_info_t;
+
+void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
+void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
+size_t bitmap_size(const bitmap_info_t *binfo);
+
+static inline bool
+bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
+#ifdef BITMAP_USE_TREE
+ size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
/* The bitmap is full iff the root group is 0. */
return (rg == 0);
-#else
- size_t i;
-
- for (i = 0; i < binfo->ngroups; i++) {
- if (bitmap[i] != 0) {
- return false;
- }
- }
- return true;
-#endif
+#else
+ size_t i;
+
+ for (i = 0; i < binfo->ngroups; i++) {
+ if (bitmap[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+#endif
}
-static inline bool
-bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
+static inline bool
+bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t g;
assert(bit < binfo->nbits);
goff = bit >> LG_BITMAP_GROUP_NBITS;
g = bitmap[goff];
- return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
+ return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
}
-static inline void
-bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
+static inline void
+bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t *gp;
bitmap_t g;
assert(bit < binfo->nbits);
- assert(!bitmap_get(bitmap, binfo, bit));
+ assert(!bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
- assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
+ g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit));
-#ifdef BITMAP_USE_TREE
+#ifdef BITMAP_USE_TREE
/* Propagate group state transitions up the tree. */
if (g == 0) {
unsigned i;
@@ -226,113 +226,113 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
- assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
+ g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
- if (g != 0) {
+ if (g != 0) {
break;
- }
+ }
}
}
-#endif
-}
-
-/* ffu: find first unset >= bit. */
-static inline size_t
-bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
- assert(min_bit < binfo->nbits);
-
-#ifdef BITMAP_USE_TREE
- size_t bit = 0;
- for (unsigned level = binfo->nlevels; level--;) {
- size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
- 1));
- bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
- >> lg_bits_per_group)];
- unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
- bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
- assert(group_nmask <= BITMAP_GROUP_NBITS);
- bitmap_t group_mask = ~((1LU << group_nmask) - 1);
- bitmap_t group_masked = group & group_mask;
- if (group_masked == 0LU) {
- if (group == 0LU) {
- return binfo->nbits;
- }
- /*
- * min_bit was preceded by one or more unset bits in
- * this group, but there are no other unset bits in this
- * group. Try again starting at the first bit of the
- * next sibling. This will recurse at most once per
- * non-root level.
- */
- size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
- assert(sib_base > min_bit);
- assert(sib_base > bit);
- if (sib_base >= binfo->nbits) {
- return binfo->nbits;
- }
- return bitmap_ffu(bitmap, binfo, sib_base);
- }
- bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
- (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
- }
- assert(bit >= min_bit);
- assert(bit < binfo->nbits);
- return bit;
-#else
- size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
- bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
- - 1);
- size_t bit;
- do {
- bit = ffs_lu(g);
- if (bit != 0) {
- return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
- }
- i++;
- g = bitmap[i];
- } while (i < binfo->ngroups);
- return binfo->nbits;
-#endif
+#endif
}
+/* ffu: find first unset >= bit. */
+static inline size_t
+bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
+ assert(min_bit < binfo->nbits);
+
+#ifdef BITMAP_USE_TREE
+ size_t bit = 0;
+ for (unsigned level = binfo->nlevels; level--;) {
+ size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
+ 1));
+ bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
+ >> lg_bits_per_group)];
+ unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
+ bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
+ assert(group_nmask <= BITMAP_GROUP_NBITS);
+ bitmap_t group_mask = ~((1LU << group_nmask) - 1);
+ bitmap_t group_masked = group & group_mask;
+ if (group_masked == 0LU) {
+ if (group == 0LU) {
+ return binfo->nbits;
+ }
+ /*
+ * min_bit was preceded by one or more unset bits in
+ * this group, but there are no other unset bits in this
+ * group. Try again starting at the first bit of the
+ * next sibling. This will recurse at most once per
+ * non-root level.
+ */
+ size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
+ assert(sib_base > min_bit);
+ assert(sib_base > bit);
+ if (sib_base >= binfo->nbits) {
+ return binfo->nbits;
+ }
+ return bitmap_ffu(bitmap, binfo, sib_base);
+ }
+ bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
+ (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
+ }
+ assert(bit >= min_bit);
+ assert(bit < binfo->nbits);
+ return bit;
+#else
+ size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
+ bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
+ - 1);
+ size_t bit;
+ do {
+ bit = ffs_lu(g);
+ if (bit != 0) {
+ return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+ }
+ i++;
+ g = bitmap[i];
+ } while (i < binfo->ngroups);
+ return binfo->nbits;
+#endif
+}
+
/* sfu: set first unset. */
-static inline size_t
-bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
+static inline size_t
+bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
size_t bit;
bitmap_t g;
unsigned i;
- assert(!bitmap_full(bitmap, binfo));
+ assert(!bitmap_full(bitmap, binfo));
-#ifdef BITMAP_USE_TREE
+#ifdef BITMAP_USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
- bit = ffs_lu(g) - 1;
+ bit = ffs_lu(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
- bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
- }
-#else
- i = 0;
- g = bitmap[0];
- while ((bit = ffs_lu(g)) == 0) {
- i++;
- g = bitmap[i];
+ bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
}
- bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
-#endif
+#else
+ i = 0;
+ g = bitmap[0];
+ while ((bit = ffs_lu(g)) == 0) {
+ i++;
+ g = bitmap[i];
+ }
+ bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+#endif
bitmap_set(bitmap, binfo, bit);
- return bit;
+ return bit;
}
-static inline void
-bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
+static inline void
+bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t *gp;
bitmap_t g;
- UNUSED bool propagate;
+ UNUSED bool propagate;
assert(bit < binfo->nbits);
assert(bitmap_get(bitmap, binfo, bit));
@@ -340,11 +340,11 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
gp = &bitmap[goff];
g = *gp;
propagate = (g == 0);
- assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
+ g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
- assert(!bitmap_get(bitmap, binfo, bit));
-#ifdef BITMAP_USE_TREE
+ assert(!bitmap_get(bitmap, binfo, bit));
+#ifdef BITMAP_USE_TREE
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
@@ -354,16 +354,16 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
propagate = (g == 0);
- assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
+ assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
== 0);
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
- if (!propagate) {
+ if (!propagate) {
break;
- }
+ }
}
}
-#endif /* BITMAP_USE_TREE */
+#endif /* BITMAP_USE_TREE */
}
-#endif /* JEMALLOC_INTERNAL_BITMAP_H */
+#endif /* JEMALLOC_INTERNAL_BITMAP_H */
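The non-tree (!BITMAP_USE_TREE) path above is a plain array of groups searched with ffs_lu(). A simplified sketch of that flat layout, assuming GCC/Clang (__builtin_ctzl stands in for the configure-selected ffs) and toy names bm_init/bm_sfu, keeps the same convention that a 1 bit means the slot is still unset:

#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define GROUP_NBITS (sizeof(unsigned long) * CHAR_BIT)
#define BITS2GROUPS(nbits) (((nbits) + GROUP_NBITS - 1) / GROUP_NBITS)

/* All usable bits start as 1 ("unset"); trailing bits past nbits stay 0. */
static void
bm_init(unsigned long *bm, size_t nbits) {
    size_t ngroups = BITS2GROUPS(nbits);
    memset(bm, 0xff, ngroups * sizeof(unsigned long));
    size_t extra = ngroups * GROUP_NBITS - nbits;
    if (extra != 0) {
        bm[ngroups - 1] >>= extra;
    }
}

/* Set-first-unset: find the lowest 1 bit, clear it, return its index. */
static size_t
bm_sfu(unsigned long *bm, size_t nbits) {
    for (size_t i = 0; i < BITS2GROUPS(nbits); i++) {
        if (bm[i] != 0) {
            size_t bit = (size_t)__builtin_ctzl(bm[i]);
            bm[i] &= ~(1UL << bit);
            return i * GROUP_NBITS + bit;
        }
    }
    return nbits; /* Full. */
}

int
main(void) {
    enum { NBITS = 100 };
    unsigned long bm[BITS2GROUPS(NBITS)];
    bm_init(bm, NBITS);
    for (size_t i = 0; i < NBITS; i++) {
        assert(bm_sfu(bm, NBITS) == i);
    }
    assert(bm_sfu(bm, NBITS) == NBITS);
    puts("flat bitmap sfu ok");
    return 0;
}

The BITMAP_USE_TREE variant layers summary groups on top of this so that bitmap_sfu() touches one group per level instead of scanning linearly.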
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/cache_bin.h b/contrib/libs/jemalloc/include/jemalloc/internal/cache_bin.h
index d14556a3da..12a5f3c19a 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/cache_bin.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/cache_bin.h
@@ -1,131 +1,131 @@
-#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
-#define JEMALLOC_INTERNAL_CACHE_BIN_H
-
-#include "jemalloc/internal/ql.h"
-
-/*
- * The cache_bins are the mechanism that the tcache and the arena use to
- * communicate. The tcache fills from and flushes to the arena by passing a
- * cache_bin_t to fill/flush. When the arena needs to pull stats from the
- * tcaches associated with it, it does so by iterating over its
- * cache_bin_array_descriptor_t objects and reading out per-bin stats it
- * contains. This makes it so that the arena need not know about the existence
- * of the tcache at all.
- */
-
-
-/*
- * The count of the number of cached allocations in a bin. We make this signed
- * so that negative numbers can encode "invalid" states (e.g. a low water mark
- * of -1 for a cache that has been depleted).
- */
-typedef int32_t cache_bin_sz_t;
-
-typedef struct cache_bin_stats_s cache_bin_stats_t;
-struct cache_bin_stats_s {
- /*
- * Number of allocation requests that corresponded to the size of this
- * bin.
- */
- uint64_t nrequests;
-};
-
-/*
- * Read-only information associated with each element of tcache_t's tbins array
- * is stored separately, mainly to reduce memory usage.
- */
-typedef struct cache_bin_info_s cache_bin_info_t;
-struct cache_bin_info_s {
- /* Upper limit on ncached. */
- cache_bin_sz_t ncached_max;
-};
-
-typedef struct cache_bin_s cache_bin_t;
-struct cache_bin_s {
- /* Min # cached since last GC. */
- cache_bin_sz_t low_water;
- /* # of cached objects. */
- cache_bin_sz_t ncached;
- /*
- * ncached and stats are both modified frequently. Let's keep them
- * close so that they have a higher chance of being on the same
- * cacheline, thus less write-backs.
- */
- cache_bin_stats_t tstats;
- /*
- * Stack of available objects.
- *
- * To make use of adjacent cacheline prefetch, the items in the avail
- * stack goes to higher address for newer allocations. avail points
- * just above the available space, which means that
- * avail[-ncached, ... -1] are available items and the lowest item will
- * be allocated first.
- */
- void **avail;
-};
-
-typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
-struct cache_bin_array_descriptor_s {
- /*
- * The arena keeps a list of the cache bins associated with it, for
- * stats collection.
- */
- ql_elm(cache_bin_array_descriptor_t) link;
- /* Pointers to the tcache bins. */
- cache_bin_t *bins_small;
- cache_bin_t *bins_large;
-};
-
-static inline void
-cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
- cache_bin_t *bins_small, cache_bin_t *bins_large) {
- ql_elm_new(descriptor, link);
- descriptor->bins_small = bins_small;
- descriptor->bins_large = bins_large;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
- void *ret;
-
- bin->ncached--;
-
- /*
- * Check for both bin->ncached == 0 and ncached < low_water
- * in a single branch.
- */
- if (unlikely(bin->ncached <= bin->low_water)) {
- bin->low_water = bin->ncached;
- if (bin->ncached == -1) {
- bin->ncached = 0;
- *success = false;
- return NULL;
- }
- }
-
- /*
- * success (instead of ret) should be checked upon the return of this
- * function. We avoid checking (ret == NULL) because there is never a
- * null stored on the avail stack (which is unknown to the compiler),
- * and eagerly checking ret would cause pipeline stall (waiting for the
- * cacheline).
- */
- *success = true;
- ret = *(bin->avail - (bin->ncached + 1));
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-cache_bin_dalloc_easy(cache_bin_t *bin, cache_bin_info_t *bin_info, void *ptr) {
- if (unlikely(bin->ncached == bin_info->ncached_max)) {
- return false;
- }
- assert(bin->ncached < bin_info->ncached_max);
- bin->ncached++;
- *(bin->avail - bin->ncached) = ptr;
-
- return true;
-}
-
-#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
+#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
+#define JEMALLOC_INTERNAL_CACHE_BIN_H
+
+#include "jemalloc/internal/ql.h"
+
+/*
+ * The cache_bins are the mechanism that the tcache and the arena use to
+ * communicate. The tcache fills from and flushes to the arena by passing a
+ * cache_bin_t to fill/flush. When the arena needs to pull stats from the
+ * tcaches associated with it, it does so by iterating over its
+ * cache_bin_array_descriptor_t objects and reading out per-bin stats it
+ * contains. This makes it so that the arena need not know about the existence
+ * of the tcache at all.
+ */
+
+
+/*
+ * The count of the number of cached allocations in a bin. We make this signed
+ * so that negative numbers can encode "invalid" states (e.g. a low water mark
+ * of -1 for a cache that has been depleted).
+ */
+typedef int32_t cache_bin_sz_t;
+
+typedef struct cache_bin_stats_s cache_bin_stats_t;
+struct cache_bin_stats_s {
+ /*
+ * Number of allocation requests that corresponded to the size of this
+ * bin.
+ */
+ uint64_t nrequests;
+};
+
+/*
+ * Read-only information associated with each element of tcache_t's tbins array
+ * is stored separately, mainly to reduce memory usage.
+ */
+typedef struct cache_bin_info_s cache_bin_info_t;
+struct cache_bin_info_s {
+ /* Upper limit on ncached. */
+ cache_bin_sz_t ncached_max;
+};
+
+typedef struct cache_bin_s cache_bin_t;
+struct cache_bin_s {
+ /* Min # cached since last GC. */
+ cache_bin_sz_t low_water;
+ /* # of cached objects. */
+ cache_bin_sz_t ncached;
+ /*
+ * ncached and stats are both modified frequently. Let's keep them
+ * close so that they have a higher chance of being on the same
+ * cacheline, thus less write-backs.
+ */
+ cache_bin_stats_t tstats;
+ /*
+ * Stack of available objects.
+ *
+ * To make use of adjacent cacheline prefetch, the items in the avail
+ * stack goes to higher address for newer allocations. avail points
+ * just above the available space, which means that
+ * avail[-ncached, ... -1] are available items and the lowest item will
+ * be allocated first.
+ */
+ void **avail;
+};
+
+typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
+struct cache_bin_array_descriptor_s {
+ /*
+ * The arena keeps a list of the cache bins associated with it, for
+ * stats collection.
+ */
+ ql_elm(cache_bin_array_descriptor_t) link;
+ /* Pointers to the tcache bins. */
+ cache_bin_t *bins_small;
+ cache_bin_t *bins_large;
+};
+
+static inline void
+cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
+ cache_bin_t *bins_small, cache_bin_t *bins_large) {
+ ql_elm_new(descriptor, link);
+ descriptor->bins_small = bins_small;
+ descriptor->bins_large = bins_large;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
+ void *ret;
+
+ bin->ncached--;
+
+ /*
+ * Check for both bin->ncached == 0 and ncached < low_water
+ * in a single branch.
+ */
+ if (unlikely(bin->ncached <= bin->low_water)) {
+ bin->low_water = bin->ncached;
+ if (bin->ncached == -1) {
+ bin->ncached = 0;
+ *success = false;
+ return NULL;
+ }
+ }
+
+ /*
+ * success (instead of ret) should be checked upon the return of this
+ * function. We avoid checking (ret == NULL) because there is never a
+ * null stored on the avail stack (which is unknown to the compiler),
+ * and eagerly checking ret would cause pipeline stall (waiting for the
+ * cacheline).
+ */
+ *success = true;
+ ret = *(bin->avail - (bin->ncached + 1));
+
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+cache_bin_dalloc_easy(cache_bin_t *bin, cache_bin_info_t *bin_info, void *ptr) {
+ if (unlikely(bin->ncached == bin_info->ncached_max)) {
+ return false;
+ }
+ assert(bin->ncached < bin_info->ncached_max);
+ bin->ncached++;
+ *(bin->avail - bin->ncached) = ptr;
+
+ return true;
+}
+
+#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
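The avail-stack comment above is easiest to see with concrete indices: avail points one past the storage, pushes fill avail[-1], avail[-2], ... downward, and pops read back from the lowest occupied slot. A toy sketch (toy_bin_t, toy_push and toy_pop are illustrative names; the low_water bookkeeping of cache_bin_alloc_easy() is omitted) demonstrates the indexing:

#include <assert.h>
#include <stdio.h>

enum { NCACHED_MAX = 8 };

/* Trimmed-down stand-in for cache_bin_t. */
typedef struct {
    int ncached;
    void **avail; /* Points just past the top of the stack storage. */
} toy_bin_t;

/* Mirrors cache_bin_dalloc_easy(): push onto the downward-growing stack. */
static int
toy_push(toy_bin_t *bin, void *ptr) {
    if (bin->ncached == NCACHED_MAX) {
        return 0;
    }
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;
    return 1;
}

/* Mirrors the core of cache_bin_alloc_easy(): pop the most recent item. */
static void *
toy_pop(toy_bin_t *bin) {
    if (bin->ncached == 0) {
        return NULL;
    }
    bin->ncached--;
    return *(bin->avail - (bin->ncached + 1));
}

int
main(void) {
    void *storage[NCACHED_MAX];
    int a, b;
    toy_bin_t bin = { 0, storage + NCACHED_MAX };

    assert(toy_push(&bin, &a));
    assert(toy_push(&bin, &b));
    /* LIFO: the newest item, stored at the lowest occupied slot, pops first. */
    assert(toy_pop(&bin) == &b);
    assert(toy_pop(&bin) == &a);
    assert(toy_pop(&bin) == NULL);
    puts("avail-stack layout ok");
    return 0;
}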
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/ckh.h b/contrib/libs/jemalloc/include/jemalloc/internal/ckh.h
index 7b3850bc16..5e01567b4b 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/ckh.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/ckh.h
@@ -1,101 +1,101 @@
-#ifndef JEMALLOC_INTERNAL_CKH_H
-#define JEMALLOC_INTERNAL_CKH_H
+#ifndef JEMALLOC_INTERNAL_CKH_H
+#define JEMALLOC_INTERNAL_CKH_H
-#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/tsd.h"
-/* Cuckoo hashing implementation. Skip to the end for the interface. */
-
-/******************************************************************************/
-/* INTERNAL DEFINITIONS -- IGNORE */
-/******************************************************************************/
+/* Cuckoo hashing implementation. Skip to the end for the interface. */
+/******************************************************************************/
+/* INTERNAL DEFINITIONS -- IGNORE */
+/******************************************************************************/
+
/* Maintain counters used to get an idea of performance. */
-/* #define CKH_COUNT */
+/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
-/* #define CKH_VERBOSE */
+/* #define CKH_VERBOSE */
/*
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
-#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
+#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
-/* Typedefs to allow easy function pointer passing. */
-typedef void ckh_hash_t (const void *, size_t[2]);
-typedef bool ckh_keycomp_t (const void *, const void *);
+/* Typedefs to allow easy function pointer passing. */
+typedef void ckh_hash_t (const void *, size_t[2]);
+typedef bool ckh_keycomp_t (const void *, const void *);
/* Hash table cell. */
-typedef struct {
- const void *key;
- const void *data;
-} ckhc_t;
+typedef struct {
+ const void *key;
+ const void *data;
+} ckhc_t;
-/* The hash table itself. */
-typedef struct {
+/* The hash table itself. */
+typedef struct {
#ifdef CKH_COUNT
/* Counters used to get an idea of performance. */
- uint64_t ngrows;
- uint64_t nshrinks;
- uint64_t nshrinkfails;
- uint64_t ninserts;
- uint64_t nrelocs;
+ uint64_t ngrows;
+ uint64_t nshrinks;
+ uint64_t nshrinkfails;
+ uint64_t ninserts;
+ uint64_t nrelocs;
#endif
/* Used for pseudo-random number generation. */
- uint64_t prng_state;
+ uint64_t prng_state;
/* Total number of items. */
- size_t count;
+ size_t count;
/*
* Minimum and current number of hash table buckets. There are
* 2^LG_CKH_BUCKET_CELLS cells per bucket.
*/
- unsigned lg_minbuckets;
- unsigned lg_curbuckets;
+ unsigned lg_minbuckets;
+ unsigned lg_curbuckets;
/* Hash and comparison functions. */
- ckh_hash_t *hash;
- ckh_keycomp_t *keycomp;
+ ckh_hash_t *hash;
+ ckh_keycomp_t *keycomp;
/* Hash table with 2^lg_curbuckets buckets. */
- ckhc_t *tab;
-} ckh_t;
+ ckhc_t *tab;
+} ckh_t;
/******************************************************************************/
-/* BEGIN PUBLIC API */
-/******************************************************************************/
+/* BEGIN PUBLIC API */
+/******************************************************************************/
-/* Lifetime management. Minitems is the initial capacity. */
-bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+/* Lifetime management. Minitems is the initial capacity. */
+bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
-void ckh_delete(tsd_t *tsd, ckh_t *ckh);
-
-/* Get the number of elements in the set. */
-size_t ckh_count(ckh_t *ckh);
-
-/*
- * To iterate over the elements in the table, initialize *tabind to 0 and call
- * this function until it returns true. Each call that returns false will
- * update *key and *data to the next element in the table, assuming the pointers
- * are non-NULL.
- */
-bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-
-/*
- * Basic hash table operations -- insert, removal, lookup. For ckh_remove and
- * ckh_search, key or data can be NULL. The hash-table only stores pointers to
- * the key and value, and doesn't do any lifetime management.
- */
-bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
-bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+void ckh_delete(tsd_t *tsd, ckh_t *ckh);
+
+/* Get the number of elements in the set. */
+size_t ckh_count(ckh_t *ckh);
+
+/*
+ * To iterate over the elements in the table, initialize *tabind to 0 and call
+ * this function until it returns true. Each call that returns false will
+ * update *key and *data to the next element in the table, assuming the pointers
+ * are non-NULL.
+ */
+bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
+
+/*
+ * Basic hash table operations -- insert, removal, lookup. For ckh_remove and
+ * ckh_search, key or data can be NULL. The hash-table only stores pointers to
+ * the key and value, and doesn't do any lifetime management.
+ */
+bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
+bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data);
-bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
+bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
-/* Some useful hash and comparison functions for strings and pointers. */
-void ckh_string_hash(const void *key, size_t r_hash[2]);
-bool ckh_string_keycomp(const void *k1, const void *k2);
-void ckh_pointer_hash(const void *key, size_t r_hash[2]);
-bool ckh_pointer_keycomp(const void *k1, const void *k2);
+/* Some useful hash and comparison functions for strings and pointers. */
+void ckh_string_hash(const void *key, size_t r_hash[2]);
+bool ckh_string_keycomp(const void *k1, const void *k2);
+void ckh_pointer_hash(const void *key, size_t r_hash[2]);
+bool ckh_pointer_keycomp(const void *k1, const void *k2);
-#endif /* JEMALLOC_INTERNAL_CKH_H */
+#endif /* JEMALLOC_INTERNAL_CKH_H */
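ckh.h only declares the interface; as a reminder of what cuckoo hashing does, here is a generic two-table insert/search sketch. It is not jemalloc's ckh implementation: the hash functions, fixed table size, and MAX_KICKS bound are toy assumptions, and a real table would rehash or grow when an insert fails (ckh_t tracks such events in its ngrows counter):

#include <stdint.h>
#include <stdio.h>

enum { NSLOTS = 64, MAX_KICKS = 32 };

typedef struct {
    uintptr_t key; /* 0 means empty. */
    uintptr_t val;
} slot_t;

static size_t h1(uintptr_t k) { return (k * 0x9e3779b97f4a7c15ULL) % NSLOTS; }
static size_t h2(uintptr_t k) { return (k * 0xc2b2ae3d27d4eb4fULL) % NSLOTS; }

static slot_t t1[NSLOTS], t2[NSLOTS];

/* Insert by evicting ("kicking") residents back and forth between tables. */
static int
cuckoo_insert(uintptr_t key, uintptr_t val) {
    slot_t cur = { key, val };
    for (int i = 0; i < MAX_KICKS; i++) {
        slot_t *s = &t1[h1(cur.key)];
        slot_t evicted = *s;
        *s = cur;
        if (evicted.key == 0) {
            return 1;
        }
        cur = evicted;

        s = &t2[h2(cur.key)];
        evicted = *s;
        *s = cur;
        if (evicted.key == 0) {
            return 1;
        }
        cur = evicted;
    }
    return 0; /* A real implementation would rehash/grow here. */
}

/* An element is always in one of its two candidate slots. */
static int
cuckoo_search(uintptr_t key, uintptr_t *val) {
    if (t1[h1(key)].key == key) { *val = t1[h1(key)].val; return 1; }
    if (t2[h2(key)].key == key) { *val = t2[h2(key)].val; return 1; }
    return 0;
}

int
main(void) {
    for (uintptr_t k = 1; k <= 40; k++) {
        if (!cuckoo_insert(k, k * 10)) {
            printf("insert %lu failed (would rehash)\n", (unsigned long)k);
        }
    }
    uintptr_t v;
    if (cuckoo_search(7, &v)) {
        printf("7 -> %lu\n", (unsigned long)v);
    }
    return 0;
}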
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/ctl.h b/contrib/libs/jemalloc/include/jemalloc/internal/ctl.h
index 1d1aacc6f4..febb28fe24 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/ctl.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/ctl.h
@@ -1,110 +1,110 @@
-#ifndef JEMALLOC_INTERNAL_CTL_H
-#define JEMALLOC_INTERNAL_CTL_H
-
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/mutex_prof.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/stats.h"
-
-/* Maximum ctl tree depth. */
-#define CTL_MAX_DEPTH 7
-
-typedef struct ctl_node_s {
- bool named;
-} ctl_node_t;
-
-typedef struct ctl_named_node_s {
- ctl_node_t node;
- const char *name;
+#ifndef JEMALLOC_INTERNAL_CTL_H
+#define JEMALLOC_INTERNAL_CTL_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/stats.h"
+
+/* Maximum ctl tree depth. */
+#define CTL_MAX_DEPTH 7
+
+typedef struct ctl_node_s {
+ bool named;
+} ctl_node_t;
+
+typedef struct ctl_named_node_s {
+ ctl_node_t node;
+ const char *name;
/* If (nchildren == 0), this is a terminal node. */
- size_t nchildren;
- const ctl_node_t *children;
- int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
- size_t);
-} ctl_named_node_t;
+ size_t nchildren;
+ const ctl_node_t *children;
+ int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
+ size_t);
+} ctl_named_node_t;
-typedef struct ctl_indexed_node_s {
- struct ctl_node_s node;
- const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
- size_t);
-} ctl_indexed_node_t;
+typedef struct ctl_indexed_node_s {
+ struct ctl_node_s node;
+ const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
+ size_t);
+} ctl_indexed_node_t;
-typedef struct ctl_arena_stats_s {
- arena_stats_t astats;
+typedef struct ctl_arena_stats_s {
+ arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
- size_t allocated_small;
- uint64_t nmalloc_small;
- uint64_t ndalloc_small;
- uint64_t nrequests_small;
- uint64_t nfills_small;
- uint64_t nflushes_small;
-
- bin_stats_t bstats[SC_NBINS];
- arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
- arena_stats_extents_t estats[SC_NPSIZES];
-} ctl_arena_stats_t;
-
-typedef struct ctl_stats_s {
- size_t allocated;
- size_t active;
- size_t metadata;
- size_t metadata_thp;
- size_t resident;
- size_t mapped;
- size_t retained;
-
- background_thread_stats_t background_thread;
- mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
-} ctl_stats_t;
-
-typedef struct ctl_arena_s ctl_arena_t;
-struct ctl_arena_s {
- unsigned arena_ind;
- bool initialized;
- ql_elm(ctl_arena_t) destroyed_link;
-
- /* Basic stats, supported even if !config_stats. */
- unsigned nthreads;
- const char *dss;
- ssize_t dirty_decay_ms;
- ssize_t muzzy_decay_ms;
- size_t pactive;
- size_t pdirty;
- size_t pmuzzy;
-
- /* NULL if !config_stats. */
- ctl_arena_stats_t *astats;
+ size_t allocated_small;
+ uint64_t nmalloc_small;
+ uint64_t ndalloc_small;
+ uint64_t nrequests_small;
+ uint64_t nfills_small;
+ uint64_t nflushes_small;
+
+ bin_stats_t bstats[SC_NBINS];
+ arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
+ arena_stats_extents_t estats[SC_NPSIZES];
+} ctl_arena_stats_t;
+
+typedef struct ctl_stats_s {
+ size_t allocated;
+ size_t active;
+ size_t metadata;
+ size_t metadata_thp;
+ size_t resident;
+ size_t mapped;
+ size_t retained;
+
+ background_thread_stats_t background_thread;
+ mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
+} ctl_stats_t;
+
+typedef struct ctl_arena_s ctl_arena_t;
+struct ctl_arena_s {
+ unsigned arena_ind;
+ bool initialized;
+ ql_elm(ctl_arena_t) destroyed_link;
+
+ /* Basic stats, supported even if !config_stats. */
+ unsigned nthreads;
+ const char *dss;
+ ssize_t dirty_decay_ms;
+ ssize_t muzzy_decay_ms;
+ size_t pactive;
+ size_t pdirty;
+ size_t pmuzzy;
+
+ /* NULL if !config_stats. */
+ ctl_arena_stats_t *astats;
};
-typedef struct ctl_arenas_s {
- uint64_t epoch;
- unsigned narenas;
- ql_head(ctl_arena_t) destroyed;
-
- /*
- * Element 0 corresponds to merged stats for extant arenas (accessed via
- * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
- * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
- * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
- */
- ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
-} ctl_arenas_t;
-
-int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+typedef struct ctl_arenas_s {
+ uint64_t epoch;
+ unsigned narenas;
+ ql_head(ctl_arena_t) destroyed;
+
+ /*
+ * Element 0 corresponds to merged stats for extant arenas (accessed via
+ * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
+ * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
+ * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
+ */
+ ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
+} ctl_arenas_t;
+
+int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
-int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
-
-int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen);
-bool ctl_boot(void);
-void ctl_prefork(tsdn_t *tsdn);
-void ctl_postfork_parent(tsdn_t *tsdn);
-void ctl_postfork_child(tsdn_t *tsdn);
-
-#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
+int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
+
+int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen);
+bool ctl_boot(void);
+void ctl_prefork(tsdn_t *tsdn);
+void ctl_postfork_parent(tsdn_t *tsdn);
+void ctl_postfork_child(tsdn_t *tsdn);
+
+#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
@@ -114,7 +114,7 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)
-#define xmallctlnametomib(name, mibp, miblenp) do { \
+#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
@@ -122,7 +122,7 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)
-#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
+#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
@@ -131,4 +131,4 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)
-#endif /* JEMALLOC_INTERNAL_CTL_H */
+#endif /* JEMALLOC_INTERNAL_CTL_H */
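The ctl tree above ultimately backs the public mallctl() interface (the xmallctl wrappers call je_mallctl directly). A small consumer-side example, assuming a jemalloc build with stats enabled and the unprefixed public API, reads stats.allocated after refreshing the cached statistics through the epoch control:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <jemalloc/jemalloc.h>

int
main(void) {
    void *p = malloc(1024 * 1024);

    /* Stats are cached; bump the epoch to refresh them before reading. */
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    if (mallctl("epoch", &epoch, &sz, &epoch, sz) != 0) {
        fprintf(stderr, "mallctl(\"epoch\") failed\n");
        return 1;
    }

    size_t allocated;
    sz = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0) {
        fprintf(stderr, "mallctl(\"stats.allocated\") failed\n");
        return 1;
    }
    printf("stats.allocated = %zu bytes\n", allocated);

    free(p);
    return 0;
}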
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/div.h b/contrib/libs/jemalloc/include/jemalloc/internal/div.h
index aebae9398c..2668c70fa1 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/div.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/div.h
@@ -1,41 +1,41 @@
-#ifndef JEMALLOC_INTERNAL_DIV_H
-#define JEMALLOC_INTERNAL_DIV_H
-
-#include "jemalloc/internal/assert.h"
-
-/*
- * This module does the division that computes the index of a region in a slab,
- * given its offset relative to the base.
- * That is, given a divisor d, an n = i * d (all integers), we'll return i.
- * We do some pre-computation to do this more quickly than a CPU division
- * instruction.
- * We bound n < 2^32, and don't support dividing by one.
- */
-
-typedef struct div_info_s div_info_t;
-struct div_info_s {
- uint32_t magic;
-#ifdef JEMALLOC_DEBUG
- size_t d;
-#endif
-};
-
-void div_init(div_info_t *div_info, size_t divisor);
-
-static inline size_t
-div_compute(div_info_t *div_info, size_t n) {
- assert(n <= (uint32_t)-1);
- /*
- * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine,
- * the compilers I tried were all smart enough to turn this into the
- * appropriate "get the high 32 bits of the result of a multiply" (e.g.
- * mul; mov edx eax; on x86, umull on arm, etc.).
- */
- size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
-#ifdef JEMALLOC_DEBUG
- assert(i * div_info->d == n);
-#endif
- return i;
-}
-
-#endif /* JEMALLOC_INTERNAL_DIV_H */
+#ifndef JEMALLOC_INTERNAL_DIV_H
+#define JEMALLOC_INTERNAL_DIV_H
+
+#include "jemalloc/internal/assert.h"
+
+/*
+ * This module does the division that computes the index of a region in a slab,
+ * given its offset relative to the base.
+ * That is, given a divisor d, an n = i * d (all integers), we'll return i.
+ * We do some pre-computation to do this more quickly than a CPU division
+ * instruction.
+ * We bound n < 2^32, and don't support dividing by one.
+ */
+
+typedef struct div_info_s div_info_t;
+struct div_info_s {
+ uint32_t magic;
+#ifdef JEMALLOC_DEBUG
+ size_t d;
+#endif
+};
+
+void div_init(div_info_t *div_info, size_t divisor);
+
+static inline size_t
+div_compute(div_info_t *div_info, size_t n) {
+ assert(n <= (uint32_t)-1);
+ /*
+ * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine,
+ * the compilers I tried were all smart enough to turn this into the
+ * appropriate "get the high 32 bits of the result of a multiply" (e.g.
+ * mul; mov edx eax; on x86, umull on arm, etc.).
+ */
+ size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
+#ifdef JEMALLOC_DEBUG
+ assert(i * div_info->d == n);
+#endif
+ return i;
+}
+
+#endif /* JEMALLOC_INTERNAL_DIV_H */
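The div.h comment above describes dividing by a precomputed 32-bit reciprocal. One standard construction that satisfies the stated bounds (n = i * d exactly, n < 2^32, d >= 2) is magic = ceil(2^32 / d); this may differ in detail from what div_init() actually computes, so treat it as a sketch of the idea rather than the library's formula:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* magic = ceil(2^32 / d); fits in 32 bits for d >= 2. */
static uint32_t
div_magic(uint32_t d) {
    return (uint32_t)((((uint64_t)1 << 32) + d - 1) / d);
}

/* i = high 32 bits of n * magic, i.e. the mov; imul; shr sequence above. */
static uint32_t
div_apply(uint32_t n, uint32_t magic) {
    return (uint32_t)(((uint64_t)n * magic) >> 32);
}

int
main(void) {
    for (uint32_t d = 2; d <= 512; d++) {
        uint32_t magic = div_magic(d);
        for (uint32_t i = 0; i < 10000; i++) {
            assert(div_apply(i * d, magic) == i);
        }
    }
    puts("magic division ok");
    return 0;
}

The exactness argument: d * magic = 2^32 + s with 0 <= s < d, so n * magic = i * 2^32 + i * s, and i * s < 2^32 whenever n = i * d < 2^32, leaving exactly i in the high word.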
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/emitter.h b/contrib/libs/jemalloc/include/jemalloc/internal/emitter.h
index 542bc79c36..8629fa5ab0 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/emitter.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/emitter.h
@@ -1,486 +1,486 @@
-#ifndef JEMALLOC_INTERNAL_EMITTER_H
-#define JEMALLOC_INTERNAL_EMITTER_H
-
-#include "jemalloc/internal/ql.h"
-
-typedef enum emitter_output_e emitter_output_t;
-enum emitter_output_e {
- emitter_output_json,
- emitter_output_table
-};
-
-typedef enum emitter_justify_e emitter_justify_t;
-enum emitter_justify_e {
- emitter_justify_left,
- emitter_justify_right,
- /* Not for users; just to pass to internal functions. */
- emitter_justify_none
-};
-
-typedef enum emitter_type_e emitter_type_t;
-enum emitter_type_e {
- emitter_type_bool,
- emitter_type_int,
- emitter_type_unsigned,
- emitter_type_uint32,
- emitter_type_uint64,
- emitter_type_size,
- emitter_type_ssize,
- emitter_type_string,
- /*
- * A title is a column title in a table; it's just a string, but it's
- * not quoted.
- */
- emitter_type_title,
-};
-
-typedef struct emitter_col_s emitter_col_t;
-struct emitter_col_s {
- /* Filled in by the user. */
- emitter_justify_t justify;
- int width;
- emitter_type_t type;
- union {
- bool bool_val;
- int int_val;
- unsigned unsigned_val;
- uint32_t uint32_val;
- uint32_t uint32_t_val;
- uint64_t uint64_val;
- uint64_t uint64_t_val;
- size_t size_val;
- ssize_t ssize_val;
- const char *str_val;
- };
-
- /* Filled in by initialization. */
- ql_elm(emitter_col_t) link;
-};
-
-typedef struct emitter_row_s emitter_row_t;
-struct emitter_row_s {
- ql_head(emitter_col_t) cols;
-};
-
-typedef struct emitter_s emitter_t;
-struct emitter_s {
- emitter_output_t output;
- /* The output information. */
- void (*write_cb)(void *, const char *);
- void *cbopaque;
- int nesting_depth;
- /* True if we've already emitted a value at the given depth. */
- bool item_at_depth;
- /* True if we emitted a key and will emit corresponding value next. */
- bool emitted_key;
-};
-
-/* Internal convenience function. Write to the emitter the given string. */
-JEMALLOC_FORMAT_PRINTF(2, 3)
-static inline void
-emitter_printf(emitter_t *emitter, const char *format, ...) {
- va_list ap;
-
- va_start(ap, format);
- malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
- va_end(ap);
-}
-
-static inline const char * JEMALLOC_FORMAT_ARG(3)
-emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
- emitter_justify_t justify, int width) {
- size_t written;
- fmt_specifier++;
- if (justify == emitter_justify_none) {
- written = malloc_snprintf(out_fmt, out_size,
- "%%%s", fmt_specifier);
- } else if (justify == emitter_justify_left) {
- written = malloc_snprintf(out_fmt, out_size,
- "%%-%d%s", width, fmt_specifier);
- } else {
- written = malloc_snprintf(out_fmt, out_size,
- "%%%d%s", width, fmt_specifier);
- }
- /* Only happens in case of bad format string, which *we* choose. */
- assert(written < out_size);
- return out_fmt;
-}
-
-/*
- * Internal. Emit the given value type in the relevant encoding (so that the
- * bool true gets mapped to json "true", but the string "true" gets mapped to
- * json "\"true\"", for instance.
- *
- * Width is ignored if justify is emitter_justify_none.
- */
-static inline void
-emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
- emitter_type_t value_type, const void *value) {
- size_t str_written;
-#define BUF_SIZE 256
-#define FMT_SIZE 10
- /*
- * We dynamically generate a format string to emit, to let us use the
- * snprintf machinery. This is kinda hacky, but gets the job done
- * quickly without having to think about the various snprintf edge
- * cases.
- */
- char fmt[FMT_SIZE];
- char buf[BUF_SIZE];
-
-#define EMIT_SIMPLE(type, format) \
- emitter_printf(emitter, \
- emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \
- *(const type *)value);
-
- switch (value_type) {
- case emitter_type_bool:
- emitter_printf(emitter,
- emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
- *(const bool *)value ? "true" : "false");
- break;
- case emitter_type_int:
- EMIT_SIMPLE(int, "%d")
- break;
- case emitter_type_unsigned:
- EMIT_SIMPLE(unsigned, "%u")
- break;
- case emitter_type_ssize:
- EMIT_SIMPLE(ssize_t, "%zd")
- break;
- case emitter_type_size:
- EMIT_SIMPLE(size_t, "%zu")
- break;
- case emitter_type_string:
- str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
- *(const char *const *)value);
- /*
- * We control the strings we output; we shouldn't get anything
- * anywhere near the fmt size.
- */
- assert(str_written < BUF_SIZE);
- emitter_printf(emitter,
- emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf);
- break;
- case emitter_type_uint32:
- EMIT_SIMPLE(uint32_t, "%" FMTu32)
- break;
- case emitter_type_uint64:
- EMIT_SIMPLE(uint64_t, "%" FMTu64)
- break;
- case emitter_type_title:
- EMIT_SIMPLE(char *const, "%s");
- break;
- default:
- unreachable();
- }
-#undef BUF_SIZE
-#undef FMT_SIZE
-}
-
-
-/* Internal functions. In json mode, tracks nesting state. */
-static inline void
-emitter_nest_inc(emitter_t *emitter) {
- emitter->nesting_depth++;
- emitter->item_at_depth = false;
-}
-
-static inline void
-emitter_nest_dec(emitter_t *emitter) {
- emitter->nesting_depth--;
- emitter->item_at_depth = true;
-}
-
-static inline void
-emitter_indent(emitter_t *emitter) {
- int amount = emitter->nesting_depth;
- const char *indent_str;
- if (emitter->output == emitter_output_json) {
- indent_str = "\t";
- } else {
- amount *= 2;
- indent_str = " ";
- }
- for (int i = 0; i < amount; i++) {
- emitter_printf(emitter, "%s", indent_str);
- }
-}
-
-static inline void
-emitter_json_key_prefix(emitter_t *emitter) {
- if (emitter->emitted_key) {
- emitter->emitted_key = false;
- return;
- }
- emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : "");
- emitter_indent(emitter);
-}
-
-/******************************************************************************/
-/* Public functions for emitter_t. */
-
-static inline void
-emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
- void (*write_cb)(void *, const char *), void *cbopaque) {
- emitter->output = emitter_output;
- emitter->write_cb = write_cb;
- emitter->cbopaque = cbopaque;
- emitter->item_at_depth = false;
- emitter->emitted_key = false;
- emitter->nesting_depth = 0;
-}
-
-/******************************************************************************/
-/* JSON public API. */
-
+#ifndef JEMALLOC_INTERNAL_EMITTER_H
+#define JEMALLOC_INTERNAL_EMITTER_H
+
+#include "jemalloc/internal/ql.h"
+
+typedef enum emitter_output_e emitter_output_t;
+enum emitter_output_e {
+ emitter_output_json,
+ emitter_output_table
+};
+
+typedef enum emitter_justify_e emitter_justify_t;
+enum emitter_justify_e {
+ emitter_justify_left,
+ emitter_justify_right,
+ /* Not for users; just to pass to internal functions. */
+ emitter_justify_none
+};
+
+typedef enum emitter_type_e emitter_type_t;
+enum emitter_type_e {
+ emitter_type_bool,
+ emitter_type_int,
+ emitter_type_unsigned,
+ emitter_type_uint32,
+ emitter_type_uint64,
+ emitter_type_size,
+ emitter_type_ssize,
+ emitter_type_string,
+ /*
+ * A title is a column title in a table; it's just a string, but it's
+ * not quoted.
+ */
+ emitter_type_title,
+};
+
+typedef struct emitter_col_s emitter_col_t;
+struct emitter_col_s {
+ /* Filled in by the user. */
+ emitter_justify_t justify;
+ int width;
+ emitter_type_t type;
+ union {
+ bool bool_val;
+ int int_val;
+ unsigned unsigned_val;
+ uint32_t uint32_val;
+ uint32_t uint32_t_val;
+ uint64_t uint64_val;
+ uint64_t uint64_t_val;
+ size_t size_val;
+ ssize_t ssize_val;
+ const char *str_val;
+ };
+
+ /* Filled in by initialization. */
+ ql_elm(emitter_col_t) link;
+};
+
+typedef struct emitter_row_s emitter_row_t;
+struct emitter_row_s {
+ ql_head(emitter_col_t) cols;
+};
+
+typedef struct emitter_s emitter_t;
+struct emitter_s {
+ emitter_output_t output;
+ /* The output information. */
+ void (*write_cb)(void *, const char *);
+ void *cbopaque;
+ int nesting_depth;
+ /* True if we've already emitted a value at the given depth. */
+ bool item_at_depth;
+ /* True if we emitted a key and will emit corresponding value next. */
+ bool emitted_key;
+};
+
+/* Internal convenience function. Write to the emitter the given string. */
+JEMALLOC_FORMAT_PRINTF(2, 3)
+static inline void
+emitter_printf(emitter_t *emitter, const char *format, ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
+ va_end(ap);
+}
+
+static inline const char * JEMALLOC_FORMAT_ARG(3)
+emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
+ emitter_justify_t justify, int width) {
+ size_t written;
+ fmt_specifier++;
+ if (justify == emitter_justify_none) {
+ written = malloc_snprintf(out_fmt, out_size,
+ "%%%s", fmt_specifier);
+ } else if (justify == emitter_justify_left) {
+ written = malloc_snprintf(out_fmt, out_size,
+ "%%-%d%s", width, fmt_specifier);
+ } else {
+ written = malloc_snprintf(out_fmt, out_size,
+ "%%%d%s", width, fmt_specifier);
+ }
+ /* Only happens in case of bad format string, which *we* choose. */
+ assert(written < out_size);
+ return out_fmt;
+}
+
/*
- * Emits a key (e.g. as appears in an object). The next json entity emitted will
- * be the corresponding value.
- */
-static inline void
-emitter_json_key(emitter_t *emitter, const char *json_key) {
- if (emitter->output == emitter_output_json) {
- emitter_json_key_prefix(emitter);
- emitter_printf(emitter, "\"%s\": ", json_key);
- emitter->emitted_key = true;
- }
-}
-
-static inline void
-emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
- const void *value) {
- if (emitter->output == emitter_output_json) {
- emitter_json_key_prefix(emitter);
- emitter_print_value(emitter, emitter_justify_none, -1,
- value_type, value);
- emitter->item_at_depth = true;
- }
-}
-
-/* Shorthand for calling emitter_json_key and then emitter_json_value. */
-static inline void
-emitter_json_kv(emitter_t *emitter, const char *json_key,
- emitter_type_t value_type, const void *value) {
- emitter_json_key(emitter, json_key);
- emitter_json_value(emitter, value_type, value);
-}
-
-static inline void
-emitter_json_array_begin(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
- emitter_json_key_prefix(emitter);
- emitter_printf(emitter, "[");
- emitter_nest_inc(emitter);
- }
-}
-
-/* Shorthand for calling emitter_json_key and then emitter_json_array_begin. */
-static inline void
-emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) {
- emitter_json_key(emitter, json_key);
- emitter_json_array_begin(emitter);
-}
-
-static inline void
-emitter_json_array_end(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
- assert(emitter->nesting_depth > 0);
- emitter_nest_dec(emitter);
- emitter_printf(emitter, "\n");
- emitter_indent(emitter);
- emitter_printf(emitter, "]");
- }
-}
-
-static inline void
-emitter_json_object_begin(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
- emitter_json_key_prefix(emitter);
- emitter_printf(emitter, "{");
- emitter_nest_inc(emitter);
- }
-}
-
-/* Shorthand for calling emitter_json_key and then emitter_json_object_begin. */
-static inline void
-emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) {
- emitter_json_key(emitter, json_key);
- emitter_json_object_begin(emitter);
-}
-
-static inline void
-emitter_json_object_end(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
- assert(emitter->nesting_depth > 0);
- emitter_nest_dec(emitter);
- emitter_printf(emitter, "\n");
- emitter_indent(emitter);
- emitter_printf(emitter, "}");
- }
-}
-
-
-/******************************************************************************/
-/* Table public API. */
-
-static inline void
-emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
- if (emitter->output == emitter_output_table) {
- emitter_indent(emitter);
- emitter_printf(emitter, "%s\n", table_key);
- emitter_nest_inc(emitter);
- }
-}
-
-static inline void
-emitter_table_dict_end(emitter_t *emitter) {
- if (emitter->output == emitter_output_table) {
- emitter_nest_dec(emitter);
- }
-}
-
-static inline void
-emitter_table_kv_note(emitter_t *emitter, const char *table_key,
- emitter_type_t value_type, const void *value,
- const char *table_note_key, emitter_type_t table_note_value_type,
- const void *table_note_value) {
- if (emitter->output == emitter_output_table) {
- emitter_indent(emitter);
- emitter_printf(emitter, "%s: ", table_key);
- emitter_print_value(emitter, emitter_justify_none, -1,
- value_type, value);
- if (table_note_key != NULL) {
- emitter_printf(emitter, " (%s: ", table_note_key);
- emitter_print_value(emitter, emitter_justify_none, -1,
- table_note_value_type, table_note_value);
- emitter_printf(emitter, ")");
- }
- emitter_printf(emitter, "\n");
- }
- emitter->item_at_depth = true;
-}
-
-static inline void
-emitter_table_kv(emitter_t *emitter, const char *table_key,
- emitter_type_t value_type, const void *value) {
- emitter_table_kv_note(emitter, table_key, value_type, value, NULL,
- emitter_type_bool, NULL);
-}
-
-
-/* Write to the emitter the given string, but only in table mode. */
-JEMALLOC_FORMAT_PRINTF(2, 3)
-static inline void
-emitter_table_printf(emitter_t *emitter, const char *format, ...) {
- if (emitter->output == emitter_output_table) {
- va_list ap;
- va_start(ap, format);
- malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
- va_end(ap);
- }
-}
-
-static inline void
-emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
- if (emitter->output != emitter_output_table) {
- return;
- }
- emitter_col_t *col;
- ql_foreach(col, &row->cols, link) {
- emitter_print_value(emitter, col->justify, col->width,
- col->type, (const void *)&col->bool_val);
- }
- emitter_table_printf(emitter, "\n");
-}
-
-static inline void
-emitter_row_init(emitter_row_t *row) {
- ql_new(&row->cols);
-}
-
-static inline void
-emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
- ql_elm_new(col, link);
- ql_tail_insert(&row->cols, col, link);
-}
-
-
-/******************************************************************************/
-/*
- * Generalized public API. Emits using either JSON or table, according to
- * settings in the emitter_t. */
-
-/*
- * Note emits a different kv pair as well, but only in table mode. Omits the
- * note if table_note_key is NULL.
- */
-static inline void
-emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
- emitter_type_t value_type, const void *value,
- const char *table_note_key, emitter_type_t table_note_value_type,
- const void *table_note_value) {
- if (emitter->output == emitter_output_json) {
- emitter_json_key(emitter, json_key);
- emitter_json_value(emitter, value_type, value);
- } else {
- emitter_table_kv_note(emitter, table_key, value_type, value,
- table_note_key, table_note_value_type, table_note_value);
- }
- emitter->item_at_depth = true;
-}
-
-static inline void
-emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
- emitter_type_t value_type, const void *value) {
- emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
- emitter_type_bool, NULL);
-}
-
-static inline void
-emitter_dict_begin(emitter_t *emitter, const char *json_key,
- const char *table_header) {
- if (emitter->output == emitter_output_json) {
- emitter_json_key(emitter, json_key);
- emitter_json_object_begin(emitter);
- } else {
- emitter_table_dict_begin(emitter, table_header);
- }
-}
-
-static inline void
-emitter_dict_end(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
- emitter_json_object_end(emitter);
- } else {
- emitter_table_dict_end(emitter);
- }
-}
-
-static inline void
-emitter_begin(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
- assert(emitter->nesting_depth == 0);
- emitter_printf(emitter, "{");
- emitter_nest_inc(emitter);
- } else {
- /*
- * This guarantees that we always call write_cb at least once.
- * This is useful if some invariant is established by each call
- * to write_cb, but doesn't hold initially: e.g., some buffer
- * holds a null-terminated string.
- */
- emitter_printf(emitter, "%s", "");
- }
-}
-
-static inline void
-emitter_end(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
- assert(emitter->nesting_depth == 1);
- emitter_nest_dec(emitter);
- emitter_printf(emitter, "\n}\n");
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_EMITTER_H */
+ * Internal. Emit the given value type in the relevant encoding (so that the
+ * bool true gets mapped to json "true", but the string "true" gets mapped to
+ * json "\"true\"", for instance.
+ *
+ * Width is ignored if justify is emitter_justify_none.
+ */
+static inline void
+emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
+ emitter_type_t value_type, const void *value) {
+ size_t str_written;
+#define BUF_SIZE 256
+#define FMT_SIZE 10
+ /*
+ * We dynamically generate a format string to emit, to let us use the
+ * snprintf machinery. This is kinda hacky, but gets the job done
+ * quickly without having to think about the various snprintf edge
+ * cases.
+ */
+ char fmt[FMT_SIZE];
+ char buf[BUF_SIZE];
+
+#define EMIT_SIMPLE(type, format) \
+ emitter_printf(emitter, \
+ emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \
+ *(const type *)value);
+
+ switch (value_type) {
+ case emitter_type_bool:
+ emitter_printf(emitter,
+ emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
+ *(const bool *)value ? "true" : "false");
+ break;
+ case emitter_type_int:
+ EMIT_SIMPLE(int, "%d")
+ break;
+ case emitter_type_unsigned:
+ EMIT_SIMPLE(unsigned, "%u")
+ break;
+ case emitter_type_ssize:
+ EMIT_SIMPLE(ssize_t, "%zd")
+ break;
+ case emitter_type_size:
+ EMIT_SIMPLE(size_t, "%zu")
+ break;
+ case emitter_type_string:
+ str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
+ *(const char *const *)value);
+ /*
+ * We control the strings we output; we shouldn't get anything
+ * anywhere near the fmt size.
+ */
+ assert(str_written < BUF_SIZE);
+ emitter_printf(emitter,
+ emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf);
+ break;
+ case emitter_type_uint32:
+ EMIT_SIMPLE(uint32_t, "%" FMTu32)
+ break;
+ case emitter_type_uint64:
+ EMIT_SIMPLE(uint64_t, "%" FMTu64)
+ break;
+ case emitter_type_title:
+ EMIT_SIMPLE(char *const, "%s");
+ break;
+ default:
+ unreachable();
+ }
+#undef BUF_SIZE
+#undef FMT_SIZE
+}
+
+
+/* Internal functions. In json mode, tracks nesting state. */
+static inline void
+emitter_nest_inc(emitter_t *emitter) {
+ emitter->nesting_depth++;
+ emitter->item_at_depth = false;
+}
+
+static inline void
+emitter_nest_dec(emitter_t *emitter) {
+ emitter->nesting_depth--;
+ emitter->item_at_depth = true;
+}
+
+static inline void
+emitter_indent(emitter_t *emitter) {
+ int amount = emitter->nesting_depth;
+ const char *indent_str;
+ if (emitter->output == emitter_output_json) {
+ indent_str = "\t";
+ } else {
+ amount *= 2;
+ indent_str = " ";
+ }
+ for (int i = 0; i < amount; i++) {
+ emitter_printf(emitter, "%s", indent_str);
+ }
+}
+
+static inline void
+emitter_json_key_prefix(emitter_t *emitter) {
+ if (emitter->emitted_key) {
+ emitter->emitted_key = false;
+ return;
+ }
+ emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : "");
+ emitter_indent(emitter);
+}
+
+/******************************************************************************/
+/* Public functions for emitter_t. */
+
+static inline void
+emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
+ void (*write_cb)(void *, const char *), void *cbopaque) {
+ emitter->output = emitter_output;
+ emitter->write_cb = write_cb;
+ emitter->cbopaque = cbopaque;
+ emitter->item_at_depth = false;
+ emitter->emitted_key = false;
+ emitter->nesting_depth = 0;
+}
+
+/******************************************************************************/
+/* JSON public API. */
+
+/*
+ * Emits a key (e.g. as appears in an object). The next json entity emitted will
+ * be the corresponding value.
+ */
+static inline void
+emitter_json_key(emitter_t *emitter, const char *json_key) {
+ if (emitter->output == emitter_output_json) {
+ emitter_json_key_prefix(emitter);
+ emitter_printf(emitter, "\"%s\": ", json_key);
+ emitter->emitted_key = true;
+ }
+}
+
+static inline void
+emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
+ const void *value) {
+ if (emitter->output == emitter_output_json) {
+ emitter_json_key_prefix(emitter);
+ emitter_print_value(emitter, emitter_justify_none, -1,
+ value_type, value);
+ emitter->item_at_depth = true;
+ }
+}
+
+/* Shorthand for calling emitter_json_key and then emitter_json_value. */
+static inline void
+emitter_json_kv(emitter_t *emitter, const char *json_key,
+ emitter_type_t value_type, const void *value) {
+ emitter_json_key(emitter, json_key);
+ emitter_json_value(emitter, value_type, value);
+}
+
+static inline void
+emitter_json_array_begin(emitter_t *emitter) {
+ if (emitter->output == emitter_output_json) {
+ emitter_json_key_prefix(emitter);
+ emitter_printf(emitter, "[");
+ emitter_nest_inc(emitter);
+ }
+}
+
+/* Shorthand for calling emitter_json_key and then emitter_json_array_begin. */
+static inline void
+emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) {
+ emitter_json_key(emitter, json_key);
+ emitter_json_array_begin(emitter);
+}
+
+static inline void
+emitter_json_array_end(emitter_t *emitter) {
+ if (emitter->output == emitter_output_json) {
+ assert(emitter->nesting_depth > 0);
+ emitter_nest_dec(emitter);
+ emitter_printf(emitter, "\n");
+ emitter_indent(emitter);
+ emitter_printf(emitter, "]");
+ }
+}
+
+static inline void
+emitter_json_object_begin(emitter_t *emitter) {
+ if (emitter->output == emitter_output_json) {
+ emitter_json_key_prefix(emitter);
+ emitter_printf(emitter, "{");
+ emitter_nest_inc(emitter);
+ }
+}
+
+/* Shorthand for calling emitter_json_key and then emitter_json_object_begin. */
+static inline void
+emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) {
+ emitter_json_key(emitter, json_key);
+ emitter_json_object_begin(emitter);
+}
+
+static inline void
+emitter_json_object_end(emitter_t *emitter) {
+ if (emitter->output == emitter_output_json) {
+ assert(emitter->nesting_depth > 0);
+ emitter_nest_dec(emitter);
+ emitter_printf(emitter, "\n");
+ emitter_indent(emitter);
+ emitter_printf(emitter, "}");
+ }
+}
+
+
+/******************************************************************************/
+/* Table public API. */
+
+static inline void
+emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
+ if (emitter->output == emitter_output_table) {
+ emitter_indent(emitter);
+ emitter_printf(emitter, "%s\n", table_key);
+ emitter_nest_inc(emitter);
+ }
+}
+
+static inline void
+emitter_table_dict_end(emitter_t *emitter) {
+ if (emitter->output == emitter_output_table) {
+ emitter_nest_dec(emitter);
+ }
+}
+
+static inline void
+emitter_table_kv_note(emitter_t *emitter, const char *table_key,
+ emitter_type_t value_type, const void *value,
+ const char *table_note_key, emitter_type_t table_note_value_type,
+ const void *table_note_value) {
+ if (emitter->output == emitter_output_table) {
+ emitter_indent(emitter);
+ emitter_printf(emitter, "%s: ", table_key);
+ emitter_print_value(emitter, emitter_justify_none, -1,
+ value_type, value);
+ if (table_note_key != NULL) {
+ emitter_printf(emitter, " (%s: ", table_note_key);
+ emitter_print_value(emitter, emitter_justify_none, -1,
+ table_note_value_type, table_note_value);
+ emitter_printf(emitter, ")");
+ }
+ emitter_printf(emitter, "\n");
+ }
+ emitter->item_at_depth = true;
+}
+
+static inline void
+emitter_table_kv(emitter_t *emitter, const char *table_key,
+ emitter_type_t value_type, const void *value) {
+ emitter_table_kv_note(emitter, table_key, value_type, value, NULL,
+ emitter_type_bool, NULL);
+}
+
+
+/* Write to the emitter the given string, but only in table mode. */
+JEMALLOC_FORMAT_PRINTF(2, 3)
+static inline void
+emitter_table_printf(emitter_t *emitter, const char *format, ...) {
+ if (emitter->output == emitter_output_table) {
+ va_list ap;
+ va_start(ap, format);
+ malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
+ va_end(ap);
+ }
+}
+
+static inline void
+emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
+ if (emitter->output != emitter_output_table) {
+ return;
+ }
+ emitter_col_t *col;
+ ql_foreach(col, &row->cols, link) {
+ emitter_print_value(emitter, col->justify, col->width,
+ col->type, (const void *)&col->bool_val);
+ }
+ emitter_table_printf(emitter, "\n");
+}
+
+static inline void
+emitter_row_init(emitter_row_t *row) {
+ ql_new(&row->cols);
+}
+
+static inline void
+emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
+ ql_elm_new(col, link);
+ ql_tail_insert(&row->cols, col, link);
+}
+
+
+/******************************************************************************/
+/*
+ * Generalized public API. Emits using either JSON or table, according to
+ * settings in the emitter_t. */
+
+/*
+ * Note emits a different kv pair as well, but only in table mode. Omits the
+ * note if table_note_key is NULL.
+ */
+static inline void
+emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
+ emitter_type_t value_type, const void *value,
+ const char *table_note_key, emitter_type_t table_note_value_type,
+ const void *table_note_value) {
+ if (emitter->output == emitter_output_json) {
+ emitter_json_key(emitter, json_key);
+ emitter_json_value(emitter, value_type, value);
+ } else {
+ emitter_table_kv_note(emitter, table_key, value_type, value,
+ table_note_key, table_note_value_type, table_note_value);
+ }
+ emitter->item_at_depth = true;
+}
+
+static inline void
+emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
+ emitter_type_t value_type, const void *value) {
+ emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
+ emitter_type_bool, NULL);
+}
+
+static inline void
+emitter_dict_begin(emitter_t *emitter, const char *json_key,
+ const char *table_header) {
+ if (emitter->output == emitter_output_json) {
+ emitter_json_key(emitter, json_key);
+ emitter_json_object_begin(emitter);
+ } else {
+ emitter_table_dict_begin(emitter, table_header);
+ }
+}
+
+static inline void
+emitter_dict_end(emitter_t *emitter) {
+ if (emitter->output == emitter_output_json) {
+ emitter_json_object_end(emitter);
+ } else {
+ emitter_table_dict_end(emitter);
+ }
+}
+
+static inline void
+emitter_begin(emitter_t *emitter) {
+ if (emitter->output == emitter_output_json) {
+ assert(emitter->nesting_depth == 0);
+ emitter_printf(emitter, "{");
+ emitter_nest_inc(emitter);
+ } else {
+ /*
+ * This guarantees that we always call write_cb at least once.
+ * This is useful if some invariant is established by each call
+ * to write_cb, but doesn't hold initially: e.g., some buffer
+ * holds a null-terminated string.
+ */
+ emitter_printf(emitter, "%s", "");
+ }
+}
+
+static inline void
+emitter_end(emitter_t *emitter) {
+ if (emitter->output == emitter_output_json) {
+ assert(emitter->nesting_depth == 1);
+ emitter_nest_dec(emitter);
+ emitter_printf(emitter, "\n}\n");
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_EMITTER_H */
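
The header restored above is the entire emitter surface, so a short usage sketch may help orient the stats-related changes elsewhere in this commit. This is a hedged illustration, not code from the patch: it assumes it is compiled inside the jemalloc source tree (the same way src/stats.c consumes this header), and the write_to_stderr callback and the key names are made up for the example.

#include <stdbool.h>
#include <stdio.h>

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/emitter.h"

/* Illustrative write callback: forward each chunk of output to stderr. */
static void
write_to_stderr(void *cbopaque, const char *s) {
	(void)cbopaque;
	fputs(s, stderr);
}

/* Emit a tiny two-field report; emitter_kv renders it as JSON or a table. */
static void
emit_example_report(bool json) {
	emitter_t emitter;
	bool background_thread = true;
	unsigned narenas = 4;

	emitter_init(&emitter,
	    json ? emitter_output_json : emitter_output_table,
	    write_to_stderr, NULL);
	emitter_begin(&emitter);
	emitter_kv(&emitter, "background_thread", "Background thread",
	    emitter_type_bool, &background_thread);
	emitter_kv(&emitter, "narenas", "Arenas",
	    emitter_type_unsigned, &narenas);
	emitter_end(&emitter);
}

In JSON mode these calls print a two-key object; in table mode the very same calls print two "label: value" lines, which is the point of routing everything through the shared emitter_kv path.
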
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/extent_dss.h b/contrib/libs/jemalloc/include/jemalloc/internal/extent_dss.h
index e8f02ce2ad..7468424b28 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/extent_dss.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/extent_dss.h
@@ -1,26 +1,26 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
-#define JEMALLOC_INTERNAL_EXTENT_DSS_H
-
-typedef enum {
- dss_prec_disabled = 0,
- dss_prec_primary = 1,
- dss_prec_secondary = 2,
-
- dss_prec_limit = 3
-} dss_prec_t;
-#define DSS_PREC_DEFAULT dss_prec_secondary
-#define DSS_DEFAULT "secondary"
-
-extern const char *dss_prec_names[];
-
-extern const char *opt_dss;
-
-dss_prec_t extent_dss_prec_get(void);
-bool extent_dss_prec_set(dss_prec_t dss_prec);
-void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit);
-bool extent_in_dss(void *addr);
-bool extent_dss_mergeable(void *addr_a, void *addr_b);
-void extent_dss_boot(void);
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
+#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
+#define JEMALLOC_INTERNAL_EXTENT_DSS_H
+
+typedef enum {
+ dss_prec_disabled = 0,
+ dss_prec_primary = 1,
+ dss_prec_secondary = 2,
+
+ dss_prec_limit = 3
+} dss_prec_t;
+#define DSS_PREC_DEFAULT dss_prec_secondary
+#define DSS_DEFAULT "secondary"
+
+extern const char *dss_prec_names[];
+
+extern const char *opt_dss;
+
+dss_prec_t extent_dss_prec_get(void);
+bool extent_dss_prec_set(dss_prec_t dss_prec);
+void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit);
+bool extent_in_dss(void *addr);
+bool extent_dss_mergeable(void *addr_a, void *addr_b);
+void extent_dss_boot(void);
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
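
"dss" here is the data segment grown via sbrk(2); dss_prec_t records whether it is preferred over mmap when an arena needs new extents, and DSS_DEFAULT being "secondary" means mmap stays the primary source with sbrk only as a fallback. The actual string table dss_prec_names lives in extent_dss.c, which is not part of this hunk, so the sketch below is a hedged illustration that only assumes the names follow the enum order; example_dss_prec_names and parse_dss_prec are hypothetical, not jemalloc API.

#include <string.h>

/* Assumed to mirror dss_prec_t order: disabled, primary, secondary. */
static const char *const example_dss_prec_names[] = {
	"disabled", "primary", "secondary"
};

/* Hypothetical helper: map an option string to its precedence index. */
int
parse_dss_prec(const char *s) {
	for (int i = 0; i < 3; i++) {
		if (strcmp(s, example_dss_prec_names[i]) == 0) {
			return i; /* dss_prec_disabled/primary/secondary */
		}
	}
	return -1; /* unknown value, e.g. a typo in malloc_conf */
}
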
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/extent_externs.h b/contrib/libs/jemalloc/include/jemalloc/internal/extent_externs.h
index 8aba57633a..bd5a1ca840 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/extent_externs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/extent_externs.h
@@ -1,83 +1,83 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
-#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
-
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
-#include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/rtree.h"
-
-extern size_t opt_lg_extent_max_active_fit;
-
-extern rtree_t extents_rtree;
-extern const extent_hooks_t extent_hooks_default;
-extern mutex_pool_t extent_mutex_pool;
-
-extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
-void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
-
-extent_hooks_t *extent_hooks_get(arena_t *arena);
-extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
- extent_hooks_t *extent_hooks);
-
-#ifdef JEMALLOC_JET
-size_t extent_size_quantize_floor(size_t size);
-size_t extent_size_quantize_ceil(size_t size);
-#endif
-
-ph_proto(, extent_avail_, extent_tree_t, extent_t)
-ph_proto(, extent_heap_, extent_heap_t, extent_t)
-
-bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
- bool delay_coalesce);
-extent_state_t extents_state_get(const extents_t *extents);
-size_t extents_npages_get(extents_t *extents);
-/* Get the number of extents in the given page size index. */
-size_t extents_nextents_get(extents_t *extents, pszind_t ind);
-/* Get the sum total bytes of the extents in the given page size index. */
-size_t extents_nbytes_get(extents_t *extents, pszind_t ind);
-extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
- size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
- bool *zero, bool *commit);
-void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
-extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
-void extents_prefork(tsdn_t *tsdn, extents_t *extents);
-void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
-void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
-extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
-void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
-void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent);
-void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent);
-bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
-bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
-
-bool extent_boot(void);
-
-void extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size);
-void extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size,
- size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
+#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
+#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
+
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/rtree.h"
+
+extern size_t opt_lg_extent_max_active_fit;
+
+extern rtree_t extents_rtree;
+extern const extent_hooks_t extent_hooks_default;
+extern mutex_pool_t extent_mutex_pool;
+
+extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
+void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+
+extent_hooks_t *extent_hooks_get(arena_t *arena);
+extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
+ extent_hooks_t *extent_hooks);
+
+#ifdef JEMALLOC_JET
+size_t extent_size_quantize_floor(size_t size);
+size_t extent_size_quantize_ceil(size_t size);
+#endif
+
+ph_proto(, extent_avail_, extent_tree_t, extent_t)
+ph_proto(, extent_heap_, extent_heap_t, extent_t)
+
+bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
+ bool delay_coalesce);
+extent_state_t extents_state_get(const extents_t *extents);
+size_t extents_npages_get(extents_t *extents);
+/* Get the number of extents in the given page size index. */
+size_t extents_nextents_get(extents_t *extents, pszind_t ind);
+/* Get the sum total bytes of the extents in the given page size index. */
+size_t extents_nbytes_get(extents_t *extents, pszind_t ind);
+extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
+ size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
+ bool *zero, bool *commit);
+void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
+extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
+void extents_prefork(tsdn_t *tsdn, extents_t *extents);
+void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
+void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
+extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
+void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent);
+void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent);
+bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
+bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
+
+bool extent_boot(void);
+
+void extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
+ size_t *nfree, size_t *nregs, size_t *size);
+void extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
+ size_t *nfree, size_t *nregs, size_t *size,
+ size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/extent_inlines.h b/contrib/libs/jemalloc/include/jemalloc/internal/extent_inlines.h
index 77fa4c4a29..49cd5e93df 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/extent_inlines.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/extent_inlines.h
@@ -1,501 +1,501 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
-#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
-
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/sz.h"
-
-static inline void
-extent_lock(tsdn_t *tsdn, extent_t *extent) {
- assert(extent != NULL);
- mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
-}
-
-static inline void
-extent_unlock(tsdn_t *tsdn, extent_t *extent) {
- assert(extent != NULL);
- mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
-}
-
-static inline void
-extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
- assert(extent1 != NULL && extent2 != NULL);
- mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
- (uintptr_t)extent2);
-}
-
-static inline void
-extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
- assert(extent1 != NULL && extent2 != NULL);
- mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
- (uintptr_t)extent2);
-}
-
-static inline unsigned
-extent_arena_ind_get(const extent_t *extent) {
- unsigned arena_ind = (unsigned)((extent->e_bits &
- EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
- assert(arena_ind < MALLOCX_ARENA_LIMIT);
-
- return arena_ind;
-}
-
-static inline arena_t *
-extent_arena_get(const extent_t *extent) {
- unsigned arena_ind = extent_arena_ind_get(extent);
-
- return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
-}
-
-static inline szind_t
-extent_szind_get_maybe_invalid(const extent_t *extent) {
- szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
- EXTENT_BITS_SZIND_SHIFT);
- assert(szind <= SC_NSIZES);
- return szind;
-}
-
-static inline szind_t
-extent_szind_get(const extent_t *extent) {
- szind_t szind = extent_szind_get_maybe_invalid(extent);
- assert(szind < SC_NSIZES); /* Never call when "invalid". */
- return szind;
-}
-
-static inline size_t
-extent_usize_get(const extent_t *extent) {
- return sz_index2size(extent_szind_get(extent));
-}
-
-static inline unsigned
-extent_binshard_get(const extent_t *extent) {
- unsigned binshard = (unsigned)((extent->e_bits &
- EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT);
- assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
- return binshard;
-}
-
-static inline size_t
-extent_sn_get(const extent_t *extent) {
- return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
- EXTENT_BITS_SN_SHIFT);
-}
-
-static inline extent_state_t
-extent_state_get(const extent_t *extent) {
- return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
- EXTENT_BITS_STATE_SHIFT);
-}
-
-static inline bool
-extent_zeroed_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
- EXTENT_BITS_ZEROED_SHIFT);
-}
-
-static inline bool
-extent_committed_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
- EXTENT_BITS_COMMITTED_SHIFT);
-}
-
-static inline bool
-extent_dumpable_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
- EXTENT_BITS_DUMPABLE_SHIFT);
-}
-
-static inline bool
-extent_slab_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
- EXTENT_BITS_SLAB_SHIFT);
-}
-
-static inline unsigned
-extent_nfree_get(const extent_t *extent) {
- assert(extent_slab_get(extent));
- return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
- EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void *
-extent_base_get(const extent_t *extent) {
- assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
- !extent_slab_get(extent));
- return PAGE_ADDR2BASE(extent->e_addr);
-}
-
-static inline void *
-extent_addr_get(const extent_t *extent) {
- assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
- !extent_slab_get(extent));
- return extent->e_addr;
-}
-
-static inline size_t
-extent_size_get(const extent_t *extent) {
- return (extent->e_size_esn & EXTENT_SIZE_MASK);
-}
-
-static inline size_t
-extent_esn_get(const extent_t *extent) {
- return (extent->e_size_esn & EXTENT_ESN_MASK);
-}
-
-static inline size_t
-extent_bsize_get(const extent_t *extent) {
- return extent->e_bsize;
-}
-
-static inline void *
-extent_before_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
-}
-
-static inline void *
-extent_last_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) +
- extent_size_get(extent) - PAGE);
-}
-
-static inline void *
-extent_past_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) +
- extent_size_get(extent));
-}
-
-static inline arena_slab_data_t *
-extent_slab_data_get(extent_t *extent) {
- assert(extent_slab_get(extent));
- return &extent->e_slab_data;
-}
-
-static inline const arena_slab_data_t *
-extent_slab_data_get_const(const extent_t *extent) {
- assert(extent_slab_get(extent));
- return &extent->e_slab_data;
-}
-
-static inline prof_tctx_t *
-extent_prof_tctx_get(const extent_t *extent) {
- return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
- ATOMIC_ACQUIRE);
-}
-
-static inline nstime_t
-extent_prof_alloc_time_get(const extent_t *extent) {
- return extent->e_alloc_time;
-}
-
-static inline void
-extent_arena_set(extent_t *extent, arena_t *arena) {
- unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
- MALLOCX_ARENA_BITS) - 1);
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
- ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
-}
-
-static inline void
-extent_binshard_set(extent_t *extent, unsigned binshard) {
- /* The assertion assumes szind is set already. */
- assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) |
- ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT);
-}
-
-static inline void
-extent_addr_set(extent_t *extent, void *addr) {
- extent->e_addr = addr;
-}
-
-static inline void
-extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
- assert(extent_base_get(extent) == extent_addr_get(extent));
-
- if (alignment < PAGE) {
- unsigned lg_range = LG_PAGE -
- lg_floor(CACHELINE_CEILING(alignment));
- size_t r;
- if (!tsdn_null(tsdn)) {
- tsd_t *tsd = tsdn_tsd(tsdn);
- r = (size_t)prng_lg_range_u64(
- tsd_offset_statep_get(tsd), lg_range);
- } else {
- r = prng_lg_range_zu(
- &extent_arena_get(extent)->offset_state,
- lg_range, true);
- }
- uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
- lg_range);
- extent->e_addr = (void *)((uintptr_t)extent->e_addr +
- random_offset);
- assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
- extent->e_addr);
- }
-}
-
-static inline void
-extent_size_set(extent_t *extent, size_t size) {
- assert((size & ~EXTENT_SIZE_MASK) == 0);
- extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
-}
-
-static inline void
-extent_esn_set(extent_t *extent, size_t esn) {
- extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
- EXTENT_ESN_MASK);
-}
-
-static inline void
-extent_bsize_set(extent_t *extent, size_t bsize) {
- extent->e_bsize = bsize;
-}
-
-static inline void
-extent_szind_set(extent_t *extent, szind_t szind) {
- assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
- ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
-}
-
-static inline void
-extent_nfree_set(extent_t *extent, unsigned nfree) {
- assert(extent_slab_get(extent));
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
- ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void
-extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) {
- /* The assertion assumes szind is set already. */
- assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
- extent->e_bits = (extent->e_bits &
- (~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) |
- ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) |
- ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void
-extent_nfree_inc(extent_t *extent) {
- assert(extent_slab_get(extent));
- extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void
-extent_nfree_dec(extent_t *extent) {
- assert(extent_slab_get(extent));
- extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void
-extent_nfree_sub(extent_t *extent, uint64_t n) {
- assert(extent_slab_get(extent));
- extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void
-extent_sn_set(extent_t *extent, size_t sn) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
- ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
-}
-
-static inline void
-extent_state_set(extent_t *extent, extent_state_t state) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
- ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
-}
-
-static inline void
-extent_zeroed_set(extent_t *extent, bool zeroed) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
- ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
-}
-
-static inline void
-extent_committed_set(extent_t *extent, bool committed) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
- ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
-}
-
-static inline void
-extent_dumpable_set(extent_t *extent, bool dumpable) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
- ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
-}
-
-static inline void
-extent_slab_set(extent_t *extent, bool slab) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
- ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
-}
-
-static inline void
-extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
- atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
-}
-
-static inline void
-extent_prof_alloc_time_set(extent_t *extent, nstime_t t) {
- nstime_copy(&extent->e_alloc_time, &t);
-}
-
-static inline bool
-extent_is_head_get(extent_t *extent) {
- if (maps_coalesce) {
- not_reached();
- }
-
- return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >>
- EXTENT_BITS_IS_HEAD_SHIFT);
-}
-
-static inline void
-extent_is_head_set(extent_t *extent, bool is_head) {
- if (maps_coalesce) {
- not_reached();
- }
-
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) |
- ((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT);
-}
-
-static inline void
-extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
- bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
- bool committed, bool dumpable, extent_head_state_t is_head) {
- assert(addr == PAGE_ADDR2BASE(addr) || !slab);
-
- extent_arena_set(extent, arena);
- extent_addr_set(extent, addr);
- extent_size_set(extent, size);
- extent_slab_set(extent, slab);
- extent_szind_set(extent, szind);
- extent_sn_set(extent, sn);
- extent_state_set(extent, state);
- extent_zeroed_set(extent, zeroed);
- extent_committed_set(extent, committed);
- extent_dumpable_set(extent, dumpable);
- ql_elm_new(extent, ql_link);
- if (!maps_coalesce) {
- extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true :
- false);
- }
- if (config_prof) {
- extent_prof_tctx_set(extent, NULL);
- }
-}
-
-static inline void
-extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
- extent_arena_set(extent, NULL);
- extent_addr_set(extent, addr);
- extent_bsize_set(extent, bsize);
- extent_slab_set(extent, false);
- extent_szind_set(extent, SC_NSIZES);
- extent_sn_set(extent, sn);
- extent_state_set(extent, extent_state_active);
- extent_zeroed_set(extent, true);
- extent_committed_set(extent, true);
- extent_dumpable_set(extent, true);
-}
-
-static inline void
-extent_list_init(extent_list_t *list) {
- ql_new(list);
-}
-
-static inline extent_t *
-extent_list_first(const extent_list_t *list) {
- return ql_first(list);
-}
-
-static inline extent_t *
-extent_list_last(const extent_list_t *list) {
- return ql_last(list, ql_link);
-}
-
-static inline void
-extent_list_append(extent_list_t *list, extent_t *extent) {
- ql_tail_insert(list, extent, ql_link);
-}
-
-static inline void
-extent_list_prepend(extent_list_t *list, extent_t *extent) {
- ql_head_insert(list, extent, ql_link);
-}
-
-static inline void
-extent_list_replace(extent_list_t *list, extent_t *to_remove,
- extent_t *to_insert) {
- ql_after_insert(to_remove, to_insert, ql_link);
- ql_remove(list, to_remove, ql_link);
-}
-
-static inline void
-extent_list_remove(extent_list_t *list, extent_t *extent) {
- ql_remove(list, extent, ql_link);
-}
-
-static inline int
-extent_sn_comp(const extent_t *a, const extent_t *b) {
- size_t a_sn = extent_sn_get(a);
- size_t b_sn = extent_sn_get(b);
-
- return (a_sn > b_sn) - (a_sn < b_sn);
-}
-
-static inline int
-extent_esn_comp(const extent_t *a, const extent_t *b) {
- size_t a_esn = extent_esn_get(a);
- size_t b_esn = extent_esn_get(b);
-
- return (a_esn > b_esn) - (a_esn < b_esn);
-}
-
-static inline int
-extent_ad_comp(const extent_t *a, const extent_t *b) {
- uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
- uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
-
- return (a_addr > b_addr) - (a_addr < b_addr);
-}
-
-static inline int
-extent_ead_comp(const extent_t *a, const extent_t *b) {
- uintptr_t a_eaddr = (uintptr_t)a;
- uintptr_t b_eaddr = (uintptr_t)b;
-
- return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
-}
-
-static inline int
-extent_snad_comp(const extent_t *a, const extent_t *b) {
- int ret;
-
- ret = extent_sn_comp(a, b);
- if (ret != 0) {
- return ret;
- }
-
- ret = extent_ad_comp(a, b);
- return ret;
-}
-
-static inline int
-extent_esnead_comp(const extent_t *a, const extent_t *b) {
- int ret;
-
- ret = extent_esn_comp(a, b);
- if (ret != 0) {
- return ret;
- }
-
- ret = extent_ead_comp(a, b);
- return ret;
-}
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
+#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
+#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
+
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/sz.h"
+
+static inline void
+extent_lock(tsdn_t *tsdn, extent_t *extent) {
+ assert(extent != NULL);
+ mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
+}
+
+static inline void
+extent_unlock(tsdn_t *tsdn, extent_t *extent) {
+ assert(extent != NULL);
+ mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
+}
+
+static inline void
+extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
+ assert(extent1 != NULL && extent2 != NULL);
+ mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
+ (uintptr_t)extent2);
+}
+
+static inline void
+extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
+ assert(extent1 != NULL && extent2 != NULL);
+ mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
+ (uintptr_t)extent2);
+}
+
+static inline unsigned
+extent_arena_ind_get(const extent_t *extent) {
+ unsigned arena_ind = (unsigned)((extent->e_bits &
+ EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
+ assert(arena_ind < MALLOCX_ARENA_LIMIT);
+
+ return arena_ind;
+}
+
+static inline arena_t *
+extent_arena_get(const extent_t *extent) {
+ unsigned arena_ind = extent_arena_ind_get(extent);
+
+ return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
+}
+
+static inline szind_t
+extent_szind_get_maybe_invalid(const extent_t *extent) {
+ szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
+ EXTENT_BITS_SZIND_SHIFT);
+ assert(szind <= SC_NSIZES);
+ return szind;
+}
+
+static inline szind_t
+extent_szind_get(const extent_t *extent) {
+ szind_t szind = extent_szind_get_maybe_invalid(extent);
+ assert(szind < SC_NSIZES); /* Never call when "invalid". */
+ return szind;
+}
+
+static inline size_t
+extent_usize_get(const extent_t *extent) {
+ return sz_index2size(extent_szind_get(extent));
+}
+
+static inline unsigned
+extent_binshard_get(const extent_t *extent) {
+ unsigned binshard = (unsigned)((extent->e_bits &
+ EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT);
+ assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
+ return binshard;
+}
+
+static inline size_t
+extent_sn_get(const extent_t *extent) {
+ return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
+ EXTENT_BITS_SN_SHIFT);
+}
+
+static inline extent_state_t
+extent_state_get(const extent_t *extent) {
+ return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
+ EXTENT_BITS_STATE_SHIFT);
+}
+
+static inline bool
+extent_zeroed_get(const extent_t *extent) {
+ return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
+ EXTENT_BITS_ZEROED_SHIFT);
+}
+
+static inline bool
+extent_committed_get(const extent_t *extent) {
+ return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
+ EXTENT_BITS_COMMITTED_SHIFT);
+}
+
+static inline bool
+extent_dumpable_get(const extent_t *extent) {
+ return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
+ EXTENT_BITS_DUMPABLE_SHIFT);
+}
+
+static inline bool
+extent_slab_get(const extent_t *extent) {
+ return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
+ EXTENT_BITS_SLAB_SHIFT);
+}
+
+static inline unsigned
+extent_nfree_get(const extent_t *extent) {
+ assert(extent_slab_get(extent));
+ return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
+ EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void *
+extent_base_get(const extent_t *extent) {
+ assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
+ !extent_slab_get(extent));
+ return PAGE_ADDR2BASE(extent->e_addr);
+}
+
+static inline void *
+extent_addr_get(const extent_t *extent) {
+ assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
+ !extent_slab_get(extent));
+ return extent->e_addr;
+}
+
+static inline size_t
+extent_size_get(const extent_t *extent) {
+ return (extent->e_size_esn & EXTENT_SIZE_MASK);
+}
+
+static inline size_t
+extent_esn_get(const extent_t *extent) {
+ return (extent->e_size_esn & EXTENT_ESN_MASK);
+}
+
+static inline size_t
+extent_bsize_get(const extent_t *extent) {
+ return extent->e_bsize;
+}
+
+static inline void *
+extent_before_get(const extent_t *extent) {
+ return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
+}
+
+static inline void *
+extent_last_get(const extent_t *extent) {
+ return (void *)((uintptr_t)extent_base_get(extent) +
+ extent_size_get(extent) - PAGE);
+}
+
+static inline void *
+extent_past_get(const extent_t *extent) {
+ return (void *)((uintptr_t)extent_base_get(extent) +
+ extent_size_get(extent));
+}
+
+static inline arena_slab_data_t *
+extent_slab_data_get(extent_t *extent) {
+ assert(extent_slab_get(extent));
+ return &extent->e_slab_data;
+}
+
+static inline const arena_slab_data_t *
+extent_slab_data_get_const(const extent_t *extent) {
+ assert(extent_slab_get(extent));
+ return &extent->e_slab_data;
+}
+
+static inline prof_tctx_t *
+extent_prof_tctx_get(const extent_t *extent) {
+ return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
+ ATOMIC_ACQUIRE);
+}
+
+static inline nstime_t
+extent_prof_alloc_time_get(const extent_t *extent) {
+ return extent->e_alloc_time;
+}
+
+static inline void
+extent_arena_set(extent_t *extent, arena_t *arena) {
+ unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
+ MALLOCX_ARENA_BITS) - 1);
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
+ ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
+}
+
+static inline void
+extent_binshard_set(extent_t *extent, unsigned binshard) {
+ /* The assertion assumes szind is set already. */
+ assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) |
+ ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT);
+}
+
+static inline void
+extent_addr_set(extent_t *extent, void *addr) {
+ extent->e_addr = addr;
+}
+
+static inline void
+extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
+ assert(extent_base_get(extent) == extent_addr_get(extent));
+
+ if (alignment < PAGE) {
+ unsigned lg_range = LG_PAGE -
+ lg_floor(CACHELINE_CEILING(alignment));
+ size_t r;
+ if (!tsdn_null(tsdn)) {
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ r = (size_t)prng_lg_range_u64(
+ tsd_offset_statep_get(tsd), lg_range);
+ } else {
+ r = prng_lg_range_zu(
+ &extent_arena_get(extent)->offset_state,
+ lg_range, true);
+ }
+ uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
+ lg_range);
+ extent->e_addr = (void *)((uintptr_t)extent->e_addr +
+ random_offset);
+ assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
+ extent->e_addr);
+ }
+}
+
+static inline void
+extent_size_set(extent_t *extent, size_t size) {
+ assert((size & ~EXTENT_SIZE_MASK) == 0);
+ extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
+}
+
+static inline void
+extent_esn_set(extent_t *extent, size_t esn) {
+ extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
+ EXTENT_ESN_MASK);
+}
+
+static inline void
+extent_bsize_set(extent_t *extent, size_t bsize) {
+ extent->e_bsize = bsize;
+}
+
+static inline void
+extent_szind_set(extent_t *extent, szind_t szind) {
+ assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
+ ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
+}
+
+static inline void
+extent_nfree_set(extent_t *extent, unsigned nfree) {
+ assert(extent_slab_get(extent));
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
+ ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) {
+ /* The assertion assumes szind is set already. */
+ assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
+ extent->e_bits = (extent->e_bits &
+ (~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) |
+ ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) |
+ ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_nfree_inc(extent_t *extent) {
+ assert(extent_slab_get(extent));
+ extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_nfree_dec(extent_t *extent) {
+ assert(extent_slab_get(extent));
+ extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_nfree_sub(extent_t *extent, uint64_t n) {
+ assert(extent_slab_get(extent));
+ extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_sn_set(extent_t *extent, size_t sn) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
+ ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
+}
+
+static inline void
+extent_state_set(extent_t *extent, extent_state_t state) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
+ ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
+}
+
+static inline void
+extent_zeroed_set(extent_t *extent, bool zeroed) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
+ ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
+}
+
+static inline void
+extent_committed_set(extent_t *extent, bool committed) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
+ ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
+}
+
+static inline void
+extent_dumpable_set(extent_t *extent, bool dumpable) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
+ ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
+}
+
+static inline void
+extent_slab_set(extent_t *extent, bool slab) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
+ ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
+}
+
+static inline void
+extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
+ atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
+}
+
+static inline void
+extent_prof_alloc_time_set(extent_t *extent, nstime_t t) {
+ nstime_copy(&extent->e_alloc_time, &t);
+}
+
+static inline bool
+extent_is_head_get(extent_t *extent) {
+ if (maps_coalesce) {
+ not_reached();
+ }
+
+ return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >>
+ EXTENT_BITS_IS_HEAD_SHIFT);
+}
+
+static inline void
+extent_is_head_set(extent_t *extent, bool is_head) {
+ if (maps_coalesce) {
+ not_reached();
+ }
+
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) |
+ ((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT);
+}
+
+static inline void
+extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
+ bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
+ bool committed, bool dumpable, extent_head_state_t is_head) {
+ assert(addr == PAGE_ADDR2BASE(addr) || !slab);
+
+ extent_arena_set(extent, arena);
+ extent_addr_set(extent, addr);
+ extent_size_set(extent, size);
+ extent_slab_set(extent, slab);
+ extent_szind_set(extent, szind);
+ extent_sn_set(extent, sn);
+ extent_state_set(extent, state);
+ extent_zeroed_set(extent, zeroed);
+ extent_committed_set(extent, committed);
+ extent_dumpable_set(extent, dumpable);
+ ql_elm_new(extent, ql_link);
+ if (!maps_coalesce) {
+ extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true :
+ false);
+ }
+ if (config_prof) {
+ extent_prof_tctx_set(extent, NULL);
+ }
+}
+
+static inline void
+extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
+ extent_arena_set(extent, NULL);
+ extent_addr_set(extent, addr);
+ extent_bsize_set(extent, bsize);
+ extent_slab_set(extent, false);
+ extent_szind_set(extent, SC_NSIZES);
+ extent_sn_set(extent, sn);
+ extent_state_set(extent, extent_state_active);
+ extent_zeroed_set(extent, true);
+ extent_committed_set(extent, true);
+ extent_dumpable_set(extent, true);
+}
+
+static inline void
+extent_list_init(extent_list_t *list) {
+ ql_new(list);
+}
+
+static inline extent_t *
+extent_list_first(const extent_list_t *list) {
+ return ql_first(list);
+}
+
+static inline extent_t *
+extent_list_last(const extent_list_t *list) {
+ return ql_last(list, ql_link);
+}
+
+static inline void
+extent_list_append(extent_list_t *list, extent_t *extent) {
+ ql_tail_insert(list, extent, ql_link);
+}
+
+static inline void
+extent_list_prepend(extent_list_t *list, extent_t *extent) {
+ ql_head_insert(list, extent, ql_link);
+}
+
+static inline void
+extent_list_replace(extent_list_t *list, extent_t *to_remove,
+ extent_t *to_insert) {
+ ql_after_insert(to_remove, to_insert, ql_link);
+ ql_remove(list, to_remove, ql_link);
+}
+
+static inline void
+extent_list_remove(extent_list_t *list, extent_t *extent) {
+ ql_remove(list, extent, ql_link);
+}
+
+static inline int
+extent_sn_comp(const extent_t *a, const extent_t *b) {
+ size_t a_sn = extent_sn_get(a);
+ size_t b_sn = extent_sn_get(b);
+
+ return (a_sn > b_sn) - (a_sn < b_sn);
+}
+
+static inline int
+extent_esn_comp(const extent_t *a, const extent_t *b) {
+ size_t a_esn = extent_esn_get(a);
+ size_t b_esn = extent_esn_get(b);
+
+ return (a_esn > b_esn) - (a_esn < b_esn);
+}
+
+static inline int
+extent_ad_comp(const extent_t *a, const extent_t *b) {
+ uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
+
+ return (a_addr > b_addr) - (a_addr < b_addr);
+}
+
+static inline int
+extent_ead_comp(const extent_t *a, const extent_t *b) {
+ uintptr_t a_eaddr = (uintptr_t)a;
+ uintptr_t b_eaddr = (uintptr_t)b;
+
+ return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
+}
+
+static inline int
+extent_snad_comp(const extent_t *a, const extent_t *b) {
+ int ret;
+
+ ret = extent_sn_comp(a, b);
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = extent_ad_comp(a, b);
+ return ret;
+}
+
+static inline int
+extent_esnead_comp(const extent_t *a, const extent_t *b) {
+ int ret;
+
+ ret = extent_esn_comp(a, b);
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = extent_ead_comp(a, b);
+ return ret;
+}
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
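The comparators above all reduce to the same branch-free three-way idiom, (a > b) - (a < b), with address order as the tie-breaker when serial numbers collide; that fallback is what keeps heap ordering stable even though serial numbers may repeat. A minimal standalone sketch of the idiom, using a hypothetical toy struct rather than extent_t itself:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for extent_t; only the compared fields are kept. */
typedef struct {
	size_t sn;        /* serial number */
	uintptr_t addr;   /* extent base address */
} toy_extent_t;

/* Branch-free three-way compare: returns -1, 0, or 1. */
static inline int
toy_size_comp(size_t a, size_t b) {
	return (a > b) - (a < b);
}

/* Serial-number order first, address order as the tie-breaker. */
static inline int
toy_snad_comp(const toy_extent_t *a, const toy_extent_t *b) {
	int ret = toy_size_comp(a->sn, b->sn);
	if (ret != 0) {
		return ret;
	}
	return (a->addr > b->addr) - (a->addr < b->addr);
}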
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/extent_mmap.h b/contrib/libs/jemalloc/include/jemalloc/internal/extent_mmap.h
index 55f17ee487..345b6fc813 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/extent_mmap.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/extent_mmap.h
@@ -1,10 +1,10 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
-#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
-
-extern bool opt_retain;
-
-void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
- bool *zero, bool *commit);
-bool extent_dalloc_mmap(void *addr, size_t size);
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
+#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
+#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
+
+extern bool opt_retain;
+
+void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit);
+bool extent_dalloc_mmap(void *addr, size_t size);
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/extent_structs.h b/contrib/libs/jemalloc/include/jemalloc/internal/extent_structs.h
index 767cd8930f..3c954a4c05 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/extent_structs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/extent_structs.h
@@ -1,256 +1,256 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
-#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/bit_util.h"
-#include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/sc.h"
-
-typedef enum {
- extent_state_active = 0,
- extent_state_dirty = 1,
- extent_state_muzzy = 2,
- extent_state_retained = 3
-} extent_state_t;
-
-/* Extent (span of pages). Use accessor functions for e_* fields. */
-struct extent_s {
- /*
- * Bitfield containing several fields:
- *
- * a: arena_ind
- * b: slab
- * c: committed
- * d: dumpable
- * z: zeroed
- * t: state
- * i: szind
- * f: nfree
- * s: bin_shard
- * n: sn
- *
- * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
- *
- * arena_ind: Arena from which this extent came, or all 1 bits if
- * unassociated.
- *
- * slab: The slab flag indicates whether the extent is used for a slab
- * of small regions. This helps differentiate small size classes,
- * and it indicates whether interior pointers can be looked up via
- * iealloc().
- *
- * committed: The committed flag indicates whether physical memory is
- * committed to the extent, whether explicitly or implicitly
- * as on a system that overcommits and satisfies physical
- * memory needs on demand via soft page faults.
- *
- * dumpable: The dumpable flag indicates whether or not we've set the
- * memory in question to be dumpable. Note that this
- * interacts somewhat subtly with user-specified extent hooks,
- * since we don't know if *they* are fiddling with
- * dumpability (in which case, we don't want to undo whatever
- * they're doing). To deal with this scenario, we:
- * - Make dumpable false only for memory allocated with the
- * default hooks.
- * - Only allow memory to go from non-dumpable to dumpable,
- * and only once.
- * - Never make the OS call to allow dumping when the
- * dumpable bit is already set.
- * These three constraints mean that we will never
- * accidentally dump user memory that the user meant to set
- * nondumpable with their extent hooks.
- *
- *
- * zeroed: The zeroed flag is used by extent recycling code to track
- * whether memory is zero-filled.
- *
- * state: The state flag is an extent_state_t.
- *
- * szind: The szind flag indicates usable size class index for
- * allocations residing in this extent, regardless of whether the
- * extent is a slab. Extent size and usable size often differ
- * even for non-slabs, either due to sz_large_pad or promotion of
- * sampled small regions.
- *
- * nfree: Number of free regions in slab.
- *
- * bin_shard: the shard of the bin from which this extent came.
- *
- * sn: Serial number (potentially non-unique).
- *
- * Serial numbers may wrap around if !opt_retain, but as long as
- * comparison functions fall back on address comparison for equal
- * serial numbers, stable (if imperfect) ordering is maintained.
- *
- * Serial numbers may not be unique even in the absence of
- * wrap-around, e.g. when splitting an extent and assigning the same
- * serial number to both resulting adjacent extents.
- */
- uint64_t e_bits;
-#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
-
-#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
-#define EXTENT_BITS_ARENA_SHIFT 0
-#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)
-
-#define EXTENT_BITS_SLAB_WIDTH 1
-#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
-#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)
-
-#define EXTENT_BITS_COMMITTED_WIDTH 1
-#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
-#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)
-
-#define EXTENT_BITS_DUMPABLE_WIDTH 1
-#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
-#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)
-
-#define EXTENT_BITS_ZEROED_WIDTH 1
-#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
-#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)
-
-#define EXTENT_BITS_STATE_WIDTH 2
-#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
-#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
-
-#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
-#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
-#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
-
-#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1)
-#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
-#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
-
-#define EXTENT_BITS_BINSHARD_WIDTH 6
-#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
-#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)
-
-#define EXTENT_BITS_IS_HEAD_WIDTH 1
-#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
-#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)
-
-#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
-#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
-
- /* Pointer to the extent that this structure is responsible for. */
- void *e_addr;
-
- union {
- /*
- * Extent size and serial number associated with the extent
- * structure (different than the serial number for the extent at
- * e_addr).
- *
- * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
- */
- size_t e_size_esn;
- #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
- #define EXTENT_ESN_MASK ((size_t)PAGE-1)
- /* Base extent size, which may not be a multiple of PAGE. */
- size_t e_bsize;
- };
-
- /*
- * List linkage, used by a variety of lists:
- * - bin_t's slabs_full
- * - extents_t's LRU
- * - stashed dirty extents
- * - arena's large allocations
- */
- ql_elm(extent_t) ql_link;
-
- /*
- * Linkage for per size class sn/address-ordered heaps, and
- * for extent_avail
- */
- phn(extent_t) ph_link;
-
- union {
- /* Small region slab metadata. */
- arena_slab_data_t e_slab_data;
-
- /* Profiling data, used for large objects. */
- struct {
- /* Time when this was allocated. */
- nstime_t e_alloc_time;
- /* Points to a prof_tctx_t. */
- atomic_p_t e_prof_tctx;
- };
- };
-};
-typedef ql_head(extent_t) extent_list_t;
-typedef ph(extent_t) extent_tree_t;
-typedef ph(extent_t) extent_heap_t;
-
-/* Quantized collection of extents, with built-in LRU queue. */
-struct extents_s {
- malloc_mutex_t mtx;
-
- /*
- * Quantized per size class heaps of extents.
- *
- * Synchronization: mtx.
- */
- extent_heap_t heaps[SC_NPSIZES + 1];
- atomic_zu_t nextents[SC_NPSIZES + 1];
- atomic_zu_t nbytes[SC_NPSIZES + 1];
-
- /*
- * Bitmap for which set bits correspond to non-empty heaps.
- *
- * Synchronization: mtx.
- */
- bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
-
- /*
- * LRU of all extents in heaps.
- *
- * Synchronization: mtx.
- */
- extent_list_t lru;
-
- /*
- * Page sum for all extents in heaps.
- *
- * The synchronization here is a little tricky. Modifications to npages
- * must hold mtx, but reads need not (though, a reader who sees npages
- * without holding the mutex can't assume anything about the rest of the
- * state of the extents_t).
- */
- atomic_zu_t npages;
-
- /* All stored extents must be in the same state. */
- extent_state_t state;
-
- /*
- * If true, delay coalescing until eviction; otherwise coalesce during
- * deallocation.
- */
- bool delay_coalesce;
-};
-
-/*
- * The following two structs are for experimental purposes. See
- * experimental_utilization_query_ctl and
- * experimental_utilization_batch_query_ctl in src/ctl.c.
- */
-
-struct extent_util_stats_s {
- size_t nfree;
- size_t nregs;
- size_t size;
-};
-
-struct extent_util_stats_verbose_s {
- void *slabcur_addr;
- size_t nfree;
- size_t nregs;
- size_t size;
- size_t bin_nfree;
- size_t bin_nregs;
-};
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
+#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
+#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/sc.h"
+
+typedef enum {
+ extent_state_active = 0,
+ extent_state_dirty = 1,
+ extent_state_muzzy = 2,
+ extent_state_retained = 3
+} extent_state_t;
+
+/* Extent (span of pages). Use accessor functions for e_* fields. */
+struct extent_s {
+ /*
+ * Bitfield containing several fields:
+ *
+ * a: arena_ind
+ * b: slab
+ * c: committed
+ * d: dumpable
+ * z: zeroed
+ * t: state
+ * i: szind
+ * f: nfree
+ * s: bin_shard
+ * n: sn
+ *
+ * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
+ *
+ * arena_ind: Arena from which this extent came, or all 1 bits if
+ * unassociated.
+ *
+ * slab: The slab flag indicates whether the extent is used for a slab
+ * of small regions. This helps differentiate small size classes,
+ * and it indicates whether interior pointers can be looked up via
+ * iealloc().
+ *
+ * committed: The committed flag indicates whether physical memory is
+ * committed to the extent, whether explicitly or implicitly
+ * as on a system that overcommits and satisfies physical
+ * memory needs on demand via soft page faults.
+ *
+ * dumpable: The dumpable flag indicates whether or not we've set the
+ * memory in question to be dumpable. Note that this
+ * interacts somewhat subtly with user-specified extent hooks,
+ * since we don't know if *they* are fiddling with
+ * dumpability (in which case, we don't want to undo whatever
+ * they're doing). To deal with this scenario, we:
+ * - Make dumpable false only for memory allocated with the
+ * default hooks.
+ * - Only allow memory to go from non-dumpable to dumpable,
+ * and only once.
+ * - Never make the OS call to allow dumping when the
+ * dumpable bit is already set.
+ * These three constraints mean that we will never
+ * accidentally dump user memory that the user meant to set
+ * nondumpable with their extent hooks.
+ *
+ *
+ * zeroed: The zeroed flag is used by extent recycling code to track
+ * whether memory is zero-filled.
+ *
+ * state: The state flag is an extent_state_t.
+ *
+ * szind: The szind flag indicates usable size class index for
+ * allocations residing in this extent, regardless of whether the
+ * extent is a slab. Extent size and usable size often differ
+ * even for non-slabs, either due to sz_large_pad or promotion of
+ * sampled small regions.
+ *
+ * nfree: Number of free regions in slab.
+ *
+ * bin_shard: the shard of the bin from which this extent came.
+ *
+ * sn: Serial number (potentially non-unique).
+ *
+ * Serial numbers may wrap around if !opt_retain, but as long as
+ * comparison functions fall back on address comparison for equal
+ * serial numbers, stable (if imperfect) ordering is maintained.
+ *
+ * Serial numbers may not be unique even in the absence of
+ * wrap-around, e.g. when splitting an extent and assigning the same
+ * serial number to both resulting adjacent extents.
+ */
+ uint64_t e_bits;
+#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
+
+#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
+#define EXTENT_BITS_ARENA_SHIFT 0
+#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)
+
+#define EXTENT_BITS_SLAB_WIDTH 1
+#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
+#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)
+
+#define EXTENT_BITS_COMMITTED_WIDTH 1
+#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
+#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)
+
+#define EXTENT_BITS_DUMPABLE_WIDTH 1
+#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
+#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)
+
+#define EXTENT_BITS_ZEROED_WIDTH 1
+#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
+#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)
+
+#define EXTENT_BITS_STATE_WIDTH 2
+#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
+#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
+
+#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
+#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
+#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
+
+#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1)
+#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
+#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
+
+#define EXTENT_BITS_BINSHARD_WIDTH 6
+#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
+#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)
+
+#define EXTENT_BITS_IS_HEAD_WIDTH 1
+#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
+#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)
+
+#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
+#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
+
+ /* Pointer to the extent that this structure is responsible for. */
+ void *e_addr;
+
+ union {
+ /*
+ * Extent size and serial number associated with the extent
+ * structure (different than the serial number for the extent at
+ * e_addr).
+ *
+ * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
+ */
+ size_t e_size_esn;
+ #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
+ #define EXTENT_ESN_MASK ((size_t)PAGE-1)
+ /* Base extent size, which may not be a multiple of PAGE. */
+ size_t e_bsize;
+ };
+
+ /*
+ * List linkage, used by a variety of lists:
+ * - bin_t's slabs_full
+ * - extents_t's LRU
+ * - stashed dirty extents
+ * - arena's large allocations
+ */
+ ql_elm(extent_t) ql_link;
+
+ /*
+ * Linkage for per size class sn/address-ordered heaps, and
+ * for extent_avail
+ */
+ phn(extent_t) ph_link;
+
+ union {
+ /* Small region slab metadata. */
+ arena_slab_data_t e_slab_data;
+
+ /* Profiling data, used for large objects. */
+ struct {
+ /* Time when this was allocated. */
+ nstime_t e_alloc_time;
+ /* Points to a prof_tctx_t. */
+ atomic_p_t e_prof_tctx;
+ };
+ };
+};
+typedef ql_head(extent_t) extent_list_t;
+typedef ph(extent_t) extent_tree_t;
+typedef ph(extent_t) extent_heap_t;
+
+/* Quantized collection of extents, with built-in LRU queue. */
+struct extents_s {
+ malloc_mutex_t mtx;
+
+ /*
+ * Quantized per size class heaps of extents.
+ *
+ * Synchronization: mtx.
+ */
+ extent_heap_t heaps[SC_NPSIZES + 1];
+ atomic_zu_t nextents[SC_NPSIZES + 1];
+ atomic_zu_t nbytes[SC_NPSIZES + 1];
+
+ /*
+ * Bitmap for which set bits correspond to non-empty heaps.
+ *
+ * Synchronization: mtx.
+ */
+ bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
+
+ /*
+ * LRU of all extents in heaps.
+ *
+ * Synchronization: mtx.
+ */
+ extent_list_t lru;
+
+ /*
+ * Page sum for all extents in heaps.
+ *
+ * The synchronization here is a little tricky. Modifications to npages
+ * must hold mtx, but reads need not (though, a reader who sees npages
+ * without holding the mutex can't assume anything about the rest of the
+ * state of the extents_t).
+ */
+ atomic_zu_t npages;
+
+ /* All stored extents must be in the same state. */
+ extent_state_t state;
+
+ /*
+ * If true, delay coalescing until eviction; otherwise coalesce during
+ * deallocation.
+ */
+ bool delay_coalesce;
+};
+
+/*
+ * The following two structs are for experimental purposes. See
+ * experimental_utilization_query_ctl and
+ * experimental_utilization_batch_query_ctl in src/ctl.c.
+ */
+
+struct extent_util_stats_s {
+ size_t nfree;
+ size_t nregs;
+ size_t size;
+};
+
+struct extent_util_stats_verbose_s {
+ void *slabcur_addr;
+ size_t nfree;
+ size_t nregs;
+ size_t size;
+ size_t bin_nfree;
+ size_t bin_nregs;
+};
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
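The e_bits layout above is driven entirely by the MASK(width, shift) macro: each field occupies a contiguous run of bits, and every setter clears the field's mask before or-ing in the shifted value. A standalone sketch of the same pattern with a hypothetical 2-bit field (the real widths come from MALLOCX_ARENA_BITS, LG_CEIL(SC_NSIZES), LG_SLAB_MAXREGS, and so on):

#include <assert.h>
#include <stdint.h>

#define MASK(w, s) ((((((uint64_t)0x1U) << (w)) - 1)) << (s))

/* Hypothetical field: 2 bits wide, starting at bit 3. */
#define F_STATE_WIDTH	2
#define F_STATE_SHIFT	3
#define F_STATE_MASK	MASK(F_STATE_WIDTH, F_STATE_SHIFT)

static inline uint64_t
field_set(uint64_t bits, uint64_t val) {
	/* val must fit in F_STATE_WIDTH bits. */
	return (bits & ~F_STATE_MASK) | (val << F_STATE_SHIFT);
}

static inline uint64_t
field_get(uint64_t bits) {
	return (bits & F_STATE_MASK) >> F_STATE_SHIFT;
}

int
main(void) {
	uint64_t bits = 0;
	bits = field_set(bits, 2);	/* e.g. a state value of 2 */
	assert(field_get(bits) == 2);
	return 0;
}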
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/extent_types.h b/contrib/libs/jemalloc/include/jemalloc/internal/extent_types.h
index 96925cf958..f144f3fe91 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/extent_types.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/extent_types.h
@@ -1,23 +1,23 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
-#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
-
-typedef struct extent_s extent_t;
-typedef struct extents_s extents_t;
-
-typedef struct extent_util_stats_s extent_util_stats_t;
-typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t;
-
-#define EXTENT_HOOKS_INITIALIZER NULL
-
-/*
- * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit)
- * is the max ratio between the size of the active extent and the new extent.
- */
-#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
-
-typedef enum {
- EXTENT_NOT_HEAD,
- EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain. */
-} extent_head_state_t;
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
+#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
+#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
+
+typedef struct extent_s extent_t;
+typedef struct extents_s extents_t;
+
+typedef struct extent_util_stats_s extent_util_stats_t;
+typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t;
+
+#define EXTENT_HOOKS_INITIALIZER NULL
+
+/*
+ * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit)
+ * is the max ratio between the size of the active extent and the new extent.
+ */
+#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
+
+typedef enum {
+ EXTENT_NOT_HEAD,
+ EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain. */
+} extent_head_state_t;
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
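LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT above bounds how much larger an already-active extent may be than the request it is split to satisfy. A hedged sketch of the kind of check that ratio implies; this is an illustration only, not the exact logic in jemalloc's extent code:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Returns true if an active extent of active_size is an acceptable fit for
 * a request of request_size under a 2^lg_max_active_fit ratio cap.
 */
static inline bool
active_extent_fits(size_t active_size, size_t request_size,
    unsigned lg_max_active_fit) {
	/* If the shifted bound would overflow, the cap cannot be exceeded. */
	if (request_size > (SIZE_MAX >> lg_max_active_fit)) {
		return true;
	}
	return active_size <= (request_size << lg_max_active_fit);
}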
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/hash.h b/contrib/libs/jemalloc/include/jemalloc/internal/hash.h
index 0270034e87..6c482a5b9b 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/hash.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/hash.h
@@ -1,76 +1,76 @@
-#ifndef JEMALLOC_INTERNAL_HASH_H
-#define JEMALLOC_INTERNAL_HASH_H
-
-#include "jemalloc/internal/assert.h"
-
+#ifndef JEMALLOC_INTERNAL_HASH_H
+#define JEMALLOC_INTERNAL_HASH_H
+
+#include "jemalloc/internal/assert.h"
+
/*
* The following hash function is based on MurmurHash3, placed into the public
- * domain by Austin Appleby. See https://github.com/aappleby/smhasher for
+ * domain by Austin Appleby. See https://github.com/aappleby/smhasher for
* details.
*/
/******************************************************************************/
/* Internal implementation. */
-static inline uint32_t
-hash_rotl_32(uint32_t x, int8_t r) {
- return ((x << r) | (x >> (32 - r)));
+static inline uint32_t
+hash_rotl_32(uint32_t x, int8_t r) {
+ return ((x << r) | (x >> (32 - r)));
}
-static inline uint64_t
-hash_rotl_64(uint64_t x, int8_t r) {
- return ((x << r) | (x >> (64 - r)));
+static inline uint64_t
+hash_rotl_64(uint64_t x, int8_t r) {
+ return ((x << r) | (x >> (64 - r)));
}
-static inline uint32_t
-hash_get_block_32(const uint32_t *p, int i) {
- /* Handle unaligned read. */
- if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
- uint32_t ret;
-
- memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
- return ret;
- }
-
- return p[i];
+static inline uint32_t
+hash_get_block_32(const uint32_t *p, int i) {
+ /* Handle unaligned read. */
+ if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
+ uint32_t ret;
+
+ memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
+ return ret;
+ }
+
+ return p[i];
}
-static inline uint64_t
-hash_get_block_64(const uint64_t *p, int i) {
- /* Handle unaligned read. */
- if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
- uint64_t ret;
-
- memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
- return ret;
- }
-
- return p[i];
+static inline uint64_t
+hash_get_block_64(const uint64_t *p, int i) {
+ /* Handle unaligned read. */
+ if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
+ uint64_t ret;
+
+ memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
+ return ret;
+ }
+
+ return p[i];
}
-static inline uint32_t
-hash_fmix_32(uint32_t h) {
+static inline uint32_t
+hash_fmix_32(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
- return h;
+ return h;
}
-static inline uint64_t
-hash_fmix_64(uint64_t k) {
+static inline uint64_t
+hash_fmix_64(uint64_t k) {
k ^= k >> 33;
- k *= KQU(0xff51afd7ed558ccd);
+ k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
- k *= KQU(0xc4ceb9fe1a85ec53);
+ k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
- return k;
+ return k;
}
-static inline uint32_t
-hash_x86_32(const void *key, int len, uint32_t seed) {
+static inline uint32_t
+hash_x86_32(const void *key, int len, uint32_t seed) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
@@ -104,8 +104,8 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
uint32_t k1 = 0;
switch (len & 3) {
- case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH
- case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH
+ case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH
+ case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1;
}
@@ -116,12 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
h1 = hash_fmix_32(h1);
- return h1;
+ return h1;
}
-static inline void
+static inline void
hash_x86_128(const void *key, const int len, uint32_t seed,
- uint64_t r_out[2]) {
+ uint64_t r_out[2]) {
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -177,29 +177,29 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
uint32_t k4 = 0;
switch (len & 15) {
- case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH
- case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH
+ case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH
+ case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
- JEMALLOC_FALLTHROUGH
- case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH
- case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH
- case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH
+ JEMALLOC_FALLTHROUGH
+ case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH
+ case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH
+ case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH
case 9: k3 ^= tail[ 8] << 0;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
- JEMALLOC_FALLTHROUGH
- case 8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH
- case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH
- case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH
+ JEMALLOC_FALLTHROUGH
+ case 8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH
+ case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH
+ case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH
case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
- JEMALLOC_FALLTHROUGH
- case 4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH
- case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH
- case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH
+ JEMALLOC_FALLTHROUGH
+ case 4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH
+ case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH
+ case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH
case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
- JEMALLOC_FALLTHROUGH
+ JEMALLOC_FALLTHROUGH
}
}
@@ -221,17 +221,17 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
-static inline void
+static inline void
hash_x64_128(const void *key, const int len, const uint32_t seed,
- uint64_t r_out[2]) {
+ uint64_t r_out[2]) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
- const uint64_t c1 = KQU(0x87c37b91114253d5);
- const uint64_t c2 = KQU(0x4cf5ad432745937f);
+ const uint64_t c1 = KQU(0x87c37b91114253d5);
+ const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{
@@ -261,22 +261,22 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t k2 = 0;
switch (len & 15) {
- case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH
- case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH
- case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH
- case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH
- case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH
- case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH
+ case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH
+ case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH
+ case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH
+ case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH
+ case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH
+ case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
- JEMALLOC_FALLTHROUGH
- case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH
- case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH
- case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH
- case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH
- case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH
- case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH
- case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH
+ JEMALLOC_FALLTHROUGH
+ case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH
+ case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH
+ case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH
+ case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH
+ case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH
+ case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH
+ case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
}
@@ -300,20 +300,20 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
/******************************************************************************/
/* API. */
-static inline void
-hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
- assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
-
+static inline void
+hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
+ assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
+
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
- hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
+ hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
- {
- uint64_t hashes[2];
- hash_x86_128(key, (int)len, seed, hashes);
- r_hash[0] = (size_t)hashes[0];
- r_hash[1] = (size_t)hashes[1];
- }
+ {
+ uint64_t hashes[2];
+ hash_x86_128(key, (int)len, seed, hashes);
+ r_hash[0] = (size_t)hashes[0];
+ r_hash[1] = (size_t)hashes[1];
+ }
#endif
}
-#endif /* JEMALLOC_INTERNAL_HASH_H */
+#endif /* JEMALLOC_INTERNAL_HASH_H */
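hash_fmix_64() above is the standard MurmurHash3 64-bit avalanche finalizer; KQU() is merely jemalloc's helper for spelling uint64_t literals. The same finalizer written as a self-contained function with plain ULL suffixes:

#include <stdint.h>

/* MurmurHash3 64-bit finalizer: xor-shift/multiply avalanche. */
static inline uint64_t
fmix64(uint64_t k) {
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdULL;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53ULL;
	k ^= k >> 33;
	return k;
}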
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/hook.h b/contrib/libs/jemalloc/include/jemalloc/internal/hook.h
index ee246b1e0b..d59e9c4e0b 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/hook.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/hook.h
@@ -1,163 +1,163 @@
-#ifndef JEMALLOC_INTERNAL_HOOK_H
-#define JEMALLOC_INTERNAL_HOOK_H
-
-#include "jemalloc/internal/tsd.h"
-
-/*
- * This API is *extremely* experimental, and may get ripped out, changed in API-
- * and ABI-incompatible ways, be insufficiently or incorrectly documented, etc.
- *
- * It allows hooking the stateful parts of the API to see changes as they
- * happen.
- *
- * Allocation hooks are called after the allocation is done, free hooks are
- * called before the free is done, and expand hooks are called after the
- * allocation is expanded.
- *
- * For realloc and rallocx, if the expansion happens in place, the expansion
- * hook is called. If it is moved, then the alloc hook is called on the new
- * location, and then the free hook is called on the old location (i.e. both
- * hooks are invoked in between the alloc and the dalloc).
- *
- * If we return NULL from OOM, then usize might not be trustworthy. Calling
- * realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0)
- * only calls the free hook. (Calling realloc(NULL, 0) is treated as malloc(0),
- * and only calls the alloc hook).
- *
- * Reentrancy:
- * Reentrancy is guarded against from within the hook implementation. If you
- * call allocator functions from within a hook, the hooks will not be invoked
- * again.
- * Threading:
- * The installation of a hook synchronizes with all its uses. If you can
- * prove the installation of a hook happens-before a jemalloc entry point,
- * then the hook will get invoked (unless there's a racing removal).
- *
- * Hook insertion appears to be atomic at a per-thread level (i.e. if a thread
- * allocates and has the alloc hook invoked, then a subsequent free on the
- * same thread will also have the free hook invoked).
- *
- * The *removal* of a hook does *not* block until all threads are done with
- * the hook. Hook authors have to be resilient to this, and need some
- * out-of-band mechanism for cleaning up any dynamically allocated memory
- * associated with their hook.
- * Ordering:
- * Order of hook execution is unspecified, and may be different than insertion
- * order.
- */
-
-#define HOOK_MAX 4
-
-enum hook_alloc_e {
- hook_alloc_malloc,
- hook_alloc_posix_memalign,
- hook_alloc_aligned_alloc,
- hook_alloc_calloc,
- hook_alloc_memalign,
- hook_alloc_valloc,
- hook_alloc_mallocx,
-
- /* The reallocating functions have both alloc and dalloc variants */
- hook_alloc_realloc,
- hook_alloc_rallocx,
-};
-/*
- * We put the enum typedef after the enum, since this file may get included by
- * jemalloc_cpp.cpp, and C++ disallows enum forward declarations.
- */
-typedef enum hook_alloc_e hook_alloc_t;
-
-enum hook_dalloc_e {
- hook_dalloc_free,
- hook_dalloc_dallocx,
- hook_dalloc_sdallocx,
-
- /*
- * The dalloc halves of reallocation (not called if in-place expansion
- * happens).
- */
- hook_dalloc_realloc,
- hook_dalloc_rallocx,
-};
-typedef enum hook_dalloc_e hook_dalloc_t;
-
-
-enum hook_expand_e {
- hook_expand_realloc,
- hook_expand_rallocx,
- hook_expand_xallocx,
-};
-typedef enum hook_expand_e hook_expand_t;
-
-typedef void (*hook_alloc)(
- void *extra, hook_alloc_t type, void *result, uintptr_t result_raw,
- uintptr_t args_raw[3]);
-
-typedef void (*hook_dalloc)(
- void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
-
-typedef void (*hook_expand)(
- void *extra, hook_expand_t type, void *address, size_t old_usize,
- size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
-
-typedef struct hooks_s hooks_t;
-struct hooks_s {
- hook_alloc alloc_hook;
- hook_dalloc dalloc_hook;
- hook_expand expand_hook;
- void *extra;
-};
-
-/*
- * Begin implementation details; everything above this point might one day live
- * in a public API. Everything below this point never will.
- */
-
-/*
- * The realloc pathways haven't gotten any refactoring love in a while, and it's
- * fairly difficult to pass information from the entry point to the hooks. We
- * put the information the hooks will need into a struct to encapsulate
- * everything.
- *
- * Much of these pathways are force-inlined, so that the compiler can avoid
- * materializing this struct until we hit an extern arena function. For fairly
- * goofy reasons, *many* of the realloc paths hit an extern arena function.
- * These paths are cold enough that it doesn't matter; eventually, we should
- * rewrite the realloc code to make the expand-in-place and the
- * free-then-realloc paths more orthogonal, at which point we don't need to
- * spread the hook logic all over the place.
- */
-typedef struct hook_ralloc_args_s hook_ralloc_args_t;
-struct hook_ralloc_args_s {
- /* I.e. as opposed to rallocx. */
- bool is_realloc;
- /*
- * The expand hook takes 4 arguments, even if only 3 are actually used;
- * we add an extra one in case the user decides to memcpy without
- * looking too closely at the hooked function.
- */
- uintptr_t args[4];
-};
-
-/*
- * Returns an opaque handle to be used when removing the hook. NULL means that
- * we couldn't install the hook.
- */
-bool hook_boot();
-
-void *hook_install(tsdn_t *tsdn, hooks_t *hooks);
-/* Uninstalls the hook with the handle previously returned from hook_install. */
-void hook_remove(tsdn_t *tsdn, void *opaque);
-
-/* Hooks */
-
-void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
- uintptr_t args_raw[3]);
-
-void hook_invoke_dalloc(hook_dalloc_t type, void *address,
- uintptr_t args_raw[3]);
-
-void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
- size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
-
-#endif /* JEMALLOC_INTERNAL_HOOK_H */
+#ifndef JEMALLOC_INTERNAL_HOOK_H
+#define JEMALLOC_INTERNAL_HOOK_H
+
+#include "jemalloc/internal/tsd.h"
+
+/*
+ * This API is *extremely* experimental, and may get ripped out, changed in API-
+ * and ABI-incompatible ways, be insufficiently or incorrectly documented, etc.
+ *
+ * It allows hooking the stateful parts of the API to see changes as they
+ * happen.
+ *
+ * Allocation hooks are called after the allocation is done, free hooks are
+ * called before the free is done, and expand hooks are called after the
+ * allocation is expanded.
+ *
+ * For realloc and rallocx, if the expansion happens in place, the expansion
+ * hook is called. If it is moved, then the alloc hook is called on the new
+ * location, and then the free hook is called on the old location (i.e. both
+ * hooks are invoked in between the alloc and the dalloc).
+ *
+ * If we return NULL from OOM, then usize might not be trustworthy. Calling
+ * realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0)
+ * only calls the free hook. (Calling realloc(NULL, 0) is treated as malloc(0),
+ * and only calls the alloc hook).
+ *
+ * Reentrancy:
+ * Reentrancy is guarded against from within the hook implementation. If you
+ * call allocator functions from within a hook, the hooks will not be invoked
+ * again.
+ * Threading:
+ * The installation of a hook synchronizes with all its uses. If you can
+ * prove the installation of a hook happens-before a jemalloc entry point,
+ * then the hook will get invoked (unless there's a racing removal).
+ *
+ * Hook insertion appears to be atomic at a per-thread level (i.e. if a thread
+ * allocates and has the alloc hook invoked, then a subsequent free on the
+ * same thread will also have the free hook invoked).
+ *
+ * The *removal* of a hook does *not* block until all threads are done with
+ * the hook. Hook authors have to be resilient to this, and need some
+ * out-of-band mechanism for cleaning up any dynamically allocated memory
+ * associated with their hook.
+ * Ordering:
+ * Order of hook execution is unspecified, and may be different than insertion
+ * order.
+ */
+
+#define HOOK_MAX 4
+
+enum hook_alloc_e {
+ hook_alloc_malloc,
+ hook_alloc_posix_memalign,
+ hook_alloc_aligned_alloc,
+ hook_alloc_calloc,
+ hook_alloc_memalign,
+ hook_alloc_valloc,
+ hook_alloc_mallocx,
+
+ /* The reallocating functions have both alloc and dalloc variants */
+ hook_alloc_realloc,
+ hook_alloc_rallocx,
+};
+/*
+ * We put the enum typedef after the enum, since this file may get included by
+ * jemalloc_cpp.cpp, and C++ disallows enum forward declarations.
+ */
+typedef enum hook_alloc_e hook_alloc_t;
+
+enum hook_dalloc_e {
+ hook_dalloc_free,
+ hook_dalloc_dallocx,
+ hook_dalloc_sdallocx,
+
+ /*
+ * The dalloc halves of reallocation (not called if in-place expansion
+ * happens).
+ */
+ hook_dalloc_realloc,
+ hook_dalloc_rallocx,
+};
+typedef enum hook_dalloc_e hook_dalloc_t;
+
+
+enum hook_expand_e {
+ hook_expand_realloc,
+ hook_expand_rallocx,
+ hook_expand_xallocx,
+};
+typedef enum hook_expand_e hook_expand_t;
+
+typedef void (*hook_alloc)(
+ void *extra, hook_alloc_t type, void *result, uintptr_t result_raw,
+ uintptr_t args_raw[3]);
+
+typedef void (*hook_dalloc)(
+ void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
+
+typedef void (*hook_expand)(
+ void *extra, hook_expand_t type, void *address, size_t old_usize,
+ size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
+
+typedef struct hooks_s hooks_t;
+struct hooks_s {
+ hook_alloc alloc_hook;
+ hook_dalloc dalloc_hook;
+ hook_expand expand_hook;
+ void *extra;
+};
+
+/*
+ * Begin implementation details; everything above this point might one day live
+ * in a public API. Everything below this point never will.
+ */
+
+/*
+ * The realloc pathways haven't gotten any refactoring love in a while, and it's
+ * fairly difficult to pass information from the entry point to the hooks. We
+ * put the information the hooks will need into a struct to encapsulate
+ * everything.
+ *
+ * Much of these pathways are force-inlined, so that the compiler can avoid
+ * materializing this struct until we hit an extern arena function. For fairly
+ * goofy reasons, *many* of the realloc paths hit an extern arena function.
+ * These paths are cold enough that it doesn't matter; eventually, we should
+ * rewrite the realloc code to make the expand-in-place and the
+ * free-then-realloc paths more orthogonal, at which point we don't need to
+ * spread the hook logic all over the place.
+ */
+typedef struct hook_ralloc_args_s hook_ralloc_args_t;
+struct hook_ralloc_args_s {
+ /* I.e. as opposed to rallocx. */
+ bool is_realloc;
+ /*
+ * The expand hook takes 4 arguments, even if only 3 are actually used;
+ * we add an extra one in case the user decides to memcpy without
+ * looking too closely at the hooked function.
+ */
+ uintptr_t args[4];
+};
+
+/*
+ * Returns an opaque handle to be used when removing the hook. NULL means that
+ * we couldn't install the hook.
+ */
+bool hook_boot();
+
+void *hook_install(tsdn_t *tsdn, hooks_t *hooks);
+/* Uninstalls the hook with the handle previously returned from hook_install. */
+void hook_remove(tsdn_t *tsdn, void *opaque);
+
+/* Hooks */
+
+void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
+ uintptr_t args_raw[3]);
+
+void hook_invoke_dalloc(hook_dalloc_t type, void *address,
+ uintptr_t args_raw[3]);
+
+void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
+ size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
+
+#endif /* JEMALLOC_INTERNAL_HOOK_H */
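The typedefs above fully determine what a hook table looks like. A hedged sketch of a minimal hook set matching those signatures; it assumes the declarations in this header are in scope, and keep in mind that the API is explicitly experimental and that hook_install()/hook_remove() take a tsdn_t, an internal handle rather than part of jemalloc's public interface:

static void
sketch_alloc_hook(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
	/* E.g. count allocations; keep the body lightweight. */
	(void)extra; (void)type; (void)result; (void)result_raw; (void)args_raw;
}

static void
sketch_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
    uintptr_t args_raw[3]) {
	(void)extra; (void)type; (void)address; (void)args_raw;
}

static void
sketch_expand_hook(void *extra, hook_expand_t type, void *address,
    size_t old_usize, size_t new_usize, uintptr_t result_raw,
    uintptr_t args_raw[4]) {
	(void)extra; (void)type; (void)address; (void)old_usize;
	(void)new_usize; (void)result_raw; (void)args_raw;
}

static hooks_t sketch_hooks = {
	.alloc_hook = sketch_alloc_hook,
	.dalloc_hook = sketch_dalloc_hook,
	.expand_hook = sketch_expand_hook,
	.extra = NULL
};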
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
index 46259be5aa..3b4cf65e82 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -1,98 +1,98 @@
-#ifndef JEMALLOC_INTERNAL_DECLS_H
-#define JEMALLOC_INTERNAL_DECLS_H
-
-// Yandex-specific: misc hacks.
-#include "../../../hack.h"
-// End of Yandex-specific.
-
-#include <math.h>
-#ifdef _WIN32
-# include <windows.h>
-# include "msvc_compat/windows_extra.h"
-# ifdef _WIN64
-# if LG_VADDR <= 32
-# error Generate the headers using x64 vcargs
-# endif
-# else
-# if LG_VADDR > 32
-# undef LG_VADDR
-# define LG_VADDR 32
-# endif
-# endif
-#else
-# include <sys/param.h>
-# include <sys/mman.h>
-# if !defined(__pnacl__) && !defined(__native_client__)
-# include <sys/syscall.h>
-# if !defined(SYS_write) && defined(__NR_write)
-# define SYS_write __NR_write
-# endif
-# if defined(SYS_open) && defined(__aarch64__)
- /* Android headers may define SYS_open to __NR_open even though
- * __NR_open may not exist on AArch64 (superseded by __NR_openat). */
-# undef SYS_open
-# endif
-# include <sys/uio.h>
-# endif
-# include <pthread.h>
-# ifdef __FreeBSD__
-# include <pthread_np.h>
-# endif
-# include <signal.h>
-# ifdef JEMALLOC_OS_UNFAIR_LOCK
-# include <os/lock.h>
-# endif
-# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
-# include <sched.h>
-# endif
-# include <errno.h>
-# include <sys/time.h>
-# include <time.h>
-# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-# include <mach/mach_time.h>
-# endif
-#endif
-#include <sys/types.h>
-
-#include <limits.h>
-#ifndef SIZE_T_MAX
-# define SIZE_T_MAX SIZE_MAX
-#endif
-#ifndef SSIZE_MAX
-# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
-#endif
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <stddef.h>
-#ifndef offsetof
-# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
-#endif
-#include <string.h>
-#include <strings.h>
-#include <ctype.h>
-#ifdef _MSC_VER
-# include <io.h>
-typedef intptr_t ssize_t;
-# define PATH_MAX 1024
-# define STDERR_FILENO 2
-# define __func__ __FUNCTION__
-# ifdef JEMALLOC_HAS_RESTRICT
-# define restrict __restrict
-# endif
-/* Disable warnings about deprecated system functions. */
-# pragma warning(disable: 4996)
-#if _MSC_VER < 1800
-static int
-isblank(int c) {
- return (c == '\t' || c == ' ');
-}
-#endif
-#else
-# include <unistd.h>
-#endif
-#include <fcntl.h>
-
-#endif /* JEMALLOC_INTERNAL_DECLS_H */
+#ifndef JEMALLOC_INTERNAL_DECLS_H
+#define JEMALLOC_INTERNAL_DECLS_H
+
+// Yandex-specific: misc hacks.
+#include "../../../hack.h"
+// End of Yandex-specific.
+
+#include <math.h>
+#ifdef _WIN32
+# include <windows.h>
+# include "msvc_compat/windows_extra.h"
+# ifdef _WIN64
+# if LG_VADDR <= 32
+# error Generate the headers using x64 vcargs
+# endif
+# else
+# if LG_VADDR > 32
+# undef LG_VADDR
+# define LG_VADDR 32
+# endif
+# endif
+#else
+# include <sys/param.h>
+# include <sys/mman.h>
+# if !defined(__pnacl__) && !defined(__native_client__)
+# include <sys/syscall.h>
+# if !defined(SYS_write) && defined(__NR_write)
+# define SYS_write __NR_write
+# endif
+# if defined(SYS_open) && defined(__aarch64__)
+ /* Android headers may define SYS_open to __NR_open even though
+ * __NR_open may not exist on AArch64 (superseded by __NR_openat). */
+# undef SYS_open
+# endif
+# include <sys/uio.h>
+# endif
+# include <pthread.h>
+# ifdef __FreeBSD__
+# include <pthread_np.h>
+# endif
+# include <signal.h>
+# ifdef JEMALLOC_OS_UNFAIR_LOCK
+# include <os/lock.h>
+# endif
+# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
+# include <sched.h>
+# endif
+# include <errno.h>
+# include <sys/time.h>
+# include <time.h>
+# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
+# include <mach/mach_time.h>
+# endif
+#endif
+#include <sys/types.h>
+
+#include <limits.h>
+#ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+#endif
+#ifndef SSIZE_MAX
+# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
+#endif
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stddef.h>
+#ifndef offsetof
+# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
+#endif
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+#ifdef _MSC_VER
+# include <io.h>
+typedef intptr_t ssize_t;
+# define PATH_MAX 1024
+# define STDERR_FILENO 2
+# define __func__ __FUNCTION__
+# ifdef JEMALLOC_HAS_RESTRICT
+# define restrict __restrict
+# endif
+/* Disable warnings about deprecated system functions. */
+# pragma warning(disable: 4996)
+#if _MSC_VER < 1800
+static int
+isblank(int c) {
+ return (c == '\t' || c == ' ');
+}
+#endif
+#else
+# include <unistd.h>
+#endif
+#include <fcntl.h>
+
+#endif /* JEMALLOC_INTERNAL_DECLS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-linux.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-linux.h
index 28cc151f07..0677bd90e8 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-linux.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-linux.h
@@ -1,6 +1,6 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
-#define JEMALLOC_INTERNAL_DEFS_H_
+#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
@@ -10,18 +10,18 @@
/* #undef JEMALLOC_CPREFIX */
/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#define JEMALLOC_OVERRIDE___LIBC_CALLOC
-#define JEMALLOC_OVERRIDE___LIBC_FREE
-#define JEMALLOC_OVERRIDE___LIBC_MALLOC
-#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
-#define JEMALLOC_OVERRIDE___LIBC_REALLOC
-#define JEMALLOC_OVERRIDE___LIBC_VALLOC
-/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
-
-/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#define JEMALLOC_OVERRIDE___LIBC_CALLOC
+#define JEMALLOC_OVERRIDE___LIBC_FREE
+#define JEMALLOC_OVERRIDE___LIBC_MALLOC
+#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
+#define JEMALLOC_OVERRIDE___LIBC_REALLOC
+#define JEMALLOC_OVERRIDE___LIBC_VALLOC
+/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
+
+/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
@@ -33,80 +33,80 @@
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
-#if defined(__i386__) || defined(__amd64__)
-#define CPU_SPINWAIT __asm__ volatile("pause")
-/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
-#define HAVE_CPU_SPINWAIT 1
-#else
+#if defined(__i386__) || defined(__amd64__)
+#define CPU_SPINWAIT __asm__ volatile("pause")
+/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
+#define HAVE_CPU_SPINWAIT 1
+#else
#define CPU_SPINWAIT
-#define HAVE_CPU_SPINWAIT 0
+#define HAVE_CPU_SPINWAIT 0
#endif
-/*
- * Number of significant bits in virtual addresses. This may be less than the
- * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
- * bits are the same as bit 47.
- */
-#define LG_VADDR 48
-
-/* Defined if C11 atomics are available. */
-#define JEMALLOC_C11_ATOMICS 1
-
-/* Defined if GCC __atomic atomics are available. */
-#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
-/* and the 8-bit variant support. */
-#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
-
-/* Defined if GCC __sync atomics are available. */
-#define JEMALLOC_GCC_SYNC_ATOMICS 1
-/* and the 8-bit variant support. */
-#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
-
-/*
- * Defined if __builtin_clz() and __builtin_clzl() are available.
- */
-#define JEMALLOC_HAVE_BUILTIN_CLZ
-
-/*
- * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
- */
-/* #undef JEMALLOC_OS_UNFAIR_LOCK */
-
-/* Defined if syscall(2) is usable. */
-#define JEMALLOC_USE_SYSCALL
-
-/*
- * Defined if secure_getenv(3) is available.
- */
-#define JEMALLOC_HAVE_SECURE_GETENV
-
-/*
- * Defined if issetugid(2) is available.
- */
-/* #undef JEMALLOC_HAVE_ISSETUGID */
-
-/* Defined if pthread_atfork(3) is available. */
-#define JEMALLOC_HAVE_PTHREAD_ATFORK
-
-/* Defined if pthread_setname_np(3) is available. */
-/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
- */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
- */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
-
-/*
- * Defined if mach_absolute_time() is available.
- */
-/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
-
-/*
+/*
+ * Number of significant bits in virtual addresses. This may be less than the
+ * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
+ * bits are the same as bit 47.
+ */
+#define LG_VADDR 48
+
+/* Defined if C11 atomics are available. */
+#define JEMALLOC_C11_ATOMICS 1
+
+/* Defined if GCC __atomic atomics are available. */
+#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
+/* and the 8-bit variant support. */
+#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
+
+/* Defined if GCC __sync atomics are available. */
+#define JEMALLOC_GCC_SYNC_ATOMICS 1
+/* and the 8-bit variant support. */
+#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
+
+/*
+ * Defined if __builtin_clz() and __builtin_clzl() are available.
+ */
+#define JEMALLOC_HAVE_BUILTIN_CLZ
+
+/*
+ * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
+ */
+/* #undef JEMALLOC_OS_UNFAIR_LOCK */
+
+/* Defined if syscall(2) is usable. */
+#define JEMALLOC_USE_SYSCALL
+
+/*
+ * Defined if secure_getenv(3) is available.
+ */
+#define JEMALLOC_HAVE_SECURE_GETENV
+
+/*
+ * Defined if issetugid(2) is available.
+ */
+/* #undef JEMALLOC_HAVE_ISSETUGID */
+
+/* Defined if pthread_atfork(3) is available. */
+#define JEMALLOC_HAVE_PTHREAD_ATFORK
+
+/* Defined if pthread_setname_np(3) is available. */
+/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
+ */
+#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
+ */
+#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
+
+/*
+ * Defined if mach_absolute_time() is available.
+ */
+/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
+
+/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
@@ -120,7 +120,7 @@
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
-#define JEMALLOC_THREADED_INIT
+#define JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
@@ -130,7 +130,7 @@
/* #undef JEMALLOC_MUTEX_INIT_CB */
/* Non-empty if the tls_model attribute is supported. */
-#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
+#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
@@ -139,31 +139,31 @@
/* #undef JEMALLOC_DEBUG */
/* JEMALLOC_STATS enables statistics calculation. */
-#define JEMALLOC_STATS
-
-/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
-/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
+#define JEMALLOC_STATS
+/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
+/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
+
/* JEMALLOC_PROF enables allocation profiling. */
-#define JEMALLOC_PROF
+#define JEMALLOC_PROF
/* Use libunwind for profile backtracing if defined. */
-#define JEMALLOC_PROF_LIBUNWIND
+#define JEMALLOC_PROF_LIBUNWIND
/* Use libgcc for profile backtracing if defined. */
-/* #undef JEMALLOC_PROF_LIBGCC */
+/* #undef JEMALLOC_PROF_LIBGCC */
/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */
/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
-#define JEMALLOC_DSS
+#define JEMALLOC_DSS
-/* Support memory filling (junk/zero). */
-#define JEMALLOC_FILL
+/* Support memory filling (junk/zero). */
+#define JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */
@@ -174,135 +174,135 @@
/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */
-/*
- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
- * classes).
- */
-/* #undef LG_QUANTUM */
-
-/* One page is 2^LG_PAGE bytes. */
-#define LG_PAGE 12
-
-/*
- * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
- * system does not explicitly support huge pages; system calls that require
- * explicit huge page support are separately configured.
- */
-#define LG_HUGEPAGE 21
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+/* #undef LG_QUANTUM */
+/* One page is 2^LG_PAGE bytes. */
+#define LG_PAGE 12
+
/*
- * If defined, adjacent virtual memory mappings with identical attributes
- * automatically coalesce, and they fragment when changes are made to subranges.
- * This is the normal order of things for mmap()/munmap(), but on Windows
- * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
- * mappings do *not* coalesce/fragment.
+ * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
+ * system does not explicitly support huge pages; system calls that require
+ * explicit huge page support are separately configured.
*/
-#define JEMALLOC_MAPS_COALESCE
+#define LG_HUGEPAGE 21
/*
- * If defined, retain memory for later reuse by default rather than using e.g.
- * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
- * common sequences of mmap()/munmap() calls will cause virtual memory map
- * holes.
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to subranges.
+ * This is the normal order of things for mmap()/munmap(), but on Windows
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
+ * mappings do *not* coalesce/fragment.
*/
-#define JEMALLOC_RETAIN
+#define JEMALLOC_MAPS_COALESCE
+/*
+ * If defined, retain memory for later reuse by default rather than using e.g.
+ * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
+ * common sequences of mmap()/munmap() calls will cause virtual memory map
+ * holes.
+ */
+#define JEMALLOC_RETAIN
+
/* TLS is used to map arenas and magazine caches to threads. */
-#define JEMALLOC_TLS
-
-/*
- * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
- * Don't use this directly; instead use unreachable() from util.h
- */
-#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
-
-/*
- * ffs*() functions to use for bitmapping. Don't use these directly; instead,
- * use ffs_*() from util.h.
- */
-#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
-#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
-#define JEMALLOC_INTERNAL_FFS __builtin_ffs
-
-/*
- * popcount*() functions to use for bitmapping.
- */
-#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
-#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
-
-/*
- * If defined, explicitly attempt to more uniformly distribute large allocation
- * pointer alignments across all cache indices.
- */
-#define JEMALLOC_CACHE_OBLIVIOUS
-
-/*
- * If defined, enable logging facilities. We make this a configure option to
- * avoid taking extra branches everywhere.
- */
-/* #undef JEMALLOC_LOG */
-
-/*
- * If defined, use readlinkat() (instead of readlink()) to follow
- * /etc/malloc_conf.
- */
-/* #undef JEMALLOC_READLINKAT */
-
-/*
+#define JEMALLOC_TLS
+
+/*
+ * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
+ * Don't use this directly; instead use unreachable() from util.h
+ */
+#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
+
+/*
+ * ffs*() functions to use for bitmapping. Don't use these directly; instead,
+ * use ffs_*() from util.h.
+ */
+#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
+#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
+#define JEMALLOC_INTERNAL_FFS __builtin_ffs
+
+/*
+ * popcount*() functions to use for bitmapping.
+ */
+#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
+#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
+
+/*
+ * If defined, explicitly attempt to more uniformly distribute large allocation
+ * pointer alignments across all cache indices.
+ */
+#define JEMALLOC_CACHE_OBLIVIOUS
+
+/*
+ * If defined, enable logging facilities. We make this a configure option to
+ * avoid taking extra branches everywhere.
+ */
+/* #undef JEMALLOC_LOG */
+
+/*
+ * If defined, use readlinkat() (instead of readlink()) to follow
+ * /etc/malloc_conf.
+ */
+/* #undef JEMALLOC_READLINKAT */
+
+/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
/*
- * Methods for determining whether the OS overcommits.
- * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
- * /proc/sys/vm.overcommit_memory file.
- * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
- */
-/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
-#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-
-/* Defined if madvise(2) is available. */
-#define JEMALLOC_HAVE_MADVISE
-
-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-#define JEMALLOC_HAVE_MADVISE_HUGE
-
-/*
+ * Methods for determining whether the OS overcommits.
+ * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
+ * /proc/sys/vm.overcommit_memory file.
+ * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
+ */
+/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
+#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+
+/* Defined if madvise(2) is available. */
+#define JEMALLOC_HAVE_MADVISE
+
+/*
+ * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
+ * arguments to madvise(2).
+ */
+#define JEMALLOC_HAVE_MADVISE_HUGE
+
+/*
* Methods for purging unused pages differ between operating systems.
*
- * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
- * will be discarded rather than swapped out.
- * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
- * defined, this immediately discards pages,
+ * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
+ * will be discarded rather than swapped out.
+ * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
+ * defined, this immediately discards pages,
* such that new pages will be demand-zeroed if
- * the address region is later touched;
- * otherwise this behaves similarly to
- * MADV_FREE, though typically with higher
- * system overhead.
- */
-#define JEMALLOC_PURGE_MADVISE_FREE
-#define JEMALLOC_PURGE_MADVISE_DONTNEED
-#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
-
-/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
-#define JEMALLOC_DEFINE_MADVISE_FREE
-
-/*
- * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
- */
-#define JEMALLOC_MADVISE_DONTDUMP
-
-/*
- * Defined if transparent huge pages (THPs) are supported via the
- * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
- */
-/* #undef JEMALLOC_THP */
-
-/* Define if operating system has alloca.h header. */
+ * the address region is later touched;
+ * otherwise this behaves similarly to
+ * MADV_FREE, though typically with higher
+ * system overhead.
+ */
+#define JEMALLOC_PURGE_MADVISE_FREE
+#define JEMALLOC_PURGE_MADVISE_DONTNEED
+#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+
+/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
+#define JEMALLOC_DEFINE_MADVISE_FREE
+
+/*
+ * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
+ */
+#define JEMALLOC_MADVISE_DONTDUMP
+
+/*
+ * Defined if transparent huge pages (THPs) are supported via the
+ * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
+ */
+/* #undef JEMALLOC_THP */
+
+/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H 1
/* C99 restrict keyword supported. */
@@ -317,56 +317,56 @@
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3
-/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
-#define LG_SIZEOF_LONG_LONG 3
-
+/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
+#define LG_SIZEOF_LONG_LONG 3
+
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
-/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
-#define JEMALLOC_GLIBC_MALLOC_HOOK
-
-/* glibc memalign hook. */
-#define JEMALLOC_GLIBC_MEMALIGN_HOOK
-
-/* pthread support */
-#define JEMALLOC_HAVE_PTHREAD
-
-/* dlsym() support */
-#define JEMALLOC_HAVE_DLSYM
-
-/* Adaptive mutex support in pthreads. */
-#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
-
-/* GNU specific sched_getcpu support */
-#define JEMALLOC_HAVE_SCHED_GETCPU
-
-/* GNU specific sched_setaffinity support */
-#define JEMALLOC_HAVE_SCHED_SETAFFINITY
-
-/*
- * If defined, all the features necessary for background threads are present.
- */
-#define JEMALLOC_BACKGROUND_THREAD 1
-
-/*
- * If defined, jemalloc symbols are not exported (doesn't work when
- * JEMALLOC_PREFIX is not defined).
- */
-/* #undef JEMALLOC_EXPORT */
-
-/* config.malloc_conf options string. */
-#define JEMALLOC_CONFIG_MALLOC_CONF ""
-
-/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
-#define JEMALLOC_IS_MALLOC 1
-
-/*
- * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
- */
-#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
-
-/* Performs additional safety checks when defined. */
-/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
-
+/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
+#define JEMALLOC_GLIBC_MALLOC_HOOK
+
+/* glibc memalign hook. */
+#define JEMALLOC_GLIBC_MEMALIGN_HOOK
+
+/* pthread support */
+#define JEMALLOC_HAVE_PTHREAD
+
+/* dlsym() support */
+#define JEMALLOC_HAVE_DLSYM
+
+/* Adaptive mutex support in pthreads. */
+#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
+
+/* GNU specific sched_getcpu support */
+#define JEMALLOC_HAVE_SCHED_GETCPU
+
+/* GNU specific sched_setaffinity support */
+#define JEMALLOC_HAVE_SCHED_SETAFFINITY
+
+/*
+ * If defined, all the features necessary for background threads are present.
+ */
+#define JEMALLOC_BACKGROUND_THREAD 1
+
+/*
+ * If defined, jemalloc symbols are not exported (doesn't work when
+ * JEMALLOC_PREFIX is not defined).
+ */
+/* #undef JEMALLOC_EXPORT */
+
+/* config.malloc_conf options string. */
+#define JEMALLOC_CONFIG_MALLOC_CONF ""
+
+/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
+#define JEMALLOC_IS_MALLOC 1
+
+/*
+ * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
+ */
+#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
+
+/* Performs additional safety checks when defined. */
+/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
+
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
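These feature macros are consumed with ordinary preprocessor guards elsewhere in the tree. As a rough sketch only (the helper below is hypothetical and not part of this patch), a timer that honours the CLOCK_MONOTONIC settings defined above could be selected like this:

#include <stdint.h>
#include <time.h>

static uint64_t
example_monotonic_ns(void) {
#if defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);	/* cheaper, coarser Linux clock */
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);		/* portable POSIX fallback */
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
#else
	return 0;	/* no monotonic clock configured; a real consumer would fall back */
#endif
}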
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-osx-arm64.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-osx-arm64.h
index bb80e1f91d..4d344631fc 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-osx-arm64.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-osx-arm64.h
@@ -1,6 +1,6 @@
-#pragma once
-
-#include "jemalloc_internal_defs-osx.h"
-
-/* One page is 2^LG_PAGE bytes. */
-#define LG_PAGE 14
+#pragma once
+
+#include "jemalloc_internal_defs-osx.h"
+
+/* One page is 2^LG_PAGE bytes. */
+#define LG_PAGE 14
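The arm64 variant overrides only the page size: LG_PAGE 14 corresponds to the 16 KiB (2^14 = 16384 byte) pages used on Apple Silicon, versus the 4 KiB (2^12) pages in the Linux baseline above. A trivial standalone check of that arithmetic, for illustration only:

#include <assert.h>

int main(void) {
	assert((1 << 14) == 16 * 1024);	/* osx-arm64: 16 KiB pages */
	assert((1 << 12) == 4 * 1024);	/* linux baseline: 4 KiB pages */
	return 0;
}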
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-osx.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-osx.h
index 1a008eb05c..58c484cd08 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-osx.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-osx.h
@@ -1,150 +1,150 @@
-#pragma once
-
-#include "jemalloc_internal_defs-linux.h"
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
-#undef JEMALLOC_OVERRIDE___LIBC_FREE
-#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
-#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
-#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
-#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
-
-/*
- * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
- */
-#define JEMALLOC_OS_UNFAIR_LOCK
-
-/* Defined if syscall(2) is usable. */
-#undef JEMALLOC_USE_SYSCALL
-
-/*
- * Defined if secure_getenv(3) is available.
- */
-#undef JEMALLOC_HAVE_SECURE_GETENV
-
-/*
- * Defined if issetugid(2) is available.
- */
-#define JEMALLOC_HAVE_ISSETUGID
-
-/* Defined if pthread_setname_np(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
-
-/*
- * Defined if mach_absolute_time() is available.
- */
-#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1
-
-/*
- * Defined if threaded initialization is known to be safe on this platform.
- * Among other things, it must be possible to initialize a mutex without
- * triggering allocation in order for threaded allocation to be safe.
- */
-#undef JEMALLOC_THREADED_INIT
-
-/* JEMALLOC_PROF enables allocation profiling. */
-#undef JEMALLOC_PROF
-
-/* Use libunwind for profile backtracing if defined. */
-#undef JEMALLOC_PROF_LIBUNWIND
-
-/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
- * segment (DSS).
- */
-#undef JEMALLOC_DSS
-
-/*
- * If defined, retain memory for later reuse by default rather than using e.g.
- * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
- * common sequences of mmap()/munmap() calls will cause virtual memory map
- * holes.
- */
-#undef JEMALLOC_RETAIN
-
-/* TLS is used to map arenas and magazine caches to threads. */
-#undef JEMALLOC_TLS
-
-/*
- * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
- */
-#define JEMALLOC_ZONE
-
-/*
- * Methods for determining whether the OS overcommits.
- * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
- * /proc/sys/vm.overcommit_memory file.
- * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
- */
-#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-
-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-#undef JEMALLOC_HAVE_MADVISE_HUGE
-
-/*
- * Methods for purging unused pages differ between operating systems.
- *
- * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
- * will be discarded rather than swapped out.
- * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
- * defined, this immediately discards pages,
- * such that new pages will be demand-zeroed if
- * the address region is later touched;
- * otherwise this behaves similarly to
- * MADV_FREE, though typically with higher
- * system overhead.
- */
-#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
-
-/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
-#undef JEMALLOC_DEFINE_MADVISE_FREE
-
-/*
- * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
- */
-#undef JEMALLOC_MADVISE_DONTDUMP
-
-/* Define if operating system has alloca.h header. */
-#undef JEMALLOC_HAS_ALLOCA_H
-
-/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
-#undef JEMALLOC_GLIBC_MALLOC_HOOK
-
-/* glibc memalign hook. */
-#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
-
-/* Adaptive mutex support in pthreads. */
-#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
-
-/* GNU specific sched_getcpu support */
-#undef JEMALLOC_HAVE_SCHED_GETCPU
-
-/* GNU specific sched_setaffinity support */
-#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
-
-/*
- * If defined, all the features necessary for background threads are present.
- */
-#undef JEMALLOC_BACKGROUND_THREAD
-
-/*
- * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
- */
-#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
+#pragma once
+
+#include "jemalloc_internal_defs-linux.h"
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_FREE
+#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
+#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
+
+/*
+ * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
+ */
+#define JEMALLOC_OS_UNFAIR_LOCK
+
+/* Defined if syscall(2) is usable. */
+#undef JEMALLOC_USE_SYSCALL
+
+/*
+ * Defined if secure_getenv(3) is available.
+ */
+#undef JEMALLOC_HAVE_SECURE_GETENV
+
+/*
+ * Defined if issetugid(2) is available.
+ */
+#define JEMALLOC_HAVE_ISSETUGID
+
+/* Defined if pthread_setname_np(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
+ */
+#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
+ */
+#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
+
+/*
+ * Defined if mach_absolute_time() is available.
+ */
+#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1
+
+/*
+ * Defined if threaded initialization is known to be safe on this platform.
+ * Among other things, it must be possible to initialize a mutex without
+ * triggering allocation in order for threaded allocation to be safe.
+ */
+#undef JEMALLOC_THREADED_INIT
+
+/* JEMALLOC_PROF enables allocation profiling. */
+#undef JEMALLOC_PROF
+
+/* Use libunwind for profile backtracing if defined. */
+#undef JEMALLOC_PROF_LIBUNWIND
+
+/*
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
+ * segment (DSS).
+ */
+#undef JEMALLOC_DSS
+
+/*
+ * If defined, retain memory for later reuse by default rather than using e.g.
+ * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
+ * common sequences of mmap()/munmap() calls will cause virtual memory map
+ * holes.
+ */
+#undef JEMALLOC_RETAIN
+
+/* TLS is used to map arenas and magazine caches to threads. */
+#undef JEMALLOC_TLS
+
+/*
+ * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
+ */
+#define JEMALLOC_ZONE
+
+/*
+ * Methods for determining whether the OS overcommits.
+ * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
+ * /proc/sys/vm.overcommit_memory file.
+ * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
+ */
+#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+
+/*
+ * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
+ * arguments to madvise(2).
+ */
+#undef JEMALLOC_HAVE_MADVISE_HUGE
+
+/*
+ * Methods for purging unused pages differ between operating systems.
+ *
+ * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
+ * will be discarded rather than swapped out.
+ * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
+ * defined, this immediately discards pages,
+ * such that new pages will be demand-zeroed if
+ * the address region is later touched;
+ * otherwise this behaves similarly to
+ * MADV_FREE, though typically with higher
+ * system overhead.
+ */
+#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+
+/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
+#undef JEMALLOC_DEFINE_MADVISE_FREE
+
+/*
+ * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
+ */
+#undef JEMALLOC_MADVISE_DONTDUMP
+
+/* Define if operating system has alloca.h header. */
+#undef JEMALLOC_HAS_ALLOCA_H
+
+/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
+#undef JEMALLOC_GLIBC_MALLOC_HOOK
+
+/* glibc memalign hook. */
+#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
+
+/* Adaptive mutex support in pthreads. */
+#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
+
+/* GNU specific sched_getcpu support */
+#undef JEMALLOC_HAVE_SCHED_GETCPU
+
+/* GNU specific sched_setaffinity support */
+#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
+
+/*
+ * If defined, all the features necessary for background threads are present.
+ */
+#undef JEMALLOC_BACKGROUND_THREAD
+
+/*
+ * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
+ */
+#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-win.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-win.h
index a49610dbd0..808f970efb 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-win.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-win.h
@@ -1,216 +1,216 @@
-#pragma once
-
-#include "jemalloc_internal_defs-linux.h"
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
-#undef JEMALLOC_OVERRIDE___LIBC_FREE
-#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
-#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
-#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
-#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
-
-/*
- * Hyper-threaded CPUs may need a special instruction inside spin loops in
- * order to yield to another virtual CPU.
- */
-#if defined(_M_IX86) || defined(_M_AMD64)
-#define CPU_SPINWAIT _mm_pause()
-#define HAVE_CPU_SPINWAIT 1
-#endif
-
-/*
- * Number of significant bits in virtual addresses. This may be less than the
- * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
- * bits are the same as bit 47.
- */
-#define LG_VADDR 48
-
-/* Defined if C11 atomics are available. */
-#undef JEMALLOC_C11_ATOMICS
-
-/* Defined if GCC __atomic atomics are available. */
-#undef JEMALLOC_GCC_ATOMIC_ATOMICS
-/* and the 8-bit variant support. */
-#undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS
-
-/* Defined if GCC __sync atomics are available. */
-#undef JEMALLOC_GCC_SYNC_ATOMICS
-/* and the 8-bit variant support. */
-#undef JEMALLOC_GCC_U8_SYNC_ATOMICS
-
-/*
- * Defined if __builtin_clz() and __builtin_clzl() are available.
- */
-#undef JEMALLOC_HAVE_BUILTIN_CLZ
-
-/* Defined if syscall(2) is usable. */
-#undef JEMALLOC_USE_SYSCALL
-
-/*
- * Defined if secure_getenv(3) is available.
- */
-#undef JEMALLOC_HAVE_SECURE_GETENV
-
-/* Defined if pthread_atfork(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_ATFORK
-
-/* Defined if pthread_setname_np(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
-
-/*
- * Defined if threaded initialization is known to be safe on this platform.
- * Among other things, it must be possible to initialize a mutex without
- * triggering allocation in order for threaded allocation to be safe.
- */
-#undef JEMALLOC_THREADED_INIT
-
-/* Non-empty if the tls_model attribute is supported. */
-#define JEMALLOC_TLS_MODEL
-
-/* JEMALLOC_PROF enables allocation profiling. */
-#undef JEMALLOC_PROF
-
-/* Use libunwind for profile backtracing if defined. */
-#undef JEMALLOC_PROF_LIBUNWIND
-
-/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
- * segment (DSS).
- */
-#undef JEMALLOC_DSS
-
-/*
- * If defined, adjacent virtual memory mappings with identical attributes
- * automatically coalesce, and they fragment when changes are made to subranges.
- * This is the normal order of things for mmap()/munmap(), but on Windows
- * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
- * mappings do *not* coalesce/fragment.
- */
-#undef JEMALLOC_MAPS_COALESCE
-
-/*
- * If defined, retain memory for later reuse by default rather than using e.g.
- * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
- * common sequences of mmap()/munmap() calls will cause virtual memory map
- * holes.
- */
-#undef JEMALLOC_RETAIN
-
-/* TLS is used to map arenas and magazine caches to threads. */
-#undef JEMALLOC_TLS
-
-/*
- * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
- * Don't use this directly; instead use unreachable() from util.h
- */
-#define JEMALLOC_INTERNAL_UNREACHABLE abort
-
-/*
- * ffs*() functions to use for bitmapping. Don't use these directly; instead,
- * use ffs_*() from util.h.
- */
-#define JEMALLOC_INTERNAL_FFSLL ffsll
-#define JEMALLOC_INTERNAL_FFSL ffsl
-#define JEMALLOC_INTERNAL_FFS ffs
-
-/*
- * popcount*() functions to use for bitmapping.
- */
-#undef JEMALLOC_INTERNAL_POPCOUNTL
-#undef JEMALLOC_INTERNAL_POPCOUNT
-
-/*
- * Methods for determining whether the OS overcommits.
- * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
- * /proc/sys/vm.overcommit_memory file.
- * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
- */
-#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-
-/* Defined if madvise(2) is available. */
-#undef JEMALLOC_HAVE_MADVISE
-
-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-#undef JEMALLOC_HAVE_MADVISE_HUGE
-
-/*
- * Methods for purging unused pages differ between operating systems.
- *
- * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
- * will be discarded rather than swapped out.
- * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
- * defined, this immediately discards pages,
- * such that new pages will be demand-zeroed if
- * the address region is later touched;
- * otherwise this behaves similarly to
- * MADV_FREE, though typically with higher
- * system overhead.
- */
-#undef JEMALLOC_PURGE_MADVISE_FREE
-#undef JEMALLOC_PURGE_MADVISE_DONTNEED
-#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
-
-/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
-#undef JEMALLOC_DEFINE_MADVISE_FREE
-
-/*
- * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
- */
-#undef JEMALLOC_MADVISE_DONTDUMP
-
-/* Define if operating system has alloca.h header. */
-#undef JEMALLOC_HAS_ALLOCA_H
-
-/* C99 restrict keyword supported. */
-#undef JEMALLOC_HAS_RESTRICT
-
-/* sizeof(long) == 2^LG_SIZEOF_LONG. */
-#define LG_SIZEOF_LONG 2
-
-/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
-#undef JEMALLOC_GLIBC_MALLOC_HOOK
-
-/* glibc memalign hook. */
-#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
-
-/* pthread support */
-#undef JEMALLOC_HAVE_PTHREAD
-
-/* dlsym() support */
-#undef JEMALLOC_HAVE_DLSYM
-
-/* Adaptive mutex support in pthreads. */
-#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
-
-/* GNU specific sched_getcpu support */
-#undef JEMALLOC_HAVE_SCHED_GETCPU
-
-/* GNU specific sched_setaffinity support */
-#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
-
-/*
- * If defined, all the features necessary for background threads are present.
- */
-#undef JEMALLOC_BACKGROUND_THREAD
-
-/*
- * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
- */
-#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
+#pragma once
+
+#include "jemalloc_internal_defs-linux.h"
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_FREE
+#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
+#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
+
+/*
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
+ * order to yield to another virtual CPU.
+ */
+#if defined(_M_IX86) || defined(_M_AMD64)
+#define CPU_SPINWAIT _mm_pause()
+#define HAVE_CPU_SPINWAIT 1
+#endif
+
+/*
+ * Number of significant bits in virtual addresses. This may be less than the
+ * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
+ * bits are the same as bit 47.
+ */
+#define LG_VADDR 48
+
+/* Defined if C11 atomics are available. */
+#undef JEMALLOC_C11_ATOMICS
+
+/* Defined if GCC __atomic atomics are available. */
+#undef JEMALLOC_GCC_ATOMIC_ATOMICS
+/* and the 8-bit variant support. */
+#undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS
+
+/* Defined if GCC __sync atomics are available. */
+#undef JEMALLOC_GCC_SYNC_ATOMICS
+/* and the 8-bit variant support. */
+#undef JEMALLOC_GCC_U8_SYNC_ATOMICS
+
+/*
+ * Defined if __builtin_clz() and __builtin_clzl() are available.
+ */
+#undef JEMALLOC_HAVE_BUILTIN_CLZ
+
+/* Defined if syscall(2) is usable. */
+#undef JEMALLOC_USE_SYSCALL
+
+/*
+ * Defined if secure_getenv(3) is available.
+ */
+#undef JEMALLOC_HAVE_SECURE_GETENV
+
+/* Defined if pthread_atfork(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_ATFORK
+
+/* Defined if pthread_setname_np(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
+ */
+#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
+ */
+#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
+
+/*
+ * Defined if threaded initialization is known to be safe on this platform.
+ * Among other things, it must be possible to initialize a mutex without
+ * triggering allocation in order for threaded allocation to be safe.
+ */
+#undef JEMALLOC_THREADED_INIT
+
+/* Non-empty if the tls_model attribute is supported. */
+#define JEMALLOC_TLS_MODEL
+
+/* JEMALLOC_PROF enables allocation profiling. */
+#undef JEMALLOC_PROF
+
+/* Use libunwind for profile backtracing if defined. */
+#undef JEMALLOC_PROF_LIBUNWIND
+
+/*
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
+ * segment (DSS).
+ */
+#undef JEMALLOC_DSS
+
+/*
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to subranges.
+ * This is the normal order of things for mmap()/munmap(), but on Windows
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
+ * mappings do *not* coalesce/fragment.
+ */
+#undef JEMALLOC_MAPS_COALESCE
+
+/*
+ * If defined, retain memory for later reuse by default rather than using e.g.
+ * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
+ * common sequences of mmap()/munmap() calls will cause virtual memory map
+ * holes.
+ */
+#undef JEMALLOC_RETAIN
+
+/* TLS is used to map arenas and magazine caches to threads. */
+#undef JEMALLOC_TLS
+
+/*
+ * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
+ * Don't use this directly; instead use unreachable() from util.h
+ */
+#define JEMALLOC_INTERNAL_UNREACHABLE abort
+
+/*
+ * ffs*() functions to use for bitmapping. Don't use these directly; instead,
+ * use ffs_*() from util.h.
+ */
+#define JEMALLOC_INTERNAL_FFSLL ffsll
+#define JEMALLOC_INTERNAL_FFSL ffsl
+#define JEMALLOC_INTERNAL_FFS ffs
+
+/*
+ * popcount*() functions to use for bitmapping.
+ */
+#undef JEMALLOC_INTERNAL_POPCOUNTL
+#undef JEMALLOC_INTERNAL_POPCOUNT
+
+/*
+ * Methods for determining whether the OS overcommits.
+ * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
+ * /proc/sys/vm.overcommit_memory file.
+ * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
+ */
+#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+
+/* Defined if madvise(2) is available. */
+#undef JEMALLOC_HAVE_MADVISE
+
+/*
+ * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
+ * arguments to madvise(2).
+ */
+#undef JEMALLOC_HAVE_MADVISE_HUGE
+
+/*
+ * Methods for purging unused pages differ between operating systems.
+ *
+ * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
+ * will be discarded rather than swapped out.
+ * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
+ * defined, this immediately discards pages,
+ * such that new pages will be demand-zeroed if
+ * the address region is later touched;
+ * otherwise this behaves similarly to
+ * MADV_FREE, though typically with higher
+ * system overhead.
+ */
+#undef JEMALLOC_PURGE_MADVISE_FREE
+#undef JEMALLOC_PURGE_MADVISE_DONTNEED
+#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+
+/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
+#undef JEMALLOC_DEFINE_MADVISE_FREE
+
+/*
+ * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
+ */
+#undef JEMALLOC_MADVISE_DONTDUMP
+
+/* Define if operating system has alloca.h header. */
+#undef JEMALLOC_HAS_ALLOCA_H
+
+/* C99 restrict keyword supported. */
+#undef JEMALLOC_HAS_RESTRICT
+
+/* sizeof(long) == 2^LG_SIZEOF_LONG. */
+#define LG_SIZEOF_LONG 2
+
+/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
+#undef JEMALLOC_GLIBC_MALLOC_HOOK
+
+/* glibc memalign hook. */
+#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
+
+/* pthread support */
+#undef JEMALLOC_HAVE_PTHREAD
+
+/* dlsym() support */
+#undef JEMALLOC_HAVE_DLSYM
+
+/* Adaptive mutex support in pthreads. */
+#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
+
+/* GNU specific sched_getcpu support */
+#undef JEMALLOC_HAVE_SCHED_GETCPU
+
+/* GNU specific sched_setaffinity support */
+#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
+
+/*
+ * If defined, all the features necessary for background threads are present.
+ */
+#undef JEMALLOC_BACKGROUND_THREAD
+
+/*
+ * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
+ */
+#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
index fda39748b8..730255b086 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
@@ -1,11 +1,11 @@
#pragma once
#if defined(__APPLE__) && defined(__arm64__)
-# include "jemalloc_internal_defs-osx-arm64.h"
-#elif defined(__APPLE__)
-# include "jemalloc_internal_defs-osx.h"
-#elif defined(_MSC_VER)
-# include "jemalloc_internal_defs-win.h"
-#else
+# include "jemalloc_internal_defs-osx-arm64.h"
+#elif defined(__APPLE__)
+# include "jemalloc_internal_defs-osx.h"
+#elif defined(_MSC_VER)
+# include "jemalloc_internal_defs-win.h"
+#else
# include "jemalloc_internal_defs-linux.h"
#endif
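Each platform header selected above layers on the Linux baseline: it includes jemalloc_internal_defs-linux.h first and then #undefs or redefines individual knobs. A minimal sketch of that pattern with hypothetical file and macro names, not the real jemalloc headers:

/* base_defs.h: hypothetical baseline configuration. */
#define EXAMPLE_HAVE_MADVISE
#define EXAMPLE_LG_PAGE 12

/* win_defs.h: hypothetical override layered on the baseline. */
#include "base_defs.h"
#undef EXAMPLE_HAVE_MADVISE	/* no madvise(2) on this platform */
#undef EXAMPLE_LG_PAGE
#define EXAMPLE_LG_PAGE 13	/* different page size */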
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
index d291170bee..a3ce965c02 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
@@ -1,57 +1,57 @@
-#ifndef JEMALLOC_INTERNAL_EXTERNS_H
-#define JEMALLOC_INTERNAL_EXTERNS_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/tsd_types.h"
-
-/* TSD checks this to set thread local slow state accordingly. */
-extern bool malloc_slow;
-
-/* Run-time options. */
-extern bool opt_abort;
-extern bool opt_abort_conf;
-extern bool opt_confirm_conf;
-extern const char *opt_junk;
-extern bool opt_junk_alloc;
-extern bool opt_junk_free;
-extern bool opt_utrace;
-extern bool opt_xmalloc;
-extern bool opt_zero;
-extern unsigned opt_narenas;
-
-/* Number of CPUs. */
-extern unsigned ncpus;
-
-/* Number of arenas used for automatic multiplexing of threads and arenas. */
-extern unsigned narenas_auto;
-
-/* Base index for manual arenas. */
-extern unsigned manual_arena_base;
-
-/*
- * Arenas that are used to service external requests. Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- */
-extern atomic_p_t arenas[];
-
-void *a0malloc(size_t size);
-void a0dalloc(void *ptr);
-void *bootstrap_malloc(size_t size);
-void *bootstrap_calloc(size_t num, size_t size);
-void bootstrap_free(void *ptr);
-void arena_set(unsigned ind, arena_t *arena);
-unsigned narenas_total_get(void);
-arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
-void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
-void iarena_cleanup(tsd_t *tsd);
-void arena_cleanup(tsd_t *tsd);
-void arenas_tdata_cleanup(tsd_t *tsd);
-void jemalloc_prefork(void);
-void jemalloc_postfork_parent(void);
-void jemalloc_postfork_child(void);
-bool malloc_initialized(void);
-void je_sdallocx_noflags(void *ptr, size_t size);
-
-#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
+#ifndef JEMALLOC_INTERNAL_EXTERNS_H
+#define JEMALLOC_INTERNAL_EXTERNS_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/tsd_types.h"
+
+/* TSD checks this to set thread local slow state accordingly. */
+extern bool malloc_slow;
+
+/* Run-time options. */
+extern bool opt_abort;
+extern bool opt_abort_conf;
+extern bool opt_confirm_conf;
+extern const char *opt_junk;
+extern bool opt_junk_alloc;
+extern bool opt_junk_free;
+extern bool opt_utrace;
+extern bool opt_xmalloc;
+extern bool opt_zero;
+extern unsigned opt_narenas;
+
+/* Number of CPUs. */
+extern unsigned ncpus;
+
+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned narenas_auto;
+
+/* Base index for manual arenas. */
+extern unsigned manual_arena_base;
+
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+extern atomic_p_t arenas[];
+
+void *a0malloc(size_t size);
+void a0dalloc(void *ptr);
+void *bootstrap_malloc(size_t size);
+void *bootstrap_calloc(size_t num, size_t size);
+void bootstrap_free(void *ptr);
+void arena_set(unsigned ind, arena_t *arena);
+unsigned narenas_total_get(void);
+arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
+arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
+void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+void iarena_cleanup(tsd_t *tsd);
+void arena_cleanup(tsd_t *tsd);
+void arenas_tdata_cleanup(tsd_t *tsd);
+void jemalloc_prefork(void);
+void jemalloc_postfork_parent(void);
+void jemalloc_postfork_child(void);
+bool malloc_initialized(void);
+void je_sdallocx_noflags(void *ptr, size_t size);
+
+#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
index 437eaa4079..57ca2549f3 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
@@ -1,94 +1,94 @@
-#ifndef JEMALLOC_INTERNAL_INCLUDES_H
-#define JEMALLOC_INTERNAL_INCLUDES_H
-
-/*
- * jemalloc can conceptually be broken into components (arena, tcache, etc.),
- * but there are circular dependencies that cannot be broken without
- * substantial performance degradation.
- *
- * Historically, we dealt with this by each header into four sections (types,
- * structs, externs, and inlines), and included each header file multiple times
- * in this file, picking out the portion we want on each pass using the
- * following #defines:
- * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data
- * types.
- * JEMALLOC_H_STRUCTS : Data structures.
- * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
- * JEMALLOC_H_INLINES : Inline functions.
- *
- * We're moving toward a world in which the dependencies are explicit; each file
- * will #include the headers it depends on (rather than relying on them being
- * implicitly available via this file including every header file in the
- * project).
- *
- * We're now in an intermediate state: we've broken up the header files to avoid
- * having to include each one multiple times, but have not yet moved the
- * dependency information into the header files (i.e. we still rely on the
- * ordering in this file to ensure all a header's dependencies are available in
- * its translation unit). Each component is now broken up into multiple header
- * files, corresponding to the sections above (e.g. instead of "foo.h", we now
- * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
- *
- * Those files which have been converted to explicitly include their
- * inter-component dependencies are now in the initial HERMETIC HEADERS
- * section. All headers may still rely on jemalloc_preamble.h (which, by fiat,
- * must be included first in every translation unit) for system headers and
- * global jemalloc definitions, however.
- */
-
-/******************************************************************************/
-/* TYPES */
-/******************************************************************************/
-
-#include "jemalloc/internal/extent_types.h"
-#include "jemalloc/internal/base_types.h"
-#include "jemalloc/internal/arena_types.h"
-#include "jemalloc/internal/tcache_types.h"
-#include "jemalloc/internal/prof_types.h"
-
-/******************************************************************************/
-/* STRUCTS */
-/******************************************************************************/
-
-#include "jemalloc/internal/arena_structs_a.h"
-#include "jemalloc/internal/extent_structs.h"
-#include "jemalloc/internal/base_structs.h"
-#include "jemalloc/internal/prof_structs.h"
-#include "jemalloc/internal/arena_structs_b.h"
-#include "jemalloc/internal/tcache_structs.h"
-#include "jemalloc/internal/background_thread_structs.h"
-
-/******************************************************************************/
-/* EXTERNS */
-/******************************************************************************/
-
-#include "jemalloc/internal/jemalloc_internal_externs.h"
-#include "jemalloc/internal/extent_externs.h"
-#include "jemalloc/internal/base_externs.h"
-#include "jemalloc/internal/arena_externs.h"
-#include "jemalloc/internal/large_externs.h"
-#include "jemalloc/internal/tcache_externs.h"
-#include "jemalloc/internal/prof_externs.h"
-#include "jemalloc/internal/background_thread_externs.h"
-
-/******************************************************************************/
-/* INLINES */
-/******************************************************************************/
-
-#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
-#include "jemalloc/internal/base_inlines.h"
-/*
- * Include portions of arena code interleaved with tcache code in order to
- * resolve circular dependencies.
- */
-#include "jemalloc/internal/prof_inlines_a.h"
-#include "jemalloc/internal/arena_inlines_a.h"
-#include "jemalloc/internal/extent_inlines.h"
-#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
-#include "jemalloc/internal/tcache_inlines.h"
-#include "jemalloc/internal/arena_inlines_b.h"
-#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
-#include "jemalloc/internal/prof_inlines_b.h"
-#include "jemalloc/internal/background_thread_inlines.h"
-
-#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
+#ifndef JEMALLOC_INTERNAL_INCLUDES_H
+#define JEMALLOC_INTERNAL_INCLUDES_H
+
+/*
+ * jemalloc can conceptually be broken into components (arena, tcache, etc.),
+ * but there are circular dependencies that cannot be broken without
+ * substantial performance degradation.
+ *
+ * Historically, we dealt with this by splitting each header into four sections
+ * (types, structs, externs, and inlines), and included each header file
+ * multiple times in this file, picking out the portion we want on each pass
+ * using the following #defines:
+ * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
+ * types.
+ * JEMALLOC_H_STRUCTS : Data structures.
+ * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
+ * JEMALLOC_H_INLINES : Inline functions.
+ *
+ * We're moving toward a world in which the dependencies are explicit; each file
+ * will #include the headers it depends on (rather than relying on them being
+ * implicitly available via this file including every header file in the
+ * project).
+ *
+ * We're now in an intermediate state: we've broken up the header files to avoid
+ * having to include each one multiple times, but have not yet moved the
+ * dependency information into the header files (i.e. we still rely on the
+ * ordering in this file to ensure all a header's dependencies are available in
+ * its translation unit). Each component is now broken up into multiple header
+ * files, corresponding to the sections above (e.g. instead of "foo.h", we now
+ * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
+ *
+ * Those files which have been converted to explicitly include their
+ * inter-component dependencies are now in the initial HERMETIC HEADERS
+ * section. All headers may still rely on jemalloc_preamble.h (which, by fiat,
+ * must be included first in every translation unit) for system headers and
+ * global jemalloc definitions, however.
+ */
+
+/******************************************************************************/
+/* TYPES */
+/******************************************************************************/
+
+#include "jemalloc/internal/extent_types.h"
+#include "jemalloc/internal/base_types.h"
+#include "jemalloc/internal/arena_types.h"
+#include "jemalloc/internal/tcache_types.h"
+#include "jemalloc/internal/prof_types.h"
+
+/******************************************************************************/
+/* STRUCTS */
+/******************************************************************************/
+
+#include "jemalloc/internal/arena_structs_a.h"
+#include "jemalloc/internal/extent_structs.h"
+#include "jemalloc/internal/base_structs.h"
+#include "jemalloc/internal/prof_structs.h"
+#include "jemalloc/internal/arena_structs_b.h"
+#include "jemalloc/internal/tcache_structs.h"
+#include "jemalloc/internal/background_thread_structs.h"
+
+/******************************************************************************/
+/* EXTERNS */
+/******************************************************************************/
+
+#include "jemalloc/internal/jemalloc_internal_externs.h"
+#include "jemalloc/internal/extent_externs.h"
+#include "jemalloc/internal/base_externs.h"
+#include "jemalloc/internal/arena_externs.h"
+#include "jemalloc/internal/large_externs.h"
+#include "jemalloc/internal/tcache_externs.h"
+#include "jemalloc/internal/prof_externs.h"
+#include "jemalloc/internal/background_thread_externs.h"
+
+/******************************************************************************/
+/* INLINES */
+/******************************************************************************/
+
+#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
+#include "jemalloc/internal/base_inlines.h"
+/*
+ * Include portions of arena code interleaved with tcache code in order to
+ * resolve circular dependencies.
+ */
+#include "jemalloc/internal/prof_inlines_a.h"
+#include "jemalloc/internal/arena_inlines_a.h"
+#include "jemalloc/internal/extent_inlines.h"
+#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
+#include "jemalloc/internal/tcache_inlines.h"
+#include "jemalloc/internal/arena_inlines_b.h"
+#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
+#include "jemalloc/internal/prof_inlines_b.h"
+#include "jemalloc/internal/background_thread_inlines.h"
+
+#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
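The historical multi-pass scheme described in the header comment worked roughly as sketched below; this reconstruction is for illustration only, and everything except the JEMALLOC_H_* section macros is hypothetical:

/* Aggregating header: include the component once per section. */
#define JEMALLOC_H_TYPES
#include "foo.h"
#undef JEMALLOC_H_TYPES

#define JEMALLOC_H_STRUCTS
#include "foo.h"
#undef JEMALLOC_H_STRUCTS

/* foo.h: expose only the section that was requested on this pass. */
#ifdef JEMALLOC_H_TYPES
typedef struct foo_s foo_t;
#endif
#ifdef JEMALLOC_H_STRUCTS
struct foo_s { int x; };
#endif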
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
index ddde9b4e63..4851ef7332 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
@@ -1,174 +1,174 @@
-#ifndef JEMALLOC_INTERNAL_INLINES_A_H
-#define JEMALLOC_INTERNAL_INLINES_A_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/bit_util.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/ticker.h"
-
-JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
-malloc_getcpu(void) {
- assert(have_percpu_arena);
-#if defined(_WIN32)
- return GetCurrentProcessorNumber();
-#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
- return (malloc_cpuid_t)sched_getcpu();
-#else
- not_reached();
- return -1;
-#endif
-}
-
-/* Return the chosen arena index based on current cpu. */
-JEMALLOC_ALWAYS_INLINE unsigned
-percpu_arena_choose(void) {
- assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
-
- malloc_cpuid_t cpuid = malloc_getcpu();
- assert(cpuid >= 0);
-
- unsigned arena_ind;
- if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
- 2)) {
- arena_ind = cpuid;
- } else {
- assert(opt_percpu_arena == per_phycpu_arena);
- /* Hyper threads on the same physical CPU share arena. */
- arena_ind = cpuid - ncpus / 2;
- }
-
- return arena_ind;
-}
-
-/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
-JEMALLOC_ALWAYS_INLINE unsigned
-percpu_arena_ind_limit(percpu_arena_mode_t mode) {
- assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
- if (mode == per_phycpu_arena && ncpus > 1) {
- if (ncpus % 2) {
- /* This likely means a misconfig. */
- return ncpus / 2 + 1;
- }
- return ncpus / 2;
- } else {
- return ncpus;
- }
-}
-
-static inline arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
- arena_tdata_t *tdata;
- arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-
- if (unlikely(arenas_tdata == NULL)) {
- /* arenas_tdata hasn't been initialized yet. */
- return arena_tdata_get_hard(tsd, ind);
- }
- if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
- /*
- * ind is invalid, cache is old (too small), or tdata to be
- * initialized.
- */
- return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
- NULL);
- }
-
- tdata = &arenas_tdata[ind];
- if (likely(tdata != NULL) || !refresh_if_missing) {
- return tdata;
- }
- return arena_tdata_get_hard(tsd, ind);
-}
-
-static inline arena_t *
-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
- arena_t *ret;
-
- assert(ind < MALLOCX_ARENA_LIMIT);
-
- ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
- if (unlikely(ret == NULL)) {
- if (init_if_missing) {
- ret = arena_init(tsdn, ind,
- (extent_hooks_t *)&extent_hooks_default);
- }
- }
- return ret;
-}
-
-static inline ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind) {
- arena_tdata_t *tdata;
-
- tdata = arena_tdata_get(tsd, ind, true);
- if (unlikely(tdata == NULL)) {
- return NULL;
- }
- return &tdata->decay_ticker;
-}
-
-JEMALLOC_ALWAYS_INLINE cache_bin_t *
-tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
- assert(binind < SC_NBINS);
- return &tcache->bins_small[binind];
-}
-
-JEMALLOC_ALWAYS_INLINE cache_bin_t *
-tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
- assert(binind >= SC_NBINS &&binind < nhbins);
- return &tcache->bins_large[binind - SC_NBINS];
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tcache_available(tsd_t *tsd) {
- /*
- * Thread specific auto tcache might be unavailable if: 1) during tcache
- * initialization, or 2) disabled through thread.tcache.enabled mallctl
- * or config options. This check covers all cases.
- */
- if (likely(tsd_tcache_enabled_get(tsd))) {
- /* Associated arena == NULL implies tcache init in progress. */
- assert(tsd_tcachep_get(tsd)->arena == NULL ||
- tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
- NULL);
- return true;
- }
-
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE tcache_t *
-tcache_get(tsd_t *tsd) {
- if (!tcache_available(tsd)) {
- return NULL;
- }
-
- return tsd_tcachep_get(tsd);
-}
-
-static inline void
-pre_reentrancy(tsd_t *tsd, arena_t *arena) {
- /* arena is the current context. Reentry from a0 is not allowed. */
- assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
-
- bool fast = tsd_fast(tsd);
- assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
- ++*tsd_reentrancy_levelp_get(tsd);
- if (fast) {
- /* Prepare slow path for reentrancy. */
- tsd_slow_update(tsd);
- assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
- }
-}
-
-static inline void
-post_reentrancy(tsd_t *tsd) {
- int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
- assert(*reentrancy_level > 0);
- if (--*reentrancy_level == 0) {
- tsd_slow_update(tsd);
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
+#ifndef JEMALLOC_INTERNAL_INLINES_A_H
+#define JEMALLOC_INTERNAL_INLINES_A_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/ticker.h"
+
+JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
+malloc_getcpu(void) {
+ assert(have_percpu_arena);
+#if defined(_WIN32)
+ return GetCurrentProcessorNumber();
+#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
+ return (malloc_cpuid_t)sched_getcpu();
+#else
+ not_reached();
+ return -1;
+#endif
+}
+
+/* Return the chosen arena index based on current cpu. */
+JEMALLOC_ALWAYS_INLINE unsigned
+percpu_arena_choose(void) {
+ assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
+
+ malloc_cpuid_t cpuid = malloc_getcpu();
+ assert(cpuid >= 0);
+
+ unsigned arena_ind;
+ if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
+ 2)) {
+ arena_ind = cpuid;
+ } else {
+ assert(opt_percpu_arena == per_phycpu_arena);
+ /* Hyper threads on the same physical CPU share arena. */
+ arena_ind = cpuid - ncpus / 2;
+ }
+
+ return arena_ind;
+}
+
+/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
+JEMALLOC_ALWAYS_INLINE unsigned
+percpu_arena_ind_limit(percpu_arena_mode_t mode) {
+ assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
+ if (mode == per_phycpu_arena && ncpus > 1) {
+ if (ncpus % 2) {
+ /* This likely means a misconfig. */
+ return ncpus / 2 + 1;
+ }
+ return ncpus / 2;
+ } else {
+ return ncpus;
+ }
+}
+
+static inline arena_tdata_t *
+arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
+ arena_tdata_t *tdata;
+ arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
+
+ if (unlikely(arenas_tdata == NULL)) {
+ /* arenas_tdata hasn't been initialized yet. */
+ return arena_tdata_get_hard(tsd, ind);
+ }
+ if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
+ /*
+ * ind is invalid, cache is old (too small), or tdata needs to be
+ * initialized.
+ */
+ return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
+ NULL);
+ }
+
+ tdata = &arenas_tdata[ind];
+ if (likely(tdata != NULL) || !refresh_if_missing) {
+ return tdata;
+ }
+ return arena_tdata_get_hard(tsd, ind);
+}
+
+static inline arena_t *
+arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
+ arena_t *ret;
+
+ assert(ind < MALLOCX_ARENA_LIMIT);
+
+ ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
+ if (unlikely(ret == NULL)) {
+ if (init_if_missing) {
+ ret = arena_init(tsdn, ind,
+ (extent_hooks_t *)&extent_hooks_default);
+ }
+ }
+ return ret;
+}
+
+static inline ticker_t *
+decay_ticker_get(tsd_t *tsd, unsigned ind) {
+ arena_tdata_t *tdata;
+
+ tdata = arena_tdata_get(tsd, ind, true);
+ if (unlikely(tdata == NULL)) {
+ return NULL;
+ }
+ return &tdata->decay_ticker;
+}
+
+JEMALLOC_ALWAYS_INLINE cache_bin_t *
+tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
+ assert(binind < SC_NBINS);
+ return &tcache->bins_small[binind];
+}
+
+JEMALLOC_ALWAYS_INLINE cache_bin_t *
+tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
+ assert(binind >= SC_NBINS && binind < nhbins);
+ return &tcache->bins_large[binind - SC_NBINS];
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tcache_available(tsd_t *tsd) {
+ /*
+ * Thread specific auto tcache might be unavailable if: 1) during tcache
+ * initialization, or 2) disabled through thread.tcache.enabled mallctl
+ * or config options. This check covers all cases.
+ */
+ if (likely(tsd_tcache_enabled_get(tsd))) {
+ /* Associated arena == NULL implies tcache init in progress. */
+ assert(tsd_tcachep_get(tsd)->arena == NULL ||
+ tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
+ NULL);
+ return true;
+ }
+
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcache_get(tsd_t *tsd) {
+ if (!tcache_available(tsd)) {
+ return NULL;
+ }
+
+ return tsd_tcachep_get(tsd);
+}
+
+static inline void
+pre_reentrancy(tsd_t *tsd, arena_t *arena) {
+ /* arena is the current context. Reentry from a0 is not allowed. */
+ assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
+
+ bool fast = tsd_fast(tsd);
+ assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
+ ++*tsd_reentrancy_levelp_get(tsd);
+ if (fast) {
+ /* Prepare slow path for reentrancy. */
+ tsd_slow_update(tsd);
+ assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
+ }
+}
+
+static inline void
+post_reentrancy(tsd_t *tsd) {
+ int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
+ assert(*reentrancy_level > 0);
+ if (--*reentrancy_level == 0) {
+ tsd_slow_update(tsd);
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
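
Note (illustrative, not part of the patch): the index arithmetic in percpu_arena_choose() and percpu_arena_ind_limit() above assumes that logical CPUs [0, ncpus/2) and [ncpus/2, ncpus) are hyperthread siblings, so in per_phycpu mode CPU k and CPU k + ncpus/2 map to the same arena. A minimal standalone sketch of that mapping (the sketch_* names are made up for the example):

#include <assert.h>
#include <stdio.h>

enum sketch_mode { SKETCH_PERCPU, SKETCH_PER_PHYCPU };

static unsigned
sketch_arena_ind(enum sketch_mode mode, unsigned cpuid, unsigned ncpus) {
	if (mode == SKETCH_PERCPU || cpuid < ncpus / 2) {
		return cpuid;               /* one arena per logical CPU */
	}
	return cpuid - ncpus / 2;           /* siblings share one arena */
}

int
main(void) {
	unsigned ncpus = 8;
	/* percpu: every logical CPU gets its own arena. */
	assert(sketch_arena_ind(SKETCH_PERCPU, 5, ncpus) == 5);
	/* per_phycpu: CPU 5 is the sibling of CPU 1, so both use arena 1. */
	assert(sketch_arena_ind(SKETCH_PER_PHYCPU, 5, ncpus) == 1);
	/* Matches percpu_arena_ind_limit(): ncpus/2 arenas when ncpus is even. */
	printf("per_phycpu arena limit: %u\n", ncpus / 2);
	return 0;
}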
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
index 70d6e57885..bb10d14c6b 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
@@ -1,87 +1,87 @@
-#ifndef JEMALLOC_INTERNAL_INLINES_B_H
-#define JEMALLOC_INTERNAL_INLINES_B_H
-
-#include "jemalloc/internal/rtree.h"
-
-/* Choose an arena based on a per-thread value. */
-static inline arena_t *
-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
- arena_t *ret;
-
- if (arena != NULL) {
- return arena;
- }
-
- /* During reentrancy, arena 0 is the safest bet. */
- if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
- return arena_get(tsd_tsdn(tsd), 0, true);
- }
-
- ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
- if (unlikely(ret == NULL)) {
- ret = arena_choose_hard(tsd, internal);
- assert(ret);
- if (tcache_available(tsd)) {
- tcache_t *tcache = tcache_get(tsd);
- if (tcache->arena != NULL) {
- /* See comments in tcache_data_init().*/
- assert(tcache->arena ==
- arena_get(tsd_tsdn(tsd), 0, false));
- if (tcache->arena != ret) {
- tcache_arena_reassociate(tsd_tsdn(tsd),
- tcache, ret);
- }
- } else {
- tcache_arena_associate(tsd_tsdn(tsd), tcache,
- ret);
- }
- }
- }
-
- /*
- * Note that for percpu arena, if the current arena is outside of the
- * auto percpu arena range, (i.e. thread is assigned to a manually
- * managed arena), then percpu arena is skipped.
- */
- if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
- !internal && (arena_ind_get(ret) <
- percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
- tsd_tsdn(tsd))) {
- unsigned ind = percpu_arena_choose();
- if (arena_ind_get(ret) != ind) {
- percpu_arena_update(tsd, ind);
- ret = tsd_arena_get(tsd);
- }
- ret->last_thd = tsd_tsdn(tsd);
- }
-
- return ret;
-}
-
-static inline arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena) {
- return arena_choose_impl(tsd, arena, false);
-}
-
-static inline arena_t *
-arena_ichoose(tsd_t *tsd, arena_t *arena) {
- return arena_choose_impl(tsd, arena, true);
-}
-
-static inline bool
-arena_is_auto(arena_t *arena) {
- assert(narenas_auto > 0);
-
- return (arena_ind_get(arena) < manual_arena_base);
-}
-
-JEMALLOC_ALWAYS_INLINE extent_t *
-iealloc(tsdn_t *tsdn, const void *ptr) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true);
-}
-
-#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
+#ifndef JEMALLOC_INTERNAL_INLINES_B_H
+#define JEMALLOC_INTERNAL_INLINES_B_H
+
+#include "jemalloc/internal/rtree.h"
+
+/* Choose an arena based on a per-thread value. */
+static inline arena_t *
+arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
+ arena_t *ret;
+
+ if (arena != NULL) {
+ return arena;
+ }
+
+ /* During reentrancy, arena 0 is the safest bet. */
+ if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
+ return arena_get(tsd_tsdn(tsd), 0, true);
+ }
+
+ ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
+ if (unlikely(ret == NULL)) {
+ ret = arena_choose_hard(tsd, internal);
+ assert(ret);
+ if (tcache_available(tsd)) {
+ tcache_t *tcache = tcache_get(tsd);
+ if (tcache->arena != NULL) {
+ /* See comments in tcache_data_init().*/
+ assert(tcache->arena ==
+ arena_get(tsd_tsdn(tsd), 0, false));
+ if (tcache->arena != ret) {
+ tcache_arena_reassociate(tsd_tsdn(tsd),
+ tcache, ret);
+ }
+ } else {
+ tcache_arena_associate(tsd_tsdn(tsd), tcache,
+ ret);
+ }
+ }
+ }
+
+ /*
+ * Note that for percpu arena, if the current arena is outside of the
+ * auto percpu arena range, (i.e. thread is assigned to a manually
+ * managed arena), then percpu arena is skipped.
+ */
+ if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
+ !internal && (arena_ind_get(ret) <
+ percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
+ tsd_tsdn(tsd))) {
+ unsigned ind = percpu_arena_choose();
+ if (arena_ind_get(ret) != ind) {
+ percpu_arena_update(tsd, ind);
+ ret = tsd_arena_get(tsd);
+ }
+ ret->last_thd = tsd_tsdn(tsd);
+ }
+
+ return ret;
+}
+
+static inline arena_t *
+arena_choose(tsd_t *tsd, arena_t *arena) {
+ return arena_choose_impl(tsd, arena, false);
+}
+
+static inline arena_t *
+arena_ichoose(tsd_t *tsd, arena_t *arena) {
+ return arena_choose_impl(tsd, arena, true);
+}
+
+static inline bool
+arena_is_auto(arena_t *arena) {
+ assert(narenas_auto > 0);
+
+ return (arena_ind_get(arena) < manual_arena_base);
+}
+
+JEMALLOC_ALWAYS_INLINE extent_t *
+iealloc(tsdn_t *tsdn, const void *ptr) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true);
+}
+
+#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
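
Note (illustrative, not part of the patch): stripped of the tcache association and percpu handling, arena_choose_impl() above resolves an arena in a fixed order: an explicitly requested arena wins, reentrancy falls back to arena 0, otherwise the cached per-thread choice is reused, and only the first allocation on a thread takes the slow path. A simplified standalone sketch of just that ordering (all names below are stand-ins, not jemalloc APIs):

#include <stddef.h>

typedef struct sketch_arena_s { int ind; } sketch_arena_t;

static sketch_arena_t sketch_arena0 = { 0 };
static sketch_arena_t *sketch_thread_arena = NULL;  /* per-thread cache */
static int sketch_reentrancy_level = 0;

static sketch_arena_t *
sketch_choose_hard(void) {
	/* Stand-in for arena_choose_hard(): pick and cache an arena. */
	sketch_thread_arena = &sketch_arena0;
	return sketch_thread_arena;
}

static sketch_arena_t *
sketch_arena_choose(sketch_arena_t *explicit_arena) {
	if (explicit_arena != NULL) {
		return explicit_arena;        /* caller chose an arena */
	}
	if (sketch_reentrancy_level > 0) {
		return &sketch_arena0;        /* reentrancy: arena 0 is safest */
	}
	if (sketch_thread_arena != NULL) {
		return sketch_thread_arena;   /* fast path: cached choice */
	}
	return sketch_choose_hard();          /* first use on this thread */
}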
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
index cdb10eb21f..db66b30c3c 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
@@ -1,222 +1,222 @@
-#ifndef JEMALLOC_INTERNAL_INLINES_C_H
-#define JEMALLOC_INTERNAL_INLINES_C_H
-
-#include "jemalloc/internal/hook.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/witness.h"
-
-/*
- * Translating the names of the 'i' functions:
- * Abbreviations used in the first part of the function name (before
- * alloc/dalloc) describe what that function accomplishes:
- * a: arena (query)
- * s: size (query, or sized deallocation)
- * e: extent (query)
- * p: aligned (allocates)
- * vs: size (query, without knowing that the pointer is into the heap)
- * r: rallocx implementation
- * x: xallocx implementation
- * Abbreviations used in the second part of the function name (after
- * alloc/dalloc) describe the arguments it takes
- * z: whether to return zeroed memory
- * t: accepts a tcache_t * parameter
- * m: accepts an arena_t * parameter
- */
-
-JEMALLOC_ALWAYS_INLINE arena_t *
-iaalloc(tsdn_t *tsdn, const void *ptr) {
- assert(ptr != NULL);
-
- return arena_aalloc(tsdn, ptr);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const void *ptr) {
- assert(ptr != NULL);
-
- return arena_salloc(tsdn, ptr);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
- bool is_internal, arena_t *arena, bool slow_path) {
- void *ret;
-
- assert(!is_internal || tcache == NULL);
- assert(!is_internal || arena == NULL || arena_is_auto(arena));
- if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
- }
-
- ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
- if (config_stats && is_internal && likely(ret != NULL)) {
- arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
- }
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
- return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
- NULL, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, bool is_internal, arena_t *arena) {
- void *ret;
-
- assert(usize != 0);
- assert(usize == sz_sa2u(usize, alignment));
- assert(!is_internal || tcache == NULL);
- assert(!is_internal || arena == NULL || arena_is_auto(arena));
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
- assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
- if (config_stats && is_internal && likely(ret != NULL)) {
- arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
- }
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena) {
- return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
- return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
- tcache_get(tsd), false, NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr) {
- return arena_vsalloc(tsdn, ptr);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
- bool is_internal, bool slow_path) {
- assert(ptr != NULL);
- assert(!is_internal || tcache == NULL);
- assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
- if (config_stats && is_internal) {
- arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
- }
- if (!is_internal && !tsdn_null(tsdn) &&
- tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
- assert(tcache == NULL);
- }
- arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloc(tsd_t *tsd, void *ptr) {
- idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- alloc_ctx_t *alloc_ctx, bool slow_path) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
- arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
- hook_ralloc_args_t *hook_args) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
- void *p;
- size_t usize, copysize;
-
- usize = sz_sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
- return NULL;
- }
- p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
- if (p == NULL) {
- return NULL;
- }
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- memcpy(p, ptr, copysize);
- hook_invoke_alloc(hook_args->is_realloc
- ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p,
- hook_args->args);
- hook_invoke_dalloc(hook_args->is_realloc
- ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
- isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
- return p;
-}
-
-/*
- * is_realloc threads through the knowledge of whether or not this call comes
- * from je_realloc (as opposed to je_rallocx); this ensures that we pass the
- * correct entry point into any hooks.
- * Note that these functions are all force-inlined, so no actual bool gets
- * passed-around anywhere.
- */
-JEMALLOC_ALWAYS_INLINE void *
-iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
- bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args)
-{
- assert(ptr != NULL);
- assert(size != 0);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
- != 0) {
- /*
- * Existing object alignment is inadequate; allocate new space
- * and copy.
- */
- return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
- zero, tcache, arena, hook_args);
- }
-
- return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
- tcache, hook_args);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
- bool zero, hook_ralloc_args_t *hook_args) {
- return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
- tcache_get(tsd), NULL, hook_args);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, size_t *newsize) {
- assert(ptr != NULL);
- assert(size != 0);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
- != 0) {
- /* Existing object alignment is inadequate. */
- *newsize = oldsize;
- return true;
- }
-
- return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero,
- newsize);
-}
-
-#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
+#ifndef JEMALLOC_INTERNAL_INLINES_C_H
+#define JEMALLOC_INTERNAL_INLINES_C_H
+
+#include "jemalloc/internal/hook.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/witness.h"
+
+/*
+ * Translating the names of the 'i' functions:
+ * Abbreviations used in the first part of the function name (before
+ * alloc/dalloc) describe what that function accomplishes:
+ * a: arena (query)
+ * s: size (query, or sized deallocation)
+ * e: extent (query)
+ * p: aligned (allocates)
+ * vs: size (query, without knowing that the pointer is into the heap)
+ * r: rallocx implementation
+ * x: xallocx implementation
+ * Abbreviations used in the second part of the function name (after
+ * alloc/dalloc) describe the arguments it takes
+ * z: whether to return zeroed memory
+ * t: accepts a tcache_t * parameter
+ * m: accepts an arena_t * parameter
+ */
+
+JEMALLOC_ALWAYS_INLINE arena_t *
+iaalloc(tsdn_t *tsdn, const void *ptr) {
+ assert(ptr != NULL);
+
+ return arena_aalloc(tsdn, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+isalloc(tsdn_t *tsdn, const void *ptr) {
+ assert(ptr != NULL);
+
+ return arena_salloc(tsdn, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
+ bool is_internal, arena_t *arena, bool slow_path) {
+ void *ret;
+
+ assert(!is_internal || tcache == NULL);
+ assert(!is_internal || arena == NULL || arena_is_auto(arena));
+ if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ }
+
+ ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
+ if (config_stats && is_internal && likely(ret != NULL)) {
+ arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
+ }
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
+ return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
+ NULL, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, bool is_internal, arena_t *arena) {
+ void *ret;
+
+ assert(usize != 0);
+ assert(usize == sz_sa2u(usize, alignment));
+ assert(!is_internal || tcache == NULL);
+ assert(!is_internal || arena == NULL || arena_is_auto(arena));
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
+ assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
+ if (config_stats && is_internal && likely(ret != NULL)) {
+ arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
+ }
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena) {
+ return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
+ return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
+ tcache_get(tsd), false, NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+ivsalloc(tsdn_t *tsdn, const void *ptr) {
+ return arena_vsalloc(tsdn, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
+ bool is_internal, bool slow_path) {
+ assert(ptr != NULL);
+ assert(!is_internal || tcache == NULL);
+ assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ if (config_stats && is_internal) {
+ arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
+ }
+ if (!is_internal && !tsdn_null(tsdn) &&
+ tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
+ assert(tcache == NULL);
+ }
+ arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloc(tsd_t *tsd, void *ptr) {
+ idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ alloc_ctx_t *alloc_ctx, bool slow_path) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+ size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
+ hook_ralloc_args_t *hook_args) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ void *p;
+ size_t usize, copysize;
+
+ usize = sz_sa2u(size, alignment);
+ if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
+ return NULL;
+ }
+ p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
+ if (p == NULL) {
+ return NULL;
+ }
+ /*
+ * Copy at most size bytes (not size+extra), since the caller has no
+ * expectation that the extra bytes will be reliably preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+ memcpy(p, ptr, copysize);
+ hook_invoke_alloc(hook_args->is_realloc
+ ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p,
+ hook_args->args);
+ hook_invoke_dalloc(hook_args->is_realloc
+ ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
+ isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
+ return p;
+}
+
+/*
+ * is_realloc threads through the knowledge of whether or not this call comes
+ * from je_realloc (as opposed to je_rallocx); this ensures that we pass the
+ * correct entry point into any hooks.
+ * Note that these functions are all force-inlined, so no actual bool gets
+ * passed-around anywhere.
+ */
+JEMALLOC_ALWAYS_INLINE void *
+iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args)
+{
+ assert(ptr != NULL);
+ assert(size != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /*
+ * Existing object alignment is inadequate; allocate new space
+ * and copy.
+ */
+ return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
+ zero, tcache, arena, hook_args);
+ }
+
+ return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
+ tcache, hook_args);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+ bool zero, hook_ralloc_args_t *hook_args) {
+ return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
+ tcache_get(tsd), NULL, hook_args);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero, size_t *newsize) {
+ assert(ptr != NULL);
+ assert(size != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /* Existing object alignment is inadequate. */
+ *newsize = oldsize;
+ return true;
+ }
+
+ return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero,
+ newsize);
+}
+
+#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
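
Note (illustrative, not part of the patch): applying the naming scheme from the comment at the top of this header, ipallocztm reads as i + p (aligned allocation) + alloc + z (takes a zero flag) + t (takes a tcache_t *) + m (takes an arena_t *), which matches its signature above: void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, bool is_internal, arena_t *arena). Likewise isdalloct is a sized (s) deallocation that accepts a tcache (t), and ivsalloc is a size query that does not assume the pointer points into the heap (vs).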
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
index d8ea06f6d0..bd97c302bf 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
@@ -1,114 +1,114 @@
-#ifndef JEMALLOC_INTERNAL_MACROS_H
-#define JEMALLOC_INTERNAL_MACROS_H
+#ifndef JEMALLOC_INTERNAL_MACROS_H
+#define JEMALLOC_INTERNAL_MACROS_H
-#ifdef JEMALLOC_DEBUG
-# define JEMALLOC_ALWAYS_INLINE static inline
+#ifdef JEMALLOC_DEBUG
+# define JEMALLOC_ALWAYS_INLINE static inline
#else
-# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
+# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
#endif
-#ifdef _MSC_VER
-# define inline _inline
-#endif
-
-#define UNUSED JEMALLOC_ATTR(unused)
-
-#define ZU(z) ((size_t)z)
-#define ZD(z) ((ssize_t)z)
-#define QU(q) ((uint64_t)q)
-#define QD(q) ((int64_t)q)
+#ifdef _MSC_VER
+# define inline _inline
+#endif
-#define KZU(z) ZU(z##ULL)
-#define KZD(z) ZD(z##LL)
-#define KQU(q) QU(q##ULL)
-#define KQD(q) QI(q##LL)
+#define UNUSED JEMALLOC_ATTR(unused)
+#define ZU(z) ((size_t)z)
+#define ZD(z) ((ssize_t)z)
+#define QU(q) ((uint64_t)q)
+#define QD(q) ((int64_t)q)
+
+#define KZU(z) ZU(z##ULL)
+#define KZD(z) ZD(z##LL)
+#define KQU(q) QU(q##ULL)
+#define KQD(q) QI(q##LL)
+
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
-#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
+#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
# define restrict
#endif
-
-/* Various function pointers are static and immutable except during testing. */
-#ifdef JEMALLOC_JET
-# define JET_MUTABLE
-#else
-# define JET_MUTABLE const
-#endif
-
-#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
-#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
-
-#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \
- && defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7)
-#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough);
-#else
-#define JEMALLOC_FALLTHROUGH /* falls through */
-#endif
-
-/* Diagnostic suppression macros */
-#if defined(_MSC_VER) && !defined(__clang__)
-# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
-# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
-# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W))
-# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
-# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
-# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
-# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-/* #pragma GCC diagnostic first appeared in gcc 4.6. */
-#elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \
- (__GNUC_MINOR__ > 5)))) || defined(__clang__)
-/*
- * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang
- * diagnostic suppression macros and should not be used anywhere else.
- */
-# define JEMALLOC_PRAGMA__(X) _Pragma(#X)
-# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
-# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
-# define JEMALLOC_DIAGNOSTIC_IGNORE(W) \
- JEMALLOC_PRAGMA__(GCC diagnostic ignored W)
-
-/*
- * The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and
- * all clang versions up to version 7 (currently trunk, unreleased). This macro
- * suppresses the warning for the affected compiler versions only.
- */
-# if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \
- defined(__clang__)
-# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
- JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
-# else
-# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
-# endif
-
-# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \
- JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
-# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
- JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
-# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
-# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
- JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
-# else
-# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
-# endif
-# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
- JEMALLOC_DIAGNOSTIC_PUSH \
- JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
-#else
-# define JEMALLOC_DIAGNOSTIC_PUSH
-# define JEMALLOC_DIAGNOSTIC_POP
-# define JEMALLOC_DIAGNOSTIC_IGNORE(W)
-# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
-# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
-# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
-# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-#endif
-
-/*
- * Disables spurious diagnostics for all headers. Since these headers are not
- * included by users directly, it does not affect their diagnostic settings.
- */
-JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-
-#endif /* JEMALLOC_INTERNAL_MACROS_H */
+
+/* Various function pointers are static and immutable except during testing. */
+#ifdef JEMALLOC_JET
+# define JET_MUTABLE
+#else
+# define JET_MUTABLE const
+#endif
+
+#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
+#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
+
+#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \
+ && defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7)
+#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough);
+#else
+#define JEMALLOC_FALLTHROUGH /* falls through */
+#endif
+
+/* Diagnostic suppression macros */
+#if defined(_MSC_VER) && !defined(__clang__)
+# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
+# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
+# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W))
+# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
+# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
+# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
+/* #pragma GCC diagnostic first appeared in gcc 4.6. */
+#elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \
+ (__GNUC_MINOR__ > 5)))) || defined(__clang__)
+/*
+ * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang
+ * diagnostic suppression macros and should not be used anywhere else.
+ */
+# define JEMALLOC_PRAGMA__(X) _Pragma(#X)
+# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
+# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
+# define JEMALLOC_DIAGNOSTIC_IGNORE(W) \
+ JEMALLOC_PRAGMA__(GCC diagnostic ignored W)
+
+/*
+ * The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and
+ * all clang versions up to version 7 (currently trunk, unreleased). This macro
+ * suppresses the warning for the affected compiler versions only.
+ */
+# if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \
+ defined(__clang__)
+# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
+ JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
+# else
+# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
+# endif
+
+# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \
+ JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
+# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
+ JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
+# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
+# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
+ JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
+# else
+# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+# endif
+# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
+ JEMALLOC_DIAGNOSTIC_PUSH \
+ JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
+#else
+# define JEMALLOC_DIAGNOSTIC_PUSH
+# define JEMALLOC_DIAGNOSTIC_POP
+# define JEMALLOC_DIAGNOSTIC_IGNORE(W)
+# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
+# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
+# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
+#endif
+
+/*
+ * Disables spurious diagnostics for all headers. Since these headers are not
+ * included by users directly, it does not affect their diagnostic settings.
+ */
+JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
+
+#endif /* JEMALLOC_INTERNAL_MACROS_H */
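
Note (illustrative, not part of the patch): a minimal sketch of how the diagnostic suppression macros above are meant to wrap code, assuming this header has been included; always_nonnegative() is hypothetical and exists only for the example.

#include <stddef.h>

JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
static int
always_nonnegative(size_t n) {
	/* Would trip -Wtype-limits on GCC/Clang without the suppression. */
	return n >= 0;
}
JEMALLOC_DIAGNOSTIC_POP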
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
index e296c5a7e8..bf5d01b27c 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
@@ -1,114 +1,114 @@
-#ifndef JEMALLOC_INTERNAL_TYPES_H
-#define JEMALLOC_INTERNAL_TYPES_H
-
-#include "jemalloc/internal/quantum.h"
-
-/* Page size index type. */
-typedef unsigned pszind_t;
-
-/* Size class index type. */
-typedef unsigned szind_t;
-
-/* Processor / core id type. */
-typedef int malloc_cpuid_t;
-
-/*
- * Flags bits:
- *
- * a: arena
- * t: tcache
- * 0: unused
- * z: zero
- * n: alignment
- *
- * aaaaaaaa aaaatttt tttttttt 0znnnnnn
- */
-#define MALLOCX_ARENA_BITS 12
-#define MALLOCX_TCACHE_BITS 12
-#define MALLOCX_LG_ALIGN_BITS 6
-#define MALLOCX_ARENA_SHIFT 20
-#define MALLOCX_TCACHE_SHIFT 8
-#define MALLOCX_ARENA_MASK \
- (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
-/* NB: Arena index bias decreases the maximum number of arenas by 1. */
-#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
-#define MALLOCX_TCACHE_MASK \
- (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
-#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
-#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
-/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
-#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
- (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
-#define MALLOCX_ALIGN_GET(flags) \
- (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
-#define MALLOCX_ZERO_GET(flags) \
- ((bool)(flags & MALLOCX_ZERO))
-
-#define MALLOCX_TCACHE_GET(flags) \
- (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
-#define MALLOCX_ARENA_GET(flags) \
- (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
-
-/* Smallest size class to support. */
-#define TINY_MIN (1U << LG_TINY_MIN)
-
-#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
-#define LONG_MASK (LONG - 1)
-
-/* Return the smallest long multiple that is >= a. */
-#define LONG_CEILING(a) \
- (((a) + LONG_MASK) & ~LONG_MASK)
-
-#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
-#define PTR_MASK (SIZEOF_PTR - 1)
-
-/* Return the smallest (void *) multiple that is >= a. */
-#define PTR_CEILING(a) \
- (((a) + PTR_MASK) & ~PTR_MASK)
-
-/*
- * Maximum size of L1 cache line. This is used to avoid cache line aliasing.
- * In addition, this controls the spacing of cacheline-spaced size classes.
- *
- * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
- * only handle raw constants.
- */
-#define LG_CACHELINE 6
-#define CACHELINE 64
-#define CACHELINE_MASK (CACHELINE - 1)
-
-/* Return the smallest cacheline multiple that is >= s. */
-#define CACHELINE_CEILING(s) \
- (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
-
-/* Return the nearest aligned address at or below a. */
-#define ALIGNMENT_ADDR2BASE(a, alignment) \
- ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
-
-/* Return the offset between a and the nearest aligned address at or below a. */
-#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
- ((size_t)((uintptr_t)(a) & (alignment - 1)))
-
-/* Return the smallest alignment multiple that is >= s. */
-#define ALIGNMENT_CEILING(s, alignment) \
- (((s) + (alignment - 1)) & ((~(alignment)) + 1))
-
-/* Declare a variable-length array. */
-#if __STDC_VERSION__ < 199901L
-# ifdef _MSC_VER
-# include <malloc.h>
-# define alloca _alloca
-# else
-# ifdef JEMALLOC_HAS_ALLOCA_H
-# include <alloca.h>
-# else
-# include <stdlib.h>
-# endif
-# endif
-# define VARIABLE_ARRAY(type, name, count) \
- type *name = alloca(sizeof(type) * (count))
-#else
-# define VARIABLE_ARRAY(type, name, count) type name[(count)]
-#endif
-
-#endif /* JEMALLOC_INTERNAL_TYPES_H */
+#ifndef JEMALLOC_INTERNAL_TYPES_H
+#define JEMALLOC_INTERNAL_TYPES_H
+
+#include "jemalloc/internal/quantum.h"
+
+/* Page size index type. */
+typedef unsigned pszind_t;
+
+/* Size class index type. */
+typedef unsigned szind_t;
+
+/* Processor / core id type. */
+typedef int malloc_cpuid_t;
+
+/*
+ * Flags bits:
+ *
+ * a: arena
+ * t: tcache
+ * 0: unused
+ * z: zero
+ * n: alignment
+ *
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
+ */
+#define MALLOCX_ARENA_BITS 12
+#define MALLOCX_TCACHE_BITS 12
+#define MALLOCX_LG_ALIGN_BITS 6
+#define MALLOCX_ARENA_SHIFT 20
+#define MALLOCX_TCACHE_SHIFT 8
+#define MALLOCX_ARENA_MASK \
+ (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
+/* NB: Arena index bias decreases the maximum number of arenas by 1. */
+#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
+#define MALLOCX_TCACHE_MASK \
+ (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
+#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
+#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
+/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
+#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
+ (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
+#define MALLOCX_ALIGN_GET(flags) \
+ (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
+#define MALLOCX_ZERO_GET(flags) \
+ ((bool)(flags & MALLOCX_ZERO))
+
+#define MALLOCX_TCACHE_GET(flags) \
+ (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
+#define MALLOCX_ARENA_GET(flags) \
+ (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
+
+/* Smallest size class to support. */
+#define TINY_MIN (1U << LG_TINY_MIN)
+
+#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
+#define LONG_MASK (LONG - 1)
+
+/* Return the smallest long multiple that is >= a. */
+#define LONG_CEILING(a) \
+ (((a) + LONG_MASK) & ~LONG_MASK)
+
+#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
+#define PTR_MASK (SIZEOF_PTR - 1)
+
+/* Return the smallest (void *) multiple that is >= a. */
+#define PTR_CEILING(a) \
+ (((a) + PTR_MASK) & ~PTR_MASK)
+
+/*
+ * Maximum size of L1 cache line. This is used to avoid cache line aliasing.
+ * In addition, this controls the spacing of cacheline-spaced size classes.
+ *
+ * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
+ * only handle raw constants.
+ */
+#define LG_CACHELINE 6
+#define CACHELINE 64
+#define CACHELINE_MASK (CACHELINE - 1)
+
+/* Return the smallest cacheline multiple that is >= s. */
+#define CACHELINE_CEILING(s) \
+ (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
+
+/* Return the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2BASE(a, alignment) \
+ ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
+
+/* Return the offset between a and the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
+ ((size_t)((uintptr_t)(a) & (alignment - 1)))
+
+/* Return the smallest alignment multiple that is >= s. */
+#define ALIGNMENT_CEILING(s, alignment) \
+ (((s) + (alignment - 1)) & ((~(alignment)) + 1))
+
+/* Declare a variable-length array. */
+#if __STDC_VERSION__ < 199901L
+# ifdef _MSC_VER
+# include <malloc.h>
+# define alloca _alloca
+# else
+# ifdef JEMALLOC_HAS_ALLOCA_H
+# include <alloca.h>
+# else
+# include <stdlib.h>
+# endif
+# endif
+# define VARIABLE_ARRAY(type, name, count) \
+ type *name = alloca(sizeof(type) * (count))
+#else
+# define VARIABLE_ARRAY(type, name, count) type name[(count)]
+#endif
+
+#endif /* JEMALLOC_INTERNAL_TYPES_H */
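
Note (illustrative, not part of the patch): a standalone sketch of the arithmetic behind MALLOCX_ARENA_GET, MALLOCX_ALIGN_GET_SPECIFIED and ALIGNMENT_CEILING above, re-derived with plain constants (the SKETCH_* names are made up) so it compiles on its own. Per the layout comment, the arena index is stored biased by +1 in bits 20..31 and lg(alignment) sits in the low 6 bits.

#include <assert.h>
#include <stddef.h>

#define SKETCH_ARENA_SHIFT   20
#define SKETCH_LG_ALIGN_MASK ((1 << 6) - 1)

int
main(void) {
	unsigned arena_ind = 3;
	unsigned lg_align = 6;                    /* 64-byte alignment */
	int flags = (int)((arena_ind + 1) << SKETCH_ARENA_SHIFT) | (int)lg_align;

	/* Equivalent of MALLOCX_ARENA_GET(flags): undo the +1 bias. */
	assert(((unsigned)flags >> SKETCH_ARENA_SHIFT) - 1 == arena_ind);
	/* Equivalent of MALLOCX_ALIGN_GET_SPECIFIED(flags). */
	assert(((size_t)1 << (flags & SKETCH_LG_ALIGN_MASK)) == 64);

	/* ALIGNMENT_CEILING(100, 64) rounds up to the next multiple of 64. */
	size_t alignment = 64, s = 100;
	assert(((s + (alignment - 1)) & ((~alignment) + 1)) == 128);
	return 0;
}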
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_preamble.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_preamble.h
index 7f5076018f..f133465984 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_preamble.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_preamble.h
@@ -1,213 +1,213 @@
-#ifndef JEMALLOC_PREAMBLE_H
-#define JEMALLOC_PREAMBLE_H
-
-#include "jemalloc_internal_defs.h"
-#include "jemalloc/internal/jemalloc_internal_decls.h"
-
-#ifdef JEMALLOC_UTRACE
-#include <sys/ktrace.h>
-#endif
-
-#define JEMALLOC_NO_DEMANGLE
-#ifdef JEMALLOC_JET
-# undef JEMALLOC_IS_MALLOC
-# define JEMALLOC_N(n) jet_##n
-# error #include "jemalloc/internal/public_namespace.h"
-# define JEMALLOC_NO_RENAME
-# include "../jemalloc.h"
-# undef JEMALLOC_NO_RENAME
-#else
-# define JEMALLOC_N(n) je_##n
-# include "../jemalloc.h"
-#endif
-
-#if defined(JEMALLOC_OSATOMIC)
-#include <libkern/OSAtomic.h>
-#endif
-
-#ifdef JEMALLOC_ZONE
-#include <mach/mach_error.h>
-#include <mach/mach_init.h>
-#include <mach/vm_map.h>
-#endif
-
-#include "jemalloc/internal/jemalloc_internal_macros.h"
-
-/*
- * Note that the ordering matters here; the hook itself is name-mangled. We
- * want the inclusion of hooks to happen early, so that we hook as much as
- * possible.
- */
-#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
-# ifndef JEMALLOC_JET
-# include "jemalloc/internal/private_namespace.h"
-# else
-# error #include "jemalloc/internal/private_namespace_jet.h"
-# endif
-#endif
-#include "jemalloc/internal/test_hooks.h"
-
-#ifdef JEMALLOC_DEFINE_MADVISE_FREE
-# define JEMALLOC_MADV_FREE 8
-#endif
-
-static const bool config_debug =
-#ifdef JEMALLOC_DEBUG
- true
-#else
- false
-#endif
- ;
-static const bool have_dss =
-#ifdef JEMALLOC_DSS
- true
-#else
- false
-#endif
- ;
-static const bool have_madvise_huge =
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
- true
-#else
- false
-#endif
- ;
-static const bool config_fill =
-#ifdef JEMALLOC_FILL
- true
-#else
- false
-#endif
- ;
-static const bool config_lazy_lock =
-#ifdef JEMALLOC_LAZY_LOCK
- true
-#else
- false
-#endif
- ;
-static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
-static const bool config_prof =
-#ifdef JEMALLOC_PROF
- true
-#else
- false
-#endif
- ;
-static const bool config_prof_libgcc =
-#ifdef JEMALLOC_PROF_LIBGCC
- true
-#else
- false
-#endif
- ;
-static const bool config_prof_libunwind =
-#ifdef JEMALLOC_PROF_LIBUNWIND
- true
-#else
- false
-#endif
- ;
-static const bool maps_coalesce =
-#ifdef JEMALLOC_MAPS_COALESCE
- true
-#else
- false
-#endif
- ;
-static const bool config_stats =
-#ifdef JEMALLOC_STATS
- true
-#else
- false
-#endif
- ;
-static const bool config_tls =
-#ifdef JEMALLOC_TLS
- true
-#else
- false
-#endif
- ;
-static const bool config_utrace =
-#ifdef JEMALLOC_UTRACE
- true
-#else
- false
-#endif
- ;
-static const bool config_xmalloc =
-#ifdef JEMALLOC_XMALLOC
- true
-#else
- false
-#endif
- ;
-static const bool config_cache_oblivious =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
- true
-#else
- false
-#endif
- ;
-/*
- * Undocumented, for jemalloc development use only at the moment. See the note
- * in jemalloc/internal/log.h.
- */
-static const bool config_log =
-#ifdef JEMALLOC_LOG
- true
-#else
- false
-#endif
- ;
-/*
- * Are extra safety checks enabled; things like checking the size of sized
- * deallocations, double-frees, etc.
- */
-static const bool config_opt_safety_checks =
-#ifdef JEMALLOC_OPT_SAFETY_CHECKS
- true
-#elif defined(JEMALLOC_DEBUG)
- /*
- * This lets us only guard safety checks by one flag instead of two; fast
- * checks can guard solely by config_opt_safety_checks and run in debug mode
- * too.
- */
- true
-#else
- false
-#endif
- ;
-
-#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
-/* Currently percpu_arena depends on sched_getcpu. */
-#define JEMALLOC_PERCPU_ARENA
-#endif
-static const bool have_percpu_arena =
-#ifdef JEMALLOC_PERCPU_ARENA
- true
-#else
- false
-#endif
- ;
-/*
- * Undocumented, and not recommended; the application should take full
- * responsibility for tracking provenance.
- */
-static const bool force_ivsalloc =
-#ifdef JEMALLOC_FORCE_IVSALLOC
- true
-#else
- false
-#endif
- ;
-static const bool have_background_thread =
-#ifdef JEMALLOC_BACKGROUND_THREAD
- true
-#else
- false
-#endif
- ;
-
-#endif /* JEMALLOC_PREAMBLE_H */
+#ifndef JEMALLOC_PREAMBLE_H
+#define JEMALLOC_PREAMBLE_H
+
+#include "jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
+
+#ifdef JEMALLOC_UTRACE
+#include <sys/ktrace.h>
+#endif
+
+#define JEMALLOC_NO_DEMANGLE
+#ifdef JEMALLOC_JET
+# undef JEMALLOC_IS_MALLOC
+# define JEMALLOC_N(n) jet_##n
+# error #include "jemalloc/internal/public_namespace.h"
+# define JEMALLOC_NO_RENAME
+# include "../jemalloc.h"
+# undef JEMALLOC_NO_RENAME
+#else
+# define JEMALLOC_N(n) je_##n
+# include "../jemalloc.h"
+#endif
+
+#if defined(JEMALLOC_OSATOMIC)
+#include <libkern/OSAtomic.h>
+#endif
+
+#ifdef JEMALLOC_ZONE
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#endif
+
+#include "jemalloc/internal/jemalloc_internal_macros.h"
+
+/*
+ * Note that the ordering matters here; the hook itself is name-mangled. We
+ * want the inclusion of hooks to happen early, so that we hook as much as
+ * possible.
+ */
+#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
+# ifndef JEMALLOC_JET
+# include "jemalloc/internal/private_namespace.h"
+# else
+# error #include "jemalloc/internal/private_namespace_jet.h"
+# endif
+#endif
+#include "jemalloc/internal/test_hooks.h"
+
+#ifdef JEMALLOC_DEFINE_MADVISE_FREE
+# define JEMALLOC_MADV_FREE 8
+#endif
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+static const bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+static const bool have_madvise_huge =
+#ifdef JEMALLOC_HAVE_MADVISE_HUGE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_lazy_lock =
+#ifdef JEMALLOC_LAZY_LOCK
+ true
+#else
+ false
+#endif
+ ;
+static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+ true
+#else
+ false
+#endif
+ ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_utrace =
+#ifdef JEMALLOC_UTRACE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ true
+#else
+ false
+#endif
+ ;
+/*
+ * Undocumented, for jemalloc development use only at the moment. See the note
+ * in jemalloc/internal/log.h.
+ */
+static const bool config_log =
+#ifdef JEMALLOC_LOG
+ true
+#else
+ false
+#endif
+ ;
+/*
+ * Are extra safety checks enabled; things like checking the size of sized
+ * deallocations, double-frees, etc.
+ */
+static const bool config_opt_safety_checks =
+#ifdef JEMALLOC_OPT_SAFETY_CHECKS
+ true
+#elif defined(JEMALLOC_DEBUG)
+ /*
+ * This lets us only guard safety checks by one flag instead of two; fast
+ * checks can guard solely by config_opt_safety_checks and run in debug mode
+ * too.
+ */
+ true
+#else
+ false
+#endif
+ ;
+
+#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
+/* Currently percpu_arena depends on sched_getcpu. */
+#define JEMALLOC_PERCPU_ARENA
+#endif
+static const bool have_percpu_arena =
+#ifdef JEMALLOC_PERCPU_ARENA
+ true
+#else
+ false
+#endif
+ ;
+/*
+ * Undocumented, and not recommended; the application should take full
+ * responsibility for tracking provenance.
+ */
+static const bool force_ivsalloc =
+#ifdef JEMALLOC_FORCE_IVSALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool have_background_thread =
+#ifdef JEMALLOC_BACKGROUND_THREAD
+ true
+#else
+ false
+#endif
+ ;
+
+#endif /* JEMALLOC_PREAMBLE_H */
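
Note (illustrative, not part of the patch): the point of the static const bool config_* pattern above is that feature checks read as plain if statements and the optimizer deletes the dead branch, so call sites need no #ifdef. A minimal sketch with a made-up SKETCH_DEBUG flag:

#include <stdbool.h>

static const bool sketch_config_debug =
#ifdef SKETCH_DEBUG
    true
#else
    false
#endif
    ;

static int
sketch_checked_op(int x) {
	if (sketch_config_debug) {
		/* Compiled out entirely when SKETCH_DEBUG is not defined. */
		return x < 0 ? -1 : x;
	}
	return x;
}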
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/large_externs.h b/contrib/libs/jemalloc/include/jemalloc/internal/large_externs.h
index a05019e8a5..40f9b7c77f 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/large_externs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/large_externs.h
@@ -1,32 +1,32 @@
-#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
-#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
-
-#include "jemalloc/internal/hook.h"
-
-void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
-void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero);
-bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
- size_t usize_max, bool zero);
-void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache,
- hook_ralloc_args_t *hook_args);
-
-typedef void (large_dalloc_junk_t)(void *, size_t);
-extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
-
-typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
-extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;
-
-void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
-void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
-void large_dalloc(tsdn_t *tsdn, extent_t *extent);
-size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
-prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
-void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
-void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
-
-nstime_t large_prof_alloc_time_get(const extent_t *extent);
-void large_prof_alloc_time_set(extent_t *extent, nstime_t time);
-
-#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
+#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
+#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
+
+#include "jemalloc/internal/hook.h"
+
+void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
+void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero);
+bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+ size_t usize_max, bool zero);
+void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache,
+ hook_ralloc_args_t *hook_args);
+
+typedef void (large_dalloc_junk_t)(void *, size_t);
+extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
+
+typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
+extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;
+
+void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
+void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
+void large_dalloc(tsdn_t *tsdn, extent_t *extent);
+size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
+prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
+void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
+void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
+
+nstime_t large_prof_alloc_time_get(const extent_t *extent);
+void large_prof_alloc_time_set(extent_t *extent, nstime_t time);
+
+#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/log.h b/contrib/libs/jemalloc/include/jemalloc/internal/log.h
index 6420858635..43e8e8c80c 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/log.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/log.h
@@ -1,115 +1,115 @@
-#ifndef JEMALLOC_INTERNAL_LOG_H
-#define JEMALLOC_INTERNAL_LOG_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/mutex.h"
-
-#ifdef JEMALLOC_LOG
-# define JEMALLOC_LOG_VAR_BUFSIZE 1000
-#else
-# define JEMALLOC_LOG_VAR_BUFSIZE 1
-#endif
-
-#define JEMALLOC_LOG_BUFSIZE 4096
-
-/*
- * The log malloc_conf option is a '|'-delimited list of log_var name segments
- * which should be logged. The names are themselves hierarchical, with '.' as
- * the delimiter (a "segment" is just a prefix in the log namespace). So, if
- * you have:
- *
- * log("arena", "log msg for arena"); // 1
- * log("arena.a", "log msg for arena.a"); // 2
- * log("arena.b", "log msg for arena.b"); // 3
- * log("arena.a.a", "log msg for arena.a.a"); // 4
- * log("extent.a", "log msg for extent.a"); // 5
- * log("extent.b", "log msg for extent.b"); // 6
- *
- * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
- * 6 will print at runtime. You can enable logging from all log vars by
- * writing "log=.".
- *
- * None of this should be regarded as a stable API for right now. It's intended
- * as a debugging interface, to let us keep around some of our printf-debugging
- * statements.
- */
-
-extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
-extern atomic_b_t log_init_done;
-
-typedef struct log_var_s log_var_t;
-struct log_var_s {
- /*
- * Lowest bit is "inited", second lowest is "enabled". Putting them in
- * a single word lets us avoid any fences on weak architectures.
- */
- atomic_u_t state;
- const char *name;
-};
-
-#define LOG_NOT_INITIALIZED 0U
-#define LOG_INITIALIZED_NOT_ENABLED 1U
-#define LOG_ENABLED 2U
-
-#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
-
-/*
- * Returns the value we should assume for state (which is not necessarily
- * accurate; if logging is done before logging has finished initializing, then
- * we default to doing the safe thing by logging everything).
- */
-unsigned log_var_update_state(log_var_t *log_var);
-
-/* We factor out the metadata management to allow us to test more easily. */
-#define log_do_begin(log_var) \
-if (config_log) { \
- unsigned log_state = atomic_load_u(&(log_var).state, \
- ATOMIC_RELAXED); \
- if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
- log_state = log_var_update_state(&(log_var)); \
- assert(log_state != LOG_NOT_INITIALIZED); \
- } \
- if (log_state == LOG_ENABLED) { \
- {
- /* User code executes here. */
-#define log_do_end(log_var) \
- } \
- } \
-}
-
-/*
- * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
- * preprocessing. To work around this, we take all potential extra arguments in
- * a var-args function. Since a varargs macro needs at least one argument in
- * the "...", we accept the format string there, and require that the first
- * argument in this "..." is a const char *.
- */
-static inline void
-log_impl_varargs(const char *name, ...) {
- char buf[JEMALLOC_LOG_BUFSIZE];
- va_list ap;
-
- va_start(ap, name);
- const char *format = va_arg(ap, const char *);
- size_t dst_offset = 0;
- dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
- dst_offset += malloc_vsnprintf(buf + dst_offset,
- JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
- dst_offset += malloc_snprintf(buf + dst_offset,
- JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
- va_end(ap);
-
- malloc_write(buf);
-}
-
-/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
-#define LOG(log_var_str, ...) \
-do { \
- static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
- log_do_begin(log_var) \
- log_impl_varargs((log_var).name, __VA_ARGS__); \
- log_do_end(log_var) \
-} while (0)
-
-#endif /* JEMALLOC_INTERNAL_LOG_H */
+#ifndef JEMALLOC_INTERNAL_LOG_H
+#define JEMALLOC_INTERNAL_LOG_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
+
+#ifdef JEMALLOC_LOG
+# define JEMALLOC_LOG_VAR_BUFSIZE 1000
+#else
+# define JEMALLOC_LOG_VAR_BUFSIZE 1
+#endif
+
+#define JEMALLOC_LOG_BUFSIZE 4096
+
+/*
+ * The log malloc_conf option is a '|'-delimited list of log_var name segments
+ * which should be logged. The names are themselves hierarchical, with '.' as
+ * the delimiter (a "segment" is just a prefix in the log namespace). So, if
+ * you have:
+ *
+ * log("arena", "log msg for arena"); // 1
+ * log("arena.a", "log msg for arena.a"); // 2
+ * log("arena.b", "log msg for arena.b"); // 3
+ * log("arena.a.a", "log msg for arena.a.a"); // 4
+ * log("extent.a", "log msg for extent.a"); // 5
+ * log("extent.b", "log msg for extent.b"); // 6
+ *
+ * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
+ * 6 will print at runtime. You can enable logging from all log vars by
+ * writing "log=.".
+ *
+ * None of this should be regarded as a stable API for right now. It's intended
+ * as a debugging interface, to let us keep around some of our printf-debugging
+ * statements.
+ */
+
+extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
+extern atomic_b_t log_init_done;
+
+typedef struct log_var_s log_var_t;
+struct log_var_s {
+ /*
+ * Lowest bit is "inited", second lowest is "enabled". Putting them in
+ * a single word lets us avoid any fences on weak architectures.
+ */
+ atomic_u_t state;
+ const char *name;
+};
+
+#define LOG_NOT_INITIALIZED 0U
+#define LOG_INITIALIZED_NOT_ENABLED 1U
+#define LOG_ENABLED 2U
+
+#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
+
+/*
+ * Returns the value we should assume for state (which is not necessarily
+ * accurate; if logging is done before logging has finished initializing, then
+ * we default to doing the safe thing by logging everything).
+ */
+unsigned log_var_update_state(log_var_t *log_var);
+
+/* We factor out the metadata management to allow us to test more easily. */
+#define log_do_begin(log_var) \
+if (config_log) { \
+ unsigned log_state = atomic_load_u(&(log_var).state, \
+ ATOMIC_RELAXED); \
+ if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
+ log_state = log_var_update_state(&(log_var)); \
+ assert(log_state != LOG_NOT_INITIALIZED); \
+ } \
+ if (log_state == LOG_ENABLED) { \
+ {
+ /* User code executes here. */
+#define log_do_end(log_var) \
+ } \
+ } \
+}
+
+/*
+ * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
+ * preprocessing. To work around this, we take all potential extra arguments in
+ * a var-args function. Since a varargs macro needs at least one argument in
+ * the "...", we accept the format string there, and require that the first
+ * argument in this "..." is a const char *.
+ */
+static inline void
+log_impl_varargs(const char *name, ...) {
+ char buf[JEMALLOC_LOG_BUFSIZE];
+ va_list ap;
+
+ va_start(ap, name);
+ const char *format = va_arg(ap, const char *);
+ size_t dst_offset = 0;
+ dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
+ dst_offset += malloc_vsnprintf(buf + dst_offset,
+ JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
+ dst_offset += malloc_snprintf(buf + dst_offset,
+ JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
+ va_end(ap);
+
+ malloc_write(buf);
+}
+
+/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
+#define LOG(log_var_str, ...) \
+do { \
+ static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
+ log_do_begin(log_var) \
+ log_impl_varargs((log_var).name, __VA_ARGS__); \
+ log_do_end(log_var) \
+} while (0)
+
+#endif /* JEMALLOC_INTERNAL_LOG_H */
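
Note (illustrative, not part of the patch): a sketch of how the LOG() macro above is used, assuming this header is included and jemalloc was built with logging enabled. With malloc_conf set to "log=extent", only the second call below would print at runtime.

static void
sketch_log_usage(size_t nbytes, void *ptr) {
	LOG("arena.alloc", "allocated %zu bytes", nbytes);
	LOG("extent.split", "splitting extent at %p", ptr);
}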
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/malloc_io.h b/contrib/libs/jemalloc/include/jemalloc/internal/malloc_io.h
index 1d1a414e0f..aeb6e39523 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/malloc_io.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/malloc_io.h
@@ -1,102 +1,102 @@
-#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
-#define JEMALLOC_INTERNAL_MALLOC_IO_H
-
-#ifdef _WIN32
-# ifdef _WIN64
-# define FMT64_PREFIX "ll"
-# define FMTPTR_PREFIX "ll"
-# else
-# define FMT64_PREFIX "ll"
-# define FMTPTR_PREFIX ""
-# endif
-# define FMTd32 "d"
-# define FMTu32 "u"
-# define FMTx32 "x"
-# define FMTd64 FMT64_PREFIX "d"
-# define FMTu64 FMT64_PREFIX "u"
-# define FMTx64 FMT64_PREFIX "x"
-# define FMTdPTR FMTPTR_PREFIX "d"
-# define FMTuPTR FMTPTR_PREFIX "u"
-# define FMTxPTR FMTPTR_PREFIX "x"
-#else
-# include <inttypes.h>
-# define FMTd32 PRId32
-# define FMTu32 PRIu32
-# define FMTx32 PRIx32
-# define FMTd64 PRId64
-# define FMTu64 PRIu64
-# define FMTx64 PRIx64
-# define FMTdPTR PRIdPTR
-# define FMTuPTR PRIuPTR
-# define FMTxPTR PRIxPTR
-#endif
-
-/* Size of stack-allocated buffer passed to buferror(). */
-#define BUFERROR_BUF 64
-
-/*
- * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
- * large enough for all possible uses within jemalloc.
- */
-#define MALLOC_PRINTF_BUFSIZE 4096
-
-int buferror(int err, char *buf, size_t buflen);
-uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
- int base);
-void malloc_write(const char *s);
-
-/*
- * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
- * point math.
- */
-size_t malloc_vsnprintf(char *str, size_t size, const char *format,
- va_list ap);
-size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
- JEMALLOC_FORMAT_PRINTF(3, 4);
-/*
- * The caller can set write_cb to null to choose to print with the
- * je_malloc_message hook.
- */
-void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, va_list ap);
-void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
-void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
-
-static inline ssize_t
-malloc_write_fd(int fd, const void *buf, size_t count) {
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
- /*
- * Use syscall(2) rather than write(2) when possible in order to avoid
- * the possibility of memory allocation within libc. This is necessary
- * on FreeBSD; most operating systems do not have this problem though.
- *
- * syscall() returns long or int, depending on platform, so capture the
- * result in the widest plausible type to avoid compiler warnings.
- */
- long result = syscall(SYS_write, fd, buf, count);
-#else
- ssize_t result = (ssize_t)write(fd, buf,
-#ifdef _WIN32
- (unsigned int)
-#endif
- count);
-#endif
- return (ssize_t)result;
-}
-
-static inline ssize_t
-malloc_read_fd(int fd, void *buf, size_t count) {
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
- long result = syscall(SYS_read, fd, buf, count);
-#else
- ssize_t result = read(fd, buf,
-#ifdef _WIN32
- (unsigned int)
-#endif
- count);
-#endif
- return (ssize_t)result;
-}
-
-#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
+#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
+#define JEMALLOC_INTERNAL_MALLOC_IO_H
+
+#ifdef _WIN32
+# ifdef _WIN64
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX "ll"
+# else
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX ""
+# endif
+# define FMTd32 "d"
+# define FMTu32 "u"
+# define FMTx32 "x"
+# define FMTd64 FMT64_PREFIX "d"
+# define FMTu64 FMT64_PREFIX "u"
+# define FMTx64 FMT64_PREFIX "x"
+# define FMTdPTR FMTPTR_PREFIX "d"
+# define FMTuPTR FMTPTR_PREFIX "u"
+# define FMTxPTR FMTPTR_PREFIX "x"
+#else
+# include <inttypes.h>
+# define FMTd32 PRId32
+# define FMTu32 PRIu32
+# define FMTx32 PRIx32
+# define FMTd64 PRId64
+# define FMTu64 PRIu64
+# define FMTx64 PRIx64
+# define FMTdPTR PRIdPTR
+# define FMTuPTR PRIuPTR
+# define FMTxPTR PRIxPTR
+#endif
+
+/* Size of stack-allocated buffer passed to buferror(). */
+#define BUFERROR_BUF 64
+
+/*
+ * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
+ * large enough for all possible uses within jemalloc.
+ */
+#define MALLOC_PRINTF_BUFSIZE 4096
+
+int buferror(int err, char *buf, size_t buflen);
+uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
+ int base);
+void malloc_write(const char *s);
+
+/*
+ * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
+ * point math.
+ */
+size_t malloc_vsnprintf(char *str, size_t size, const char *format,
+ va_list ap);
+size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
+ JEMALLOC_FORMAT_PRINTF(3, 4);
+/*
+ * The caller can set write_cb to null to choose to print with the
+ * je_malloc_message hook.
+ */
+void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, va_list ap);
+void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
+void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+
+static inline ssize_t
+malloc_write_fd(int fd, const void *buf, size_t count) {
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
+ /*
+ * Use syscall(2) rather than write(2) when possible in order to avoid
+ * the possibility of memory allocation within libc. This is necessary
+ * on FreeBSD; most operating systems do not have this problem though.
+ *
+ * syscall() returns long or int, depending on platform, so capture the
+ * result in the widest plausible type to avoid compiler warnings.
+ */
+ long result = syscall(SYS_write, fd, buf, count);
+#else
+ ssize_t result = (ssize_t)write(fd, buf,
+#ifdef _WIN32
+ (unsigned int)
+#endif
+ count);
+#endif
+ return (ssize_t)result;
+}
+
+static inline ssize_t
+malloc_read_fd(int fd, void *buf, size_t count) {
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
+ long result = syscall(SYS_read, fd, buf, count);
+#else
+ ssize_t result = read(fd, buf,
+#ifdef _WIN32
+ (unsigned int)
+#endif
+ count);
+#endif
+ return (ssize_t)result;
+}
+
+#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
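As a usage sketch, the helpers above let internal code format and emit a message without going through libc's printf machinery; the function name, buffer size, and message below are illustrative, not taken from jemalloc.

static void
example_report(uint64_t nmalloc, void *addr) {
	char buf[128];

	/* Formats without floating point; see malloc_vsnprintf() above. */
	malloc_snprintf(buf, sizeof(buf),
	    "nmalloc: %"FMTu64", last addr: %p\n", nmalloc, addr);
	/* Typically routed through the je_malloc_message hook. */
	malloc_write(buf);
}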
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/mutex.h b/contrib/libs/jemalloc/include/jemalloc/internal/mutex.h
index 7c24f0725e..6b7c45d62b 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/mutex.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/mutex.h
@@ -1,136 +1,136 @@
-#ifndef JEMALLOC_INTERNAL_MUTEX_H
-#define JEMALLOC_INTERNAL_MUTEX_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/mutex_prof.h"
-#include "jemalloc/internal/tsd.h"
-#include "jemalloc/internal/witness.h"
-
-typedef enum {
- /* Can only acquire one mutex of a given witness rank at a time. */
- malloc_mutex_rank_exclusive,
- /*
- * Can acquire multiple mutexes of the same witness rank, but in
- * address-ascending order only.
- */
- malloc_mutex_address_ordered
-} malloc_mutex_lock_order_t;
+#ifndef JEMALLOC_INTERNAL_MUTEX_H
+#define JEMALLOC_INTERNAL_MUTEX_H
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/witness.h"
+
+typedef enum {
+ /* Can only acquire one mutex of a given witness rank at a time. */
+ malloc_mutex_rank_exclusive,
+ /*
+ * Can acquire multiple mutexes of the same witness rank, but in
+ * address-ascending order only.
+ */
+ malloc_mutex_address_ordered
+} malloc_mutex_lock_order_t;
+
typedef struct malloc_mutex_s malloc_mutex_t;
-struct malloc_mutex_s {
- union {
- struct {
- /*
- * prof_data is defined first to reduce cacheline
- * bouncing: the data is not touched by the mutex holder
- * during unlocking, while it might be modified by
- * contenders. Having it before the mutex itself could
- * avoid prefetching a modified cacheline (for the
- * unlocking thread).
- */
- mutex_prof_data_t prof_data;
+struct malloc_mutex_s {
+ union {
+ struct {
+ /*
+ * prof_data is defined first to reduce cacheline
+ * bouncing: the data is not touched by the mutex holder
+ * during unlocking, while it might be modified by
+ * contenders. Having it before the mutex itself could
+ * avoid prefetching a modified cacheline (for the
+ * unlocking thread).
+ */
+ mutex_prof_data_t prof_data;
#ifdef _WIN32
-# if _WIN32_WINNT >= 0x0600
- SRWLOCK lock;
-# else
- CRITICAL_SECTION lock;
-# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock lock;
+# if _WIN32_WINNT >= 0x0600
+ SRWLOCK lock;
+# else
+ CRITICAL_SECTION lock;
+# endif
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
- pthread_mutex_t lock;
- malloc_mutex_t *postponed_next;
+ pthread_mutex_t lock;
+ malloc_mutex_t *postponed_next;
#else
- pthread_mutex_t lock;
-#endif
- /*
- * Hint flag to avoid exclusive cache line contention
- * during spin waiting
- */
- atomic_b_t locked;
- };
- /*
- * We only touch witness when configured w/ debug. However we
- * keep the field in a union when !debug so that we don't have
- * to pollute the code base with #ifdefs, while avoiding the
- * memory cost.
- */
-#if !defined(JEMALLOC_DEBUG)
- witness_t witness;
- malloc_mutex_lock_order_t lock_order;
-#endif
- };
-
-#if defined(JEMALLOC_DEBUG)
- witness_t witness;
- malloc_mutex_lock_order_t lock_order;
-#endif
-};
-
-/*
- * Based on benchmark results, a fixed spin with this amount of retries works
- * well for our critical sections.
- */
-#define MALLOC_MUTEX_MAX_SPIN 250
-
-#ifdef _WIN32
-# if _WIN32_WINNT >= 0x0600
-# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
-# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
-# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
+ pthread_mutex_t lock;
+#endif
+ /*
+ * Hint flag to avoid exclusive cache line contention
+ * during spin waiting
+ */
+ atomic_b_t locked;
+ };
+ /*
+ * We only touch witness when configured w/ debug. However we
+ * keep the field in a union when !debug so that we don't have
+ * to pollute the code base with #ifdefs, while avoiding the
+ * memory cost.
+ */
+#if !defined(JEMALLOC_DEBUG)
+ witness_t witness;
+ malloc_mutex_lock_order_t lock_order;
+#endif
+ };
+
+#if defined(JEMALLOC_DEBUG)
+ witness_t witness;
+ malloc_mutex_lock_order_t lock_order;
+#endif
+};
+
+/*
+ * Based on benchmark results, a fixed spin with this amount of retries works
+ * well for our critical sections.
+ */
+#define MALLOC_MUTEX_MAX_SPIN 250
+
+#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
+# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
+# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
# else
-# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
-# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
-# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
+# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
+# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
+# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
-# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
-# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
-#else
-# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
-# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
-# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
+# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
+# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
+#else
+# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
+# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
+# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif
-#define LOCK_PROF_DATA_INITIALIZER \
- {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
- ATOMIC_INIT(0), 0, NULL, 0}
+#define LOCK_PROF_DATA_INITIALIZER \
+ {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
+ ATOMIC_INIT(0), 0, NULL, 0}
#ifdef _WIN32
-# define MALLOC_MUTEX_INITIALIZER
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-# if defined(JEMALLOC_DEBUG)
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
-# else
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-# endif
+# define MALLOC_MUTEX_INITIALIZER
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+# if defined(JEMALLOC_DEBUG)
+# define MALLOC_MUTEX_INITIALIZER \
+ {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
+# else
+# define MALLOC_MUTEX_INITIALIZER \
+ {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+# endif
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-# if (defined(JEMALLOC_DEBUG))
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
-# else
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-# endif
-
+# if (defined(JEMALLOC_DEBUG))
+# define MALLOC_MUTEX_INITIALIZER \
+ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
+# else
+# define MALLOC_MUTEX_INITIALIZER \
+ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+# endif
+
#else
-# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-# if defined(JEMALLOC_DEBUG)
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
-# else
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-# endif
+# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
+# if defined(JEMALLOC_DEBUG)
+# define MALLOC_MUTEX_INITIALIZER \
+ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
+# else
+# define MALLOC_MUTEX_INITIALIZER \
+ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+# endif
#endif
#ifdef JEMALLOC_LAZY_LOCK
@@ -140,149 +140,149 @@ extern bool isthreaded;
# define isthreaded true
#endif
-bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
- witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
-void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
-bool malloc_mutex_boot(void);
-void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
+bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
+void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
+bool malloc_mutex_boot(void);
+void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
+void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
-static inline void
-malloc_mutex_lock_final(malloc_mutex_t *mutex) {
- MALLOC_MUTEX_LOCK(mutex);
- atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
-}
-
-static inline bool
-malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
- return MALLOC_MUTEX_TRYLOCK(mutex);
-}
+static inline void
+malloc_mutex_lock_final(malloc_mutex_t *mutex) {
+ MALLOC_MUTEX_LOCK(mutex);
+ atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
+}
-static inline void
-mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- if (config_stats) {
- mutex_prof_data_t *data = &mutex->prof_data;
- data->n_lock_ops++;
- if (data->prev_owner != tsdn) {
- data->prev_owner = tsdn;
- data->n_owner_switches++;
- }
- }
-}
+static inline bool
+malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
+ return MALLOC_MUTEX_TRYLOCK(mutex);
+}
-/* Trylock: return false if the lock is successfully acquired. */
-static inline bool
-malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+static inline void
+mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ if (config_stats) {
+ mutex_prof_data_t *data = &mutex->prof_data;
+ data->n_lock_ops++;
+ if (data->prev_owner != tsdn) {
+ data->prev_owner = tsdn;
+ data->n_owner_switches++;
+ }
+ }
+}
+
+/* Trylock: return false if the lock is successfully acquired. */
+static inline bool
+malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
- if (malloc_mutex_trylock_final(mutex)) {
- atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
- return true;
- }
- mutex_owner_stats_update(tsdn, mutex);
- }
- witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
-
- return false;
-}
-
-/* Aggregate lock prof data. */
-static inline void
-malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
- nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
- if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
- nstime_copy(&sum->max_wait_time, &data->max_wait_time);
+ if (malloc_mutex_trylock_final(mutex)) {
+ atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
+ return true;
+ }
+ mutex_owner_stats_update(tsdn, mutex);
}
-
- sum->n_wait_times += data->n_wait_times;
- sum->n_spin_acquired += data->n_spin_acquired;
-
- if (sum->max_n_thds < data->max_n_thds) {
- sum->max_n_thds = data->max_n_thds;
- }
- uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
- ATOMIC_RELAXED);
- uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
- &data->n_waiting_thds, ATOMIC_RELAXED);
- atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
- ATOMIC_RELAXED);
- sum->n_owner_switches += data->n_owner_switches;
- sum->n_lock_ops += data->n_lock_ops;
+ witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+
+ return false;
}
-static inline void
-malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
- if (isthreaded) {
- if (malloc_mutex_trylock_final(mutex)) {
- malloc_mutex_lock_slow(mutex);
- atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
- }
- mutex_owner_stats_update(tsdn, mutex);
- }
- witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
-}
+/* Aggregate lock prof data. */
+static inline void
+malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
+ nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
+ if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
+ nstime_copy(&sum->max_wait_time, &data->max_wait_time);
+ }
-static inline void
-malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED);
- witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+ sum->n_wait_times += data->n_wait_times;
+ sum->n_spin_acquired += data->n_spin_acquired;
+
+ if (sum->max_n_thds < data->max_n_thds) {
+ sum->max_n_thds = data->max_n_thds;
+ }
+ uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
+ ATOMIC_RELAXED);
+ uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
+ &data->n_waiting_thds, ATOMIC_RELAXED);
+ atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
+ ATOMIC_RELAXED);
+ sum->n_owner_switches += data->n_owner_switches;
+ sum->n_lock_ops += data->n_lock_ops;
+}
+
+static inline void
+malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
- MALLOC_MUTEX_UNLOCK(mutex);
- }
-}
-
-static inline void
-malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
-}
-
-static inline void
-malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
-}
-
-/* Copy the prof data from mutex for processing. */
-static inline void
-malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
- malloc_mutex_t *mutex) {
- mutex_prof_data_t *source = &mutex->prof_data;
- /* Can only read holding the mutex. */
- malloc_mutex_assert_owner(tsdn, mutex);
-
- /*
- * Not *really* allowed (we shouldn't be doing non-atomic loads of
- * atomic data), but the mutex protection makes this safe, and writing
- * a member-for-member copy is tedious for this situation.
- */
- *data = *source;
- /* n_wait_thds is not reported (modified w/o locking). */
- atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
-}
-
-static inline void
-malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
- malloc_mutex_t *mutex) {
- mutex_prof_data_t *source = &mutex->prof_data;
- /* Can only read holding the mutex. */
- malloc_mutex_assert_owner(tsdn, mutex);
-
- nstime_add(&data->tot_wait_time, &source->tot_wait_time);
- if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
- nstime_copy(&data->max_wait_time, &source->max_wait_time);
- }
- data->n_wait_times += source->n_wait_times;
- data->n_spin_acquired += source->n_spin_acquired;
- if (data->max_n_thds < source->max_n_thds) {
- data->max_n_thds = source->max_n_thds;
+ if (malloc_mutex_trylock_final(mutex)) {
+ malloc_mutex_lock_slow(mutex);
+ atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
+ }
+ mutex_owner_stats_update(tsdn, mutex);
}
- /* n_wait_thds is not reported. */
- atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
- data->n_owner_switches += source->n_owner_switches;
- data->n_lock_ops += source->n_lock_ops;
+ witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
-#endif /* JEMALLOC_INTERNAL_MUTEX_H */
+static inline void
+malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED);
+ witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+ if (isthreaded) {
+ MALLOC_MUTEX_UNLOCK(mutex);
+ }
+}
+
+static inline void
+malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+}
+
+static inline void
+malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+}
+
+/* Copy the prof data from mutex for processing. */
+static inline void
+malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
+ malloc_mutex_t *mutex) {
+ mutex_prof_data_t *source = &mutex->prof_data;
+ /* Can only read holding the mutex. */
+ malloc_mutex_assert_owner(tsdn, mutex);
+
+ /*
+ * Not *really* allowed (we shouldn't be doing non-atomic loads of
+ * atomic data), but the mutex protection makes this safe, and writing
+ * a member-for-member copy is tedious for this situation.
+ */
+ *data = *source;
+ /* n_wait_thds is not reported (modified w/o locking). */
+ atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
+}
+
+static inline void
+malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
+ malloc_mutex_t *mutex) {
+ mutex_prof_data_t *source = &mutex->prof_data;
+ /* Can only read holding the mutex. */
+ malloc_mutex_assert_owner(tsdn, mutex);
+
+ nstime_add(&data->tot_wait_time, &source->tot_wait_time);
+ if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
+ nstime_copy(&data->max_wait_time, &source->max_wait_time);
+ }
+ data->n_wait_times += source->n_wait_times;
+ data->n_spin_acquired += source->n_spin_acquired;
+ if (data->max_n_thds < source->max_n_thds) {
+ data->max_n_thds = source->max_n_thds;
+ }
+ /* n_wait_thds is not reported. */
+ atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
+ data->n_owner_switches += source->n_owner_switches;
+ data->n_lock_ops += source->n_lock_ops;
+}
+
+#endif /* JEMALLOC_INTERNAL_MUTEX_H */
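A minimal sketch of the mutex API above; the mutex, its name, and the witness rank choice are illustrative (real callers pass module-specific ranks). Note the inverted trylock convention documented above: malloc_mutex_trylock() returns false when the lock was acquired.

static malloc_mutex_t example_mtx;

static bool
example_boot(void) {
	/* jemalloc convention: returns true on failure. */
	return malloc_mutex_init(&example_mtx, "example",
	    WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive);
}

static void
example_critical_section(tsdn_t *tsdn) {
	malloc_mutex_lock(tsdn, &example_mtx);
	/* ... state protected by example_mtx ... */
	malloc_mutex_unlock(tsdn, &example_mtx);
}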
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/mutex_pool.h b/contrib/libs/jemalloc/include/jemalloc/internal/mutex_pool.h
index 726cece90b..52a40b25e6 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/mutex_pool.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/mutex_pool.h
@@ -1,94 +1,94 @@
-#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
-#define JEMALLOC_INTERNAL_MUTEX_POOL_H
-
-#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/witness.h"
-
-/* We do mod reductions by this value, so it should be kept a power of 2. */
-#define MUTEX_POOL_SIZE 256
-
-typedef struct mutex_pool_s mutex_pool_t;
-struct mutex_pool_s {
- malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
-};
-
-bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);
-
-/* Internal helper - not meant to be called outside this module. */
-static inline malloc_mutex_t *
-mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
- size_t hash_result[2];
- hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
- return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
-}
-
-static inline void
-mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
- for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
- malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
- }
-}
-
-/*
- * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
- * You're not allowed to acquire mutexes in the pool one at a time. You have to
- * acquire all the mutexes you'll need in a single function call, and then
- * release them all in a single function call.
- */
-
-static inline void
-mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
- mutex_pool_assert_not_held(tsdn, pool);
-
- malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
- malloc_mutex_lock(tsdn, mutex);
-}
-
-static inline void
-mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
- malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
- malloc_mutex_unlock(tsdn, mutex);
-
- mutex_pool_assert_not_held(tsdn, pool);
-}
-
-static inline void
-mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
- uintptr_t key2) {
- mutex_pool_assert_not_held(tsdn, pool);
-
- malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
- malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
- if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
- malloc_mutex_lock(tsdn, mutex1);
- malloc_mutex_lock(tsdn, mutex2);
- } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
- malloc_mutex_lock(tsdn, mutex1);
- } else {
- malloc_mutex_lock(tsdn, mutex2);
- malloc_mutex_lock(tsdn, mutex1);
- }
-}
-
-static inline void
-mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
- uintptr_t key2) {
- malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
- malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
- if (mutex1 == mutex2) {
- malloc_mutex_unlock(tsdn, mutex1);
- } else {
- malloc_mutex_unlock(tsdn, mutex1);
- malloc_mutex_unlock(tsdn, mutex2);
- }
-
- mutex_pool_assert_not_held(tsdn, pool);
-}
-
-static inline void
-mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
- malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
-}
-
-#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
+#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
+#define JEMALLOC_INTERNAL_MUTEX_POOL_H
+
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/witness.h"
+
+/* We do mod reductions by this value, so it should be kept a power of 2. */
+#define MUTEX_POOL_SIZE 256
+
+typedef struct mutex_pool_s mutex_pool_t;
+struct mutex_pool_s {
+ malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
+};
+
+bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);
+
+/* Internal helper - not meant to be called outside this module. */
+static inline malloc_mutex_t *
+mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
+ size_t hash_result[2];
+ hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
+ return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
+}
+
+static inline void
+mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
+ for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
+ malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
+ }
+}
+
+/*
+ * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
+ * You're not allowed to acquire mutexes in the pool one at a time. You have to
+ * acquire all the mutexes you'll need in a single function call, and then
+ * release them all in a single function call.
+ */
+
+static inline void
+mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
+ mutex_pool_assert_not_held(tsdn, pool);
+
+ malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
+ malloc_mutex_lock(tsdn, mutex);
+}
+
+static inline void
+mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
+ malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
+ malloc_mutex_unlock(tsdn, mutex);
+
+ mutex_pool_assert_not_held(tsdn, pool);
+}
+
+static inline void
+mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
+ uintptr_t key2) {
+ mutex_pool_assert_not_held(tsdn, pool);
+
+ malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
+ malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
+ if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
+ malloc_mutex_lock(tsdn, mutex1);
+ malloc_mutex_lock(tsdn, mutex2);
+ } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
+ malloc_mutex_lock(tsdn, mutex1);
+ } else {
+ malloc_mutex_lock(tsdn, mutex2);
+ malloc_mutex_lock(tsdn, mutex1);
+ }
+}
+
+static inline void
+mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
+ uintptr_t key2) {
+ malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
+ malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
+ if (mutex1 == mutex2) {
+ malloc_mutex_unlock(tsdn, mutex1);
+ } else {
+ malloc_mutex_unlock(tsdn, mutex1);
+ malloc_mutex_unlock(tsdn, mutex2);
+ }
+
+ mutex_pool_assert_not_held(tsdn, pool);
+}
+
+static inline void
+mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
+ malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
+}
+
+#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
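The comment above is the key constraint: callers take every pool mutex they will need in one call and release them in one call. A sketch of the two-key case follows (the function and keys are illustrative); mutex_pool_lock2() acquires the two underlying mutexes in address order, so callers need not worry about lock ordering between the keys.

static void
example_pair_op(tsdn_t *tsdn, mutex_pool_t *pool, void *a, void *b) {
	mutex_pool_lock2(tsdn, pool, (uintptr_t)a, (uintptr_t)b);
	/* ... both objects are now protected ... */
	mutex_pool_unlock2(tsdn, pool, (uintptr_t)a, (uintptr_t)b);
}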
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/mutex_prof.h b/contrib/libs/jemalloc/include/jemalloc/internal/mutex_prof.h
index 2cb8fb0cbf..2422630b07 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/mutex_prof.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/mutex_prof.h
@@ -1,108 +1,108 @@
-#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
-#define JEMALLOC_INTERNAL_MUTEX_PROF_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/tsd_types.h"
-
-#define MUTEX_PROF_GLOBAL_MUTEXES \
- OP(background_thread) \
- OP(ctl) \
- OP(prof)
-
-typedef enum {
-#define OP(mtx) global_prof_mutex_##mtx,
- MUTEX_PROF_GLOBAL_MUTEXES
-#undef OP
- mutex_prof_num_global_mutexes
-} mutex_prof_global_ind_t;
-
-#define MUTEX_PROF_ARENA_MUTEXES \
- OP(large) \
- OP(extent_avail) \
- OP(extents_dirty) \
- OP(extents_muzzy) \
- OP(extents_retained) \
- OP(decay_dirty) \
- OP(decay_muzzy) \
- OP(base) \
- OP(tcache_list)
-
-typedef enum {
-#define OP(mtx) arena_prof_mutex_##mtx,
- MUTEX_PROF_ARENA_MUTEXES
-#undef OP
- mutex_prof_num_arena_mutexes
-} mutex_prof_arena_ind_t;
-
-/*
- * The fourth parameter is a boolean value that is true for derived rate counters
- * and false for real ones.
- */
-#define MUTEX_PROF_UINT64_COUNTERS \
- OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \
- OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \
- OP(num_wait, uint64_t, "n_waiting", false, num_wait) \
- OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \
- OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \
- OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \
- OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \
- OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \
- OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \
- OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \
- OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)
-
-#define MUTEX_PROF_UINT32_COUNTERS \
- OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)
-
-#define MUTEX_PROF_COUNTERS \
- MUTEX_PROF_UINT64_COUNTERS \
- MUTEX_PROF_UINT32_COUNTERS
-
-#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,
-
-#define COUNTER_ENUM(counter_list, t) \
- typedef enum { \
- counter_list \
- mutex_prof_num_##t##_counters \
- } mutex_prof_##t##_counter_ind_t;
-
-COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
-COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
-
-#undef COUNTER_ENUM
-#undef OP
-
-typedef struct {
- /*
- * Counters touched on the slow path, i.e. when there is lock
- * contention. We update them once we have the lock.
- */
- /* Total time (in nanoseconds) spent waiting on this mutex. */
- nstime_t tot_wait_time;
- /* Max time (in nanoseconds) spent on a single lock operation. */
- nstime_t max_wait_time;
- /* # of times we had to wait for this mutex (after spinning). */
- uint64_t n_wait_times;
- /* # of times acquired the mutex through local spinning. */
- uint64_t n_spin_acquired;
- /* Max # of threads waiting for the mutex at the same time. */
- uint32_t max_n_thds;
- /* Current # of threads waiting on the lock. Atomic synced. */
- atomic_u32_t n_waiting_thds;
-
- /*
- * Data touched on the fast path. These are modified right after we
- * grab the lock, so they're placed closest to the end (i.e. right before
- * the lock) so that we have a higher chance of them being on the same
- * cacheline.
- */
- /* # of times the mutex holder is different than the previous one. */
- uint64_t n_owner_switches;
- /* Previous mutex holder, to facilitate n_owner_switches. */
- tsdn_t *prev_owner;
- /* # of lock() operations in total. */
- uint64_t n_lock_ops;
-} mutex_prof_data_t;
-
-#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
+#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
+#define JEMALLOC_INTERNAL_MUTEX_PROF_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/tsd_types.h"
+
+#define MUTEX_PROF_GLOBAL_MUTEXES \
+ OP(background_thread) \
+ OP(ctl) \
+ OP(prof)
+
+typedef enum {
+#define OP(mtx) global_prof_mutex_##mtx,
+ MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+ mutex_prof_num_global_mutexes
+} mutex_prof_global_ind_t;
+
+#define MUTEX_PROF_ARENA_MUTEXES \
+ OP(large) \
+ OP(extent_avail) \
+ OP(extents_dirty) \
+ OP(extents_muzzy) \
+ OP(extents_retained) \
+ OP(decay_dirty) \
+ OP(decay_muzzy) \
+ OP(base) \
+ OP(tcache_list)
+
+typedef enum {
+#define OP(mtx) arena_prof_mutex_##mtx,
+ MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+ mutex_prof_num_arena_mutexes
+} mutex_prof_arena_ind_t;
+
+/*
+ * The fourth parameter is a boolean value that is true for derived rate counters
+ * and false for real ones.
+ */
+#define MUTEX_PROF_UINT64_COUNTERS \
+ OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \
+ OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \
+ OP(num_wait, uint64_t, "n_waiting", false, num_wait) \
+ OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \
+ OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \
+ OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \
+ OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \
+ OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \
+ OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \
+ OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \
+ OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)
+
+#define MUTEX_PROF_UINT32_COUNTERS \
+ OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)
+
+#define MUTEX_PROF_COUNTERS \
+ MUTEX_PROF_UINT64_COUNTERS \
+ MUTEX_PROF_UINT32_COUNTERS
+
+#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,
+
+#define COUNTER_ENUM(counter_list, t) \
+ typedef enum { \
+ counter_list \
+ mutex_prof_num_##t##_counters \
+ } mutex_prof_##t##_counter_ind_t;
+
+COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
+COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
+
+#undef COUNTER_ENUM
+#undef OP
+
+typedef struct {
+ /*
+ * Counters touched on the slow path, i.e. when there is lock
+ * contention. We update them once we have the lock.
+ */
+ /* Total time (in nanoseconds) spent waiting on this mutex. */
+ nstime_t tot_wait_time;
+ /* Max time (in nanoseconds) spent on a single lock operation. */
+ nstime_t max_wait_time;
+ /* # of times we had to wait for this mutex (after spinning). */
+ uint64_t n_wait_times;
+ /* # of times acquired the mutex through local spinning. */
+ uint64_t n_spin_acquired;
+ /* Max # of threads waiting for the mutex at the same time. */
+ uint32_t max_n_thds;
+ /* Current # of threads waiting on the lock. Atomic synced. */
+ atomic_u32_t n_waiting_thds;
+
+ /*
+ * Data touched on the fast path. These are modified right after we
+ * grab the lock, so they're placed closest to the end (i.e. right before
+ * the lock) so that we have a higher chance of them being on the same
+ * cacheline.
+ */
+ /* # of times the mutex holder is different than the previous one. */
+ uint64_t n_owner_switches;
+ /* Previous mutex holder, to facilitate n_owner_switches. */
+ tsdn_t *prev_owner;
+ /* # of lock() operations in total. */
+ uint64_t n_lock_ops;
+} mutex_prof_data_t;
+
+#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
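The OP()/#undef OP pattern above is an x-macro: consumers define OP to whatever per-entry expansion they need, expand the list, and then undefine it. A sketch follows; the name table itself is illustrative and not something jemalloc defines.

static const char *example_arena_mutex_names[mutex_prof_num_arena_mutexes] = {
#define OP(mtx) #mtx,
	MUTEX_PROF_ARENA_MUTEXES
#undef OP
};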
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/nstime.h b/contrib/libs/jemalloc/include/jemalloc/internal/nstime.h
index 17c177c7f4..b72eeba230 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/nstime.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/nstime.h
@@ -1,34 +1,34 @@
-#ifndef JEMALLOC_INTERNAL_NSTIME_H
-#define JEMALLOC_INTERNAL_NSTIME_H
-
-/* Maximum supported number of seconds (~584 years). */
-#define NSTIME_SEC_MAX KQU(18446744072)
-#define NSTIME_ZERO_INITIALIZER {0}
-
-typedef struct {
- uint64_t ns;
-} nstime_t;
-
-void nstime_init(nstime_t *time, uint64_t ns);
-void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
-uint64_t nstime_ns(const nstime_t *time);
-uint64_t nstime_sec(const nstime_t *time);
-uint64_t nstime_msec(const nstime_t *time);
-uint64_t nstime_nsec(const nstime_t *time);
-void nstime_copy(nstime_t *time, const nstime_t *source);
-int nstime_compare(const nstime_t *a, const nstime_t *b);
-void nstime_add(nstime_t *time, const nstime_t *addend);
-void nstime_iadd(nstime_t *time, uint64_t addend);
-void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
-void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
-void nstime_imultiply(nstime_t *time, uint64_t multiplier);
-void nstime_idivide(nstime_t *time, uint64_t divisor);
-uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
-
-typedef bool (nstime_monotonic_t)(void);
-extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
-
-typedef bool (nstime_update_t)(nstime_t *);
-extern nstime_update_t *JET_MUTABLE nstime_update;
-
-#endif /* JEMALLOC_INTERNAL_NSTIME_H */
+#ifndef JEMALLOC_INTERNAL_NSTIME_H
+#define JEMALLOC_INTERNAL_NSTIME_H
+
+/* Maximum supported number of seconds (~584 years). */
+#define NSTIME_SEC_MAX KQU(18446744072)
+#define NSTIME_ZERO_INITIALIZER {0}
+
+typedef struct {
+ uint64_t ns;
+} nstime_t;
+
+void nstime_init(nstime_t *time, uint64_t ns);
+void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
+uint64_t nstime_ns(const nstime_t *time);
+uint64_t nstime_sec(const nstime_t *time);
+uint64_t nstime_msec(const nstime_t *time);
+uint64_t nstime_nsec(const nstime_t *time);
+void nstime_copy(nstime_t *time, const nstime_t *source);
+int nstime_compare(const nstime_t *a, const nstime_t *b);
+void nstime_add(nstime_t *time, const nstime_t *addend);
+void nstime_iadd(nstime_t *time, uint64_t addend);
+void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
+void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
+void nstime_imultiply(nstime_t *time, uint64_t multiplier);
+void nstime_idivide(nstime_t *time, uint64_t divisor);
+uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
+
+typedef bool (nstime_monotonic_t)(void);
+extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
+
+typedef bool (nstime_update_t)(nstime_t *);
+extern nstime_update_t *JET_MUTABLE nstime_update;
+
+#endif /* JEMALLOC_INTERNAL_NSTIME_H */
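A sketch of timing an interval with the API above; the variable names and message are illustrative, and malloc_printf()/FMTu64 come from the malloc_io.h header shown earlier. nstime_update() reads the current time into its argument (guarding against clock regressions), so the start value is copied first to preserve it for the subtraction.

static void
example_measure(void) {
	nstime_t begin, elapsed;

	nstime_init(&begin, 0);
	nstime_update(&begin);			/* begin <- now */
	/* ... work being timed ... */
	nstime_copy(&elapsed, &begin);
	nstime_update(&elapsed);		/* elapsed <- now */
	nstime_subtract(&elapsed, &begin);	/* elapsed <- elapsed - begin */
	malloc_printf("elapsed: %"FMTu64" ns\n", nstime_ns(&elapsed));
}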
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/pages.h b/contrib/libs/jemalloc/include/jemalloc/internal/pages.h
index 7dae633afe..cb7d3972fd 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/pages.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/pages.h
@@ -1,88 +1,88 @@
-#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
-#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
-
-/* Page size. LG_PAGE is determined by the configure script. */
-#ifdef PAGE_MASK
-# undef PAGE_MASK
-#endif
-#define PAGE ((size_t)(1U << LG_PAGE))
-#define PAGE_MASK ((size_t)(PAGE - 1))
-/* Return the page base address for the page containing address a. */
-#define PAGE_ADDR2BASE(a) \
- ((void *)((uintptr_t)(a) & ~PAGE_MASK))
-/* Return the smallest pagesize multiple that is >= s. */
-#define PAGE_CEILING(s) \
- (((s) + PAGE_MASK) & ~PAGE_MASK)
-
-/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
-#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
-#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
-/* Return the huge page base address for the huge page containing address a. */
-#define HUGEPAGE_ADDR2BASE(a) \
- ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
-/* Return the smallest huge page size multiple that is >= s. */
-#define HUGEPAGE_CEILING(s) \
- (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
-
-/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
-#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
-# define PAGES_CAN_PURGE_LAZY
-#endif
-/*
- * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
- *
- * The only supported way to hard-purge on Windows is to decommit and then
- * re-commit, but doing so is racy, and if re-commit fails it's a pain to
- * propagate the "poisoned" memory state. Since we typically decommit as the
- * next step after purging on Windows anyway, there's no point in adding such
- * complexity.
- */
-#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
- defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
- defined(JEMALLOC_MAPS_COALESCE))
-# define PAGES_CAN_PURGE_FORCED
-#endif
-
-static const bool pages_can_purge_lazy =
-#ifdef PAGES_CAN_PURGE_LAZY
- true
-#else
- false
-#endif
- ;
-static const bool pages_can_purge_forced =
-#ifdef PAGES_CAN_PURGE_FORCED
- true
-#else
- false
-#endif
- ;
-
-typedef enum {
- thp_mode_default = 0, /* Do not change hugepage settings. */
- thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
- thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */
-
- thp_mode_names_limit = 3, /* Used for option processing. */
- thp_mode_not_supported = 3 /* No THP support detected. */
-} thp_mode_t;
-
-#define THP_MODE_DEFAULT thp_mode_default
-extern thp_mode_t opt_thp;
-extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
-extern const char *thp_mode_names[];
-
-void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
-void pages_unmap(void *addr, size_t size);
-bool pages_commit(void *addr, size_t size);
-bool pages_decommit(void *addr, size_t size);
-bool pages_purge_lazy(void *addr, size_t size);
-bool pages_purge_forced(void *addr, size_t size);
-bool pages_huge(void *addr, size_t size);
-bool pages_nohuge(void *addr, size_t size);
-bool pages_dontdump(void *addr, size_t size);
-bool pages_dodump(void *addr, size_t size);
-bool pages_boot(void);
-void pages_set_thp_state(void *ptr, size_t size);
-
-#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
+#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
+#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
+
+/* Page size. LG_PAGE is determined by the configure script. */
+#ifdef PAGE_MASK
+# undef PAGE_MASK
+#endif
+#define PAGE ((size_t)(1U << LG_PAGE))
+#define PAGE_MASK ((size_t)(PAGE - 1))
+/* Return the page base address for the page containing address a. */
+#define PAGE_ADDR2BASE(a) \
+ ((void *)((uintptr_t)(a) & ~PAGE_MASK))
+/* Return the smallest pagesize multiple that is >= s. */
+#define PAGE_CEILING(s) \
+ (((s) + PAGE_MASK) & ~PAGE_MASK)
+
+/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
+#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
+#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
+/* Return the huge page base address for the huge page containing address a. */
+#define HUGEPAGE_ADDR2BASE(a) \
+ ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
+/* Return the smallest huge page size multiple that is >= s. */
+#define HUGEPAGE_CEILING(s) \
+ (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
+
+/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
+#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
+# define PAGES_CAN_PURGE_LAZY
+#endif
+/*
+ * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
+ *
+ * The only supported way to hard-purge on Windows is to decommit and then
+ * re-commit, but doing so is racy, and if re-commit fails it's a pain to
+ * propagate the "poisoned" memory state. Since we typically decommit as the
+ * next step after purging on Windows anyway, there's no point in adding such
+ * complexity.
+ */
+#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
+ defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
+ defined(JEMALLOC_MAPS_COALESCE))
+# define PAGES_CAN_PURGE_FORCED
+#endif
+
+static const bool pages_can_purge_lazy =
+#ifdef PAGES_CAN_PURGE_LAZY
+ true
+#else
+ false
+#endif
+ ;
+static const bool pages_can_purge_forced =
+#ifdef PAGES_CAN_PURGE_FORCED
+ true
+#else
+ false
+#endif
+ ;
+
+typedef enum {
+ thp_mode_default = 0, /* Do not change hugepage settings. */
+ thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
+ thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */
+
+ thp_mode_names_limit = 3, /* Used for option processing. */
+ thp_mode_not_supported = 3 /* No THP support detected. */
+} thp_mode_t;
+
+#define THP_MODE_DEFAULT thp_mode_default
+extern thp_mode_t opt_thp;
+extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
+extern const char *thp_mode_names[];
+
+void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
+void pages_unmap(void *addr, size_t size);
+bool pages_commit(void *addr, size_t size);
+bool pages_decommit(void *addr, size_t size);
+bool pages_purge_lazy(void *addr, size_t size);
+bool pages_purge_forced(void *addr, size_t size);
+bool pages_huge(void *addr, size_t size);
+bool pages_nohuge(void *addr, size_t size);
+bool pages_dontdump(void *addr, size_t size);
+bool pages_dodump(void *addr, size_t size);
+bool pages_boot(void);
+void pages_set_thp_state(void *ptr, size_t size);
+
+#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
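A worked example of the alignment macros above, assuming LG_PAGE == 12 (4 KiB pages), so PAGE == 4096 and PAGE_MASK == 0xfff; the concrete addresses and sizes are illustrative.

static void
example_page_math(void) {
	void *addr = (void *)(uintptr_t)0x12345;
	void *base = PAGE_ADDR2BASE(addr);	/* 0x12000: start of the page. */
	size_t rounded = PAGE_CEILING(5000);	/* 8192: two full pages. */

	(void)base;
	(void)rounded;
}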
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/ph.h b/contrib/libs/jemalloc/include/jemalloc/internal/ph.h
index 84d6778a90..a5066b64b1 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/ph.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/ph.h
@@ -1,391 +1,391 @@
-/*
- * A Pairing Heap implementation.
- *
- * "The Pairing Heap: A New Form of Self-Adjusting Heap"
- * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
- *
- * With an auxiliary two-pass list, described in a follow-on paper.
- *
- * "Pairing Heaps: Experiments and Analysis"
- * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
- *
- *******************************************************************************
- */
-
-#ifndef PH_H_
-#define PH_H_
-
-/* Node structure. */
-#define phn(a_type) \
-struct { \
- a_type *phn_prev; \
- a_type *phn_next; \
- a_type *phn_lchild; \
-}
-
-/* Root structure. */
-#define ph(a_type) \
-struct { \
- a_type *ph_root; \
-}
-
-/* Internal utility macros. */
-#define phn_lchild_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_lchild)
-#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
- a_phn->a_field.phn_lchild = a_lchild; \
-} while (0)
-
-#define phn_next_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_next)
-#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
- a_phn->a_field.phn_prev = a_prev; \
-} while (0)
-
-#define phn_prev_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_prev)
-#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
- a_phn->a_field.phn_next = a_next; \
-} while (0)
-
-#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
- a_type *phn0child; \
- \
- assert(a_phn0 != NULL); \
- assert(a_phn1 != NULL); \
- assert(a_cmp(a_phn0, a_phn1) <= 0); \
- \
- phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
- phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
- phn_next_set(a_type, a_field, a_phn1, phn0child); \
- if (phn0child != NULL) { \
- phn_prev_set(a_type, a_field, phn0child, a_phn1); \
- } \
- phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
-} while (0)
-
-#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
- if (a_phn0 == NULL) { \
- r_phn = a_phn1; \
- } else if (a_phn1 == NULL) { \
- r_phn = a_phn0; \
- } else if (a_cmp(a_phn0, a_phn1) < 0) { \
- phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
- a_cmp); \
- r_phn = a_phn0; \
- } else { \
- phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
- a_cmp); \
- r_phn = a_phn1; \
- } \
-} while (0)
-
-#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
- a_type *head = NULL; \
- a_type *tail = NULL; \
- a_type *phn0 = a_phn; \
- a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
- \
- /* \
- * Multipass merge, wherein the first two elements of a FIFO \
- * are repeatedly merged, and each result is appended to the \
- * singly linked FIFO, until the FIFO contains only a single \
- * element. We start with a sibling list but no reference to \
- * its tail, so we do a single pass over the sibling list to \
- * populate the FIFO. \
- */ \
- if (phn1 != NULL) { \
- a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
- if (phnrest != NULL) { \
- phn_prev_set(a_type, a_field, phnrest, NULL); \
- } \
- phn_prev_set(a_type, a_field, phn0, NULL); \
- phn_next_set(a_type, a_field, phn0, NULL); \
- phn_prev_set(a_type, a_field, phn1, NULL); \
- phn_next_set(a_type, a_field, phn1, NULL); \
- phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
- head = tail = phn0; \
- phn0 = phnrest; \
- while (phn0 != NULL) { \
- phn1 = phn_next_get(a_type, a_field, phn0); \
- if (phn1 != NULL) { \
- phnrest = phn_next_get(a_type, a_field, \
- phn1); \
- if (phnrest != NULL) { \
- phn_prev_set(a_type, a_field, \
- phnrest, NULL); \
- } \
- phn_prev_set(a_type, a_field, phn0, \
- NULL); \
- phn_next_set(a_type, a_field, phn0, \
- NULL); \
- phn_prev_set(a_type, a_field, phn1, \
- NULL); \
- phn_next_set(a_type, a_field, phn1, \
- NULL); \
- phn_merge(a_type, a_field, phn0, phn1, \
- a_cmp, phn0); \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = phnrest; \
- } else { \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = NULL; \
- } \
- } \
- phn0 = head; \
- phn1 = phn_next_get(a_type, a_field, phn0); \
- if (phn1 != NULL) { \
- while (true) { \
- head = phn_next_get(a_type, a_field, \
- phn1); \
- assert(phn_prev_get(a_type, a_field, \
- phn0) == NULL); \
- phn_next_set(a_type, a_field, phn0, \
- NULL); \
- assert(phn_prev_get(a_type, a_field, \
- phn1) == NULL); \
- phn_next_set(a_type, a_field, phn1, \
- NULL); \
- phn_merge(a_type, a_field, phn0, phn1, \
- a_cmp, phn0); \
- if (head == NULL) { \
- break; \
- } \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = head; \
- phn1 = phn_next_get(a_type, a_field, \
- phn0); \
- } \
- } \
- } \
- r_phn = phn0; \
-} while (0)
-
-#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
- a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
- if (phn != NULL) { \
- phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
- phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
- phn_prev_set(a_type, a_field, phn, NULL); \
- ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
- assert(phn_next_get(a_type, a_field, phn) == NULL); \
- phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
- a_ph->ph_root); \
- } \
-} while (0)
-
-#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
- a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
- if (lchild == NULL) { \
- r_phn = NULL; \
- } else { \
- ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
- r_phn); \
- } \
-} while (0)
-
-/*
- * The ph_proto() macro generates function prototypes that correspond to the
- * functions generated by an equivalently parameterized call to ph_gen().
- */
-#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
-a_attr void a_prefix##new(a_ph_type *ph); \
-a_attr bool a_prefix##empty(a_ph_type *ph); \
-a_attr a_type *a_prefix##first(a_ph_type *ph); \
-a_attr a_type *a_prefix##any(a_ph_type *ph); \
-a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
-a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
-a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
-a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
-
-/*
- * The ph_gen() macro generates a type-specific pairing heap implementation,
- * based on the above cpp macros.
- */
-#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
-a_attr void \
-a_prefix##new(a_ph_type *ph) { \
- memset(ph, 0, sizeof(ph(a_type))); \
-} \
-a_attr bool \
-a_prefix##empty(a_ph_type *ph) { \
- return (ph->ph_root == NULL); \
-} \
-a_attr a_type * \
-a_prefix##first(a_ph_type *ph) { \
- if (ph->ph_root == NULL) { \
- return NULL; \
- } \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- return ph->ph_root; \
-} \
-a_attr a_type * \
-a_prefix##any(a_ph_type *ph) { \
- if (ph->ph_root == NULL) { \
- return NULL; \
- } \
- a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
- if (aux != NULL) { \
- return aux; \
- } \
- return ph->ph_root; \
-} \
-a_attr void \
-a_prefix##insert(a_ph_type *ph, a_type *phn) { \
- memset(&phn->a_field, 0, sizeof(phn(a_type))); \
- \
- /* \
- * Treat the root as an aux list during insertion, and lazily \
- * merge during a_prefix##remove_first(). For elements that \
- * are inserted, then removed via a_prefix##remove() before the \
- * aux list is ever processed, this makes insert/remove \
- * constant-time, whereas eager merging would make insert \
- * O(log n). \
- */ \
- if (ph->ph_root == NULL) { \
- ph->ph_root = phn; \
- } else { \
- phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
- a_field, ph->ph_root)); \
- if (phn_next_get(a_type, a_field, ph->ph_root) != \
- NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, ph->ph_root), \
- phn); \
- } \
- phn_prev_set(a_type, a_field, phn, ph->ph_root); \
- phn_next_set(a_type, a_field, ph->ph_root, phn); \
- } \
-} \
-a_attr a_type * \
-a_prefix##remove_first(a_ph_type *ph) { \
- a_type *ret; \
- \
- if (ph->ph_root == NULL) { \
- return NULL; \
- } \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- \
- ret = ph->ph_root; \
- \
- ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
- ph->ph_root); \
- \
- return ret; \
-} \
-a_attr a_type * \
-a_prefix##remove_any(a_ph_type *ph) { \
- /* \
- * Remove the most recently inserted aux list element, or the \
- * root if the aux list is empty. This has the effect of \
- * behaving as a LIFO (and insertion/removal is therefore \
- * constant-time) if a_prefix##[remove_]first() are never \
- * called. \
- */ \
- if (ph->ph_root == NULL) { \
- return NULL; \
- } \
- a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
- if (ret != NULL) { \
- a_type *aux = phn_next_get(a_type, a_field, ret); \
- phn_next_set(a_type, a_field, ph->ph_root, aux); \
- if (aux != NULL) { \
- phn_prev_set(a_type, a_field, aux, \
- ph->ph_root); \
- } \
- return ret; \
- } \
- ret = ph->ph_root; \
- ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
- ph->ph_root); \
- return ret; \
-} \
-a_attr void \
-a_prefix##remove(a_ph_type *ph, a_type *phn) { \
- a_type *replace, *parent; \
- \
- if (ph->ph_root == phn) { \
- /* \
- * We can delete from aux list without merging it, but \
- * we need to merge if we are dealing with the root \
- * node and it has children. \
- */ \
- if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
- ph->ph_root = phn_next_get(a_type, a_field, \
- phn); \
- if (ph->ph_root != NULL) { \
- phn_prev_set(a_type, a_field, \
- ph->ph_root, NULL); \
- } \
- return; \
- } \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- if (ph->ph_root == phn) { \
- ph_merge_children(a_type, a_field, ph->ph_root, \
- a_cmp, ph->ph_root); \
- return; \
- } \
- } \
- \
- /* Get parent (if phn is leftmost child) before mutating. */ \
- if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
- if (phn_lchild_get(a_type, a_field, parent) != phn) { \
- parent = NULL; \
- } \
- } \
- /* Find a possible replacement node, and link to parent. */ \
- ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
- /* Set next/prev for sibling linked list. */ \
- if (replace != NULL) { \
- if (parent != NULL) { \
- phn_prev_set(a_type, a_field, replace, parent); \
- phn_lchild_set(a_type, a_field, parent, \
- replace); \
- } else { \
- phn_prev_set(a_type, a_field, replace, \
- phn_prev_get(a_type, a_field, phn)); \
- if (phn_prev_get(a_type, a_field, phn) != \
- NULL) { \
- phn_next_set(a_type, a_field, \
- phn_prev_get(a_type, a_field, phn), \
- replace); \
- } \
- } \
- phn_next_set(a_type, a_field, replace, \
- phn_next_get(a_type, a_field, phn)); \
- if (phn_next_get(a_type, a_field, phn) != NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, phn), \
- replace); \
- } \
- } else { \
- if (parent != NULL) { \
- a_type *next = phn_next_get(a_type, a_field, \
- phn); \
- phn_lchild_set(a_type, a_field, parent, next); \
- if (next != NULL) { \
- phn_prev_set(a_type, a_field, next, \
- parent); \
- } \
- } else { \
- assert(phn_prev_get(a_type, a_field, phn) != \
- NULL); \
- phn_next_set(a_type, a_field, \
- phn_prev_get(a_type, a_field, phn), \
- phn_next_get(a_type, a_field, phn)); \
- } \
- if (phn_next_get(a_type, a_field, phn) != NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, phn), \
- phn_prev_get(a_type, a_field, phn)); \
- } \
- } \
-}
-
-#endif /* PH_H_ */
+/*
+ * A Pairing Heap implementation.
+ *
+ * "The Pairing Heap: A New Form of Self-Adjusting Heap"
+ * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
+ *
+ * With an auxiliary twopass list, as described in a follow-on paper.
+ *
+ * "Pairing Heaps: Experiments and Analysis"
+ * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
+ *
+ *******************************************************************************
+ */
+
+#ifndef PH_H_
+#define PH_H_
+
+/* Node structure. */
+#define phn(a_type) \
+struct { \
+ a_type *phn_prev; \
+ a_type *phn_next; \
+ a_type *phn_lchild; \
+}
+
+/* Root structure. */
+#define ph(a_type) \
+struct { \
+ a_type *ph_root; \
+}
+
+/* Internal utility macros. */
+#define phn_lchild_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_lchild)
+#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
+ a_phn->a_field.phn_lchild = a_lchild; \
+} while (0)
+
+#define phn_next_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_next)
+#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
+ a_phn->a_field.phn_prev = a_prev; \
+} while (0)
+
+#define phn_prev_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_prev)
+#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
+ a_phn->a_field.phn_next = a_next; \
+} while (0)
+
+#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
+ a_type *phn0child; \
+ \
+ assert(a_phn0 != NULL); \
+ assert(a_phn1 != NULL); \
+ assert(a_cmp(a_phn0, a_phn1) <= 0); \
+ \
+ phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
+ phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
+ phn_next_set(a_type, a_field, a_phn1, phn0child); \
+ if (phn0child != NULL) { \
+ phn_prev_set(a_type, a_field, phn0child, a_phn1); \
+ } \
+ phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
+} while (0)
+
+#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
+ if (a_phn0 == NULL) { \
+ r_phn = a_phn1; \
+ } else if (a_phn1 == NULL) { \
+ r_phn = a_phn0; \
+ } else if (a_cmp(a_phn0, a_phn1) < 0) { \
+ phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
+ a_cmp); \
+ r_phn = a_phn0; \
+ } else { \
+ phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
+ a_cmp); \
+ r_phn = a_phn1; \
+ } \
+} while (0)
+
+#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+ a_type *head = NULL; \
+ a_type *tail = NULL; \
+ a_type *phn0 = a_phn; \
+ a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
+ \
+ /* \
+ * Multipass merge, wherein the first two elements of a FIFO \
+ * are repeatedly merged, and each result is appended to the \
+ * singly linked FIFO, until the FIFO contains only a single \
+ * element. We start with a sibling list but no reference to \
+ * its tail, so we do a single pass over the sibling list to \
+ * populate the FIFO. \
+ */ \
+ if (phn1 != NULL) { \
+ a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
+ if (phnrest != NULL) { \
+ phn_prev_set(a_type, a_field, phnrest, NULL); \
+ } \
+ phn_prev_set(a_type, a_field, phn0, NULL); \
+ phn_next_set(a_type, a_field, phn0, NULL); \
+ phn_prev_set(a_type, a_field, phn1, NULL); \
+ phn_next_set(a_type, a_field, phn1, NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
+ head = tail = phn0; \
+ phn0 = phnrest; \
+ while (phn0 != NULL) { \
+ phn1 = phn_next_get(a_type, a_field, phn0); \
+ if (phn1 != NULL) { \
+ phnrest = phn_next_get(a_type, a_field, \
+ phn1); \
+ if (phnrest != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phnrest, NULL); \
+ } \
+ phn_prev_set(a_type, a_field, phn0, \
+ NULL); \
+ phn_next_set(a_type, a_field, phn0, \
+ NULL); \
+ phn_prev_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_next_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, \
+ a_cmp, phn0); \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = phnrest; \
+ } else { \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = NULL; \
+ } \
+ } \
+ phn0 = head; \
+ phn1 = phn_next_get(a_type, a_field, phn0); \
+ if (phn1 != NULL) { \
+ while (true) { \
+ head = phn_next_get(a_type, a_field, \
+ phn1); \
+ assert(phn_prev_get(a_type, a_field, \
+ phn0) == NULL); \
+ phn_next_set(a_type, a_field, phn0, \
+ NULL); \
+ assert(phn_prev_get(a_type, a_field, \
+ phn1) == NULL); \
+ phn_next_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, \
+ a_cmp, phn0); \
+ if (head == NULL) { \
+ break; \
+ } \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = head; \
+ phn1 = phn_next_get(a_type, a_field, \
+ phn0); \
+ } \
+ } \
+ } \
+ r_phn = phn0; \
+} while (0)
+
+#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
+ a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
+ if (phn != NULL) { \
+ phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
+ phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
+ phn_prev_set(a_type, a_field, phn, NULL); \
+ ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
+ assert(phn_next_get(a_type, a_field, phn) == NULL); \
+ phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
+ a_ph->ph_root); \
+ } \
+} while (0)
+
+#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+ a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
+ if (lchild == NULL) { \
+ r_phn = NULL; \
+ } else { \
+ ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
+ r_phn); \
+ } \
+} while (0)
+
+/*
+ * The ph_proto() macro generates function prototypes that correspond to the
+ * functions generated by an equivalently parameterized call to ph_gen().
+ */
+#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
+a_attr void a_prefix##new(a_ph_type *ph); \
+a_attr bool a_prefix##empty(a_ph_type *ph); \
+a_attr a_type *a_prefix##first(a_ph_type *ph); \
+a_attr a_type *a_prefix##any(a_ph_type *ph); \
+a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
+a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
+a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
+a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
+
+/*
+ * The ph_gen() macro generates a type-specific pairing heap implementation,
+ * based on the above cpp macros.
+ */
+#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
+a_attr void \
+a_prefix##new(a_ph_type *ph) { \
+ memset(ph, 0, sizeof(ph(a_type))); \
+} \
+a_attr bool \
+a_prefix##empty(a_ph_type *ph) { \
+ return (ph->ph_root == NULL); \
+} \
+a_attr a_type * \
+a_prefix##first(a_ph_type *ph) { \
+ if (ph->ph_root == NULL) { \
+ return NULL; \
+ } \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ return ph->ph_root; \
+} \
+a_attr a_type * \
+a_prefix##any(a_ph_type *ph) { \
+ if (ph->ph_root == NULL) { \
+ return NULL; \
+ } \
+ a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
+ if (aux != NULL) { \
+ return aux; \
+ } \
+ return ph->ph_root; \
+} \
+a_attr void \
+a_prefix##insert(a_ph_type *ph, a_type *phn) { \
+ memset(&phn->a_field, 0, sizeof(phn(a_type))); \
+ \
+ /* \
+ * Treat the root as an aux list during insertion, and lazily \
+ * merge during a_prefix##remove_first(). For elements that \
+ * are inserted, then removed via a_prefix##remove() before the \
+ * aux list is ever processed, this makes insert/remove \
+ * constant-time, whereas eager merging would make insert \
+ * O(log n). \
+ */ \
+ if (ph->ph_root == NULL) { \
+ ph->ph_root = phn; \
+ } else { \
+ phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
+ a_field, ph->ph_root)); \
+ if (phn_next_get(a_type, a_field, ph->ph_root) != \
+ NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, ph->ph_root), \
+ phn); \
+ } \
+ phn_prev_set(a_type, a_field, phn, ph->ph_root); \
+ phn_next_set(a_type, a_field, ph->ph_root, phn); \
+ } \
+} \
+a_attr a_type * \
+a_prefix##remove_first(a_ph_type *ph) { \
+ a_type *ret; \
+ \
+ if (ph->ph_root == NULL) { \
+ return NULL; \
+ } \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ \
+ ret = ph->ph_root; \
+ \
+ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
+ ph->ph_root); \
+ \
+ return ret; \
+} \
+a_attr a_type * \
+a_prefix##remove_any(a_ph_type *ph) { \
+ /* \
+ * Remove the most recently inserted aux list element, or the \
+ * root if the aux list is empty. This has the effect of \
+ * behaving as a LIFO (and insertion/removal is therefore \
+ * constant-time) if a_prefix##[remove_]first() are never \
+ * called. \
+ */ \
+ if (ph->ph_root == NULL) { \
+ return NULL; \
+ } \
+ a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
+ if (ret != NULL) { \
+ a_type *aux = phn_next_get(a_type, a_field, ret); \
+ phn_next_set(a_type, a_field, ph->ph_root, aux); \
+ if (aux != NULL) { \
+ phn_prev_set(a_type, a_field, aux, \
+ ph->ph_root); \
+ } \
+ return ret; \
+ } \
+ ret = ph->ph_root; \
+ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
+ ph->ph_root); \
+ return ret; \
+} \
+a_attr void \
+a_prefix##remove(a_ph_type *ph, a_type *phn) { \
+ a_type *replace, *parent; \
+ \
+ if (ph->ph_root == phn) { \
+ /* \
+ * We can delete from aux list without merging it, but \
+ * we need to merge if we are dealing with the root \
+ * node and it has children. \
+ */ \
+ if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
+ ph->ph_root = phn_next_get(a_type, a_field, \
+ phn); \
+ if (ph->ph_root != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ ph->ph_root, NULL); \
+ } \
+ return; \
+ } \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ if (ph->ph_root == phn) { \
+ ph_merge_children(a_type, a_field, ph->ph_root, \
+ a_cmp, ph->ph_root); \
+ return; \
+ } \
+ } \
+ \
+ /* Get parent (if phn is leftmost child) before mutating. */ \
+ if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
+ if (phn_lchild_get(a_type, a_field, parent) != phn) { \
+ parent = NULL; \
+ } \
+ } \
+ /* Find a possible replacement node, and link to parent. */ \
+ ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
+ /* Set next/prev for sibling linked list. */ \
+ if (replace != NULL) { \
+ if (parent != NULL) { \
+ phn_prev_set(a_type, a_field, replace, parent); \
+ phn_lchild_set(a_type, a_field, parent, \
+ replace); \
+ } else { \
+ phn_prev_set(a_type, a_field, replace, \
+ phn_prev_get(a_type, a_field, phn)); \
+ if (phn_prev_get(a_type, a_field, phn) != \
+ NULL) { \
+ phn_next_set(a_type, a_field, \
+ phn_prev_get(a_type, a_field, phn), \
+ replace); \
+ } \
+ } \
+ phn_next_set(a_type, a_field, replace, \
+ phn_next_get(a_type, a_field, phn)); \
+ if (phn_next_get(a_type, a_field, phn) != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, phn), \
+ replace); \
+ } \
+ } else { \
+ if (parent != NULL) { \
+ a_type *next = phn_next_get(a_type, a_field, \
+ phn); \
+ phn_lchild_set(a_type, a_field, parent, next); \
+ if (next != NULL) { \
+ phn_prev_set(a_type, a_field, next, \
+ parent); \
+ } \
+ } else { \
+ assert(phn_prev_get(a_type, a_field, phn) != \
+ NULL); \
+ phn_next_set(a_type, a_field, \
+ phn_prev_get(a_type, a_field, phn), \
+ phn_next_get(a_type, a_field, phn)); \
+ } \
+ if (phn_next_get(a_type, a_field, phn) != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, phn), \
+ phn_prev_get(a_type, a_field, phn)); \
+ } \
+ } \
+}
+
+#endif /* PH_H_ */
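
For reference only (an illustrative sketch, not part of the commit or of jemalloc itself): the header above is instantiated by invoking ph_proto()/ph_gen() for a concrete node type, as jemalloc does for extent_t (see the extent_heap_* symbols in the namespace list below). The node type node_t, the linkage field name ph_link, and the comparator node_cmp used here are hypothetical names chosen for this example.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "ph.h"   /* the header shown in the diff above */

typedef struct node_s node_t;
struct node_s {
	uint64_t	key;
	phn(node_t)	ph_link;	/* embedded pairing-heap linkage */
};

/* Root type generated from the ph() macro. */
typedef ph(node_t) node_heap_t;

/* Comparator: negative/zero/positive, memcmp()-style. */
static int
node_cmp(const node_t *a, const node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

/* Generate node_heap_new(), node_heap_insert(), etc. */
ph_proto(static, node_heap_, node_heap_t, node_t)
ph_gen(static, node_heap_, node_heap_t, node_t, ph_link, node_cmp)

int
main(void) {
	node_heap_t heap;
	node_t a = {.key = 3}, b = {.key = 1}, c = {.key = 2};

	node_heap_new(&heap);
	/* Inserts only splice onto the aux list (constant time). */
	node_heap_insert(&heap, &a);
	node_heap_insert(&heap, &b);
	node_heap_insert(&heap, &c);

	/* remove_first() runs the lazy multipass merge, then pops the min. */
	node_t *min = node_heap_remove_first(&heap);
	assert(min == &b);

	/* remove() also works on arbitrary nodes still in the heap. */
	node_heap_remove(&heap, &c);
	assert(node_heap_remove_first(&heap) == &a);
	assert(node_heap_empty(&heap));
	return 0;
}

The sketch exercises the design choice documented in the comments above: node_heap_insert() never restructures the heap, so the O(log n) pairing work is deferred until node_heap_remove_first() forces ph_merge_aux()/ph_merge_siblings() to run.
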
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace-linux.h b/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace-linux.h
index e7a783a5ee..f7ef7dfa81 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace-linux.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace-linux.h
@@ -1,422 +1,422 @@
-#define a0dalloc JEMALLOC_N(a0dalloc)
-#define a0malloc JEMALLOC_N(a0malloc)
-#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
-#define arena_cleanup JEMALLOC_N(arena_cleanup)
-#define arena_init JEMALLOC_N(arena_init)
-#define arena_migrate JEMALLOC_N(arena_migrate)
-#define arena_set JEMALLOC_N(arena_set)
-#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
-#define arenas JEMALLOC_N(arenas)
-#define arenas_lock JEMALLOC_N(arenas_lock)
-#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
-#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
-#define bootstrap_free JEMALLOC_N(bootstrap_free)
-#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
-#define free_default JEMALLOC_N(free_default)
-#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
-#define je_sdallocx_noflags JEMALLOC_N(je_sdallocx_noflags)
-#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
-#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
-#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
-#define malloc_default JEMALLOC_N(malloc_default)
-#define malloc_initialized JEMALLOC_N(malloc_initialized)
-#define malloc_slow JEMALLOC_N(malloc_slow)
-#define manual_arena_base JEMALLOC_N(manual_arena_base)
-#define narenas_auto JEMALLOC_N(narenas_auto)
-#define narenas_total_get JEMALLOC_N(narenas_total_get)
-#define ncpus JEMALLOC_N(ncpus)
-#define opt_abort JEMALLOC_N(opt_abort)
-#define opt_abort_conf JEMALLOC_N(opt_abort_conf)
-#define opt_confirm_conf JEMALLOC_N(opt_confirm_conf)
-#define opt_junk JEMALLOC_N(opt_junk)
-#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
-#define opt_junk_free JEMALLOC_N(opt_junk_free)
-#define opt_narenas JEMALLOC_N(opt_narenas)
-#define opt_utrace JEMALLOC_N(opt_utrace)
-#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
-#define opt_zero JEMALLOC_N(opt_zero)
-#define sdallocx_default JEMALLOC_N(sdallocx_default)
-#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
-#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
-#define arena_bin_choose_lock JEMALLOC_N(arena_bin_choose_lock)
-#define arena_boot JEMALLOC_N(arena_boot)
-#define arena_choose_huge JEMALLOC_N(arena_choose_huge)
-#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
-#define arena_dalloc_promoted JEMALLOC_N(arena_dalloc_promoted)
-#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
-#define arena_decay JEMALLOC_N(arena_decay)
-#define arena_destroy JEMALLOC_N(arena_destroy)
-#define arena_dirty_decay_ms_default_get JEMALLOC_N(arena_dirty_decay_ms_default_get)
-#define arena_dirty_decay_ms_default_set JEMALLOC_N(arena_dirty_decay_ms_default_set)
-#define arena_dirty_decay_ms_get JEMALLOC_N(arena_dirty_decay_ms_get)
-#define arena_dirty_decay_ms_set JEMALLOC_N(arena_dirty_decay_ms_set)
-#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
-#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
-#define arena_extent_alloc_large JEMALLOC_N(arena_extent_alloc_large)
-#define arena_extent_dalloc_large_prep JEMALLOC_N(arena_extent_dalloc_large_prep)
-#define arena_extent_ralloc_large_expand JEMALLOC_N(arena_extent_ralloc_large_expand)
-#define arena_extent_ralloc_large_shrink JEMALLOC_N(arena_extent_ralloc_large_shrink)
-#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
-#define arena_extents_dirty_dalloc JEMALLOC_N(arena_extents_dirty_dalloc)
-#define arena_init_huge JEMALLOC_N(arena_init_huge)
-#define arena_is_huge JEMALLOC_N(arena_is_huge)
-#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
-#define arena_muzzy_decay_ms_default_get JEMALLOC_N(arena_muzzy_decay_ms_default_get)
-#define arena_muzzy_decay_ms_default_set JEMALLOC_N(arena_muzzy_decay_ms_default_set)
-#define arena_muzzy_decay_ms_get JEMALLOC_N(arena_muzzy_decay_ms_get)
-#define arena_muzzy_decay_ms_set JEMALLOC_N(arena_muzzy_decay_ms_set)
-#define arena_new JEMALLOC_N(arena_new)
-#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
-#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
-#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
-#define arena_palloc JEMALLOC_N(arena_palloc)
-#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
-#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
-#define arena_prefork0 JEMALLOC_N(arena_prefork0)
-#define arena_prefork1 JEMALLOC_N(arena_prefork1)
-#define arena_prefork2 JEMALLOC_N(arena_prefork2)
-#define arena_prefork3 JEMALLOC_N(arena_prefork3)
-#define arena_prefork4 JEMALLOC_N(arena_prefork4)
-#define arena_prefork5 JEMALLOC_N(arena_prefork5)
-#define arena_prefork6 JEMALLOC_N(arena_prefork6)
-#define arena_prefork7 JEMALLOC_N(arena_prefork7)
-#define arena_prof_promote JEMALLOC_N(arena_prof_promote)
-#define arena_ralloc JEMALLOC_N(arena_ralloc)
-#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
-#define arena_reset JEMALLOC_N(arena_reset)
-#define arena_retain_grow_limit_get_set JEMALLOC_N(arena_retain_grow_limit_get_set)
-#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
-#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
-#define h_steps JEMALLOC_N(h_steps)
-#define opt_dirty_decay_ms JEMALLOC_N(opt_dirty_decay_ms)
-#define opt_muzzy_decay_ms JEMALLOC_N(opt_muzzy_decay_ms)
-#define opt_oversize_threshold JEMALLOC_N(opt_oversize_threshold)
-#define opt_percpu_arena JEMALLOC_N(opt_percpu_arena)
-#define oversize_threshold JEMALLOC_N(oversize_threshold)
-#define percpu_arena_mode_names JEMALLOC_N(percpu_arena_mode_names)
-#define background_thread_boot0 JEMALLOC_N(background_thread_boot0)
-#define background_thread_boot1 JEMALLOC_N(background_thread_boot1)
-#define background_thread_create JEMALLOC_N(background_thread_create)
-#define background_thread_ctl_init JEMALLOC_N(background_thread_ctl_init)
-#define background_thread_enabled_state JEMALLOC_N(background_thread_enabled_state)
-#define background_thread_info JEMALLOC_N(background_thread_info)
-#define background_thread_interval_check JEMALLOC_N(background_thread_interval_check)
-#define background_thread_lock JEMALLOC_N(background_thread_lock)
-#define background_thread_postfork_child JEMALLOC_N(background_thread_postfork_child)
-#define background_thread_postfork_parent JEMALLOC_N(background_thread_postfork_parent)
-#define background_thread_prefork0 JEMALLOC_N(background_thread_prefork0)
-#define background_thread_prefork1 JEMALLOC_N(background_thread_prefork1)
-#define background_thread_stats_read JEMALLOC_N(background_thread_stats_read)
-#define background_threads_disable JEMALLOC_N(background_threads_disable)
-#define background_threads_enable JEMALLOC_N(background_threads_enable)
-#define max_background_threads JEMALLOC_N(max_background_threads)
-#define n_background_threads JEMALLOC_N(n_background_threads)
-#define opt_background_thread JEMALLOC_N(opt_background_thread)
-#define opt_max_background_threads JEMALLOC_N(opt_max_background_threads)
-#define pthread_create_wrapper JEMALLOC_N(pthread_create_wrapper)
-#define b0get JEMALLOC_N(b0get)
-#define base_alloc JEMALLOC_N(base_alloc)
-#define base_alloc_extent JEMALLOC_N(base_alloc_extent)
-#define base_boot JEMALLOC_N(base_boot)
-#define base_delete JEMALLOC_N(base_delete)
-#define base_extent_hooks_get JEMALLOC_N(base_extent_hooks_get)
-#define base_extent_hooks_set JEMALLOC_N(base_extent_hooks_set)
-#define base_new JEMALLOC_N(base_new)
-#define base_postfork_child JEMALLOC_N(base_postfork_child)
-#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
-#define base_prefork JEMALLOC_N(base_prefork)
-#define base_stats_get JEMALLOC_N(base_stats_get)
-#define metadata_thp_mode_names JEMALLOC_N(metadata_thp_mode_names)
-#define opt_metadata_thp JEMALLOC_N(opt_metadata_thp)
-#define bin_boot JEMALLOC_N(bin_boot)
-#define bin_infos JEMALLOC_N(bin_infos)
-#define bin_init JEMALLOC_N(bin_init)
-#define bin_postfork_child JEMALLOC_N(bin_postfork_child)
-#define bin_postfork_parent JEMALLOC_N(bin_postfork_parent)
-#define bin_prefork JEMALLOC_N(bin_prefork)
-#define bin_shard_sizes_boot JEMALLOC_N(bin_shard_sizes_boot)
-#define bin_update_shard_size JEMALLOC_N(bin_update_shard_size)
-#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
-#define bitmap_init JEMALLOC_N(bitmap_init)
-#define bitmap_size JEMALLOC_N(bitmap_size)
-#define ckh_count JEMALLOC_N(ckh_count)
-#define ckh_delete JEMALLOC_N(ckh_delete)
-#define ckh_insert JEMALLOC_N(ckh_insert)
-#define ckh_iter JEMALLOC_N(ckh_iter)
-#define ckh_new JEMALLOC_N(ckh_new)
-#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
-#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
-#define ckh_remove JEMALLOC_N(ckh_remove)
-#define ckh_search JEMALLOC_N(ckh_search)
-#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
-#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
-#define ctl_boot JEMALLOC_N(ctl_boot)
-#define ctl_bymib JEMALLOC_N(ctl_bymib)
-#define ctl_byname JEMALLOC_N(ctl_byname)
-#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
-#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
-#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
-#define ctl_prefork JEMALLOC_N(ctl_prefork)
-#define div_init JEMALLOC_N(div_init)
-#define extent_alloc JEMALLOC_N(extent_alloc)
-#define extent_alloc_wrapper JEMALLOC_N(extent_alloc_wrapper)
-#define extent_avail_any JEMALLOC_N(extent_avail_any)
-#define extent_avail_empty JEMALLOC_N(extent_avail_empty)
-#define extent_avail_first JEMALLOC_N(extent_avail_first)
-#define extent_avail_insert JEMALLOC_N(extent_avail_insert)
-#define extent_avail_new JEMALLOC_N(extent_avail_new)
-#define extent_avail_remove JEMALLOC_N(extent_avail_remove)
-#define extent_avail_remove_any JEMALLOC_N(extent_avail_remove_any)
-#define extent_avail_remove_first JEMALLOC_N(extent_avail_remove_first)
-#define extent_boot JEMALLOC_N(extent_boot)
-#define extent_commit_wrapper JEMALLOC_N(extent_commit_wrapper)
-#define extent_dalloc JEMALLOC_N(extent_dalloc)
-#define extent_dalloc_gap JEMALLOC_N(extent_dalloc_gap)
-#define extent_dalloc_wrapper JEMALLOC_N(extent_dalloc_wrapper)
-#define extent_decommit_wrapper JEMALLOC_N(extent_decommit_wrapper)
-#define extent_destroy_wrapper JEMALLOC_N(extent_destroy_wrapper)
-#define extent_heap_any JEMALLOC_N(extent_heap_any)
-#define extent_heap_empty JEMALLOC_N(extent_heap_empty)
-#define extent_heap_first JEMALLOC_N(extent_heap_first)
-#define extent_heap_insert JEMALLOC_N(extent_heap_insert)
-#define extent_heap_new JEMALLOC_N(extent_heap_new)
-#define extent_heap_remove JEMALLOC_N(extent_heap_remove)
-#define extent_heap_remove_any JEMALLOC_N(extent_heap_remove_any)
-#define extent_heap_remove_first JEMALLOC_N(extent_heap_remove_first)
-#define extent_hooks_default JEMALLOC_N(extent_hooks_default)
-#define extent_hooks_get JEMALLOC_N(extent_hooks_get)
-#define extent_hooks_set JEMALLOC_N(extent_hooks_set)
-#define extent_merge_wrapper JEMALLOC_N(extent_merge_wrapper)
-#define extent_mutex_pool JEMALLOC_N(extent_mutex_pool)
-#define extent_purge_forced_wrapper JEMALLOC_N(extent_purge_forced_wrapper)
-#define extent_purge_lazy_wrapper JEMALLOC_N(extent_purge_lazy_wrapper)
-#define extent_split_wrapper JEMALLOC_N(extent_split_wrapper)
-#define extent_util_stats_get JEMALLOC_N(extent_util_stats_get)
-#define extent_util_stats_verbose_get JEMALLOC_N(extent_util_stats_verbose_get)
-#define extents_alloc JEMALLOC_N(extents_alloc)
-#define extents_dalloc JEMALLOC_N(extents_dalloc)
-#define extents_evict JEMALLOC_N(extents_evict)
-#define extents_init JEMALLOC_N(extents_init)
-#define extents_nbytes_get JEMALLOC_N(extents_nbytes_get)
-#define extents_nextents_get JEMALLOC_N(extents_nextents_get)
-#define extents_npages_get JEMALLOC_N(extents_npages_get)
-#define extents_postfork_child JEMALLOC_N(extents_postfork_child)
-#define extents_postfork_parent JEMALLOC_N(extents_postfork_parent)
-#define extents_prefork JEMALLOC_N(extents_prefork)
-#define extents_rtree JEMALLOC_N(extents_rtree)
-#define extents_state_get JEMALLOC_N(extents_state_get)
-#define opt_lg_extent_max_active_fit JEMALLOC_N(opt_lg_extent_max_active_fit)
-#define dss_prec_names JEMALLOC_N(dss_prec_names)
-#define extent_alloc_dss JEMALLOC_N(extent_alloc_dss)
-#define extent_dss_boot JEMALLOC_N(extent_dss_boot)
-#define extent_dss_mergeable JEMALLOC_N(extent_dss_mergeable)
-#define extent_dss_prec_get JEMALLOC_N(extent_dss_prec_get)
-#define extent_dss_prec_set JEMALLOC_N(extent_dss_prec_set)
-#define extent_in_dss JEMALLOC_N(extent_in_dss)
-#define opt_dss JEMALLOC_N(opt_dss)
-#define extent_alloc_mmap JEMALLOC_N(extent_alloc_mmap)
-#define extent_dalloc_mmap JEMALLOC_N(extent_dalloc_mmap)
-#define opt_retain JEMALLOC_N(opt_retain)
-#define hook_boot JEMALLOC_N(hook_boot)
-#define hook_install JEMALLOC_N(hook_install)
-#define hook_invoke_alloc JEMALLOC_N(hook_invoke_alloc)
-#define hook_invoke_dalloc JEMALLOC_N(hook_invoke_dalloc)
-#define hook_invoke_expand JEMALLOC_N(hook_invoke_expand)
-#define hook_remove JEMALLOC_N(hook_remove)
-#define large_dalloc JEMALLOC_N(large_dalloc)
-#define large_dalloc_finish JEMALLOC_N(large_dalloc_finish)
-#define large_dalloc_junk JEMALLOC_N(large_dalloc_junk)
-#define large_dalloc_maybe_junk JEMALLOC_N(large_dalloc_maybe_junk)
-#define large_dalloc_prep_junked_locked JEMALLOC_N(large_dalloc_prep_junked_locked)
-#define large_malloc JEMALLOC_N(large_malloc)
-#define large_palloc JEMALLOC_N(large_palloc)
-#define large_prof_alloc_time_get JEMALLOC_N(large_prof_alloc_time_get)
-#define large_prof_alloc_time_set JEMALLOC_N(large_prof_alloc_time_set)
-#define large_prof_tctx_get JEMALLOC_N(large_prof_tctx_get)
-#define large_prof_tctx_reset JEMALLOC_N(large_prof_tctx_reset)
-#define large_prof_tctx_set JEMALLOC_N(large_prof_tctx_set)
-#define large_ralloc JEMALLOC_N(large_ralloc)
-#define large_ralloc_no_move JEMALLOC_N(large_ralloc_no_move)
-#define large_salloc JEMALLOC_N(large_salloc)
-#define log_init_done JEMALLOC_N(log_init_done)
-#define log_var_names JEMALLOC_N(log_var_names)
-#define log_var_update_state JEMALLOC_N(log_var_update_state)
-#define buferror JEMALLOC_N(buferror)
-#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
-#define malloc_printf JEMALLOC_N(malloc_printf)
-#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
-#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
-#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
-#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
-#define malloc_write JEMALLOC_N(malloc_write)
-#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
-#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
-#define malloc_mutex_lock_slow JEMALLOC_N(malloc_mutex_lock_slow)
-#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
-#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
-#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
-#define malloc_mutex_prof_data_reset JEMALLOC_N(malloc_mutex_prof_data_reset)
-#define mutex_pool_init JEMALLOC_N(mutex_pool_init)
-#define nstime_add JEMALLOC_N(nstime_add)
-#define nstime_compare JEMALLOC_N(nstime_compare)
-#define nstime_copy JEMALLOC_N(nstime_copy)
-#define nstime_divide JEMALLOC_N(nstime_divide)
-#define nstime_iadd JEMALLOC_N(nstime_iadd)
-#define nstime_idivide JEMALLOC_N(nstime_idivide)
-#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
-#define nstime_init JEMALLOC_N(nstime_init)
-#define nstime_init2 JEMALLOC_N(nstime_init2)
-#define nstime_isubtract JEMALLOC_N(nstime_isubtract)
-#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
-#define nstime_msec JEMALLOC_N(nstime_msec)
-#define nstime_ns JEMALLOC_N(nstime_ns)
-#define nstime_nsec JEMALLOC_N(nstime_nsec)
-#define nstime_sec JEMALLOC_N(nstime_sec)
-#define nstime_subtract JEMALLOC_N(nstime_subtract)
-#define nstime_update JEMALLOC_N(nstime_update)
-#define init_system_thp_mode JEMALLOC_N(init_system_thp_mode)
-#define opt_thp JEMALLOC_N(opt_thp)
-#define pages_boot JEMALLOC_N(pages_boot)
-#define pages_commit JEMALLOC_N(pages_commit)
-#define pages_decommit JEMALLOC_N(pages_decommit)
-#define pages_dodump JEMALLOC_N(pages_dodump)
-#define pages_dontdump JEMALLOC_N(pages_dontdump)
-#define pages_huge JEMALLOC_N(pages_huge)
-#define pages_map JEMALLOC_N(pages_map)
-#define pages_nohuge JEMALLOC_N(pages_nohuge)
-#define pages_purge_forced JEMALLOC_N(pages_purge_forced)
-#define pages_purge_lazy JEMALLOC_N(pages_purge_lazy)
-#define pages_set_thp_state JEMALLOC_N(pages_set_thp_state)
-#define pages_unmap JEMALLOC_N(pages_unmap)
-#define thp_mode_names JEMALLOC_N(thp_mode_names)
-#define bt2gctx_mtx JEMALLOC_N(bt2gctx_mtx)
-#define bt_init JEMALLOC_N(bt_init)
-#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
-#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
-#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
-#define opt_prof JEMALLOC_N(opt_prof)
-#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
-#define opt_prof_active JEMALLOC_N(opt_prof_active)
-#define opt_prof_final JEMALLOC_N(opt_prof_final)
-#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
-#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
-#define opt_prof_log JEMALLOC_N(opt_prof_log)
-#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
-#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
-#define prof_accum_init JEMALLOC_N(prof_accum_init)
-#define prof_active JEMALLOC_N(prof_active)
-#define prof_active_get JEMALLOC_N(prof_active_get)
-#define prof_active_set JEMALLOC_N(prof_active_set)
-#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
-#define prof_backtrace JEMALLOC_N(prof_backtrace)
-#define prof_boot0 JEMALLOC_N(prof_boot0)
-#define prof_boot1 JEMALLOC_N(prof_boot1)
-#define prof_boot2 JEMALLOC_N(prof_boot2)
-#define prof_dump_header JEMALLOC_N(prof_dump_header)
-#define prof_dump_open JEMALLOC_N(prof_dump_open)
-#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
-#define prof_gdump JEMALLOC_N(prof_gdump)
-#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
-#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
-#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
-#define prof_idump JEMALLOC_N(prof_idump)
-#define prof_interval JEMALLOC_N(prof_interval)
-#define prof_log_start JEMALLOC_N(prof_log_start)
-#define prof_log_stop JEMALLOC_N(prof_log_stop)
-#define prof_logging_state JEMALLOC_N(prof_logging_state)
-#define prof_lookup JEMALLOC_N(prof_lookup)
-#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
-#define prof_mdump JEMALLOC_N(prof_mdump)
-#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
-#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
-#define prof_prefork0 JEMALLOC_N(prof_prefork0)
-#define prof_prefork1 JEMALLOC_N(prof_prefork1)
-#define prof_reset JEMALLOC_N(prof_reset)
-#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
-#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
-#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
-#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
-#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
-#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
-#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
-#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
-#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
-#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
-#define rtree_ctx_data_init JEMALLOC_N(rtree_ctx_data_init)
-#define rtree_leaf_alloc JEMALLOC_N(rtree_leaf_alloc)
-#define rtree_leaf_dalloc JEMALLOC_N(rtree_leaf_dalloc)
-#define rtree_leaf_elm_lookup_hard JEMALLOC_N(rtree_leaf_elm_lookup_hard)
-#define rtree_new JEMALLOC_N(rtree_new)
-#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc)
-#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc)
-#define safety_check_fail JEMALLOC_N(safety_check_fail)
-#define safety_check_set_abort JEMALLOC_N(safety_check_set_abort)
-#define arena_mutex_names JEMALLOC_N(arena_mutex_names)
-#define global_mutex_names JEMALLOC_N(global_mutex_names)
-#define opt_stats_print JEMALLOC_N(opt_stats_print)
-#define opt_stats_print_opts JEMALLOC_N(opt_stats_print_opts)
-#define stats_print JEMALLOC_N(stats_print)
-#define sc_boot JEMALLOC_N(sc_boot)
-#define sc_data_global JEMALLOC_N(sc_data_global)
-#define sc_data_init JEMALLOC_N(sc_data_init)
-#define sc_data_update_slab_size JEMALLOC_N(sc_data_update_slab_size)
-#define sz_boot JEMALLOC_N(sz_boot)
-#define sz_index2size_tab JEMALLOC_N(sz_index2size_tab)
-#define sz_pind2sz_tab JEMALLOC_N(sz_pind2sz_tab)
-#define sz_size2index_tab JEMALLOC_N(sz_size2index_tab)
-#define nhbins JEMALLOC_N(nhbins)
-#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
-#define opt_tcache JEMALLOC_N(opt_tcache)
-#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
-#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
-#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
-#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
-#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
-#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
-#define tcache_boot JEMALLOC_N(tcache_boot)
-#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
-#define tcache_create_explicit JEMALLOC_N(tcache_create_explicit)
-#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
-#define tcache_flush JEMALLOC_N(tcache_flush)
-#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
-#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
-#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
-#define tcache_prefork JEMALLOC_N(tcache_prefork)
-#define tcache_salloc JEMALLOC_N(tcache_salloc)
-#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
-#define tcaches JEMALLOC_N(tcaches)
-#define tcaches_create JEMALLOC_N(tcaches_create)
-#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
-#define tcaches_flush JEMALLOC_N(tcaches_flush)
-#define tsd_tcache_data_init JEMALLOC_N(tsd_tcache_data_init)
-#define tsd_tcache_enabled_data_init JEMALLOC_N(tsd_tcache_enabled_data_init)
-#define test_hooks_arena_new_hook JEMALLOC_N(test_hooks_arena_new_hook)
-#define test_hooks_libc_hook JEMALLOC_N(test_hooks_libc_hook)
-#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
-#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
-#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
-#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
-#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
-#define tsd_booted JEMALLOC_N(tsd_booted)
-#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
-#define tsd_fetch_slow JEMALLOC_N(tsd_fetch_slow)
-#define tsd_global_slow JEMALLOC_N(tsd_global_slow)
-#define tsd_global_slow_dec JEMALLOC_N(tsd_global_slow_dec)
-#define tsd_global_slow_inc JEMALLOC_N(tsd_global_slow_inc)
-#define tsd_postfork_child JEMALLOC_N(tsd_postfork_child)
-#define tsd_postfork_parent JEMALLOC_N(tsd_postfork_parent)
-#define tsd_prefork JEMALLOC_N(tsd_prefork)
-#define tsd_slow_update JEMALLOC_N(tsd_slow_update)
-#define tsd_state_set JEMALLOC_N(tsd_state_set)
-#define tsd_tls JEMALLOC_N(tsd_tls)
-#define tsd_tsd JEMALLOC_N(tsd_tsd)
-#define witness_depth_error JEMALLOC_N(witness_depth_error)
-#define witness_init JEMALLOC_N(witness_init)
-#define witness_lock_error JEMALLOC_N(witness_lock_error)
-#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
-#define witness_owner_error JEMALLOC_N(witness_owner_error)
-#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
-#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
-#define witness_prefork JEMALLOC_N(witness_prefork)
-#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
+#define a0dalloc JEMALLOC_N(a0dalloc)
+#define a0malloc JEMALLOC_N(a0malloc)
+#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
+#define arena_cleanup JEMALLOC_N(arena_cleanup)
+#define arena_init JEMALLOC_N(arena_init)
+#define arena_migrate JEMALLOC_N(arena_migrate)
+#define arena_set JEMALLOC_N(arena_set)
+#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
+#define arenas JEMALLOC_N(arenas)
+#define arenas_lock JEMALLOC_N(arenas_lock)
+#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
+#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
+#define bootstrap_free JEMALLOC_N(bootstrap_free)
+#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
+#define free_default JEMALLOC_N(free_default)
+#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
+#define je_sdallocx_noflags JEMALLOC_N(je_sdallocx_noflags)
+#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
+#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
+#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
+#define malloc_default JEMALLOC_N(malloc_default)
+#define malloc_initialized JEMALLOC_N(malloc_initialized)
+#define malloc_slow JEMALLOC_N(malloc_slow)
+#define manual_arena_base JEMALLOC_N(manual_arena_base)
+#define narenas_auto JEMALLOC_N(narenas_auto)
+#define narenas_total_get JEMALLOC_N(narenas_total_get)
+#define ncpus JEMALLOC_N(ncpus)
+#define opt_abort JEMALLOC_N(opt_abort)
+#define opt_abort_conf JEMALLOC_N(opt_abort_conf)
+#define opt_confirm_conf JEMALLOC_N(opt_confirm_conf)
+#define opt_junk JEMALLOC_N(opt_junk)
+#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
+#define opt_junk_free JEMALLOC_N(opt_junk_free)
+#define opt_narenas JEMALLOC_N(opt_narenas)
+#define opt_utrace JEMALLOC_N(opt_utrace)
+#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
+#define opt_zero JEMALLOC_N(opt_zero)
+#define sdallocx_default JEMALLOC_N(sdallocx_default)
+#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
+#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
+#define arena_bin_choose_lock JEMALLOC_N(arena_bin_choose_lock)
+#define arena_boot JEMALLOC_N(arena_boot)
+#define arena_choose_huge JEMALLOC_N(arena_choose_huge)
+#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
+#define arena_dalloc_promoted JEMALLOC_N(arena_dalloc_promoted)
+#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
+#define arena_decay JEMALLOC_N(arena_decay)
+#define arena_destroy JEMALLOC_N(arena_destroy)
+#define arena_dirty_decay_ms_default_get JEMALLOC_N(arena_dirty_decay_ms_default_get)
+#define arena_dirty_decay_ms_default_set JEMALLOC_N(arena_dirty_decay_ms_default_set)
+#define arena_dirty_decay_ms_get JEMALLOC_N(arena_dirty_decay_ms_get)
+#define arena_dirty_decay_ms_set JEMALLOC_N(arena_dirty_decay_ms_set)
+#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
+#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
+#define arena_extent_alloc_large JEMALLOC_N(arena_extent_alloc_large)
+#define arena_extent_dalloc_large_prep JEMALLOC_N(arena_extent_dalloc_large_prep)
+#define arena_extent_ralloc_large_expand JEMALLOC_N(arena_extent_ralloc_large_expand)
+#define arena_extent_ralloc_large_shrink JEMALLOC_N(arena_extent_ralloc_large_shrink)
+#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
+#define arena_extents_dirty_dalloc JEMALLOC_N(arena_extents_dirty_dalloc)
+#define arena_init_huge JEMALLOC_N(arena_init_huge)
+#define arena_is_huge JEMALLOC_N(arena_is_huge)
+#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
+#define arena_muzzy_decay_ms_default_get JEMALLOC_N(arena_muzzy_decay_ms_default_get)
+#define arena_muzzy_decay_ms_default_set JEMALLOC_N(arena_muzzy_decay_ms_default_set)
+#define arena_muzzy_decay_ms_get JEMALLOC_N(arena_muzzy_decay_ms_get)
+#define arena_muzzy_decay_ms_set JEMALLOC_N(arena_muzzy_decay_ms_set)
+#define arena_new JEMALLOC_N(arena_new)
+#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
+#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
+#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
+#define arena_palloc JEMALLOC_N(arena_palloc)
+#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
+#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
+#define arena_prefork0 JEMALLOC_N(arena_prefork0)
+#define arena_prefork1 JEMALLOC_N(arena_prefork1)
+#define arena_prefork2 JEMALLOC_N(arena_prefork2)
+#define arena_prefork3 JEMALLOC_N(arena_prefork3)
+#define arena_prefork4 JEMALLOC_N(arena_prefork4)
+#define arena_prefork5 JEMALLOC_N(arena_prefork5)
+#define arena_prefork6 JEMALLOC_N(arena_prefork6)
+#define arena_prefork7 JEMALLOC_N(arena_prefork7)
+#define arena_prof_promote JEMALLOC_N(arena_prof_promote)
+#define arena_ralloc JEMALLOC_N(arena_ralloc)
+#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
+#define arena_reset JEMALLOC_N(arena_reset)
+#define arena_retain_grow_limit_get_set JEMALLOC_N(arena_retain_grow_limit_get_set)
+#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
+#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
+#define h_steps JEMALLOC_N(h_steps)
+#define opt_dirty_decay_ms JEMALLOC_N(opt_dirty_decay_ms)
+#define opt_muzzy_decay_ms JEMALLOC_N(opt_muzzy_decay_ms)
+#define opt_oversize_threshold JEMALLOC_N(opt_oversize_threshold)
+#define opt_percpu_arena JEMALLOC_N(opt_percpu_arena)
+#define oversize_threshold JEMALLOC_N(oversize_threshold)
+#define percpu_arena_mode_names JEMALLOC_N(percpu_arena_mode_names)
+#define background_thread_boot0 JEMALLOC_N(background_thread_boot0)
+#define background_thread_boot1 JEMALLOC_N(background_thread_boot1)
+#define background_thread_create JEMALLOC_N(background_thread_create)
+#define background_thread_ctl_init JEMALLOC_N(background_thread_ctl_init)
+#define background_thread_enabled_state JEMALLOC_N(background_thread_enabled_state)
+#define background_thread_info JEMALLOC_N(background_thread_info)
+#define background_thread_interval_check JEMALLOC_N(background_thread_interval_check)
+#define background_thread_lock JEMALLOC_N(background_thread_lock)
+#define background_thread_postfork_child JEMALLOC_N(background_thread_postfork_child)
+#define background_thread_postfork_parent JEMALLOC_N(background_thread_postfork_parent)
+#define background_thread_prefork0 JEMALLOC_N(background_thread_prefork0)
+#define background_thread_prefork1 JEMALLOC_N(background_thread_prefork1)
+#define background_thread_stats_read JEMALLOC_N(background_thread_stats_read)
+#define background_threads_disable JEMALLOC_N(background_threads_disable)
+#define background_threads_enable JEMALLOC_N(background_threads_enable)
+#define max_background_threads JEMALLOC_N(max_background_threads)
+#define n_background_threads JEMALLOC_N(n_background_threads)
+#define opt_background_thread JEMALLOC_N(opt_background_thread)
+#define opt_max_background_threads JEMALLOC_N(opt_max_background_threads)
+#define pthread_create_wrapper JEMALLOC_N(pthread_create_wrapper)
+#define b0get JEMALLOC_N(b0get)
+#define base_alloc JEMALLOC_N(base_alloc)
+#define base_alloc_extent JEMALLOC_N(base_alloc_extent)
+#define base_boot JEMALLOC_N(base_boot)
+#define base_delete JEMALLOC_N(base_delete)
+#define base_extent_hooks_get JEMALLOC_N(base_extent_hooks_get)
+#define base_extent_hooks_set JEMALLOC_N(base_extent_hooks_set)
+#define base_new JEMALLOC_N(base_new)
+#define base_postfork_child JEMALLOC_N(base_postfork_child)
+#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
+#define base_prefork JEMALLOC_N(base_prefork)
+#define base_stats_get JEMALLOC_N(base_stats_get)
+#define metadata_thp_mode_names JEMALLOC_N(metadata_thp_mode_names)
+#define opt_metadata_thp JEMALLOC_N(opt_metadata_thp)
+#define bin_boot JEMALLOC_N(bin_boot)
+#define bin_infos JEMALLOC_N(bin_infos)
+#define bin_init JEMALLOC_N(bin_init)
+#define bin_postfork_child JEMALLOC_N(bin_postfork_child)
+#define bin_postfork_parent JEMALLOC_N(bin_postfork_parent)
+#define bin_prefork JEMALLOC_N(bin_prefork)
+#define bin_shard_sizes_boot JEMALLOC_N(bin_shard_sizes_boot)
+#define bin_update_shard_size JEMALLOC_N(bin_update_shard_size)
+#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
+#define bitmap_init JEMALLOC_N(bitmap_init)
+#define bitmap_size JEMALLOC_N(bitmap_size)
+#define ckh_count JEMALLOC_N(ckh_count)
+#define ckh_delete JEMALLOC_N(ckh_delete)
+#define ckh_insert JEMALLOC_N(ckh_insert)
+#define ckh_iter JEMALLOC_N(ckh_iter)
+#define ckh_new JEMALLOC_N(ckh_new)
+#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
+#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
+#define ckh_remove JEMALLOC_N(ckh_remove)
+#define ckh_search JEMALLOC_N(ckh_search)
+#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
+#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
+#define ctl_boot JEMALLOC_N(ctl_boot)
+#define ctl_bymib JEMALLOC_N(ctl_bymib)
+#define ctl_byname JEMALLOC_N(ctl_byname)
+#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
+#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
+#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
+#define ctl_prefork JEMALLOC_N(ctl_prefork)
+#define div_init JEMALLOC_N(div_init)
+#define extent_alloc JEMALLOC_N(extent_alloc)
+#define extent_alloc_wrapper JEMALLOC_N(extent_alloc_wrapper)
+#define extent_avail_any JEMALLOC_N(extent_avail_any)
+#define extent_avail_empty JEMALLOC_N(extent_avail_empty)
+#define extent_avail_first JEMALLOC_N(extent_avail_first)
+#define extent_avail_insert JEMALLOC_N(extent_avail_insert)
+#define extent_avail_new JEMALLOC_N(extent_avail_new)
+#define extent_avail_remove JEMALLOC_N(extent_avail_remove)
+#define extent_avail_remove_any JEMALLOC_N(extent_avail_remove_any)
+#define extent_avail_remove_first JEMALLOC_N(extent_avail_remove_first)
+#define extent_boot JEMALLOC_N(extent_boot)
+#define extent_commit_wrapper JEMALLOC_N(extent_commit_wrapper)
+#define extent_dalloc JEMALLOC_N(extent_dalloc)
+#define extent_dalloc_gap JEMALLOC_N(extent_dalloc_gap)
+#define extent_dalloc_wrapper JEMALLOC_N(extent_dalloc_wrapper)
+#define extent_decommit_wrapper JEMALLOC_N(extent_decommit_wrapper)
+#define extent_destroy_wrapper JEMALLOC_N(extent_destroy_wrapper)
+#define extent_heap_any JEMALLOC_N(extent_heap_any)
+#define extent_heap_empty JEMALLOC_N(extent_heap_empty)
+#define extent_heap_first JEMALLOC_N(extent_heap_first)
+#define extent_heap_insert JEMALLOC_N(extent_heap_insert)
+#define extent_heap_new JEMALLOC_N(extent_heap_new)
+#define extent_heap_remove JEMALLOC_N(extent_heap_remove)
+#define extent_heap_remove_any JEMALLOC_N(extent_heap_remove_any)
+#define extent_heap_remove_first JEMALLOC_N(extent_heap_remove_first)
+#define extent_hooks_default JEMALLOC_N(extent_hooks_default)
+#define extent_hooks_get JEMALLOC_N(extent_hooks_get)
+#define extent_hooks_set JEMALLOC_N(extent_hooks_set)
+#define extent_merge_wrapper JEMALLOC_N(extent_merge_wrapper)
+#define extent_mutex_pool JEMALLOC_N(extent_mutex_pool)
+#define extent_purge_forced_wrapper JEMALLOC_N(extent_purge_forced_wrapper)
+#define extent_purge_lazy_wrapper JEMALLOC_N(extent_purge_lazy_wrapper)
+#define extent_split_wrapper JEMALLOC_N(extent_split_wrapper)
+#define extent_util_stats_get JEMALLOC_N(extent_util_stats_get)
+#define extent_util_stats_verbose_get JEMALLOC_N(extent_util_stats_verbose_get)
+#define extents_alloc JEMALLOC_N(extents_alloc)
+#define extents_dalloc JEMALLOC_N(extents_dalloc)
+#define extents_evict JEMALLOC_N(extents_evict)
+#define extents_init JEMALLOC_N(extents_init)
+#define extents_nbytes_get JEMALLOC_N(extents_nbytes_get)
+#define extents_nextents_get JEMALLOC_N(extents_nextents_get)
+#define extents_npages_get JEMALLOC_N(extents_npages_get)
+#define extents_postfork_child JEMALLOC_N(extents_postfork_child)
+#define extents_postfork_parent JEMALLOC_N(extents_postfork_parent)
+#define extents_prefork JEMALLOC_N(extents_prefork)
+#define extents_rtree JEMALLOC_N(extents_rtree)
+#define extents_state_get JEMALLOC_N(extents_state_get)
+#define opt_lg_extent_max_active_fit JEMALLOC_N(opt_lg_extent_max_active_fit)
+#define dss_prec_names JEMALLOC_N(dss_prec_names)
+#define extent_alloc_dss JEMALLOC_N(extent_alloc_dss)
+#define extent_dss_boot JEMALLOC_N(extent_dss_boot)
+#define extent_dss_mergeable JEMALLOC_N(extent_dss_mergeable)
+#define extent_dss_prec_get JEMALLOC_N(extent_dss_prec_get)
+#define extent_dss_prec_set JEMALLOC_N(extent_dss_prec_set)
+#define extent_in_dss JEMALLOC_N(extent_in_dss)
+#define opt_dss JEMALLOC_N(opt_dss)
+#define extent_alloc_mmap JEMALLOC_N(extent_alloc_mmap)
+#define extent_dalloc_mmap JEMALLOC_N(extent_dalloc_mmap)
+#define opt_retain JEMALLOC_N(opt_retain)
+#define hook_boot JEMALLOC_N(hook_boot)
+#define hook_install JEMALLOC_N(hook_install)
+#define hook_invoke_alloc JEMALLOC_N(hook_invoke_alloc)
+#define hook_invoke_dalloc JEMALLOC_N(hook_invoke_dalloc)
+#define hook_invoke_expand JEMALLOC_N(hook_invoke_expand)
+#define hook_remove JEMALLOC_N(hook_remove)
+#define large_dalloc JEMALLOC_N(large_dalloc)
+#define large_dalloc_finish JEMALLOC_N(large_dalloc_finish)
+#define large_dalloc_junk JEMALLOC_N(large_dalloc_junk)
+#define large_dalloc_maybe_junk JEMALLOC_N(large_dalloc_maybe_junk)
+#define large_dalloc_prep_junked_locked JEMALLOC_N(large_dalloc_prep_junked_locked)
+#define large_malloc JEMALLOC_N(large_malloc)
+#define large_palloc JEMALLOC_N(large_palloc)
+#define large_prof_alloc_time_get JEMALLOC_N(large_prof_alloc_time_get)
+#define large_prof_alloc_time_set JEMALLOC_N(large_prof_alloc_time_set)
+#define large_prof_tctx_get JEMALLOC_N(large_prof_tctx_get)
+#define large_prof_tctx_reset JEMALLOC_N(large_prof_tctx_reset)
+#define large_prof_tctx_set JEMALLOC_N(large_prof_tctx_set)
+#define large_ralloc JEMALLOC_N(large_ralloc)
+#define large_ralloc_no_move JEMALLOC_N(large_ralloc_no_move)
+#define large_salloc JEMALLOC_N(large_salloc)
+#define log_init_done JEMALLOC_N(log_init_done)
+#define log_var_names JEMALLOC_N(log_var_names)
+#define log_var_update_state JEMALLOC_N(log_var_update_state)
+#define buferror JEMALLOC_N(buferror)
+#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
+#define malloc_printf JEMALLOC_N(malloc_printf)
+#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
+#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
+#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
+#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
+#define malloc_write JEMALLOC_N(malloc_write)
+#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
+#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
+#define malloc_mutex_lock_slow JEMALLOC_N(malloc_mutex_lock_slow)
+#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
+#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
+#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
+#define malloc_mutex_prof_data_reset JEMALLOC_N(malloc_mutex_prof_data_reset)
+#define mutex_pool_init JEMALLOC_N(mutex_pool_init)
+#define nstime_add JEMALLOC_N(nstime_add)
+#define nstime_compare JEMALLOC_N(nstime_compare)
+#define nstime_copy JEMALLOC_N(nstime_copy)
+#define nstime_divide JEMALLOC_N(nstime_divide)
+#define nstime_iadd JEMALLOC_N(nstime_iadd)
+#define nstime_idivide JEMALLOC_N(nstime_idivide)
+#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
+#define nstime_init JEMALLOC_N(nstime_init)
+#define nstime_init2 JEMALLOC_N(nstime_init2)
+#define nstime_isubtract JEMALLOC_N(nstime_isubtract)
+#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
+#define nstime_msec JEMALLOC_N(nstime_msec)
+#define nstime_ns JEMALLOC_N(nstime_ns)
+#define nstime_nsec JEMALLOC_N(nstime_nsec)
+#define nstime_sec JEMALLOC_N(nstime_sec)
+#define nstime_subtract JEMALLOC_N(nstime_subtract)
+#define nstime_update JEMALLOC_N(nstime_update)
+#define init_system_thp_mode JEMALLOC_N(init_system_thp_mode)
+#define opt_thp JEMALLOC_N(opt_thp)
+#define pages_boot JEMALLOC_N(pages_boot)
+#define pages_commit JEMALLOC_N(pages_commit)
+#define pages_decommit JEMALLOC_N(pages_decommit)
+#define pages_dodump JEMALLOC_N(pages_dodump)
+#define pages_dontdump JEMALLOC_N(pages_dontdump)
+#define pages_huge JEMALLOC_N(pages_huge)
+#define pages_map JEMALLOC_N(pages_map)
+#define pages_nohuge JEMALLOC_N(pages_nohuge)
+#define pages_purge_forced JEMALLOC_N(pages_purge_forced)
+#define pages_purge_lazy JEMALLOC_N(pages_purge_lazy)
+#define pages_set_thp_state JEMALLOC_N(pages_set_thp_state)
+#define pages_unmap JEMALLOC_N(pages_unmap)
+#define thp_mode_names JEMALLOC_N(thp_mode_names)
+#define bt2gctx_mtx JEMALLOC_N(bt2gctx_mtx)
+#define bt_init JEMALLOC_N(bt_init)
+#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
+#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
+#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
+#define opt_prof JEMALLOC_N(opt_prof)
+#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
+#define opt_prof_active JEMALLOC_N(opt_prof_active)
+#define opt_prof_final JEMALLOC_N(opt_prof_final)
+#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
+#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
+#define opt_prof_log JEMALLOC_N(opt_prof_log)
+#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
+#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
+#define prof_accum_init JEMALLOC_N(prof_accum_init)
+#define prof_active JEMALLOC_N(prof_active)
+#define prof_active_get JEMALLOC_N(prof_active_get)
+#define prof_active_set JEMALLOC_N(prof_active_set)
+#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
+#define prof_backtrace JEMALLOC_N(prof_backtrace)
+#define prof_boot0 JEMALLOC_N(prof_boot0)
+#define prof_boot1 JEMALLOC_N(prof_boot1)
+#define prof_boot2 JEMALLOC_N(prof_boot2)
+#define prof_dump_header JEMALLOC_N(prof_dump_header)
+#define prof_dump_open JEMALLOC_N(prof_dump_open)
+#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
+#define prof_gdump JEMALLOC_N(prof_gdump)
+#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
+#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
+#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
+#define prof_idump JEMALLOC_N(prof_idump)
+#define prof_interval JEMALLOC_N(prof_interval)
+#define prof_log_start JEMALLOC_N(prof_log_start)
+#define prof_log_stop JEMALLOC_N(prof_log_stop)
+#define prof_logging_state JEMALLOC_N(prof_logging_state)
+#define prof_lookup JEMALLOC_N(prof_lookup)
+#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
+#define prof_mdump JEMALLOC_N(prof_mdump)
+#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
+#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
+#define prof_prefork0 JEMALLOC_N(prof_prefork0)
+#define prof_prefork1 JEMALLOC_N(prof_prefork1)
+#define prof_reset JEMALLOC_N(prof_reset)
+#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
+#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
+#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
+#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
+#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
+#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
+#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
+#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
+#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
+#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
+#define rtree_ctx_data_init JEMALLOC_N(rtree_ctx_data_init)
+#define rtree_leaf_alloc JEMALLOC_N(rtree_leaf_alloc)
+#define rtree_leaf_dalloc JEMALLOC_N(rtree_leaf_dalloc)
+#define rtree_leaf_elm_lookup_hard JEMALLOC_N(rtree_leaf_elm_lookup_hard)
+#define rtree_new JEMALLOC_N(rtree_new)
+#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc)
+#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc)
+#define safety_check_fail JEMALLOC_N(safety_check_fail)
+#define safety_check_set_abort JEMALLOC_N(safety_check_set_abort)
+#define arena_mutex_names JEMALLOC_N(arena_mutex_names)
+#define global_mutex_names JEMALLOC_N(global_mutex_names)
+#define opt_stats_print JEMALLOC_N(opt_stats_print)
+#define opt_stats_print_opts JEMALLOC_N(opt_stats_print_opts)
+#define stats_print JEMALLOC_N(stats_print)
+#define sc_boot JEMALLOC_N(sc_boot)
+#define sc_data_global JEMALLOC_N(sc_data_global)
+#define sc_data_init JEMALLOC_N(sc_data_init)
+#define sc_data_update_slab_size JEMALLOC_N(sc_data_update_slab_size)
+#define sz_boot JEMALLOC_N(sz_boot)
+#define sz_index2size_tab JEMALLOC_N(sz_index2size_tab)
+#define sz_pind2sz_tab JEMALLOC_N(sz_pind2sz_tab)
+#define sz_size2index_tab JEMALLOC_N(sz_size2index_tab)
+#define nhbins JEMALLOC_N(nhbins)
+#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
+#define opt_tcache JEMALLOC_N(opt_tcache)
+#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
+#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
+#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
+#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
+#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
+#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
+#define tcache_boot JEMALLOC_N(tcache_boot)
+#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
+#define tcache_create_explicit JEMALLOC_N(tcache_create_explicit)
+#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
+#define tcache_flush JEMALLOC_N(tcache_flush)
+#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
+#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
+#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
+#define tcache_prefork JEMALLOC_N(tcache_prefork)
+#define tcache_salloc JEMALLOC_N(tcache_salloc)
+#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
+#define tcaches JEMALLOC_N(tcaches)
+#define tcaches_create JEMALLOC_N(tcaches_create)
+#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
+#define tcaches_flush JEMALLOC_N(tcaches_flush)
+#define tsd_tcache_data_init JEMALLOC_N(tsd_tcache_data_init)
+#define tsd_tcache_enabled_data_init JEMALLOC_N(tsd_tcache_enabled_data_init)
+#define test_hooks_arena_new_hook JEMALLOC_N(test_hooks_arena_new_hook)
+#define test_hooks_libc_hook JEMALLOC_N(test_hooks_libc_hook)
+#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
+#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
+#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
+#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
+#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
+#define tsd_booted JEMALLOC_N(tsd_booted)
+#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
+#define tsd_fetch_slow JEMALLOC_N(tsd_fetch_slow)
+#define tsd_global_slow JEMALLOC_N(tsd_global_slow)
+#define tsd_global_slow_dec JEMALLOC_N(tsd_global_slow_dec)
+#define tsd_global_slow_inc JEMALLOC_N(tsd_global_slow_inc)
+#define tsd_postfork_child JEMALLOC_N(tsd_postfork_child)
+#define tsd_postfork_parent JEMALLOC_N(tsd_postfork_parent)
+#define tsd_prefork JEMALLOC_N(tsd_prefork)
+#define tsd_slow_update JEMALLOC_N(tsd_slow_update)
+#define tsd_state_set JEMALLOC_N(tsd_state_set)
+#define tsd_tls JEMALLOC_N(tsd_tls)
+#define tsd_tsd JEMALLOC_N(tsd_tsd)
+#define witness_depth_error JEMALLOC_N(witness_depth_error)
+#define witness_init JEMALLOC_N(witness_init)
+#define witness_lock_error JEMALLOC_N(witness_lock_error)
+#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
+#define witness_owner_error JEMALLOC_N(witness_owner_error)
+#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
+#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
+#define witness_prefork JEMALLOC_N(witness_prefork)
+#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
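The #define block above routes every internal symbol through JEMALLOC_N() so that the library's private functions and globals are emitted under a build-configured prefix rather than their bare names. A minimal standalone sketch of the mechanism, assuming a hypothetical je_ prefix (the real prefix comes from the build configuration, not from this header):

#include <stdio.h>

/* Assumed prefix; jemalloc derives the real one from its configuration. */
#define JEMALLOC_N(n) je_##n

/* Same pattern as the lines above: the bare name becomes a macro... */
#define tcache_flush JEMALLOC_N(tcache_flush)

/* ...so this definition actually emits the symbol je_tcache_flush. */
static void tcache_flush(void) {
	puts("je_tcache_flush called");
}

int main(void) {
	tcache_flush();	/* expands to je_tcache_flush() */
	return 0;
}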
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace-osx.h b/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace-osx.h
index 29284a67c7..0b97ec66d5 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace-osx.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace-osx.h
@@ -1,11 +1,11 @@
-#pragma once
-
-#include "private_namespace-linux.h"
-
-#undef pthread_create_wrapper
-#define tsd_boot_wrapper JEMALLOC_N(tsd_boot_wrapper)
-#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
-#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
-#define tsd_init_head JEMALLOC_N(tsd_init_head)
-#undef tsd_tls
-#define zone_register JEMALLOC_N(zone_register)
+#pragma once
+
+#include "private_namespace-linux.h"
+
+#undef pthread_create_wrapper
+#define tsd_boot_wrapper JEMALLOC_N(tsd_boot_wrapper)
+#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
+#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
+#define tsd_init_head JEMALLOC_N(tsd_init_head)
+#undef tsd_tls
+#define zone_register JEMALLOC_N(zone_register)
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace.h b/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace.h
index e6a31e4266..dc56ec2587 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -1,7 +1,7 @@
-#pragma once
-
-#if defined(__APPLE__)
-# include "private_namespace-osx.h"
-#else
-# include "private_namespace-linux.h"
-#endif
+#pragma once
+
+#if defined(__APPLE__)
+# include "private_namespace-osx.h"
+#else
+# include "private_namespace-linux.h"
+#endif
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/prng.h b/contrib/libs/jemalloc/include/jemalloc/internal/prng.h
index 15cc2d18fa..b49f7d62ce 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/prng.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/prng.h
@@ -1,9 +1,9 @@
-#ifndef JEMALLOC_INTERNAL_PRNG_H
-#define JEMALLOC_INTERNAL_PRNG_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/bit_util.h"
+#ifndef JEMALLOC_INTERNAL_PRNG_H
+#define JEMALLOC_INTERNAL_PRNG_H
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bit_util.h"
+

/*
* Simple linear congruential pseudo-random number generator:
*
@@ -18,168 +18,168 @@
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
- * proportional to bit position. For example, the lowest bit has a cycle of 2,
+ * proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*/
/******************************************************************************/
-/* INTERNAL DEFINITIONS -- IGNORE */
+/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
-#define PRNG_A_32 UINT32_C(1103515241)
-#define PRNG_C_32 UINT32_C(12347)
-
-#define PRNG_A_64 UINT64_C(6364136223846793005)
-#define PRNG_C_64 UINT64_C(1442695040888963407)
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_state_next_u32(uint32_t state) {
- return (state * PRNG_A_32) + PRNG_C_32;
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_state_next_u64(uint64_t state) {
- return (state * PRNG_A_64) + PRNG_C_64;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_state_next_zu(size_t state) {
-#if LG_SIZEOF_PTR == 2
- return (state * PRNG_A_32) + PRNG_C_32;
-#elif LG_SIZEOF_PTR == 3
- return (state * PRNG_A_64) + PRNG_C_64;
-#else
-#error Unsupported pointer size
-#endif
-}
-
+#define PRNG_A_32 UINT32_C(1103515241)
+#define PRNG_C_32 UINT32_C(12347)
+
+#define PRNG_A_64 UINT64_C(6364136223846793005)
+#define PRNG_C_64 UINT64_C(1442695040888963407)
+
+JEMALLOC_ALWAYS_INLINE uint32_t
+prng_state_next_u32(uint32_t state) {
+ return (state * PRNG_A_32) + PRNG_C_32;
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+prng_state_next_u64(uint64_t state) {
+ return (state * PRNG_A_64) + PRNG_C_64;
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+prng_state_next_zu(size_t state) {
+#if LG_SIZEOF_PTR == 2
+ return (state * PRNG_A_32) + PRNG_C_32;
+#elif LG_SIZEOF_PTR == 3
+ return (state * PRNG_A_64) + PRNG_C_64;
+#else
+#error Unsupported pointer size
+#endif
+}
+
/******************************************************************************/
-/* BEGIN PUBLIC API */
-/******************************************************************************/
-
-/*
- * The prng_lg_range functions give a uniform int in the half-open range [0,
- * 2**lg_range). If atomic is true, they do so safely from multiple threads.
- * Multithreaded 64-bit prngs aren't supported.
- */
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
- uint32_t ret, state0, state1;
-
- assert(lg_range > 0);
- assert(lg_range <= 32);
-
- state0 = atomic_load_u32(state, ATOMIC_RELAXED);
-
- if (atomic) {
- do {
- state1 = prng_state_next_u32(state0);
- } while (!atomic_compare_exchange_weak_u32(state, &state0,
- state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
- } else {
- state1 = prng_state_next_u32(state0);
- atomic_store_u32(state, state1, ATOMIC_RELAXED);
- }
- ret = state1 >> (32 - lg_range);
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
- uint64_t ret, state1;
-
- assert(lg_range > 0);
- assert(lg_range <= 64);
-
- state1 = prng_state_next_u64(*state);
- *state = state1;
- ret = state1 >> (64 - lg_range);
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
- size_t ret, state0, state1;
-
- assert(lg_range > 0);
- assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
-
- state0 = atomic_load_zu(state, ATOMIC_RELAXED);
-
- if (atomic) {
- do {
- state1 = prng_state_next_zu(state0);
- } while (atomic_compare_exchange_weak_zu(state, &state0,
- state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
- } else {
- state1 = prng_state_next_zu(state0);
- atomic_store_zu(state, state1, ATOMIC_RELAXED);
- }
- ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
-
- return ret;
-}
-
-/*
- * The prng_range functions behave like the prng_lg_range, but return a result
- * in [0, range) instead of [0, 2**lg_range).
- */
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
- uint32_t ret;
- unsigned lg_range;
-
- assert(range > 1);
-
- /* Compute the ceiling of lg(range). */
- lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
-
- /* Generate a result in [0..range) via repeated trial. */
- do {
- ret = prng_lg_range_u32(state, lg_range, atomic);
- } while (ret >= range);
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_range_u64(uint64_t *state, uint64_t range) {
- uint64_t ret;
- unsigned lg_range;
-
- assert(range > 1);
-
- /* Compute the ceiling of lg(range). */
- lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
-
- /* Generate a result in [0..range) via repeated trial. */
- do {
- ret = prng_lg_range_u64(state, lg_range);
- } while (ret >= range);
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
- size_t ret;
- unsigned lg_range;
-
- assert(range > 1);
-
- /* Compute the ceiling of lg(range). */
- lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
-
- /* Generate a result in [0..range) via repeated trial. */
- do {
- ret = prng_lg_range_zu(state, lg_range, atomic);
- } while (ret >= range);
-
- return ret;
-}
-
-#endif /* JEMALLOC_INTERNAL_PRNG_H */
+/* BEGIN PUBLIC API */
+/******************************************************************************/
+
+/*
+ * The prng_lg_range functions give a uniform int in the half-open range [0,
+ * 2**lg_range). If atomic is true, they do so safely from multiple threads.
+ * Multithreaded 64-bit prngs aren't supported.
+ */
+
+JEMALLOC_ALWAYS_INLINE uint32_t
+prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
+ uint32_t ret, state0, state1;
+
+ assert(lg_range > 0);
+ assert(lg_range <= 32);
+
+ state0 = atomic_load_u32(state, ATOMIC_RELAXED);
+
+ if (atomic) {
+ do {
+ state1 = prng_state_next_u32(state0);
+ } while (!atomic_compare_exchange_weak_u32(state, &state0,
+ state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
+ } else {
+ state1 = prng_state_next_u32(state0);
+ atomic_store_u32(state, state1, ATOMIC_RELAXED);
+ }
+ ret = state1 >> (32 - lg_range);
+
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
+ uint64_t ret, state1;
+
+ assert(lg_range > 0);
+ assert(lg_range <= 64);
+
+ state1 = prng_state_next_u64(*state);
+ *state = state1;
+ ret = state1 >> (64 - lg_range);
+
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
+ size_t ret, state0, state1;
+
+ assert(lg_range > 0);
+ assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
+
+ state0 = atomic_load_zu(state, ATOMIC_RELAXED);
+
+ if (atomic) {
+ do {
+ state1 = prng_state_next_zu(state0);
+ } while (atomic_compare_exchange_weak_zu(state, &state0,
+ state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
+ } else {
+ state1 = prng_state_next_zu(state0);
+ atomic_store_zu(state, state1, ATOMIC_RELAXED);
+ }
+ ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
+
+ return ret;
+}
+
+/*
+ * The prng_range functions behave like the prng_lg_range, but return a result
+ * in [0, range) instead of [0, 2**lg_range).
+ */
+
+JEMALLOC_ALWAYS_INLINE uint32_t
+prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
+ uint32_t ret;
+ unsigned lg_range;
+
+ assert(range > 1);
+
+ /* Compute the ceiling of lg(range). */
+ lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
+
+ /* Generate a result in [0..range) via repeated trial. */
+ do {
+ ret = prng_lg_range_u32(state, lg_range, atomic);
+ } while (ret >= range);
+
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+prng_range_u64(uint64_t *state, uint64_t range) {
+ uint64_t ret;
+ unsigned lg_range;
+
+ assert(range > 1);
+
+ /* Compute the ceiling of lg(range). */
+ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+
+ /* Generate a result in [0..range) via repeated trial. */
+ do {
+ ret = prng_lg_range_u64(state, lg_range);
+ } while (ret >= range);
+
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
+ size_t ret;
+ unsigned lg_range;
+
+ assert(range > 1);
+
+ /* Compute the ceiling of lg(range). */
+ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+
+ /* Generate a result in [0..range) via repeated trial. */
+ do {
+ ret = prng_lg_range_zu(state, lg_range, atomic);
+ } while (ret >= range);
+
+ return ret;
+}
+
+#endif /* JEMALLOC_INTERNAL_PRNG_H */
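To make the scheme above concrete, here is a minimal standalone sketch of the same idea: step a 64-bit LCG with the PRNG_A_64/PRNG_C_64 constants, take the result from the upper bits, and use rejection sampling to map it into [0, range) as prng_range_u64() does. It is single-threaded plain C (no jemalloc atomics) and assumes a GCC/Clang __builtin_clzll for the ceiling-of-lg computation.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same multiplier/increment as PRNG_A_64 / PRNG_C_64 above. */
#define A64 UINT64_C(6364136223846793005)
#define C64 UINT64_C(1442695040888963407)

static uint64_t state_next_u64(uint64_t state) {
	return (state * A64) + C64;
}

/* Uniform value in [0, 2**lg_range), taken from the upper bits. */
static uint64_t lg_range_u64(uint64_t *state, unsigned lg_range) {
	assert(lg_range > 0 && lg_range <= 64);
	*state = state_next_u64(*state);
	return *state >> (64 - lg_range);
}

/* Uniform value in [0, range) via repeated trial, as in prng_range_u64(). */
static uint64_t range_u64(uint64_t *state, uint64_t range) {
	assert(range > 1);
	/* Ceiling of lg(range); GCC/Clang builtin assumed. */
	unsigned lg_range = 64 - (unsigned)__builtin_clzll(range - 1);
	uint64_t ret;
	do {
		ret = lg_range_u64(state, lg_range);
	} while (ret >= range);
	return ret;
}

int main(void) {
	uint64_t state = 42;	/* arbitrary seed */
	for (int i = 0; i < 5; i++) {
		printf("%llu\n", (unsigned long long)range_u64(&state, 10));
	}
	return 0;
}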
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/prof_externs.h b/contrib/libs/jemalloc/include/jemalloc/internal/prof_externs.h
index 094f3e170a..2acb2f946a 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/prof_externs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/prof_externs.h
@@ -1,105 +1,105 @@
-#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
-#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
-
-#include "jemalloc/internal/mutex.h"
-
-extern malloc_mutex_t bt2gctx_mtx;
-
-extern bool opt_prof;
-extern bool opt_prof_active;
-extern bool opt_prof_thread_active_init;
-extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
-extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
-extern bool opt_prof_gdump; /* High-water memory dumping. */
-extern bool opt_prof_final; /* Final profile dumping. */
-extern bool opt_prof_leak; /* Dump leak summary at exit. */
-extern bool opt_prof_accum; /* Report cumulative bytes. */
-extern bool opt_prof_log; /* Turn logging on at boot. */
-extern char opt_prof_prefix[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PATH_MAX +
-#endif
- 1];
-
-/* Accessed via prof_active_[gs]et{_unlocked,}(). */
-extern bool prof_active;
-
-/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
-extern bool prof_gdump_val;
-
-/*
- * Profile dump interval, measured in bytes allocated. Each arena triggers a
- * profile dump when it reaches this threshold. The effect is that the
- * interval between profile dumps averages prof_interval, though the actual
- * interval between dumps will tend to be sporadic, and the interval will be a
- * maximum of approximately (prof_interval * narenas).
- */
-extern uint64_t prof_interval;
-
-/*
- * Initialized as opt_lg_prof_sample, and potentially modified during profiling
- * resets.
- */
-extern size_t lg_prof_sample;
-
-void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
-void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void bt_init(prof_bt_t *bt, void **vec);
-void prof_backtrace(prof_bt_t *bt);
-prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
-#ifdef JEMALLOC_JET
-size_t prof_tdata_count(void);
-size_t prof_bt_count(void);
-#endif
-typedef int (prof_dump_open_t)(bool, const char *);
-extern prof_dump_open_t *JET_MUTABLE prof_dump_open;
-
-typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
-extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
-#ifdef JEMALLOC_JET
-void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
- uint64_t *accumbytes);
-#endif
-bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
-void prof_idump(tsdn_t *tsdn);
-bool prof_mdump(tsd_t *tsd, const char *filename);
-void prof_gdump(tsdn_t *tsdn);
-prof_tdata_t *prof_tdata_init(tsd_t *tsd);
-prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
-void prof_reset(tsd_t *tsd, size_t lg_sample);
-void prof_tdata_cleanup(tsd_t *tsd);
-bool prof_active_get(tsdn_t *tsdn);
-bool prof_active_set(tsdn_t *tsdn, bool active);
-const char *prof_thread_name_get(tsd_t *tsd);
-int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
-bool prof_thread_active_get(tsd_t *tsd);
-bool prof_thread_active_set(tsd_t *tsd, bool active);
-bool prof_thread_active_init_get(tsdn_t *tsdn);
-bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
-bool prof_gdump_get(tsdn_t *tsdn);
-bool prof_gdump_set(tsdn_t *tsdn, bool active);
-void prof_boot0(void);
-void prof_boot1(void);
-bool prof_boot2(tsd_t *tsd);
-void prof_prefork0(tsdn_t *tsdn);
-void prof_prefork1(tsdn_t *tsdn);
-void prof_postfork_parent(tsdn_t *tsdn);
-void prof_postfork_child(tsdn_t *tsdn);
-void prof_sample_threshold_update(prof_tdata_t *tdata);
-
-bool prof_log_start(tsdn_t *tsdn, const char *filename);
-bool prof_log_stop(tsdn_t *tsdn);
-#ifdef JEMALLOC_JET
-size_t prof_log_bt_count(void);
-size_t prof_log_alloc_count(void);
-size_t prof_log_thr_count(void);
-bool prof_log_is_logging(void);
-bool prof_log_rep_check(void);
-void prof_log_dummy_set(bool new_value);
-#endif
-
-#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
+#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
+#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
+
+#include "jemalloc/internal/mutex.h"
+
+extern malloc_mutex_t bt2gctx_mtx;
+
+extern bool opt_prof;
+extern bool opt_prof_active;
+extern bool opt_prof_thread_active_init;
+extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
+extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
+extern bool opt_prof_gdump; /* High-water memory dumping. */
+extern bool opt_prof_final; /* Final profile dumping. */
+extern bool opt_prof_leak; /* Dump leak summary at exit. */
+extern bool opt_prof_accum; /* Report cumulative bytes. */
+extern bool opt_prof_log; /* Turn logging on at boot. */
+extern char opt_prof_prefix[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
+
+/* Accessed via prof_active_[gs]et{_unlocked,}(). */
+extern bool prof_active;
+
+/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
+extern bool prof_gdump_val;
+
+/*
+ * Profile dump interval, measured in bytes allocated. Each arena triggers a
+ * profile dump when it reaches this threshold. The effect is that the
+ * interval between profile dumps averages prof_interval, though the actual
+ * interval between dumps will tend to be sporadic, and the interval will be a
+ * maximum of approximately (prof_interval * narenas).
+ */
+extern uint64_t prof_interval;
+
+/*
+ * Initialized as opt_lg_prof_sample, and potentially modified during profiling
+ * resets.
+ */
+extern size_t lg_prof_sample;
+
+void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
+void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void bt_init(prof_bt_t *bt, void **vec);
+void prof_backtrace(prof_bt_t *bt);
+prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
+#ifdef JEMALLOC_JET
+size_t prof_tdata_count(void);
+size_t prof_bt_count(void);
+#endif
+typedef int (prof_dump_open_t)(bool, const char *);
+extern prof_dump_open_t *JET_MUTABLE prof_dump_open;
+
+typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
+extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
+#ifdef JEMALLOC_JET
+void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
+ uint64_t *accumbytes);
+#endif
+bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
+void prof_idump(tsdn_t *tsdn);
+bool prof_mdump(tsd_t *tsd, const char *filename);
+void prof_gdump(tsdn_t *tsdn);
+prof_tdata_t *prof_tdata_init(tsd_t *tsd);
+prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
+void prof_reset(tsd_t *tsd, size_t lg_sample);
+void prof_tdata_cleanup(tsd_t *tsd);
+bool prof_active_get(tsdn_t *tsdn);
+bool prof_active_set(tsdn_t *tsdn, bool active);
+const char *prof_thread_name_get(tsd_t *tsd);
+int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
+bool prof_thread_active_get(tsd_t *tsd);
+bool prof_thread_active_set(tsd_t *tsd, bool active);
+bool prof_thread_active_init_get(tsdn_t *tsdn);
+bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
+bool prof_gdump_get(tsdn_t *tsdn);
+bool prof_gdump_set(tsdn_t *tsdn, bool active);
+void prof_boot0(void);
+void prof_boot1(void);
+bool prof_boot2(tsd_t *tsd);
+void prof_prefork0(tsdn_t *tsdn);
+void prof_prefork1(tsdn_t *tsdn);
+void prof_postfork_parent(tsdn_t *tsdn);
+void prof_postfork_child(tsdn_t *tsdn);
+void prof_sample_threshold_update(prof_tdata_t *tdata);
+
+bool prof_log_start(tsdn_t *tsdn, const char *filename);
+bool prof_log_stop(tsdn_t *tsdn);
+#ifdef JEMALLOC_JET
+size_t prof_log_bt_count(void);
+size_t prof_log_alloc_count(void);
+size_t prof_log_thr_count(void);
+bool prof_log_is_logging(void);
+bool prof_log_rep_check(void);
+void prof_log_dummy_set(bool new_value);
+#endif
+
+#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
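The lg-scaled options declared above are easiest to read as powers of two: opt_lg_prof_sample is the lg of the mean byte distance between samples (19, i.e. 512 KiB, per LG_PROF_SAMPLE_DEFAULT in prof_types.h), and prof_interval is 2**opt_lg_prof_interval bytes per arena, so the worst-case gap between interval-triggered dumps approaches prof_interval * narenas. A small sketch of that arithmetic, with the interval and arena count chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void) {
	unsigned lg_prof_sample = 19;	/* default from prof_types.h */
	unsigned lg_prof_interval = 30;	/* assumed example setting */
	unsigned narenas = 8;		/* assumed arena count */

	uint64_t mean_sample_bytes = UINT64_C(1) << lg_prof_sample;
	uint64_t prof_interval = UINT64_C(1) << lg_prof_interval;

	printf("mean bytes between samples: %llu\n",
	    (unsigned long long)mean_sample_bytes);
	printf("per-arena dump threshold:   %llu\n",
	    (unsigned long long)prof_interval);
	printf("worst-case dump interval:   ~%llu bytes\n",
	    (unsigned long long)(prof_interval * narenas));
	return 0;
}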
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/prof_inlines_a.h b/contrib/libs/jemalloc/include/jemalloc/internal/prof_inlines_a.h
index 471d9853cf..3948ad4ba9 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/prof_inlines_a.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/prof_inlines_a.h
@@ -1,85 +1,85 @@
-#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
-#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
-
-#include "jemalloc/internal/mutex.h"
-
-static inline bool
-prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
- uint64_t accumbytes) {
- cassert(config_prof);
-
- bool overflow;
- uint64_t a0, a1;
-
- /*
- * If the application allocates fast enough (and/or if idump is slow
- * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
- * idump trigger coalescing. This is an intentional mechanism that
- * avoids rate-limiting allocation.
- */
-#ifdef JEMALLOC_ATOMIC_U64
- a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
- do {
- a1 = a0 + accumbytes;
- assert(a1 >= a0);
- overflow = (a1 >= prof_interval);
- if (overflow) {
- a1 %= prof_interval;
- }
- } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
- a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
-#else
- malloc_mutex_lock(tsdn, &prof_accum->mtx);
- a0 = prof_accum->accumbytes;
- a1 = a0 + accumbytes;
- overflow = (a1 >= prof_interval);
- if (overflow) {
- a1 %= prof_interval;
- }
- prof_accum->accumbytes = a1;
- malloc_mutex_unlock(tsdn, &prof_accum->mtx);
-#endif
- return overflow;
-}
-
-static inline void
-prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
- size_t usize) {
- cassert(config_prof);
-
- /*
- * Cancel out as much of the excessive prof_accumbytes increase as
- * possible without underflowing. Interval-triggered dumps occur
- * slightly more often than intended as a result of incomplete
- * canceling.
- */
- uint64_t a0, a1;
-#ifdef JEMALLOC_ATOMIC_U64
- a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
- do {
- a1 = (a0 >= SC_LARGE_MINCLASS - usize)
- ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
- } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
- a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
-#else
- malloc_mutex_lock(tsdn, &prof_accum->mtx);
- a0 = prof_accum->accumbytes;
- a1 = (a0 >= SC_LARGE_MINCLASS - usize)
- ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
- prof_accum->accumbytes = a1;
- malloc_mutex_unlock(tsdn, &prof_accum->mtx);
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_active_get_unlocked(void) {
- /*
- * Even if opt_prof is true, sampling can be temporarily disabled by
- * setting prof_active to false. No locking is used when reading
- * prof_active in the fast path, so there are no guarantees regarding
- * how long it will take for all threads to notice state changes.
- */
- return prof_active;
-}
-
-#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
+#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
+#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
+
+#include "jemalloc/internal/mutex.h"
+
+static inline bool
+prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
+ uint64_t accumbytes) {
+ cassert(config_prof);
+
+ bool overflow;
+ uint64_t a0, a1;
+
+ /*
+ * If the application allocates fast enough (and/or if idump is slow
+ * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
+ * idump trigger coalescing. This is an intentional mechanism that
+ * avoids rate-limiting allocation.
+ */
+#ifdef JEMALLOC_ATOMIC_U64
+ a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
+ do {
+ a1 = a0 + accumbytes;
+ assert(a1 >= a0);
+ overflow = (a1 >= prof_interval);
+ if (overflow) {
+ a1 %= prof_interval;
+ }
+ } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
+ a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+ malloc_mutex_lock(tsdn, &prof_accum->mtx);
+ a0 = prof_accum->accumbytes;
+ a1 = a0 + accumbytes;
+ overflow = (a1 >= prof_interval);
+ if (overflow) {
+ a1 %= prof_interval;
+ }
+ prof_accum->accumbytes = a1;
+ malloc_mutex_unlock(tsdn, &prof_accum->mtx);
+#endif
+ return overflow;
+}
+
+static inline void
+prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
+ size_t usize) {
+ cassert(config_prof);
+
+ /*
+ * Cancel out as much of the excessive prof_accumbytes increase as
+ * possible without underflowing. Interval-triggered dumps occur
+ * slightly more often than intended as a result of incomplete
+ * canceling.
+ */
+ uint64_t a0, a1;
+#ifdef JEMALLOC_ATOMIC_U64
+ a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
+ do {
+ a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+ ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
+ } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
+ a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+ malloc_mutex_lock(tsdn, &prof_accum->mtx);
+ a0 = prof_accum->accumbytes;
+ a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+ ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
+ prof_accum->accumbytes = a1;
+ malloc_mutex_unlock(tsdn, &prof_accum->mtx);
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_active_get_unlocked(void) {
+ /*
+ * Even if opt_prof is true, sampling can be temporarily disabled by
+ * setting prof_active to false. No locking is used when reading
+ * prof_active in the fast path, so there are no guarantees regarding
+ * how long it will take for all threads to notice state changes.
+ */
+ return prof_active;
+}
+
+#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
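Once the atomic/mutex plumbing is stripped away, prof_accum_add() above is an accumulate-and-wrap counter: add the allocated bytes, report when the total crosses prof_interval, and keep the remainder so coalesced triggers do not stall future dumps. A single-threaded sketch of just that arithmetic (threshold value assumed for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t prof_interval = UINT64_C(1) << 20;	/* assumed: 1 MiB */
static uint64_t accumbytes;

/* Returns true when the caller should trigger an interval dump. */
static bool accum_add(uint64_t bytes) {
	uint64_t a1 = accumbytes + bytes;
	bool overflow = (a1 >= prof_interval);
	if (overflow) {
		a1 %= prof_interval;	/* keep the excess, as above */
	}
	accumbytes = a1;
	return overflow;
}

int main(void) {
	printf("%d\n", accum_add(700 * 1024));	/* 0: below threshold */
	printf("%d\n", accum_add(400 * 1024));	/* 1: crossed 1 MiB, wrapped */
	return 0;
}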
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/prof_inlines_b.h b/contrib/libs/jemalloc/include/jemalloc/internal/prof_inlines_b.h
index 8ba8a1e1ff..5a4818e96e 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/prof_inlines_b.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/prof_inlines_b.h
@@ -1,250 +1,250 @@
-#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
-#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
-
-#include "jemalloc/internal/safety_check.h"
-#include "jemalloc/internal/sz.h"
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_gdump_get_unlocked(void) {
- /*
- * No locking is used when reading prof_gdump_val in the fast path, so
- * there are no guarantees regarding how long it will take for all
- * threads to notice state changes.
- */
- return prof_gdump_val;
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tdata_t *
-prof_tdata_get(tsd_t *tsd, bool create) {
- prof_tdata_t *tdata;
-
- cassert(config_prof);
-
- tdata = tsd_prof_tdata_get(tsd);
- if (create) {
- if (unlikely(tdata == NULL)) {
- if (tsd_nominal(tsd)) {
- tdata = prof_tdata_init(tsd);
- tsd_prof_tdata_set(tsd, tdata);
- }
- } else if (unlikely(tdata->expired)) {
- tdata = prof_tdata_reinit(tsd, tdata);
- tsd_prof_tdata_set(tsd, tdata);
- }
- assert(tdata == NULL || tdata->attached);
- }
-
- return tdata;
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- arena_prof_tctx_reset(tsdn, ptr, tctx);
-}
-
-JEMALLOC_ALWAYS_INLINE nstime_t
-prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
- nstime_t t) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_sample_check(tsd_t *tsd, size_t usize, bool update) {
- ssize_t check = update ? 0 : usize;
-
- int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
- if (update) {
- bytes_until_sample -= usize;
- if (tsd_nominal(tsd)) {
- tsd_bytes_until_sample_set(tsd, bytes_until_sample);
- }
- }
- if (likely(bytes_until_sample >= check)) {
- return true;
- }
-
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
- prof_tdata_t **tdata_out) {
- prof_tdata_t *tdata;
-
- cassert(config_prof);
-
- /* Fastpath: no need to load tdata */
- if (likely(prof_sample_check(tsd, usize, update))) {
- return true;
- }
-
- bool booted = tsd_prof_tdata_get(tsd);
- tdata = prof_tdata_get(tsd, true);
- if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
- tdata = NULL;
- }
-
- if (tdata_out != NULL) {
- *tdata_out = tdata;
- }
-
- if (unlikely(tdata == NULL)) {
- return true;
- }
-
- /*
- * If this was the first creation of tdata, then
- * prof_tdata_get() reset bytes_until_sample, so decrement and
- * check it again
- */
- if (!booted && prof_sample_check(tsd, usize, update)) {
- return true;
- }
-
- if (tsd_reentrancy_level_get(tsd) > 0) {
- return true;
- }
- /* Compute new sample threshold. */
- if (update) {
- prof_sample_threshold_update(tdata);
- }
- return !tdata->active;
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
- prof_tctx_t *ret;
- prof_tdata_t *tdata;
- prof_bt_t bt;
-
- assert(usize == sz_s2u(usize));
-
- if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
- &tdata))) {
- ret = (prof_tctx_t *)(uintptr_t)1U;
- } else {
- bt_init(&bt, tdata->vec);
- prof_backtrace(&bt);
- ret = prof_lookup(tsd, &bt);
- }
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
- prof_tctx_t *tctx) {
- cassert(config_prof);
- assert(ptr != NULL);
- assert(usize == isalloc(tsdn, ptr));
-
- if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
- prof_malloc_sample_object(tsdn, ptr, usize, tctx);
- } else {
- prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
- (prof_tctx_t *)(uintptr_t)1U);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
- bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
- prof_tctx_t *old_tctx) {
- bool sampled, old_sampled, moved;
-
- cassert(config_prof);
- assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
-
- if (prof_active && !updated && ptr != NULL) {
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
- if (prof_sample_accum_update(tsd, usize, true, NULL)) {
- /*
- * Don't sample. The usize passed to prof_alloc_prep()
- * was larger than what actually got allocated, so a
- * backtrace was captured for this allocation, even
- * though its actual usize was insufficient to cross the
- * sample threshold.
- */
- prof_alloc_rollback(tsd, tctx, true);
- tctx = (prof_tctx_t *)(uintptr_t)1U;
- }
- }
-
- sampled = ((uintptr_t)tctx > (uintptr_t)1U);
- old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
- moved = (ptr != old_ptr);
-
- if (unlikely(sampled)) {
- prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
- } else if (moved) {
- prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
- (prof_tctx_t *)(uintptr_t)1U);
- } else if (unlikely(old_sampled)) {
- /*
- * prof_tctx_set() would work for the !moved case as well, but
- * prof_tctx_reset() is slightly cheaper, and the proper thing
- * to do here in the presence of explicit knowledge re: moved
- * state.
- */
- prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
- } else {
- assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
- (uintptr_t)1U);
- }
-
- /*
- * The prof_free_sampled_object() call must come after the
- * prof_malloc_sample_object() call, because tctx and old_tctx may be
- * the same, in which case reversing the call order could cause the tctx
- * to be prematurely destroyed as a side effect of momentarily zeroed
- * counters.
- */
- if (unlikely(old_sampled)) {
- prof_free_sampled_object(tsd, ptr, old_usize, old_tctx);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
- prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
-
- cassert(config_prof);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
-
- if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
- prof_free_sampled_object(tsd, ptr, usize, tctx);
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
+#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
+#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
+
+#include "jemalloc/internal/safety_check.h"
+#include "jemalloc/internal/sz.h"
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_gdump_get_unlocked(void) {
+ /*
+ * No locking is used when reading prof_gdump_val in the fast path, so
+ * there are no guarantees regarding how long it will take for all
+ * threads to notice state changes.
+ */
+ return prof_gdump_val;
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tdata_t *
+prof_tdata_get(tsd_t *tsd, bool create) {
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ tdata = tsd_prof_tdata_get(tsd);
+ if (create) {
+ if (unlikely(tdata == NULL)) {
+ if (tsd_nominal(tsd)) {
+ tdata = prof_tdata_init(tsd);
+ tsd_prof_tdata_set(tsd, tdata);
+ }
+ } else if (unlikely(tdata->expired)) {
+ tdata = prof_tdata_reinit(tsd, tdata);
+ tsd_prof_tdata_set(tsd, tdata);
+ }
+ assert(tdata == NULL || tdata->attached);
+ }
+
+ return tdata;
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ arena_prof_tctx_reset(tsdn, ptr, tctx);
+}
+
+JEMALLOC_ALWAYS_INLINE nstime_t
+prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
+ nstime_t t) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_sample_check(tsd_t *tsd, size_t usize, bool update) {
+ ssize_t check = update ? 0 : usize;
+
+ int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
+ if (update) {
+ bytes_until_sample -= usize;
+ if (tsd_nominal(tsd)) {
+ tsd_bytes_until_sample_set(tsd, bytes_until_sample);
+ }
+ }
+ if (likely(bytes_until_sample >= check)) {
+ return true;
+ }
+
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
+ prof_tdata_t **tdata_out) {
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ /* Fastpath: no need to load tdata */
+ if (likely(prof_sample_check(tsd, usize, update))) {
+ return true;
+ }
+
+ bool booted = tsd_prof_tdata_get(tsd);
+ tdata = prof_tdata_get(tsd, true);
+ if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
+ tdata = NULL;
+ }
+
+ if (tdata_out != NULL) {
+ *tdata_out = tdata;
+ }
+
+ if (unlikely(tdata == NULL)) {
+ return true;
+ }
+
+ /*
+ * If this was the first creation of tdata, then
+ * prof_tdata_get() reset bytes_until_sample, so decrement and
+ * check it again
+ */
+ if (!booted && prof_sample_check(tsd, usize, update)) {
+ return true;
+ }
+
+ if (tsd_reentrancy_level_get(tsd) > 0) {
+ return true;
+ }
+ /* Compute new sample threshold. */
+ if (update) {
+ prof_sample_threshold_update(tdata);
+ }
+ return !tdata->active;
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
+ prof_tctx_t *ret;
+ prof_tdata_t *tdata;
+ prof_bt_t bt;
+
+ assert(usize == sz_s2u(usize));
+
+ if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
+ &tdata))) {
+ ret = (prof_tctx_t *)(uintptr_t)1U;
+ } else {
+ bt_init(&bt, tdata->vec);
+ prof_backtrace(&bt);
+ ret = prof_lookup(tsd, &bt);
+ }
+
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
+ prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(usize == isalloc(tsdn, ptr));
+
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
+ prof_malloc_sample_object(tsdn, ptr, usize, tctx);
+ } else {
+ prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
+ (prof_tctx_t *)(uintptr_t)1U);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
+ bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
+ prof_tctx_t *old_tctx) {
+ bool sampled, old_sampled, moved;
+
+ cassert(config_prof);
+ assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
+
+ if (prof_active && !updated && ptr != NULL) {
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+ if (prof_sample_accum_update(tsd, usize, true, NULL)) {
+ /*
+ * Don't sample. The usize passed to prof_alloc_prep()
+ * was larger than what actually got allocated, so a
+ * backtrace was captured for this allocation, even
+ * though its actual usize was insufficient to cross the
+ * sample threshold.
+ */
+ prof_alloc_rollback(tsd, tctx, true);
+ tctx = (prof_tctx_t *)(uintptr_t)1U;
+ }
+ }
+
+ sampled = ((uintptr_t)tctx > (uintptr_t)1U);
+ old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
+ moved = (ptr != old_ptr);
+
+ if (unlikely(sampled)) {
+ prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
+ } else if (moved) {
+ prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
+ (prof_tctx_t *)(uintptr_t)1U);
+ } else if (unlikely(old_sampled)) {
+ /*
+ * prof_tctx_set() would work for the !moved case as well, but
+ * prof_tctx_reset() is slightly cheaper, and the proper thing
+ * to do here in the presence of explicit knowledge re: moved
+ * state.
+ */
+ prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
+ } else {
+ assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
+ (uintptr_t)1U);
+ }
+
+ /*
+ * The prof_free_sampled_object() call must come after the
+ * prof_malloc_sample_object() call, because tctx and old_tctx may be
+ * the same, in which case reversing the call order could cause the tctx
+ * to be prematurely destroyed as a side effect of momentarily zeroed
+ * counters.
+ */
+ if (unlikely(old_sampled)) {
+ prof_free_sampled_object(tsd, ptr, old_usize, old_tctx);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
+ prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
+
+ cassert(config_prof);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
+ prof_free_sampled_object(tsd, ptr, usize, tctx);
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
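The sampling fast path above boils down to a per-thread byte countdown: prof_sample_check() subtracts the allocation size from bytes_until_sample and only falls through to backtrace capture once the counter goes negative, after which prof_sample_threshold_update() draws a new threshold. A standalone sketch of that countdown, with a fixed threshold standing in for the PRNG-drawn one:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int64_t bytes_until_sample = 512 * 1024;	/* assumed threshold */

/* Returns true when the allocation should NOT be sampled (fast path). */
static bool sample_skip(size_t usize) {
	bytes_until_sample -= (int64_t)usize;
	if (bytes_until_sample >= 0) {
		return true;
	}
	/* Real code: prof_sample_threshold_update() redraws from the PRNG. */
	bytes_until_sample = 512 * 1024;
	return false;	/* capture a backtrace for this allocation */
}

int main(void) {
	size_t sizes[] = {200 * 1024, 200 * 1024, 200 * 1024};
	for (unsigned i = 0; i < 3; i++) {
		printf("alloc %u sampled: %s\n", i,
		    sample_skip(sizes[i]) ? "no" : "yes");
	}
	return 0;
}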
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/prof_structs.h b/contrib/libs/jemalloc/include/jemalloc/internal/prof_structs.h
index 34ed4822b6..09d8d048c6 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/prof_structs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/prof_structs.h
@@ -1,200 +1,200 @@
-#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
-#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
-
-#include "jemalloc/internal/ckh.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/rb.h"
-
-struct prof_bt_s {
- /* Backtrace, stored as len program counters. */
- void **vec;
- unsigned len;
-};
-
-#ifdef JEMALLOC_PROF_LIBGCC
-/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
-typedef struct {
- prof_bt_t *bt;
- unsigned max;
-} prof_unwind_data_t;
-#endif
-
-struct prof_accum_s {
-#ifndef JEMALLOC_ATOMIC_U64
- malloc_mutex_t mtx;
- uint64_t accumbytes;
-#else
- atomic_u64_t accumbytes;
-#endif
-};
-
-struct prof_cnt_s {
- /* Profiling counters. */
- uint64_t curobjs;
- uint64_t curbytes;
- uint64_t accumobjs;
- uint64_t accumbytes;
-};
-
-typedef enum {
- prof_tctx_state_initializing,
- prof_tctx_state_nominal,
- prof_tctx_state_dumping,
- prof_tctx_state_purgatory /* Dumper must finish destroying. */
-} prof_tctx_state_t;
-
-struct prof_tctx_s {
- /* Thread data for thread that performed the allocation. */
- prof_tdata_t *tdata;
-
- /*
- * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
- * defunct during teardown.
- */
- uint64_t thr_uid;
- uint64_t thr_discrim;
-
- /* Profiling counters, protected by tdata->lock. */
- prof_cnt_t cnts;
-
- /* Associated global context. */
- prof_gctx_t *gctx;
-
- /*
- * UID that distinguishes multiple tctx's created by the same thread,
- * but coexisting in gctx->tctxs. There are two ways that such
- * coexistence can occur:
- * - A dumper thread can cause a tctx to be retained in the purgatory
- * state.
- * - Although a single "producer" thread must create all tctx's which
- * share the same thr_uid, multiple "consumers" can each concurrently
- * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
- * gets called once each time cnts.cur{objs,bytes} drop to 0, but this
- * threshold can be hit again before the first consumer finishes
- * executing prof_tctx_destroy().
- */
- uint64_t tctx_uid;
-
- /* Linkage into gctx's tctxs. */
- rb_node(prof_tctx_t) tctx_link;
-
- /*
- * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
- * sample vs destroy race.
- */
- bool prepared;
-
- /* Current dump-related state, protected by gctx->lock. */
- prof_tctx_state_t state;
-
- /*
- * Copy of cnts snapshotted during early dump phase, protected by
- * dump_mtx.
- */
- prof_cnt_t dump_cnts;
-};
-typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
-
-struct prof_gctx_s {
- /* Protects nlimbo, cnt_summed, and tctxs. */
- malloc_mutex_t *lock;
-
- /*
- * Number of threads that currently cause this gctx to be in a state of
- * limbo due to one of:
- * - Initializing this gctx.
- * - Initializing per thread counters associated with this gctx.
- * - Preparing to destroy this gctx.
- * - Dumping a heap profile that includes this gctx.
- * nlimbo must be 1 (single destroyer) in order to safely destroy the
- * gctx.
- */
- unsigned nlimbo;
-
- /*
- * Tree of profile counters, one for each thread that has allocated in
- * this context.
- */
- prof_tctx_tree_t tctxs;
-
- /* Linkage for tree of contexts to be dumped. */
- rb_node(prof_gctx_t) dump_link;
-
- /* Temporary storage for summation during dump. */
- prof_cnt_t cnt_summed;
-
- /* Associated backtrace. */
- prof_bt_t bt;
-
- /* Backtrace vector, variable size, referred to by bt. */
- void *vec[1];
-};
-typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
-
-struct prof_tdata_s {
- malloc_mutex_t *lock;
-
- /* Monotonically increasing unique thread identifier. */
- uint64_t thr_uid;
-
- /*
- * Monotonically increasing discriminator among tdata structures
- * associated with the same thr_uid.
- */
- uint64_t thr_discrim;
-
- /* Included in heap profile dumps if non-NULL. */
- char *thread_name;
-
- bool attached;
- bool expired;
-
- rb_node(prof_tdata_t) tdata_link;
-
- /*
- * Counter used to initialize prof_tctx_t's tctx_uid. No locking is
- * necessary when incrementing this field, because only one thread ever
- * does so.
- */
- uint64_t tctx_uid_next;
-
- /*
- * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
- * backtraces for which it has non-zero allocation/deallocation counters
- * associated with thread-specific prof_tctx_t objects. Other threads
- * may write to prof_tctx_t contents when freeing associated objects.
- */
- ckh_t bt2tctx;
-
- /* Sampling state. */
- uint64_t prng_state;
-
- /* State used to avoid dumping while operating on prof internals. */
- bool enq;
- bool enq_idump;
- bool enq_gdump;
-
- /*
- * Set to true during an early dump phase for tdata's which are
- * currently being dumped. New threads' tdata's have this initialized
- * to false so that they aren't accidentally included in later dump
- * phases.
- */
- bool dumping;
-
- /*
- * True if profiling is active for this tdata's thread
- * (thread.prof.active mallctl).
- */
- bool active;
-
- /* Temporary storage for summation during dump. */
- prof_cnt_t cnt_summed;
-
- /* Backtrace vector, used for calls to prof_backtrace(). */
- void *vec[PROF_BT_MAX];
-};
-typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
-
-#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
+#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
+#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
+
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/rb.h"
+
+struct prof_bt_s {
+ /* Backtrace, stored as len program counters. */
+ void **vec;
+ unsigned len;
+};
+
+#ifdef JEMALLOC_PROF_LIBGCC
+/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
+typedef struct {
+ prof_bt_t *bt;
+ unsigned max;
+} prof_unwind_data_t;
+#endif
+
+struct prof_accum_s {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_t mtx;
+ uint64_t accumbytes;
+#else
+ atomic_u64_t accumbytes;
+#endif
+};
+
+struct prof_cnt_s {
+ /* Profiling counters. */
+ uint64_t curobjs;
+ uint64_t curbytes;
+ uint64_t accumobjs;
+ uint64_t accumbytes;
+};
+
+typedef enum {
+ prof_tctx_state_initializing,
+ prof_tctx_state_nominal,
+ prof_tctx_state_dumping,
+ prof_tctx_state_purgatory /* Dumper must finish destroying. */
+} prof_tctx_state_t;
+
+struct prof_tctx_s {
+ /* Thread data for thread that performed the allocation. */
+ prof_tdata_t *tdata;
+
+ /*
+ * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
+ * defunct during teardown.
+ */
+ uint64_t thr_uid;
+ uint64_t thr_discrim;
+
+ /* Profiling counters, protected by tdata->lock. */
+ prof_cnt_t cnts;
+
+ /* Associated global context. */
+ prof_gctx_t *gctx;
+
+ /*
+ * UID that distinguishes multiple tctx's created by the same thread,
+ * but coexisting in gctx->tctxs. There are two ways that such
+ * coexistence can occur:
+ * - A dumper thread can cause a tctx to be retained in the purgatory
+ * state.
+ * - Although a single "producer" thread must create all tctx's which
+ * share the same thr_uid, multiple "consumers" can each concurrently
+ * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
+ * gets called once each time cnts.cur{objs,bytes} drop to 0, but this
+ * threshold can be hit again before the first consumer finishes
+ * executing prof_tctx_destroy().
+ */
+ uint64_t tctx_uid;
+
+ /* Linkage into gctx's tctxs. */
+ rb_node(prof_tctx_t) tctx_link;
+
+ /*
+ * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
+ * sample vs destroy race.
+ */
+ bool prepared;
+
+ /* Current dump-related state, protected by gctx->lock. */
+ prof_tctx_state_t state;
+
+ /*
+ * Copy of cnts snapshotted during early dump phase, protected by
+ * dump_mtx.
+ */
+ prof_cnt_t dump_cnts;
+};
+typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
+
+struct prof_gctx_s {
+ /* Protects nlimbo, cnt_summed, and tctxs. */
+ malloc_mutex_t *lock;
+
+ /*
+ * Number of threads that currently cause this gctx to be in a state of
+ * limbo due to one of:
+ * - Initializing this gctx.
+ * - Initializing per thread counters associated with this gctx.
+ * - Preparing to destroy this gctx.
+ * - Dumping a heap profile that includes this gctx.
+ * nlimbo must be 1 (single destroyer) in order to safely destroy the
+ * gctx.
+ */
+ unsigned nlimbo;
+
+ /*
+ * Tree of profile counters, one for each thread that has allocated in
+ * this context.
+ */
+ prof_tctx_tree_t tctxs;
+
+ /* Linkage for tree of contexts to be dumped. */
+ rb_node(prof_gctx_t) dump_link;
+
+ /* Temporary storage for summation during dump. */
+ prof_cnt_t cnt_summed;
+
+ /* Associated backtrace. */
+ prof_bt_t bt;
+
+ /* Backtrace vector, variable size, referred to by bt. */
+ void *vec[1];
+};
+typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
+
+struct prof_tdata_s {
+ malloc_mutex_t *lock;
+
+ /* Monotonically increasing unique thread identifier. */
+ uint64_t thr_uid;
+
+ /*
+ * Monotonically increasing discriminator among tdata structures
+ * associated with the same thr_uid.
+ */
+ uint64_t thr_discrim;
+
+ /* Included in heap profile dumps if non-NULL. */
+ char *thread_name;
+
+ bool attached;
+ bool expired;
+
+ rb_node(prof_tdata_t) tdata_link;
+
+ /*
+ * Counter used to initialize prof_tctx_t's tctx_uid. No locking is
+ * necessary when incrementing this field, because only one thread ever
+ * does so.
+ */
+ uint64_t tctx_uid_next;
+
+ /*
+ * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
+ * backtraces for which it has non-zero allocation/deallocation counters
+ * associated with thread-specific prof_tctx_t objects. Other threads
+ * may write to prof_tctx_t contents when freeing associated objects.
+ */
+ ckh_t bt2tctx;
+
+ /* Sampling state. */
+ uint64_t prng_state;
+
+ /* State used to avoid dumping while operating on prof internals. */
+ bool enq;
+ bool enq_idump;
+ bool enq_gdump;
+
+ /*
+ * Set to true during an early dump phase for tdata's which are
+ * currently being dumped. New threads' tdata's have this initialized
+ * to false so that they aren't accidentally included in later dump
+ * phases.
+ */
+ bool dumping;
+
+ /*
+ * True if profiling is active for this tdata's thread
+ * (thread.prof.active mallctl).
+ */
+ bool active;
+
+ /* Temporary storage for summation during dump. */
+ prof_cnt_t cnt_summed;
+
+ /* Backtrace vector, used for calls to prof_backtrace(). */
+ void *vec[PROF_BT_MAX];
+};
+typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
+
+#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
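The comments above describe a three-level ownership chain: each thread's prof_tdata_t maps backtraces to per-thread prof_tctx_t counters, and every tctx also points at the shared per-backtrace prof_gctx_t whose tree aggregates all threads. A much-simplified mirror of that chain (illustrative names only; the real structs add locks, rb-tree linkage, and the ckh hash):

#include <stdint.h>
#include <stdio.h>

typedef struct gctx_s gctx_t;
typedef struct tctx_s tctx_t;
typedef struct tdata_s tdata_t;

struct gctx_s {		/* shared, one per unique backtrace */
	unsigned bt_len;
	tctx_t *tctxs;	/* stand-in for rb_tree(prof_tctx_t) tctxs */
};

struct tctx_s {		/* per (thread, backtrace) counters */
	tdata_t *tdata;	/* owning thread */
	gctx_t *gctx;	/* shared context for this backtrace */
	uint64_t curobjs, curbytes;
	tctx_t *next;	/* stand-in for tctx_link */
};

struct tdata_s {	/* per thread */
	uint64_t thr_uid;
	uint64_t prng_state;	/* sampling state */
	/* Real code: ckh_t bt2tctx hash of (prof_bt_t *) -> (prof_tctx_t *). */
};

int main(void) {
	tdata_t td = {.thr_uid = 1, .prng_state = 42};
	gctx_t gc = {.bt_len = 4, .tctxs = NULL};
	tctx_t tc = {.tdata = &td, .gctx = &gc, .curobjs = 1, .curbytes = 4096};
	gc.tctxs = &tc;
	printf("thread %llu: %llu bytes live at this backtrace\n",
	    (unsigned long long)tc.tdata->thr_uid,
	    (unsigned long long)tc.curbytes);
	return 0;
}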
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/prof_types.h b/contrib/libs/jemalloc/include/jemalloc/internal/prof_types.h
index 1eff995ecf..ea343db858 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/prof_types.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/prof_types.h
@@ -1,56 +1,56 @@
-#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
-#define JEMALLOC_INTERNAL_PROF_TYPES_H
-
-typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_accum_s prof_accum_t;
-typedef struct prof_cnt_s prof_cnt_t;
-typedef struct prof_tctx_s prof_tctx_t;
-typedef struct prof_gctx_s prof_gctx_t;
-typedef struct prof_tdata_s prof_tdata_t;
-
-/* Option defaults. */
-#ifdef JEMALLOC_PROF
-# define PROF_PREFIX_DEFAULT "jeprof"
-#else
-# define PROF_PREFIX_DEFAULT ""
-#endif
-#define LG_PROF_SAMPLE_DEFAULT 19
-#define LG_PROF_INTERVAL_DEFAULT -1
-
-/*
- * Hard limit on stack backtrace depth. The version of prof_backtrace() that
- * is based on __builtin_return_address() necessarily has a hard-coded number
- * of backtrace frame handlers, and should be kept in sync with this setting.
- */
-#define PROF_BT_MAX 128
-
-/* Initial hash table size. */
-#define PROF_CKH_MINITEMS 64
-
-/* Size of memory buffer to use when writing dump files. */
-#define PROF_DUMP_BUFSIZE 65536
-
-/* Size of stack-allocated buffer used by prof_printf(). */
-#define PROF_PRINTF_BUFSIZE 128
-
-/*
- * Number of mutexes shared among all gctx's. No space is allocated for these
- * unless profiling is enabled, so it's okay to over-provision.
- */
-#define PROF_NCTX_LOCKS 1024
-
-/*
- * Number of mutexes shared among all tdata's. No space is allocated for these
- * unless profiling is enabled, so it's okay to over-provision.
- */
-#define PROF_NTDATA_LOCKS 256
-
-/*
- * prof_tdata pointers close to NULL are used to encode state information that
- * is used for cleaning up during thread shutdown.
- */
-#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
-#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
-#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
-
-#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
+#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
+#define JEMALLOC_INTERNAL_PROF_TYPES_H
+
+typedef struct prof_bt_s prof_bt_t;
+typedef struct prof_accum_s prof_accum_t;
+typedef struct prof_cnt_s prof_cnt_t;
+typedef struct prof_tctx_s prof_tctx_t;
+typedef struct prof_gctx_s prof_gctx_t;
+typedef struct prof_tdata_s prof_tdata_t;
+
+/* Option defaults. */
+#ifdef JEMALLOC_PROF
+# define PROF_PREFIX_DEFAULT "jeprof"
+#else
+# define PROF_PREFIX_DEFAULT ""
+#endif
+#define LG_PROF_SAMPLE_DEFAULT 19
+#define LG_PROF_INTERVAL_DEFAULT -1
+
+/*
+ * Hard limit on stack backtrace depth. The version of prof_backtrace() that
+ * is based on __builtin_return_address() necessarily has a hard-coded number
+ * of backtrace frame handlers, and should be kept in sync with this setting.
+ */
+#define PROF_BT_MAX 128
+
+/* Initial hash table size. */
+#define PROF_CKH_MINITEMS 64
+
+/* Size of memory buffer to use when writing dump files. */
+#define PROF_DUMP_BUFSIZE 65536
+
+/* Size of stack-allocated buffer used by prof_printf(). */
+#define PROF_PRINTF_BUFSIZE 128
+
+/*
+ * Number of mutexes shared among all gctx's. No space is allocated for these
+ * unless profiling is enabled, so it's okay to over-provision.
+ */
+#define PROF_NCTX_LOCKS 1024
+
+/*
+ * Number of mutexes shared among all tdata's. No space is allocated for these
+ * unless profiling is enabled, so it's okay to over-provision.
+ */
+#define PROF_NTDATA_LOCKS 256
+
+/*
+ * prof_tdata pointers close to NULL are used to encode state information that
+ * is used for cleaning up during thread shutdown.
+ */
+#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
+#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
+#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
+
+#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
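The PROF_TDATA_STATE_* constants above overload near-NULL pointer values as sentinels, so a single prof_tdata_t * slot can hold either a pointer to a live per-thread object or a thread-shutdown state tag. A minimal sketch of how such a field can be tested, assuming prof_types.h and the usual jemalloc internal headers are in scope; the helper name is hypothetical and exists only to illustrate the comparison against PROF_TDATA_STATE_MAX:

static bool
prof_tdata_is_live(prof_tdata_t *tdata) {
    /*
     * The sentinels are encoded as the small integers 1 and 2, so any
     * value at or below PROF_TDATA_STATE_MAX (NULL included) is not a
     * pointer to a real prof_tdata_t.
     */
    return (uintptr_t)tdata > (uintptr_t)PROF_TDATA_STATE_MAX;
}
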
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/ql.h b/contrib/libs/jemalloc/include/jemalloc/internal/ql.h
index 8029040771..14e7856796 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/ql.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/ql.h
@@ -1,64 +1,64 @@
-#ifndef JEMALLOC_INTERNAL_QL_H
-#define JEMALLOC_INTERNAL_QL_H
-
-#include "jemalloc/internal/qr.h"
-
-/* List definitions. */
-#define ql_head(a_type) \
+#ifndef JEMALLOC_INTERNAL_QL_H
+#define JEMALLOC_INTERNAL_QL_H
+
+#include "jemalloc/internal/qr.h"
+
+/* List definitions. */
+#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
-#define ql_head_initializer(a_head) {NULL}
+#define ql_head_initializer(a_head) {NULL}
-#define ql_elm(a_type) qr(a_type)
+#define ql_elm(a_type) qr(a_type)
/* List functions. */
-#define ql_new(a_head) do { \
+#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
-#define ql_first(a_head) ((a_head)->qlh_first)
+#define ql_first(a_head) ((a_head)->qlh_first)
-#define ql_last(a_head, a_field) \
+#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
-#define ql_next(a_head, a_elm, a_field) \
+#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
-#define ql_prev(a_head, a_elm, a_field) \
+#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
-#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
+#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
-#define ql_after_insert(a_qlelm, a_elm, a_field) \
+#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
-#define ql_head_insert(a_head, a_elm, a_field) do { \
+#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
-#define ql_tail_insert(a_head, a_elm, a_field) do { \
+#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
-#define ql_remove(a_head, a_elm, a_field) do { \
+#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
@@ -69,20 +69,20 @@ struct { \
} \
} while (0)
-#define ql_head_remove(a_head, a_type, a_field) do { \
+#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
-#define ql_tail_remove(a_head, a_type, a_field) do { \
+#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
-#define ql_foreach(a_var, a_head, a_field) \
+#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
-#define ql_reverse_foreach(a_var, a_head, a_field) \
+#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
-
-#endif /* JEMALLOC_INTERNAL_QL_H */
+
+#endif /* JEMALLOC_INTERNAL_QL_H */
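The ql_* macros define an intrusive doubly linked list on top of the qr ring (see qr.h below): the head stores only the first element, and the links live inside the caller's struct via ql_elm(). A minimal usage sketch, assuming ql.h is included; widget_t and its field names are hypothetical:

typedef struct widget_s widget_t;
struct widget_s {
    int id;
    ql_elm(widget_t) link;    /* embedded list linkage */
};

typedef ql_head(widget_t) widget_list_t;

static void
widget_list_demo(widget_t *a, widget_t *b) {
    widget_list_t list;
    ql_new(&list);

    ql_elm_new(a, link);
    ql_elm_new(b, link);
    ql_tail_insert(&list, a, link);
    ql_tail_insert(&list, b, link);    /* list order: a, b */

    widget_t *w;
    ql_foreach(w, &list, link) {
        /* visits a, then b */
    }

    ql_remove(&list, a, link);         /* list now holds only b */
}

Because the linkage is embedded in the element type, none of these operations allocate; insertion and removal are O(1) pointer splices.
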
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/qr.h b/contrib/libs/jemalloc/include/jemalloc/internal/qr.h
index 1e1056b386..b54b0a2e22 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/qr.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/qr.h
@@ -1,39 +1,39 @@
-#ifndef JEMALLOC_INTERNAL_QR_H
-#define JEMALLOC_INTERNAL_QR_H
-
+#ifndef JEMALLOC_INTERNAL_QR_H
+#define JEMALLOC_INTERNAL_QR_H
+
/* Ring definitions. */
-#define qr(a_type) \
+#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
-#define qr_new(a_qr, a_field) do { \
+#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
+#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
-#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
+#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
-#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
+#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
+#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
-} while (0)
+} while (0)
-#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
- a_type *t; \
+#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
+ a_type *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
@@ -41,14 +41,14 @@ struct { \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
-/*
- * qr_meld() and qr_split() are functionally equivalent, so there's no need to
- * have two copies of the code.
- */
-#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
- qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
+/*
+ * qr_meld() and qr_split() are functionally equivalent, so there's no need to
+ * have two copies of the code.
+ */
+#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
+ qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
-#define qr_remove(a_qr, a_field) do { \
+#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
@@ -57,16 +57,16 @@ struct { \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_foreach(var, a_qr, a_field) \
+#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
-#define qr_reverse_foreach(var, a_qr, a_field) \
+#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
-
-#endif /* JEMALLOC_INTERNAL_QR_H */
+
+#endif /* JEMALLOC_INTERNAL_QR_H */
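qr() builds an intrusive circular ring with no separate head object: a freshly initialized element points at itself, and insertion, removal, meld, and split are constant-time pointer splices. A small sketch, assuming qr.h is included; node_t is hypothetical:

typedef struct node_s node_t;
struct node_s {
    int value;
    qr(node_t) link;    /* embedded ring linkage */
};

static int
ring_sum(node_t *a, node_t *b, node_t *c) {
    qr_new(a, link);                /* a is a ring of one */
    qr_new(b, link);
    qr_new(c, link);
    qr_after_insert(a, b, link);    /* ring: a -> b -> a */
    qr_after_insert(b, c, link);    /* ring: a -> b -> c -> a */

    int sum = 0;
    node_t *n;
    qr_foreach(n, a, link) {        /* visits a, b, c exactly once */
        sum += n->value;
    }
    return sum;
}
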
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/quantum.h b/contrib/libs/jemalloc/include/jemalloc/internal/quantum.h
index 821086e992..f5381e8c41 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/quantum.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/quantum.h
@@ -1,77 +1,77 @@
-#ifndef JEMALLOC_INTERNAL_QUANTUM_H
-#define JEMALLOC_INTERNAL_QUANTUM_H
-
-/*
- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
- * classes).
- */
-#ifndef LG_QUANTUM
-# if (defined(__i386__) || defined(_M_IX86))
-# define LG_QUANTUM 4
-# endif
-# ifdef __ia64__
-# define LG_QUANTUM 4
-# endif
-# ifdef __alpha__
-# define LG_QUANTUM 4
-# endif
-# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
-# define LG_QUANTUM 4
-# endif
-# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
-# define LG_QUANTUM 4
-# endif
-# ifdef __arm__
-# define LG_QUANTUM 3
-# endif
-# ifdef __aarch64__
-# define LG_QUANTUM 4
-# endif
-# ifdef __hppa__
-# define LG_QUANTUM 4
-# endif
-# ifdef __m68k__
-# define LG_QUANTUM 3
-# endif
-# ifdef __mips__
-# define LG_QUANTUM 3
-# endif
-# ifdef __nios2__
-# define LG_QUANTUM 3
-# endif
-# ifdef __or1k__
-# define LG_QUANTUM 3
-# endif
-# ifdef __powerpc__
-# define LG_QUANTUM 4
-# endif
-# if defined(__riscv) || defined(__riscv__)
-# define LG_QUANTUM 4
-# endif
-# ifdef __s390__
-# define LG_QUANTUM 4
-# endif
-# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
- defined(__SH4_SINGLE_ONLY__))
-# define LG_QUANTUM 4
-# endif
-# ifdef __tile__
-# define LG_QUANTUM 4
-# endif
-# ifdef __le32__
-# define LG_QUANTUM 4
-# endif
-# ifndef LG_QUANTUM
-# error "Unknown minimum alignment for architecture; specify via "
- "--with-lg-quantum"
-# endif
-#endif
-
-#define QUANTUM ((size_t)(1U << LG_QUANTUM))
-#define QUANTUM_MASK (QUANTUM - 1)
-
-/* Return the smallest quantum multiple that is >= a. */
-#define QUANTUM_CEILING(a) \
- (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
-
-#endif /* JEMALLOC_INTERNAL_QUANTUM_H */
+#ifndef JEMALLOC_INTERNAL_QUANTUM_H
+#define JEMALLOC_INTERNAL_QUANTUM_H
+
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#ifndef LG_QUANTUM
+# if (defined(__i386__) || defined(_M_IX86))
+# define LG_QUANTUM 4
+# endif
+# ifdef __ia64__
+# define LG_QUANTUM 4
+# endif
+# ifdef __alpha__
+# define LG_QUANTUM 4
+# endif
+# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
+# define LG_QUANTUM 4
+# endif
+# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
+# define LG_QUANTUM 4
+# endif
+# ifdef __arm__
+# define LG_QUANTUM 3
+# endif
+# ifdef __aarch64__
+# define LG_QUANTUM 4
+# endif
+# ifdef __hppa__
+# define LG_QUANTUM 4
+# endif
+# ifdef __m68k__
+# define LG_QUANTUM 3
+# endif
+# ifdef __mips__
+# define LG_QUANTUM 3
+# endif
+# ifdef __nios2__
+# define LG_QUANTUM 3
+# endif
+# ifdef __or1k__
+# define LG_QUANTUM 3
+# endif
+# ifdef __powerpc__
+# define LG_QUANTUM 4
+# endif
+# if defined(__riscv) || defined(__riscv__)
+# define LG_QUANTUM 4
+# endif
+# ifdef __s390__
+# define LG_QUANTUM 4
+# endif
+# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
+ defined(__SH4_SINGLE_ONLY__))
+# define LG_QUANTUM 4
+# endif
+# ifdef __tile__
+# define LG_QUANTUM 4
+# endif
+# ifdef __le32__
+# define LG_QUANTUM 4
+# endif
+# ifndef LG_QUANTUM
+# error "Unknown minimum alignment for architecture; specify via "
+ "--with-lg-quantum"
+# endif
+#endif
+
+#define QUANTUM ((size_t)(1U << LG_QUANTUM))
+#define QUANTUM_MASK (QUANTUM - 1)
+
+/* Return the smallest quantum multiple that is >= a. */
+#define QUANTUM_CEILING(a) \
+ (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
+
+#endif /* JEMALLOC_INTERNAL_QUANTUM_H */
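QUANTUM_CEILING() rounds a size up to the next multiple of the quantum with a single add-and-mask, which works only because QUANTUM is a power of two. With LG_QUANTUM == 4 (QUANTUM == 16, QUANTUM_MASK == 15), a 13-byte request rounds to (13 + 15) & ~15 == 16, 16 stays 16, and 17 rounds to 32. A self-contained illustration of the same arithmetic, with the constants re-derived locally rather than taken from the header:

#include <assert.h>
#include <stddef.h>

#define DEMO_LG_QUANTUM   4
#define DEMO_QUANTUM      ((size_t)(1U << DEMO_LG_QUANTUM))
#define DEMO_QUANTUM_MASK (DEMO_QUANTUM - 1)
/* Same rounding expression as QUANTUM_CEILING(). */
#define DEMO_QUANTUM_CEILING(a) (((a) + DEMO_QUANTUM_MASK) & ~DEMO_QUANTUM_MASK)

int
main(void) {
    assert(DEMO_QUANTUM_CEILING((size_t)13) == 16);
    assert(DEMO_QUANTUM_CEILING((size_t)16) == 16);
    assert(DEMO_QUANTUM_CEILING((size_t)17) == 32);
    return 0;
}
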
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/rb.h b/contrib/libs/jemalloc/include/jemalloc/internal/rb.h
index 47fa5ca99b..919013a0f4 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/rb.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/rb.h
@@ -20,21 +20,21 @@
*/
#ifndef RB_H_
-#define RB_H_
-
-#ifndef __PGI
-#define RB_COMPACT
-#endif
+#define RB_H_
+#ifndef __PGI
+#define RB_COMPACT
+#endif
+
#ifdef RB_COMPACT
/* Node structure. */
-#define rb_node(a_type) \
+#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right_red; \
}
#else
-#define rb_node(a_type) \
+#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right; \
@@ -43,116 +43,116 @@ struct { \
#endif
/* Root structure. */
-#define rb_tree(a_type) \
+#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
}
/* Left accessors. */
-#define rbtn_left_get(a_type, a_field, a_node) \
+#define rbtn_left_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_left)
-#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
+#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
(a_node)->a_field.rbn_left = a_left; \
} while (0)
#ifdef RB_COMPACT
/* Right accessors. */
-#define rbtn_right_get(a_type, a_field, a_node) \
+#define rbtn_right_get(a_type, a_field, a_node) \
((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
& ((ssize_t)-2)))
-#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
+#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
| (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)
/* Color accessors. */
-#define rbtn_red_get(a_type, a_field, a_node) \
+#define rbtn_red_get(a_type, a_field, a_node) \
((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
& ((size_t)1)))
-#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
+#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
| ((ssize_t)a_red)); \
} while (0)
-#define rbtn_red_set(a_type, a_field, a_node) do { \
+#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
(a_node)->a_field.rbn_right_red) | ((size_t)1)); \
} while (0)
-#define rbtn_black_set(a_type, a_field, a_node) do { \
+#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
-
-/* Node initializer. */
-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
- /* Bookkeeping bit cannot be used by node pointer. */ \
- assert(((uintptr_t)(a_node) & 0x1) == 0); \
- rbtn_left_set(a_type, a_field, (a_node), NULL); \
- rbtn_right_set(a_type, a_field, (a_node), NULL); \
- rbtn_red_set(a_type, a_field, (a_node)); \
-} while (0)
+
+/* Node initializer. */
+#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
+ /* Bookkeeping bit cannot be used by node pointer. */ \
+ assert(((uintptr_t)(a_node) & 0x1) == 0); \
+ rbtn_left_set(a_type, a_field, (a_node), NULL); \
+ rbtn_right_set(a_type, a_field, (a_node), NULL); \
+ rbtn_red_set(a_type, a_field, (a_node)); \
+} while (0)
#else
/* Right accessors. */
-#define rbtn_right_get(a_type, a_field, a_node) \
+#define rbtn_right_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_right)
-#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
+#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right = a_right; \
} while (0)
/* Color accessors. */
-#define rbtn_red_get(a_type, a_field, a_node) \
+#define rbtn_red_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_red)
-#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
+#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_red = (a_red); \
} while (0)
-#define rbtn_red_set(a_type, a_field, a_node) do { \
+#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = true; \
} while (0)
-#define rbtn_black_set(a_type, a_field, a_node) do { \
+#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)
/* Node initializer. */
-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
- rbtn_left_set(a_type, a_field, (a_node), NULL); \
- rbtn_right_set(a_type, a_field, (a_node), NULL); \
+#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
+ rbtn_left_set(a_type, a_field, (a_node), NULL); \
+ rbtn_right_set(a_type, a_field, (a_node), NULL); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
-#endif
+#endif
/* Tree initializer. */
-#define rb_new(a_type, a_field, a_rbt) do { \
- (a_rbt)->rbt_root = NULL; \
+#define rb_new(a_type, a_field, a_rbt) do { \
+ (a_rbt)->rbt_root = NULL; \
} while (0)
/* Internal utility macros. */
-#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
+#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
- if ((r_node) != NULL) { \
+ if ((r_node) != NULL) { \
for (; \
- rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
+ rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
(r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
} \
} \
} while (0)
-#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
+#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
- if ((r_node) != NULL) { \
- for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
- (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
+ if ((r_node) != NULL) { \
+ for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
+ (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
} \
} \
} while (0)
-#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
+#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
rbtn_right_set(a_type, a_field, (a_node), \
rbtn_left_get(a_type, a_field, (r_node))); \
rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
-#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
+#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
rbtn_left_set(a_type, a_field, (a_node), \
rbtn_right_get(a_type, a_field, (r_node))); \
@@ -164,11 +164,11 @@ struct { \
* functions generated by an equivalently parameterized call to rb_gen().
*/
-#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
+#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
-a_attr bool \
-a_prefix##empty(a_rbt_type *rbtree); \
+a_attr bool \
+a_prefix##empty(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree); \
a_attr a_type * \
@@ -178,11 +178,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
+a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
+a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
+a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
@@ -192,10 +192,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
-a_attr void \
-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
- void *arg);
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
+a_attr void \
+a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
+ void *arg);
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
@@ -212,7 +212,7 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* int (a_cmp *)(a_type *a_node, a_type *a_other);
* ^^^^^^
* or a_key
- * Interpretation of comparison function return values:
+ * Interpretation of comparison function return values:
* -1 : a_node < a_other
* 0 : a_node == a_other
* 1 : a_node > a_other
@@ -238,13 +238,13 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* Args:
* tree: Pointer to an uninitialized red-black tree object.
*
- * static bool
- * ex_empty(ex_t *tree);
- * Description: Determine whether tree is empty.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * Ret: True if tree is empty, false otherwise.
- *
+ * static bool
+ * ex_empty(ex_t *tree);
+ * Description: Determine whether tree is empty.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * Ret: True if tree is empty, false otherwise.
+ *
* static ex_node_t *
* ex_first(ex_t *tree);
* static ex_node_t *
@@ -266,7 +266,7 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* last/first.
*
* static ex_node_t *
- * ex_search(ex_t *tree, const ex_node_t *key);
+ * ex_search(ex_t *tree, const ex_node_t *key);
* Description: Search for node that matches key.
* Args:
* tree: Pointer to an initialized red-black tree object.
@@ -274,9 +274,9 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* Ret: Node in tree that matches key, or NULL if no match.
*
* static ex_node_t *
- * ex_nsearch(ex_t *tree, const ex_node_t *key);
+ * ex_nsearch(ex_t *tree, const ex_node_t *key);
* static ex_node_t *
- * ex_psearch(ex_t *tree, const ex_node_t *key);
+ * ex_psearch(ex_t *tree, const ex_node_t *key);
* Description: Search for node that matches key. If no match is found,
* return what would be key's successor/predecessor, were
* key in tree.
@@ -324,52 +324,52 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* arg : Opaque pointer passed to cb().
* Ret: NULL if iteration completed, or the non-NULL callback return value
* that caused termination of the iteration.
- *
- * static void
- * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
- * Description: Iterate over the tree with post-order traversal, remove
- * each node, and run the callback if non-null. This is
- * used for destroying a tree without paying the cost to
- * rebalance it. The tree must not be otherwise altered
- * during traversal.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * cb : Callback function, which, if non-null, is called for each node
- * during iteration. There is no way to stop iteration once it
- * has begun.
- * arg : Opaque pointer passed to cb().
+ *
+ * static void
+ * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
+ * Description: Iterate over the tree with post-order traversal, remove
+ * each node, and run the callback if non-null. This is
+ * used for destroying a tree without paying the cost to
+ * rebalance it. The tree must not be otherwise altered
+ * during traversal.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * cb : Callback function, which, if non-null, is called for each node
+ * during iteration. There is no way to stop iteration once it
+ * has begun.
+ * arg : Opaque pointer passed to cb().
*/
-#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
+#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
} \
-a_attr bool \
-a_prefix##empty(a_rbt_type *rbtree) { \
- return (rbtree->rbt_root == NULL); \
-} \
+a_attr bool \
+a_prefix##empty(a_rbt_type *rbtree) { \
+ return (rbtree->rbt_root == NULL); \
+} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
- return ret; \
+ return ret; \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
- return ret; \
+ return ret; \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
- if (rbtn_right_get(a_type, a_field, node) != NULL) { \
+ if (rbtn_right_get(a_type, a_field, node) != NULL) { \
rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
- assert(tnode != NULL); \
- ret = NULL; \
+ assert(tnode != NULL); \
+ ret = NULL; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
@@ -380,21 +380,21 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
} else { \
break; \
} \
- assert(tnode != NULL); \
+ assert(tnode != NULL); \
} \
} \
- return ret; \
+ return ret; \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
- if (rbtn_left_get(a_type, a_field, node) != NULL) { \
+ if (rbtn_left_get(a_type, a_field, node) != NULL) { \
rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
- assert(tnode != NULL); \
- ret = NULL; \
+ assert(tnode != NULL); \
+ ret = NULL; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
@@ -405,17 +405,17 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
} else { \
break; \
} \
- assert(tnode != NULL); \
+ assert(tnode != NULL); \
} \
} \
- return ret; \
+ return ret; \
} \
a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
+a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
int cmp; \
ret = rbtree->rbt_root; \
- while (ret != NULL \
+ while (ret != NULL \
&& (cmp = (a_cmp)(key, ret)) != 0) { \
if (cmp < 0) { \
ret = rbtn_left_get(a_type, a_field, ret); \
@@ -423,14 +423,14 @@ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
- return ret; \
+ return ret; \
} \
a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
+a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
- ret = NULL; \
- while (tnode != NULL) { \
+ ret = NULL; \
+ while (tnode != NULL) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
ret = tnode; \
@@ -442,14 +442,14 @@ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
- return ret; \
+ return ret; \
} \
a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
+a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
- ret = NULL; \
- while (tnode != NULL) { \
+ ret = NULL; \
+ while (tnode != NULL) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
@@ -461,7 +461,7 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
- return ret; \
+ return ret; \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
@@ -472,7 +472,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
- for (pathp = path; pathp->node != NULL; pathp++) { \
+ for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
assert(cmp != 0); \
if (cmp < 0) { \
@@ -492,8 +492,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_left_set(a_type, a_field, cnode, left); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
+ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
+ leftleft)) { \
/* Fix up 4-node. */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
@@ -508,8 +508,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, cnode, right); \
if (rbtn_red_get(a_type, a_field, right)) { \
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
- if (left != NULL && rbtn_red_get(a_type, a_field, \
- left)) { \
+ if (left != NULL && rbtn_red_get(a_type, a_field, \
+ left)) { \
/* Split 4-node. */ \
rbtn_black_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, right); \
@@ -542,7 +542,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Wind. */ \
nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
- for (pathp = path; pathp->node != NULL; pathp++) { \
+ for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
@@ -554,7 +554,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Find node's successor, in preparation for swap. */ \
pathp->cmp = 1; \
nodep = pathp; \
- for (pathp++; pathp->node != NULL; pathp++) { \
+ for (pathp++; pathp->node != NULL; pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
@@ -596,10 +596,10 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
} else { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
- if (left != NULL) { \
+ if (left != NULL) { \
/* node has no successor, but it has a left child. */\
/* Splice node out, without losing the left child. */\
- assert(!rbtn_red_get(a_type, a_field, node)); \
+ assert(!rbtn_red_get(a_type, a_field, node)); \
assert(rbtn_red_get(a_type, a_field, left)); \
rbtn_black_set(a_type, a_field, left); \
if (pathp == path) { \
@@ -616,19 +616,19 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} else if (pathp == path) { \
/* The tree only contained one node. */ \
- rbtree->rbt_root = NULL; \
+ rbtree->rbt_root = NULL; \
return; \
} \
} \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */ \
assert(pathp[-1].cmp < 0); \
- rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
return; \
} \
/* The node to be pruned is black, so unwind until balance is */\
/* restored. */\
- pathp->node = NULL; \
+ pathp->node = NULL; \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
assert(pathp->cmp != 0); \
if (pathp->cmp < 0) { \
@@ -640,8 +640,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
a_type *tnode; \
- if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
- rightleft)) { \
+ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
+ rightleft)) { \
/* In the following diagrams, ||, //, and \\ */\
/* indicate the path to the removed node. */\
/* */\
@@ -684,8 +684,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
- if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
- rightleft)) { \
+ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
+ rightleft)) { \
/* || */\
/* pathp(b) */\
/* // \ */\
@@ -699,7 +699,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */\
- /* subtree root, which may actually be the tree */\
+ /* subtree root, which may actually be the tree */\
/* root. */\
if (pathp == path) { \
/* Set root. */ \
@@ -739,8 +739,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
left); \
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
leftright); \
- if (leftrightleft != NULL && rbtn_red_get(a_type, \
- a_field, leftrightleft)) { \
+ if (leftrightleft != NULL && rbtn_red_get(a_type, \
+ a_field, leftrightleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
@@ -766,7 +766,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* (b) */\
/* / */\
/* (b) */\
- assert(leftright != NULL); \
+ assert(leftright != NULL); \
rbtn_red_set(a_type, a_field, leftright); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
@@ -789,8 +789,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
+ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
+ leftleft)) { \
/* || */\
/* pathp(r) */\
/* / \\ */\
@@ -828,8 +828,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
} else { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
+ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
+ leftleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
@@ -870,22 +870,22 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
/* Set root. */ \
rbtree->rbt_root = path->node; \
- assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
+ assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
} \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return NULL; \
+ if (node == NULL) { \
+ return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
- a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
- arg)) != NULL) { \
- return ret; \
+ a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
+ arg)) != NULL) { \
+ return ret; \
} \
- return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg); \
+ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -895,22 +895,22 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
if (cmp < 0) { \
a_type *ret; \
if ((ret = a_prefix##iter_start(rbtree, start, \
- rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
- return ret; \
+ rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
+ (ret = cb(rbtree, node, arg)) != NULL) { \
+ return ret; \
} \
- return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg); \
+ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg); \
} else if (cmp > 0) { \
- return a_prefix##iter_start(rbtree, start, \
- rbtn_right_get(a_type, a_field, node), cb, arg); \
+ return a_prefix##iter_start(rbtree, start, \
+ rbtn_right_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
- return ret; \
+ return ret; \
} \
- return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg); \
+ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -923,22 +923,22 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
- return ret; \
+ return ret; \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return NULL; \
+ if (node == NULL) { \
+ return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
- return ret; \
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
+ (ret = cb(rbtree, node, arg)) != NULL) { \
+ return ret; \
} \
- return a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg); \
+ return a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -949,22 +949,22 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
if (cmp > 0) { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
- rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
- return ret; \
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
+ (ret = cb(rbtree, node, arg)) != NULL) { \
+ return ret; \
} \
- return a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg); \
+ return a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg); \
} else if (cmp < 0) { \
- return a_prefix##reverse_iter_start(rbtree, start, \
- rbtn_left_get(a_type, a_field, node), cb, arg); \
+ return a_prefix##reverse_iter_start(rbtree, start, \
+ rbtn_left_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
- return ret; \
+ return ret; \
} \
- return a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg); \
+ return a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -978,29 +978,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
- return ret; \
-} \
-a_attr void \
-a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
- a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return; \
- } \
- a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
- node), cb, arg); \
- rbtn_left_set(a_type, a_field, (node), NULL); \
- a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
- node), cb, arg); \
- rbtn_right_set(a_type, a_field, (node), NULL); \
- if (cb) { \
- cb(node, arg); \
+ return ret; \
+} \
+a_attr void \
+a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
+ a_type *, void *), void *arg) { \
+ if (node == NULL) { \
+ return; \
} \
-} \
-a_attr void \
-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
- void *arg) { \
- a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
- rbtree->rbt_root = NULL; \
+ a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
+ node), cb, arg); \
+ rbtn_left_set(a_type, a_field, (node), NULL); \
+ a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
+ node), cb, arg); \
+ rbtn_right_set(a_type, a_field, (node), NULL); \
+ if (cb) { \
+ cb(node, arg); \
+ } \
+} \
+a_attr void \
+a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
+ void *arg) { \
+ a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
+ rbtree->rbt_root = NULL; \
}
#endif /* RB_H_ */
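rb_gen() expands, at its point of use, into a complete set of tree operations (new, empty, first/last, next/prev, search/nsearch/psearch, insert, remove, iter, reverse_iter, destroy) with the storage attribute and name prefix given as arguments. A minimal instantiation sketch in the spirit of the ex_* example in the comments above; the ex_node_t layout and comparator are hypothetical:

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
    int key;
    rb_node(ex_node_t) link;    /* embedded red-black linkage */
};

typedef rb_tree(ex_node_t) ex_t;

/* Comparator returns -1, 0, or 1, as rb_gen() requires. */
static int
ex_cmp(const ex_node_t *a, const ex_node_t *b) {
    return (a->key > b->key) - (a->key < b->key);
}

/* Generates static ex_new(), ex_insert(), ex_search(), ex_remove(), ... */
rb_gen(static, ex_, ex_t, ex_node_t, link, ex_cmp)

static ex_node_t *
ex_demo(ex_t *tree, ex_node_t *node, int wanted) {
    ex_new(tree);
    ex_insert(tree, node);
    ex_node_t key = {.key = wanted};
    return ex_search(tree, &key);    /* node if keys match, else NULL */
}

Nodes embed their own rb_node() linkage, so the tree itself never allocates; under RB_COMPACT the node color is packed into the low bit of the right-child pointer, which is why rbt_node_new() asserts pointer alignment.
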
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/rtree.h b/contrib/libs/jemalloc/include/jemalloc/internal/rtree.h
index 16ccbebee7..6402837a59 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/rtree.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/rtree.h
@@ -1,528 +1,528 @@
-#ifndef JEMALLOC_INTERNAL_RTREE_H
-#define JEMALLOC_INTERNAL_RTREE_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/rtree_tsd.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/tsd.h"
-
+#ifndef JEMALLOC_INTERNAL_RTREE_H
+#define JEMALLOC_INTERNAL_RTREE_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree_tsd.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/tsd.h"
+
/*
* This radix tree implementation is tailored to the singular purpose of
- * associating metadata with extents that are currently owned by jemalloc.
+ * associating metadata with extents that are currently owned by jemalloc.
*
*******************************************************************************
*/
-/* Number of high insignificant bits. */
-#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
-/* Number of low insigificant bits. */
-#define RTREE_NLIB LG_PAGE
-/* Number of significant bits. */
-#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
-/* Number of levels in radix tree. */
-#if RTREE_NSB <= 10
-# define RTREE_HEIGHT 1
-#elif RTREE_NSB <= 36
-# define RTREE_HEIGHT 2
-#elif RTREE_NSB <= 52
-# define RTREE_HEIGHT 3
-#else
-# error Unsupported number of significant virtual address bits
-#endif
-/* Use compact leaf representation if virtual address encoding allows. */
-#if RTREE_NHIB >= LG_CEIL(SC_NSIZES)
-# define RTREE_LEAF_COMPACT
-#endif
-
-/* Needed for initialization only. */
-#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
-
-typedef struct rtree_node_elm_s rtree_node_elm_t;
-struct rtree_node_elm_s {
- atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
-};
-
-struct rtree_leaf_elm_s {
-#ifdef RTREE_LEAF_COMPACT
- /*
- * Single pointer-width field containing all three leaf element fields.
- * For example, on a 64-bit x64 system with 48 significant virtual
- * memory address bits, the index, extent, and slab fields are packed as
- * such:
- *
- * x: index
- * e: extent
- * b: slab
- *
- * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
- */
- atomic_p_t le_bits;
-#else
- atomic_p_t le_extent; /* (extent_t *) */
- atomic_u_t le_szind; /* (szind_t) */
- atomic_b_t le_slab; /* (bool) */
-#endif
-};
-
-typedef struct rtree_level_s rtree_level_t;
-struct rtree_level_s {
- /* Number of key bits distinguished by this level. */
- unsigned bits;
- /*
- * Cumulative number of key bits distinguished by traversing to
- * corresponding tree level.
- */
- unsigned cumbits;
-};
-
+/* Number of high insignificant bits. */
+#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
+/* Number of low insigificant bits. */
+#define RTREE_NLIB LG_PAGE
+/* Number of significant bits. */
+#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
+/* Number of levels in radix tree. */
+#if RTREE_NSB <= 10
+# define RTREE_HEIGHT 1
+#elif RTREE_NSB <= 36
+# define RTREE_HEIGHT 2
+#elif RTREE_NSB <= 52
+# define RTREE_HEIGHT 3
+#else
+# error Unsupported number of significant virtual address bits
+#endif
+/* Use compact leaf representation if virtual address encoding allows. */
+#if RTREE_NHIB >= LG_CEIL(SC_NSIZES)
+# define RTREE_LEAF_COMPACT
+#endif
+
+/* Needed for initialization only. */
+#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
+
+typedef struct rtree_node_elm_s rtree_node_elm_t;
+struct rtree_node_elm_s {
+ atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
+};
+
+struct rtree_leaf_elm_s {
+#ifdef RTREE_LEAF_COMPACT
+ /*
+ * Single pointer-width field containing all three leaf element fields.
+ * For example, on a 64-bit x64 system with 48 significant virtual
+ * memory address bits, the index, extent, and slab fields are packed as
+ * such:
+ *
+ * x: index
+ * e: extent
+ * b: slab
+ *
+ * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
+ */
+ atomic_p_t le_bits;
+#else
+ atomic_p_t le_extent; /* (extent_t *) */
+ atomic_u_t le_szind; /* (szind_t) */
+ atomic_b_t le_slab; /* (bool) */
+#endif
+};
+
+typedef struct rtree_level_s rtree_level_t;
+struct rtree_level_s {
+ /* Number of key bits distinguished by this level. */
+ unsigned bits;
+ /*
+ * Cumulative number of key bits distinguished by traversing to
+ * corresponding tree level.
+ */
+ unsigned cumbits;
+};
+
typedef struct rtree_s rtree_t;
-struct rtree_s {
- malloc_mutex_t init_lock;
- /* Number of elements based on rtree_levels[0].bits. */
-#if RTREE_HEIGHT > 1
- rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
-#else
- rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
-#endif
-};
+struct rtree_s {
+ malloc_mutex_t init_lock;
+ /* Number of elements based on rtree_levels[0].bits. */
+#if RTREE_HEIGHT > 1
+ rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
+#else
+ rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
+#endif
+};
/*
- * Split the bits into one to three partitions depending on number of
- * significant bits. It the number of bits does not divide evenly into the
- * number of levels, place one remainder bit per level starting at the leaf
- * level.
+ * Split the bits into one to three partitions depending on number of
+ * significant bits. It the number of bits does not divide evenly into the
+ * number of levels, place one remainder bit per level starting at the leaf
+ * level.
*/
-static const rtree_level_t rtree_levels[] = {
-#if RTREE_HEIGHT == 1
- {RTREE_NSB, RTREE_NHIB + RTREE_NSB}
-#elif RTREE_HEIGHT == 2
- {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
- {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
-#elif RTREE_HEIGHT == 3
- {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
- {RTREE_NSB/3 + RTREE_NSB%3/2,
- RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
- {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
-#else
-# error Unsupported rtree height
+static const rtree_level_t rtree_levels[] = {
+#if RTREE_HEIGHT == 1
+ {RTREE_NSB, RTREE_NHIB + RTREE_NSB}
+#elif RTREE_HEIGHT == 2
+ {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
+ {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
+#elif RTREE_HEIGHT == 3
+ {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
+ {RTREE_NSB/3 + RTREE_NSB%3/2,
+ RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
+ {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
+#else
+# error Unsupported rtree height
+#endif
+};
+
+bool rtree_new(rtree_t *rtree, bool zeroed);
+
+typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
+extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
+
+typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
+extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
+
+typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
+extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
+
+typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
+extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
+#ifdef JEMALLOC_JET
+void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
+#endif
+rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
+
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_leafkey(uintptr_t key) {
+ unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
+ unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
+ rtree_levels[RTREE_HEIGHT-1].bits);
+ unsigned maskbits = ptrbits - cumbits;
+ uintptr_t mask = ~((ZU(1) << maskbits) - 1);
+ return (key & mask);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+rtree_cache_direct_map(uintptr_t key) {
+ unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
+ unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
+ rtree_levels[RTREE_HEIGHT-1].bits);
+ unsigned maskbits = ptrbits - cumbits;
+ return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
+}
+
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_subkey(uintptr_t key, unsigned level) {
+ unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
+ unsigned cumbits = rtree_levels[level].cumbits;
+ unsigned shiftbits = ptrbits - cumbits;
+ unsigned maskbits = rtree_levels[level].bits;
+ uintptr_t mask = (ZU(1) << maskbits) - 1;
+ return ((key >> shiftbits) & mask);
+}
+
+/*
+ * Atomic getters.
+ *
+ * dependent: Reading a value on behalf of a pointer to a valid allocation
+ * is guaranteed to be a clean read even without synchronization,
+ * because the rtree update became visible in memory before the
+ * pointer came into existence.
+ * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
+ * dependent on a previous rtree write, which means a stale read
+ * could result if synchronization were omitted here.
+ */
+# ifdef RTREE_LEAF_COMPACT
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, bool dependent) {
+ return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
+ ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
+}
+
+JEMALLOC_ALWAYS_INLINE extent_t *
+rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
+# ifdef __aarch64__
+ /*
+ * aarch64 doesn't sign extend the highest virtual address bit to set
+ * the higher ones. Instead, the high bits gets zeroed.
+ */
+ uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
+ /* Mask off the slab bit. */
+ uintptr_t low_bit_mask = ~(uintptr_t)1;
+ uintptr_t mask = high_bit_mask & low_bit_mask;
+ return (extent_t *)(bits & mask);
+# else
+ /* Restore sign-extended high bits, mask slab bit. */
+ return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
+ RTREE_NHIB) & ~((uintptr_t)0x1));
+# endif
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
+ return (szind_t)(bits >> LG_VADDR);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
+ return (bool)(bits & (uintptr_t)0x1);
+}
+
+# endif
+
+JEMALLOC_ALWAYS_INLINE extent_t *
+rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, bool dependent) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+ return rtree_leaf_elm_bits_extent_get(bits);
+#else
+ extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
+ ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
+ return extent;
#endif
-};
-
-bool rtree_new(rtree_t *rtree, bool zeroed);
-
-typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
-extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
-
-typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
-extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
-
-typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
-extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
-
-typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
-extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
-#ifdef JEMALLOC_JET
-void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, bool dependent) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+ return rtree_leaf_elm_bits_szind_get(bits);
+#else
+ return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
+ : ATOMIC_ACQUIRE);
#endif
-rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
- rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
-
-JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_leafkey(uintptr_t key) {
- unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
- unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
- rtree_levels[RTREE_HEIGHT-1].bits);
- unsigned maskbits = ptrbits - cumbits;
- uintptr_t mask = ~((ZU(1) << maskbits) - 1);
- return (key & mask);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, bool dependent) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+ return rtree_leaf_elm_bits_slab_get(bits);
+#else
+ return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
+ ATOMIC_ACQUIRE);
+#endif
}
-JEMALLOC_ALWAYS_INLINE size_t
-rtree_cache_direct_map(uintptr_t key) {
- unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
- unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
- rtree_levels[RTREE_HEIGHT-1].bits);
- unsigned maskbits = ptrbits - cumbits;
- return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
-}
-
-JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_subkey(uintptr_t key, unsigned level) {
- unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
- unsigned cumbits = rtree_levels[level].cumbits;
- unsigned shiftbits = ptrbits - cumbits;
- unsigned maskbits = rtree_levels[level].bits;
- uintptr_t mask = (ZU(1) << maskbits) - 1;
- return ((key >> shiftbits) & mask);
-}
-
-/*
- * Atomic getters.
- *
- * dependent: Reading a value on behalf of a pointer to a valid allocation
- * is guaranteed to be a clean read even without synchronization,
- * because the rtree update became visible in memory before the
- * pointer came into existence.
- * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
- * dependent on a previous rtree write, which means a stale read
- * could result if synchronization were omitted here.
- */
-# ifdef RTREE_LEAF_COMPACT
-JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
- return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
- ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
-}
-
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
-# ifdef __aarch64__
- /*
- * aarch64 doesn't sign extend the highest virtual address bit to set
- * the higher ones. Instead, the high bits gets zeroed.
- */
- uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
- /* Mask off the slab bit. */
- uintptr_t low_bit_mask = ~(uintptr_t)1;
- uintptr_t mask = high_bit_mask & low_bit_mask;
- return (extent_t *)(bits & mask);
-# else
- /* Restore sign-extended high bits, mask slab bit. */
- return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
- RTREE_NHIB) & ~((uintptr_t)0x1));
-# endif
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
- return (szind_t)(bits >> LG_VADDR);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
- return (bool)(bits & (uintptr_t)0x1);
-}
-
-# endif
-
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- return rtree_leaf_elm_bits_extent_get(bits);
-#else
- extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
- ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
- return extent;
+static inline void
+rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, extent_t *extent) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
+ uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
+ LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
+ | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
+ atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+#else
+ atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
#endif
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- return rtree_leaf_elm_bits_szind_get(bits);
+}
+
+static inline void
+rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, szind_t szind) {
+ assert(szind <= SC_NSIZES);
+
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
+ true);
+ uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
+ ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
+ (((uintptr_t)0x1 << LG_VADDR) - 1)) |
+ ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
+ atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
- return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
- : ATOMIC_ACQUIRE);
+ atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
#endif
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- return rtree_leaf_elm_bits_slab_get(bits);
-#else
- return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
- ATOMIC_ACQUIRE);
-#endif
-}
-
-static inline void
-rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, extent_t *extent) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
- uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
- LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
- | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
-#else
- atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
-#endif
-}
-
-static inline void
-rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, szind_t szind) {
- assert(szind <= SC_NSIZES);
-
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
- true);
- uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
- ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
- (((uintptr_t)0x1 << LG_VADDR) - 1)) |
- ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
-#else
- atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
-#endif
-}
-
-static inline void
-rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool slab) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
- true);
- uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
- LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
- (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
-#else
- atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
-#endif
-}
-
-static inline void
-rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
- ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
- ((uintptr_t)slab);
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
-#else
- rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
- rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
- /*
- * Write extent last, since the element is atomically considered valid
- * as soon as the extent field is non-NULL.
- */
- rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
-#endif
-}
-
-static inline void
-rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
- assert(!slab || szind < SC_NBINS);
-
- /*
- * The caller implicitly assures that it is the only writer to the szind
- * and slab fields, and that the extent field cannot currently change.
- */
- rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
- rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
-rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, bool init_missing) {
- assert(key != 0);
- assert(!dependent || !init_missing);
-
- size_t slot = rtree_cache_direct_map(key);
- uintptr_t leafkey = rtree_leafkey(key);
- assert(leafkey != RTREE_LEAFKEY_INVALID);
-
- /* Fast path: L1 direct mapped cache. */
- if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
- rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
- assert(leaf != NULL);
- uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
- return &leaf[subkey];
- }
- /*
- * Search the L2 LRU cache. On hit, swap the matching element into the
- * slot in L1 cache, and move the position in L2 up by 1.
- */
-#define RTREE_CACHE_CHECK_L2(i) do { \
- if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
- rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
- assert(leaf != NULL); \
- if (i > 0) { \
- /* Bubble up by one. */ \
- rtree_ctx->l2_cache[i].leafkey = \
- rtree_ctx->l2_cache[i - 1].leafkey; \
- rtree_ctx->l2_cache[i].leaf = \
- rtree_ctx->l2_cache[i - 1].leaf; \
- rtree_ctx->l2_cache[i - 1].leafkey = \
- rtree_ctx->cache[slot].leafkey; \
- rtree_ctx->l2_cache[i - 1].leaf = \
- rtree_ctx->cache[slot].leaf; \
- } else { \
- rtree_ctx->l2_cache[0].leafkey = \
- rtree_ctx->cache[slot].leafkey; \
- rtree_ctx->l2_cache[0].leaf = \
- rtree_ctx->cache[slot].leaf; \
- } \
- rtree_ctx->cache[slot].leafkey = leafkey; \
- rtree_ctx->cache[slot].leaf = leaf; \
- uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
- return &leaf[subkey]; \
- } \
-} while (0)
- /* Check the first cache entry. */
- RTREE_CACHE_CHECK_L2(0);
- /* Search the remaining cache elements. */
- for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
- RTREE_CACHE_CHECK_L2(i);
- }
-#undef RTREE_CACHE_CHECK_L2
-
- return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
- dependent, init_missing);
-}
-
-static inline bool
-rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
- extent_t *extent, szind_t szind, bool slab) {
- /* Use rtree_clear() to set the extent to NULL. */
- assert(extent != NULL);
-
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, false, true);
- if (elm == NULL) {
- return true;
- }
-
- assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
- rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
-
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
-rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
- bool dependent) {
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, dependent, false);
- if (!dependent && elm == NULL) {
- return NULL;
- }
- assert(elm != NULL);
- return elm;
-}
-
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
- return NULL;
+}
+
+static inline void
+rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, bool slab) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
+ true);
+ uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
+ LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
+ (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
+ atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+#else
+ atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
+#endif
+}
+
+static inline void
+rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
+ ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
+ ((uintptr_t)slab);
+ atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+#else
+ rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
+ rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
+ /*
+ * Write extent last, since the element is atomically considered valid
+ * as soon as the extent field is non-NULL.
+ */
+ rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
+#endif
+}
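
For readers following the RTREE_LEAF_COMPACT branches above: the three logical fields are packed into a single word as (szind << LG_VADDR) | (extent & ((1 << LG_VADDR) - 1)) | slab, relying on extent pointers fitting in LG_VADDR bits and being at least 2-byte aligned so bit 0 is free for the slab flag. Below is a minimal standalone sketch of that packing; LG_VADDR is hard-coded to 48 purely for illustration (it is platform-configured in the real build), and the sign extension the real rtree_leaf_elm_bits_extent_get() performs is deliberately omitted.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define LG_VADDR 48 /* assumption for illustration; configured per platform */

/* Pack a (pointer, size-class index, slab flag) triple into one word. */
static uintptr_t
leaf_bits_pack(void *extent, unsigned szind, bool slab) {
    return ((uintptr_t)szind << LG_VADDR) |
        ((uintptr_t)extent & (((uintptr_t)1 << LG_VADDR) - 1)) |
        (uintptr_t)slab;
}

static unsigned
leaf_bits_szind(uintptr_t bits) {
    return (unsigned)(bits >> LG_VADDR);
}

static void *
leaf_bits_extent(uintptr_t bits) {
    /* Mask off szind (high bits) and the slab flag (bit 0). */
    return (void *)(bits &
        ((((uintptr_t)1 << LG_VADDR) - 1) & ~(uintptr_t)1));
}

static bool
leaf_bits_slab(uintptr_t bits) {
    return (bool)(bits & 1);
}

int
main(void) {
    void *extent = (void *)0x7f1234567800; /* 8-byte aligned, below 2^48 */
    uintptr_t bits = leaf_bits_pack(extent, 42, true);
    assert(leaf_bits_szind(bits) == 42);
    assert(leaf_bits_extent(bits) == extent);
    assert(leaf_bits_slab(bits));
    return 0;
}
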
+
+static inline void
+rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
+ assert(!slab || szind < SC_NBINS);
+
+ /*
+ * The caller implicitly assures that it is the only writer to the szind
+ * and slab fields, and that the extent field cannot currently change.
+ */
+ rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
+ rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
+rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent, bool init_missing) {
+ assert(key != 0);
+ assert(!dependent || !init_missing);
+
+ size_t slot = rtree_cache_direct_map(key);
+ uintptr_t leafkey = rtree_leafkey(key);
+ assert(leafkey != RTREE_LEAFKEY_INVALID);
+
+ /* Fast path: L1 direct mapped cache. */
+ if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
+ rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
+ assert(leaf != NULL);
+ uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
+ return &leaf[subkey];
}
- return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
- return SC_NSIZES;
+ /*
+ * Search the L2 LRU cache. On hit, swap the matching element into the
+ * slot in L1 cache, and move the position in L2 up by 1.
+ */
+#define RTREE_CACHE_CHECK_L2(i) do { \
+ if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
+ rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
+ assert(leaf != NULL); \
+ if (i > 0) { \
+ /* Bubble up by one. */ \
+ rtree_ctx->l2_cache[i].leafkey = \
+ rtree_ctx->l2_cache[i - 1].leafkey; \
+ rtree_ctx->l2_cache[i].leaf = \
+ rtree_ctx->l2_cache[i - 1].leaf; \
+ rtree_ctx->l2_cache[i - 1].leafkey = \
+ rtree_ctx->cache[slot].leafkey; \
+ rtree_ctx->l2_cache[i - 1].leaf = \
+ rtree_ctx->cache[slot].leaf; \
+ } else { \
+ rtree_ctx->l2_cache[0].leafkey = \
+ rtree_ctx->cache[slot].leafkey; \
+ rtree_ctx->l2_cache[0].leaf = \
+ rtree_ctx->cache[slot].leaf; \
+ } \
+ rtree_ctx->cache[slot].leafkey = leafkey; \
+ rtree_ctx->cache[slot].leaf = leaf; \
+ uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
+ return &leaf[subkey]; \
+ } \
+} while (0)
+ /* Check the first cache entry. */
+ RTREE_CACHE_CHECK_L2(0);
+ /* Search the remaining cache elements. */
+ for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
+ RTREE_CACHE_CHECK_L2(i);
+ }
+#undef RTREE_CACHE_CHECK_L2
+
+ return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
+ dependent, init_missing);
+}
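
rtree_leaf_elm_lookup() above combines a direct-mapped L1 (indexed by rtree_cache_direct_map(key)) with a small L2 that is scanned linearly; on an L2 hit the entry is promoted into the L1 slot while the displaced L1 entry is parked one position up in the L2, approximating LRU. A self-contained sketch of just that promotion policy, with integer keys and arbitrary cache sizes standing in for the real leafkey/leaf pairs:

#include <stdint.h>
#include <stdio.h>

#define NCACHE_L1 4 /* arbitrary sizes, for illustration only */
#define NCACHE_L2 4

typedef struct {
    uintptr_t key; /* 0 means empty */
    int value;
} cache_elm_t;

typedef struct {
    cache_elm_t l1[NCACHE_L1];
    cache_elm_t l2[NCACHE_L2];
} cache_t;

/* Direct-mapped L1 slot; stands in for rtree_cache_direct_map(). */
static size_t
slot_of(uintptr_t key) {
    return (size_t)(key % NCACHE_L1);
}

/* Returns the cached value, or -1 on a miss (the "hard" lookup path). */
static int
cache_lookup(cache_t *c, uintptr_t key) {
    size_t slot = slot_of(key);
    if (c->l1[slot].key == key) {
        return c->l1[slot].value; /* L1 hit */
    }
    for (size_t i = 0; i < NCACHE_L2; i++) {
        if (c->l2[i].key != key) {
            continue;
        }
        cache_elm_t hit = c->l2[i];
        if (i > 0) {
            /* Bubble up by one, then park the displaced L1 entry
             * in the freed L2 position. */
            c->l2[i] = c->l2[i - 1];
            c->l2[i - 1] = c->l1[slot];
        } else {
            c->l2[0] = c->l1[slot];
        }
        c->l1[slot] = hit; /* promote to L1 */
        return hit.value;
    }
    return -1; /* fall back to walking the tree */
}

int
main(void) {
    cache_t c = {0};
    c.l2[2] = (cache_elm_t){.key = 7, .value = 70};
    printf("%d\n", cache_lookup(&c, 7)); /* 70: L2 hit, promoted to L1 */
    printf("%d\n", cache_lookup(&c, 7)); /* 70: now an L1 hit */
    printf("%d\n", cache_lookup(&c, 9)); /* -1: miss */
    return 0;
}
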
+
+static inline bool
+rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
+ extent_t *extent, szind_t szind, bool slab) {
+ /* Use rtree_clear() to set the extent to NULL. */
+ assert(extent != NULL);
+
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
+ key, false, true);
+ if (elm == NULL) {
+ return true;
}
- return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
-}
-/*
- * rtree_slab_read() is intentionally omitted because slab is always read in
- * conjunction with szind, which makes rtree_szind_slab_read() a better choice.
- */
-
-JEMALLOC_ALWAYS_INLINE bool
-rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
- return true;
- }
- *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
- *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
- return false;
+ assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
+ rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
+
+ return false;
}
-
-/*
- * Try to read szind_slab from the L1 cache. Returns true on a hit,
- * and fills in r_szind and r_slab. Otherwise returns false.
- *
- * Key is allowed to be NULL in order to save an extra branch on the
- * fastpath. returns false in this case.
- */
-JEMALLOC_ALWAYS_INLINE bool
-rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, szind_t *r_szind, bool *r_slab) {
- rtree_leaf_elm_t *elm;
-
- size_t slot = rtree_cache_direct_map(key);
- uintptr_t leafkey = rtree_leafkey(key);
- assert(leafkey != RTREE_LEAFKEY_INVALID);
-
- if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
- rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
- assert(leaf != NULL);
- uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
- elm = &leaf[subkey];
-
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree,
- elm, true);
- *r_szind = rtree_leaf_elm_bits_szind_get(bits);
- *r_slab = rtree_leaf_elm_bits_slab_get(bits);
-#else
- *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true);
- *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true);
+
+JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
+rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
+ bool dependent) {
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
+ key, dependent, false);
+ if (!dependent && elm == NULL) {
+ return NULL;
+ }
+ assert(elm != NULL);
+ return elm;
+}
+
+JEMALLOC_ALWAYS_INLINE extent_t *
+rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent) {
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
+ dependent);
+ if (!dependent && elm == NULL) {
+ return NULL;
+ }
+ return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent) {
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
+ dependent);
+ if (!dependent && elm == NULL) {
+ return SC_NSIZES;
+ }
+ return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+}
+
+/*
+ * rtree_slab_read() is intentionally omitted because slab is always read in
+ * conjunction with szind, which makes rtree_szind_slab_read() a better choice.
+ */
+
+JEMALLOC_ALWAYS_INLINE bool
+rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
+ dependent);
+ if (!dependent && elm == NULL) {
+ return true;
+ }
+ *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
+ *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+ return false;
+}

+
+/*
+ * Try to read szind_slab from the L1 cache. Returns true on a hit,
+ * and fills in r_szind and r_slab. Otherwise returns false.
+ *
+ * Key is allowed to be NULL in order to save an extra branch on the
+ * fast path; it returns false in this case.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, szind_t *r_szind, bool *r_slab) {
+ rtree_leaf_elm_t *elm;
+
+ size_t slot = rtree_cache_direct_map(key);
+ uintptr_t leafkey = rtree_leafkey(key);
+ assert(leafkey != RTREE_LEAFKEY_INVALID);
+
+ if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
+ rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
+ assert(leaf != NULL);
+ uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
+ elm = &leaf[subkey];
+
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree,
+ elm, true);
+ *r_szind = rtree_leaf_elm_bits_szind_get(bits);
+ *r_slab = rtree_leaf_elm_bits_slab_get(bits);
+#else
+ *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true);
+ *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true);
#endif
- return true;
- } else {
- return false;
- }
-}
-JEMALLOC_ALWAYS_INLINE bool
-rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
- return true;
- }
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- *r_szind = rtree_leaf_elm_bits_szind_get(bits);
- *r_slab = rtree_leaf_elm_bits_slab_get(bits);
-#else
- *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
- *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
-#endif
- return false;
-}
-
-static inline void
-rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, szind_t szind, bool slab) {
- assert(!slab || szind < SC_NBINS);
-
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
- rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
-}
-
-static inline void
-rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
- assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
- NULL);
- rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
-}
-
-#endif /* JEMALLOC_INTERNAL_RTREE_H */
+ return true;
+ } else {
+ return false;
+ }
+}
+JEMALLOC_ALWAYS_INLINE bool
+rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
+ dependent);
+ if (!dependent && elm == NULL) {
+ return true;
+ }
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+ *r_szind = rtree_leaf_elm_bits_szind_get(bits);
+ *r_slab = rtree_leaf_elm_bits_slab_get(bits);
+#else
+ *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+ *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
+#endif
+ return false;
+}
+
+static inline void
+rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, szind_t szind, bool slab) {
+ assert(!slab || szind < SC_NBINS);
+
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
+ rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
+}
+
+static inline void
+rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key) {
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
+ assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
+ NULL);
+ rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
+}
+
+#endif /* JEMALLOC_INTERNAL_RTREE_H */
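
Taking the header as a whole, the rtree maps page-aligned pointer keys to extent metadata by consuming a fixed number of key bits per tree level; rtree_subkey(key, RTREE_HEIGHT-1) in the lookups above extracts the leaf-level chunk. The toy sketch below illustrates that splitting under an assumed geometry (48 significant address bits, 4 KiB pages, three equal 12-bit levels); the real tree height and per-level widths come from the build configuration and usually differ.

#include <stdint.h>
#include <stdio.h>

/* Illustrative geometry: 48 significant address bits, 4 KiB pages, so
 * 48 - 12 = 36 key bits split across three levels of 12 bits each. */
#define LG_VADDR_EX     48
#define LG_PAGE_EX      12
#define HEIGHT_EX       3
#define BITS_PER_LEVEL  ((LG_VADDR_EX - LG_PAGE_EX) / HEIGHT_EX)

static uintptr_t
subkey(uintptr_t key, unsigned level) {
    unsigned shift = LG_VADDR_EX - (level + 1) * BITS_PER_LEVEL;
    uintptr_t mask = ((uintptr_t)1 << BITS_PER_LEVEL) - 1;
    return (key >> shift) & mask;
}

int
main(void) {
    /* A page-aligned key, as the rtree always sees. */
    uintptr_t key = (uintptr_t)0x00007f12345678d0 & ~(uintptr_t)0xfff;
    for (unsigned level = 0; level < HEIGHT_EX; level++) {
        printf("level %u index: %#lx\n", level,
            (unsigned long)subkey(key, level));
    }
    return 0;
}
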
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/rtree_tsd.h b/contrib/libs/jemalloc/include/jemalloc/internal/rtree_tsd.h
index 562e29297a..642c89b71d 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/rtree_tsd.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/rtree_tsd.h
@@ -1,50 +1,50 @@
-#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
-#define JEMALLOC_INTERNAL_RTREE_CTX_H
-
-/*
- * Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. Each
- * entry supports an entire leaf, so the cache hit rate is typically high even
- * with a small number of entries. In rare cases extent activity will straddle
- * the boundary between two leaf nodes. Furthermore, an arena may use a
- * combination of dss and mmap. Note that as memory usage grows past the amount
- * that this cache can directly cover, the cache will become less effective if
- * locality of reference is low, but the consequence is merely cache misses
- * while traversing the tree nodes.
- *
- * The L1 direct mapped cache offers consistent and low cost on cache hit.
- * However collision could affect hit rate negatively. This is resolved by
- * combining with a L2 LRU cache, which requires linear search and re-ordering
- * on access but suffers no collision. Note that, the cache will itself suffer
- * cache misses if made overly large, plus the cost of linear search in the LRU
- * cache.
- */
-#define RTREE_CTX_LG_NCACHE 4
-#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
-#define RTREE_CTX_NCACHE_L2 8
-
-/*
- * Zero initializer required for tsd initialization only. Proper initialization
- * done via rtree_ctx_data_init().
- */
-#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}}
-
-
-typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
-
-typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
-struct rtree_ctx_cache_elm_s {
- uintptr_t leafkey;
- rtree_leaf_elm_t *leaf;
-};
-
-typedef struct rtree_ctx_s rtree_ctx_t;
-struct rtree_ctx_s {
- /* Direct mapped cache. */
- rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
- /* L2 LRU cache. */
- rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
-};
-
-void rtree_ctx_data_init(rtree_ctx_t *ctx);
-
-#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
+#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
+#define JEMALLOC_INTERNAL_RTREE_CTX_H
+
+/*
+ * Number of leafkey/leaf pairs to cache in the L1 and L2 levels, respectively. Each
+ * entry supports an entire leaf, so the cache hit rate is typically high even
+ * with a small number of entries. In rare cases extent activity will straddle
+ * the boundary between two leaf nodes. Furthermore, an arena may use a
+ * combination of dss and mmap. Note that as memory usage grows past the amount
+ * that this cache can directly cover, the cache will become less effective if
+ * locality of reference is low, but the consequence is merely cache misses
+ * while traversing the tree nodes.
+ *
+ * The L1 direct mapped cache offers consistent and low cost on a cache hit.
+ * However, collisions can affect the hit rate negatively. This is resolved by
+ * combining it with an L2 LRU cache, which requires linear search and re-ordering
+ * on access but suffers no collisions. Note that the cache will itself suffer
+ * cache misses if made overly large, plus the cost of linear search in the LRU
+ * cache.
+ */
+#define RTREE_CTX_LG_NCACHE 4
+#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
+#define RTREE_CTX_NCACHE_L2 8
+
+/*
+ * Zero initializer required for tsd initialization only. Proper initialization
+ * done via rtree_ctx_data_init().
+ */
+#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}}
+
+
+typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
+
+typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
+struct rtree_ctx_cache_elm_s {
+ uintptr_t leafkey;
+ rtree_leaf_elm_t *leaf;
+};
+
+typedef struct rtree_ctx_s rtree_ctx_t;
+struct rtree_ctx_s {
+ /* Direct mapped cache. */
+ rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
+ /* L2 LRU cache. */
+ rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
+};
+
+void rtree_ctx_data_init(rtree_ctx_t *ctx);
+
+#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
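
Given RTREE_CTX_NCACHE == 16 and RTREE_CTX_NCACHE_L2 == 8, and two pointer-sized fields per cache element, the per-thread rtree_ctx_t costs (16 + 8) * 16 = 384 bytes on a 64-bit target. A quick standalone check of that arithmetic, re-declaring equivalent structs locally instead of including the jemalloc headers:

#include <stdint.h>
#include <stdio.h>

#define RTREE_CTX_NCACHE    16
#define RTREE_CTX_NCACHE_L2 8

typedef struct {
    uintptr_t leafkey;
    void *leaf; /* rtree_leaf_elm_t * in the real header */
} ctx_cache_elm_t;

typedef struct {
    ctx_cache_elm_t cache[RTREE_CTX_NCACHE];        /* L1, direct mapped */
    ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];  /* L2, LRU */
} ctx_t;

int
main(void) {
    /* 384 bytes when pointers are 8 bytes wide and there is no padding. */
    printf("per-thread rtree cache: %zu bytes\n", sizeof(ctx_t));
    return 0;
}
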
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/safety_check.h b/contrib/libs/jemalloc/include/jemalloc/internal/safety_check.h
index 53339ac12f..8e48a8cdad 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/safety_check.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/safety_check.h
@@ -1,26 +1,26 @@
-#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
-#define JEMALLOC_INTERNAL_SAFETY_CHECK_H
-
-void safety_check_fail(const char *format, ...);
-/* Can set to NULL for a default. */
-void safety_check_set_abort(void (*abort_fn)());
-
-JEMALLOC_ALWAYS_INLINE void
-safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
- assert(usize < bumped_usize);
- for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
- *((unsigned char *)ptr + i) = 0xBC;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize)
-{
- for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
- if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) {
- safety_check_fail("Use after free error\n");
- }
- }
-}
-
-#endif /*JEMALLOC_INTERNAL_SAFETY_CHECK_H */
+#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
+#define JEMALLOC_INTERNAL_SAFETY_CHECK_H
+
+void safety_check_fail(const char *format, ...);
+/* Can set to NULL for a default. */
+void safety_check_set_abort(void (*abort_fn)());
+
+JEMALLOC_ALWAYS_INLINE void
+safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
+ assert(usize < bumped_usize);
+ for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
+ *((unsigned char *)ptr + i) = 0xBC;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize)
+{
+ for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
+ if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) {
+ safety_check_fail("Use after free error\n");
+ }
+ }
+}
+
+#endif /*JEMALLOC_INTERNAL_SAFETY_CHECK_H */
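
safety_check_set_redzone() above paints up to 32 bytes past the requested usize with 0xBC, and safety_check_verify_redzone() re-checks that pattern so a write past usize is reported via safety_check_fail(). The standalone sketch below re-implements the same two loops so it builds without the jemalloc internals; returning 0/1 stands in for calling safety_check_fail():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
    for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
        ((unsigned char *)ptr)[i] = 0xBC;
    }
}

static int
verify_redzone(const void *ptr, size_t usize, size_t bumped_usize) {
    for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
        if (((const unsigned char *)ptr)[i] != 0xBC) {
            return 0; /* corrupted: would call safety_check_fail() */
        }
    }
    return 1;
}

int
main(void) {
    size_t usize = 24, bumped_usize = 32; /* request bumped to next size class */
    unsigned char *p = malloc(bumped_usize);
    if (p == NULL) {
        return 1;
    }
    set_redzone(p, usize, bumped_usize);

    memset(p, 0, usize); /* in-bounds writes are fine */
    printf("clean: %d\n", verify_redzone(p, usize, bumped_usize));          /* 1 */

    p[usize] = 0; /* one-byte overflow into the redzone */
    printf("after overflow: %d\n", verify_redzone(p, usize, bumped_usize)); /* 0 */
    free(p);
    return 0;
}
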
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/sc.h b/contrib/libs/jemalloc/include/jemalloc/internal/sc.h
index 9a099d8b64..5a16bb8213 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/sc.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/sc.h
@@ -1,333 +1,333 @@
-#ifndef JEMALLOC_INTERNAL_SC_H
-#define JEMALLOC_INTERNAL_SC_H
-
-#include "jemalloc/internal/jemalloc_internal_types.h"
-
-/*
- * Size class computations:
- *
- * These are a little tricky; we'll first start by describing how things
- * generally work, and then describe some of the details.
- *
- * Ignore the first few size classes for a moment. We can then split all the
- * remaining size classes into groups. The size classes in a group are spaced
- * such that they cover allocation request sizes in a power-of-2 range. The
- * power of two is called the base of the group, and the size classes in it
- * satisfy allocations in the half-open range (base, base * 2]. There are
- * SC_NGROUP size classes in each group, equally spaced in the range, so that
- * each one covers allocations for base / SC_NGROUP possible allocation sizes.
- * We call that value (base / SC_NGROUP) the delta of the group. Each size class
- * is delta larger than the one before it (including the initial size class in a
- * group, which is delta larger than base, the largest size class in the
- * previous group).
- * To make the math all work out nicely, we require that SC_NGROUP is a power of
- * two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of
- * lg_base and lg_delta. For each of these groups then, we have that
- * lg_delta == lg_base - SC_LG_NGROUP.
- * The size classes in a group with a given lg_base and lg_delta (which, recall,
- * can be computed from lg_base for these groups) are therefore:
- * base + 1 * delta
- * which covers allocations in (base, base + 1 * delta]
- * base + 2 * delta
- * which covers allocations in (base + 1 * delta, base + 2 * delta].
- * base + 3 * delta
- * which covers allocations in (base + 2 * delta, base + 3 * delta].
- * ...
- * base + SC_NGROUP * delta ( == 2 * base)
- * which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base].
- * (Note that currently SC_NGROUP is always 4, so the "..." is empty in
- * practice.)
- * Note that the last size class in the group is the next power of two (after
- * base), so that we've set up the induction correctly for the next group's
- * selection of delta.
- *
- * Now, let's start considering the first few size classes. Two extra constants
- * come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures
- * correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger
- * are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we
- * never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the
- * highest required alignment of a platform. For allocation sizes smaller than
- * (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support
- * platforms with types with alignment larger than their size). To allow such
- * allocations (without wasting space unnecessarily), we introduce tiny size
- * classes; one per power of two, up until we hit the quantum size. There are
- * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes.
- *
- * Next, we have a size class of size (1 << LG_QUANTUM). This can't be the
- * start of a group in the sense we described above (covering a power of two
- * range) since, if we divided into it to pick a value of delta, we'd get a
- * delta smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which
- * is against the rules.
- *
- * The first base we can divide by SC_NGROUP while still being at least
- * (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by
- * having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size
- * classes are:
- * 1 * (1 << LG_QUANTUM)
- * 2 * (1 << LG_QUANTUM)
- * 3 * (1 << LG_QUANTUM)
- * ... (although, as above, this "..." is empty in practice)
- * SC_NGROUP * (1 << LG_QUANTUM).
- *
- * There are SC_NGROUP of these size classes, so we can regard it as a sort of
- * pseudo-group, even though it spans multiple powers of 2, is divided
- * differently, and both starts and ends on a power of 2 (as opposed to just
- * ending). SC_NGROUP is itself a power of two, so the first group after the
- * pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a
- * lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP
- * sizes without violating our LG_QUANTUM requirements, so we can safely set
- * lg_delta = lg_base - SC_LG_GROUP (== LG_QUANTUM).
- *
- * So, in order, the size classes are:
- *
- * Tiny size classes:
- * - Count: LG_QUANTUM - SC_LG_TINY_MIN.
- * - Sizes:
- * 1 << SC_LG_TINY_MIN
- * 1 << (SC_LG_TINY_MIN + 1)
- * 1 << (SC_LG_TINY_MIN + 2)
- * ...
- * 1 << (LG_QUANTUM - 1)
- *
- * Initial pseudo-group:
- * - Count: SC_NGROUP
- * - Sizes:
- * 1 * (1 << LG_QUANTUM)
- * 2 * (1 << LG_QUANTUM)
- * 3 * (1 << LG_QUANTUM)
- * ...
- * SC_NGROUP * (1 << LG_QUANTUM)
- *
- * Regular group 0:
- * - Count: SC_NGROUP
- * - Sizes:
- * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of
- * lg_base - SC_LG_NGROUP)
- * (1 << lg_base) + 1 * (1 << lg_delta)
- * (1 << lg_base) + 2 * (1 << lg_delta)
- * (1 << lg_base) + 3 * (1 << lg_delta)
- * ...
- * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
- *
- * Regular group 1:
- * - Count: SC_NGROUP
- * - Sizes:
- * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of
- * lg_base - SC_LG_NGROUP)
- * (1 << lg_base) + 1 * (1 << lg_delta)
- * (1 << lg_base) + 2 * (1 << lg_delta)
- * (1 << lg_base) + 3 * (1 << lg_delta)
- * ...
- * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
- *
- * ...
- *
- * Regular group N:
- * - Count: SC_NGROUP
- * - Sizes:
- * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of
- * lg_base - SC_LG_NGROUP)
- * (1 << lg_base) + 1 * (1 << lg_delta)
- * (1 << lg_base) + 2 * (1 << lg_delta)
- * (1 << lg_base) + 3 * (1 << lg_delta)
- * ...
- * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
- *
- *
- * Representation of metadata:
- * To make the math easy, we'll mostly work in lg quantities. We record lg_base,
- * lg_delta, and ndelta (i.e. number of deltas above the base) on a
- * per-size-class basis, and maintain the invariant that, across all size
- * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
- *
- * For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP),
- * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
- *
- * For the initial tiny size classes (if any), lg_base is lg(size class size).
- * lg_delta is lg_base for the first size class, and lg_base - 1 for all
- * subsequent ones. ndelta is always 0.
- *
- * For the pseudo-group, if there are no tiny size classes, then we set
- * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
- * to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta
- * is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do
- * indeed get a power of two that way). If there *are* tiny size classes, then
- * the first size class needs to have lg_delta relative to the largest tiny size
- * class. We therefore set lg_base == LG_QUANTUM - 1,
- * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
- * pseudo-group the same.
- *
- *
- * Other terminology:
- * "Small" size classes mean those that are allocated out of bins, which is the
- * same as those that are slab allocated.
- * "Large" size classes are those that are not small. The cutoff for counting as
- * large is page size * group size.
- */
-
-/*
- * Size class N + (1 << SC_LG_NGROUP) is twice the size of size class N.
- */
-#define SC_LG_NGROUP 2
-#define SC_LG_TINY_MIN 3
-
-#if SC_LG_TINY_MIN == 0
-/* The div module doesn't support division by 1, which this would require. */
-#error "Unsupported LG_TINY_MIN"
-#endif
-
-/*
- * The definitions below are all determined by the above settings and system
- * characteristics.
- */
-#define SC_NGROUP (1ULL << SC_LG_NGROUP)
-#define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
-#define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
-#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)
-#define SC_NPSEUDO SC_NGROUP
-#define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
-/*
- * We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base
- * we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1
- * size class shorter than the others).
- * We could probably save some space in arenas by capping this at LG_VADDR size.
- */
-#define SC_LG_BASE_MAX (SC_PTR_BITS - 2)
-#define SC_NREGULAR (SC_NGROUP * \
- (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
-#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)
-
-/* The number of size classes that are a multiple of the page size. */
-#define SC_NPSIZES ( \
- /* Start with all the size classes. */ \
- SC_NSIZES \
- /* Subtract out those groups with too small a base. */ \
- - (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP \
- /* And the pseudo-group. */ \
- - SC_NPSEUDO \
- /* And the tiny group. */ \
- - SC_NTINY \
- /* Sizes where ndelta*delta is not a multiple of the page size. */ \
- - (SC_LG_NGROUP * SC_NGROUP))
-/*
- * Note that the last line is computed as the sum of the second column in the
- * following table:
- * lg(base) | count of sizes to exclude
- * ------------------------------|-----------------------------
- * LG_PAGE - 1 | SC_NGROUP - 1
- * LG_PAGE | SC_NGROUP - 1
- * LG_PAGE + 1 | SC_NGROUP - 2
- * LG_PAGE + 2 | SC_NGROUP - 4
- * ... | ...
- * LG_PAGE + (SC_LG_NGROUP - 1) | SC_NGROUP - (SC_NGROUP / 2)
- */
-
-/*
- * We declare a size class is binnable if size < page size * group. Or, in other
- * words, lg(size) < lg(page size) + lg(group size).
- */
-#define SC_NBINS ( \
- /* Sub-regular size classes. */ \
- SC_NTINY + SC_NPSEUDO \
- /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \
- + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \
- /* Last SC of the last group hits the bound exactly; exclude it. */ \
- - 1)
-
-/*
- * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
- * cannot support more than 256 small size classes.
- */
-#if (SC_NBINS > 256)
-# error "Too many small size classes"
-#endif
-
-/* The largest size class in the lookup table. */
-#define SC_LOOKUP_MAXCLASS ((size_t)1 << 12)
-
-/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
-#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1))
-#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1))
-
-/* The largest size class allocated out of a slab. */
-#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
- + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
-
-/* The smallest size class not allocated out of a slab. */
-#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
-#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
-
-/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */
-#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2))
-#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))
-
-/* The largest size class supported. */
-#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)
-
-typedef struct sc_s sc_t;
-struct sc_s {
- /* Size class index, or -1 if not a valid size class. */
- int index;
- /* Lg group base size (no deltas added). */
- int lg_base;
- /* Lg delta to previous size class. */
- int lg_delta;
- /* Delta multiplier. size == 1<<lg_base + ndelta<<lg_delta */
- int ndelta;
- /*
- * True if the size class is a multiple of the page size, false
- * otherwise.
- */
- bool psz;
- /*
- * True if the size class is a small, bin, size class. False otherwise.
- */
- bool bin;
- /* The slab page count if a small bin size class, 0 otherwise. */
- int pgs;
- /* Same as lg_delta if a lookup table size class, 0 otherwise. */
- int lg_delta_lookup;
-};
-
-typedef struct sc_data_s sc_data_t;
-struct sc_data_s {
- /* Number of tiny size classes. */
- unsigned ntiny;
- /* Number of bins supported by the lookup table. */
- int nlbins;
- /* Number of small size class bins. */
- int nbins;
- /* Number of size classes. */
- int nsizes;
- /* Number of bits required to store NSIZES. */
- int lg_ceil_nsizes;
- /* Number of size classes that are a multiple of (1U << LG_PAGE). */
- unsigned npsizes;
- /* Lg of maximum tiny size class (or -1, if none). */
- int lg_tiny_maxclass;
- /* Maximum size class included in lookup table. */
- size_t lookup_maxclass;
- /* Maximum small size class. */
- size_t small_maxclass;
- /* Lg of minimum large size class. */
- int lg_large_minclass;
- /* The minimum large size class. */
- size_t large_minclass;
- /* Maximum (large) size class. */
- size_t large_maxclass;
- /* True if the sc_data_t has been initialized (for debugging only). */
- bool initialized;
-
- sc_t sc[SC_NSIZES];
-};
-
-void sc_data_init(sc_data_t *data);
-/*
- * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
- * Otherwise, does its best to accommodate the request.
- */
-void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
- int pgs);
-void sc_boot(sc_data_t *data);
-
-#endif /* JEMALLOC_INTERNAL_SC_H */
+#ifndef JEMALLOC_INTERNAL_SC_H
+#define JEMALLOC_INTERNAL_SC_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+
+/*
+ * Size class computations:
+ *
+ * These are a little tricky; we'll first start by describing how things
+ * generally work, and then describe some of the details.
+ *
+ * Ignore the first few size classes for a moment. We can then split all the
+ * remaining size classes into groups. The size classes in a group are spaced
+ * such that they cover allocation request sizes in a power-of-2 range. The
+ * power of two is called the base of the group, and the size classes in it
+ * satisfy allocations in the half-open range (base, base * 2]. There are
+ * SC_NGROUP size classes in each group, equally spaced in the range, so that
+ * each one covers allocations for base / SC_NGROUP possible allocation sizes.
+ * We call that value (base / SC_NGROUP) the delta of the group. Each size class
+ * is delta larger than the one before it (including the initial size class in a
+ * group, which is delta larger than base, the largest size class in the
+ * previous group).
+ * To make the math all work out nicely, we require that SC_NGROUP is a power of
+ * two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of
+ * lg_base and lg_delta. For each of these groups then, we have that
+ * lg_delta == lg_base - SC_LG_NGROUP.
+ * The size classes in a group with a given lg_base and lg_delta (which, recall,
+ * can be computed from lg_base for these groups) are therefore:
+ * base + 1 * delta
+ * which covers allocations in (base, base + 1 * delta]
+ * base + 2 * delta
+ * which covers allocations in (base + 1 * delta, base + 2 * delta].
+ * base + 3 * delta
+ * which covers allocations in (base + 2 * delta, base + 3 * delta].
+ * ...
+ * base + SC_NGROUP * delta ( == 2 * base)
+ * which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base].
+ * (Note that currently SC_NGROUP is always 4, so the "..." is empty in
+ * practice.)
+ * Note that the last size class in the group is the next power of two (after
+ * base), so that we've set up the induction correctly for the next group's
+ * selection of delta.
+ *
+ * Now, let's start considering the first few size classes. Two extra constants
+ * come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures
+ * correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger
+ * are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we
+ * never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the
+ * highest required alignment of a platform. For allocation sizes smaller than
+ * (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support
+ * platforms with types with alignment larger than their size). To allow such
+ * allocations (without wasting space unnecessarily), we introduce tiny size
+ * classes; one per power of two, up until we hit the quantum size. There are
+ * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes.
+ *
+ * Next, we have a size class of size (1 << LG_QUANTUM). This can't be the
+ * start of a group in the sense we described above (covering a power of two
+ * range) since, if we divided into it to pick a value of delta, we'd get a
+ * delta smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which
+ * is against the rules.
+ *
+ * The first base we can divide by SC_NGROUP while still being at least
+ * (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by
+ * having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size
+ * classes are:
+ * 1 * (1 << LG_QUANTUM)
+ * 2 * (1 << LG_QUANTUM)
+ * 3 * (1 << LG_QUANTUM)
+ * ... (although, as above, this "..." is empty in practice)
+ * SC_NGROUP * (1 << LG_QUANTUM).
+ *
+ * There are SC_NGROUP of these size classes, so we can regard it as a sort of
+ * pseudo-group, even though it spans multiple powers of 2, is divided
+ * differently, and both starts and ends on a power of 2 (as opposed to just
+ * ending). SC_NGROUP is itself a power of two, so the first group after the
+ * pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a
+ * lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP
+ * sizes without violating our LG_QUANTUM requirements, so we can safely set
+ * lg_delta = lg_base - SC_LG_GROUP (== LG_QUANTUM).
+ *
+ * So, in order, the size classes are:
+ *
+ * Tiny size classes:
+ * - Count: LG_QUANTUM - SC_LG_TINY_MIN.
+ * - Sizes:
+ * 1 << SC_LG_TINY_MIN
+ * 1 << (SC_LG_TINY_MIN + 1)
+ * 1 << (SC_LG_TINY_MIN + 2)
+ * ...
+ * 1 << (LG_QUANTUM - 1)
+ *
+ * Initial pseudo-group:
+ * - Count: SC_NGROUP
+ * - Sizes:
+ * 1 * (1 << LG_QUANTUM)
+ * 2 * (1 << LG_QUANTUM)
+ * 3 * (1 << LG_QUANTUM)
+ * ...
+ * SC_NGROUP * (1 << LG_QUANTUM)
+ *
+ * Regular group 0:
+ * - Count: SC_NGROUP
+ * - Sizes:
+ * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of
+ * lg_base - SC_LG_NGROUP)
+ * (1 << lg_base) + 1 * (1 << lg_delta)
+ * (1 << lg_base) + 2 * (1 << lg_delta)
+ * (1 << lg_base) + 3 * (1 << lg_delta)
+ * ...
+ * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
+ *
+ * Regular group 1:
+ * - Count: SC_NGROUP
+ * - Sizes:
+ * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of
+ * lg_base - SC_LG_NGROUP)
+ * (1 << lg_base) + 1 * (1 << lg_delta)
+ * (1 << lg_base) + 2 * (1 << lg_delta)
+ * (1 << lg_base) + 3 * (1 << lg_delta)
+ * ...
+ * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
+ *
+ * ...
+ *
+ * Regular group N:
+ * - Count: SC_NGROUP
+ * - Sizes:
+ * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of
+ * lg_base - SC_LG_NGROUP)
+ * (1 << lg_base) + 1 * (1 << lg_delta)
+ * (1 << lg_base) + 2 * (1 << lg_delta)
+ * (1 << lg_base) + 3 * (1 << lg_delta)
+ * ...
+ * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
+ *
+ *
+ * Representation of metadata:
+ * To make the math easy, we'll mostly work in lg quantities. We record lg_base,
+ * lg_delta, and ndelta (i.e. number of deltas above the base) on a
+ * per-size-class basis, and maintain the invariant that, across all size
+ * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
+ *
+ * For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP),
+ * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
+ *
+ * For the initial tiny size classes (if any), lg_base is lg(size class size).
+ * lg_delta is lg_base for the first size class, and lg_base - 1 for all
+ * subsequent ones. ndelta is always 0.
+ *
+ * For the pseudo-group, if there are no tiny size classes, then we set
+ * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
+ * to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta
+ * is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do
+ * indeed get a power of two that way). If there *are* tiny size classes, then
+ * the first size class needs to have lg_delta relative to the largest tiny size
+ * class. We therefore set lg_base == LG_QUANTUM - 1,
+ * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
+ * pseudo-group the same.
+ *
+ *
+ * Other terminology:
+ * "Small" size classes mean those that are allocated out of bins, which is the
+ * same as those that are slab allocated.
+ * "Large" size classes are those that are not small. The cutoff for counting as
+ * large is page size * group size.
+ */
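
To make the construction above concrete, assume a 16-byte quantum (LG_QUANTUM == 4) together with SC_LG_TINY_MIN == 3 and SC_LG_NGROUP == 2 as defined below. That yields one tiny class (8), the pseudo-group 16/32/48/64, and regular groups of four classes each spaced by a quarter of their base: 80/96/112/128, then 160/192/224/256, and so on. The sketch below generates those sizes from (lg_base, lg_delta, ndelta) using the invariant size == (1 << lg_base) + ndelta * (1 << lg_delta); the quantum value is an assumption, not something this header fixes.

#include <stdio.h>

#define LG_QUANTUM  4 /* assumption: 16-byte quantum */
#define LG_TINY_MIN 3
#define LG_NGROUP   2
#define NGROUP      (1 << LG_NGROUP)

int
main(void) {
    /* Tiny classes: one per power of two below the quantum. */
    for (int lg = LG_TINY_MIN; lg < LG_QUANTUM; lg++) {
        printf("%zu\n", (size_t)1 << lg);
    }
    /* Pseudo-group: NGROUP classes spaced one quantum apart. */
    for (int ndelta = 1; ndelta <= NGROUP; ndelta++) {
        printf("%zu\n", (size_t)ndelta << LG_QUANTUM);
    }
    /* First three regular groups: delta == base / NGROUP. */
    for (int g = 0; g < 3; g++) {
        int lg_base = LG_QUANTUM + LG_NGROUP + g;
        int lg_delta = lg_base - LG_NGROUP;
        for (int ndelta = 1; ndelta <= NGROUP; ndelta++) {
            size_t size = ((size_t)1 << lg_base) +
                ((size_t)ndelta << lg_delta);
            printf("%zu\n", size);
        }
    }
    /* Prints: 8, 16, 32, 48, 64, 80, 96, 112, 128,
     * 160, 192, 224, 256, 320, 384, 448, 512. */
    return 0;
}
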
+
+/*
+ * Size class N + (1 << SC_LG_NGROUP) is twice the size of size class N.
+ */
+#define SC_LG_NGROUP 2
+#define SC_LG_TINY_MIN 3
+
+#if SC_LG_TINY_MIN == 0
+/* The div module doesn't support division by 1, which this would require. */
+#error "Unsupported LG_TINY_MIN"
+#endif
+
+/*
+ * The definitions below are all determined by the above settings and system
+ * characteristics.
+ */
+#define SC_NGROUP (1ULL << SC_LG_NGROUP)
+#define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
+#define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
+#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)
+#define SC_NPSEUDO SC_NGROUP
+#define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
+/*
+ * We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base
+ * we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1
+ * size class shorter than the others).
+ * We could probably save some space in arenas by capping this at LG_VADDR size.
+ */
+#define SC_LG_BASE_MAX (SC_PTR_BITS - 2)
+#define SC_NREGULAR (SC_NGROUP * \
+ (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
+#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)
+
+/* The number of size classes that are a multiple of the page size. */
+#define SC_NPSIZES ( \
+ /* Start with all the size classes. */ \
+ SC_NSIZES \
+ /* Subtract out those groups with too small a base. */ \
+ - (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP \
+ /* And the pseudo-group. */ \
+ - SC_NPSEUDO \
+ /* And the tiny group. */ \
+ - SC_NTINY \
+ /* Sizes where ndelta*delta is not a multiple of the page size. */ \
+ - (SC_LG_NGROUP * SC_NGROUP))
+/*
+ * Note that the last line is computed as the sum of the second column in the
+ * following table:
+ * lg(base) | count of sizes to exclude
+ * ------------------------------|-----------------------------
+ * LG_PAGE - 1 | SC_NGROUP - 1
+ * LG_PAGE | SC_NGROUP - 1
+ * LG_PAGE + 1 | SC_NGROUP - 2
+ * LG_PAGE + 2 | SC_NGROUP - 4
+ * ... | ...
+ * LG_PAGE + (SC_LG_NGROUP - 1) | SC_NGROUP - (SC_NGROUP / 2)
+ */
+
+/*
+ * We declare a size class binnable if size < page size * group. Or, in other
+ * words, lg(size) < lg(page size) + lg(group size).
+ */
+#define SC_NBINS ( \
+ /* Sub-regular size classes. */ \
+ SC_NTINY + SC_NPSEUDO \
+ /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \
+ + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \
+ /* Last SC of the last group hits the bound exactly; exclude it. */ \
+ - 1)
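
Plugging one common configuration into the counts above (64-bit pointers, LG_QUANTUM == 4, LG_PAGE == 12; these are assumptions, not values fixed by this header) gives SC_NTINY == 1, SC_NPSEUDO == 4, SC_NREGULAR == 4 * (62 - 6 + 1) - 1 == 227, hence SC_NSIZES == 232 and SC_NBINS == 1 + 4 + 4 * (12 + 2 - 6) - 1 == 36. A small sketch that evaluates the same formulas:

#include <stdio.h>

int
main(void) {
    /* Assumed platform parameters. */
    int lg_quantum = 4, lg_page = 12, ptr_bits = 64;
    int lg_ngroup = 2, lg_tiny_min = 3;
    int ngroup = 1 << lg_ngroup;

    int ntiny = lg_quantum - lg_tiny_min;
    int npseudo = ngroup;
    int lg_first_regular_base = lg_quantum + lg_ngroup;
    int lg_base_max = ptr_bits - 2;
    int nregular = ngroup * (lg_base_max - lg_first_regular_base + 1) - 1;
    int nsizes = ntiny + npseudo + nregular;
    int nbins = ntiny + npseudo
        + ngroup * (lg_page + lg_ngroup - lg_first_regular_base) - 1;

    /* Expect 1, 4, 227, 232, 36 for this configuration. */
    printf("NTINY=%d NPSEUDO=%d NREGULAR=%d NSIZES=%d NBINS=%d\n",
        ntiny, npseudo, nregular, nsizes, nbins);
    return 0;
}
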
+
+/*
+ * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
+ * cannot support more than 256 small size classes.
+ */
+#if (SC_NBINS > 256)
+# error "Too many small size classes"
+#endif
+
+/* The largest size class in the lookup table. */
+#define SC_LOOKUP_MAXCLASS ((size_t)1 << 12)
+
+/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
+#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1))
+#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1))
+
+/* The largest size class allocated out of a slab. */
+#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
+ + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
+
+/* The smallest size class not allocated out of a slab. */
+#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
+#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
+
+/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */
+#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2))
+#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))
+
+/* The largest size class supported. */
+#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)
+
+typedef struct sc_s sc_t;
+struct sc_s {
+ /* Size class index, or -1 if not a valid size class. */
+ int index;
+ /* Lg group base size (no deltas added). */
+ int lg_base;
+ /* Lg delta to previous size class. */
+ int lg_delta;
+ /* Delta multiplier. size == 1<<lg_base + ndelta<<lg_delta */
+ int ndelta;
+ /*
+ * True if the size class is a multiple of the page size, false
+ * otherwise.
+ */
+ bool psz;
+ /*
+ * True if the size class is a small, bin, size class. False otherwise.
+ */
+ bool bin;
+ /* The slab page count if a small bin size class, 0 otherwise. */
+ int pgs;
+ /* Same as lg_delta if a lookup table size class, 0 otherwise. */
+ int lg_delta_lookup;
+};
+
+typedef struct sc_data_s sc_data_t;
+struct sc_data_s {
+ /* Number of tiny size classes. */
+ unsigned ntiny;
+ /* Number of bins supported by the lookup table. */
+ int nlbins;
+ /* Number of small size class bins. */
+ int nbins;
+ /* Number of size classes. */
+ int nsizes;
+ /* Number of bits required to store NSIZES. */
+ int lg_ceil_nsizes;
+ /* Number of size classes that are a multiple of (1U << LG_PAGE). */
+ unsigned npsizes;
+ /* Lg of maximum tiny size class (or -1, if none). */
+ int lg_tiny_maxclass;
+ /* Maximum size class included in lookup table. */
+ size_t lookup_maxclass;
+ /* Maximum small size class. */
+ size_t small_maxclass;
+ /* Lg of minimum large size class. */
+ int lg_large_minclass;
+ /* The minimum large size class. */
+ size_t large_minclass;
+ /* Maximum (large) size class. */
+ size_t large_maxclass;
+ /* True if the sc_data_t has been initialized (for debugging only). */
+ bool initialized;
+
+ sc_t sc[SC_NSIZES];
+};
+
+void sc_data_init(sc_data_t *data);
+/*
+ * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
+ * Otherwise, does its best to accommodate the request.
+ */
+void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
+ int pgs);
+void sc_boot(sc_data_t *data);
+
+#endif /* JEMALLOC_INTERNAL_SC_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/seq.h b/contrib/libs/jemalloc/include/jemalloc/internal/seq.h
index ef2df4c6ee..d6cba8bb88 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/seq.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/seq.h
@@ -1,55 +1,55 @@
-#ifndef JEMALLOC_INTERNAL_SEQ_H
-#define JEMALLOC_INTERNAL_SEQ_H
-
-#include "jemalloc/internal/atomic.h"
-
-/*
- * A simple seqlock implementation.
- */
-
-#define seq_define(type, short_type) \
-typedef struct { \
- atomic_zu_t seq; \
- atomic_zu_t data[ \
- (sizeof(type) + sizeof(size_t) - 1) / sizeof(size_t)]; \
-} seq_##short_type##_t; \
- \
-/* \
- * No internal synchronization -- the caller must ensure that there's \
- * only a single writer at a time. \
- */ \
-static inline void \
-seq_store_##short_type(seq_##short_type##_t *dst, type *src) { \
- size_t buf[sizeof(dst->data) / sizeof(size_t)]; \
- buf[sizeof(buf) / sizeof(size_t) - 1] = 0; \
- memcpy(buf, src, sizeof(type)); \
- size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED); \
- atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED); \
- atomic_fence(ATOMIC_RELEASE); \
- for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
- atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED); \
- } \
- atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE); \
-} \
- \
-/* Returns whether or not the read was consistent. */ \
-static inline bool \
-seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) { \
- size_t buf[sizeof(src->data) / sizeof(size_t)]; \
- size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE); \
- if (seq1 % 2 != 0) { \
- return false; \
- } \
- for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
- buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED); \
- } \
- atomic_fence(ATOMIC_ACQUIRE); \
- size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED); \
- if (seq1 != seq2) { \
- return false; \
- } \
- memcpy(dst, buf, sizeof(type)); \
- return true; \
-}
-
-#endif /* JEMALLOC_INTERNAL_SEQ_H */
+#ifndef JEMALLOC_INTERNAL_SEQ_H
+#define JEMALLOC_INTERNAL_SEQ_H
+
+#include "jemalloc/internal/atomic.h"
+
+/*
+ * A simple seqlock implementation.
+ */
+
+#define seq_define(type, short_type) \
+typedef struct { \
+ atomic_zu_t seq; \
+ atomic_zu_t data[ \
+ (sizeof(type) + sizeof(size_t) - 1) / sizeof(size_t)]; \
+} seq_##short_type##_t; \
+ \
+/* \
+ * No internal synchronization -- the caller must ensure that there's \
+ * only a single writer at a time. \
+ */ \
+static inline void \
+seq_store_##short_type(seq_##short_type##_t *dst, type *src) { \
+ size_t buf[sizeof(dst->data) / sizeof(size_t)]; \
+ buf[sizeof(buf) / sizeof(size_t) - 1] = 0; \
+ memcpy(buf, src, sizeof(type)); \
+ size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED); \
+ atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED); \
+ atomic_fence(ATOMIC_RELEASE); \
+ for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
+ atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED); \
+ } \
+ atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE); \
+} \
+ \
+/* Returns whether or not the read was consistent. */ \
+static inline bool \
+seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) { \
+ size_t buf[sizeof(src->data) / sizeof(size_t)]; \
+ size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE); \
+ if (seq1 % 2 != 0) { \
+ return false; \
+ } \
+ for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
+ buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED); \
+ } \
+ atomic_fence(ATOMIC_ACQUIRE); \
+ size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED); \
+ if (seq1 != seq2) { \
+ return false; \
+ } \
+ memcpy(dst, buf, sizeof(type)); \
+ return true; \
+}
+
+#endif /* JEMALLOC_INTERNAL_SEQ_H */
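
seq_define() above expands, per type, to a classic seqlock: the writer bumps the sequence to an odd value, copies the payload word by word with relaxed stores, then publishes an even sequence with release semantics; a reader accepts a snapshot only if it observed the same even sequence before and after copying. The sketch below restates that protocol with C11 stdatomic instead of the jemalloc atomic wrappers so it compiles on its own; it is a paraphrase of the macro, not the macro itself.

#include <stdatomic.h>
#include <stdbool.h>
#include <string.h>

typedef struct {
    long a;
    long b;
} payload_t;

#define NWORDS ((sizeof(payload_t) + sizeof(size_t) - 1) / sizeof(size_t))

typedef struct {
    atomic_size_t seq;
    atomic_size_t data[NWORDS];
} seq_payload_t;

/* Single writer only, as with seq_store_* above. */
static void
seq_store(seq_payload_t *dst, const payload_t *src) {
    size_t buf[NWORDS];
    buf[NWORDS - 1] = 0; /* zero any tail padding */
    memcpy(buf, src, sizeof(*src));
    size_t old_seq = atomic_load_explicit(&dst->seq, memory_order_relaxed);
    atomic_store_explicit(&dst->seq, old_seq + 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_release); /* odd seq visible before data */
    for (size_t i = 0; i < NWORDS; i++) {
        atomic_store_explicit(&dst->data[i], buf[i], memory_order_relaxed);
    }
    atomic_store_explicit(&dst->seq, old_seq + 2, memory_order_release);
}

/* Returns whether the snapshot written to *dst is consistent. */
static bool
seq_try_load(payload_t *dst, seq_payload_t *src) {
    size_t buf[NWORDS];
    size_t seq1 = atomic_load_explicit(&src->seq, memory_order_acquire);
    if (seq1 % 2 != 0) {
        return false; /* writer in progress */
    }
    for (size_t i = 0; i < NWORDS; i++) {
        buf[i] = atomic_load_explicit(&src->data[i], memory_order_relaxed);
    }
    atomic_thread_fence(memory_order_acquire);
    size_t seq2 = atomic_load_explicit(&src->seq, memory_order_relaxed);
    if (seq1 != seq2) {
        return false; /* torn by a concurrent store */
    }
    memcpy(dst, buf, sizeof(*dst));
    return true;
}
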
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/smoothstep.h b/contrib/libs/jemalloc/include/jemalloc/internal/smoothstep.h
index 2e14430f5f..718f3c48a6 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/smoothstep.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/smoothstep.h
@@ -1,232 +1,232 @@
-#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
-#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
-
-/*
- * This file was generated by the following command:
- * sh smoothstep.sh smoother 200 24 3 15
- */
-/******************************************************************************/
-
-/*
- * This header defines a precomputed table based on the smoothstep family of
- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
- * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
- * that floating point math can be avoided.
- *
- * 3 2
- * smoothstep(x) = -2x + 3x
- *
- * 5 4 3
- * smootherstep(x) = 6x - 15x + 10x
- *
- * 7 6 5 4
- * smootheststep(x) = -20x + 70x - 84x + 35x
- */
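
Each table entry below appears to store h == floor(smootherstep(x) * 2^SMOOTHSTEP_BFP), i.e. the y column scaled into 24-bit fixed point so consumers never touch floating point; that matches the rows spot-checked here (e.g. x = 0.100, y = 0.00856, h = 0x230fc). A quick sketch of that conversion:

#include <stdint.h>
#include <stdio.h>

#define BFP 24 /* matches SMOOTHSTEP_BFP below */

static double
smootherstep(double x) {
    /* 6x^5 - 15x^4 + 10x^3, factored to avoid pow(). */
    return x * x * x * (x * (x * 6 - 15) + 10);
}

int
main(void) {
    double x = 20 / 200.0;            /* step 20 of 200 */
    double y = smootherstep(x);       /* 0.00856 */
    uint64_t h = (uint64_t)(y * ((uint64_t)1 << BFP)); /* truncates, i.e. floor */
    printf("y=%.15f h=%#llx\n", y, (unsigned long long)h); /* h == 0x230fc */
    return 0;
}
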
-
-#define SMOOTHSTEP_VARIANT "smoother"
-#define SMOOTHSTEP_NSTEPS 200
-#define SMOOTHSTEP_BFP 24
-#define SMOOTHSTEP \
- /* STEP(step, h, x, y) */ \
- STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
- STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
- STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
- STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
- STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
- STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
- STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
- STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
- STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
- STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
- STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
- STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
- STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
- STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
- STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
- STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
- STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
- STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
- STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
- STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
- STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
- STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
- STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
- STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
- STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
- STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
- STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
- STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
- STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
- STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
- STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
- STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
- STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
- STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
- STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
- STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
- STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
- STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
- STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
- STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
- STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
- STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
- STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
- STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
- STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
- STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
- STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
- STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
- STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
- STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
- STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
- STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
- STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
- STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
- STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
- STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
- STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
- STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
- STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
- STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
- STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
- STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
- STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
- STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
- STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
- STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
- STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
- STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
- STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
- STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
- STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
- STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
- STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
- STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
- STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
- STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
- STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
- STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
- STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
- STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
- STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
- STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
- STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
- STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
- STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
- STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
- STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
- STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
- STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
- STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
- STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
- STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
- STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
- STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
- STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
- STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
- STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
- STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
- STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
- STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
- STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
- STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
- STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
- STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
- STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
- STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
- STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
- STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
- STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
- STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
- STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
- STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
- STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
- STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
- STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
- STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
- STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
- STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
- STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
- STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
- STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
- STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
- STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
- STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
- STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
- STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
- STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
- STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
- STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
- STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
- STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
- STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
- STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
- STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
- STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
- STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
- STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
- STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
- STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
- STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
- STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
- STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
- STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
- STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
- STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
- STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
- STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
- STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
- STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
- STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
- STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
- STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
- STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
- STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
- STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
- STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
- STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
- STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
- STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
- STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
- STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
- STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
- STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
- STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
- STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
- STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
- STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
- STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
- STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
- STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
- STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
- STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
- STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
- STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
- STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
- STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
- STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
- STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
- STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
- STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
- STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
- STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
- STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
- STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
- STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
- STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
- STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
- STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
- STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
- STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
- STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
- STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
- STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
- STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
- STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
- STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
- STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
- STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
- STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
- STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
-
-#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
+#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
+#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
+
+/*
+ * This file was generated by the following command:
+ * sh smoothstep.sh smoother 200 24 3 15
+ */
+/******************************************************************************/
+
+/*
+ * This header defines a precomputed table based on the smoothstep family of
+ * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
+ * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
+ * that floating point math can be avoided.
+ *
+ * 3 2
+ * smoothstep(x) = -2x + 3x
+ *
+ * 5 4 3
+ * smootherstep(x) = 6x - 15x + 10x
+ *
+ * 7 6 5 4
+ * smootheststep(x) = -20x + 70x - 84x + 35x
+ */
+
+#define SMOOTHSTEP_VARIANT "smoother"
+#define SMOOTHSTEP_NSTEPS 200
+#define SMOOTHSTEP_BFP 24
+#define SMOOTHSTEP \
+ /* STEP(step, h, x, y) */ \
+ STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
+ STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
+ STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
+ STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
+ STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
+ STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
+ STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
+ STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
+ STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
+ STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
+ STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
+ STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
+ STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
+ STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
+ STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
+ STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
+ STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
+ STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
+ STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
+ STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
+ STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
+ STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
+ STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
+ STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
+ STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
+ STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
+ STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
+ STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
+ STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
+ STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
+ STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
+ STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
+ STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
+ STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
+ STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
+ STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
+ STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
+ STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
+ STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
+ STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
+ STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
+ STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
+ STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
+ STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
+ STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
+ STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
+ STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
+ STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
+ STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
+ STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
+ STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
+ STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
+ STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
+ STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
+ STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
+ STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
+ STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
+ STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
+ STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
+ STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
+ STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
+ STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
+ STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
+ STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
+ STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
+ STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
+ STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
+ STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
+ STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
+ STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
+ STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
+ STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
+ STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
+ STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
+ STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
+ STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
+ STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
+ STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
+ STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
+ STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
+ STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
+ STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
+ STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
+ STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
+ STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
+ STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
+ STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
+ STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
+ STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
+ STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
+ STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
+ STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
+ STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
+ STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
+ STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
+ STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
+ STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
+ STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
+ STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
+ STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
+ STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
+ STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
+ STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
+ STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
+ STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
+ STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
+ STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
+ STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
+ STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
+ STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
+ STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
+ STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
+ STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
+ STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
+ STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
+ STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
+ STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
+ STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
+ STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
+ STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
+ STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
+ STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
+ STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
+ STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
+ STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
+ STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
+ STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
+ STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
+ STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
+ STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
+ STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
+ STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
+ STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
+ STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
+ STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
+ STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
+ STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
+ STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
+ STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
+ STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
+ STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
+ STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
+ STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
+ STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
+ STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
+ STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
+ STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
+ STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
+ STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
+ STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
+ STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
+ STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
+ STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
+ STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
+ STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
+ STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
+ STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
+ STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
+ STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
+ STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
+ STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
+ STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
+ STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
+ STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
+ STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
+ STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
+ STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
+ STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
+ STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
+ STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
+ STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
+ STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
+ STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
+ STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
+ STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
+ STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
+ STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
+ STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
+ STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
+ STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
+ STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
+ STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
+ STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
+ STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
+ STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
+ STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
+ STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
+ STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
+ STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
+ STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
+ STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
+ STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
+ STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
+ STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
+ STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
+ STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
+ STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
+ STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
+ STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
+ STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
+
+#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
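
Editor's note: the table above stores smootherstep(x) in 24-bit binary fixed point (SMOOTHSTEP_BFP = 24), i.e. each h column is y scaled by 2^24 and truncated. A minimal sketch (independent of the smoothstep.sh generator named in the header comment) that recomputes one entry and checks it against the table:

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

/* smootherstep(x) = 6x^5 - 15x^4 + 10x^3, for 0 <= x <= 1. */
static double
smootherstep(double x) {
	return x * x * x * (x * (x * 6.0 - 15.0) + 10.0);
}

int
main(void) {
	/* SMOOTHSTEP_BFP = 24: table entries are y * 2^24, truncated. */
	const double scale = (double)(UINT64_C(1) << 24);
	/* Step 100 corresponds to x = 100/200 = 0.5, where y is exactly 0.5. */
	uint64_t h = (uint64_t)(smootherstep(100.0 / 200.0) * scale);
	printf("step 100: 0x%016" PRIx64 "\n", h);
	assert(h == UINT64_C(0x800000));	/* matches STEP(100, ...) above */
	return 0;
}

The same check against steps 1, 2 and 200 reproduces 0x14, 0xa5 and 0x1000000, which is consistent with truncation rather than rounding.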
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/spin.h b/contrib/libs/jemalloc/include/jemalloc/internal/spin.h
index 22804c687f..3d99e7dd99 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/spin.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/spin.h
@@ -1,40 +1,40 @@
-#ifndef JEMALLOC_INTERNAL_SPIN_H
-#define JEMALLOC_INTERNAL_SPIN_H
-
-#define SPIN_INITIALIZER {0U}
-
-typedef struct {
- unsigned iteration;
-} spin_t;
-
-static inline void
-spin_cpu_spinwait() {
-# if HAVE_CPU_SPINWAIT
- CPU_SPINWAIT;
-# else
- volatile int x = 0;
- x = x;
-# endif
-}
-
-static inline void
-spin_adaptive(spin_t *spin) {
- volatile uint32_t i;
-
- if (spin->iteration < 5) {
- for (i = 0; i < (1U << spin->iteration); i++) {
- spin_cpu_spinwait();
- }
- spin->iteration++;
- } else {
-#ifdef _WIN32
- SwitchToThread();
-#else
- sched_yield();
-#endif
- }
-}
-
-#undef SPIN_INLINE
-
-#endif /* JEMALLOC_INTERNAL_SPIN_H */
+#ifndef JEMALLOC_INTERNAL_SPIN_H
+#define JEMALLOC_INTERNAL_SPIN_H
+
+#define SPIN_INITIALIZER {0U}
+
+typedef struct {
+ unsigned iteration;
+} spin_t;
+
+static inline void
+spin_cpu_spinwait() {
+# if HAVE_CPU_SPINWAIT
+ CPU_SPINWAIT;
+# else
+ volatile int x = 0;
+ x = x;
+# endif
+}
+
+static inline void
+spin_adaptive(spin_t *spin) {
+ volatile uint32_t i;
+
+ if (spin->iteration < 5) {
+ for (i = 0; i < (1U << spin->iteration); i++) {
+ spin_cpu_spinwait();
+ }
+ spin->iteration++;
+ } else {
+#ifdef _WIN32
+ SwitchToThread();
+#else
+ sched_yield();
+#endif
+ }
+}
+
+#undef SPIN_INLINE
+
+#endif /* JEMALLOC_INTERNAL_SPIN_H */
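
Editor's note: spin_adaptive() above implements a simple exponential backoff: the first five calls spin 1, 2, 4, 8 and 16 times via spin_cpu_spinwait() (CPU_SPINWAIT where available), after which every call yields the thread to the scheduler. A usage sketch built around a hypothetical flag published by another thread (not code from jemalloc itself):

#include <stdatomic.h>

/* Hypothetical flag, assumed to be set by another thread. */
extern atomic_bool ready;

static void
wait_until_ready(void) {
	spin_t spinner = SPIN_INITIALIZER;	/* from the header above */

	while (!atomic_load_explicit(&ready, memory_order_acquire)) {
		spin_adaptive(&spinner);	/* spin briefly, then yield */
	}
}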
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/stats.h b/contrib/libs/jemalloc/include/jemalloc/internal/stats.h
index 3b9e0eac12..c31b36ecab 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/stats.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/stats.h
@@ -1,31 +1,31 @@
-#ifndef JEMALLOC_INTERNAL_STATS_H
-#define JEMALLOC_INTERNAL_STATS_H
+#ifndef JEMALLOC_INTERNAL_STATS_H
+#define JEMALLOC_INTERNAL_STATS_H
-/* OPTION(opt, var_name, default, set_value_to) */
-#define STATS_PRINT_OPTIONS \
- OPTION('J', json, false, true) \
- OPTION('g', general, true, false) \
- OPTION('m', merged, config_stats, false) \
- OPTION('d', destroyed, config_stats, false) \
- OPTION('a', unmerged, config_stats, false) \
- OPTION('b', bins, true, false) \
- OPTION('l', large, true, false) \
- OPTION('x', mutex, true, false) \
- OPTION('e', extents, true, false)
+/* OPTION(opt, var_name, default, set_value_to) */
+#define STATS_PRINT_OPTIONS \
+ OPTION('J', json, false, true) \
+ OPTION('g', general, true, false) \
+ OPTION('m', merged, config_stats, false) \
+ OPTION('d', destroyed, config_stats, false) \
+ OPTION('a', unmerged, config_stats, false) \
+ OPTION('b', bins, true, false) \
+ OPTION('l', large, true, false) \
+ OPTION('x', mutex, true, false) \
+ OPTION('e', extents, true, false)
-enum {
-#define OPTION(o, v, d, s) stats_print_option_num_##v,
- STATS_PRINT_OPTIONS
-#undef OPTION
- stats_print_tot_num_options
+enum {
+#define OPTION(o, v, d, s) stats_print_option_num_##v,
+ STATS_PRINT_OPTIONS
+#undef OPTION
+ stats_print_tot_num_options
};
-/* Options for stats_print. */
-extern bool opt_stats_print;
-extern char opt_stats_print_opts[stats_print_tot_num_options+1];
+/* Options for stats_print. */
+extern bool opt_stats_print;
+extern char opt_stats_print_opts[stats_print_tot_num_options+1];
-/* Implements je_malloc_stats_print. */
-void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+/* Implements je_malloc_stats_print. */
+void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts);
-#endif /* JEMALLOC_INTERNAL_STATS_H */
+#endif /* JEMALLOC_INTERNAL_STATS_H */
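
Editor's note: STATS_PRINT_OPTIONS above is an X-macro; each consumer defines OPTION(), expands the list, then undefines it, exactly as the header itself does to build the stats_print_option_num_* enum. An illustrative, hypothetical second expansion that maps each option character to its enum index:

/* Table mapping option characters ('J', 'g', ...) to their enum indices. */
typedef struct {
	char opt;
	unsigned index;
} stats_opt_map_t;

static const stats_opt_map_t stats_opt_map[] = {
#define OPTION(o, v, d, s)	{o, stats_print_option_num_##v},
	STATS_PRINT_OPTIONS
#undef OPTION
};

The d (default) and s (value to set when the character is present) columns are intended to be consumed the same way by the stats_print() option parser, driven by the characters stored in opt_stats_print_opts.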
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/sz.h b/contrib/libs/jemalloc/include/jemalloc/internal/sz.h
index 68e558abfe..4b770a83a2 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/sz.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/sz.h
@@ -1,318 +1,318 @@
-#ifndef JEMALLOC_INTERNAL_SIZE_H
-#define JEMALLOC_INTERNAL_SIZE_H
-
-#include "jemalloc/internal/bit_util.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/util.h"
-
-/*
- * sz module: Size computations.
- *
- * Some abbreviations used here:
- * p: Page
- * ind: Index
- * s, sz: Size
- * u: Usable size
- * a: Aligned
- *
- * These are not always used completely consistently, but should be enough to
- * interpret function names. E.g. sz_psz2ind converts page size to page size
- * index; sz_sa2u converts a (size, alignment) allocation request to the usable
- * size that would result from such an allocation.
- */
-
-/*
- * sz_pind2sz_tab encodes the same information as could be computed by
- * sz_pind2sz_compute().
- */
-extern size_t sz_pind2sz_tab[SC_NPSIZES + 1];
-/*
- * sz_index2size_tab encodes the same information as could be computed (at
- * unacceptable cost in some code paths) by sz_index2size_compute().
- */
-extern size_t sz_index2size_tab[SC_NSIZES];
-/*
- * sz_size2index_tab is a compact lookup table that rounds request sizes up to
- * size classes. In order to reduce cache footprint, the table is compressed,
- * and all accesses are via sz_size2index().
- */
-extern uint8_t sz_size2index_tab[];
-
-static const size_t sz_large_pad =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
- PAGE
-#else
- 0
-#endif
- ;
-
-extern void sz_boot(const sc_data_t *sc_data);
-
-JEMALLOC_ALWAYS_INLINE pszind_t
-sz_psz2ind(size_t psz) {
- if (unlikely(psz > SC_LARGE_MAXCLASS)) {
- return SC_NPSIZES;
- }
- pszind_t x = lg_floor((psz<<1)-1);
- pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ?
- 0 : x - (SC_LG_NGROUP + LG_PAGE);
- pszind_t grp = shift << SC_LG_NGROUP;
-
- pszind_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
- LG_PAGE : x - SC_LG_NGROUP - 1;
-
- size_t delta_inverse_mask = ZU(-1) << lg_delta;
- pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
- ((ZU(1) << SC_LG_NGROUP) - 1);
-
- pszind_t ind = grp + mod;
- return ind;
-}
-
-static inline size_t
-sz_pind2sz_compute(pszind_t pind) {
- if (unlikely(pind == SC_NPSIZES)) {
- return SC_LARGE_MAXCLASS + PAGE;
- }
- size_t grp = pind >> SC_LG_NGROUP;
- size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1);
-
- size_t grp_size_mask = ~((!!grp)-1);
- size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp)
- & grp_size_mask;
-
- size_t shift = (grp == 0) ? 1 : grp;
- size_t lg_delta = shift + (LG_PAGE-1);
- size_t mod_size = (mod+1) << lg_delta;
-
- size_t sz = grp_size + mod_size;
- return sz;
-}
-
-static inline size_t
-sz_pind2sz_lookup(pszind_t pind) {
- size_t ret = (size_t)sz_pind2sz_tab[pind];
- assert(ret == sz_pind2sz_compute(pind));
- return ret;
-}
-
-static inline size_t
-sz_pind2sz(pszind_t pind) {
- assert(pind < SC_NPSIZES + 1);
- return sz_pind2sz_lookup(pind);
-}
-
-static inline size_t
-sz_psz2u(size_t psz) {
- if (unlikely(psz > SC_LARGE_MAXCLASS)) {
- return SC_LARGE_MAXCLASS + PAGE;
- }
- size_t x = lg_floor((psz<<1)-1);
- size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
- LG_PAGE : x - SC_LG_NGROUP - 1;
- size_t delta = ZU(1) << lg_delta;
- size_t delta_mask = delta - 1;
- size_t usize = (psz + delta_mask) & ~delta_mask;
- return usize;
-}
-
-static inline szind_t
-sz_size2index_compute(size_t size) {
- if (unlikely(size > SC_LARGE_MAXCLASS)) {
- return SC_NSIZES;
- }
-
- if (size == 0) {
- return 0;
- }
-#if (SC_NTINY != 0)
- if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
- szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
- szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
- return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
- }
-#endif
- {
- szind_t x = lg_floor((size<<1)-1);
- szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 :
- x - (SC_LG_NGROUP + LG_QUANTUM);
- szind_t grp = shift << SC_LG_NGROUP;
-
- szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
- ? LG_QUANTUM : x - SC_LG_NGROUP - 1;
-
- size_t delta_inverse_mask = ZU(-1) << lg_delta;
- szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
- ((ZU(1) << SC_LG_NGROUP) - 1);
-
- szind_t index = SC_NTINY + grp + mod;
- return index;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-sz_size2index_lookup(size_t size) {
- assert(size <= SC_LOOKUP_MAXCLASS);
- szind_t ret = (sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
- >> SC_LG_TINY_MIN]);
- assert(ret == sz_size2index_compute(size));
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-sz_size2index(size_t size) {
- if (likely(size <= SC_LOOKUP_MAXCLASS)) {
- return sz_size2index_lookup(size);
- }
- return sz_size2index_compute(size);
-}
-
-static inline size_t
-sz_index2size_compute(szind_t index) {
-#if (SC_NTINY > 0)
- if (index < SC_NTINY) {
- return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index));
- }
-#endif
- {
- size_t reduced_index = index - SC_NTINY;
- size_t grp = reduced_index >> SC_LG_NGROUP;
- size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) -
- 1);
-
- size_t grp_size_mask = ~((!!grp)-1);
- size_t grp_size = ((ZU(1) << (LG_QUANTUM +
- (SC_LG_NGROUP-1))) << grp) & grp_size_mask;
-
- size_t shift = (grp == 0) ? 1 : grp;
- size_t lg_delta = shift + (LG_QUANTUM-1);
- size_t mod_size = (mod+1) << lg_delta;
-
- size_t usize = grp_size + mod_size;
- return usize;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-sz_index2size_lookup(szind_t index) {
- size_t ret = (size_t)sz_index2size_tab[index];
- assert(ret == sz_index2size_compute(index));
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-sz_index2size(szind_t index) {
- assert(index < SC_NSIZES);
- return sz_index2size_lookup(index);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-sz_s2u_compute(size_t size) {
- if (unlikely(size > SC_LARGE_MAXCLASS)) {
- return 0;
- }
-
- if (size == 0) {
- size++;
- }
-#if (SC_NTINY > 0)
- if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
- size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
- size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
- return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
- (ZU(1) << lg_ceil));
- }
-#endif
- {
- size_t x = lg_floor((size<<1)-1);
- size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
- ? LG_QUANTUM : x - SC_LG_NGROUP - 1;
- size_t delta = ZU(1) << lg_delta;
- size_t delta_mask = delta - 1;
- size_t usize = (size + delta_mask) & ~delta_mask;
- return usize;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-sz_s2u_lookup(size_t size) {
- size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));
-
- assert(ret == sz_s2u_compute(size));
- return ret;
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-sz_s2u(size_t size) {
- if (likely(size <= SC_LOOKUP_MAXCLASS)) {
- return sz_s2u_lookup(size);
- }
- return sz_s2u_compute(size);
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size and alignment.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-sz_sa2u(size_t size, size_t alignment) {
- size_t usize;
-
- assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
-
- /* Try for a small size class. */
- if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) {
- /*
- * Round size up to the nearest multiple of alignment.
- *
- * This done, we can take advantage of the fact that for each
- * small size class, every object is aligned at the smallest
- * power of two that is non-zero in the base two representation
- * of the size. For example:
- *
- * Size | Base 2 | Minimum alignment
- * -----+----------+------------------
- * 96 | 1100000 | 32
- * 144 | 10100000 | 32
- * 192 | 11000000 | 64
- */
- usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
- if (usize < SC_LARGE_MINCLASS) {
- return usize;
- }
- }
-
- /* Large size class. Beware of overflow. */
-
- if (unlikely(alignment > SC_LARGE_MAXCLASS)) {
- return 0;
- }
-
- /* Make sure result is a large size class. */
- if (size <= SC_LARGE_MINCLASS) {
- usize = SC_LARGE_MINCLASS;
- } else {
- usize = sz_s2u(size);
- if (usize < size) {
- /* size_t overflow. */
- return 0;
- }
- }
-
- /*
- * Calculate the multi-page mapping that large_palloc() would need in
- * order to guarantee the alignment.
- */
- if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
- /* size_t overflow. */
- return 0;
- }
- return usize;
-}
-
-#endif /* JEMALLOC_INTERNAL_SIZE_H */
+#ifndef JEMALLOC_INTERNAL_SIZE_H
+#define JEMALLOC_INTERNAL_SIZE_H
+
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/util.h"
+
+/*
+ * sz module: Size computations.
+ *
+ * Some abbreviations used here:
+ * p: Page
+ * ind: Index
+ * s, sz: Size
+ * u: Usable size
+ * a: Aligned
+ *
+ * These are not always used completely consistently, but should be enough to
+ * interpret function names. E.g. sz_psz2ind converts page size to page size
+ * index; sz_sa2u converts a (size, alignment) allocation request to the usable
+ * size that would result from such an allocation.
+ */
+
+/*
+ * sz_pind2sz_tab encodes the same information as could be computed by
+ * sz_pind2sz_compute().
+ */
+extern size_t sz_pind2sz_tab[SC_NPSIZES + 1];
+/*
+ * sz_index2size_tab encodes the same information as could be computed (at
+ * unacceptable cost in some code paths) by sz_index2size_compute().
+ */
+extern size_t sz_index2size_tab[SC_NSIZES];
+/*
+ * sz_size2index_tab is a compact lookup table that rounds request sizes up to
+ * size classes. In order to reduce cache footprint, the table is compressed,
+ * and all accesses are via sz_size2index().
+ */
+extern uint8_t sz_size2index_tab[];
+
+static const size_t sz_large_pad =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ PAGE
+#else
+ 0
+#endif
+ ;
+
+extern void sz_boot(const sc_data_t *sc_data);
+
+JEMALLOC_ALWAYS_INLINE pszind_t
+sz_psz2ind(size_t psz) {
+ if (unlikely(psz > SC_LARGE_MAXCLASS)) {
+ return SC_NPSIZES;
+ }
+ pszind_t x = lg_floor((psz<<1)-1);
+ pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ?
+ 0 : x - (SC_LG_NGROUP + LG_PAGE);
+ pszind_t grp = shift << SC_LG_NGROUP;
+
+ pszind_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
+ LG_PAGE : x - SC_LG_NGROUP - 1;
+
+ size_t delta_inverse_mask = ZU(-1) << lg_delta;
+ pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
+ ((ZU(1) << SC_LG_NGROUP) - 1);
+
+ pszind_t ind = grp + mod;
+ return ind;
+}
+
+static inline size_t
+sz_pind2sz_compute(pszind_t pind) {
+ if (unlikely(pind == SC_NPSIZES)) {
+ return SC_LARGE_MAXCLASS + PAGE;
+ }
+ size_t grp = pind >> SC_LG_NGROUP;
+ size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1);
+
+ size_t grp_size_mask = ~((!!grp)-1);
+ size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp)
+ & grp_size_mask;
+
+ size_t shift = (grp == 0) ? 1 : grp;
+ size_t lg_delta = shift + (LG_PAGE-1);
+ size_t mod_size = (mod+1) << lg_delta;
+
+ size_t sz = grp_size + mod_size;
+ return sz;
+}
+
+static inline size_t
+sz_pind2sz_lookup(pszind_t pind) {
+ size_t ret = (size_t)sz_pind2sz_tab[pind];
+ assert(ret == sz_pind2sz_compute(pind));
+ return ret;
+}
+
+static inline size_t
+sz_pind2sz(pszind_t pind) {
+ assert(pind < SC_NPSIZES + 1);
+ return sz_pind2sz_lookup(pind);
+}
+
+static inline size_t
+sz_psz2u(size_t psz) {
+ if (unlikely(psz > SC_LARGE_MAXCLASS)) {
+ return SC_LARGE_MAXCLASS + PAGE;
+ }
+ size_t x = lg_floor((psz<<1)-1);
+ size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
+ LG_PAGE : x - SC_LG_NGROUP - 1;
+ size_t delta = ZU(1) << lg_delta;
+ size_t delta_mask = delta - 1;
+ size_t usize = (psz + delta_mask) & ~delta_mask;
+ return usize;
+}
+
+static inline szind_t
+sz_size2index_compute(size_t size) {
+ if (unlikely(size > SC_LARGE_MAXCLASS)) {
+ return SC_NSIZES;
+ }
+
+ if (size == 0) {
+ return 0;
+ }
+#if (SC_NTINY != 0)
+ if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
+ szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
+ szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
+ }
+#endif
+ {
+ szind_t x = lg_floor((size<<1)-1);
+ szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 :
+ x - (SC_LG_NGROUP + LG_QUANTUM);
+ szind_t grp = shift << SC_LG_NGROUP;
+
+ szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
+ ? LG_QUANTUM : x - SC_LG_NGROUP - 1;
+
+ size_t delta_inverse_mask = ZU(-1) << lg_delta;
+ szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
+ ((ZU(1) << SC_LG_NGROUP) - 1);
+
+ szind_t index = SC_NTINY + grp + mod;
+ return index;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index_lookup(size_t size) {
+ assert(size <= SC_LOOKUP_MAXCLASS);
+ szind_t ret = (sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
+ >> SC_LG_TINY_MIN]);
+ assert(ret == sz_size2index_compute(size));
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index(size_t size) {
+ if (likely(size <= SC_LOOKUP_MAXCLASS)) {
+ return sz_size2index_lookup(size);
+ }
+ return sz_size2index_compute(size);
+}
+
+static inline size_t
+sz_index2size_compute(szind_t index) {
+#if (SC_NTINY > 0)
+ if (index < SC_NTINY) {
+ return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index));
+ }
+#endif
+ {
+ size_t reduced_index = index - SC_NTINY;
+ size_t grp = reduced_index >> SC_LG_NGROUP;
+ size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) -
+ 1);
+
+ size_t grp_size_mask = ~((!!grp)-1);
+ size_t grp_size = ((ZU(1) << (LG_QUANTUM +
+ (SC_LG_NGROUP-1))) << grp) & grp_size_mask;
+
+ size_t shift = (grp == 0) ? 1 : grp;
+ size_t lg_delta = shift + (LG_QUANTUM-1);
+ size_t mod_size = (mod+1) << lg_delta;
+
+ size_t usize = grp_size + mod_size;
+ return usize;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size_lookup(szind_t index) {
+ size_t ret = (size_t)sz_index2size_tab[index];
+ assert(ret == sz_index2size_compute(index));
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size(szind_t index) {
+ assert(index < SC_NSIZES);
+ return sz_index2size_lookup(index);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u_compute(size_t size) {
+ if (unlikely(size > SC_LARGE_MAXCLASS)) {
+ return 0;
+ }
+
+ if (size == 0) {
+ size++;
+ }
+#if (SC_NTINY > 0)
+ if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
+ size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
+ size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
+ (ZU(1) << lg_ceil));
+ }
+#endif
+ {
+ size_t x = lg_floor((size<<1)-1);
+ size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
+ ? LG_QUANTUM : x - SC_LG_NGROUP - 1;
+ size_t delta = ZU(1) << lg_delta;
+ size_t delta_mask = delta - 1;
+ size_t usize = (size + delta_mask) & ~delta_mask;
+ return usize;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u_lookup(size_t size) {
+ size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));
+
+ assert(ret == sz_s2u_compute(size));
+ return ret;
+}
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size.
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u(size_t size) {
+ if (likely(size <= SC_LOOKUP_MAXCLASS)) {
+ return sz_s2u_lookup(size);
+ }
+ return sz_s2u_compute(size);
+}
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size and alignment.
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+sz_sa2u(size_t size, size_t alignment) {
+ size_t usize;
+
+ assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
+
+ /* Try for a small size class. */
+ if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) {
+ /*
+ * Round size up to the nearest multiple of alignment.
+ *
+ * This done, we can take advantage of the fact that for each
+ * small size class, every object is aligned at the smallest
+ * power of two that is non-zero in the base two representation
+ * of the size. For example:
+ *
+ * Size | Base 2 | Minimum alignment
+ * -----+----------+------------------
+ * 96 | 1100000 | 32
+ * 144 | 10100000 | 32
+ * 192 | 11000000 | 64
+ */
+ usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
+ if (usize < SC_LARGE_MINCLASS) {
+ return usize;
+ }
+ }
+
+ /* Large size class. Beware of overflow. */
+
+ if (unlikely(alignment > SC_LARGE_MAXCLASS)) {
+ return 0;
+ }
+
+ /* Make sure result is a large size class. */
+ if (size <= SC_LARGE_MINCLASS) {
+ usize = SC_LARGE_MINCLASS;
+ } else {
+ usize = sz_s2u(size);
+ if (usize < size) {
+ /* size_t overflow. */
+ return 0;
+ }
+ }
+
+ /*
+ * Calculate the multi-page mapping that large_palloc() would need in
+ * order to guarantee the alignment.
+ */
+ if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
+ /* size_t overflow. */
+ return 0;
+ }
+ return usize;
+}
+
+#endif /* JEMALLOC_INTERNAL_SIZE_H */
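
Editor's note: sz_s2u_compute() above rounds a request up to its size class; within each size doubling there are 2^SC_LG_NGROUP classes, so the class spacing (delta) doubles per group. A worked sketch of the general (non-tiny) branch, assuming the common x86-64 parameters LG_QUANTUM = 4 and SC_LG_NGROUP = 2 for illustration (the real constants come from sc.h and the platform):

#include <assert.h>
#include <stddef.h>

#define EX_LG_QUANTUM	4	/* 16-byte quantum (assumed) */
#define EX_LG_NGROUP	2	/* 4 size classes per doubling (assumed) */

static size_t
ex_lg_floor(size_t x) {
	size_t lg = 0;
	while (x >>= 1) {
		lg++;
	}
	return lg;
}

/* Mirrors the general (non-tiny) branch of sz_s2u_compute(). */
static size_t
ex_s2u(size_t size) {
	size_t x = ex_lg_floor((size << 1) - 1);
	size_t lg_delta = (x < EX_LG_NGROUP + EX_LG_QUANTUM + 1) ?
	    EX_LG_QUANTUM : x - EX_LG_NGROUP - 1;
	size_t delta_mask = ((size_t)1 << lg_delta) - 1;
	return (size + delta_mask) & ~delta_mask;
}

int
main(void) {
	assert(ex_s2u(16) == 16);	/* already a class boundary */
	assert(ex_s2u(100) == 112);	/* 16-byte spacing up to 128 */
	assert(ex_s2u(200) == 224);	/* spacing doubles to 32 past 128 */
	return 0;
}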
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/tcache_externs.h b/contrib/libs/jemalloc/include/jemalloc/internal/tcache_externs.h
index d63eafde8c..330776b6d5 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/tcache_externs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/tcache_externs.h
@@ -1,53 +1,53 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
-#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
-
-extern bool opt_tcache;
-extern ssize_t opt_lg_tcache_max;
-
-extern cache_bin_info_t *tcache_bin_info;
-
-/*
- * Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more
- * large-object bins.
- */
-extern unsigned nhbins;
-
-/* Maximum cached size class. */
-extern size_t tcache_maxclass;
-
-/*
- * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
- * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
- * completely disjoint from this data structure. tcaches starts off as a sparse
- * array, so it has no physical memory footprint until individual pages are
- * touched. This allows the entire array to be allocated the first time an
- * explicit tcache is created without a disproportionate impact on memory usage.
- */
-extern tcaches_t *tcaches;
-
-size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
-void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- cache_bin_t *tbin, szind_t binind, bool *tcache_success);
-void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
- szind_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
- unsigned rem, tcache_t *tcache);
-void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
- arena_t *arena);
-tcache_t *tcache_create_explicit(tsd_t *tsd);
-void tcache_cleanup(tsd_t *tsd);
-void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
-bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
-void tcaches_flush(tsd_t *tsd, unsigned ind);
-void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(tsdn_t *tsdn);
-void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
-void tcache_prefork(tsdn_t *tsdn);
-void tcache_postfork_parent(tsdn_t *tsdn);
-void tcache_postfork_child(tsdn_t *tsdn);
-void tcache_flush(tsd_t *tsd);
-bool tsd_tcache_data_init(tsd_t *tsd);
-bool tsd_tcache_enabled_data_init(tsd_t *tsd);
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
+#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
+#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
+
+extern bool opt_tcache;
+extern ssize_t opt_lg_tcache_max;
+
+extern cache_bin_info_t *tcache_bin_info;
+
+/*
+ * Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more
+ * large-object bins.
+ */
+extern unsigned nhbins;
+
+/* Maximum cached size class. */
+extern size_t tcache_maxclass;
+
+/*
+ * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
+ * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
+ * completely disjoint from this data structure. tcaches starts off as a sparse
+ * array, so it has no physical memory footprint until individual pages are
+ * touched. This allows the entire array to be allocated the first time an
+ * explicit tcache is created without a disproportionate impact on memory usage.
+ */
+extern tcaches_t *tcaches;
+
+size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
+void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
+void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ cache_bin_t *tbin, szind_t binind, bool *tcache_success);
+void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
+ szind_t binind, unsigned rem);
+void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache);
+void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
+ arena_t *arena);
+tcache_t *tcache_create_explicit(tsd_t *tsd);
+void tcache_cleanup(tsd_t *tsd);
+void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
+void tcaches_flush(tsd_t *tsd, unsigned ind);
+void tcaches_destroy(tsd_t *tsd, unsigned ind);
+bool tcache_boot(tsdn_t *tsdn);
+void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+void tcache_prefork(tsdn_t *tsdn);
+void tcache_postfork_parent(tsdn_t *tsdn);
+void tcache_postfork_child(tsdn_t *tsdn);
+void tcache_flush(tsd_t *tsd);
+bool tsd_tcache_data_init(tsd_t *tsd);
+bool tsd_tcache_enabled_data_init(tsd_t *tsd);
+
+#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
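
Editor's note: the explicit tcaches described in the comment above are driven from the public API via the tcache.{create,flush,destroy} mallctls and the MALLOCX_TCACHE() flag. A usage sketch, assuming an unprefixed jemalloc build; consult the jemalloc man page for the authoritative mallctl contract:

#include <jemalloc/jemalloc.h>
#include <stddef.h>

int
use_explicit_tcache(void) {
	unsigned tc;
	size_t sz = sizeof(tc);

	/* Create an explicit tcache and obtain its index. */
	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0) {
		return -1;
	}
	/* Route an allocation and its deallocation through that tcache. */
	void *p = mallocx(64, MALLOCX_TCACHE(tc));
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE(tc));
	}
	/* Flush cached objects and release the tcache index. */
	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
	return 0;
}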
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/tcache_inlines.h b/contrib/libs/jemalloc/include/jemalloc/internal/tcache_inlines.h
index 5eca20e893..73f30074a5 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/tcache_inlines.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/tcache_inlines.h
@@ -1,227 +1,227 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
-#define JEMALLOC_INTERNAL_TCACHE_INLINES_H
-
-#include "jemalloc/internal/bin.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/ticker.h"
-#include "jemalloc/internal/util.h"
-
-static inline bool
-tcache_enabled_get(tsd_t *tsd) {
- return tsd_tcache_enabled_get(tsd);
-}
-
-static inline void
-tcache_enabled_set(tsd_t *tsd, bool enabled) {
- bool was_enabled = tsd_tcache_enabled_get(tsd);
-
- if (!was_enabled && enabled) {
- tsd_tcache_data_init(tsd);
- } else if (was_enabled && !enabled) {
- tcache_cleanup(tsd);
- }
- /* Commit the state last. Above calls check current state. */
- tsd_tcache_enabled_set(tsd, enabled);
- tsd_slow_update(tsd);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_event(tsd_t *tsd, tcache_t *tcache) {
- if (TCACHE_GC_INCR == 0) {
- return;
- }
-
- if (unlikely(ticker_tick(&tcache->gc_ticker))) {
- tcache_event_hard(tsd, tcache);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
- size_t size, szind_t binind, bool zero, bool slow_path) {
- void *ret;
- cache_bin_t *bin;
- bool tcache_success;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
- assert(binind < SC_NBINS);
- bin = tcache_small_bin_get(tcache, binind);
- ret = cache_bin_alloc_easy(bin, &tcache_success);
- assert(tcache_success == (ret != NULL));
- if (unlikely(!tcache_success)) {
- bool tcache_hard_success;
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL)) {
- return NULL;
- }
-
- ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
- bin, binind, &tcache_hard_success);
- if (tcache_hard_success == false) {
- return NULL;
- }
- }
-
- assert(ret);
- /*
- * Only compute usize if required. The checks in the following if
- * statement are all static.
- */
- if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
- usize = sz_index2size(binind);
- assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
- }
-
- if (likely(!zero)) {
- if (slow_path && config_fill) {
- if (unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret, &bin_infos[binind],
- false);
- } else if (unlikely(opt_zero)) {
- memset(ret, 0, usize);
- }
- }
- } else {
- if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret, &bin_infos[binind], true);
- }
- memset(ret, 0, usize);
- }
-
- if (config_stats) {
- bin->tstats.nrequests++;
- }
- if (config_prof) {
- tcache->prof_accumbytes += usize;
- }
- tcache_event(tsd, tcache);
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
- szind_t binind, bool zero, bool slow_path) {
- void *ret;
- cache_bin_t *bin;
- bool tcache_success;
-
- assert(binind >= SC_NBINS &&binind < nhbins);
- bin = tcache_large_bin_get(tcache, binind);
- ret = cache_bin_alloc_easy(bin, &tcache_success);
- assert(tcache_success == (ret != NULL));
- if (unlikely(!tcache_success)) {
- /*
- * Only allocate one large object at a time, because it's quite
- * expensive to create one and not use it.
- */
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL)) {
- return NULL;
- }
-
- ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
- if (ret == NULL) {
- return NULL;
- }
- } else {
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
- /* Only compute usize on demand */
- if (config_prof || (slow_path && config_fill) ||
- unlikely(zero)) {
- usize = sz_index2size(binind);
- assert(usize <= tcache_maxclass);
- }
-
- if (likely(!zero)) {
- if (slow_path && config_fill) {
- if (unlikely(opt_junk_alloc)) {
- memset(ret, JEMALLOC_ALLOC_JUNK,
- usize);
- } else if (unlikely(opt_zero)) {
- memset(ret, 0, usize);
- }
- }
- } else {
- memset(ret, 0, usize);
- }
-
- if (config_stats) {
- bin->tstats.nrequests++;
- }
- if (config_prof) {
- tcache->prof_accumbytes += usize;
- }
- }
-
- tcache_event(tsd, tcache);
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
- bool slow_path) {
- cache_bin_t *bin;
- cache_bin_info_t *bin_info;
-
- assert(tcache_salloc(tsd_tsdn(tsd), ptr)
- <= SC_SMALL_MAXCLASS);
-
- if (slow_path && config_fill && unlikely(opt_junk_free)) {
- arena_dalloc_junk_small(ptr, &bin_infos[binind]);
- }
-
- bin = tcache_small_bin_get(tcache, binind);
- bin_info = &tcache_bin_info[binind];
- if (unlikely(!cache_bin_dalloc_easy(bin, bin_info, ptr))) {
- tcache_bin_flush_small(tsd, tcache, bin, binind,
- (bin_info->ncached_max >> 1));
- bool ret = cache_bin_dalloc_easy(bin, bin_info, ptr);
- assert(ret);
- }
-
- tcache_event(tsd, tcache);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
- bool slow_path) {
- cache_bin_t *bin;
- cache_bin_info_t *bin_info;
-
- assert(tcache_salloc(tsd_tsdn(tsd), ptr)
- > SC_SMALL_MAXCLASS);
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
-
- if (slow_path && config_fill && unlikely(opt_junk_free)) {
- large_dalloc_junk(ptr, sz_index2size(binind));
- }
-
- bin = tcache_large_bin_get(tcache, binind);
- bin_info = &tcache_bin_info[binind];
- if (unlikely(bin->ncached == bin_info->ncached_max)) {
- tcache_bin_flush_large(tsd, bin, binind,
- (bin_info->ncached_max >> 1), tcache);
- }
- assert(bin->ncached < bin_info->ncached_max);
- bin->ncached++;
- *(bin->avail - bin->ncached) = ptr;
-
- tcache_event(tsd, tcache);
-}
-
-JEMALLOC_ALWAYS_INLINE tcache_t *
-tcaches_get(tsd_t *tsd, unsigned ind) {
- tcaches_t *elm = &tcaches[ind];
- if (unlikely(elm->tcache == NULL)) {
- malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind);
- abort();
- } else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) {
- elm->tcache = tcache_create_explicit(tsd);
- }
- return elm->tcache;
-}
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
+#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
+#define JEMALLOC_INTERNAL_TCACHE_INLINES_H
+
+#include "jemalloc/internal/bin.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/util.h"
+
+static inline bool
+tcache_enabled_get(tsd_t *tsd) {
+ return tsd_tcache_enabled_get(tsd);
+}
+
+static inline void
+tcache_enabled_set(tsd_t *tsd, bool enabled) {
+ bool was_enabled = tsd_tcache_enabled_get(tsd);
+
+ if (!was_enabled && enabled) {
+ tsd_tcache_data_init(tsd);
+ } else if (was_enabled && !enabled) {
+ tcache_cleanup(tsd);
+ }
+ /* Commit the state last. Above calls check current state. */
+ tsd_tcache_enabled_set(tsd, enabled);
+ tsd_slow_update(tsd);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_event(tsd_t *tsd, tcache_t *tcache) {
+ if (TCACHE_GC_INCR == 0) {
+ return;
+ }
+
+ if (unlikely(ticker_tick(&tcache->gc_ticker))) {
+ tcache_event_hard(tsd, tcache);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ size_t size, szind_t binind, bool zero, bool slow_path) {
+ void *ret;
+ cache_bin_t *bin;
+ bool tcache_success;
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert(binind < SC_NBINS);
+ bin = tcache_small_bin_get(tcache, binind);
+ ret = cache_bin_alloc_easy(bin, &tcache_success);
+ assert(tcache_success == (ret != NULL));
+ if (unlikely(!tcache_success)) {
+ bool tcache_hard_success;
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL)) {
+ return NULL;
+ }
+
+ ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
+ bin, binind, &tcache_hard_success);
+ if (tcache_hard_success == false) {
+ return NULL;
+ }
+ }
+
+ assert(ret);
+ /*
+ * Only compute usize if required. The checks in the following if
+ * statement are all static.
+ */
+ if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
+ usize = sz_index2size(binind);
+ assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
+ }
+
+ if (likely(!zero)) {
+ if (slow_path && config_fill) {
+ if (unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret, &bin_infos[binind],
+ false);
+ } else if (unlikely(opt_zero)) {
+ memset(ret, 0, usize);
+ }
+ }
+ } else {
+ if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret, &bin_infos[binind], true);
+ }
+ memset(ret, 0, usize);
+ }
+
+ if (config_stats) {
+ bin->tstats.nrequests++;
+ }
+ if (config_prof) {
+ tcache->prof_accumbytes += usize;
+ }
+ tcache_event(tsd, tcache);
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+ szind_t binind, bool zero, bool slow_path) {
+ void *ret;
+ cache_bin_t *bin;
+ bool tcache_success;
+
+	assert(binind >= SC_NBINS && binind < nhbins);
+ bin = tcache_large_bin_get(tcache, binind);
+ ret = cache_bin_alloc_easy(bin, &tcache_success);
+ assert(tcache_success == (ret != NULL));
+ if (unlikely(!tcache_success)) {
+ /*
+ * Only allocate one large object at a time, because it's quite
+ * expensive to create one and not use it.
+ */
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL)) {
+ return NULL;
+ }
+
+ ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
+ if (ret == NULL) {
+ return NULL;
+ }
+ } else {
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+ /* Only compute usize on demand */
+ if (config_prof || (slow_path && config_fill) ||
+ unlikely(zero)) {
+ usize = sz_index2size(binind);
+ assert(usize <= tcache_maxclass);
+ }
+
+ if (likely(!zero)) {
+ if (slow_path && config_fill) {
+ if (unlikely(opt_junk_alloc)) {
+ memset(ret, JEMALLOC_ALLOC_JUNK,
+ usize);
+ } else if (unlikely(opt_zero)) {
+ memset(ret, 0, usize);
+ }
+ }
+ } else {
+ memset(ret, 0, usize);
+ }
+
+ if (config_stats) {
+ bin->tstats.nrequests++;
+ }
+ if (config_prof) {
+ tcache->prof_accumbytes += usize;
+ }
+ }
+
+ tcache_event(tsd, tcache);
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
+ bool slow_path) {
+ cache_bin_t *bin;
+ cache_bin_info_t *bin_info;
+
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr)
+ <= SC_SMALL_MAXCLASS);
+
+ if (slow_path && config_fill && unlikely(opt_junk_free)) {
+ arena_dalloc_junk_small(ptr, &bin_infos[binind]);
+ }
+
+ bin = tcache_small_bin_get(tcache, binind);
+ bin_info = &tcache_bin_info[binind];
+ if (unlikely(!cache_bin_dalloc_easy(bin, bin_info, ptr))) {
+ tcache_bin_flush_small(tsd, tcache, bin, binind,
+ (bin_info->ncached_max >> 1));
+ bool ret = cache_bin_dalloc_easy(bin, bin_info, ptr);
+ assert(ret);
+ }
+
+ tcache_event(tsd, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
+ bool slow_path) {
+ cache_bin_t *bin;
+ cache_bin_info_t *bin_info;
+
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr)
+ > SC_SMALL_MAXCLASS);
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
+
+ if (slow_path && config_fill && unlikely(opt_junk_free)) {
+ large_dalloc_junk(ptr, sz_index2size(binind));
+ }
+
+ bin = tcache_large_bin_get(tcache, binind);
+ bin_info = &tcache_bin_info[binind];
+ if (unlikely(bin->ncached == bin_info->ncached_max)) {
+ tcache_bin_flush_large(tsd, bin, binind,
+ (bin_info->ncached_max >> 1), tcache);
+ }
+ assert(bin->ncached < bin_info->ncached_max);
+ bin->ncached++;
+ *(bin->avail - bin->ncached) = ptr;
+
+ tcache_event(tsd, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcaches_get(tsd_t *tsd, unsigned ind) {
+ tcaches_t *elm = &tcaches[ind];
+ if (unlikely(elm->tcache == NULL)) {
+ malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind);
+ abort();
+ } else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) {
+ elm->tcache = tcache_create_explicit(tsd);
+ }
+ return elm->tcache;
+}
+
+#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
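Editorial note: tcache_alloc_small() and tcache_alloc_large() above share one shape -- pop from the per-thread cache bin on the fast path, and fall back to the arena only when the bin is empty. The sketch below is a minimal, hypothetical illustration of that two-tier pattern; toy_bin_t, toy_bin_alloc_easy and the refill callback are invented names for illustration only, not jemalloc APIs.

#include <stdbool.h>
#include <stddef.h>

typedef struct {
	void *slots[8];    /* cached pointers, most recent on top */
	int   ncached;     /* how many of slots[] are filled */
} toy_bin_t;

/* Fast path: pop a cached pointer, loosely mirroring cache_bin_alloc_easy(). */
static inline void *
toy_bin_alloc_easy(toy_bin_t *bin, bool *success) {
	if (bin->ncached == 0) {
		*success = false;
		return NULL;
	}
	*success = true;
	return bin->slots[--bin->ncached];
}

/* Slow-path stand-in: jemalloc would refill the bin from the chosen arena. */
static void *
toy_alloc(toy_bin_t *bin, void *(*refill)(void)) {
	bool ok;
	void *ret = toy_bin_alloc_easy(bin, &ok);
	if (!ok) {
		ret = refill();
	}
	return ret;
}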
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/tcache_structs.h b/contrib/libs/jemalloc/include/jemalloc/internal/tcache_structs.h
index 172ef9040c..d4941981c1 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/tcache_structs.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/tcache_structs.h
@@ -1,70 +1,70 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
-#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
-
-#include "jemalloc/internal/cache_bin.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/ticker.h"
-#include "jemalloc/internal/tsd_types.h"
-
-/* Various uses of this struct need it to be a named type. */
-typedef ql_elm(tsd_t) tsd_link_t;
-
-struct tcache_s {
- /*
- * To minimize our cache-footprint, we put the frequently accessed data
- * together at the start of this struct.
- */
-
- /* Cleared after arena_prof_accum(). */
- uint64_t prof_accumbytes;
- /* Drives incremental GC. */
- ticker_t gc_ticker;
- /*
- * The pointer stacks associated with bins follow as a contiguous array.
- * During tcache initialization, the avail pointer in each element of
- * tbins is initialized to point to the proper offset within this array.
- */
- cache_bin_t bins_small[SC_NBINS];
-
- /*
- * This data is less hot; we can be a little less careful with our
- * footprint here.
- */
- /* Lets us track all the tcaches in an arena. */
- ql_elm(tcache_t) link;
-
- /* Logically scoped to tsd, but put here for cache layout reasons. */
- ql_elm(tsd_t) tsd_link;
- bool in_hook;
-
- /*
- * The descriptor lets the arena find our cache bins without seeing the
- * tcache definition. This enables arenas to aggregate stats across
- * tcaches without having a tcache dependency.
- */
- cache_bin_array_descriptor_t cache_bin_array_descriptor;
-
- /* The arena this tcache is associated with. */
- arena_t *arena;
- /* Next bin to GC. */
- szind_t next_gc_bin;
- /* For small bins, fill (ncached_max >> lg_fill_div). */
- uint8_t lg_fill_div[SC_NBINS];
- /*
- * We put the cache bins for large size classes at the end of the
- * struct, since some of them might not get used. This might end up
- * letting us avoid touching an extra page if we don't have to.
- */
- cache_bin_t bins_large[SC_NSIZES-SC_NBINS];
-};
-
-/* Linkage for list of available (previously used) explicit tcache IDs. */
-struct tcaches_s {
- union {
- tcache_t *tcache;
- tcaches_t *next;
- };
-};
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
+#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
+#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
+
+#include "jemalloc/internal/cache_bin.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/tsd_types.h"
+
+/* Various uses of this struct need it to be a named type. */
+typedef ql_elm(tsd_t) tsd_link_t;
+
+struct tcache_s {
+ /*
+ * To minimize our cache-footprint, we put the frequently accessed data
+ * together at the start of this struct.
+ */
+
+ /* Cleared after arena_prof_accum(). */
+ uint64_t prof_accumbytes;
+ /* Drives incremental GC. */
+ ticker_t gc_ticker;
+ /*
+ * The pointer stacks associated with bins follow as a contiguous array.
+ * During tcache initialization, the avail pointer in each element of
+ * tbins is initialized to point to the proper offset within this array.
+ */
+ cache_bin_t bins_small[SC_NBINS];
+
+ /*
+ * This data is less hot; we can be a little less careful with our
+ * footprint here.
+ */
+ /* Lets us track all the tcaches in an arena. */
+ ql_elm(tcache_t) link;
+
+ /* Logically scoped to tsd, but put here for cache layout reasons. */
+ ql_elm(tsd_t) tsd_link;
+ bool in_hook;
+
+ /*
+ * The descriptor lets the arena find our cache bins without seeing the
+ * tcache definition. This enables arenas to aggregate stats across
+ * tcaches without having a tcache dependency.
+ */
+ cache_bin_array_descriptor_t cache_bin_array_descriptor;
+
+ /* The arena this tcache is associated with. */
+ arena_t *arena;
+ /* Next bin to GC. */
+ szind_t next_gc_bin;
+ /* For small bins, fill (ncached_max >> lg_fill_div). */
+ uint8_t lg_fill_div[SC_NBINS];
+ /*
+ * We put the cache bins for large size classes at the end of the
+ * struct, since some of them might not get used. This might end up
+ * letting us avoid touching an extra page if we don't have to.
+ */
+ cache_bin_t bins_large[SC_NSIZES-SC_NBINS];
+};
+
+/* Linkage for list of available (previously used) explicit tcache IDs. */
+struct tcaches_s {
+ union {
+ tcache_t *tcache;
+ tcaches_t *next;
+ };
+};
+
+#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
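Editorial note: tcache_s above orders its members hot-to-cold on purpose -- prof_accumbytes, gc_ticker and bins_small sit at the front so the allocation fast path touches as few cache lines as possible, while bins_large trails at the end. Below is a hedged, self-contained illustration of checking that kind of layout at compile time; toy_cache_t and its field names are invented, and the 64-byte cache line is an assumption.

#include <assert.h>
#include <stddef.h>

typedef struct {
	unsigned long hot_counter;    /* touched on every alloc/free */
	char          hot_bins[48];   /* small, frequently used bins */
	/* --- colder data below --- */
	void         *cold_link;
	char          cold_bins[4096];
} toy_cache_t;

/* The hot fields should fit inside the first 64-byte cache line. */
static_assert(offsetof(toy_cache_t, cold_link) <= 64,
    "hot fields spill past the first cache line");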
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/tcache_types.h b/contrib/libs/jemalloc/include/jemalloc/internal/tcache_types.h
index dce69382eb..d64e5daba7 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/tcache_types.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/tcache_types.h
@@ -1,59 +1,59 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
-#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
-
-#include "jemalloc/internal/sc.h"
-
-typedef struct tcache_s tcache_t;
-typedef struct tcaches_s tcaches_t;
-
-/*
- * tcache pointers close to NULL are used to encode state information that is
- * used for two purposes: preventing thread caching on a per thread basis and
- * cleaning up during thread shutdown.
- */
-#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
-#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
-#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
-#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
-
-/*
- * Absolute minimum number of cache slots for each small bin.
- */
-#define TCACHE_NSLOTS_SMALL_MIN 20
-
-/*
- * Absolute maximum number of cache slots for each small bin in the thread
- * cache. This is an additional constraint beyond that imposed as: twice the
- * number of regions per slab for this size class.
- *
- * This constant must be an even number.
- */
-#define TCACHE_NSLOTS_SMALL_MAX 200
-
-/* Number of cache slots for large size classes. */
-#define TCACHE_NSLOTS_LARGE 20
-
-/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
-#define LG_TCACHE_MAXCLASS_DEFAULT 15
-
-/*
- * TCACHE_GC_SWEEP is the approximate number of allocation events between
- * full GC sweeps. Integer rounding may cause the actual number to be
- * slightly higher, since GC is performed incrementally.
- */
-#define TCACHE_GC_SWEEP 8192
-
-/* Number of tcache allocation/deallocation events between incremental GCs. */
-#define TCACHE_GC_INCR \
- ((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1))
-
-/* Used in TSD static initializer only. Real init in tcache_data_init(). */
-#define TCACHE_ZERO_INITIALIZER {0}
-
-/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
-#define TCACHE_ENABLED_ZERO_INITIALIZER false
-
-/* Used for explicit tcache only. Means flushed but not destroyed. */
-#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
+#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
+#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
+
+#include "jemalloc/internal/sc.h"
+
+typedef struct tcache_s tcache_t;
+typedef struct tcaches_s tcaches_t;
+
+/*
+ * tcache pointers close to NULL are used to encode state information that is
+ * used for two purposes: preventing thread caching on a per thread basis and
+ * cleaning up during thread shutdown.
+ */
+#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
+#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
+#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
+#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
+
+/*
+ * Absolute minimum number of cache slots for each small bin.
+ */
+#define TCACHE_NSLOTS_SMALL_MIN 20
+
+/*
+ * Absolute maximum number of cache slots for each small bin in the thread
+ * cache. This is an additional constraint beyond that imposed as: twice the
+ * number of regions per slab for this size class.
+ *
+ * This constant must be an even number.
+ */
+#define TCACHE_NSLOTS_SMALL_MAX 200
+
+/* Number of cache slots for large size classes. */
+#define TCACHE_NSLOTS_LARGE 20
+
+/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
+#define LG_TCACHE_MAXCLASS_DEFAULT 15
+
+/*
+ * TCACHE_GC_SWEEP is the approximate number of allocation events between
+ * full GC sweeps. Integer rounding may cause the actual number to be
+ * slightly higher, since GC is performed incrementally.
+ */
+#define TCACHE_GC_SWEEP 8192
+
+/* Number of tcache allocation/deallocation events between incremental GCs. */
+#define TCACHE_GC_INCR \
+ ((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1))
+
+/* Used in TSD static initializer only. Real init in tcache_data_init(). */
+#define TCACHE_ZERO_INITIALIZER {0}
+
+/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
+#define TCACHE_ENABLED_ZERO_INITIALIZER false
+
+/* Used for explicit tcache only. Means flushed but not destroyed. */
+#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
+
+#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
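Editorial note: a quick worked example of the TCACHE_GC_SWEEP / TCACHE_GC_INCR arithmetic above, using a hypothetical SC_NBINS of 36 (the real value depends on the configured size classes).

#include <stdio.h>

#define TOY_GC_SWEEP 8192
#define TOY_NBINS    36
#define TOY_GC_INCR \
	((TOY_GC_SWEEP / TOY_NBINS) + ((TOY_GC_SWEEP / TOY_NBINS == 0) ? 0 : 1))

int main(void) {
	/* 8192 / 36 == 227, so TOY_GC_INCR == 228.  Sweeping all 36 bins then
	 * takes 228 * 36 == 8208 events, slightly more than TOY_GC_SWEEP --
	 * exactly the rounding effect the comment above describes. */
	printf("incr=%d, full sweep=%d events\n",
	    TOY_GC_INCR, TOY_GC_INCR * TOY_NBINS);
	return 0;
}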
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/test_hooks.h b/contrib/libs/jemalloc/include/jemalloc/internal/test_hooks.h
index a6351e59af..a57f599330 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/test_hooks.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/test_hooks.h
@@ -1,19 +1,19 @@
-#ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H
-#define JEMALLOC_INTERNAL_TEST_HOOKS_H
-
-extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)();
-extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)();
-
-#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
-
-#define open JEMALLOC_HOOK(open, test_hooks_libc_hook)
-#define read JEMALLOC_HOOK(read, test_hooks_libc_hook)
-#define write JEMALLOC_HOOK(write, test_hooks_libc_hook)
-#define readlink JEMALLOC_HOOK(readlink, test_hooks_libc_hook)
-#define close JEMALLOC_HOOK(close, test_hooks_libc_hook)
-#define creat JEMALLOC_HOOK(creat, test_hooks_libc_hook)
-#define secure_getenv JEMALLOC_HOOK(secure_getenv, test_hooks_libc_hook)
-/* Note that this is undef'd and re-define'd in src/prof.c. */
-#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
-
-#endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */
+#ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H
+#define JEMALLOC_INTERNAL_TEST_HOOKS_H
+
+extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)();
+extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)();
+
+#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
+
+#define open JEMALLOC_HOOK(open, test_hooks_libc_hook)
+#define read JEMALLOC_HOOK(read, test_hooks_libc_hook)
+#define write JEMALLOC_HOOK(write, test_hooks_libc_hook)
+#define readlink JEMALLOC_HOOK(readlink, test_hooks_libc_hook)
+#define close JEMALLOC_HOOK(close, test_hooks_libc_hook)
+#define creat JEMALLOC_HOOK(creat, test_hooks_libc_hook)
+#define secure_getenv JEMALLOC_HOOK(secure_getenv, test_hooks_libc_hook)
+/* Note that this is undef'd and re-define'd in src/prof.c. */
+#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
+
+#endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/ticker.h b/contrib/libs/jemalloc/include/jemalloc/internal/ticker.h
index 52d0db4c89..1094865b54 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/ticker.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/ticker.h
@@ -1,91 +1,91 @@
-#ifndef JEMALLOC_INTERNAL_TICKER_H
-#define JEMALLOC_INTERNAL_TICKER_H
-
-#include "jemalloc/internal/util.h"
-
-/**
- * A ticker makes it easy to count-down events until some limit. You
- * ticker_init the ticker to trigger every nticks events. You then notify it
- * that an event has occurred with calls to ticker_tick (or that nticks events
- * have occurred with a call to ticker_ticks), which will return true (and reset
- * the counter) if the countdown hit zero.
- */
-
-typedef struct {
- int32_t tick;
- int32_t nticks;
-} ticker_t;
-
-static inline void
-ticker_init(ticker_t *ticker, int32_t nticks) {
- ticker->tick = nticks;
- ticker->nticks = nticks;
-}
-
-static inline void
-ticker_copy(ticker_t *ticker, const ticker_t *other) {
- *ticker = *other;
-}
-
-static inline int32_t
-ticker_read(const ticker_t *ticker) {
- return ticker->tick;
-}
-
-/*
- * Not intended to be a public API. Unfortunately, on x86, neither gcc nor
- * clang seems smart enough to turn
- * ticker->tick -= nticks;
- * if (unlikely(ticker->tick < 0)) {
- * fixup ticker
- * return true;
- * }
- * return false;
- * into
- * subq %nticks_reg, (%ticker_reg)
- * js fixup ticker
- *
- * unless we force "fixup ticker" out of line. In that case, gcc gets it right,
- * but clang now does worse than before. So, on x86 with gcc, we force it out
- * of line, but otherwise let the inlining occur. Ordinarily this wouldn't be
- * worth the hassle, but this is on the fast path of both malloc and free (via
- * tcache_event).
- */
-#if defined(__GNUC__) && !defined(__clang__) \
- && (defined(__x86_64__) || defined(__i386__))
-JEMALLOC_NOINLINE
-#endif
-static bool
-ticker_fixup(ticker_t *ticker) {
- ticker->tick = ticker->nticks;
- return true;
-}
-
-static inline bool
-ticker_ticks(ticker_t *ticker, int32_t nticks) {
- ticker->tick -= nticks;
- if (unlikely(ticker->tick < 0)) {
- return ticker_fixup(ticker);
- }
- return false;
-}
-
-static inline bool
-ticker_tick(ticker_t *ticker) {
- return ticker_ticks(ticker, 1);
-}
-
+#ifndef JEMALLOC_INTERNAL_TICKER_H
+#define JEMALLOC_INTERNAL_TICKER_H
+
+#include "jemalloc/internal/util.h"
+
+/**
+ * A ticker makes it easy to count-down events until some limit. You
+ * ticker_init the ticker to trigger every nticks events. You then notify it
+ * that an event has occurred with calls to ticker_tick (or that nticks events
+ * have occurred with a call to ticker_ticks), which will return true (and reset
+ * the counter) if the countdown hit zero.
+ */
+
+typedef struct {
+ int32_t tick;
+ int32_t nticks;
+} ticker_t;
+
+static inline void
+ticker_init(ticker_t *ticker, int32_t nticks) {
+ ticker->tick = nticks;
+ ticker->nticks = nticks;
+}
+
+static inline void
+ticker_copy(ticker_t *ticker, const ticker_t *other) {
+ *ticker = *other;
+}
+
+static inline int32_t
+ticker_read(const ticker_t *ticker) {
+ return ticker->tick;
+}
+
/*
- * Try to tick. If ticker would fire, return true, but rely on
- * slowpath to reset ticker.
- */
-static inline bool
-ticker_trytick(ticker_t *ticker) {
- --ticker->tick;
- if (unlikely(ticker->tick < 0)) {
- return true;
- }
- return false;
-}
-
-#endif /* JEMALLOC_INTERNAL_TICKER_H */
+ * Not intended to be a public API. Unfortunately, on x86, neither gcc nor
+ * clang seems smart enough to turn
+ * ticker->tick -= nticks;
+ * if (unlikely(ticker->tick < 0)) {
+ * fixup ticker
+ * return true;
+ * }
+ * return false;
+ * into
+ * subq %nticks_reg, (%ticker_reg)
+ * js fixup ticker
+ *
+ * unless we force "fixup ticker" out of line. In that case, gcc gets it right,
+ * but clang now does worse than before. So, on x86 with gcc, we force it out
+ * of line, but otherwise let the inlining occur. Ordinarily this wouldn't be
+ * worth the hassle, but this is on the fast path of both malloc and free (via
+ * tcache_event).
+ */
+#if defined(__GNUC__) && !defined(__clang__) \
+ && (defined(__x86_64__) || defined(__i386__))
+JEMALLOC_NOINLINE
+#endif
+static bool
+ticker_fixup(ticker_t *ticker) {
+ ticker->tick = ticker->nticks;
+ return true;
+}
+
+static inline bool
+ticker_ticks(ticker_t *ticker, int32_t nticks) {
+ ticker->tick -= nticks;
+ if (unlikely(ticker->tick < 0)) {
+ return ticker_fixup(ticker);
+ }
+ return false;
+}
+
+static inline bool
+ticker_tick(ticker_t *ticker) {
+ return ticker_ticks(ticker, 1);
+}
+
+/*
+ * Try to tick. If ticker would fire, return true, but rely on
+ * slowpath to reset ticker.
+ */
+static inline bool
+ticker_trytick(ticker_t *ticker) {
+ --ticker->tick;
+ if (unlikely(ticker->tick < 0)) {
+ return true;
+ }
+ return false;
+}
+
+#endif /* JEMALLOC_INTERNAL_TICKER_H */
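Editorial note: a minimal usage sketch for the ticker API declared above. Only the ticker_* calls come from the header; the include path, the event loop and toy_periodic_work() are assumptions made for illustration.

#include "jemalloc/internal/ticker.h"   /* assumed include path */

static void
toy_periodic_work(void) {
	/* Placeholder for periodic work, e.g. tcache_event_hard(). */
}

static void
toy_event_loop(int nevents) {
	ticker_t t;
	ticker_init(&t, 64);    /* arm the countdown at 64 */
	for (int i = 0; i < nevents; i++) {
		/*
		 * ticker_tick() returns true once the countdown drops below
		 * zero; ticker_fixup() then re-arms it to 64.
		 */
		if (ticker_tick(&t)) {
			toy_periodic_work();
		}
	}
}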
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/tsd.h b/contrib/libs/jemalloc/include/jemalloc/internal/tsd.h
index fd6bd0892e..da0603a6b6 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/tsd.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/tsd.h
@@ -1,415 +1,415 @@
-#ifndef JEMALLOC_INTERNAL_TSD_H
-#define JEMALLOC_INTERNAL_TSD_H
-
-#include "jemalloc/internal/arena_types.h"
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/bin_types.h"
-#include "jemalloc/internal/jemalloc_internal_externs.h"
-#include "jemalloc/internal/prof_types.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/rtree_tsd.h"
-#include "jemalloc/internal/tcache_types.h"
-#include "jemalloc/internal/tcache_structs.h"
-#include "jemalloc/internal/util.h"
-#include "jemalloc/internal/witness.h"
-
-/*
- * Thread-Specific-Data layout
- * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
- * s: state
- * e: tcache_enabled
- * m: thread_allocated (config_stats)
- * f: thread_deallocated (config_stats)
- * p: prof_tdata (config_prof)
- * c: rtree_ctx (rtree cache accessed on deallocation)
- * t: tcache
- * --- data not accessed on tcache fast path: arena-related fields ---
- * d: arenas_tdata_bypass
- * r: reentrancy_level
- * x: narenas_tdata
- * i: iarena
- * a: arena
- * o: arenas_tdata
- * Loading TSD data is on the critical path of basically all malloc operations.
- * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
- * Use a compact layout to reduce cache footprint.
- * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
- * |---------------------------- 1st cacheline ----------------------------|
- * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
- * |---------------------------- 2nd cacheline ----------------------------|
- * | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
- * |---------------------------- 3rd cacheline ----------------------------|
- * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
- * +-------------------------------------------------------------------------+
- * Note: the entire tcache is embedded into TSD and spans multiple cachelines.
- *
- * The last 3 members (i, a and o) before tcache aren't really needed on tcache
- * fast path. However we have a number of unused tcache bins and witnesses
- * (never touched unless config_debug) at the end of tcache, so we place them
- * there to avoid breaking the cachelines and possibly paging in an extra page.
- */
-#ifdef JEMALLOC_JET
-typedef void (*test_callback_t)(int *);
-# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
-# define MALLOC_TEST_TSD \
- O(test_data, int, int) \
- O(test_callback, test_callback_t, int)
-# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
-#else
-# define MALLOC_TEST_TSD
-# define MALLOC_TEST_TSD_INITIALIZER
+#ifndef JEMALLOC_INTERNAL_TSD_H
+#define JEMALLOC_INTERNAL_TSD_H
+
+#include "jemalloc/internal/arena_types.h"
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/bin_types.h"
+#include "jemalloc/internal/jemalloc_internal_externs.h"
+#include "jemalloc/internal/prof_types.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/rtree_tsd.h"
+#include "jemalloc/internal/tcache_types.h"
+#include "jemalloc/internal/tcache_structs.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/witness.h"
+
+/*
+ * Thread-Specific-Data layout
+ * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
+ * s: state
+ * e: tcache_enabled
+ * m: thread_allocated (config_stats)
+ * f: thread_deallocated (config_stats)
+ * p: prof_tdata (config_prof)
+ * c: rtree_ctx (rtree cache accessed on deallocation)
+ * t: tcache
+ * --- data not accessed on tcache fast path: arena-related fields ---
+ * d: arenas_tdata_bypass
+ * r: reentrancy_level
+ * x: narenas_tdata
+ * i: iarena
+ * a: arena
+ * o: arenas_tdata
+ * Loading TSD data is on the critical path of basically all malloc operations.
+ * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
+ * Use a compact layout to reduce cache footprint.
+ * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
+ * |---------------------------- 1st cacheline ----------------------------|
+ * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
+ * |---------------------------- 2nd cacheline ----------------------------|
+ * | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
+ * |---------------------------- 3rd cacheline ----------------------------|
+ * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
+ * +-------------------------------------------------------------------------+
+ * Note: the entire tcache is embedded into TSD and spans multiple cachelines.
+ *
+ * The last 3 members (i, a and o) before tcache aren't really needed on tcache
+ * fast path. However we have a number of unused tcache bins and witnesses
+ * (never touched unless config_debug) at the end of tcache, so we place them
+ * there to avoid breaking the cachelines and possibly paging in an extra page.
+ */
+#ifdef JEMALLOC_JET
+typedef void (*test_callback_t)(int *);
+# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
+# define MALLOC_TEST_TSD \
+ O(test_data, int, int) \
+ O(test_callback, test_callback_t, int)
+# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
+#else
+# define MALLOC_TEST_TSD
+# define MALLOC_TEST_TSD_INITIALIZER
#endif
-/* O(name, type, nullable type) */
-#define MALLOC_TSD \
- O(tcache_enabled, bool, bool) \
- O(arenas_tdata_bypass, bool, bool) \
- O(reentrancy_level, int8_t, int8_t) \
- O(narenas_tdata, uint32_t, uint32_t) \
- O(offset_state, uint64_t, uint64_t) \
- O(thread_allocated, uint64_t, uint64_t) \
- O(thread_deallocated, uint64_t, uint64_t) \
- O(bytes_until_sample, int64_t, int64_t) \
- O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
- O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
- O(iarena, arena_t *, arena_t *) \
- O(arena, arena_t *, arena_t *) \
- O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
- O(binshards, tsd_binshards_t, tsd_binshards_t)\
- O(tcache, tcache_t, tcache_t) \
- O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
- MALLOC_TEST_TSD
-
-#define TSD_INITIALIZER { \
- ATOMIC_INIT(tsd_state_uninitialized), \
- TCACHE_ENABLED_ZERO_INITIALIZER, \
- false, \
- 0, \
- 0, \
- 0, \
- 0, \
- 0, \
- 0, \
- NULL, \
- RTREE_CTX_ZERO_INITIALIZER, \
- NULL, \
- NULL, \
- NULL, \
- TSD_BINSHARDS_ZERO_INITIALIZER, \
- TCACHE_ZERO_INITIALIZER, \
- WITNESS_TSD_INITIALIZER \
- MALLOC_TEST_TSD_INITIALIZER \
-}
-
-void *malloc_tsd_malloc(size_t size);
-void malloc_tsd_dalloc(void *wrapper);
-void malloc_tsd_cleanup_register(bool (*f)(void));
-tsd_t *malloc_tsd_boot0(void);
-void malloc_tsd_boot1(void);
-void tsd_cleanup(void *arg);
-tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
-void tsd_state_set(tsd_t *tsd, uint8_t new_state);
-void tsd_slow_update(tsd_t *tsd);
-void tsd_prefork(tsd_t *tsd);
-void tsd_postfork_parent(tsd_t *tsd);
-void tsd_postfork_child(tsd_t *tsd);
-
+/* O(name, type, nullable type) */
+#define MALLOC_TSD \
+ O(tcache_enabled, bool, bool) \
+ O(arenas_tdata_bypass, bool, bool) \
+ O(reentrancy_level, int8_t, int8_t) \
+ O(narenas_tdata, uint32_t, uint32_t) \
+ O(offset_state, uint64_t, uint64_t) \
+ O(thread_allocated, uint64_t, uint64_t) \
+ O(thread_deallocated, uint64_t, uint64_t) \
+ O(bytes_until_sample, int64_t, int64_t) \
+ O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
+ O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
+ O(iarena, arena_t *, arena_t *) \
+ O(arena, arena_t *, arena_t *) \
+ O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
+ O(binshards, tsd_binshards_t, tsd_binshards_t)\
+ O(tcache, tcache_t, tcache_t) \
+ O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
+ MALLOC_TEST_TSD
+
+#define TSD_INITIALIZER { \
+ ATOMIC_INIT(tsd_state_uninitialized), \
+ TCACHE_ENABLED_ZERO_INITIALIZER, \
+ false, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ NULL, \
+ RTREE_CTX_ZERO_INITIALIZER, \
+ NULL, \
+ NULL, \
+ NULL, \
+ TSD_BINSHARDS_ZERO_INITIALIZER, \
+ TCACHE_ZERO_INITIALIZER, \
+ WITNESS_TSD_INITIALIZER \
+ MALLOC_TEST_TSD_INITIALIZER \
+}
+
+void *malloc_tsd_malloc(size_t size);
+void malloc_tsd_dalloc(void *wrapper);
+void malloc_tsd_cleanup_register(bool (*f)(void));
+tsd_t *malloc_tsd_boot0(void);
+void malloc_tsd_boot1(void);
+void tsd_cleanup(void *arg);
+tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
+void tsd_state_set(tsd_t *tsd, uint8_t new_state);
+void tsd_slow_update(tsd_t *tsd);
+void tsd_prefork(tsd_t *tsd);
+void tsd_postfork_parent(tsd_t *tsd);
+void tsd_postfork_child(tsd_t *tsd);
+
/*
- * Call ..._inc when your module wants to take all threads down the slow paths,
- * and ..._dec when it no longer needs to.
+ * Call ..._inc when your module wants to take all threads down the slow paths,
+ * and ..._dec when it no longer needs to.
*/
-void tsd_global_slow_inc(tsdn_t *tsdn);
-void tsd_global_slow_dec(tsdn_t *tsdn);
-bool tsd_global_slow();
-
-enum {
- /* Common case --> jnz. */
- tsd_state_nominal = 0,
- /* Initialized but on slow path. */
- tsd_state_nominal_slow = 1,
- /*
- * Some thread has changed global state in such a way that all nominal
- * threads need to recompute their fast / slow status the next time they
- * get a chance.
- *
- * Any thread can change another thread's status *to* recompute, but
- * threads are the only ones who can change their status *from*
- * recompute.
- */
- tsd_state_nominal_recompute = 2,
- /*
- * The above nominal states should be lower values. We use
- * tsd_nominal_max to separate nominal states from threads in the
- * process of being born / dying.
- */
- tsd_state_nominal_max = 2,
-
- /*
- * A thread might free() during its death as its only allocator action;
- * in such scenarios, we need tsd, but set up in such a way that no
- * cleanup is necessary.
- */
- tsd_state_minimal_initialized = 3,
- /* States during which we know we're in thread death. */
- tsd_state_purgatory = 4,
- tsd_state_reincarnated = 5,
- /*
- * What it says on the tin; tsd that hasn't been initialized. Note
-	 * that even when the tsd struct lives in TLS, we need to keep track
- * of stuff like whether or not our pthread destructors have been
- * scheduled, so this really truly is different than the nominal state.
- */
- tsd_state_uninitialized = 6
-};
-
-/*
- * Some TSD accesses can only be done in a nominal state. To enforce this, we
- * wrap TSD member access in a function that asserts on TSD state, and mangle
- * field names to prevent touching them accidentally.
- */
-#define TSD_MANGLE(n) cant_access_tsd_items_directly_use_a_getter_or_setter_##n
-
-#ifdef JEMALLOC_U8_ATOMICS
-# define tsd_state_t atomic_u8_t
-# define tsd_atomic_load atomic_load_u8
-# define tsd_atomic_store atomic_store_u8
-# define tsd_atomic_exchange atomic_exchange_u8
+void tsd_global_slow_inc(tsdn_t *tsdn);
+void tsd_global_slow_dec(tsdn_t *tsdn);
+bool tsd_global_slow();
+
+enum {
+ /* Common case --> jnz. */
+ tsd_state_nominal = 0,
+ /* Initialized but on slow path. */
+ tsd_state_nominal_slow = 1,
+ /*
+ * Some thread has changed global state in such a way that all nominal
+ * threads need to recompute their fast / slow status the next time they
+ * get a chance.
+ *
+ * Any thread can change another thread's status *to* recompute, but
+ * threads are the only ones who can change their status *from*
+ * recompute.
+ */
+ tsd_state_nominal_recompute = 2,
+ /*
+ * The above nominal states should be lower values. We use
+ * tsd_nominal_max to separate nominal states from threads in the
+ * process of being born / dying.
+ */
+ tsd_state_nominal_max = 2,
+
+ /*
+ * A thread might free() during its death as its only allocator action;
+ * in such scenarios, we need tsd, but set up in such a way that no
+ * cleanup is necessary.
+ */
+ tsd_state_minimal_initialized = 3,
+ /* States during which we know we're in thread death. */
+ tsd_state_purgatory = 4,
+ tsd_state_reincarnated = 5,
+ /*
+ * What it says on the tin; tsd that hasn't been initialized. Note
+	 * that even when the tsd struct lives in TLS, we need to keep track
+ * of stuff like whether or not our pthread destructors have been
+ * scheduled, so this really truly is different than the nominal state.
+ */
+ tsd_state_uninitialized = 6
+};
+
+/*
+ * Some TSD accesses can only be done in a nominal state. To enforce this, we
+ * wrap TSD member access in a function that asserts on TSD state, and mangle
+ * field names to prevent touching them accidentally.
+ */
+#define TSD_MANGLE(n) cant_access_tsd_items_directly_use_a_getter_or_setter_##n
+
+#ifdef JEMALLOC_U8_ATOMICS
+# define tsd_state_t atomic_u8_t
+# define tsd_atomic_load atomic_load_u8
+# define tsd_atomic_store atomic_store_u8
+# define tsd_atomic_exchange atomic_exchange_u8
#else
-# define tsd_state_t atomic_u32_t
-# define tsd_atomic_load atomic_load_u32
-# define tsd_atomic_store atomic_store_u32
-# define tsd_atomic_exchange atomic_exchange_u32
+# define tsd_state_t atomic_u32_t
+# define tsd_atomic_load atomic_load_u32
+# define tsd_atomic_store atomic_store_u32
+# define tsd_atomic_exchange atomic_exchange_u32
#endif
-/* The actual tsd. */
-struct tsd_s {
- /*
- * The contents should be treated as totally opaque outside the tsd
- * module. Access any thread-local state through the getters and
- * setters below.
- */
-
- /*
-	 * We manually limit the state to just a single byte, unless the 8-bit
- * atomics are unavailable (which is rare).
- */
- tsd_state_t state;
-#define O(n, t, nt) \
- t TSD_MANGLE(n);
-MALLOC_TSD
-#undef O
-};
-
-JEMALLOC_ALWAYS_INLINE uint8_t
-tsd_state_get(tsd_t *tsd) {
- /*
- * This should be atomic. Unfortunately, compilers right now can't tell
-	 * that this can be done as a memory comparison, and force a load into
- * a register that hurts fast-path performance.
- */
- /* return atomic_load_u8(&tsd->state, ATOMIC_RELAXED); */
- return *(uint8_t *)&tsd->state;
-}
-
-/*
- * Wrapper around tsd_t that makes it possible to avoid implicit conversion
- * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
- * explicitly converted to tsd_t, which is non-nullable.
- */
-struct tsdn_s {
- tsd_t tsd;
-};
-#define TSDN_NULL ((tsdn_t *)0)
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsd_tsdn(tsd_t *tsd) {
- return (tsdn_t *)tsd;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsdn_null(const tsdn_t *tsdn) {
- return tsdn == NULL;
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsdn_tsd(tsdn_t *tsdn) {
- assert(!tsdn_null(tsdn));
-
- return &tsdn->tsd;
-}
-
-/*
- * We put the platform-specific data declarations and inlines into their own
- * header files to avoid cluttering this file. They define tsd_boot0,
- * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
- */
+/* The actual tsd. */
+struct tsd_s {
+ /*
+ * The contents should be treated as totally opaque outside the tsd
+ * module. Access any thread-local state through the getters and
+ * setters below.
+ */
+
+ /*
+	 * We manually limit the state to just a single byte, unless the 8-bit
+ * atomics are unavailable (which is rare).
+ */
+ tsd_state_t state;
+#define O(n, t, nt) \
+ t TSD_MANGLE(n);
+MALLOC_TSD
+#undef O
+};
+
+JEMALLOC_ALWAYS_INLINE uint8_t
+tsd_state_get(tsd_t *tsd) {
+ /*
+ * This should be atomic. Unfortunately, compilers right now can't tell
+	 * that this can be done as a memory comparison, and force a load into
+ * a register that hurts fast-path performance.
+ */
+ /* return atomic_load_u8(&tsd->state, ATOMIC_RELAXED); */
+ return *(uint8_t *)&tsd->state;
+}
+
+/*
+ * Wrapper around tsd_t that makes it possible to avoid implicit conversion
+ * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
+ * explicitly converted to tsd_t, which is non-nullable.
+ */
+struct tsdn_s {
+ tsd_t tsd;
+};
+#define TSDN_NULL ((tsdn_t *)0)
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsd_tsdn(tsd_t *tsd) {
+ return (tsdn_t *)tsd;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsdn_null(const tsdn_t *tsdn) {
+ return tsdn == NULL;
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsdn_tsd(tsdn_t *tsdn) {
+ assert(!tsdn_null(tsdn));
+
+ return &tsdn->tsd;
+}
+
+/*
+ * We put the platform-specific data declarations and inlines into their own
+ * header files to avoid cluttering this file. They define tsd_boot0,
+ * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
+ */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#error #include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
+#error #include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
#elif (defined(JEMALLOC_TLS))
-#include "jemalloc/internal/tsd_tls.h"
+#include "jemalloc/internal/tsd_tls.h"
#elif (defined(_WIN32))
-#include "jemalloc/internal/tsd_win.h"
+#include "jemalloc/internal/tsd_win.h"
#else
-#include "jemalloc/internal/tsd_generic.h"
+#include "jemalloc/internal/tsd_generic.h"
#endif
-/*
- * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
- * foo. This omits some safety checks, and so can be used during tsd
- * initialization and cleanup.
- */
-#define O(n, t, nt) \
-JEMALLOC_ALWAYS_INLINE t * \
-tsd_##n##p_get_unsafe(tsd_t *tsd) { \
- return &tsd->TSD_MANGLE(n); \
+/*
+ * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
+ * foo. This omits some safety checks, and so can be used during tsd
+ * initialization and cleanup.
+ */
+#define O(n, t, nt) \
+JEMALLOC_ALWAYS_INLINE t * \
+tsd_##n##p_get_unsafe(tsd_t *tsd) { \
+ return &tsd->TSD_MANGLE(n); \
}
-MALLOC_TSD
-#undef O
-
-/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
-#define O(n, t, nt) \
-JEMALLOC_ALWAYS_INLINE t * \
-tsd_##n##p_get(tsd_t *tsd) { \
- /* \
- * Because the state might change asynchronously if it's \
- * nominal, we need to make sure that we only read it once. \
- */ \
- uint8_t state = tsd_state_get(tsd); \
- assert(state == tsd_state_nominal || \
- state == tsd_state_nominal_slow || \
- state == tsd_state_nominal_recompute || \
- state == tsd_state_reincarnated || \
- state == tsd_state_minimal_initialized); \
- return tsd_##n##p_get_unsafe(tsd); \
+MALLOC_TSD
+#undef O
+
+/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
+#define O(n, t, nt) \
+JEMALLOC_ALWAYS_INLINE t * \
+tsd_##n##p_get(tsd_t *tsd) { \
+ /* \
+ * Because the state might change asynchronously if it's \
+ * nominal, we need to make sure that we only read it once. \
+ */ \
+ uint8_t state = tsd_state_get(tsd); \
+ assert(state == tsd_state_nominal || \
+ state == tsd_state_nominal_slow || \
+ state == tsd_state_nominal_recompute || \
+ state == tsd_state_reincarnated || \
+ state == tsd_state_minimal_initialized); \
+ return tsd_##n##p_get_unsafe(tsd); \
}
-MALLOC_TSD
-#undef O
-
-/*
- * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
- * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
- */
-#define O(n, t, nt) \
-JEMALLOC_ALWAYS_INLINE nt * \
-tsdn_##n##p_get(tsdn_t *tsdn) { \
- if (tsdn_null(tsdn)) { \
- return NULL; \
+MALLOC_TSD
+#undef O
+
+/*
+ * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
+ * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
+ */
+#define O(n, t, nt) \
+JEMALLOC_ALWAYS_INLINE nt * \
+tsdn_##n##p_get(tsdn_t *tsdn) { \
+ if (tsdn_null(tsdn)) { \
+ return NULL; \
} \
- tsd_t *tsd = tsdn_tsd(tsdn); \
- return (nt *)tsd_##n##p_get(tsd); \
+ tsd_t *tsd = tsdn_tsd(tsdn); \
+ return (nt *)tsd_##n##p_get(tsd); \
}
-MALLOC_TSD
-#undef O
-
-/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
-#define O(n, t, nt) \
-JEMALLOC_ALWAYS_INLINE t \
-tsd_##n##_get(tsd_t *tsd) { \
- return *tsd_##n##p_get(tsd); \
+MALLOC_TSD
+#undef O
+
+/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
+#define O(n, t, nt) \
+JEMALLOC_ALWAYS_INLINE t \
+tsd_##n##_get(tsd_t *tsd) { \
+ return *tsd_##n##p_get(tsd); \
}
-MALLOC_TSD
-#undef O
-
-/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
-#define O(n, t, nt) \
-JEMALLOC_ALWAYS_INLINE void \
-tsd_##n##_set(tsd_t *tsd, t val) { \
- assert(tsd_state_get(tsd) != tsd_state_reincarnated && \
- tsd_state_get(tsd) != tsd_state_minimal_initialized); \
- *tsd_##n##p_get(tsd) = val; \
-}
-MALLOC_TSD
-#undef O
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_assert_fast(tsd_t *tsd) {
- /*
- * Note that our fastness assertion does *not* include global slowness
- * counters; it's not in general possible to ensure that they won't
- * change asynchronously from underneath us.
- */
- assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
- tsd_reentrancy_level_get(tsd) == 0);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_fast(tsd_t *tsd) {
- bool fast = (tsd_state_get(tsd) == tsd_state_nominal);
- if (fast) {
- tsd_assert_fast(tsd);
- }
-
- return fast;
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_impl(bool init, bool minimal) {
- tsd_t *tsd = tsd_get(init);
-
- if (!init && tsd_get_allocates() && tsd == NULL) {
- return NULL;
- }
- assert(tsd != NULL);
-
- if (unlikely(tsd_state_get(tsd) != tsd_state_nominal)) {
- return tsd_fetch_slow(tsd, minimal);
- }
- assert(tsd_fast(tsd));
- tsd_assert_fast(tsd);
-
- return tsd;
-}
-
-/* Get a minimal TSD that requires no cleanup. See comments in free(). */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_min(void) {
- return tsd_fetch_impl(true, true);
-}
-
-/* For internal background threads use only. */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_internal_fetch(void) {
- tsd_t *tsd = tsd_fetch_min();
- /* Use reincarnated state to prevent full initialization. */
- tsd_state_set(tsd, tsd_state_reincarnated);
-
- return tsd;
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch(void) {
- return tsd_fetch_impl(true, false);
-}
-
-static inline bool
-tsd_nominal(tsd_t *tsd) {
- return (tsd_state_get(tsd) <= tsd_state_nominal_max);
-}
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsdn_fetch(void) {
- if (!tsd_booted_get()) {
- return NULL;
- }
-
- return tsd_tsdn(tsd_fetch_impl(false, false));
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
-tsd_rtree_ctx(tsd_t *tsd) {
- return tsd_rtree_ctxp_get(tsd);
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
-tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
- /*
- * If tsd cannot be accessed, initialize the fallback rtree_ctx and
- * return a pointer to it.
- */
- if (unlikely(tsdn_null(tsdn))) {
- rtree_ctx_data_init(fallback);
- return fallback;
- }
- return tsd_rtree_ctx(tsdn_tsd(tsdn));
-}
-
-#endif /* JEMALLOC_INTERNAL_TSD_H */
+MALLOC_TSD
+#undef O
+
+/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
+#define O(n, t, nt) \
+JEMALLOC_ALWAYS_INLINE void \
+tsd_##n##_set(tsd_t *tsd, t val) { \
+ assert(tsd_state_get(tsd) != tsd_state_reincarnated && \
+ tsd_state_get(tsd) != tsd_state_minimal_initialized); \
+ *tsd_##n##p_get(tsd) = val; \
+}
+MALLOC_TSD
+#undef O
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_assert_fast(tsd_t *tsd) {
+ /*
+ * Note that our fastness assertion does *not* include global slowness
+ * counters; it's not in general possible to ensure that they won't
+ * change asynchronously from underneath us.
+ */
+ assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
+ tsd_reentrancy_level_get(tsd) == 0);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_fast(tsd_t *tsd) {
+ bool fast = (tsd_state_get(tsd) == tsd_state_nominal);
+ if (fast) {
+ tsd_assert_fast(tsd);
+ }
+
+ return fast;
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch_impl(bool init, bool minimal) {
+ tsd_t *tsd = tsd_get(init);
+
+ if (!init && tsd_get_allocates() && tsd == NULL) {
+ return NULL;
+ }
+ assert(tsd != NULL);
+
+ if (unlikely(tsd_state_get(tsd) != tsd_state_nominal)) {
+ return tsd_fetch_slow(tsd, minimal);
+ }
+ assert(tsd_fast(tsd));
+ tsd_assert_fast(tsd);
+
+ return tsd;
+}
+
+/* Get a minimal TSD that requires no cleanup. See comments in free(). */
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch_min(void) {
+ return tsd_fetch_impl(true, true);
+}
+
+/* For internal background threads use only. */
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_internal_fetch(void) {
+ tsd_t *tsd = tsd_fetch_min();
+ /* Use reincarnated state to prevent full initialization. */
+ tsd_state_set(tsd, tsd_state_reincarnated);
+
+ return tsd;
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch(void) {
+ return tsd_fetch_impl(true, false);
+}
+
+static inline bool
+tsd_nominal(tsd_t *tsd) {
+ return (tsd_state_get(tsd) <= tsd_state_nominal_max);
+}
+
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsdn_fetch(void) {
+ if (!tsd_booted_get()) {
+ return NULL;
+ }
+
+ return tsd_tsdn(tsd_fetch_impl(false, false));
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
+tsd_rtree_ctx(tsd_t *tsd) {
+ return tsd_rtree_ctxp_get(tsd);
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
+tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
+ /*
+ * If tsd cannot be accessed, initialize the fallback rtree_ctx and
+ * return a pointer to it.
+ */
+ if (unlikely(tsdn_null(tsdn))) {
+ rtree_ctx_data_init(fallback);
+ return fallback;
+ }
+ return tsd_rtree_ctx(tsdn_tsd(tsdn));
+}
+
+#endif /* JEMALLOC_INTERNAL_TSD_H */
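Editorial note: the MALLOC_TSD list plus the repeated #define O(...) / #undef O blocks above are an X-macro -- a single (name, type) list expanded several times to generate struct fields, getters and setters. A self-contained toy version of the technique follows; TOY_FIELDS and the toy_* names are invented for illustration.

#include <stdbool.h>

#define TOY_FIELDS \
	O(counter, int)  \
	O(flag,    bool)

/* Expansion 1: struct fields. */
typedef struct {
#define O(n, t) t n;
	TOY_FIELDS
#undef O
} toy_state_t;

/* Expansion 2: one getter per field (toy_counter_get(), toy_flag_get()). */
#define O(n, t)							\
static inline t							\
toy_##n##_get(const toy_state_t *s) {				\
	return s->n;						\
}
TOY_FIELDS
#undef O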
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/tsd_generic.h b/contrib/libs/jemalloc/include/jemalloc/internal/tsd_generic.h
index cf73c0c715..fc6d33ea67 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/tsd_generic.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/tsd_generic.h
@@ -1,163 +1,163 @@
-#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H
-#error This file should be included only once, by tsd.h.
-#endif
-#define JEMALLOC_INTERNAL_TSD_GENERIC_H
-
-typedef struct tsd_init_block_s tsd_init_block_t;
-struct tsd_init_block_s {
- ql_elm(tsd_init_block_t) link;
- pthread_t thread;
- void *data;
-};
-
-/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
-typedef struct tsd_init_head_s tsd_init_head_t;
-
-typedef struct {
- bool initialized;
- tsd_t val;
-} tsd_wrapper_t;
-
-void *tsd_init_check_recursion(tsd_init_head_t *head,
- tsd_init_block_t *block);
-void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
-
-extern pthread_key_t tsd_tsd;
-extern tsd_init_head_t tsd_init_head;
-extern tsd_wrapper_t tsd_boot_wrapper;
-extern bool tsd_booted;
-
-/* Initialization/cleanup. */
-JEMALLOC_ALWAYS_INLINE void
-tsd_cleanup_wrapper(void *arg) {
- tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg;
-
- if (wrapper->initialized) {
- wrapper->initialized = false;
- tsd_cleanup(&wrapper->val);
- if (wrapper->initialized) {
- /* Trigger another cleanup round. */
- if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0)
- {
- malloc_write("<jemalloc>: Error setting TSD\n");
- if (opt_abort) {
- abort();
- }
- }
- return;
- }
- }
- malloc_tsd_dalloc(wrapper);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_wrapper_set(tsd_wrapper_t *wrapper) {
- if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
- malloc_write("<jemalloc>: Error setting TSD\n");
- abort();
- }
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
-tsd_wrapper_get(bool init) {
- tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
-
- if (init && unlikely(wrapper == NULL)) {
- tsd_init_block_t block;
- wrapper = (tsd_wrapper_t *)
- tsd_init_check_recursion(&tsd_init_head, &block);
- if (wrapper) {
- return wrapper;
- }
- wrapper = (tsd_wrapper_t *)
- malloc_tsd_malloc(sizeof(tsd_wrapper_t));
- block.data = (void *)wrapper;
- if (wrapper == NULL) {
- malloc_write("<jemalloc>: Error allocating TSD\n");
- abort();
- } else {
- wrapper->initialized = false;
- JEMALLOC_DIAGNOSTIC_PUSH
- JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
- tsd_t initializer = TSD_INITIALIZER;
- JEMALLOC_DIAGNOSTIC_POP
- wrapper->val = initializer;
- }
- tsd_wrapper_set(wrapper);
- tsd_init_finish(&tsd_init_head, &block);
- }
- return wrapper;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot0(void) {
- if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
- return true;
- }
- tsd_wrapper_set(&tsd_boot_wrapper);
- tsd_booted = true;
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_boot1(void) {
- tsd_wrapper_t *wrapper;
- wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
- if (wrapper == NULL) {
- malloc_write("<jemalloc>: Error allocating TSD\n");
- abort();
- }
- tsd_boot_wrapper.initialized = false;
- tsd_cleanup(&tsd_boot_wrapper.val);
- wrapper->initialized = false;
- JEMALLOC_DIAGNOSTIC_PUSH
- JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
- tsd_t initializer = TSD_INITIALIZER;
- JEMALLOC_DIAGNOSTIC_POP
- wrapper->val = initializer;
- tsd_wrapper_set(wrapper);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot(void) {
- if (tsd_boot0()) {
- return true;
- }
- tsd_boot1();
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_booted_get(void) {
- return tsd_booted;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_get_allocates(void) {
- return true;
-}
-
-/* Get/set. */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_get(bool init) {
- tsd_wrapper_t *wrapper;
-
- assert(tsd_booted);
- wrapper = tsd_wrapper_get(init);
- if (tsd_get_allocates() && !init && wrapper == NULL) {
- return NULL;
- }
- return &wrapper->val;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_set(tsd_t *val) {
- tsd_wrapper_t *wrapper;
-
- assert(tsd_booted);
- wrapper = tsd_wrapper_get(true);
- if (likely(&wrapper->val != val)) {
- wrapper->val = *(val);
- }
- wrapper->initialized = true;
-}
+#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H
+#error This file should be included only once, by tsd.h.
+#endif
+#define JEMALLOC_INTERNAL_TSD_GENERIC_H
+
+typedef struct tsd_init_block_s tsd_init_block_t;
+struct tsd_init_block_s {
+ ql_elm(tsd_init_block_t) link;
+ pthread_t thread;
+ void *data;
+};
+
+/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
+typedef struct tsd_init_head_s tsd_init_head_t;
+
+typedef struct {
+ bool initialized;
+ tsd_t val;
+} tsd_wrapper_t;
+
+void *tsd_init_check_recursion(tsd_init_head_t *head,
+ tsd_init_block_t *block);
+void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
+
+extern pthread_key_t tsd_tsd;
+extern tsd_init_head_t tsd_init_head;
+extern tsd_wrapper_t tsd_boot_wrapper;
+extern bool tsd_booted;
+
+/* Initialization/cleanup. */
+JEMALLOC_ALWAYS_INLINE void
+tsd_cleanup_wrapper(void *arg) {
+ tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg;
+
+ if (wrapper->initialized) {
+ wrapper->initialized = false;
+ tsd_cleanup(&wrapper->val);
+ if (wrapper->initialized) {
+ /* Trigger another cleanup round. */
+ if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0)
+ {
+ malloc_write("<jemalloc>: Error setting TSD\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+ return;
+ }
+ }
+ malloc_tsd_dalloc(wrapper);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_wrapper_set(tsd_wrapper_t *wrapper) {
+ if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
+ malloc_write("<jemalloc>: Error setting TSD\n");
+ abort();
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
+tsd_wrapper_get(bool init) {
+ tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
+
+ if (init && unlikely(wrapper == NULL)) {
+ tsd_init_block_t block;
+ wrapper = (tsd_wrapper_t *)
+ tsd_init_check_recursion(&tsd_init_head, &block);
+ if (wrapper) {
+ return wrapper;
+ }
+ wrapper = (tsd_wrapper_t *)
+ malloc_tsd_malloc(sizeof(tsd_wrapper_t));
+ block.data = (void *)wrapper;
+ if (wrapper == NULL) {
+ malloc_write("<jemalloc>: Error allocating TSD\n");
+ abort();
+ } else {
+ wrapper->initialized = false;
+ JEMALLOC_DIAGNOSTIC_PUSH
+ JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
+ tsd_t initializer = TSD_INITIALIZER;
+ JEMALLOC_DIAGNOSTIC_POP
+ wrapper->val = initializer;
+ }
+ tsd_wrapper_set(wrapper);
+ tsd_init_finish(&tsd_init_head, &block);
+ }
+ return wrapper;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot0(void) {
+ if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
+ return true;
+ }
+ tsd_wrapper_set(&tsd_boot_wrapper);
+ tsd_booted = true;
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_boot1(void) {
+ tsd_wrapper_t *wrapper;
+ wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
+ if (wrapper == NULL) {
+ malloc_write("<jemalloc>: Error allocating TSD\n");
+ abort();
+ }
+ tsd_boot_wrapper.initialized = false;
+ tsd_cleanup(&tsd_boot_wrapper.val);
+ wrapper->initialized = false;
+ JEMALLOC_DIAGNOSTIC_PUSH
+ JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
+ tsd_t initializer = TSD_INITIALIZER;
+ JEMALLOC_DIAGNOSTIC_POP
+ wrapper->val = initializer;
+ tsd_wrapper_set(wrapper);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot(void) {
+ if (tsd_boot0()) {
+ return true;
+ }
+ tsd_boot1();
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_booted_get(void) {
+ return tsd_booted;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_get_allocates(void) {
+ return true;
+}
+
+/* Get/set. */
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_get(bool init) {
+ tsd_wrapper_t *wrapper;
+
+ assert(tsd_booted);
+ wrapper = tsd_wrapper_get(init);
+ if (tsd_get_allocates() && !init && wrapper == NULL) {
+ return NULL;
+ }
+ return &wrapper->val;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_set(tsd_t *val) {
+ tsd_wrapper_t *wrapper;
+
+ assert(tsd_booted);
+ wrapper = tsd_wrapper_get(true);
+ if (likely(&wrapper->val != val)) {
+ wrapper->val = *(val);
+ }
+ wrapper->initialized = true;
+}
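Editorial note: the generic backend above is layered on plain POSIX thread-specific data. A stripped-down, hypothetical sketch of that mechanism follows; the toy_* names are invented and error handling is omitted for brevity.

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t toy_key;

/* Runs at thread exit, playing the role of tsd_cleanup_wrapper() above. */
static void
toy_destructor(void *p) {
	free(p);
}

static void
toy_boot(void) {
	pthread_key_create(&toy_key, toy_destructor);
}

static int *
toy_get(void) {
	int *v = pthread_getspecific(toy_key);
	if (v == NULL) {
		/* Lazily allocate this thread's state on first use. */
		v = calloc(1, sizeof(*v));
		pthread_setspecific(toy_key, v);
	}
	return v;
}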
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/tsd_tls.h b/contrib/libs/jemalloc/include/jemalloc/internal/tsd_tls.h
index 7d6c805beb..25ffcf89d9 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/tsd_tls.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/tsd_tls.h
@@ -1,60 +1,60 @@
-#ifdef JEMALLOC_INTERNAL_TSD_TLS_H
-#error This file should be included only once, by tsd.h.
-#endif
-#define JEMALLOC_INTERNAL_TSD_TLS_H
-
-#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL
-
-extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls;
-extern pthread_key_t tsd_tsd;
-extern bool tsd_booted;
-
-/* Initialization/cleanup. */
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot0(void) {
- if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) {
- return true;
- }
- tsd_booted = true;
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_boot1(void) {
- /* Do nothing. */
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot(void) {
- return tsd_boot0();
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_booted_get(void) {
- return tsd_booted;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_get_allocates(void) {
- return false;
-}
-
-/* Get/set. */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_get(bool init) {
- return &tsd_tls;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_set(tsd_t *val) {
- assert(tsd_booted);
- if (likely(&tsd_tls != val)) {
- tsd_tls = (*val);
- }
- if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) {
- malloc_write("<jemalloc>: Error setting tsd.\n");
- if (opt_abort) {
- abort();
- }
- }
-}
+#ifdef JEMALLOC_INTERNAL_TSD_TLS_H
+#error This file should be included only once, by tsd.h.
+#endif
+#define JEMALLOC_INTERNAL_TSD_TLS_H
+
+#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL
+
+extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls;
+extern pthread_key_t tsd_tsd;
+extern bool tsd_booted;
+
+/* Initialization/cleanup. */
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot0(void) {
+ if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) {
+ return true;
+ }
+ tsd_booted = true;
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_boot1(void) {
+ /* Do nothing. */
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot(void) {
+ return tsd_boot0();
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_booted_get(void) {
+ return tsd_booted;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_get_allocates(void) {
+ return false;
+}
+
+/* Get/set. */
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_get(bool init) {
+ return &tsd_tls;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_set(tsd_t *val) {
+ assert(tsd_booted);
+ if (likely(&tsd_tls != val)) {
+ tsd_tls = (*val);
+ }
+ if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) {
+ malloc_write("<jemalloc>: Error setting tsd.\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+}
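
The TLS variant above keeps tsd_t directly in a __thread variable and only uses the pthread key to obtain a destructor callback at thread exit; tsd_get_allocates() therefore returns false and tsd_get() never allocates. A short standalone illustration of that split, with hypothetical names (my_tsd_t, my_key):

#include <pthread.h>
#include <stdio.h>

typedef struct {
	int counter;	/* hypothetical stand-in for tsd_t */
} my_tsd_t;

static __thread my_tsd_t my_tls;	/* actual storage, like tsd_tls */
static pthread_key_t my_key;		/* only used to trigger cleanup at exit */

static void
my_cleanup(void *arg) {
	my_tsd_t *t = (my_tsd_t *)arg;
	printf("thread exiting, counter=%d\n", t->counter);
}

static void *
thread_main(void *unused) {
	(void)unused;
	my_tls.counter = 7;
	/* Point the key at the TLS slot so my_cleanup() fires at thread exit. */
	pthread_setspecific(my_key, &my_tls);
	return NULL;
}

int
main(void) {
	pthread_t t;
	pthread_key_create(&my_key, my_cleanup);
	pthread_create(&t, NULL, thread_main, NULL);
	pthread_join(t, NULL);
	return 0;
}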
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/tsd_types.h b/contrib/libs/jemalloc/include/jemalloc/internal/tsd_types.h
index 6200af61f3..6aac2043a1 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/tsd_types.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/tsd_types.h
@@ -1,10 +1,10 @@
-#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
-#define JEMALLOC_INTERNAL_TSD_TYPES_H
-
-#define MALLOC_TSD_CLEANUPS_MAX 2
-
-typedef struct tsd_s tsd_t;
-typedef struct tsdn_s tsdn_t;
-typedef bool (*malloc_tsd_cleanup_t)(void);
-
-#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */
+#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
+#define JEMALLOC_INTERNAL_TSD_TYPES_H
+
+#define MALLOC_TSD_CLEANUPS_MAX 2
+
+typedef struct tsd_s tsd_t;
+typedef struct tsdn_s tsdn_t;
+typedef bool (*malloc_tsd_cleanup_t)(void);
+
+#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/tsd_win.h b/contrib/libs/jemalloc/include/jemalloc/internal/tsd_win.h
index cf30d18e3c..85cbc0a818 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/tsd_win.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/tsd_win.h
@@ -1,139 +1,139 @@
-#ifdef JEMALLOC_INTERNAL_TSD_WIN_H
-#error This file should be included only once, by tsd.h.
-#endif
-#define JEMALLOC_INTERNAL_TSD_WIN_H
-
-typedef struct {
- bool initialized;
- tsd_t val;
-} tsd_wrapper_t;
-
-extern DWORD tsd_tsd;
-extern tsd_wrapper_t tsd_boot_wrapper;
-extern bool tsd_booted;
-
-/* Initialization/cleanup. */
-JEMALLOC_ALWAYS_INLINE bool
-tsd_cleanup_wrapper(void) {
- DWORD error = GetLastError();
- tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd);
- SetLastError(error);
-
- if (wrapper == NULL) {
- return false;
- }
-
- if (wrapper->initialized) {
- wrapper->initialized = false;
- tsd_cleanup(&wrapper->val);
- if (wrapper->initialized) {
- /* Trigger another cleanup round. */
- return true;
- }
- }
- malloc_tsd_dalloc(wrapper);
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_wrapper_set(tsd_wrapper_t *wrapper) {
- if (!TlsSetValue(tsd_tsd, (void *)wrapper)) {
- malloc_write("<jemalloc>: Error setting TSD\n");
- abort();
- }
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
-tsd_wrapper_get(bool init) {
- DWORD error = GetLastError();
- tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd);
- SetLastError(error);
-
- if (init && unlikely(wrapper == NULL)) {
- wrapper = (tsd_wrapper_t *)
- malloc_tsd_malloc(sizeof(tsd_wrapper_t));
- if (wrapper == NULL) {
- malloc_write("<jemalloc>: Error allocating TSD\n");
- abort();
- } else {
- wrapper->initialized = false;
- /* MSVC is finicky about aggregate initialization. */
- tsd_t tsd_initializer = TSD_INITIALIZER;
- wrapper->val = tsd_initializer;
- }
- tsd_wrapper_set(wrapper);
- }
- return wrapper;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot0(void) {
- tsd_tsd = TlsAlloc();
- if (tsd_tsd == TLS_OUT_OF_INDEXES) {
- return true;
- }
- malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
- tsd_wrapper_set(&tsd_boot_wrapper);
- tsd_booted = true;
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_boot1(void) {
- tsd_wrapper_t *wrapper;
- wrapper = (tsd_wrapper_t *)
- malloc_tsd_malloc(sizeof(tsd_wrapper_t));
- if (wrapper == NULL) {
- malloc_write("<jemalloc>: Error allocating TSD\n");
- abort();
- }
- tsd_boot_wrapper.initialized = false;
- tsd_cleanup(&tsd_boot_wrapper.val);
- wrapper->initialized = false;
- tsd_t initializer = TSD_INITIALIZER;
- wrapper->val = initializer;
- tsd_wrapper_set(wrapper);
-}
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot(void) {
- if (tsd_boot0()) {
- return true;
- }
- tsd_boot1();
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_booted_get(void) {
- return tsd_booted;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_get_allocates(void) {
- return true;
-}
-
-/* Get/set. */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_get(bool init) {
- tsd_wrapper_t *wrapper;
-
- assert(tsd_booted);
- wrapper = tsd_wrapper_get(init);
- if (tsd_get_allocates() && !init && wrapper == NULL) {
- return NULL;
- }
- return &wrapper->val;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_set(tsd_t *val) {
- tsd_wrapper_t *wrapper;
-
- assert(tsd_booted);
- wrapper = tsd_wrapper_get(true);
- if (likely(&wrapper->val != val)) {
- wrapper->val = *(val);
- }
- wrapper->initialized = true;
-}
+#ifdef JEMALLOC_INTERNAL_TSD_WIN_H
+#error This file should be included only once, by tsd.h.
+#endif
+#define JEMALLOC_INTERNAL_TSD_WIN_H
+
+typedef struct {
+ bool initialized;
+ tsd_t val;
+} tsd_wrapper_t;
+
+extern DWORD tsd_tsd;
+extern tsd_wrapper_t tsd_boot_wrapper;
+extern bool tsd_booted;
+
+/* Initialization/cleanup. */
+JEMALLOC_ALWAYS_INLINE bool
+tsd_cleanup_wrapper(void) {
+ DWORD error = GetLastError();
+ tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd);
+ SetLastError(error);
+
+ if (wrapper == NULL) {
+ return false;
+ }
+
+ if (wrapper->initialized) {
+ wrapper->initialized = false;
+ tsd_cleanup(&wrapper->val);
+ if (wrapper->initialized) {
+ /* Trigger another cleanup round. */
+ return true;
+ }
+ }
+ malloc_tsd_dalloc(wrapper);
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_wrapper_set(tsd_wrapper_t *wrapper) {
+ if (!TlsSetValue(tsd_tsd, (void *)wrapper)) {
+ malloc_write("<jemalloc>: Error setting TSD\n");
+ abort();
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
+tsd_wrapper_get(bool init) {
+ DWORD error = GetLastError();
+ tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd);
+ SetLastError(error);
+
+ if (init && unlikely(wrapper == NULL)) {
+ wrapper = (tsd_wrapper_t *)
+ malloc_tsd_malloc(sizeof(tsd_wrapper_t));
+ if (wrapper == NULL) {
+ malloc_write("<jemalloc>: Error allocating TSD\n");
+ abort();
+ } else {
+ wrapper->initialized = false;
+ /* MSVC is finicky about aggregate initialization. */
+ tsd_t tsd_initializer = TSD_INITIALIZER;
+ wrapper->val = tsd_initializer;
+ }
+ tsd_wrapper_set(wrapper);
+ }
+ return wrapper;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot0(void) {
+ tsd_tsd = TlsAlloc();
+ if (tsd_tsd == TLS_OUT_OF_INDEXES) {
+ return true;
+ }
+ malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
+ tsd_wrapper_set(&tsd_boot_wrapper);
+ tsd_booted = true;
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_boot1(void) {
+ tsd_wrapper_t *wrapper;
+ wrapper = (tsd_wrapper_t *)
+ malloc_tsd_malloc(sizeof(tsd_wrapper_t));
+ if (wrapper == NULL) {
+ malloc_write("<jemalloc>: Error allocating TSD\n");
+ abort();
+ }
+ tsd_boot_wrapper.initialized = false;
+ tsd_cleanup(&tsd_boot_wrapper.val);
+ wrapper->initialized = false;
+ tsd_t initializer = TSD_INITIALIZER;
+ wrapper->val = initializer;
+ tsd_wrapper_set(wrapper);
+}
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot(void) {
+ if (tsd_boot0()) {
+ return true;
+ }
+ tsd_boot1();
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_booted_get(void) {
+ return tsd_booted;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_get_allocates(void) {
+ return true;
+}
+
+/* Get/set. */
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_get(bool init) {
+ tsd_wrapper_t *wrapper;
+
+ assert(tsd_booted);
+ wrapper = tsd_wrapper_get(init);
+ if (tsd_get_allocates() && !init && wrapper == NULL) {
+ return NULL;
+ }
+ return &wrapper->val;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_set(tsd_t *val) {
+ tsd_wrapper_t *wrapper;
+
+ assert(tsd_booted);
+ wrapper = tsd_wrapper_get(true);
+ if (likely(&wrapper->val != val)) {
+ wrapper->val = *(val);
+ }
+ wrapper->initialized = true;
+}
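
The Windows variant mirrors the generic one but goes through TlsAlloc()/TlsGetValue()/TlsSetValue(); since a raw TLS index carries no destructor, cleanup is instead registered via malloc_tsd_cleanup_register() as shown in tsd_boot0() above. A minimal Win32 sketch of just the get/set half, with cleanup registration omitted and all names hypothetical:

#include <windows.h>
#include <stdio.h>

static DWORD my_tls_index;

static DWORD WINAPI
thread_main(LPVOID unused) {
	int value = 123;
	(void)unused;
	/* Stash a per-thread pointer, analogous to tsd_wrapper_set() above. */
	if (!TlsSetValue(my_tls_index, &value)) {
		return 1;
	}
	printf("per-thread value: %d\n", *(int *)TlsGetValue(my_tls_index));
	return 0;
}

int
main(void) {
	HANDLE h;
	my_tls_index = TlsAlloc();
	if (my_tls_index == TLS_OUT_OF_INDEXES) {
		return 1;
	}
	h = CreateThread(NULL, 0, thread_main, NULL, 0, NULL);
	if (h == NULL) {
		return 1;
	}
	WaitForSingleObject(h, INFINITE);
	CloseHandle(h);
	TlsFree(my_tls_index);
	return 0;
}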
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/util.h b/contrib/libs/jemalloc/include/jemalloc/internal/util.h
index 304cb545af..1c2f65507e 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/util.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/util.h
@@ -1,50 +1,50 @@
-#ifndef JEMALLOC_INTERNAL_UTIL_H
-#define JEMALLOC_INTERNAL_UTIL_H
+#ifndef JEMALLOC_INTERNAL_UTIL_H
+#define JEMALLOC_INTERNAL_UTIL_H
-#define UTIL_INLINE static inline
+#define UTIL_INLINE static inline
-/* Junk fill patterns. */
-#ifndef JEMALLOC_ALLOC_JUNK
-# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
-#endif
-#ifndef JEMALLOC_FREE_JUNK
-# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
-#endif
+/* Junk fill patterns. */
+#ifndef JEMALLOC_ALLOC_JUNK
+# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
+#endif
+#ifndef JEMALLOC_FREE_JUNK
+# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
+#endif
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
-#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
-
-/* cpp macro definition stringification. */
-#define STRINGIFY_HELPER(x) #x
-#define STRINGIFY(x) STRINGIFY_HELPER(x)
+#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
+/* cpp macro definition stringification. */
+#define STRINGIFY_HELPER(x) #x
+#define STRINGIFY(x) STRINGIFY_HELPER(x)
+
/*
* Silence compiler warnings due to uninitialized values. This is used
* wherever the compiler fails to recognize that the variable is never used
* uninitialized.
*/
-#define JEMALLOC_CC_SILENCE_INIT(v) = v
-
-#ifdef __GNUC__
-# define likely(x) __builtin_expect(!!(x), 1)
-# define unlikely(x) __builtin_expect(!!(x), 0)
+#define JEMALLOC_CC_SILENCE_INIT(v) = v
+
+#ifdef __GNUC__
+# define likely(x) __builtin_expect(!!(x), 1)
+# define unlikely(x) __builtin_expect(!!(x), 0)
#else
-# define likely(x) !!(x)
-# define unlikely(x) !!(x)
+# define likely(x) !!(x)
+# define unlikely(x) !!(x)
#endif
-#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
-# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
+#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
+# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
#endif
-#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
+#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
-/* Set error code. */
-UTIL_INLINE void
-set_errno(int errnum) {
+/* Set error code. */
+UTIL_INLINE void
+set_errno(int errnum) {
#ifdef _WIN32
SetLastError(errnum);
#else
@@ -52,16 +52,16 @@ set_errno(int errnum) {
#endif
}
-/* Get last error code. */
-UTIL_INLINE int
-get_errno(void) {
+/* Get last error code. */
+UTIL_INLINE int
+get_errno(void) {
#ifdef _WIN32
- return GetLastError();
+ return GetLastError();
#else
- return errno;
+ return errno;
#endif
}
-#undef UTIL_INLINE
-
-#endif /* JEMALLOC_INTERNAL_UTIL_H */
+#undef UTIL_INLINE
+
+#endif /* JEMALLOC_INTERNAL_UTIL_H */
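
The likely()/unlikely() macros above wrap __builtin_expect so error paths can be marked cold on GCC-compatible compilers, and set_errno()/get_errno() paper over the errno-versus-GetLastError() split. A small usage sketch under the assumption of a GCC-style compiler; parse_positive() is a made-up helper, not a jemalloc function:

#include <errno.h>
#include <stdio.h>

#ifdef __GNUC__
#  define likely(x)   __builtin_expect(!!(x), 1)
#  define unlikely(x) __builtin_expect(!!(x), 0)
#else
#  define likely(x)   !!(x)
#  define unlikely(x) !!(x)
#endif

/* Hypothetical helper: reject non-positive input via the cold error path. */
static int
parse_positive(int v) {
	if (unlikely(v <= 0)) {
		errno = EINVAL;	/* what set_errno() reduces to off Windows */
		return -1;
	}
	return v;
}

int
main(void) {
	if (parse_positive(-3) < 0) {
		printf("error, errno=%d\n", errno);
	}
	printf("ok: %d\n", parse_positive(7));
	return 0;
}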
diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/witness.h b/contrib/libs/jemalloc/include/jemalloc/internal/witness.h
index fff9e98cb6..7706aa44ba 100644
--- a/contrib/libs/jemalloc/include/jemalloc/internal/witness.h
+++ b/contrib/libs/jemalloc/include/jemalloc/internal/witness.h
@@ -1,347 +1,347 @@
-#ifndef JEMALLOC_INTERNAL_WITNESS_H
-#define JEMALLOC_INTERNAL_WITNESS_H
-
-#include "jemalloc/internal/ql.h"
-
-/******************************************************************************/
-/* LOCK RANKS */
-/******************************************************************************/
-
-/*
- * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness
- * machinery.
- */
-
-#define WITNESS_RANK_OMIT 0U
-
-#define WITNESS_RANK_MIN 1U
-
-#define WITNESS_RANK_INIT 1U
-#define WITNESS_RANK_CTL 1U
-#define WITNESS_RANK_TCACHES 2U
-#define WITNESS_RANK_ARENAS 3U
-
-#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U
-
-#define WITNESS_RANK_PROF_DUMP 5U
-#define WITNESS_RANK_PROF_BT2GCTX 6U
-#define WITNESS_RANK_PROF_TDATAS 7U
-#define WITNESS_RANK_PROF_TDATA 8U
-#define WITNESS_RANK_PROF_LOG 9U
-#define WITNESS_RANK_PROF_GCTX 10U
-#define WITNESS_RANK_BACKGROUND_THREAD 11U
-
-/*
- * Used as an argument to witness_assert_depth_to_rank() in order to validate
- * depth excluding non-core locks with lower ranks. Since the rank argument to
- * witness_assert_depth_to_rank() is inclusive rather than exclusive, this
- * definition can have the same value as the minimally ranked core lock.
- */
-#define WITNESS_RANK_CORE 12U
-
-#define WITNESS_RANK_DECAY 12U
-#define WITNESS_RANK_TCACHE_QL 13U
-#define WITNESS_RANK_EXTENT_GROW 14U
-#define WITNESS_RANK_EXTENTS 15U
-#define WITNESS_RANK_EXTENT_AVAIL 16U
-
-#define WITNESS_RANK_EXTENT_POOL 17U
-#define WITNESS_RANK_RTREE 18U
-#define WITNESS_RANK_BASE 19U
-#define WITNESS_RANK_ARENA_LARGE 20U
-#define WITNESS_RANK_HOOK 21U
-
-#define WITNESS_RANK_LEAF 0xffffffffU
-#define WITNESS_RANK_BIN WITNESS_RANK_LEAF
-#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
-#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
-
-/******************************************************************************/
-/* PER-WITNESS DATA */
-/******************************************************************************/
-#if defined(JEMALLOC_DEBUG)
-# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
-#else
-# define WITNESS_INITIALIZER(name, rank)
-#endif
-
-typedef struct witness_s witness_t;
-typedef unsigned witness_rank_t;
-typedef ql_head(witness_t) witness_list_t;
-typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
- void *);
-
-struct witness_s {
- /* Name, used for printing lock order reversal messages. */
- const char *name;
-
- /*
- * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
- * must be acquired in order of increasing rank.
- */
- witness_rank_t rank;
-
- /*
- * If two witnesses are of equal rank and they have the samp comp
- * function pointer, it is called as a last attempt to differentiate
- * between witnesses of equal rank.
- */
- witness_comp_t *comp;
-
- /* Opaque data, passed to comp(). */
- void *opaque;
-
- /* Linkage for thread's currently owned locks. */
- ql_elm(witness_t) link;
-};
-
-/******************************************************************************/
-/* PER-THREAD DATA */
-/******************************************************************************/
-typedef struct witness_tsd_s witness_tsd_t;
-struct witness_tsd_s {
- witness_list_t witnesses;
- bool forking;
-};
-
-#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false }
-#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0)
-
-/******************************************************************************/
-/* (PER-THREAD) NULLABILITY HELPERS */
-/******************************************************************************/
-typedef struct witness_tsdn_s witness_tsdn_t;
-struct witness_tsdn_s {
- witness_tsd_t witness_tsd;
-};
-
-JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
-witness_tsd_tsdn(witness_tsd_t *witness_tsd) {
- return (witness_tsdn_t *)witness_tsd;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-witness_tsdn_null(witness_tsdn_t *witness_tsdn) {
- return witness_tsdn == NULL;
-}
-
-JEMALLOC_ALWAYS_INLINE witness_tsd_t *
-witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) {
- assert(!witness_tsdn_null(witness_tsdn));
- return &witness_tsdn->witness_tsd;
-}
-
-/******************************************************************************/
-/* API */
-/******************************************************************************/
-void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp, void *opaque);
-
-typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
-extern witness_lock_error_t *JET_MUTABLE witness_lock_error;
-
-typedef void (witness_owner_error_t)(const witness_t *);
-extern witness_owner_error_t *JET_MUTABLE witness_owner_error;
-
-typedef void (witness_not_owner_error_t)(const witness_t *);
-extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;
-
-typedef void (witness_depth_error_t)(const witness_list_t *,
- witness_rank_t rank_inclusive, unsigned depth);
-extern witness_depth_error_t *JET_MUTABLE witness_depth_error;
-
-void witnesses_cleanup(witness_tsd_t *witness_tsd);
-void witness_prefork(witness_tsd_t *witness_tsd);
-void witness_postfork_parent(witness_tsd_t *witness_tsd);
-void witness_postfork_child(witness_tsd_t *witness_tsd);
-
-/* Helper, not intended for direct use. */
-static inline bool
-witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) {
- witness_list_t *witnesses;
- witness_t *w;
-
- cassert(config_debug);
-
- witnesses = &witness_tsd->witnesses;
- ql_foreach(w, witnesses, link) {
- if (w == witness) {
- return true;
- }
- }
-
- return false;
-}
-
-static inline void
-witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) {
- witness_tsd_t *witness_tsd;
-
- if (!config_debug) {
- return;
- }
-
- if (witness_tsdn_null(witness_tsdn)) {
- return;
- }
- witness_tsd = witness_tsdn_tsd(witness_tsdn);
- if (witness->rank == WITNESS_RANK_OMIT) {
- return;
- }
-
- if (witness_owner(witness_tsd, witness)) {
- return;
- }
- witness_owner_error(witness);
-}
-
-static inline void
-witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
- const witness_t *witness) {
- witness_tsd_t *witness_tsd;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug) {
- return;
- }
-
- if (witness_tsdn_null(witness_tsdn)) {
- return;
- }
- witness_tsd = witness_tsdn_tsd(witness_tsdn);
- if (witness->rank == WITNESS_RANK_OMIT) {
- return;
- }
-
- witnesses = &witness_tsd->witnesses;
- ql_foreach(w, witnesses, link) {
- if (w == witness) {
- witness_not_owner_error(witness);
- }
- }
-}
-
-static inline void
-witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
- witness_rank_t rank_inclusive, unsigned depth) {
- witness_tsd_t *witness_tsd;
- unsigned d;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug) {
- return;
- }
-
- if (witness_tsdn_null(witness_tsdn)) {
- return;
- }
- witness_tsd = witness_tsdn_tsd(witness_tsdn);
-
- d = 0;
- witnesses = &witness_tsd->witnesses;
- w = ql_last(witnesses, link);
- if (w != NULL) {
- ql_reverse_foreach(w, witnesses, link) {
- if (w->rank < rank_inclusive) {
- break;
- }
- d++;
- }
- }
- if (d != depth) {
- witness_depth_error(witnesses, rank_inclusive, depth);
- }
-}
-
-static inline void
-witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) {
- witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth);
-}
-
-static inline void
-witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
- witness_assert_depth(witness_tsdn, 0);
-}
-
-static inline void
-witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
- witness_tsd_t *witness_tsd;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug) {
- return;
- }
-
- if (witness_tsdn_null(witness_tsdn)) {
- return;
- }
- witness_tsd = witness_tsdn_tsd(witness_tsdn);
- if (witness->rank == WITNESS_RANK_OMIT) {
- return;
- }
-
- witness_assert_not_owner(witness_tsdn, witness);
-
- witnesses = &witness_tsd->witnesses;
- w = ql_last(witnesses, link);
- if (w == NULL) {
- /* No other locks; do nothing. */
- } else if (witness_tsd->forking && w->rank <= witness->rank) {
- /* Forking, and relaxed ranking satisfied. */
- } else if (w->rank > witness->rank) {
- /* Not forking, rank order reversal. */
- witness_lock_error(witnesses, witness);
- } else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
- witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
- 0)) {
- /*
- * Missing/incompatible comparison function, or comparison
- * function indicates rank order reversal.
- */
- witness_lock_error(witnesses, witness);
- }
-
- ql_elm_new(witness, link);
- ql_tail_insert(witnesses, witness, link);
-}
-
-static inline void
-witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
- witness_tsd_t *witness_tsd;
- witness_list_t *witnesses;
-
- if (!config_debug) {
- return;
- }
-
- if (witness_tsdn_null(witness_tsdn)) {
- return;
- }
- witness_tsd = witness_tsdn_tsd(witness_tsdn);
- if (witness->rank == WITNESS_RANK_OMIT) {
- return;
- }
-
- /*
- * Check whether owner before removal, rather than relying on
- * witness_assert_owner() to abort, so that unit tests can test this
- * function's failure mode without causing undefined behavior.
- */
- if (witness_owner(witness_tsd, witness)) {
- witnesses = &witness_tsd->witnesses;
- ql_remove(witnesses, witness, link);
- } else {
- witness_assert_owner(witness_tsdn, witness);
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_WITNESS_H */
+#ifndef JEMALLOC_INTERNAL_WITNESS_H
+#define JEMALLOC_INTERNAL_WITNESS_H
+
+#include "jemalloc/internal/ql.h"
+
+/******************************************************************************/
+/* LOCK RANKS */
+/******************************************************************************/
+
+/*
+ * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness
+ * machinery.
+ */
+
+#define WITNESS_RANK_OMIT 0U
+
+#define WITNESS_RANK_MIN 1U
+
+#define WITNESS_RANK_INIT 1U
+#define WITNESS_RANK_CTL 1U
+#define WITNESS_RANK_TCACHES 2U
+#define WITNESS_RANK_ARENAS 3U
+
+#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U
+
+#define WITNESS_RANK_PROF_DUMP 5U
+#define WITNESS_RANK_PROF_BT2GCTX 6U
+#define WITNESS_RANK_PROF_TDATAS 7U
+#define WITNESS_RANK_PROF_TDATA 8U
+#define WITNESS_RANK_PROF_LOG 9U
+#define WITNESS_RANK_PROF_GCTX 10U
+#define WITNESS_RANK_BACKGROUND_THREAD 11U
+
+/*
+ * Used as an argument to witness_assert_depth_to_rank() in order to validate
+ * depth excluding non-core locks with lower ranks. Since the rank argument to
+ * witness_assert_depth_to_rank() is inclusive rather than exclusive, this
+ * definition can have the same value as the minimally ranked core lock.
+ */
+#define WITNESS_RANK_CORE 12U
+
+#define WITNESS_RANK_DECAY 12U
+#define WITNESS_RANK_TCACHE_QL 13U
+#define WITNESS_RANK_EXTENT_GROW 14U
+#define WITNESS_RANK_EXTENTS 15U
+#define WITNESS_RANK_EXTENT_AVAIL 16U
+
+#define WITNESS_RANK_EXTENT_POOL 17U
+#define WITNESS_RANK_RTREE 18U
+#define WITNESS_RANK_BASE 19U
+#define WITNESS_RANK_ARENA_LARGE 20U
+#define WITNESS_RANK_HOOK 21U
+
+#define WITNESS_RANK_LEAF 0xffffffffU
+#define WITNESS_RANK_BIN WITNESS_RANK_LEAF
+#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
+#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
+
+/******************************************************************************/
+/* PER-WITNESS DATA */
+/******************************************************************************/
+#if defined(JEMALLOC_DEBUG)
+# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
+#else
+# define WITNESS_INITIALIZER(name, rank)
+#endif
+
+typedef struct witness_s witness_t;
+typedef unsigned witness_rank_t;
+typedef ql_head(witness_t) witness_list_t;
+typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
+ void *);
+
+struct witness_s {
+ /* Name, used for printing lock order reversal messages. */
+ const char *name;
+
+ /*
+ * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
+ * must be acquired in order of increasing rank.
+ */
+ witness_rank_t rank;
+
+ /*
+	 * If two witnesses are of equal rank and they have the same comp
+ * function pointer, it is called as a last attempt to differentiate
+ * between witnesses of equal rank.
+ */
+ witness_comp_t *comp;
+
+ /* Opaque data, passed to comp(). */
+ void *opaque;
+
+ /* Linkage for thread's currently owned locks. */
+ ql_elm(witness_t) link;
+};
+
+/******************************************************************************/
+/* PER-THREAD DATA */
+/******************************************************************************/
+typedef struct witness_tsd_s witness_tsd_t;
+struct witness_tsd_s {
+ witness_list_t witnesses;
+ bool forking;
+};
+
+#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false }
+#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0)
+
+/******************************************************************************/
+/* (PER-THREAD) NULLABILITY HELPERS */
+/******************************************************************************/
+typedef struct witness_tsdn_s witness_tsdn_t;
+struct witness_tsdn_s {
+ witness_tsd_t witness_tsd;
+};
+
+JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
+witness_tsd_tsdn(witness_tsd_t *witness_tsd) {
+ return (witness_tsdn_t *)witness_tsd;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+witness_tsdn_null(witness_tsdn_t *witness_tsdn) {
+ return witness_tsdn == NULL;
+}
+
+JEMALLOC_ALWAYS_INLINE witness_tsd_t *
+witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) {
+ assert(!witness_tsdn_null(witness_tsdn));
+ return &witness_tsdn->witness_tsd;
+}
+
+/******************************************************************************/
+/* API */
+/******************************************************************************/
+void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+ witness_comp_t *comp, void *opaque);
+
+typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
+extern witness_lock_error_t *JET_MUTABLE witness_lock_error;
+
+typedef void (witness_owner_error_t)(const witness_t *);
+extern witness_owner_error_t *JET_MUTABLE witness_owner_error;
+
+typedef void (witness_not_owner_error_t)(const witness_t *);
+extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;
+
+typedef void (witness_depth_error_t)(const witness_list_t *,
+ witness_rank_t rank_inclusive, unsigned depth);
+extern witness_depth_error_t *JET_MUTABLE witness_depth_error;
+
+void witnesses_cleanup(witness_tsd_t *witness_tsd);
+void witness_prefork(witness_tsd_t *witness_tsd);
+void witness_postfork_parent(witness_tsd_t *witness_tsd);
+void witness_postfork_child(witness_tsd_t *witness_tsd);
+
+/* Helper, not intended for direct use. */
+static inline bool
+witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) {
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ cassert(config_debug);
+
+ witnesses = &witness_tsd->witnesses;
+ ql_foreach(w, witnesses, link) {
+ if (w == witness) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static inline void
+witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) {
+ witness_tsd_t *witness_tsd;
+
+ if (!config_debug) {
+ return;
+ }
+
+ if (witness_tsdn_null(witness_tsdn)) {
+ return;
+ }
+ witness_tsd = witness_tsdn_tsd(witness_tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT) {
+ return;
+ }
+
+ if (witness_owner(witness_tsd, witness)) {
+ return;
+ }
+ witness_owner_error(witness);
+}
+
+static inline void
+witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
+ const witness_t *witness) {
+ witness_tsd_t *witness_tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug) {
+ return;
+ }
+
+ if (witness_tsdn_null(witness_tsdn)) {
+ return;
+ }
+ witness_tsd = witness_tsdn_tsd(witness_tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT) {
+ return;
+ }
+
+ witnesses = &witness_tsd->witnesses;
+ ql_foreach(w, witnesses, link) {
+ if (w == witness) {
+ witness_not_owner_error(witness);
+ }
+ }
+}
+
+static inline void
+witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
+ witness_rank_t rank_inclusive, unsigned depth) {
+ witness_tsd_t *witness_tsd;
+ unsigned d;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug) {
+ return;
+ }
+
+ if (witness_tsdn_null(witness_tsdn)) {
+ return;
+ }
+ witness_tsd = witness_tsdn_tsd(witness_tsdn);
+
+ d = 0;
+ witnesses = &witness_tsd->witnesses;
+ w = ql_last(witnesses, link);
+ if (w != NULL) {
+ ql_reverse_foreach(w, witnesses, link) {
+ if (w->rank < rank_inclusive) {
+ break;
+ }
+ d++;
+ }
+ }
+ if (d != depth) {
+ witness_depth_error(witnesses, rank_inclusive, depth);
+ }
+}
+
+static inline void
+witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) {
+ witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth);
+}
+
+static inline void
+witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
+ witness_assert_depth(witness_tsdn, 0);
+}
+
+static inline void
+witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
+ witness_tsd_t *witness_tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug) {
+ return;
+ }
+
+ if (witness_tsdn_null(witness_tsdn)) {
+ return;
+ }
+ witness_tsd = witness_tsdn_tsd(witness_tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT) {
+ return;
+ }
+
+ witness_assert_not_owner(witness_tsdn, witness);
+
+ witnesses = &witness_tsd->witnesses;
+ w = ql_last(witnesses, link);
+ if (w == NULL) {
+ /* No other locks; do nothing. */
+ } else if (witness_tsd->forking && w->rank <= witness->rank) {
+ /* Forking, and relaxed ranking satisfied. */
+ } else if (w->rank > witness->rank) {
+ /* Not forking, rank order reversal. */
+ witness_lock_error(witnesses, witness);
+ } else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
+ witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
+ 0)) {
+ /*
+ * Missing/incompatible comparison function, or comparison
+ * function indicates rank order reversal.
+ */
+ witness_lock_error(witnesses, witness);
+ }
+
+ ql_elm_new(witness, link);
+ ql_tail_insert(witnesses, witness, link);
+}
+
+static inline void
+witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
+ witness_tsd_t *witness_tsd;
+ witness_list_t *witnesses;
+
+ if (!config_debug) {
+ return;
+ }
+
+ if (witness_tsdn_null(witness_tsdn)) {
+ return;
+ }
+ witness_tsd = witness_tsdn_tsd(witness_tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT) {
+ return;
+ }
+
+ /*
+	 * Check ownership before removal, rather than relying on
+ * witness_assert_owner() to abort, so that unit tests can test this
+ * function's failure mode without causing undefined behavior.
+ */
+ if (witness_owner(witness_tsd, witness)) {
+ witnesses = &witness_tsd->witnesses;
+ ql_remove(witnesses, witness, link);
+ } else {
+ witness_assert_owner(witness_tsdn, witness);
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_WITNESS_H */
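
The witness machinery above enforces, per thread and only when config_debug is set, that locks are acquired in non-decreasing rank order, with WITNESS_RANK_OMIT witnesses ignored and a comp callback breaking ties at equal rank. The toy checker below illustrates only the rank-ordering idea with a per-thread stack of ranks; it is a sketch of the concept, not jemalloc's witness API, and all names are hypothetical.

#include <assert.h>
#include <stdio.h>

#define MAX_HELD 16

/* Per-thread list of ranks currently held, loosely akin to witness_tsd_t. */
static __thread unsigned held_rank[MAX_HELD];
static __thread int n_held;

static void
toy_witness_lock(unsigned rank) {
	assert(n_held < MAX_HELD);
	/*
	 * A reversal here is what witness_lock_error() reports; the real code
	 * additionally requires a comp tie-break when ranks are equal.
	 */
	assert(n_held == 0 || held_rank[n_held - 1] <= rank);
	held_rank[n_held++] = rank;
}

static void
toy_witness_unlock(void) {
	assert(n_held > 0);
	n_held--;
}

int
main(void) {
	toy_witness_lock(3);	/* e.g. WITNESS_RANK_ARENAS */
	toy_witness_lock(15);	/* e.g. WITNESS_RANK_EXTENTS */
	toy_witness_unlock();
	toy_witness_unlock();
	printf("lock order respected\n");
	return 0;
}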
diff --git a/contrib/libs/jemalloc/include/jemalloc/jemalloc-linux.h b/contrib/libs/jemalloc/include/jemalloc/jemalloc-linux.h
index 84b2e8945d..d6cfd7c133 100644
--- a/contrib/libs/jemalloc/include/jemalloc/jemalloc-linux.h
+++ b/contrib/libs/jemalloc/include/jemalloc/jemalloc-linux.h
@@ -1,434 +1,434 @@
-#ifndef JEMALLOC_H_
-#define JEMALLOC_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Defined if __attribute__((...)) syntax is supported. */
-#define JEMALLOC_HAVE_ATTR
-
-/* Defined if alloc_size attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-
-/* Defined if format_arg(...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_ARG
-
-/* Defined if format(gnu_printf, ...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-
-/* Defined if format(printf, ...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#define JEMALLOC_OVERRIDE_MEMALIGN
-#define JEMALLOC_OVERRIDE_VALLOC
-
-/*
- * At least Linux omits the "const" in:
- *
- * size_t malloc_usable_size(const void *ptr);
- *
- * Match the operating system's prototype.
- */
-#define JEMALLOC_USABLE_SIZE_CONST
-
-/*
- * If defined, specify throw() for the public function prototypes when compiling
- * with C++. The only justification for this is to match the prototypes that
- * glibc defines.
- */
-/* #undef JEMALLOC_USE_CXX_THROW */
-
-#ifdef _MSC_VER
-# ifdef _WIN64
-# define LG_SIZEOF_PTR_WIN 3
-# else
-# define LG_SIZEOF_PTR_WIN 2
-# endif
-#endif
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#define LG_SIZEOF_PTR 3
-
-/*
- * Name mangling for public symbols is controlled by --with-mangling and
- * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
- * these macro definitions.
- */
-#ifndef JEMALLOC_NO_RENAME
-# define je_aligned_alloc aligned_alloc
-# define je_calloc calloc
-# define je_dallocx dallocx
-# define je_free free
-# define je_mallctl mallctl
-# define je_mallctlbymib mallctlbymib
-# define je_mallctlnametomib mallctlnametomib
-# define je_malloc malloc
-# define je_malloc_conf malloc_conf
-# define je_malloc_message malloc_message
-# define je_malloc_stats_print malloc_stats_print
-# define je_malloc_usable_size malloc_usable_size
-# define je_mallocx mallocx
-# define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
-# define je_nallocx nallocx
-# define je_posix_memalign posix_memalign
-# define je_rallocx rallocx
-# define je_realloc realloc
-# define je_sallocx sallocx
-# define je_sdallocx sdallocx
-# define je_xallocx xallocx
-# define je_memalign memalign
-# define je_valloc valloc
-#endif
-
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <limits.h>
-#include <strings.h>
-
-#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756"
-#define JEMALLOC_VERSION_MAJOR 5
-#define JEMALLOC_VERSION_MINOR 2
-#define JEMALLOC_VERSION_BUGFIX 1
-#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756"
-#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756
-
-#define MALLOCX_LG_ALIGN(la) ((int)(la))
-#if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
-#else
-# define MALLOCX_ALIGN(a) \
- ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
- ffs((int)(((size_t)(a))>>32))+31))
-#endif
-#define MALLOCX_ZERO ((int)0x40)
-/*
- * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
- * encodes MALLOCX_TCACHE_NONE.
- */
-#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
-#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
-/*
- * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
- */
-#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
-
-/*
- * Use as arena index in "arena.<i>.{purge,decay,dss}" and
- * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
- * definition is intentionally specified in raw decimal format to support
- * cpp-based string concatenation, e.g.
- *
- * #define STRINGIFY_HELPER(x) #x
- * #define STRINGIFY(x) STRINGIFY_HELPER(x)
- *
- * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
- * 0);
- */
-#define MALLCTL_ARENAS_ALL 4096
-/*
- * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
- * destroyed arenas.
- */
-#define MALLCTL_ARENAS_DESTROYED 4097
-
-#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
-# define JEMALLOC_CXX_THROW throw()
-#else
-# define JEMALLOC_CXX_THROW
-#endif
-
-#if defined(_MSC_VER)
-# define JEMALLOC_ATTR(s)
-# define JEMALLOC_ALIGNED(s) __declspec(align(s))
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# ifndef JEMALLOC_EXPORT
-# ifdef DLLEXPORT
-# define JEMALLOC_EXPORT __declspec(dllexport)
-# else
-# define JEMALLOC_EXPORT __declspec(dllimport)
-# endif
-# endif
-# define JEMALLOC_FORMAT_ARG(i)
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# define JEMALLOC_NOINLINE __declspec(noinline)
-# ifdef __cplusplus
-# define JEMALLOC_NOTHROW __declspec(nothrow)
-# else
-# define JEMALLOC_NOTHROW
-# endif
-# define JEMALLOC_SECTION(s) __declspec(allocate(s))
-# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
-# if _MSC_VER >= 1900 && !defined(__EDG__)
-# define JEMALLOC_ALLOCATOR __declspec(allocator)
-# else
-# define JEMALLOC_ALLOCATOR
-# endif
-#elif defined(JEMALLOC_HAVE_ATTR)
-# define JEMALLOC_ATTR(s) __attribute__((s))
-# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
-# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
-# else
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# endif
-# ifndef JEMALLOC_EXPORT
-# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
-# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
-# else
-# define JEMALLOC_FORMAT_ARG(i)
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
-# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
-# else
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# endif
-# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-# define JEMALLOC_NOTHROW
-# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-# define JEMALLOC_RESTRICT_RETURN
-# define JEMALLOC_ALLOCATOR
-#else
-# define JEMALLOC_ATTR(s)
-# define JEMALLOC_ALIGNED(s)
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# define JEMALLOC_EXPORT
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# define JEMALLOC_NOINLINE
-# define JEMALLOC_NOTHROW
-# define JEMALLOC_SECTION(s)
-# define JEMALLOC_RESTRICT_RETURN
-# define JEMALLOC_ALLOCATOR
-#endif
-
-/*
- * The je_ prefix on the following public symbol declarations is an artifact
- * of namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
- */
-extern JEMALLOC_EXPORT const char *je_malloc_conf;
-extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
- const char *s);
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_malloc(size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
- size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
- size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
- JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
- JEMALLOC_CXX_THROW;
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
- JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
- int flags) JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
- size_t extra, int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
- int flags) JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
- int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
- JEMALLOC_ATTR(pure);
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
- size_t *mibp, size_t *miblenp);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
- void (*write_cb)(void *, const char *), void *je_cbopaque,
- const char *opts);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
- JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
- JEMALLOC_ATTR(malloc);
-#endif
-
-typedef struct extent_hooks_s extent_hooks_t;
-
-/*
- * void *
- * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- * size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
- */
-typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
- bool *, unsigned);
-
-/*
- * bool
- * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * bool committed, unsigned arena_ind);
- */
-typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
- unsigned);
-
-/*
- * void
- * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * bool committed, unsigned arena_ind);
- */
-typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
- unsigned);
-
-/*
- * bool
- * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- unsigned);
-
-/*
- * bool
- * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
- size_t, unsigned);
-
-/*
- * bool
- * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- unsigned);
-
-/*
- * bool
- * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- bool, unsigned);
-
-/*
- * bool
- * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- * void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
- bool, unsigned);
-
-struct extent_hooks_s {
- extent_alloc_t *alloc;
- extent_dalloc_t *dalloc;
- extent_destroy_t *destroy;
- extent_commit_t *commit;
- extent_decommit_t *decommit;
- extent_purge_t *purge_lazy;
- extent_purge_t *purge_forced;
- extent_split_t *split;
- extent_merge_t *merge;
-};
-
-/*
- * By default application code must explicitly refer to mangled symbol names,
- * so that it is possible to use jemalloc in conjunction with another allocator
- * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
- * name mangling that matches the API prefixing that happened as a result of
- * --with-mangling and/or --with-jemalloc-prefix configuration settings.
- */
-#ifdef JEMALLOC_MANGLE
-# ifndef JEMALLOC_NO_DEMANGLE
-# define JEMALLOC_NO_DEMANGLE
-# endif
-# define aligned_alloc je_aligned_alloc
-# define calloc je_calloc
-# define dallocx je_dallocx
-# define free je_free
-# define mallctl je_mallctl
-# define mallctlbymib je_mallctlbymib
-# define mallctlnametomib je_mallctlnametomib
-# define malloc je_malloc
-# define malloc_conf je_malloc_conf
-# define malloc_message je_malloc_message
-# define malloc_stats_print je_malloc_stats_print
-# define malloc_usable_size je_malloc_usable_size
-# define mallocx je_mallocx
-# define smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
-# define nallocx je_nallocx
-# define posix_memalign je_posix_memalign
-# define rallocx je_rallocx
-# define realloc je_realloc
-# define sallocx je_sallocx
-# define sdallocx je_sdallocx
-# define xallocx je_xallocx
-# define memalign je_memalign
-# define valloc je_valloc
-#endif
-
-/*
- * The je_* macros can be used as stable alternative names for the
- * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
- * meant for use in jemalloc itself, but it can be used by application code to
- * provide isolation from the name mangling specified via --with-mangling
- * and/or --with-jemalloc-prefix.
- */
-#ifndef JEMALLOC_NO_DEMANGLE
-# undef je_aligned_alloc
-# undef je_calloc
-# undef je_dallocx
-# undef je_free
-# undef je_mallctl
-# undef je_mallctlbymib
-# undef je_mallctlnametomib
-# undef je_malloc
-# undef je_malloc_conf
-# undef je_malloc_message
-# undef je_malloc_stats_print
-# undef je_malloc_usable_size
-# undef je_mallocx
-# undef je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
-# undef je_nallocx
-# undef je_posix_memalign
-# undef je_rallocx
-# undef je_realloc
-# undef je_sallocx
-# undef je_sdallocx
-# undef je_xallocx
-# undef je_memalign
-# undef je_valloc
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* JEMALLOC_H_ */
+#ifndef JEMALLOC_H_
+#define JEMALLOC_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Defined if __attribute__((...)) syntax is supported. */
+#define JEMALLOC_HAVE_ATTR
+
+/* Defined if alloc_size attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+
+/* Defined if format_arg(...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_ARG
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+
+/* Defined if format(printf, ...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#define JEMALLOC_OVERRIDE_MEMALIGN
+#define JEMALLOC_OVERRIDE_VALLOC
+
+/*
+ * At least Linux omits the "const" in:
+ *
+ * size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#define JEMALLOC_USABLE_SIZE_CONST
+
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++. The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+/* #undef JEMALLOC_USE_CXX_THROW */
+
+#ifdef _MSC_VER
+# ifdef _WIN64
+# define LG_SIZEOF_PTR_WIN 3
+# else
+# define LG_SIZEOF_PTR_WIN 2
+# endif
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#define LG_SIZEOF_PTR 3
+
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */
+#ifndef JEMALLOC_NO_RENAME
+# define je_aligned_alloc aligned_alloc
+# define je_calloc calloc
+# define je_dallocx dallocx
+# define je_free free
+# define je_mallctl mallctl
+# define je_mallctlbymib mallctlbymib
+# define je_mallctlnametomib mallctlnametomib
+# define je_malloc malloc
+# define je_malloc_conf malloc_conf
+# define je_malloc_message malloc_message
+# define je_malloc_stats_print malloc_stats_print
+# define je_malloc_usable_size malloc_usable_size
+# define je_mallocx mallocx
+# define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# define je_nallocx nallocx
+# define je_posix_memalign posix_memalign
+# define je_rallocx rallocx
+# define je_realloc realloc
+# define je_sallocx sallocx
+# define je_sdallocx sdallocx
+# define je_xallocx xallocx
+# define je_memalign memalign
+# define je_valloc valloc
+#endif
+
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <limits.h>
+#include <strings.h>
+
+#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756"
+#define JEMALLOC_VERSION_MAJOR 5
+#define JEMALLOC_VERSION_MINOR 2
+#define JEMALLOC_VERSION_BUGFIX 1
+#define JEMALLOC_VERSION_NREV 0
+#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756"
+#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756
+
+#define MALLOCX_LG_ALIGN(la) ((int)(la))
+#if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+#else
+# define MALLOCX_ALIGN(a) \
+ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
+ ffs((int)(((size_t)(a))>>32))+31))
+#endif
+#define MALLOCX_ZERO ((int)0x40)
+/*
+ * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
+ * encodes MALLOCX_TCACHE_NONE.
+ */
+#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
+#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
+/*
+ * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
+ */
+#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
+
+/*
+ * Use as arena index in "arena.<i>.{purge,decay,dss}" and
+ * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
+ * definition is intentionally specified in raw decimal format to support
+ * cpp-based string concatenation, e.g.
+ *
+ * #define STRINGIFY_HELPER(x) #x
+ * #define STRINGIFY(x) STRINGIFY_HELPER(x)
+ *
+ * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
+ * 0);
+ */
+#define MALLCTL_ARENAS_ALL 4096
+/*
+ * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
+ * destroyed arenas.
+ */
+#define MALLCTL_ARENAS_DESTROYED 4097
+
+#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
+# define JEMALLOC_CXX_THROW throw()
+#else
+# define JEMALLOC_CXX_THROW
+#endif
+
+#if defined(_MSC_VER)
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_ALIGNED(s) __declspec(align(s))
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# ifndef JEMALLOC_EXPORT
+# ifdef DLLEXPORT
+# define JEMALLOC_EXPORT __declspec(dllexport)
+# else
+# define JEMALLOC_EXPORT __declspec(dllimport)
+# endif
+# endif
+# define JEMALLOC_FORMAT_ARG(i)
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_NOINLINE __declspec(noinline)
+# ifdef __cplusplus
+# define JEMALLOC_NOTHROW __declspec(nothrow)
+# else
+# define JEMALLOC_NOTHROW
+# endif
+# define JEMALLOC_SECTION(s) __declspec(allocate(s))
+# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
+# if _MSC_VER >= 1900 && !defined(__EDG__)
+# define JEMALLOC_ALLOCATOR __declspec(allocator)
+# else
+# define JEMALLOC_ALLOCATOR
+# endif
+#elif defined(JEMALLOC_HAVE_ATTR)
+# define JEMALLOC_ATTR(s) __attribute__((s))
+# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
+# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
+# else
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# endif
+# ifndef JEMALLOC_EXPORT
+# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
+# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
+# else
+# define JEMALLOC_FORMAT_ARG(i)
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
+# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
+# else
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# endif
+# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+# define JEMALLOC_NOTHROW
+# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+#else
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_ALIGNED(s)
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# define JEMALLOC_EXPORT
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_NOINLINE
+# define JEMALLOC_NOTHROW
+# define JEMALLOC_SECTION(s)
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+#endif
+
+/*
+ * The je_ prefix on the following public symbol declarations is an artifact
+ * of namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
+ */
+extern JEMALLOC_EXPORT const char *je_malloc_conf;
+extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
+ const char *s);
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_malloc(size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
+ size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
+ size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
+ JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
+ JEMALLOC_CXX_THROW;
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
+ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
+ int flags) JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
+ size_t extra, int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
+ int flags) JEMALLOC_ATTR(pure);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
+ int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
+ JEMALLOC_ATTR(pure);
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
+ size_t *mibp, size_t *miblenp);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
+ void (*write_cb)(void *, const char *), void *je_cbopaque,
+ const char *opts);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
+ JEMALLOC_ATTR(malloc);
+#endif
+
+typedef struct extent_hooks_s extent_hooks_t;
+
+/*
+ * void *
+ * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ * size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+ */
+typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
+ bool *, unsigned);
+
+/*
+ * bool
+ * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
+
+/*
+ * void
+ * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
+ */
+typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
+
+/*
+ * bool
+ * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
+
+/*
+ * bool
+ * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
+ size_t, unsigned);
+
+/*
+ * bool
+ * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
+
+/*
+ * bool
+ * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ bool, unsigned);
+
+/*
+ * bool
+ * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ * void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
+ bool, unsigned);
+
+struct extent_hooks_s {
+ extent_alloc_t *alloc;
+ extent_dalloc_t *dalloc;
+ extent_destroy_t *destroy;
+ extent_commit_t *commit;
+ extent_decommit_t *decommit;
+ extent_purge_t *purge_lazy;
+ extent_purge_t *purge_forced;
+ extent_split_t *split;
+ extent_merge_t *merge;
+};
+
+/*
+ * By default application code must explicitly refer to mangled symbol names,
+ * so that it is possible to use jemalloc in conjunction with another allocator
+ * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
+ * name mangling that matches the API prefixing that happened as a result of
+ * --with-mangling and/or --with-jemalloc-prefix configuration settings.
+ */
+#ifdef JEMALLOC_MANGLE
+# ifndef JEMALLOC_NO_DEMANGLE
+# define JEMALLOC_NO_DEMANGLE
+# endif
+# define aligned_alloc je_aligned_alloc
+# define calloc je_calloc
+# define dallocx je_dallocx
+# define free je_free
+# define mallctl je_mallctl
+# define mallctlbymib je_mallctlbymib
+# define mallctlnametomib je_mallctlnametomib
+# define malloc je_malloc
+# define malloc_conf je_malloc_conf
+# define malloc_message je_malloc_message
+# define malloc_stats_print je_malloc_stats_print
+# define malloc_usable_size je_malloc_usable_size
+# define mallocx je_mallocx
+# define smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# define nallocx je_nallocx
+# define posix_memalign je_posix_memalign
+# define rallocx je_rallocx
+# define realloc je_realloc
+# define sallocx je_sallocx
+# define sdallocx je_sdallocx
+# define xallocx je_xallocx
+# define memalign je_memalign
+# define valloc je_valloc
+#endif
+
+/*
+ * The je_* macros can be used as stable alternative names for the
+ * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
+ * meant for use in jemalloc itself, but it can be used by application code to
+ * provide isolation from the name mangling specified via --with-mangling
+ * and/or --with-jemalloc-prefix.
+ */
+#ifndef JEMALLOC_NO_DEMANGLE
+# undef je_aligned_alloc
+# undef je_calloc
+# undef je_dallocx
+# undef je_free
+# undef je_mallctl
+# undef je_mallctlbymib
+# undef je_mallctlnametomib
+# undef je_malloc
+# undef je_malloc_conf
+# undef je_malloc_message
+# undef je_malloc_stats_print
+# undef je_malloc_usable_size
+# undef je_mallocx
+# undef je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# undef je_nallocx
+# undef je_posix_memalign
+# undef je_rallocx
+# undef je_realloc
+# undef je_sallocx
+# undef je_sdallocx
+# undef je_xallocx
+# undef je_memalign
+# undef je_valloc
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* JEMALLOC_H_ */
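
Usage note (illustrative only, not part of the patch above): the header just shown declares the public jemalloc entry points with the je_ prefix stripped under the default mangling (JEMALLOC_NO_RENAME undefined). A minimal sketch of how an application might exercise those declarations, assuming jemalloc is installed and included as <jemalloc/jemalloc.h>:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* 64-byte-aligned, zero-filled allocation via the MALLOCX_* flag macros
     * defined in the header above. */
    void *p = mallocx(1024, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
    if (p == NULL) {
        return 1;
    }

    /* Query the library version through the mallctl namespace. */
    const char *ver;
    size_t len = sizeof(ver);
    if (mallctl("version", &ver, &len, NULL, 0) == 0) {
        printf("jemalloc %s, usable size %zu\n", ver, malloc_usable_size(p));
    }

    /* Sized deallocation; passing the original request size is a valid hint. */
    sdallocx(p, 1024, 0);
    return 0;
}

The MALLOCX_ALIGN/MALLOCX_ZERO flags and the "version" mallctl name come directly from the macros and prototypes declared in this header; everything else in the snippet is an assumption for illustration.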
diff --git a/contrib/libs/jemalloc/include/jemalloc/jemalloc-osx.h b/contrib/libs/jemalloc/include/jemalloc/jemalloc-osx.h
index a461f2a740..3574f32b42 100644
--- a/contrib/libs/jemalloc/include/jemalloc/jemalloc-osx.h
+++ b/contrib/libs/jemalloc/include/jemalloc/jemalloc-osx.h
@@ -1,431 +1,431 @@
-#ifndef JEMALLOC_H_
-#define JEMALLOC_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Defined if __attribute__((...)) syntax is supported. */
-#define JEMALLOC_HAVE_ATTR
-
-/* Defined if alloc_size attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-
-/* Defined if format_arg(...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_ARG
-
-/* Defined if format(gnu_printf, ...) attribute is supported. */
-/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
-
-/* Defined if format(printf, ...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
-#define JEMALLOC_OVERRIDE_VALLOC
-
-/*
- * At least Linux omits the "const" in:
- *
- * size_t malloc_usable_size(const void *ptr);
- *
- * Match the operating system's prototype.
- */
-#define JEMALLOC_USABLE_SIZE_CONST const
-
-/*
- * If defined, specify throw() for the public function prototypes when compiling
- * with C++. The only justification for this is to match the prototypes that
- * glibc defines.
- */
-/* #undef JEMALLOC_USE_CXX_THROW */
-
-#ifdef _MSC_VER
-# ifdef _WIN64
-# define LG_SIZEOF_PTR_WIN 3
-# else
-# define LG_SIZEOF_PTR_WIN 2
-# endif
-#endif
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#define LG_SIZEOF_PTR 3
-
-/*
- * Name mangling for public symbols is controlled by --with-mangling and
- * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
- * these macro definitions.
- */
-#ifndef JEMALLOC_NO_RENAME
-# define je_aligned_alloc aligned_alloc
-# define je_calloc calloc
-# define je_dallocx dallocx
-# define je_free free
-# define je_mallctl mallctl
-# define je_mallctlbymib mallctlbymib
-# define je_mallctlnametomib mallctlnametomib
-# define je_malloc malloc
-# define je_malloc_conf malloc_conf
-# define je_malloc_message malloc_message
-# define je_malloc_stats_print malloc_stats_print
-# define je_malloc_usable_size malloc_usable_size
-# define je_mallocx mallocx
-# define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
-# define je_nallocx nallocx
-# define je_posix_memalign posix_memalign
-# define je_rallocx rallocx
-# define je_realloc realloc
-# define je_sallocx sallocx
-# define je_sdallocx sdallocx
-# define je_xallocx xallocx
-# define je_valloc valloc
-#endif
-
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <limits.h>
-#include <strings.h>
-
-#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756"
-#define JEMALLOC_VERSION_MAJOR 5
-#define JEMALLOC_VERSION_MINOR 2
-#define JEMALLOC_VERSION_BUGFIX 1
-#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756"
-#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756
-
-#define MALLOCX_LG_ALIGN(la) ((int)(la))
-#if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
-#else
-# define MALLOCX_ALIGN(a) \
- ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
- ffs((int)(((size_t)(a))>>32))+31))
-#endif
-#define MALLOCX_ZERO ((int)0x40)
-/*
- * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
- * encodes MALLOCX_TCACHE_NONE.
- */
-#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
-#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
-/*
- * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
- */
-#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
-
-/*
- * Use as arena index in "arena.<i>.{purge,decay,dss}" and
- * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
- * definition is intentionally specified in raw decimal format to support
- * cpp-based string concatenation, e.g.
- *
- * #define STRINGIFY_HELPER(x) #x
- * #define STRINGIFY(x) STRINGIFY_HELPER(x)
- *
- * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
- * 0);
- */
-#define MALLCTL_ARENAS_ALL 4096
-/*
- * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
- * destroyed arenas.
- */
-#define MALLCTL_ARENAS_DESTROYED 4097
-
-#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
-# define JEMALLOC_CXX_THROW throw()
-#else
-# define JEMALLOC_CXX_THROW
-#endif
-
-#if defined(_MSC_VER)
-# define JEMALLOC_ATTR(s)
-# define JEMALLOC_ALIGNED(s) __declspec(align(s))
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# ifndef JEMALLOC_EXPORT
-# ifdef DLLEXPORT
-# define JEMALLOC_EXPORT __declspec(dllexport)
-# else
-# define JEMALLOC_EXPORT __declspec(dllimport)
-# endif
-# endif
-# define JEMALLOC_FORMAT_ARG(i)
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# define JEMALLOC_NOINLINE __declspec(noinline)
-# ifdef __cplusplus
-# define JEMALLOC_NOTHROW __declspec(nothrow)
-# else
-# define JEMALLOC_NOTHROW
-# endif
-# define JEMALLOC_SECTION(s) __declspec(allocate(s))
-# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
-# if _MSC_VER >= 1900 && !defined(__EDG__)
-# define JEMALLOC_ALLOCATOR __declspec(allocator)
-# else
-# define JEMALLOC_ALLOCATOR
-# endif
-#elif defined(JEMALLOC_HAVE_ATTR)
-# define JEMALLOC_ATTR(s) __attribute__((s))
-# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
-# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
-# else
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# endif
-# ifndef JEMALLOC_EXPORT
-# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
-# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
-# else
-# define JEMALLOC_FORMAT_ARG(i)
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
-# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
-# else
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# endif
-# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-# define JEMALLOC_NOTHROW
-# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-# define JEMALLOC_RESTRICT_RETURN
-# define JEMALLOC_ALLOCATOR
-#else
-# define JEMALLOC_ATTR(s)
-# define JEMALLOC_ALIGNED(s)
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# define JEMALLOC_EXPORT
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# define JEMALLOC_NOINLINE
-# define JEMALLOC_NOTHROW
-# define JEMALLOC_SECTION(s)
-# define JEMALLOC_RESTRICT_RETURN
-# define JEMALLOC_ALLOCATOR
-#endif
-
-/*
- * The je_ prefix on the following public symbol declarations is an artifact
- * of namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
- */
-extern JEMALLOC_EXPORT const char *je_malloc_conf;
-extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
- const char *s);
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_malloc(size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
- size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
- size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
- JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
- JEMALLOC_CXX_THROW;
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
- JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
- int flags) JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
- size_t extra, int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
- int flags) JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
- int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
- JEMALLOC_ATTR(pure);
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
- size_t *mibp, size_t *miblenp);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
- void (*write_cb)(void *, const char *), void *je_cbopaque,
- const char *opts);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
- JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
- JEMALLOC_ATTR(malloc);
-#endif
-
-typedef struct extent_hooks_s extent_hooks_t;
-
-/*
- * void *
- * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- * size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
- */
-typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
- bool *, unsigned);
-
-/*
- * bool
- * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * bool committed, unsigned arena_ind);
- */
-typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
- unsigned);
-
-/*
- * void
- * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * bool committed, unsigned arena_ind);
- */
-typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
- unsigned);
-
-/*
- * bool
- * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- unsigned);
-
-/*
- * bool
- * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
- size_t, unsigned);
-
-/*
- * bool
- * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- unsigned);
-
-/*
- * bool
- * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- bool, unsigned);
-
-/*
- * bool
- * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- * void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
- bool, unsigned);
-
-struct extent_hooks_s {
- extent_alloc_t *alloc;
- extent_dalloc_t *dalloc;
- extent_destroy_t *destroy;
- extent_commit_t *commit;
- extent_decommit_t *decommit;
- extent_purge_t *purge_lazy;
- extent_purge_t *purge_forced;
- extent_split_t *split;
- extent_merge_t *merge;
-};
-
-/*
- * By default application code must explicitly refer to mangled symbol names,
- * so that it is possible to use jemalloc in conjunction with another allocator
- * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
- * name mangling that matches the API prefixing that happened as a result of
- * --with-mangling and/or --with-jemalloc-prefix configuration settings.
- */
-#ifdef JEMALLOC_MANGLE
-# ifndef JEMALLOC_NO_DEMANGLE
-# define JEMALLOC_NO_DEMANGLE
-# endif
-# define aligned_alloc je_aligned_alloc
-# define calloc je_calloc
-# define dallocx je_dallocx
-# define free je_free
-# define mallctl je_mallctl
-# define mallctlbymib je_mallctlbymib
-# define mallctlnametomib je_mallctlnametomib
-# define malloc je_malloc
-# define malloc_conf je_malloc_conf
-# define malloc_message je_malloc_message
-# define malloc_stats_print je_malloc_stats_print
-# define malloc_usable_size je_malloc_usable_size
-# define mallocx je_mallocx
-# define smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
-# define nallocx je_nallocx
-# define posix_memalign je_posix_memalign
-# define rallocx je_rallocx
-# define realloc je_realloc
-# define sallocx je_sallocx
-# define sdallocx je_sdallocx
-# define xallocx je_xallocx
-# define valloc je_valloc
-#endif
-
-/*
- * The je_* macros can be used as stable alternative names for the
- * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
- * meant for use in jemalloc itself, but it can be used by application code to
- * provide isolation from the name mangling specified via --with-mangling
- * and/or --with-jemalloc-prefix.
- */
-#ifndef JEMALLOC_NO_DEMANGLE
-# undef je_aligned_alloc
-# undef je_calloc
-# undef je_dallocx
-# undef je_free
-# undef je_mallctl
-# undef je_mallctlbymib
-# undef je_mallctlnametomib
-# undef je_malloc
-# undef je_malloc_conf
-# undef je_malloc_message
-# undef je_malloc_stats_print
-# undef je_malloc_usable_size
-# undef je_mallocx
-# undef je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
-# undef je_nallocx
-# undef je_posix_memalign
-# undef je_rallocx
-# undef je_realloc
-# undef je_sallocx
-# undef je_sdallocx
-# undef je_xallocx
-# undef je_valloc
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* JEMALLOC_H_ */
+#ifndef JEMALLOC_H_
+#define JEMALLOC_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Defined if __attribute__((...)) syntax is supported. */
+#define JEMALLOC_HAVE_ATTR
+
+/* Defined if alloc_size attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+
+/* Defined if format_arg(...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_ARG
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
+
+/* Defined if format(printf, ...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
+#define JEMALLOC_OVERRIDE_VALLOC
+
+/*
+ * At least Linux omits the "const" in:
+ *
+ * size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#define JEMALLOC_USABLE_SIZE_CONST const
+
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++. The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+/* #undef JEMALLOC_USE_CXX_THROW */
+
+#ifdef _MSC_VER
+# ifdef _WIN64
+# define LG_SIZEOF_PTR_WIN 3
+# else
+# define LG_SIZEOF_PTR_WIN 2
+# endif
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#define LG_SIZEOF_PTR 3
+
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */
+#ifndef JEMALLOC_NO_RENAME
+# define je_aligned_alloc aligned_alloc
+# define je_calloc calloc
+# define je_dallocx dallocx
+# define je_free free
+# define je_mallctl mallctl
+# define je_mallctlbymib mallctlbymib
+# define je_mallctlnametomib mallctlnametomib
+# define je_malloc malloc
+# define je_malloc_conf malloc_conf
+# define je_malloc_message malloc_message
+# define je_malloc_stats_print malloc_stats_print
+# define je_malloc_usable_size malloc_usable_size
+# define je_mallocx mallocx
+# define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# define je_nallocx nallocx
+# define je_posix_memalign posix_memalign
+# define je_rallocx rallocx
+# define je_realloc realloc
+# define je_sallocx sallocx
+# define je_sdallocx sdallocx
+# define je_xallocx xallocx
+# define je_valloc valloc
+#endif
+
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <limits.h>
+#include <strings.h>
+
+#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756"
+#define JEMALLOC_VERSION_MAJOR 5
+#define JEMALLOC_VERSION_MINOR 2
+#define JEMALLOC_VERSION_BUGFIX 1
+#define JEMALLOC_VERSION_NREV 0
+#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756"
+#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756
+
+#define MALLOCX_LG_ALIGN(la) ((int)(la))
+#if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+#else
+# define MALLOCX_ALIGN(a) \
+ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
+ ffs((int)(((size_t)(a))>>32))+31))
+#endif
+#define MALLOCX_ZERO ((int)0x40)
+/*
+ * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
+ * encodes MALLOCX_TCACHE_NONE.
+ */
+#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
+#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
+/*
+ * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
+ */
+#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
+
+/*
+ * Use as arena index in "arena.<i>.{purge,decay,dss}" and
+ * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
+ * definition is intentionally specified in raw decimal format to support
+ * cpp-based string concatenation, e.g.
+ *
+ * #define STRINGIFY_HELPER(x) #x
+ * #define STRINGIFY(x) STRINGIFY_HELPER(x)
+ *
+ * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
+ * 0);
+ */
+#define MALLCTL_ARENAS_ALL 4096
+/*
+ * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
+ * destroyed arenas.
+ */
+#define MALLCTL_ARENAS_DESTROYED 4097
+
+#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
+# define JEMALLOC_CXX_THROW throw()
+#else
+# define JEMALLOC_CXX_THROW
+#endif
+
+#if defined(_MSC_VER)
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_ALIGNED(s) __declspec(align(s))
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# ifndef JEMALLOC_EXPORT
+# ifdef DLLEXPORT
+# define JEMALLOC_EXPORT __declspec(dllexport)
+# else
+# define JEMALLOC_EXPORT __declspec(dllimport)
+# endif
+# endif
+# define JEMALLOC_FORMAT_ARG(i)
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_NOINLINE __declspec(noinline)
+# ifdef __cplusplus
+# define JEMALLOC_NOTHROW __declspec(nothrow)
+# else
+# define JEMALLOC_NOTHROW
+# endif
+# define JEMALLOC_SECTION(s) __declspec(allocate(s))
+# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
+# if _MSC_VER >= 1900 && !defined(__EDG__)
+# define JEMALLOC_ALLOCATOR __declspec(allocator)
+# else
+# define JEMALLOC_ALLOCATOR
+# endif
+#elif defined(JEMALLOC_HAVE_ATTR)
+# define JEMALLOC_ATTR(s) __attribute__((s))
+# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
+# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
+# else
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# endif
+# ifndef JEMALLOC_EXPORT
+# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
+# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
+# else
+# define JEMALLOC_FORMAT_ARG(i)
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
+# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
+# else
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# endif
+# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+# define JEMALLOC_NOTHROW
+# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+#else
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_ALIGNED(s)
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# define JEMALLOC_EXPORT
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_NOINLINE
+# define JEMALLOC_NOTHROW
+# define JEMALLOC_SECTION(s)
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+#endif
+
+/*
+ * The je_ prefix on the following public symbol declarations is an artifact
+ * of namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
+ */
+extern JEMALLOC_EXPORT const char *je_malloc_conf;
+extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
+ const char *s);
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_malloc(size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
+ size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
+ size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
+ JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
+ JEMALLOC_CXX_THROW;
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
+ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
+ int flags) JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
+ size_t extra, int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
+ int flags) JEMALLOC_ATTR(pure);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
+ int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
+ JEMALLOC_ATTR(pure);
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
+ size_t *mibp, size_t *miblenp);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
+ void (*write_cb)(void *, const char *), void *je_cbopaque,
+ const char *opts);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
+ JEMALLOC_ATTR(malloc);
+#endif
+
+typedef struct extent_hooks_s extent_hooks_t;
+
+/*
+ * void *
+ * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ * size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+ */
+typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
+ bool *, unsigned);
+
+/*
+ * bool
+ * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
+
+/*
+ * void
+ * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
+ */
+typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
+
+/*
+ * bool
+ * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
+
+/*
+ * bool
+ * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
+ size_t, unsigned);
+
+/*
+ * bool
+ * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
+
+/*
+ * bool
+ * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ bool, unsigned);
+
+/*
+ * bool
+ * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ * void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
+ bool, unsigned);
+
+struct extent_hooks_s {
+ extent_alloc_t *alloc;
+ extent_dalloc_t *dalloc;
+ extent_destroy_t *destroy;
+ extent_commit_t *commit;
+ extent_decommit_t *decommit;
+ extent_purge_t *purge_lazy;
+ extent_purge_t *purge_forced;
+ extent_split_t *split;
+ extent_merge_t *merge;
+};
+
+/*
+ * By default application code must explicitly refer to mangled symbol names,
+ * so that it is possible to use jemalloc in conjunction with another allocator
+ * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
+ * name mangling that matches the API prefixing that happened as a result of
+ * --with-mangling and/or --with-jemalloc-prefix configuration settings.
+ */
+#ifdef JEMALLOC_MANGLE
+# ifndef JEMALLOC_NO_DEMANGLE
+# define JEMALLOC_NO_DEMANGLE
+# endif
+# define aligned_alloc je_aligned_alloc
+# define calloc je_calloc
+# define dallocx je_dallocx
+# define free je_free
+# define mallctl je_mallctl
+# define mallctlbymib je_mallctlbymib
+# define mallctlnametomib je_mallctlnametomib
+# define malloc je_malloc
+# define malloc_conf je_malloc_conf
+# define malloc_message je_malloc_message
+# define malloc_stats_print je_malloc_stats_print
+# define malloc_usable_size je_malloc_usable_size
+# define mallocx je_mallocx
+# define smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# define nallocx je_nallocx
+# define posix_memalign je_posix_memalign
+# define rallocx je_rallocx
+# define realloc je_realloc
+# define sallocx je_sallocx
+# define sdallocx je_sdallocx
+# define xallocx je_xallocx
+# define valloc je_valloc
+#endif
+
+/*
+ * The je_* macros can be used as stable alternative names for the
+ * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
+ * meant for use in jemalloc itself, but it can be used by application code to
+ * provide isolation from the name mangling specified via --with-mangling
+ * and/or --with-jemalloc-prefix.
+ */
+#ifndef JEMALLOC_NO_DEMANGLE
+# undef je_aligned_alloc
+# undef je_calloc
+# undef je_dallocx
+# undef je_free
+# undef je_mallctl
+# undef je_mallctlbymib
+# undef je_mallctlnametomib
+# undef je_malloc
+# undef je_malloc_conf
+# undef je_malloc_message
+# undef je_malloc_stats_print
+# undef je_malloc_usable_size
+# undef je_mallocx
+# undef je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# undef je_nallocx
+# undef je_posix_memalign
+# undef je_rallocx
+# undef je_realloc
+# undef je_sallocx
+# undef je_sdallocx
+# undef je_xallocx
+# undef je_valloc
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* JEMALLOC_H_ */
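
Usage note (illustrative only, not part of the patch above): the extent_hooks_t struct and the hook typedefs declared in this header form jemalloc's extent customization API. Below is a hedged sketch of installing a logging alloc hook on a freshly created arena through the "arenas.create" and "arena.<i>.extent_hooks" mallctl names, again assuming default (unprefixed) mangling; the wrapper name log_extent_alloc and the forward-to-default-hooks pattern are the editor's illustration, not code from this repository.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static extent_hooks_t *default_hooks;

/* Matches extent_alloc_t from the header: log the request, then delegate to
 * the previously installed (default) hooks. */
static void *log_extent_alloc(extent_hooks_t *hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind) {
    (void)hooks;
    fprintf(stderr, "extent_alloc: %zu bytes for arena %u\n", size, arena_ind);
    return default_hooks->alloc(default_hooks, new_addr, size, alignment,
        zero, commit, arena_ind);
}

int main(void) {
    /* Create a dedicated arena to attach the custom hooks to. */
    unsigned arena;
    size_t sz = sizeof(arena);
    if (mallctl("arenas.create", &arena, &sz, NULL, 0) != 0) {
        return 1;
    }

    char cmd[64];
    snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena);

    /* Read the current hook table, then install a copy whose alloc member is
     * replaced by the logging wrapper. Static storage keeps the table valid
     * for as long as jemalloc holds the pointer. */
    extent_hooks_t *cur;
    size_t hsz = sizeof(cur);
    if (mallctl(cmd, &cur, &hsz, NULL, 0) != 0) {
        return 1;
    }
    default_hooks = cur;
    static extent_hooks_t hooks;
    hooks = *cur;
    hooks.alloc = log_extent_alloc;
    extent_hooks_t *newp = &hooks;
    if (mallctl(cmd, NULL, NULL, &newp, sizeof(newp)) != 0) {
        return 1;
    }

    /* Allocations bound to this arena now report their extent allocations. */
    void *p = mallocx(1 << 20, MALLOCX_ARENA(arena));
    dallocx(p, 0);
    return 0;
}

The hook signatures, MALLOCX_ARENA, and the mallctl prototypes are taken from the header above; the specific mallctl names and the copy-and-replace installation pattern are stated assumptions about how the API is commonly used rather than something this commit defines.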
diff --git a/contrib/libs/jemalloc/include/jemalloc/jemalloc-win.h b/contrib/libs/jemalloc/include/jemalloc/jemalloc-win.h
index 76fe7d9f6b..2b8d2ed434 100644
--- a/contrib/libs/jemalloc/include/jemalloc/jemalloc-win.h
+++ b/contrib/libs/jemalloc/include/jemalloc/jemalloc-win.h
@@ -1,428 +1,428 @@
-#ifndef JEMALLOC_H_
-#define JEMALLOC_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Defined if __attribute__((...)) syntax is supported. */
-/* #undef JEMALLOC_HAVE_ATTR */
-
-/* Defined if alloc_size attribute is supported. */
-/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
-
-/* Defined if format_arg(...) attribute is supported. */
-/* #undef JEMALLOC_HAVE_ATTR_FORMAT_ARG */
-
-/* Defined if format(gnu_printf, ...) attribute is supported. */
-/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
-
-/* Defined if format(printf, ...) attribute is supported. */
-/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
-/* #undef JEMALLOC_OVERRIDE_VALLOC */
-
-/*
- * At least Linux omits the "const" in:
- *
- * size_t malloc_usable_size(const void *ptr);
- *
- * Match the operating system's prototype.
- */
-#define JEMALLOC_USABLE_SIZE_CONST const
-
-/*
- * If defined, specify throw() for the public function prototypes when compiling
- * with C++. The only justification for this is to match the prototypes that
- * glibc defines.
- */
-/* #undef JEMALLOC_USE_CXX_THROW */
-
-#ifdef _MSC_VER
-# ifdef _WIN64
-# define LG_SIZEOF_PTR_WIN 3
-# else
-# define LG_SIZEOF_PTR_WIN 2
-# endif
-#endif
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
-
-/*
- * Name mangling for public symbols is controlled by --with-mangling and
- * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
- * these macro definitions.
- */
-#ifndef JEMALLOC_NO_RENAME
-# define je_aligned_alloc aligned_alloc
-# define je_calloc calloc
-# define je_dallocx dallocx
-# define je_free free
-# define je_mallctl mallctl
-# define je_mallctlbymib mallctlbymib
-# define je_mallctlnametomib mallctlnametomib
-# define je_malloc malloc
-# define je_malloc_conf malloc_conf
-# define je_malloc_message malloc_message
-# define je_malloc_stats_print malloc_stats_print
-# define je_malloc_usable_size malloc_usable_size
-# define je_mallocx mallocx
-# define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
-# define je_nallocx nallocx
-# define je_posix_memalign posix_memalign
-# define je_rallocx rallocx
-# define je_realloc realloc
-# define je_sallocx sallocx
-# define je_sdallocx sdallocx
-# define je_xallocx xallocx
-#endif
-
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <limits.h>
-#include <strings.h>
-
-#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756"
-#define JEMALLOC_VERSION_MAJOR 5
-#define JEMALLOC_VERSION_MINOR 2
-#define JEMALLOC_VERSION_BUGFIX 1
-#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756"
-#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756
-
-#define MALLOCX_LG_ALIGN(la) ((int)(la))
-#if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
-#else
-# define MALLOCX_ALIGN(a) \
- ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
- ffs((int)(((size_t)(a))>>32))+31))
-#endif
-#define MALLOCX_ZERO ((int)0x40)
-/*
- * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
- * encodes MALLOCX_TCACHE_NONE.
- */
-#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
-#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
-/*
- * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
- */
-#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
-
-/*
- * Use as arena index in "arena.<i>.{purge,decay,dss}" and
- * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
- * definition is intentionally specified in raw decimal format to support
- * cpp-based string concatenation, e.g.
- *
- * #define STRINGIFY_HELPER(x) #x
- * #define STRINGIFY(x) STRINGIFY_HELPER(x)
- *
- * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
- * 0);
- */
-#define MALLCTL_ARENAS_ALL 4096
-/*
- * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
- * destroyed arenas.
- */
-#define MALLCTL_ARENAS_DESTROYED 4097
-
-#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
-# define JEMALLOC_CXX_THROW throw()
-#else
-# define JEMALLOC_CXX_THROW
-#endif
-
-#if defined(_MSC_VER)
-# define JEMALLOC_ATTR(s)
-# define JEMALLOC_ALIGNED(s) __declspec(align(s))
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# ifndef JEMALLOC_EXPORT
-# ifdef DLLEXPORT
-# define JEMALLOC_EXPORT __declspec(dllexport)
-# else
-# define JEMALLOC_EXPORT __declspec(dllimport)
-# endif
-# endif
-# define JEMALLOC_FORMAT_ARG(i)
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# define JEMALLOC_NOINLINE __declspec(noinline)
-# ifdef __cplusplus
-# define JEMALLOC_NOTHROW __declspec(nothrow)
-# else
-# define JEMALLOC_NOTHROW
-# endif
-# define JEMALLOC_SECTION(s) __declspec(allocate(s))
-# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
-# if _MSC_VER >= 1900 && !defined(__EDG__)
-# define JEMALLOC_ALLOCATOR __declspec(allocator)
-# else
-# define JEMALLOC_ALLOCATOR
-# endif
-#elif defined(JEMALLOC_HAVE_ATTR)
-# define JEMALLOC_ATTR(s) __attribute__((s))
-# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
-# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
-# else
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# endif
-# ifndef JEMALLOC_EXPORT
-# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
-# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
-# else
-# define JEMALLOC_FORMAT_ARG(i)
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
-# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
-# else
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# endif
-# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
-# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-# define JEMALLOC_RESTRICT_RETURN
-# define JEMALLOC_ALLOCATOR
-#else
-# define JEMALLOC_ATTR(s)
-# define JEMALLOC_ALIGNED(s)
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# define JEMALLOC_EXPORT
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# define JEMALLOC_NOINLINE
-# define JEMALLOC_NOTHROW
-# define JEMALLOC_SECTION(s)
-# define JEMALLOC_RESTRICT_RETURN
-# define JEMALLOC_ALLOCATOR
-#endif
-
-/*
- * The je_ prefix on the following public symbol declarations is an artifact
- * of namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
- */
-extern JEMALLOC_EXPORT const char *je_malloc_conf;
-extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
- const char *s);
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_malloc(size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
- size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
- size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
- JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
- JEMALLOC_CXX_THROW;
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
- JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
- int flags) JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
- size_t extra, int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
- int flags) JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
- int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
- JEMALLOC_ATTR(pure);
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
- size_t *mibp, size_t *miblenp);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
- void (*write_cb)(void *, const char *), void *je_cbopaque,
- const char *opts);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
- JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
- JEMALLOC_ATTR(malloc);
-#endif
-
-typedef struct extent_hooks_s extent_hooks_t;
-
-/*
- * void *
- * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- * size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
- */
-typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
- bool *, unsigned);
-
-/*
- * bool
- * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * bool committed, unsigned arena_ind);
- */
-typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
- unsigned);
-
-/*
- * void
- * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * bool committed, unsigned arena_ind);
- */
-typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
- unsigned);
-
-/*
- * bool
- * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- unsigned);
-
-/*
- * bool
- * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
- size_t, unsigned);
-
-/*
- * bool
- * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- unsigned);
-
-/*
- * bool
- * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- bool, unsigned);
-
-/*
- * bool
- * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- * void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
- bool, unsigned);
-
-struct extent_hooks_s {
- extent_alloc_t *alloc;
- extent_dalloc_t *dalloc;
- extent_destroy_t *destroy;
- extent_commit_t *commit;
- extent_decommit_t *decommit;
- extent_purge_t *purge_lazy;
- extent_purge_t *purge_forced;
- extent_split_t *split;
- extent_merge_t *merge;
-};
-
-/*
- * By default application code must explicitly refer to mangled symbol names,
- * so that it is possible to use jemalloc in conjunction with another allocator
- * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
- * name mangling that matches the API prefixing that happened as a result of
- * --with-mangling and/or --with-jemalloc-prefix configuration settings.
- */
-#ifdef JEMALLOC_MANGLE
-# ifndef JEMALLOC_NO_DEMANGLE
-# define JEMALLOC_NO_DEMANGLE
-# endif
-# define aligned_alloc je_aligned_alloc
-# define calloc je_calloc
-# define dallocx je_dallocx
-# define free je_free
-# define mallctl je_mallctl
-# define mallctlbymib je_mallctlbymib
-# define mallctlnametomib je_mallctlnametomib
-# define malloc je_malloc
-# define malloc_conf je_malloc_conf
-# define malloc_message je_malloc_message
-# define malloc_stats_print je_malloc_stats_print
-# define malloc_usable_size je_malloc_usable_size
-# define mallocx je_mallocx
-# define smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
-# define nallocx je_nallocx
-# define posix_memalign je_posix_memalign
-# define rallocx je_rallocx
-# define realloc je_realloc
-# define sallocx je_sallocx
-# define sdallocx je_sdallocx
-# define xallocx je_xallocx
-#endif
-
-/*
- * The je_* macros can be used as stable alternative names for the
- * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
- * meant for use in jemalloc itself, but it can be used by application code to
- * provide isolation from the name mangling specified via --with-mangling
- * and/or --with-jemalloc-prefix.
- */
-#ifndef JEMALLOC_NO_DEMANGLE
-# undef je_aligned_alloc
-# undef je_calloc
-# undef je_dallocx
-# undef je_free
-# undef je_mallctl
-# undef je_mallctlbymib
-# undef je_mallctlnametomib
-# undef je_malloc
-# undef je_malloc_conf
-# undef je_malloc_message
-# undef je_malloc_stats_print
-# undef je_malloc_usable_size
-# undef je_mallocx
-# undef je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
-# undef je_nallocx
-# undef je_posix_memalign
-# undef je_rallocx
-# undef je_realloc
-# undef je_sallocx
-# undef je_sdallocx
-# undef je_xallocx
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* JEMALLOC_H_ */
+#ifndef JEMALLOC_H_
+#define JEMALLOC_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Defined if __attribute__((...)) syntax is supported. */
+/* #undef JEMALLOC_HAVE_ATTR */
+
+/* Defined if alloc_size attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
+
+/* Defined if format_arg(...) attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_FORMAT_ARG */
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
+
+/* Defined if format(printf, ...) attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
+/* #undef JEMALLOC_OVERRIDE_VALLOC */
+
+/*
+ * At least Linux omits the "const" in:
+ *
+ * size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#define JEMALLOC_USABLE_SIZE_CONST const
+
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++. The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+/* #undef JEMALLOC_USE_CXX_THROW */
+
+#ifdef _MSC_VER
+# ifdef _WIN64
+# define LG_SIZEOF_PTR_WIN 3
+# else
+# define LG_SIZEOF_PTR_WIN 2
+# endif
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
+
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */
+#ifndef JEMALLOC_NO_RENAME
+# define je_aligned_alloc aligned_alloc
+# define je_calloc calloc
+# define je_dallocx dallocx
+# define je_free free
+# define je_mallctl mallctl
+# define je_mallctlbymib mallctlbymib
+# define je_mallctlnametomib mallctlnametomib
+# define je_malloc malloc
+# define je_malloc_conf malloc_conf
+# define je_malloc_message malloc_message
+# define je_malloc_stats_print malloc_stats_print
+# define je_malloc_usable_size malloc_usable_size
+# define je_mallocx mallocx
+# define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# define je_nallocx nallocx
+# define je_posix_memalign posix_memalign
+# define je_rallocx rallocx
+# define je_realloc realloc
+# define je_sallocx sallocx
+# define je_sdallocx sdallocx
+# define je_xallocx xallocx
+#endif
+
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <limits.h>
+#include <strings.h>
+
+#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756"
+#define JEMALLOC_VERSION_MAJOR 5
+#define JEMALLOC_VERSION_MINOR 2
+#define JEMALLOC_VERSION_BUGFIX 1
+#define JEMALLOC_VERSION_NREV 0
+#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756"
+#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756
+
+#define MALLOCX_LG_ALIGN(la) ((int)(la))
+#if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+#else
+# define MALLOCX_ALIGN(a) \
+ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
+ ffs((int)(((size_t)(a))>>32))+31))
+#endif
+#define MALLOCX_ZERO ((int)0x40)
+/*
+ * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
+ * encodes MALLOCX_TCACHE_NONE.
+ */
+#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
+#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
+/*
+ * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
+ */
+#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
+
+/*
+ * Use as arena index in "arena.<i>.{purge,decay,dss}" and
+ * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
+ * definition is intentionally specified in raw decimal format to support
+ * cpp-based string concatenation, e.g.
+ *
+ * #define STRINGIFY_HELPER(x) #x
+ * #define STRINGIFY(x) STRINGIFY_HELPER(x)
+ *
+ * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
+ * 0);
+ */
+#define MALLCTL_ARENAS_ALL 4096
+/*
+ * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
+ * destroyed arenas.
+ */
+#define MALLCTL_ARENAS_DESTROYED 4097
+
+#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
+# define JEMALLOC_CXX_THROW throw()
+#else
+# define JEMALLOC_CXX_THROW
+#endif
+
+#if defined(_MSC_VER)
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_ALIGNED(s) __declspec(align(s))
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# ifndef JEMALLOC_EXPORT
+# ifdef DLLEXPORT
+# define JEMALLOC_EXPORT __declspec(dllexport)
+# else
+# define JEMALLOC_EXPORT __declspec(dllimport)
+# endif
+# endif
+# define JEMALLOC_FORMAT_ARG(i)
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_NOINLINE __declspec(noinline)
+# ifdef __cplusplus
+# define JEMALLOC_NOTHROW __declspec(nothrow)
+# else
+# define JEMALLOC_NOTHROW
+# endif
+# define JEMALLOC_SECTION(s) __declspec(allocate(s))
+# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
+# if _MSC_VER >= 1900 && !defined(__EDG__)
+# define JEMALLOC_ALLOCATOR __declspec(allocator)
+# else
+# define JEMALLOC_ALLOCATOR
+# endif
+#elif defined(JEMALLOC_HAVE_ATTR)
+# define JEMALLOC_ATTR(s) __attribute__((s))
+# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
+# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
+# else
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# endif
+# ifndef JEMALLOC_EXPORT
+# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
+# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
+# else
+# define JEMALLOC_FORMAT_ARG(i)
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
+# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
+# else
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# endif
+# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
+# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+#else
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_ALIGNED(s)
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# define JEMALLOC_EXPORT
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_NOINLINE
+# define JEMALLOC_NOTHROW
+# define JEMALLOC_SECTION(s)
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+#endif
+
+/*
+ * The je_ prefix on the following public symbol declarations is an artifact
+ * of namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
+ */
+extern JEMALLOC_EXPORT const char *je_malloc_conf;
+extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
+ const char *s);
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_malloc(size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
+ size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
+ size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
+ JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
+ JEMALLOC_CXX_THROW;
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
+ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
+ int flags) JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
+ size_t extra, int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
+ int flags) JEMALLOC_ATTR(pure);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
+ int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
+ JEMALLOC_ATTR(pure);
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
+ size_t *mibp, size_t *miblenp);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
+ void (*write_cb)(void *, const char *), void *je_cbopaque,
+ const char *opts);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
+
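+/*
+ * Illustrative usage sketch (editorial addition, not part of the upstream
+ * header): one way an application might drive the je_-prefixed API directly.
+ * The MALLOCX_* flag macros and the "stats.allocated" mallctl name are
+ * assumed to match the jemalloc version vendored here; verify them before
+ * relying on this snippet.
+ *
+ *   #include <string.h>
+ *   #include <jemalloc/jemalloc.h>
+ *
+ *   static size_t report_allocated(void) {
+ *           size_t allocated = 0, sz = sizeof(allocated);
+ *           // Refreshed only when the "epoch" mallctl is bumped; stats may be
+ *           // compiled out, in which case mallctl returns an error.
+ *           if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
+ *                   return 0;
+ *           return allocated;
+ *   }
+ *
+ *   int main(void) {
+ *           void *p = je_mallocx(4096, MALLOCX_ALIGN(64));   // 64-byte aligned
+ *           if (p != NULL) {
+ *                   memset(p, 0, je_sallocx(p, 0));          // usable size
+ *                   je_sdallocx(p, 4096, MALLOCX_ALIGN(64)); // sized free
+ *           }
+ *           (void)report_allocated();
+ *           return 0;
+ *   }
+ */
+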
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
+ JEMALLOC_ATTR(malloc);
+#endif
+
+typedef struct extent_hooks_s extent_hooks_t;
+
+/*
+ * void *
+ * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ * size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+ */
+typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
+ bool *, unsigned);
+
+/*
+ * bool
+ * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
+
+/*
+ * void
+ * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
+ */
+typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
+
+/*
+ * bool
+ * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
+
+/*
+ * bool
+ * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
+ size_t, unsigned);
+
+/*
+ * bool
+ * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
+
+/*
+ * bool
+ * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ bool, unsigned);
+
+/*
+ * bool
+ * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ * void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
+ bool, unsigned);
+
+struct extent_hooks_s {
+ extent_alloc_t *alloc;
+ extent_dalloc_t *dalloc;
+ extent_destroy_t *destroy;
+ extent_commit_t *commit;
+ extent_decommit_t *decommit;
+ extent_purge_t *purge_lazy;
+ extent_purge_t *purge_forced;
+ extent_split_t *split;
+ extent_merge_t *merge;
+};
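+
+/*
+ * Illustrative sketch (editorial addition, not upstream code): a custom alloc
+ * hook that delegates to the default hooks and counts bytes handed out.  The
+ * "arena.<i>.extent_hooks" mallctl used below is the documented install
+ * mechanism; arena index 0, the global counter, and the lack of locking are
+ * simplifications for the example only, and production code would normally
+ * create a dedicated arena via the "arenas.create" mallctl instead.
+ *
+ *   static extent_hooks_t *default_hooks;
+ *   static extent_hooks_t counting_hooks;
+ *   static size_t hook_allocated;
+ *
+ *   static void *
+ *   counting_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
+ *       size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ *           void *ret = default_hooks->alloc(default_hooks, new_addr, size,
+ *               alignment, zero, commit, arena_ind);
+ *           if (ret != NULL)
+ *                   hook_allocated += size;     // demo only: not thread-safe
+ *           return ret;
+ *   }
+ *
+ *   static void install_hooks(void) {
+ *           size_t sz = sizeof(default_hooks);
+ *           je_mallctl("arena.0.extent_hooks", &default_hooks, &sz, NULL, 0);
+ *           counting_hooks = *default_hooks;    // start from the defaults
+ *           counting_hooks.alloc = counting_alloc;
+ *           extent_hooks_t *newp = &counting_hooks;
+ *           je_mallctl("arena.0.extent_hooks", NULL, NULL, &newp, sizeof(newp));
+ *   }
+ */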
+
+/*
+ * By default application code must explicitly refer to mangled symbol names,
+ * so that it is possible to use jemalloc in conjunction with another allocator
+ * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
+ * name mangling that matches the API prefixing that happened as a result of
+ * --with-mangling and/or --with-jemalloc-prefix configuration settings.
+ */
+#ifdef JEMALLOC_MANGLE
+# ifndef JEMALLOC_NO_DEMANGLE
+# define JEMALLOC_NO_DEMANGLE
+# endif
+# define aligned_alloc je_aligned_alloc
+# define calloc je_calloc
+# define dallocx je_dallocx
+# define free je_free
+# define mallctl je_mallctl
+# define mallctlbymib je_mallctlbymib
+# define mallctlnametomib je_mallctlnametomib
+# define malloc je_malloc
+# define malloc_conf je_malloc_conf
+# define malloc_message je_malloc_message
+# define malloc_stats_print je_malloc_stats_print
+# define malloc_usable_size je_malloc_usable_size
+# define mallocx je_mallocx
+# define smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# define nallocx je_nallocx
+# define posix_memalign je_posix_memalign
+# define rallocx je_rallocx
+# define realloc je_realloc
+# define sallocx je_sallocx
+# define sdallocx je_sdallocx
+# define xallocx je_xallocx
+#endif
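+
+/*
+ * Sketch of the intended build-time usage (editorial addition; the translation
+ * unit below is hypothetical): defining JEMALLOC_MANGLE before including the
+ * header lets code keep the standard names while resolving to jemalloc.
+ *
+ *   #define JEMALLOC_MANGLE
+ *   #include <jemalloc/jemalloc.h>
+ *
+ *   int main(void) {
+ *           void *p = malloc(64);   // expands to je_malloc(64)
+ *           free(p);                // expands to je_free(p)
+ *           return 0;
+ *   }
+ */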
+
+/*
+ * The je_* macros can be used as stable alternative names for the
+ * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
+ * meant for use in jemalloc itself, but it can be used by application code to
+ * provide isolation from the name mangling specified via --with-mangling
+ * and/or --with-jemalloc-prefix.
+ */
+#ifndef JEMALLOC_NO_DEMANGLE
+# undef je_aligned_alloc
+# undef je_calloc
+# undef je_dallocx
+# undef je_free
+# undef je_mallctl
+# undef je_mallctlbymib
+# undef je_mallctlnametomib
+# undef je_malloc
+# undef je_malloc_conf
+# undef je_malloc_message
+# undef je_malloc_stats_print
+# undef je_malloc_usable_size
+# undef je_mallocx
+# undef je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# undef je_nallocx
+# undef je_posix_memalign
+# undef je_rallocx
+# undef je_realloc
+# undef je_sallocx
+# undef je_sdallocx
+# undef je_xallocx
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* JEMALLOC_H_ */
diff --git a/contrib/libs/jemalloc/include/jemalloc/jemalloc.h b/contrib/libs/jemalloc/include/jemalloc/jemalloc.h
index c9694d1ddb..1ac419ed64 100644
--- a/contrib/libs/jemalloc/include/jemalloc/jemalloc.h
+++ b/contrib/libs/jemalloc/include/jemalloc/jemalloc.h
@@ -1,9 +1,9 @@
-#pragma once
+#pragma once
-#if defined(__APPLE__)
-# include "jemalloc-osx.h"
-#elif defined(_MSC_VER)
-# include "jemalloc-win.h"
+#if defined(__APPLE__)
+# include "jemalloc-osx.h"
+#elif defined(_MSC_VER)
+# include "jemalloc-win.h"
#else
-# include "jemalloc-linux.h"
+# include "jemalloc-linux.h"
#endif
diff --git a/contrib/libs/jemalloc/include/msvc_compat/strings.h b/contrib/libs/jemalloc/include/msvc_compat/strings.h
index 996f256ce8..73cffe5399 100644
--- a/contrib/libs/jemalloc/include/msvc_compat/strings.h
+++ b/contrib/libs/jemalloc/include/msvc_compat/strings.h
@@ -3,56 +3,56 @@
/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
* for both */
-#ifdef _MSC_VER
-# include <intrin.h>
-# pragma intrinsic(_BitScanForward)
-static __forceinline int ffsl(long x) {
+#ifdef _MSC_VER
+# include <intrin.h>
+# pragma intrinsic(_BitScanForward)
+static __forceinline int ffsl(long x) {
unsigned long i;
- if (_BitScanForward(&i, x)) {
- return i + 1;
- }
- return 0;
+ if (_BitScanForward(&i, x)) {
+ return i + 1;
+ }
+ return 0;
}
-static __forceinline int ffs(int x) {
- return ffsl(x);
+static __forceinline int ffs(int x) {
+ return ffsl(x);
+}
+
+# ifdef _M_X64
+# pragma intrinsic(_BitScanForward64)
+# endif
+
+static __forceinline int ffsll(unsigned __int64 x) {
+ unsigned long i;
+#ifdef _M_X64
+ if (_BitScanForward64(&i, x)) {
+ return i + 1;
+ }
+ return 0;
+#else
+// Fallback for 32-bit builds where the 64-bit intrinsic is not available;
+// assumes a little-endian layout.
+ union {
+ unsigned __int64 ll;
+ unsigned long l[2];
+ } s;
+
+ s.ll = x;
+
+ if (_BitScanForward(&i, s.l[0])) {
+ return i + 1;
+ } else if(_BitScanForward(&i, s.l[1])) {
+ return i + 33;
+ }
+ return 0;
+#endif
}
-# ifdef _M_X64
-# pragma intrinsic(_BitScanForward64)
-# endif
-
-static __forceinline int ffsll(unsigned __int64 x) {
- unsigned long i;
-#ifdef _M_X64
- if (_BitScanForward64(&i, x)) {
- return i + 1;
- }
- return 0;
-#else
-// Fallback for 32-bit build where 64-bit version not available
-// assuming little endian
- union {
- unsigned __int64 ll;
- unsigned long l[2];
- } s;
-
- s.ll = x;
-
- if (_BitScanForward(&i, s.l[0])) {
- return i + 1;
- } else if(_BitScanForward(&i, s.l[1])) {
- return i + 33;
- }
- return 0;
+#else
+# define ffsll(x) __builtin_ffsll(x)
+# define ffsl(x) __builtin_ffsl(x)
+# define ffs(x) __builtin_ffs(x)
#endif
-}
-
-#else
-# define ffsll(x) __builtin_ffsll(x)
-# define ffsl(x) __builtin_ffsl(x)
-# define ffs(x) __builtin_ffs(x)
-#endif
-
-#endif /* strings_h */
+
+#endif /* strings_h */
diff --git a/contrib/libs/jemalloc/include/msvc_compat/windows_extra.h b/contrib/libs/jemalloc/include/msvc_compat/windows_extra.h
index a6ebb9306f..21e5482881 100644
--- a/contrib/libs/jemalloc/include/msvc_compat/windows_extra.h
+++ b/contrib/libs/jemalloc/include/msvc_compat/windows_extra.h
@@ -1,6 +1,6 @@
-#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
-#define MSVC_COMPAT_WINDOWS_EXTRA_H
-
-#include <errno.h>
-
-#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
+#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
+#define MSVC_COMPAT_WINDOWS_EXTRA_H
+
+#include <errno.h>
+
+#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
diff --git a/contrib/libs/jemalloc/reg_zone.cpp b/contrib/libs/jemalloc/reg_zone.cpp
index 6a7b9d69e1..ba0fcd5345 100644
--- a/contrib/libs/jemalloc/reg_zone.cpp
+++ b/contrib/libs/jemalloc/reg_zone.cpp
@@ -1,6 +1,6 @@
#include <util/system/compiler.h>
-extern "C" void je_zone_register();
+extern "C" void je_zone_register();
static volatile bool initialized = false;
@@ -8,19 +8,19 @@ namespace {
struct TInit {
inline TInit() {
if (!initialized) {
- je_zone_register();
+ je_zone_register();
initialized = true;
}
}
};
- void zone_register() {
+ void zone_register() {
static TInit init;
}
}
extern "C" {
- void je_assure_zone_register() {
+ void je_assure_zone_register() {
if (Y_LIKELY(initialized)) {
return;
}
@@ -28,6 +28,6 @@ extern "C" {
// Even if we have read a stale false value of "initialized", the real init will be synchronized once by
// the Meyers singleton in <anonymous>::zone_register(). We could do a few
// redundant "initialized" and singleton creation checks, but no more than that.
- zone_register();
+ zone_register();
}
}
diff --git a/contrib/libs/jemalloc/src/arena.c b/contrib/libs/jemalloc/src/arena.c
index ba50e41033..449c367c23 100644
--- a/contrib/libs/jemalloc/src/arena.c
+++ b/contrib/libs/jemalloc/src/arena.c
@@ -1,2298 +1,2298 @@
-#define JEMALLOC_ARENA_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/div.h"
-#include "jemalloc/internal/extent_dss.h"
-#include "jemalloc/internal/extent_mmap.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/safety_check.h"
-#include "jemalloc/internal/util.h"
-
-JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-
+#define JEMALLOC_ARENA_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/div.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/safety_check.h"
+#include "jemalloc/internal/util.h"
+
+JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
+
/******************************************************************************/
/* Data. */
-/*
- * Define names for both unininitialized and initialized phases, so that
- * options and mallctl processing are straightforward.
- */
-const char *percpu_arena_mode_names[] = {
- "percpu",
- "phycpu",
- "disabled",
- "percpu",
- "phycpu"
-};
-percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
-
-ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
-ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
-
-static atomic_zd_t dirty_decay_ms_default;
-static atomic_zd_t muzzy_decay_ms_default;
-
-const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
-#define STEP(step, h, x, y) \
- h,
- SMOOTHSTEP
-#undef STEP
+/*
+ * Define names for both uninitialized and initialized phases, so that
+ * options and mallctl processing are straightforward.
+ */
+const char *percpu_arena_mode_names[] = {
+ "percpu",
+ "phycpu",
+ "disabled",
+ "percpu",
+ "phycpu"
+};
+percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
+
+ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
+ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
+
+static atomic_zd_t dirty_decay_ms_default;
+static atomic_zd_t muzzy_decay_ms_default;
+
+const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
};
-static div_info_t arena_binind_div_info[SC_NBINS];
-
-size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
-size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
-static unsigned huge_arena_ind;
-
+static div_info_t arena_binind_div_info[SC_NBINS];
+
+size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
+size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
+static unsigned huge_arena_ind;
+
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
-static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
- size_t npages_decay_max, bool is_background_thread);
-static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
- bool is_background_thread, bool all);
-static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- bin_t *bin);
-static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- bin_t *bin);
+static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
+ arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
+ size_t npages_decay_max, bool is_background_thread);
+static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
+ bool is_background_thread, bool all);
+static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ bin_t *bin);
+static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ bin_t *bin);
/******************************************************************************/
-void
-arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
- size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
- *nthreads += arena_nthreads_get(arena, false);
- *dss = dss_prec_names[arena_dss_prec_get(arena)];
- *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
- *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
- *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
- *ndirty += extents_npages_get(&arena->extents_dirty);
- *nmuzzy += extents_npages_get(&arena->extents_muzzy);
-}
-
-void
-arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
- size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
- bin_stats_t *bstats, arena_stats_large_t *lstats,
- arena_stats_extents_t *estats) {
- cassert(config_stats);
-
- arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
- muzzy_decay_ms, nactive, ndirty, nmuzzy);
-
- size_t base_allocated, base_resident, base_mapped, metadata_thp;
- base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
- &base_mapped, &metadata_thp);
-
- arena_stats_lock(tsdn, &arena->stats);
-
- arena_stats_accum_zu(&astats->mapped, base_mapped
- + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
- arena_stats_accum_zu(&astats->retained,
- extents_npages_get(&arena->extents_retained) << LG_PAGE);
-
- atomic_store_zu(&astats->extent_avail,
- atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
- ATOMIC_RELAXED);
-
- arena_stats_accum_u64(&astats->decay_dirty.npurge,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_dirty.npurge));
- arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_dirty.nmadvise));
- arena_stats_accum_u64(&astats->decay_dirty.purged,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_dirty.purged));
-
- arena_stats_accum_u64(&astats->decay_muzzy.npurge,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_muzzy.npurge));
- arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_muzzy.nmadvise));
- arena_stats_accum_u64(&astats->decay_muzzy.purged,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_muzzy.purged));
-
- arena_stats_accum_zu(&astats->base, base_allocated);
- arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
- arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
- arena_stats_accum_zu(&astats->resident, base_resident +
- (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
- extents_npages_get(&arena->extents_dirty) +
- extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
- arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
- &arena->stats.abandoned_vm, ATOMIC_RELAXED));
-
- for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
- uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.lstats[i].nmalloc);
- arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
- arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
-
- uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.lstats[i].ndalloc);
- arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
- arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
-
- uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.lstats[i].nrequests);
- arena_stats_accum_u64(&lstats[i].nrequests,
- nmalloc + nrequests);
- arena_stats_accum_u64(&astats->nrequests_large,
- nmalloc + nrequests);
-
- /* nfill == nmalloc for large currently. */
- arena_stats_accum_u64(&lstats[i].nfills, nmalloc);
- arena_stats_accum_u64(&astats->nfills_large, nmalloc);
-
- uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.lstats[i].nflushes);
- arena_stats_accum_u64(&lstats[i].nflushes, nflush);
- arena_stats_accum_u64(&astats->nflushes_large, nflush);
-
- assert(nmalloc >= ndalloc);
- assert(nmalloc - ndalloc <= SIZE_T_MAX);
- size_t curlextents = (size_t)(nmalloc - ndalloc);
- lstats[i].curlextents += curlextents;
- arena_stats_accum_zu(&astats->allocated_large,
- curlextents * sz_index2size(SC_NBINS + i));
- }
-
- for (pszind_t i = 0; i < SC_NPSIZES; i++) {
- size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
- retained_bytes;
- dirty = extents_nextents_get(&arena->extents_dirty, i);
- muzzy = extents_nextents_get(&arena->extents_muzzy, i);
- retained = extents_nextents_get(&arena->extents_retained, i);
- dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
- muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
- retained_bytes =
- extents_nbytes_get(&arena->extents_retained, i);
-
- atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
- ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
- ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
- ATOMIC_RELAXED);
- }
-
- arena_stats_unlock(tsdn, &arena->stats);
-
- /* tcache_bytes counts currently cached bytes. */
- atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
- malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
- cache_bin_array_descriptor_t *descriptor;
- ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
- szind_t i = 0;
- for (; i < SC_NBINS; i++) {
- cache_bin_t *tbin = &descriptor->bins_small[i];
- arena_stats_accum_zu(&astats->tcache_bytes,
- tbin->ncached * sz_index2size(i));
- }
- for (; i < nhbins; i++) {
- cache_bin_t *tbin = &descriptor->bins_large[i];
- arena_stats_accum_zu(&astats->tcache_bytes,
- tbin->ncached * sz_index2size(i));
+void
+arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+ size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
+ *nthreads += arena_nthreads_get(arena, false);
+ *dss = dss_prec_names[arena_dss_prec_get(arena)];
+ *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
+ *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
+ *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
+ *ndirty += extents_npages_get(&arena->extents_dirty);
+ *nmuzzy += extents_npages_get(&arena->extents_muzzy);
+}
+
+void
+arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+ size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
+ bin_stats_t *bstats, arena_stats_large_t *lstats,
+ arena_stats_extents_t *estats) {
+ cassert(config_stats);
+
+ arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
+ muzzy_decay_ms, nactive, ndirty, nmuzzy);
+
+ size_t base_allocated, base_resident, base_mapped, metadata_thp;
+ base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
+ &base_mapped, &metadata_thp);
+
+ arena_stats_lock(tsdn, &arena->stats);
+
+ arena_stats_accum_zu(&astats->mapped, base_mapped
+ + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
+ arena_stats_accum_zu(&astats->retained,
+ extents_npages_get(&arena->extents_retained) << LG_PAGE);
+
+ atomic_store_zu(&astats->extent_avail,
+ atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
+ ATOMIC_RELAXED);
+
+ arena_stats_accum_u64(&astats->decay_dirty.npurge,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.npurge));
+ arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.nmadvise));
+ arena_stats_accum_u64(&astats->decay_dirty.purged,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.purged));
+
+ arena_stats_accum_u64(&astats->decay_muzzy.npurge,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.npurge));
+ arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.nmadvise));
+ arena_stats_accum_u64(&astats->decay_muzzy.purged,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.purged));
+
+ arena_stats_accum_zu(&astats->base, base_allocated);
+ arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
+ arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
+ arena_stats_accum_zu(&astats->resident, base_resident +
+ (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
+ extents_npages_get(&arena->extents_dirty) +
+ extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
+ arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
+ &arena->stats.abandoned_vm, ATOMIC_RELAXED));
+
+ for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
+ uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].nmalloc);
+ arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
+ arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
+
+ uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].ndalloc);
+ arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
+ arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
+
+ uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].nrequests);
+ arena_stats_accum_u64(&lstats[i].nrequests,
+ nmalloc + nrequests);
+ arena_stats_accum_u64(&astats->nrequests_large,
+ nmalloc + nrequests);
+
+ /* nfill == nmalloc for large currently. */
+ arena_stats_accum_u64(&lstats[i].nfills, nmalloc);
+ arena_stats_accum_u64(&astats->nfills_large, nmalloc);
+
+ uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].nflushes);
+ arena_stats_accum_u64(&lstats[i].nflushes, nflush);
+ arena_stats_accum_u64(&astats->nflushes_large, nflush);
+
+ assert(nmalloc >= ndalloc);
+ assert(nmalloc - ndalloc <= SIZE_T_MAX);
+ size_t curlextents = (size_t)(nmalloc - ndalloc);
+ lstats[i].curlextents += curlextents;
+ arena_stats_accum_zu(&astats->allocated_large,
+ curlextents * sz_index2size(SC_NBINS + i));
+ }
+
+ for (pszind_t i = 0; i < SC_NPSIZES; i++) {
+ size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
+ retained_bytes;
+ dirty = extents_nextents_get(&arena->extents_dirty, i);
+ muzzy = extents_nextents_get(&arena->extents_muzzy, i);
+ retained = extents_nextents_get(&arena->extents_retained, i);
+ dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
+ muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
+ retained_bytes =
+ extents_nbytes_get(&arena->extents_retained, i);
+
+ atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
+ atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
+ atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
+ atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
+ ATOMIC_RELAXED);
+ atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
+ ATOMIC_RELAXED);
+ atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
+ ATOMIC_RELAXED);
+ }
+
+ arena_stats_unlock(tsdn, &arena->stats);
+
+ /* tcache_bytes counts currently cached bytes. */
+ atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
+ cache_bin_array_descriptor_t *descriptor;
+ ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
+ szind_t i = 0;
+ for (; i < SC_NBINS; i++) {
+ cache_bin_t *tbin = &descriptor->bins_small[i];
+ arena_stats_accum_zu(&astats->tcache_bytes,
+ tbin->ncached * sz_index2size(i));
}
- }
- malloc_mutex_prof_read(tsdn,
- &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
- &arena->tcache_ql_mtx);
- malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
-
-#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
- malloc_mutex_lock(tsdn, &arena->mtx); \
- malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
- &arena->mtx); \
- malloc_mutex_unlock(tsdn, &arena->mtx);
-
- /* Gather per arena mutex profiling data. */
- READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
- READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
- arena_prof_mutex_extent_avail)
- READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
- arena_prof_mutex_extents_dirty)
- READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
- arena_prof_mutex_extents_muzzy)
- READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
- arena_prof_mutex_extents_retained)
- READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
- arena_prof_mutex_decay_dirty)
- READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
- arena_prof_mutex_decay_muzzy)
- READ_ARENA_MUTEX_PROF_DATA(base->mtx,
- arena_prof_mutex_base)
-#undef READ_ARENA_MUTEX_PROF_DATA
-
- nstime_copy(&astats->uptime, &arena->create_time);
- nstime_update(&astats->uptime);
- nstime_subtract(&astats->uptime, &arena->create_time);
-
- for (szind_t i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_stats_merge(tsdn, &bstats[i],
- &arena->bins[i].bin_shards[j]);
- }
- }
-}
-
-void
-arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
- extent);
- if (arena_dirty_decay_ms_get(arena) == 0) {
- arena_decay_dirty(tsdn, arena, false, true);
- } else {
- arena_background_thread_inactivity_check(tsdn, arena, false);
- }
-}
-
-static void *
-arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
- void *ret;
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
- size_t regind;
-
- assert(extent_nfree_get(slab) > 0);
- assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
-
- regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
- ret = (void *)((uintptr_t)extent_addr_get(slab) +
- (uintptr_t)(bin_info->reg_size * regind));
- extent_nfree_dec(slab);
- return ret;
-}
-
-static void
-arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
- unsigned cnt, void** ptrs) {
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
-
- assert(extent_nfree_get(slab) >= cnt);
- assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
-
-#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
- for (unsigned i = 0; i < cnt; i++) {
- size_t regind = bitmap_sfu(slab_data->bitmap,
- &bin_info->bitmap_info);
- *(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
- (uintptr_t)(bin_info->reg_size * regind));
- }
-#else
- unsigned group = 0;
- bitmap_t g = slab_data->bitmap[group];
- unsigned i = 0;
- while (i < cnt) {
- while (g == 0) {
- g = slab_data->bitmap[++group];
- }
- size_t shift = group << LG_BITMAP_GROUP_NBITS;
- size_t pop = popcount_lu(g);
- if (pop > (cnt - i)) {
- pop = cnt - i;
- }
-
- /*
- * Load from memory locations only once, outside the
- * hot loop below.
- */
- uintptr_t base = (uintptr_t)extent_addr_get(slab);
- uintptr_t regsize = (uintptr_t)bin_info->reg_size;
- while (pop--) {
- size_t bit = cfs_lu(&g);
- size_t regind = shift + bit;
- *(ptrs + i) = (void *)(base + regsize * regind);
-
- i++;
- }
- slab_data->bitmap[group] = g;
- }
-#endif
- extent_nfree_sub(slab, cnt);
-}
-
-#ifndef JEMALLOC_JET
-static
-#endif
-size_t
-arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
- size_t diff, regind;
-
- /* Freeing a pointer outside the slab can cause assertion failure. */
- assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
- assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
- /* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
- (uintptr_t)bin_infos[binind].reg_size == 0);
-
- diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
-
- /* Avoid doing division with a variable divisor. */
- regind = div_compute(&arena_binind_div_info[binind], diff);
-
- assert(regind < bin_infos[binind].nregs);
-
- return regind;
-}
-
-static void
-arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
- szind_t binind = extent_szind_get(slab);
- const bin_info_t *bin_info = &bin_infos[binind];
- size_t regind = arena_slab_regind(slab, binind, ptr);
-
- assert(extent_nfree_get(slab) < bin_info->nregs);
- /* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
-
- bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
- extent_nfree_inc(slab);
-}
-
-static void
-arena_nactive_add(arena_t *arena, size_t add_pages) {
- atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
-}
-
-static void
-arena_nactive_sub(arena_t *arena, size_t sub_pages) {
- assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
- atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
+ for (; i < nhbins; i++) {
+ cache_bin_t *tbin = &descriptor->bins_large[i];
+ arena_stats_accum_zu(&astats->tcache_bytes,
+ tbin->ncached * sz_index2size(i));
+ }
+ }
+ malloc_mutex_prof_read(tsdn,
+ &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
+ &arena->tcache_ql_mtx);
+ malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
+
+#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
+ malloc_mutex_lock(tsdn, &arena->mtx); \
+ malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
+ &arena->mtx); \
+ malloc_mutex_unlock(tsdn, &arena->mtx);
+
+ /* Gather per arena mutex profiling data. */
+ READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
+ READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
+ arena_prof_mutex_extent_avail)
+ READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
+ arena_prof_mutex_extents_dirty)
+ READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
+ arena_prof_mutex_extents_muzzy)
+ READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
+ arena_prof_mutex_extents_retained)
+ READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
+ arena_prof_mutex_decay_dirty)
+ READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
+ arena_prof_mutex_decay_muzzy)
+ READ_ARENA_MUTEX_PROF_DATA(base->mtx,
+ arena_prof_mutex_base)
+#undef READ_ARENA_MUTEX_PROF_DATA
+
+ nstime_copy(&astats->uptime, &arena->create_time);
+ nstime_update(&astats->uptime);
+ nstime_subtract(&astats->uptime, &arena->create_time);
+
+ for (szind_t i = 0; i < SC_NBINS; i++) {
+ for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+ bin_stats_merge(tsdn, &bstats[i],
+ &arena->bins[i].bin_shards[j]);
+ }
+ }
+}
+
+void
+arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
+ extent);
+ if (arena_dirty_decay_ms_get(arena) == 0) {
+ arena_decay_dirty(tsdn, arena, false, true);
+ } else {
+ arena_background_thread_inactivity_check(tsdn, arena, false);
+ }
+}
+
+static void *
+arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
+ void *ret;
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ size_t regind;
+
+ assert(extent_nfree_get(slab) > 0);
+ assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
+
+ regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
+ ret = (void *)((uintptr_t)extent_addr_get(slab) +
+ (uintptr_t)(bin_info->reg_size * regind));
+ extent_nfree_dec(slab);
+ return ret;
}
static void
-arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
- szind_t index, hindex;
+arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
+ unsigned cnt, void** ptrs) {
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+
+ assert(extent_nfree_get(slab) >= cnt);
+ assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
+
+#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
+ for (unsigned i = 0; i < cnt; i++) {
+ size_t regind = bitmap_sfu(slab_data->bitmap,
+ &bin_info->bitmap_info);
+ *(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
+ (uintptr_t)(bin_info->reg_size * regind));
+ }
+#else
+ unsigned group = 0;
+ bitmap_t g = slab_data->bitmap[group];
+ unsigned i = 0;
+ while (i < cnt) {
+ while (g == 0) {
+ g = slab_data->bitmap[++group];
+ }
+ size_t shift = group << LG_BITMAP_GROUP_NBITS;
+ size_t pop = popcount_lu(g);
+ if (pop > (cnt - i)) {
+ pop = cnt - i;
+ }
+
+ /*
+ * Load from memory locations only once, outside the
+ * hot loop below.
+ */
+ uintptr_t base = (uintptr_t)extent_addr_get(slab);
+ uintptr_t regsize = (uintptr_t)bin_info->reg_size;
+ while (pop--) {
+ size_t bit = cfs_lu(&g);
+ size_t regind = shift + bit;
+ *(ptrs + i) = (void *)(base + regsize * regind);
+
+ i++;
+ }
+ slab_data->bitmap[group] = g;
+ }
+#endif
+ extent_nfree_sub(slab, cnt);
+}
+
+#ifndef JEMALLOC_JET
+static
+#endif
+size_t
+arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
+ size_t diff, regind;
+
+ /* Freeing a pointer outside the slab can cause assertion failure. */
+ assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
+ assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
+ /* Freeing an interior pointer can cause assertion failure. */
+ assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
+ (uintptr_t)bin_infos[binind].reg_size == 0);
+
+ diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
+
+ /* Avoid doing division with a variable divisor. */
+ regind = div_compute(&arena_binind_div_info[binind], diff);
+
+ assert(regind < bin_infos[binind].nregs);
+
+ return regind;
+}
+
+static void
+arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
+ szind_t binind = extent_szind_get(slab);
+ const bin_info_t *bin_info = &bin_infos[binind];
+ size_t regind = arena_slab_regind(slab, binind, ptr);
+
+ assert(extent_nfree_get(slab) < bin_info->nregs);
+ /* Freeing an unallocated pointer can cause assertion failure. */
+ assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
+
+ bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
+ extent_nfree_inc(slab);
+}
+
+static void
+arena_nactive_add(arena_t *arena, size_t add_pages) {
+ atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
+}
+
+static void
+arena_nactive_sub(arena_t *arena, size_t sub_pages) {
+ assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
+ atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
+}
+
+static void
+arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
+ szind_t index, hindex;
+
+ cassert(config_stats);
- cassert(config_stats);
-
- if (usize < SC_LARGE_MINCLASS) {
- usize = SC_LARGE_MINCLASS;
- }
- index = sz_size2index(usize);
- hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
-
- arena_stats_add_u64(tsdn, &arena->stats,
- &arena->stats.lstats[hindex].nmalloc, 1);
+ if (usize < SC_LARGE_MINCLASS) {
+ usize = SC_LARGE_MINCLASS;
+ }
+ index = sz_size2index(usize);
+ hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
+
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[hindex].nmalloc, 1);
}
static void
-arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
- szind_t index, hindex;
-
- cassert(config_stats);
+arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
+ szind_t index, hindex;
- if (usize < SC_LARGE_MINCLASS) {
- usize = SC_LARGE_MINCLASS;
+ cassert(config_stats);
+
+ if (usize < SC_LARGE_MINCLASS) {
+ usize = SC_LARGE_MINCLASS;
}
- index = sz_size2index(usize);
- hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
-
- arena_stats_add_u64(tsdn, &arena->stats,
- &arena->stats.lstats[hindex].ndalloc, 1);
+ index = sz_size2index(usize);
+ hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
+
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[hindex].ndalloc, 1);
}
static void
-arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
- size_t usize) {
- arena_large_dalloc_stats_update(tsdn, arena, oldusize);
- arena_large_malloc_stats_update(tsdn, arena, usize);
-}
-
-static bool
-arena_may_have_muzzy(arena_t *arena) {
- return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
-}
-
-extent_t *
-arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool *zero) {
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- szind_t szind = sz_size2index(usize);
- size_t mapped_add;
- bool commit = true;
- extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
- szind, zero, &commit);
- if (extent == NULL && arena_may_have_muzzy(arena)) {
- extent = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
- false, szind, zero, &commit);
- }
- size_t size = usize + sz_large_pad;
- if (extent == NULL) {
- extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
- usize, sz_large_pad, alignment, false, szind, zero,
- &commit);
- if (config_stats) {
+arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
+ size_t usize) {
+ arena_large_dalloc_stats_update(tsdn, arena, oldusize);
+ arena_large_malloc_stats_update(tsdn, arena, usize);
+}
+
+static bool
+arena_may_have_muzzy(arena_t *arena) {
+ return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
+}
+
+extent_t *
+arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool *zero) {
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ szind_t szind = sz_size2index(usize);
+ size_t mapped_add;
+ bool commit = true;
+ extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
+ szind, zero, &commit);
+ if (extent == NULL && arena_may_have_muzzy(arena)) {
+ extent = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
+ false, szind, zero, &commit);
+ }
+ size_t size = usize + sz_large_pad;
+ if (extent == NULL) {
+ extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
+ usize, sz_large_pad, alignment, false, szind, zero,
+ &commit);
+ if (config_stats) {
/*
- * extent may be NULL on OOM, but in that case
- * mapped_add isn't used below, so there's no need to
- * conditionlly set it to 0 here.
+ * extent may be NULL on OOM, but in that case
+ * mapped_add isn't used below, so there's no need to
+ * conditionally set it to 0 here.
*/
- mapped_add = size;
- }
- } else if (config_stats) {
- mapped_add = 0;
- }
-
- if (extent != NULL) {
- if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
- arena_large_malloc_stats_update(tsdn, arena, usize);
- if (mapped_add != 0) {
- arena_stats_add_zu(tsdn, &arena->stats,
- &arena->stats.mapped, mapped_add);
+ mapped_add = size;
+ }
+ } else if (config_stats) {
+ mapped_add = 0;
+ }
+
+ if (extent != NULL) {
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_malloc_stats_update(tsdn, arena, usize);
+ if (mapped_add != 0) {
+ arena_stats_add_zu(tsdn, &arena->stats,
+ &arena->stats.mapped, mapped_add);
}
- arena_stats_unlock(tsdn, &arena->stats);
+ arena_stats_unlock(tsdn, &arena->stats);
}
- arena_nactive_add(arena, size >> LG_PAGE);
- }
-
- return extent;
-}
-
-void
-arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
- arena_large_dalloc_stats_update(tsdn, arena,
- extent_usize_get(extent));
- arena_stats_unlock(tsdn, &arena->stats);
- }
- arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
-}
-
-void
-arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- size_t oldusize) {
- size_t usize = extent_usize_get(extent);
- size_t udiff = oldusize - usize;
-
- if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
- arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
- arena_stats_unlock(tsdn, &arena->stats);
- }
- arena_nactive_sub(arena, udiff >> LG_PAGE);
-}
-
-void
-arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- size_t oldusize) {
- size_t usize = extent_usize_get(extent);
- size_t udiff = usize - oldusize;
-
- if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
- arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
- arena_stats_unlock(tsdn, &arena->stats);
- }
- arena_nactive_add(arena, udiff >> LG_PAGE);
-}
-
-static ssize_t
-arena_decay_ms_read(arena_decay_t *decay) {
- return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
-}
-
-static void
-arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
- atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
-}
-
-static void
-arena_decay_deadline_init(arena_decay_t *decay) {
- /*
- * Generate a new deadline that is uniformly random within the next
- * epoch after the current one.
- */
- nstime_copy(&decay->deadline, &decay->epoch);
- nstime_add(&decay->deadline, &decay->interval);
- if (arena_decay_ms_read(decay) > 0) {
- nstime_t jitter;
-
- nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
- nstime_ns(&decay->interval)));
- nstime_add(&decay->deadline, &jitter);
- }
-}
-
-static bool
-arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
- return (nstime_compare(&decay->deadline, time) <= 0);
-}
-
-static size_t
-arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
- uint64_t sum;
- size_t npages_limit_backlog;
- unsigned i;
-
+ arena_nactive_add(arena, size >> LG_PAGE);
+ }
+
+ return extent;
+}
+
+void
+arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_dalloc_stats_update(tsdn, arena,
+ extent_usize_get(extent));
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+ arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
+}
+
+void
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t oldusize) {
+ size_t usize = extent_usize_get(extent);
+ size_t udiff = oldusize - usize;
+
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+ arena_nactive_sub(arena, udiff >> LG_PAGE);
+}
+
+void
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t oldusize) {
+ size_t usize = extent_usize_get(extent);
+ size_t udiff = usize - oldusize;
+
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+ arena_nactive_add(arena, udiff >> LG_PAGE);
+}
+
+static ssize_t
+arena_decay_ms_read(arena_decay_t *decay) {
+ return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
+}
+
+static void
+arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
+ atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
+}
+
+static void
+arena_decay_deadline_init(arena_decay_t *decay) {
/*
- * For each element of decay_backlog, multiply by the corresponding
- * fixed-point smoothstep decay factor. Sum the products, then divide
- * to round down to the nearest whole number of pages.
+ * Generate a new deadline that is uniformly random within the next
+ * epoch after the current one.
*/
- sum = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
- sum += decay->backlog[i] * h_steps[i];
- }
- npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
-
- return npages_limit_backlog;
-}
-
-static void
-arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
- size_t npages_delta = (current_npages > decay->nunpurged) ?
- current_npages - decay->nunpurged : 0;
- decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
-
- if (config_debug) {
- if (current_npages > decay->ceil_npages) {
- decay->ceil_npages = current_npages;
- }
- size_t npages_limit = arena_decay_backlog_npages_limit(decay);
- assert(decay->ceil_npages >= npages_limit);
- if (decay->ceil_npages > npages_limit) {
- decay->ceil_npages = npages_limit;
+ nstime_copy(&decay->deadline, &decay->epoch);
+ nstime_add(&decay->deadline, &decay->interval);
+ if (arena_decay_ms_read(decay) > 0) {
+ nstime_t jitter;
+
+ nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
+ nstime_ns(&decay->interval)));
+ nstime_add(&decay->deadline, &jitter);
+ }
+}
+
+static bool
+arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
+ return (nstime_compare(&decay->deadline, time) <= 0);
+}
+
+static size_t
+arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
+ uint64_t sum;
+ size_t npages_limit_backlog;
+ unsigned i;
+
+ /*
+ * For each element of decay_backlog, multiply by the corresponding
+ * fixed-point smoothstep decay factor. Sum the products, then divide
+ * to round down to the nearest whole number of pages.
+ */
+ sum = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += decay->backlog[i] * h_steps[i];
+ }
+ npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
+
+ return npages_limit_backlog;
+}
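+
+/*
+ * Worked example (editorial addition; numbers are illustrative): h_steps[i]
+ * holds h * 2^SMOOTHSTEP_BFP for the smoothstep value h of step i, so a
+ * backlog entry of 1000 pages at a step where h is 0.25 contributes about
+ * (1000 * (0.25 * 2^SMOOTHSTEP_BFP)) >> SMOOTHSTEP_BFP = 250 pages.  Summing
+ * over all SMOOTHSTEP_NSTEPS entries gives the number of unpurged pages the
+ * decay curve still tolerates at this point in the epoch.
+ */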
+
+static void
+arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
+ size_t npages_delta = (current_npages > decay->nunpurged) ?
+ current_npages - decay->nunpurged : 0;
+ decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
+
+ if (config_debug) {
+ if (current_npages > decay->ceil_npages) {
+ decay->ceil_npages = current_npages;
+ }
+ size_t npages_limit = arena_decay_backlog_npages_limit(decay);
+ assert(decay->ceil_npages >= npages_limit);
+ if (decay->ceil_npages > npages_limit) {
+ decay->ceil_npages = npages_limit;
+ }
+ }
+}
+
+static void
+arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
+ size_t current_npages) {
+ if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
+ memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+ sizeof(size_t));
+ } else {
+ size_t nadvance_z = (size_t)nadvance_u64;
+
+ assert((uint64_t)nadvance_z == nadvance_u64);
+
+ memmove(decay->backlog, &decay->backlog[nadvance_z],
+ (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
+ if (nadvance_z > 1) {
+ memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
+ nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
-}
-static void
-arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
- size_t current_npages) {
- if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
- memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
- sizeof(size_t));
- } else {
- size_t nadvance_z = (size_t)nadvance_u64;
-
- assert((uint64_t)nadvance_z == nadvance_u64);
-
- memmove(decay->backlog, &decay->backlog[nadvance_z],
- (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
- if (nadvance_z > 1) {
- memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
- nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
- }
- }
-
- arena_decay_backlog_update_last(decay, current_npages);
+ arena_decay_backlog_update_last(decay, current_npages);
}
-static void
-arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, size_t current_npages, size_t npages_limit,
- bool is_background_thread) {
- if (current_npages > npages_limit) {
- arena_decay_to_limit(tsdn, arena, decay, extents, false,
- npages_limit, current_npages - npages_limit,
- is_background_thread);
+static void
+arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, size_t current_npages, size_t npages_limit,
+ bool is_background_thread) {
+ if (current_npages > npages_limit) {
+ arena_decay_to_limit(tsdn, arena, decay, extents, false,
+ npages_limit, current_npages - npages_limit,
+ is_background_thread);
}
}
static void
-arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
- size_t current_npages) {
- assert(arena_decay_deadline_reached(decay, time));
-
- nstime_t delta;
- nstime_copy(&delta, time);
- nstime_subtract(&delta, &decay->epoch);
-
- uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
- assert(nadvance_u64 > 0);
-
- /* Add nadvance_u64 decay intervals to epoch. */
- nstime_copy(&delta, &decay->interval);
- nstime_imultiply(&delta, nadvance_u64);
- nstime_add(&decay->epoch, &delta);
-
- /* Set a new deadline. */
- arena_decay_deadline_init(decay);
-
- /* Update the backlog. */
- arena_decay_backlog_update(decay, nadvance_u64, current_npages);
-}
-
-static void
-arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, const nstime_t *time, bool is_background_thread) {
- size_t current_npages = extents_npages_get(extents);
- arena_decay_epoch_advance_helper(decay, time, current_npages);
-
- size_t npages_limit = arena_decay_backlog_npages_limit(decay);
- /* We may unlock decay->mtx when try_purge(). Finish logging first. */
- decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
- current_npages;
-
- if (!background_thread_enabled() || is_background_thread) {
- arena_decay_try_purge(tsdn, arena, decay, extents,
- current_npages, npages_limit, is_background_thread);
- }
-}
-
-static void
-arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
- arena_decay_ms_write(decay, decay_ms);
- if (decay_ms > 0) {
- nstime_init(&decay->interval, (uint64_t)decay_ms *
- KQU(1000000));
- nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
- }
-
- nstime_init(&decay->epoch, 0);
- nstime_update(&decay->epoch);
- decay->jitter_state = (uint64_t)(uintptr_t)decay;
- arena_decay_deadline_init(decay);
- decay->nunpurged = 0;
- memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
-}
-
-static bool
-arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
- arena_stats_decay_t *stats) {
- if (config_debug) {
- for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
- assert(((char *)decay)[i] == 0);
- }
- decay->ceil_npages = 0;
- }
- if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
- decay->purging = false;
- arena_decay_reinit(decay, decay_ms);
- /* Memory is zeroed, so there is no need to clear stats. */
- if (config_stats) {
- decay->stats = stats;
- }
- return false;
-}
-
-static bool
-arena_decay_ms_valid(ssize_t decay_ms) {
- if (decay_ms < -1) {
- return false;
- }
- if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
- KQU(1000)) {
- return true;
- }
- return false;
-}
-
-static bool
-arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, bool is_background_thread) {
- malloc_mutex_assert_owner(tsdn, &decay->mtx);
-
- /* Purge all or nothing if the option is disabled. */
- ssize_t decay_ms = arena_decay_ms_read(decay);
- if (decay_ms <= 0) {
- if (decay_ms == 0) {
- arena_decay_to_limit(tsdn, arena, decay, extents, false,
- 0, extents_npages_get(extents),
- is_background_thread);
- }
- return false;
- }
-
- nstime_t time;
- nstime_init(&time, 0);
- nstime_update(&time);
- if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
- > 0)) {
- /*
- * Time went backwards. Move the epoch back in time and
- * generate a new deadline, with the expectation that time
- * typically flows forward for long enough periods of time that
- * epochs complete. Unfortunately, this strategy is susceptible
- * to clock jitter triggering premature epoch advances, but
- * clock jitter estimation and compensation isn't feasible here
- * because calls into this code are event-driven.
- */
- nstime_copy(&decay->epoch, &time);
- arena_decay_deadline_init(decay);
- } else {
- /* Verify that time does not go backwards. */
- assert(nstime_compare(&decay->epoch, &time) <= 0);
- }
+arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
+ size_t current_npages) {
+ assert(arena_decay_deadline_reached(decay, time));
+
+ nstime_t delta;
+ nstime_copy(&delta, time);
+ nstime_subtract(&delta, &decay->epoch);
+
+ uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
+ assert(nadvance_u64 > 0);
+
+ /* Add nadvance_u64 decay intervals to epoch. */
+ nstime_copy(&delta, &decay->interval);
+ nstime_imultiply(&delta, nadvance_u64);
+ nstime_add(&decay->epoch, &delta);
+
+ /* Set a new deadline. */
+ arena_decay_deadline_init(decay);
+
+ /* Update the backlog. */
+ arena_decay_backlog_update(decay, nadvance_u64, current_npages);
+}
+
+static void
+arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, const nstime_t *time, bool is_background_thread) {
+ size_t current_npages = extents_npages_get(extents);
+ arena_decay_epoch_advance_helper(decay, time, current_npages);
+
+ size_t npages_limit = arena_decay_backlog_npages_limit(decay);
+ /* We may unlock decay->mtx during try_purge(); finish logging first. */
+ decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
+ current_npages;
+
+ if (!background_thread_enabled() || is_background_thread) {
+ arena_decay_try_purge(tsdn, arena, decay, extents,
+ current_npages, npages_limit, is_background_thread);
+ }
+}
+
+static void
+arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
+ arena_decay_ms_write(decay, decay_ms);
+ if (decay_ms > 0) {
+ nstime_init(&decay->interval, (uint64_t)decay_ms *
+ KQU(1000000));
+ nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
+ }
+
+ nstime_init(&decay->epoch, 0);
+ nstime_update(&decay->epoch);
+ decay->jitter_state = (uint64_t)(uintptr_t)decay;
+ arena_decay_deadline_init(decay);
+ decay->nunpurged = 0;
+ memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+}
+
+static bool
+arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
+ arena_stats_decay_t *stats) {
+ if (config_debug) {
+ for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
+ assert(((char *)decay)[i] == 0);
+ }
+ decay->ceil_npages = 0;
+ }
+ if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ decay->purging = false;
+ arena_decay_reinit(decay, decay_ms);
+ /* Memory is zeroed, so there is no need to clear stats. */
+ if (config_stats) {
+ decay->stats = stats;
+ }
+ return false;
+}
+
+static bool
+arena_decay_ms_valid(ssize_t decay_ms) {
+ if (decay_ms < -1) {
+ return false;
+ }
+ if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
+ KQU(1000)) {
+ return true;
+ }
+ return false;
+}
+
+static bool
+arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool is_background_thread) {
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+
+ /* Purge all or nothing if the option is disabled. */
+ ssize_t decay_ms = arena_decay_ms_read(decay);
+ if (decay_ms <= 0) {
+ if (decay_ms == 0) {
+ arena_decay_to_limit(tsdn, arena, decay, extents, false,
+ 0, extents_npages_get(extents),
+ is_background_thread);
+ }
+ return false;
+ }
+
+ nstime_t time;
+ nstime_init(&time, 0);
+ nstime_update(&time);
+ if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
+ > 0)) {
+ /*
+ * Time went backwards. Move the epoch back in time and
+ * generate a new deadline, with the expectation that time
+ * typically flows forward for long enough periods of time that
+ * epochs complete. Unfortunately, this strategy is susceptible
+ * to clock jitter triggering premature epoch advances, but
+ * clock jitter estimation and compensation isn't feasible here
+ * because calls into this code are event-driven.
+ */
+ nstime_copy(&decay->epoch, &time);
+ arena_decay_deadline_init(decay);
+ } else {
+ /* Verify that time does not go backwards. */
+ assert(nstime_compare(&decay->epoch, &time) <= 0);
+ }
/*
- * If the deadline has been reached, advance to the current epoch and
- * purge to the new limit if necessary. Note that dirty pages created
- * during the current epoch are not subject to purge until a future
- * epoch, so as a result purging only happens during epoch advances, or
- * being triggered by background threads (scheduled event).
+ * If the deadline has been reached, advance to the current epoch and
+ * purge to the new limit if necessary. Note that dirty pages created
+ * during the current epoch are not subject to purge until a future
+ * epoch, so as a result purging only happens during epoch advances, or
+ * being triggered by background threads (scheduled event).
*/
- bool advance_epoch = arena_decay_deadline_reached(decay, &time);
- if (advance_epoch) {
- arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
- is_background_thread);
- } else if (is_background_thread) {
- arena_decay_try_purge(tsdn, arena, decay, extents,
- extents_npages_get(extents),
- arena_decay_backlog_npages_limit(decay),
- is_background_thread);
+ bool advance_epoch = arena_decay_deadline_reached(decay, &time);
+ if (advance_epoch) {
+ arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
+ is_background_thread);
+ } else if (is_background_thread) {
+ arena_decay_try_purge(tsdn, arena, decay, extents,
+ extents_npages_get(extents),
+ arena_decay_backlog_npages_limit(decay),
+ is_background_thread);
}
- return advance_epoch;
+ return advance_epoch;
}
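
arena_maybe_decay() above branches on the configured decay time before doing any epoch bookkeeping. The three regimes it distinguishes, expressed as a hypothetical standalone helper (the enum and names are illustrative only):

typedef enum {
	DECAY_SKETCH_DISABLED,	/* decay_ms == -1: never purge via decay */
	DECAY_SKETCH_EAGER,	/* decay_ms ==  0: purge all dirty pages now */
	DECAY_SKETCH_TIMED	/* decay_ms  >  0: purge via epochs and backlog */
} decay_mode_sketch_t;

static decay_mode_sketch_t
decay_mode_sketch(long decay_ms) {
	if (decay_ms < 0) {
		return DECAY_SKETCH_DISABLED;
	}
	return decay_ms == 0 ? DECAY_SKETCH_EAGER : DECAY_SKETCH_TIMED;
}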
-static ssize_t
-arena_decay_ms_get(arena_decay_t *decay) {
- return arena_decay_ms_read(decay);
-}
+static ssize_t
+arena_decay_ms_get(arena_decay_t *decay) {
+ return arena_decay_ms_read(decay);
+}
-ssize_t
-arena_dirty_decay_ms_get(arena_t *arena) {
- return arena_decay_ms_get(&arena->decay_dirty);
+ssize_t
+arena_dirty_decay_ms_get(arena_t *arena) {
+ return arena_decay_ms_get(&arena->decay_dirty);
}
-ssize_t
-arena_muzzy_decay_ms_get(arena_t *arena) {
- return arena_decay_ms_get(&arena->decay_muzzy);
+ssize_t
+arena_muzzy_decay_ms_get(arena_t *arena) {
+ return arena_decay_ms_get(&arena->decay_muzzy);
}
-static bool
-arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, ssize_t decay_ms) {
- if (!arena_decay_ms_valid(decay_ms)) {
- return true;
- }
+static bool
+arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
+ }
- malloc_mutex_lock(tsdn, &decay->mtx);
+ malloc_mutex_lock(tsdn, &decay->mtx);
/*
- * Restart decay backlog from scratch, which may cause many dirty pages
- * to be immediately purged. It would conceptually be possible to map
- * the old backlog onto the new backlog, but there is no justification
- * for such complexity since decay_ms changes are intended to be
- * infrequent, either between the {-1, 0, >0} states, or a one-time
- * arbitrary change during initial arena configuration.
+ * Restart decay backlog from scratch, which may cause many dirty pages
+ * to be immediately purged. It would conceptually be possible to map
+ * the old backlog onto the new backlog, but there is no justification
+ * for such complexity since decay_ms changes are intended to be
+ * infrequent, either between the {-1, 0, >0} states, or a one-time
+ * arbitrary change during initial arena configuration.
*/
- arena_decay_reinit(decay, decay_ms);
- arena_maybe_decay(tsdn, arena, decay, extents, false);
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- return false;
-}
-
-bool
-arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
- ssize_t decay_ms) {
- return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
- &arena->extents_dirty, decay_ms);
-}
-
-bool
-arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
- ssize_t decay_ms) {
- return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
- &arena->extents_muzzy, decay_ms);
-}
-
-static size_t
-arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
- size_t npages_decay_max, extent_list_t *decay_extents) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- /* Stash extents according to npages_limit. */
- size_t nstashed = 0;
- extent_t *extent;
- while (nstashed < npages_decay_max &&
- (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
- npages_limit)) != NULL) {
- extent_list_append(decay_extents, extent);
- nstashed += extent_size_get(extent) >> LG_PAGE;
- }
- return nstashed;
+ arena_decay_reinit(decay, decay_ms);
+ arena_maybe_decay(tsdn, arena, decay, extents, false);
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ return false;
+}
+
+bool
+arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t decay_ms) {
+ return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
+ &arena->extents_dirty, decay_ms);
+}
+
+bool
+arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t decay_ms) {
+ return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
+ &arena->extents_muzzy, decay_ms);
+}
+
+static size_t
+arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
+ size_t npages_decay_max, extent_list_t *decay_extents) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ /* Stash extents according to npages_limit. */
+ size_t nstashed = 0;
+ extent_t *extent;
+ while (nstashed < npages_decay_max &&
+ (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
+ npages_limit)) != NULL) {
+ extent_list_append(decay_extents, extent);
+ nstashed += extent_size_get(extent) >> LG_PAGE;
+ }
+ return nstashed;
}
static size_t
-arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
- bool all, extent_list_t *decay_extents, bool is_background_thread) {
- size_t nmadvise, nunmapped;
- size_t npurged;
+arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
+ bool all, extent_list_t *decay_extents, bool is_background_thread) {
+ size_t nmadvise, nunmapped;
+ size_t npurged;
- if (config_stats) {
+ if (config_stats) {
nmadvise = 0;
- nunmapped = 0;
- }
+ nunmapped = 0;
+ }
npurged = 0;
- ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
- for (extent_t *extent = extent_list_first(decay_extents); extent !=
- NULL; extent = extent_list_first(decay_extents)) {
- if (config_stats) {
- nmadvise++;
+ ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
+ for (extent_t *extent = extent_list_first(decay_extents); extent !=
+ NULL; extent = extent_list_first(decay_extents)) {
+ if (config_stats) {
+ nmadvise++;
}
- size_t npages = extent_size_get(extent) >> LG_PAGE;
+ size_t npages = extent_size_get(extent) >> LG_PAGE;
npurged += npages;
- extent_list_remove(decay_extents, extent);
- switch (extents_state_get(extents)) {
- case extent_state_active:
- not_reached();
- case extent_state_dirty:
- if (!all && muzzy_decay_ms != 0 &&
- !extent_purge_lazy_wrapper(tsdn, arena,
- r_extent_hooks, extent, 0,
- extent_size_get(extent))) {
- extents_dalloc(tsdn, arena, r_extent_hooks,
- &arena->extents_muzzy, extent);
- arena_background_thread_inactivity_check(tsdn,
- arena, is_background_thread);
- break;
- }
- /* Fall through. */
- case extent_state_muzzy:
- extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
- extent);
- if (config_stats) {
- nunmapped += npages;
- }
- break;
- case extent_state_retained:
- default:
- not_reached();
- }
- }
-
- if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
- arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
- 1);
- arena_stats_add_u64(tsdn, &arena->stats,
- &decay->stats->nmadvise, nmadvise);
- arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
- npurged);
- arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
- nunmapped << LG_PAGE);
- arena_stats_unlock(tsdn, &arena->stats);
- }
-
- return npurged;
-}
-
-/*
- * npages_limit: Decay at most npages_decay_max pages without violating the
- * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
- * bound on number of pages in order to prevent unbounded growth (namely in
- * stashed), otherwise unbounded new pages could be added to extents during the
- * current decay run, so that the purging thread never finishes.
- */
+ extent_list_remove(decay_extents, extent);
+ switch (extents_state_get(extents)) {
+ case extent_state_active:
+ not_reached();
+ case extent_state_dirty:
+ if (!all && muzzy_decay_ms != 0 &&
+ !extent_purge_lazy_wrapper(tsdn, arena,
+ r_extent_hooks, extent, 0,
+ extent_size_get(extent))) {
+ extents_dalloc(tsdn, arena, r_extent_hooks,
+ &arena->extents_muzzy, extent);
+ arena_background_thread_inactivity_check(tsdn,
+ arena, is_background_thread);
+ break;
+ }
+ /* Fall through. */
+ case extent_state_muzzy:
+ extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
+ extent);
+ if (config_stats) {
+ nunmapped += npages;
+ }
+ break;
+ case extent_state_retained:
+ default:
+ not_reached();
+ }
+ }
+
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
+ 1);
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &decay->stats->nmadvise, nmadvise);
+ arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
+ npurged);
+ arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
+ nunmapped << LG_PAGE);
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+
+ return npurged;
+}
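
The switch in arena_decay_stashed() above implements a two-stage pipeline: when the run is not a purge-all and muzzy decay is enabled, dirty extents are lazily purged and parked in the muzzy set; everything else is handed back to the extent hooks for unmapping. A hedged sketch of that per-extent decision, where purge_lazy_ok stands in for extent_purge_lazy_wrapper() succeeding and the types are illustrative, not jemalloc's:

#include <stdbool.h>

typedef enum { SKETCH_DIRTY, SKETCH_MUZZY } sketch_state_t;
typedef enum { SKETCH_TO_MUZZY, SKETCH_UNMAP } sketch_dest_t;

static sketch_dest_t
decay_dest_sketch(sketch_state_t state, bool purge_all, long muzzy_decay_ms,
    bool purge_lazy_ok) {
	if (state == SKETCH_DIRTY && !purge_all && muzzy_decay_ms != 0 &&
	    purge_lazy_ok) {
		return SKETCH_TO_MUZZY;	/* extents_dalloc() into extents_muzzy */
	}
	return SKETCH_UNMAP;		/* extent_dalloc_wrapper(): return to OS */
}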
+
+/*
+ * npages_limit: Decay at most npages_decay_max pages without violating the
+ * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
+ * bound on number of pages in order to prevent unbounded growth (namely in
+ * stashed), otherwise unbounded new pages could be added to extents during the
+ * current decay run, so that the purging thread never finishes.
+ */
static void
-arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
- bool is_background_thread) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 1);
- malloc_mutex_assert_owner(tsdn, &decay->mtx);
-
- if (decay->purging) {
- return;
- }
- decay->purging = true;
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-
- extent_list_t decay_extents;
- extent_list_init(&decay_extents);
-
- size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
- npages_limit, npages_decay_max, &decay_extents);
- if (npurge != 0) {
- size_t npurged = arena_decay_stashed(tsdn, arena,
- &extent_hooks, decay, extents, all, &decay_extents,
- is_background_thread);
- assert(npurged == npurge);
- }
-
- malloc_mutex_lock(tsdn, &decay->mtx);
- decay->purging = false;
-}
-
-static bool
-arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, bool is_background_thread, bool all) {
- if (all) {
- malloc_mutex_lock(tsdn, &decay->mtx);
- arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
- extents_npages_get(extents), is_background_thread);
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- return false;
- }
-
- if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
- /* No need to wait if another thread is in progress. */
- return true;
- }
-
- bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
- is_background_thread);
- size_t npages_new;
- if (epoch_advanced) {
- /* Backlog is updated on epoch advance. */
- npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
- }
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- if (have_background_thread && background_thread_enabled() &&
- epoch_advanced && !is_background_thread) {
- background_thread_interval_check(tsdn, arena, decay,
- npages_new);
- }
-
- return false;
-}
-
-static bool
-arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
- bool all) {
- return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
- &arena->extents_dirty, is_background_thread, all);
-}
-
-static bool
-arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
- bool all) {
- return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
- &arena->extents_muzzy, is_background_thread, all);
+arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
+ bool is_background_thread) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 1);
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+
+ if (decay->purging) {
+ return;
+ }
+ decay->purging = true;
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+
+ extent_list_t decay_extents;
+ extent_list_init(&decay_extents);
+
+ size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
+ npages_limit, npages_decay_max, &decay_extents);
+ if (npurge != 0) {
+ size_t npurged = arena_decay_stashed(tsdn, arena,
+ &extent_hooks, decay, extents, all, &decay_extents,
+ is_background_thread);
+ assert(npurged == npurge);
+ }
+
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ decay->purging = false;
+}
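
The comment above arena_decay_to_limit() states the invariant that bounds each purge run: never drop the extent set below npages_limit, and never purge more than npages_decay_max pages. Ignoring extent granularity, the resulting budget is simply the smaller of the two bounds, as in this illustrative sketch:

#include <stddef.h>

/*
 * Purge budget implied by the invariant documented above: stay at or above
 * npages_limit, and purge at most npages_decay_max pages per run
 * (extent granularity is ignored in this sketch).
 */
static size_t
purge_budget_sketch(size_t current_npages, size_t npages_limit,
    size_t npages_decay_max) {
	size_t over = current_npages > npages_limit ?
	    current_npages - npages_limit : 0;
	return over < npages_decay_max ? over : npages_decay_max;
}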
+
+static bool
+arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool is_background_thread, bool all) {
+ if (all) {
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
+ extents_npages_get(extents), is_background_thread);
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ return false;
+ }
+
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ /* No need to wait if another thread is in progress. */
+ return true;
+ }
+
+ bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
+ is_background_thread);
+ size_t npages_new;
+ if (epoch_advanced) {
+ /* Backlog is updated on epoch advance. */
+ npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
+ }
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ if (have_background_thread && background_thread_enabled() &&
+ epoch_advanced && !is_background_thread) {
+ background_thread_interval_check(tsdn, arena, decay,
+ npages_new);
+ }
+
+ return false;
+}
+
+static bool
+arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+ bool all) {
+ return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
+ &arena->extents_dirty, is_background_thread, all);
+}
+
+static bool
+arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+ bool all) {
+ return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
+ &arena->extents_muzzy, is_background_thread, all);
}
void
-arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
- if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
- return;
- }
- arena_decay_muzzy(tsdn, arena, is_background_thread, all);
-}
-
-static void
-arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
- arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
-
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
-}
-
-static void
-arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
- assert(extent_nfree_get(slab) > 0);
- extent_heap_insert(&bin->slabs_nonfull, slab);
- if (config_stats) {
- bin->stats.nonfull_slabs++;
- }
+arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
+ if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
+ return;
+ }
+ arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}
static void
-arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
- extent_heap_remove(&bin->slabs_nonfull, slab);
- if (config_stats) {
- bin->stats.nonfull_slabs--;
- }
-}
-
-static extent_t *
-arena_bin_slabs_nonfull_tryget(bin_t *bin) {
- extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
- if (slab == NULL) {
- return NULL;
- }
- if (config_stats) {
- bin->stats.reslabs++;
- bin->stats.nonfull_slabs--;
- }
- return slab;
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
+ arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
+
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
+}
+
+static void
+arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
+ assert(extent_nfree_get(slab) > 0);
+ extent_heap_insert(&bin->slabs_nonfull, slab);
+ if (config_stats) {
+ bin->stats.nonfull_slabs++;
+ }
+}
+
+static void
+arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
+ extent_heap_remove(&bin->slabs_nonfull, slab);
+ if (config_stats) {
+ bin->stats.nonfull_slabs--;
+ }
+}
+
+static extent_t *
+arena_bin_slabs_nonfull_tryget(bin_t *bin) {
+ extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+ if (slab == NULL) {
+ return NULL;
+ }
+ if (config_stats) {
+ bin->stats.reslabs++;
+ bin->stats.nonfull_slabs--;
+ }
+ return slab;
+}
+
+static void
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
+ assert(extent_nfree_get(slab) == 0);
+ /*
+ * Tracking extents is required by arena_reset, which is not allowed
+ * for auto arenas. Bypass this step to avoid touching the extent
+ * linkage (often results in cache misses) for auto arenas.
+ */
+ if (arena_is_auto(arena)) {
+ return;
+ }
+ extent_list_append(&bin->slabs_full, slab);
+}
+
+static void
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
+ if (arena_is_auto(arena)) {
+ return;
+ }
+ extent_list_remove(&bin->slabs_full, slab);
}
static void
-arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
- assert(extent_nfree_get(slab) == 0);
+arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
+ extent_t *slab;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ if (bin->slabcur != NULL) {
+ slab = bin->slabcur;
+ bin->slabcur = NULL;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
+ slab = extent_list_first(&bin->slabs_full)) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ if (config_stats) {
+ bin->stats.curregs = 0;
+ bin->stats.curslabs = 0;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+}
+
+void
+arena_reset(tsd_t *tsd, arena_t *arena) {
/*
- * Tracking extents is required by arena_reset, which is not allowed
- * for auto arenas. Bypass this step to avoid touching the extent
- * linkage (often results in cache misses) for auto arenas.
+ * Locking in this function is unintuitive. The caller guarantees that
+ * no concurrent operations are happening in this arena, but there are
+ * still reasons that some locking is necessary:
+ *
+ * - Some of the functions in the transitive closure of calls assume
+ * appropriate locks are held, and in some cases these locks are
+ * temporarily dropped to avoid lock order reversal or deadlock due to
+ * reentry.
+ * - mallctl("epoch", ...) may concurrently refresh stats. While
+ * strictly speaking this is a "concurrent operation", disallowing
+ * stats refreshes would impose an inconvenient burden.
*/
- if (arena_is_auto(arena)) {
- return;
- }
- extent_list_append(&bin->slabs_full, slab);
-}
-
-static void
-arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
- if (arena_is_auto(arena)) {
- return;
- }
- extent_list_remove(&bin->slabs_full, slab);
-}
-static void
-arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
- extent_t *slab;
-
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- if (bin->slabcur != NULL) {
- slab = bin->slabcur;
- bin->slabcur = NULL;
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- }
- while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- }
- for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
- slab = extent_list_first(&bin->slabs_full)) {
- arena_bin_slabs_full_remove(arena, bin, slab);
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- }
- if (config_stats) {
- bin->stats.curregs = 0;
- bin->stats.curslabs = 0;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-}
-
-void
-arena_reset(tsd_t *tsd, arena_t *arena) {
+ /* Large allocations. */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
+
+ for (extent_t *extent = extent_list_first(&arena->large); extent !=
+ NULL; extent = extent_list_first(&arena->large)) {
+ void *ptr = extent_base_get(extent);
+ size_t usize;
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != SC_NSIZES);
+
+ if (config_stats || (config_prof && opt_prof)) {
+ usize = sz_index2size(alloc_ctx.szind);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+ }
+ /* Remove large allocation from prof sample set. */
+ if (config_prof && opt_prof) {
+ prof_free(tsd, ptr, usize, &alloc_ctx);
+ }
+ large_dalloc(tsd_tsdn(tsd), extent);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
+
+ /* Bins. */
+ for (unsigned i = 0; i < SC_NBINS; i++) {
+ for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+ arena_bin_reset(tsd, arena,
+ &arena->bins[i].bin_shards[j]);
+ }
+ }
+
+ atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
+}
+
+static void
+arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
/*
- * Locking in this function is unintuitive. The caller guarantees that
- * no concurrent operations are happening in this arena, but there are
- * still reasons that some locking is necessary:
- *
- * - Some of the functions in the transitive closure of calls assume
- * appropriate locks are held, and in some cases these locks are
- * temporarily dropped to avoid lock order reversal or deadlock due to
- * reentry.
- * - mallctl("epoch", ...) may concurrently refresh stats. While
- * strictly speaking this is a "concurrent operation", disallowing
- * stats refreshes would impose an inconvenient burden.
+ * Iterate over the retained extents and destroy them. This gives the
+ * extent allocator underlying the extent hooks an opportunity to unmap
+ * all retained memory without having to keep its own metadata
+ * structures. In practice, virtual memory for dss-allocated extents is
+ * leaked here, so best practice is to avoid dss for arenas to be
+ * destroyed, or provide custom extent hooks that track retained
+ * dss-based extents for later reuse.
*/
-
- /* Large allocations. */
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
-
- for (extent_t *extent = extent_list_first(&arena->large); extent !=
- NULL; extent = extent_list_first(&arena->large)) {
- void *ptr = extent_base_get(extent);
- size_t usize;
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind != SC_NSIZES);
-
- if (config_stats || (config_prof && opt_prof)) {
- usize = sz_index2size(alloc_ctx.szind);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
- }
- /* Remove large allocation from prof sample set. */
- if (config_prof && opt_prof) {
- prof_free(tsd, ptr, usize, &alloc_ctx);
- }
- large_dalloc(tsd_tsdn(tsd), extent);
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
-
- /* Bins. */
- for (unsigned i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- arena_bin_reset(tsd, arena,
- &arena->bins[i].bin_shards[j]);
- }
- }
-
- atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
-}
-
-static void
-arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+ extent_t *extent;
+ while ((extent = extents_evict(tsdn, arena, &extent_hooks,
+ &arena->extents_retained, 0)) != NULL) {
+ extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
+ }
+}
+
+void
+arena_destroy(tsd_t *tsd, arena_t *arena) {
+ assert(base_ind_get(arena->base) >= narenas_auto);
+ assert(arena_nthreads_get(arena, false) == 0);
+ assert(arena_nthreads_get(arena, true) == 0);
+
+ /*
+ * No allocations have occurred since arena_reset() was called.
+ * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
+ * extents, so only retained extents may remain.
+ */
+ assert(extents_npages_get(&arena->extents_dirty) == 0);
+ assert(extents_npages_get(&arena->extents_muzzy) == 0);
+
+ /* Deallocate retained memory. */
+ arena_destroy_retained(tsd_tsdn(tsd), arena);
+
/*
- * Iterate over the retained extents and destroy them. This gives the
- * extent allocator underlying the extent hooks an opportunity to unmap
- * all retained memory without having to keep its own metadata
- * structures. In practice, virtual memory for dss-allocated extents is
- * leaked here, so best practice is to avoid dss for arenas to be
- * destroyed, or provide custom extent hooks that track retained
- * dss-based extents for later reuse.
+ * Remove the arena pointer from the arenas array. We rely on the fact
+ * that there is no way for the application to get a dirty read from the
+ * arenas array unless there is an inherent race in the application
+ * involving access of an arena being concurrently destroyed. The
+ * application must synchronize knowledge of the arena's validity, so as
+ * long as we use an atomic write to update the arenas array, the
+ * application will get a clean read any time after it synchronizes
+ * knowledge that the arena is no longer valid.
*/
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- extent_t *extent;
- while ((extent = extents_evict(tsdn, arena, &extent_hooks,
- &arena->extents_retained, 0)) != NULL) {
- extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
- }
+ arena_set(base_ind_get(arena->base), NULL);
+
+ /*
+ * Destroy the base allocator, which manages all metadata ever mapped by
+ * this arena.
+ */
+ base_delete(tsd_tsdn(tsd), arena->base);
}
-void
-arena_destroy(tsd_t *tsd, arena_t *arena) {
- assert(base_ind_get(arena->base) >= narenas_auto);
- assert(arena_nthreads_get(arena, false) == 0);
- assert(arena_nthreads_get(arena, true) == 0);
+static extent_t *
+arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
+ szind_t szind) {
+ extent_t *slab;
+ bool zero, commit;
- /*
- * No allocations have occurred since arena_reset() was called.
- * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
- * extents, so only retained extents may remain.
- */
- assert(extents_npages_get(&arena->extents_dirty) == 0);
- assert(extents_npages_get(&arena->extents_muzzy) == 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- /* Deallocate retained memory. */
- arena_destroy_retained(tsd_tsdn(tsd), arena);
+ zero = false;
+ commit = true;
+ slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
+ bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
- /*
- * Remove the arena pointer from the arenas array. We rely on the fact
- * that there is no way for the application to get a dirty read from the
- * arenas array unless there is an inherent race in the application
- * involving access of an arena being concurrently destroyed. The
- * application must synchronize knowledge of the arena's validity, so as
- * long as we use an atomic write to update the arenas array, the
- * application will get a clean read any time after it synchronizes
- * knowledge that the arena is no longer valid.
- */
- arena_set(base_ind_get(arena->base), NULL);
+ if (config_stats && slab != NULL) {
+ arena_stats_mapped_add(tsdn, &arena->stats,
+ bin_info->slab_size);
+ }
- /*
- * Destroy the base allocator, which manages all metadata ever mapped by
- * this arena.
- */
- base_delete(tsd_tsdn(tsd), arena->base);
+ return slab;
}
-static extent_t *
-arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
- szind_t szind) {
- extent_t *slab;
- bool zero, commit;
-
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
+static extent_t *
+arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
+ const bin_info_t *bin_info) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- zero = false;
- commit = true;
- slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
- bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
-
- if (config_stats && slab != NULL) {
- arena_stats_mapped_add(tsdn, &arena->stats,
- bin_info->slab_size);
- }
-
- return slab;
-}
-
-static extent_t *
-arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
- const bin_info_t *bin_info) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- szind_t szind = sz_size2index(bin_info->reg_size);
- bool zero = false;
- bool commit = true;
- extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
- binind, &zero, &commit);
- if (slab == NULL && arena_may_have_muzzy(arena)) {
- slab = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
- true, binind, &zero, &commit);
- }
- if (slab == NULL) {
- slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
- bin_info, szind);
- if (slab == NULL) {
- return NULL;
- }
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ szind_t szind = sz_size2index(bin_info->reg_size);
+ bool zero = false;
+ bool commit = true;
+ extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
+ binind, &zero, &commit);
+ if (slab == NULL && arena_may_have_muzzy(arena)) {
+ slab = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
+ true, binind, &zero, &commit);
}
- assert(extent_slab_get(slab));
+ if (slab == NULL) {
+ slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
+ bin_info, szind);
+ if (slab == NULL) {
+ return NULL;
+ }
+ }
+ assert(extent_slab_get(slab));
- /* Initialize slab internals. */
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
- extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
- bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
+ /* Initialize slab internals. */
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
+ bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
- arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
+ arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
- return slab;
+ return slab;
}
-static extent_t *
-arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, unsigned binshard) {
- extent_t *slab;
- const bin_info_t *bin_info;
+static extent_t *
+arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind, unsigned binshard) {
+ extent_t *slab;
+ const bin_info_t *bin_info;
- /* Look for a usable slab. */
- slab = arena_bin_slabs_nonfull_tryget(bin);
- if (slab != NULL) {
- return slab;
+ /* Look for a usable slab. */
+ slab = arena_bin_slabs_nonfull_tryget(bin);
+ if (slab != NULL) {
+ return slab;
}
- /* No existing slabs have any space available. */
+ /* No existing slabs have any space available. */
- bin_info = &bin_infos[binind];
+ bin_info = &bin_infos[binind];
- /* Allocate a new slab. */
- malloc_mutex_unlock(tsdn, &bin->lock);
+ /* Allocate a new slab. */
+ malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
+ slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
/********************************/
- malloc_mutex_lock(tsdn, &bin->lock);
- if (slab != NULL) {
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (slab != NULL) {
if (config_stats) {
- bin->stats.nslabs++;
- bin->stats.curslabs++;
+ bin->stats.nslabs++;
+ bin->stats.curslabs++;
}
- return slab;
+ return slab;
}
/*
- * arena_slab_alloc() failed, but another thread may have made
+ * arena_slab_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
- slab = arena_bin_slabs_nonfull_tryget(bin);
- if (slab != NULL) {
- return slab;
- }
+ slab = arena_bin_slabs_nonfull_tryget(bin);
+ if (slab != NULL) {
+ return slab;
+ }
- return NULL;
+ return NULL;
}
-/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
+/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, unsigned binshard) {
- const bin_info_t *bin_info;
- extent_t *slab;
-
- bin_info = &bin_infos[binind];
- if (!arena_is_auto(arena) && bin->slabcur != NULL) {
- arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
- bin->slabcur = NULL;
- }
- slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
- if (bin->slabcur != NULL) {
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind, unsigned binshard) {
+ const bin_info_t *bin_info;
+ extent_t *slab;
+
+ bin_info = &bin_infos[binind];
+ if (!arena_is_auto(arena) && bin->slabcur != NULL) {
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ bin->slabcur = NULL;
+ }
+ slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
+ if (bin->slabcur != NULL) {
/*
- * Another thread updated slabcur while this one ran without the
- * bin lock in arena_bin_nonfull_slab_get().
+ * Another thread updated slabcur while this one ran without the
+ * bin lock in arena_bin_nonfull_slab_get().
*/
- if (extent_nfree_get(bin->slabcur) > 0) {
- void *ret = arena_slab_reg_alloc(bin->slabcur,
- bin_info);
- if (slab != NULL) {
- /*
- * arena_slab_alloc() may have allocated slab,
- * or it may have been pulled from
- * slabs_nonfull. Therefore it is unsafe to
- * make any assumptions about how slab has
- * previously been used, and
- * arena_bin_lower_slab() must be called, as if
- * a region were just deallocated from the slab.
- */
- if (extent_nfree_get(slab) == bin_info->nregs) {
- arena_dalloc_bin_slab(tsdn, arena, slab,
- bin);
- } else {
- arena_bin_lower_slab(tsdn, arena, slab,
- bin);
- }
- }
- return ret;
- }
-
- arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
- bin->slabcur = NULL;
- }
-
- if (slab == NULL) {
- return NULL;
- }
- bin->slabcur = slab;
-
- assert(extent_nfree_get(bin->slabcur) > 0);
-
- return arena_slab_reg_alloc(slab, bin_info);
-}
-
-/* Choose a bin shard and return the locked bin. */
-bin_t *
-arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
- unsigned *binshard) {
- bin_t *bin;
- if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
- *binshard = 0;
- } else {
- *binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
- }
- assert(*binshard < bin_infos[binind].n_shards);
- bin = &arena->bins[binind].bin_shards[*binshard];
- malloc_mutex_lock(tsdn, &bin->lock);
-
- return bin;
+ if (extent_nfree_get(bin->slabcur) > 0) {
+ void *ret = arena_slab_reg_alloc(bin->slabcur,
+ bin_info);
+ if (slab != NULL) {
+ /*
+ * arena_slab_alloc() may have allocated slab,
+ * or it may have been pulled from
+ * slabs_nonfull. Therefore it is unsafe to
+ * make any assumptions about how slab has
+ * previously been used, and
+ * arena_bin_lower_slab() must be called, as if
+ * a region were just deallocated from the slab.
+ */
+ if (extent_nfree_get(slab) == bin_info->nregs) {
+ arena_dalloc_bin_slab(tsdn, arena, slab,
+ bin);
+ } else {
+ arena_bin_lower_slab(tsdn, arena, slab,
+ bin);
+ }
+ }
+ return ret;
+ }
+
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ bin->slabcur = NULL;
+ }
+
+ if (slab == NULL) {
+ return NULL;
+ }
+ bin->slabcur = slab;
+
+ assert(extent_nfree_get(bin->slabcur) > 0);
+
+ return arena_slab_reg_alloc(slab, bin_info);
+}
+
+/* Choose a bin shard and return the locked bin. */
+bin_t *
+arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ unsigned *binshard) {
+ bin_t *bin;
+ if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
+ *binshard = 0;
+ } else {
+ *binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
+ }
+ assert(*binshard < bin_infos[binind].n_shards);
+ bin = &arena->bins[binind].bin_shards[*binshard];
+ malloc_mutex_lock(tsdn, &bin->lock);
+
+ return bin;
}
void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
- unsigned i, nfill, cnt;
+arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
+ unsigned i, nfill, cnt;
assert(tbin->ncached == 0);
- if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
- prof_idump(tsdn);
- }
-
- unsigned binshard;
- bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
-
+ if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
+ prof_idump(tsdn);
+ }
+
+ unsigned binshard;
+ bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
+
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
- tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
- extent_t *slab;
- if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
- 0) {
- unsigned tofill = nfill - i;
- cnt = tofill < extent_nfree_get(slab) ?
- tofill : extent_nfree_get(slab);
- arena_slab_reg_alloc_batch(
- slab, &bin_infos[binind], cnt,
- tbin->avail - nfill + i);
- } else {
- cnt = 1;
- void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
- binind, binshard);
- /*
- * OOM. tbin->avail isn't yet filled down to its first
- * element, so the successful allocations (if any) must
- * be moved just before tbin->avail before bailing out.
- */
- if (ptr == NULL) {
- if (i > 0) {
- memmove(tbin->avail - i,
- tbin->avail - nfill,
- i * sizeof(void *));
- }
- break;
- }
- /* Insert such that low regions get used first. */
- *(tbin->avail - nfill + i) = ptr;
- }
- if (config_fill && unlikely(opt_junk_alloc)) {
- for (unsigned j = 0; j < cnt; j++) {
- void* ptr = *(tbin->avail - nfill + i + j);
- arena_alloc_junk_small(ptr, &bin_infos[binind],
- true);
- }
+ tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
+ extent_t *slab;
+ if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
+ 0) {
+ unsigned tofill = nfill - i;
+ cnt = tofill < extent_nfree_get(slab) ?
+ tofill : extent_nfree_get(slab);
+ arena_slab_reg_alloc_batch(
+ slab, &bin_infos[binind], cnt,
+ tbin->avail - nfill + i);
+ } else {
+ cnt = 1;
+ void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
+ binind, binshard);
+ /*
+ * OOM. tbin->avail isn't yet filled down to its first
+ * element, so the successful allocations (if any) must
+ * be moved just before tbin->avail before bailing out.
+ */
+ if (ptr == NULL) {
+ if (i > 0) {
+ memmove(tbin->avail - i,
+ tbin->avail - nfill,
+ i * sizeof(void *));
+ }
+ break;
+ }
+ /* Insert such that low regions get used first. */
+ *(tbin->avail - nfill + i) = ptr;
}
+ if (config_fill && unlikely(opt_junk_alloc)) {
+ for (unsigned j = 0; j < cnt; j++) {
+ void* ptr = *(tbin->avail - nfill + i + j);
+ arena_alloc_junk_small(ptr, &bin_infos[binind],
+ true);
+ }
+ }
}
if (config_stats) {
bin->stats.nmalloc += i;
bin->stats.nrequests += tbin->tstats.nrequests;
- bin->stats.curregs += i;
+ bin->stats.curregs += i;
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
tbin->ncached = i;
- arena_decay_tick(tsdn, arena);
+ arena_decay_tick(tsdn, arena);
}
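
The OOM path in arena_tcache_fill_small() above relies on the cache-bin layout in which tbin->avail points just past the cached pointers and the ncached entries sit at avail[-ncached .. -1]; when only part of the fill succeeds, the filled entries are slid up against avail so that layout still holds. A small standalone demonstration of that memmove, using a plain array as a simplified stand-in for cache_bin_t:

#include <assert.h>
#include <string.h>

int
main(void) {
	void *slots[8] = {0};
	void **avail = &slots[8];	/* one past the last slot */
	unsigned nfill = 8, got = 3;	/* allocation fails after 3 of 8 */
	int a, b, c;

	*(avail - nfill + 0) = &a;
	*(avail - nfill + 1) = &b;
	*(avail - nfill + 2) = &c;

	/* Slide the got successful fills so they end directly below avail. */
	memmove(avail - got, avail - nfill, got * sizeof(void *));
	assert(*(avail - 3) == &a && *(avail - 2) == &b && *(avail - 1) == &c);
	return 0;
}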
void
-arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
- if (!zero) {
- memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
+arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
+ if (!zero) {
+ memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
}
}
static void
-arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
- memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
+arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
+ memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
-arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
- arena_dalloc_junk_small_impl;
+arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
+ arena_dalloc_junk_small_impl;
-static void *
-arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
+static void *
+arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
void *ret;
- bin_t *bin;
- size_t usize;
- extent_t *slab;
+ bin_t *bin;
+ size_t usize;
+ extent_t *slab;
- assert(binind < SC_NBINS);
- usize = sz_index2size(binind);
- unsigned binshard;
- bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
+ assert(binind < SC_NBINS);
+ usize = sz_index2size(binind);
+ unsigned binshard;
+ bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
- if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
- ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
- } else {
- ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
- }
+ if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
+ ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
+ } else {
+ ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
+ }
if (ret == NULL) {
- malloc_mutex_unlock(tsdn, &bin->lock);
- return NULL;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ return NULL;
}
if (config_stats) {
bin->stats.nmalloc++;
bin->stats.nrequests++;
- bin->stats.curregs++;
- }
- malloc_mutex_unlock(tsdn, &bin->lock);
- if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
- prof_idump(tsdn);
+ bin->stats.curregs++;
}
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
+ prof_idump(tsdn);
+ }
- if (!zero) {
+ if (!zero) {
if (config_fill) {
- if (unlikely(opt_junk_alloc)) {
+ if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
- &bin_infos[binind], false);
- } else if (unlikely(opt_zero)) {
- memset(ret, 0, usize);
- }
+ &bin_infos[binind], false);
+ } else if (unlikely(opt_zero)) {
+ memset(ret, 0, usize);
+ }
}
} else {
- if (config_fill && unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret, &bin_infos[binind],
+ if (config_fill && unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret, &bin_infos[binind],
true);
}
- memset(ret, 0, usize);
+ memset(ret, 0, usize);
}
- arena_decay_tick(tsdn, arena);
- return ret;
+ arena_decay_tick(tsdn, arena);
+ return ret;
}
void *
-arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
- bool zero) {
- assert(!tsdn_null(tsdn) || arena != NULL);
+arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
+ bool zero) {
+ assert(!tsdn_null(tsdn) || arena != NULL);
- if (likely(!tsdn_null(tsdn))) {
- arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
+ if (likely(!tsdn_null(tsdn))) {
+ arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
}
- if (unlikely(arena == NULL)) {
- return NULL;
+ if (unlikely(arena == NULL)) {
+ return NULL;
}
- if (likely(size <= SC_SMALL_MAXCLASS)) {
- return arena_malloc_small(tsdn, arena, ind, zero);
+ if (likely(size <= SC_SMALL_MAXCLASS)) {
+ return arena_malloc_small(tsdn, arena, ind, zero);
}
- return large_malloc(tsdn, arena, sz_index2size(ind), zero);
+ return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}
void *
-arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero, tcache_t *tcache) {
+arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero, tcache_t *tcache) {
void *ret;
- if (usize <= SC_SMALL_MAXCLASS
- && (alignment < PAGE
- || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
- /* Small; alignment doesn't require special slab placement. */
- ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
- zero, tcache, true);
- } else {
- if (likely(alignment <= CACHELINE)) {
- ret = large_malloc(tsdn, arena, usize, zero);
- } else {
- ret = large_palloc(tsdn, arena, usize, alignment, zero);
- }
- }
- return ret;
-}
-
-void
-arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
- cassert(config_prof);
- assert(ptr != NULL);
- assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
- assert(usize <= SC_SMALL_MAXCLASS);
-
- if (config_opt_safety_checks) {
- safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
- }
-
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true);
- arena_t *arena = extent_arena_get(extent);
-
- szind_t szind = sz_size2index(usize);
- extent_szind_set(extent, szind);
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
- szind, false);
-
- prof_accum_cancel(tsdn, &arena->prof_accum, usize);
-
- assert(isalloc(tsdn, ptr) == usize);
-}
-
-static size_t
-arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
+ if (usize <= SC_SMALL_MAXCLASS
+ && (alignment < PAGE
+ || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
+ /* Small; alignment doesn't require special slab placement. */
+ ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
+ zero, tcache, true);
+ } else {
+ if (likely(alignment <= CACHELINE)) {
+ ret = large_malloc(tsdn, arena, usize, zero);
+ } else {
+ ret = large_palloc(tsdn, arena, usize, alignment, zero);
+ }
+ }
+ return ret;
+}
+
+void
+arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
+ assert(usize <= SC_SMALL_MAXCLASS);
+
+ if (config_opt_safety_checks) {
+ safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
+ }
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true);
+ arena_t *arena = extent_arena_get(extent);
+
+ szind_t szind = sz_size2index(usize);
+ extent_szind_set(extent, szind);
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+ szind, false);
+
+ prof_accum_cancel(tsdn, &arena->prof_accum, usize);
+
+ assert(isalloc(tsdn, ptr) == usize);
+}
+
+static size_t
+arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- extent_szind_set(extent, SC_NBINS);
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
- SC_NBINS, false);
-
- assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
-
- return SC_LARGE_MINCLASS;
-}
-
-void
-arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
- bool slow_path) {
- cassert(config_prof);
- assert(opt_prof);
-
- extent_t *extent = iealloc(tsdn, ptr);
- size_t usize = extent_usize_get(extent);
- size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
- if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
- /*
- * Currently, we only do redzoning for small sampled
- * allocations.
- */
- assert(bumped_usize == SC_LARGE_MINCLASS);
- safety_check_verify_redzone(ptr, usize, bumped_usize);
- }
- if (bumped_usize <= tcache_maxclass && tcache != NULL) {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
- sz_size2index(bumped_usize), slow_path);
- } else {
- large_dalloc(tsdn, extent);
- }
-}
-
+ extent_szind_set(extent, SC_NBINS);
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+ SC_NBINS, false);
+
+ assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
+
+ return SC_LARGE_MINCLASS;
+}
+
+void
+arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+ bool slow_path) {
+ cassert(config_prof);
+ assert(opt_prof);
+
+ extent_t *extent = iealloc(tsdn, ptr);
+ size_t usize = extent_usize_get(extent);
+ size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
+ if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
+ /*
+ * Currently, we only do redzoning for small sampled
+ * allocations.
+ */
+ assert(bumped_usize == SC_LARGE_MINCLASS);
+ safety_check_verify_redzone(ptr, usize, bumped_usize);
+ }
+ if (bumped_usize <= tcache_maxclass && tcache != NULL) {
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ sz_size2index(bumped_usize), slow_path);
+ } else {
+ large_dalloc(tsdn, extent);
+ }
+}
+
static void
-arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
- /* Dissociate slab from bin. */
- if (slab == bin->slabcur) {
- bin->slabcur = NULL;
- } else {
- szind_t binind = extent_szind_get(slab);
- const bin_info_t *bin_info = &bin_infos[binind];
-
- /*
- * The following block's conditional is necessary because if the
- * slab only contains one region, then it never gets inserted
- * into the non-full slabs heap.
- */
- if (bin_info->nregs == 1) {
- arena_bin_slabs_full_remove(arena, bin, slab);
- } else {
- arena_bin_slabs_nonfull_remove(bin, slab);
+arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
+ /* Dissociate slab from bin. */
+ if (slab == bin->slabcur) {
+ bin->slabcur = NULL;
+ } else {
+ szind_t binind = extent_szind_get(slab);
+ const bin_info_t *bin_info = &bin_infos[binind];
+
+ /*
+ * The following block's conditional is necessary because if the
+ * slab only contains one region, then it never gets inserted
+ * into the non-full slabs heap.
+ */
+ if (bin_info->nregs == 1) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ } else {
+ arena_bin_slabs_nonfull_remove(bin, slab);
}
}
}
static void
-arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- bin_t *bin) {
- assert(slab != bin->slabcur);
+arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ bin_t *bin) {
+ assert(slab != bin->slabcur);
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- arena_slab_dalloc(tsdn, arena, slab);
- /****************************/
- malloc_mutex_lock(tsdn, &bin->lock);
- if (config_stats) {
- bin->stats.curslabs--;
+ arena_slab_dalloc(tsdn, arena, slab);
+ /****************************/
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (config_stats) {
+ bin->stats.curslabs--;
}
}
static void
-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- bin_t *bin) {
- assert(extent_nfree_get(slab) > 0);
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ bin_t *bin) {
+ assert(extent_nfree_get(slab) > 0);
/*
- * Make sure that if bin->slabcur is non-NULL, it refers to the
- * oldest/lowest non-full slab. It is okay to NULL slabcur out rather
- * than proactively keeping it pointing at the oldest/lowest non-full
- * slab.
+ * Make sure that if bin->slabcur is non-NULL, it refers to the
+ * oldest/lowest non-full slab. It is okay to NULL slabcur out rather
+ * than proactively keeping it pointing at the oldest/lowest non-full
+ * slab.
*/
- if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
- /* Switch slabcur. */
- if (extent_nfree_get(bin->slabcur) > 0) {
- arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
- } else {
- arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
- }
- bin->slabcur = slab;
- if (config_stats) {
- bin->stats.reslabs++;
- }
- } else {
- arena_bin_slabs_nonfull_insert(bin, slab);
- }
-}
-
-static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *slab, void *ptr, bool junked) {
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
- const bin_info_t *bin_info = &bin_infos[binind];
-
- if (!junked && config_fill && unlikely(opt_junk_free)) {
+ if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
+ /* Switch slabcur. */
+ if (extent_nfree_get(bin->slabcur) > 0) {
+ arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
+ } else {
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ }
+ bin->slabcur = slab;
+ if (config_stats) {
+ bin->stats.reslabs++;
+ }
+ } else {
+ arena_bin_slabs_nonfull_insert(bin, slab);
+ }
+}
+
+static void
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind, extent_t *slab, void *ptr, bool junked) {
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ const bin_info_t *bin_info = &bin_infos[binind];
+
+ if (!junked && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, bin_info);
- }
-
- arena_slab_reg_dalloc(slab, slab_data, ptr);
- unsigned nfree = extent_nfree_get(slab);
- if (nfree == bin_info->nregs) {
- arena_dissociate_bin_slab(arena, slab, bin);
- arena_dalloc_bin_slab(tsdn, arena, slab, bin);
- } else if (nfree == 1 && slab != bin->slabcur) {
- arena_bin_slabs_full_remove(arena, bin, slab);
- arena_bin_lower_slab(tsdn, arena, slab, bin);
- }
+ }
+
+ arena_slab_reg_dalloc(slab, slab_data, ptr);
+ unsigned nfree = extent_nfree_get(slab);
+ if (nfree == bin_info->nregs) {
+ arena_dissociate_bin_slab(arena, slab, bin);
+ arena_dalloc_bin_slab(tsdn, arena, slab, bin);
+ } else if (nfree == 1 && slab != bin->slabcur) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ arena_bin_lower_slab(tsdn, arena, slab, bin);
+ }
if (config_stats) {
bin->stats.ndalloc++;
- bin->stats.curregs--;
+ bin->stats.curregs--;
}
}
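
arena_dalloc_bin_locked_impl handles two interesting transitions on a free: a slab whose regions are now all free is dissociated and given back, and a slab that just gained its first free region leaves the full list. Below is a toy model of that bookkeeping only, ignoring slabcur, sharding and locking; the types are hypothetical stand-ins, not jemalloc's.

    #include <stdio.h>

    /* Hypothetical slab bookkeeping: nregs regions, nfree of them unused. */
    typedef struct {
        unsigned nregs;
        unsigned nfree;
        const char *list;   /* "full", "nonfull", or "empty" for display. */
    } slab_t;

    /* Mirror of the transitions above: freeing one region can move a slab
     * off the full list, and freeing the last used region retires it. */
    static void
    slab_free_one(slab_t *slab) {
        slab->nfree++;
        if (slab->nfree == slab->nregs) {
            slab->list = "empty";       /* Whole slab can be returned. */
        } else if (slab->nfree == 1) {
            slab->list = "nonfull";     /* Just left the full list. */
        }
        printf("nfree=%u/%u -> %s\n", slab->nfree, slab->nregs, slab->list);
    }

    int
    main(void) {
        slab_t slab = {4, 0, "full"};
        for (unsigned i = 0; i < 4; i++) {
            slab_free_one(&slab);
        }
        return 0;
    }
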
void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *extent, void *ptr) {
- arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
- true);
-}
-
-static void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
- szind_t binind = extent_szind_get(extent);
- unsigned binshard = extent_binshard_get(extent);
- bin_t *bin = &arena->bins[binind].bin_shards[binshard];
-
- malloc_mutex_lock(tsdn, &bin->lock);
- arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
- false);
- malloc_mutex_unlock(tsdn, &bin->lock);
-}
-
-void
-arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
- extent_t *extent = iealloc(tsdn, ptr);
- arena_t *arena = extent_arena_get(extent);
-
- arena_dalloc_bin(tsdn, arena, extent, ptr);
- arena_decay_tick(tsdn, arena);
-}
-
-bool
-arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero, size_t *newsize) {
- bool ret;
- /* Calls with non-zero extra had to clamp extra. */
- assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
-
- extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(size > SC_LARGE_MAXCLASS)) {
- ret = true;
- goto done;
- }
-
- size_t usize_min = sz_s2u(size);
- size_t usize_max = sz_s2u(size + extra);
- if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
- <= SC_SMALL_MAXCLASS)) {
- /*
- * Avoid moving the allocation if the size class can be left the
- * same.
- */
- assert(bin_infos[sz_size2index(oldsize)].reg_size ==
- oldsize);
- if ((usize_max > SC_SMALL_MAXCLASS
- || sz_size2index(usize_max) != sz_size2index(oldsize))
- && (size > oldsize || usize_max < oldsize)) {
- ret = true;
- goto done;
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind, extent_t *extent, void *ptr) {
+ arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
+ true);
+}
+
+static void
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
+ szind_t binind = extent_szind_get(extent);
+ unsigned binshard = extent_binshard_get(extent);
+ bin_t *bin = &arena->bins[binind].bin_shards[binshard];
+
+ malloc_mutex_lock(tsdn, &bin->lock);
+ arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
+ false);
+ malloc_mutex_unlock(tsdn, &bin->lock);
+}
+
+void
+arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
+ extent_t *extent = iealloc(tsdn, ptr);
+ arena_t *arena = extent_arena_get(extent);
+
+ arena_dalloc_bin(tsdn, arena, extent, ptr);
+ arena_decay_tick(tsdn, arena);
+}
+
+bool
+arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+ size_t extra, bool zero, size_t *newsize) {
+ bool ret;
+ /* Calls with non-zero extra had to clamp extra. */
+ assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
+
+ extent_t *extent = iealloc(tsdn, ptr);
+ if (unlikely(size > SC_LARGE_MAXCLASS)) {
+ ret = true;
+ goto done;
+ }
+
+ size_t usize_min = sz_s2u(size);
+ size_t usize_max = sz_s2u(size + extra);
+ if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
+ <= SC_SMALL_MAXCLASS)) {
+ /*
+ * Avoid moving the allocation if the size class can be left the
+ * same.
+ */
+ assert(bin_infos[sz_size2index(oldsize)].reg_size ==
+ oldsize);
+ if ((usize_max > SC_SMALL_MAXCLASS
+ || sz_size2index(usize_max) != sz_size2index(oldsize))
+ && (size > oldsize || usize_max < oldsize)) {
+ ret = true;
+ goto done;
}
-
- arena_decay_tick(tsdn, extent_arena_get(extent));
- ret = false;
- } else if (oldsize >= SC_LARGE_MINCLASS
- && usize_max >= SC_LARGE_MINCLASS) {
- ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
- zero);
- } else {
- ret = true;
- }
-done:
- assert(extent == iealloc(tsdn, ptr));
- *newsize = extent_usize_get(extent);
-
- return ret;
-}
-
-static void *
-arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache) {
- if (alignment == 0) {
- return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
- zero, tcache, true);
- }
- usize = sz_sa2u(usize, alignment);
- if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
- return NULL;
- }
- return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
-}
-
-void *
-arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
- size_t size, size_t alignment, bool zero, tcache_t *tcache,
- hook_ralloc_args_t *hook_args) {
- size_t usize = sz_s2u(size);
- if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
- return NULL;
- }
-
- if (likely(usize <= SC_SMALL_MAXCLASS)) {
- /* Try to avoid moving the allocation. */
- UNUSED size_t newsize;
- if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
- &newsize)) {
- hook_invoke_expand(hook_args->is_realloc
- ? hook_expand_realloc : hook_expand_rallocx,
- ptr, oldsize, usize, (uintptr_t)ptr,
- hook_args->args);
- return ptr;
- }
- }
-
- if (oldsize >= SC_LARGE_MINCLASS
- && usize >= SC_LARGE_MINCLASS) {
- return large_ralloc(tsdn, arena, ptr, usize,
- alignment, zero, tcache, hook_args);
- }
-
+
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ ret = false;
+ } else if (oldsize >= SC_LARGE_MINCLASS
+ && usize_max >= SC_LARGE_MINCLASS) {
+ ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+ zero);
+ } else {
+ ret = true;
+ }
+done:
+ assert(extent == iealloc(tsdn, ptr));
+ *newsize = extent_usize_get(extent);
+
+ return ret;
+}
+
+static void *
+arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache) {
+ if (alignment == 0) {
+ return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
+ zero, tcache, true);
+ }
+ usize = sz_sa2u(usize, alignment);
+ if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
+ return NULL;
+ }
+ return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
+}
+
+void *
+arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
+ size_t size, size_t alignment, bool zero, tcache_t *tcache,
+ hook_ralloc_args_t *hook_args) {
+ size_t usize = sz_s2u(size);
+ if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
+ return NULL;
+ }
+
+ if (likely(usize <= SC_SMALL_MAXCLASS)) {
+ /* Try to avoid moving the allocation. */
+ UNUSED size_t newsize;
+ if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
+ &newsize)) {
+ hook_invoke_expand(hook_args->is_realloc
+ ? hook_expand_realloc : hook_expand_rallocx,
+ ptr, oldsize, usize, (uintptr_t)ptr,
+ hook_args->args);
+ return ptr;
+ }
+ }
+
+ if (oldsize >= SC_LARGE_MINCLASS
+ && usize >= SC_LARGE_MINCLASS) {
+ return large_ralloc(tsdn, arena, ptr, usize,
+ alignment, zero, tcache, hook_args);
+ }
+
/*
- * size and oldsize are different enough that we need to move the
- * object. In that case, fall back to allocating new space and copying.
+ * size and oldsize are different enough that we need to move the
+ * object. In that case, fall back to allocating new space and copying.
*/
- void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
- zero, tcache);
- if (ret == NULL) {
- return NULL;
+ void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
+ zero, tcache);
+ if (ret == NULL) {
+ return NULL;
}
- hook_invoke_alloc(hook_args->is_realloc
- ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
- hook_args->args);
- hook_invoke_dalloc(hook_args->is_realloc
- ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
+ hook_invoke_alloc(hook_args->is_realloc
+ ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
+ hook_args->args);
+ hook_invoke_dalloc(hook_args->is_realloc
+ ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
- /*
- * Junk/zero-filling were already done by
- * ipalloc()/arena_malloc().
- */
- size_t copysize = (usize < oldsize) ? usize : oldsize;
- memcpy(ret, ptr, copysize);
- isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
- return ret;
-}
+ /*
+ * Junk/zero-filling were already done by
+ * ipalloc()/arena_malloc().
+ */
+ size_t copysize = (usize < oldsize) ? usize : oldsize;
+ memcpy(ret, ptr, copysize);
+ isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
+ return ret;
+}
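
The no-move path in arena_ralloc_no_move above comes down to one question: does the requested size still map to the same size class as the old allocation? Below is a minimal standalone sketch of that check, using a hypothetical hard-coded size-class table in place of jemalloc's sz_size2index machinery; it illustrates the idea and is not the allocator's code.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical small size classes (bytes); not jemalloc's real table. */
    static const size_t size_classes[] = {8, 16, 32, 48, 64, 80, 96, 128};
    #define NCLASSES (sizeof(size_classes) / sizeof(size_classes[0]))

    /* Map a request to the index of the smallest class that fits it. */
    static int
    size2index(size_t size) {
        for (size_t i = 0; i < NCLASSES; i++) {
            if (size <= size_classes[i]) {
                return (int)i;
            }
        }
        return -1;  /* Too large for the "small" classes. */
    }

    /* True if a resize from oldsize to newsize can stay in place. */
    static bool
    can_resize_in_place(size_t oldsize, size_t newsize) {
        int oldind = size2index(oldsize);
        int newind = size2index(newsize);
        return oldind >= 0 && newind >= 0 && oldind == newind;
    }

    int
    main(void) {
        assert(can_resize_in_place(40, 48));    /* Both land in the 48-byte class. */
        assert(!can_resize_in_place(48, 64));   /* Crosses into the next class. */
        puts("ok");
        return 0;
    }

The real function layers more conditions on top (the extra clamp, the large-class handoff, and the shrink guard), but the same-class comparison is the core of the in-place fast path.
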
-dss_prec_t
-arena_dss_prec_get(arena_t *arena) {
- return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
-}
+dss_prec_t
+arena_dss_prec_get(arena_t *arena) {
+ return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
+}
-bool
-arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
- if (!have_dss) {
- return (dss_prec != dss_prec_disabled);
+bool
+arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
+ if (!have_dss) {
+ return (dss_prec != dss_prec_disabled);
}
- atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
- return false;
-}
+ atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
+ return false;
+}
-ssize_t
-arena_dirty_decay_ms_default_get(void) {
- return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
+ssize_t
+arena_dirty_decay_ms_default_get(void) {
+ return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}
-bool
-arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
- if (!arena_decay_ms_valid(decay_ms)) {
- return true;
+bool
+arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
}
- atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
- return false;
+ atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
+ return false;
}
-ssize_t
-arena_muzzy_decay_ms_default_get(void) {
- return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
-}
+ssize_t
+arena_muzzy_decay_ms_default_get(void) {
+ return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
+}
-bool
-arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
- if (!arena_decay_ms_valid(decay_ms)) {
- return true;
+bool
+arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
}
- atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
- return false;
+ atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
+ return false;
}
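
The getters and setters above mix two memory orders: dss_prec is stored with release and loaded with acquire, while the decay defaults use relaxed operations, presumably because they are standalone tunables with no dependent data to publish. A small C11 sketch of the difference, with hypothetical variable names (compile with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;                 /* Ordinary data guarded by the flag. */
    static atomic_bool published = false;
    static atomic_long tunable = 10;    /* Standalone setting, no dependents. */

    static void *
    writer(void *arg) {
        (void)arg;
        payload = 42;   /* Must be visible to any reader that sees the flag. */
        atomic_store_explicit(&published, true, memory_order_release);
        /* A lone tunable carries no ordering obligation: relaxed is enough. */
        atomic_store_explicit(&tunable, 20, memory_order_relaxed);
        return NULL;
    }

    static void *
    reader(void *arg) {
        (void)arg;
        if (atomic_load_explicit(&published, memory_order_acquire)) {
            printf("payload=%d\n", payload);    /* Prints 42 if the flag was seen. */
        }
        printf("tunable=%ld\n",
            atomic_load_explicit(&tunable, memory_order_relaxed));
        return NULL;
    }

    int
    main(void) {
        pthread_t w, r;
        pthread_create(&w, NULL, writer, NULL);
        pthread_create(&r, NULL, reader, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
    }

Relaxed operations still give atomicity and eventual visibility; acquire/release additionally orders the surrounding accesses, which matters when the flag guards other state.
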
bool
-arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
- size_t *new_limit) {
- assert(opt_retain);
-
- pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
- if (new_limit != NULL) {
- size_t limit = *new_limit;
- /* Grow no more than the new limit. */
- if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
- return true;
+arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
+ size_t *new_limit) {
+ assert(opt_retain);
+
+ pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
+ if (new_limit != NULL) {
+ size_t limit = *new_limit;
+ /* Grow no more than the new limit. */
+ if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
+ return true;
}
}
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
- if (old_limit != NULL) {
- *old_limit = sz_pind2sz(arena->retain_grow_limit);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
+ if (old_limit != NULL) {
+ *old_limit = sz_pind2sz(arena->retain_grow_limit);
}
- if (new_limit != NULL) {
- arena->retain_grow_limit = new_ind;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
+ if (new_limit != NULL) {
+ arena->retain_grow_limit = new_ind;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
- return false;
-}
+ return false;
+}
-unsigned
-arena_nthreads_get(arena_t *arena, bool internal) {
- return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
+unsigned
+arena_nthreads_get(arena_t *arena, bool internal) {
+ return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}
-void
-arena_nthreads_inc(arena_t *arena, bool internal) {
- atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
+void
+arena_nthreads_inc(arena_t *arena, bool internal) {
+ atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}
void
-arena_nthreads_dec(arena_t *arena, bool internal) {
- atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
-}
+arena_nthreads_dec(arena_t *arena, bool internal) {
+ atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
+}
-size_t
-arena_extent_sn_next(arena_t *arena) {
- return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
+size_t
+arena_extent_sn_next(arena_t *arena) {
+ return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}
-arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
- arena_t *arena;
- base_t *base;
+arena_t *
+arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+ arena_t *arena;
+ base_t *base;
unsigned i;
- if (ind == 0) {
- base = b0get();
- } else {
- base = base_new(tsdn, ind, extent_hooks);
- if (base == NULL) {
- return NULL;
- }
- }
-
- unsigned nbins_total = 0;
- for (i = 0; i < SC_NBINS; i++) {
- nbins_total += bin_infos[i].n_shards;
- }
- size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
- arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
- if (arena == NULL) {
- goto label_error;
- }
-
- atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
- atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
- arena->last_thd = NULL;
-
- if (config_stats) {
- if (arena_stats_init(tsdn, &arena->stats)) {
- goto label_error;
- }
-
- ql_new(&arena->tcache_ql);
- ql_new(&arena->cache_bin_array_descriptor_ql);
- if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
- WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
- goto label_error;
+ if (ind == 0) {
+ base = b0get();
+ } else {
+ base = base_new(tsdn, ind, extent_hooks);
+ if (base == NULL) {
+ return NULL;
+ }
+ }
+
+ unsigned nbins_total = 0;
+ for (i = 0; i < SC_NBINS; i++) {
+ nbins_total += bin_infos[i].n_shards;
+ }
+ size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
+ arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
+ if (arena == NULL) {
+ goto label_error;
+ }
+
+ atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
+ atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
+ arena->last_thd = NULL;
+
+ if (config_stats) {
+ if (arena_stats_init(tsdn, &arena->stats)) {
+ goto label_error;
}
- }
-
- if (config_prof) {
- if (prof_accum_init(tsdn, &arena->prof_accum)) {
- goto label_error;
- }
- }
-
- if (config_cache_oblivious) {
- /*
- * A nondeterministic seed based on the address of arena reduces
- * the likelihood of lockstep non-uniform cache index
- * utilization among identical concurrent processes, but at the
- * cost of test repeatability. For debug builds, instead use a
- * deterministic seed.
- */
- atomic_store_zu(&arena->offset_state, config_debug ? ind :
- (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
- }
-
- atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
-
- atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
- ATOMIC_RELAXED);
-
- atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
-
- extent_list_init(&arena->large);
- if (malloc_mutex_init(&arena->large_mtx, "arena_large",
- WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
- goto label_error;
- }
-
- /*
- * Delay coalescing for dirty extents despite the disruptive effect on
- * memory layout for best-fit extent allocation, since cached extents
- * are likely to be reused soon after deallocation, and the cost of
- * merging/splitting extents is non-trivial.
- */
- if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
- true)) {
- goto label_error;
- }
- /*
- * Coalesce muzzy extents immediately, because operations on them are in
- * the critical path much less often than for dirty extents.
- */
- if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
- false)) {
- goto label_error;
- }
- /*
- * Coalesce retained extents immediately, in part because they will
- * never be evicted (and therefore there's no opportunity for delayed
- * coalescing), but also because operations on retained extents are not
- * in the critical path.
- */
- if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
- false)) {
- goto label_error;
- }
-
- if (arena_decay_init(&arena->decay_dirty,
- arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
- goto label_error;
- }
- if (arena_decay_init(&arena->decay_muzzy,
- arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
- goto label_error;
- }
-
- arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
- arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
- if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
- WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
- goto label_error;
- }
-
- extent_avail_new(&arena->extent_avail);
- if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
- WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
- goto label_error;
- }
+
+ ql_new(&arena->tcache_ql);
+ ql_new(&arena->cache_bin_array_descriptor_ql);
+ if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
+ WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
+ }
+
+ if (config_prof) {
+ if (prof_accum_init(tsdn, &arena->prof_accum)) {
+ goto label_error;
+ }
+ }
+
+ if (config_cache_oblivious) {
+ /*
+ * A nondeterministic seed based on the address of arena reduces
+ * the likelihood of lockstep non-uniform cache index
+ * utilization among identical concurrent processes, but at the
+ * cost of test repeatability. For debug builds, instead use a
+ * deterministic seed.
+ */
+ atomic_store_zu(&arena->offset_state, config_debug ? ind :
+ (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
+ }
+
+ atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
+
+ atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
+ ATOMIC_RELAXED);
+
+ atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
+
+ extent_list_init(&arena->large);
+ if (malloc_mutex_init(&arena->large_mtx, "arena_large",
+ WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
+
+ /*
+ * Delay coalescing for dirty extents despite the disruptive effect on
+ * memory layout for best-fit extent allocation, since cached extents
+ * are likely to be reused soon after deallocation, and the cost of
+ * merging/splitting extents is non-trivial.
+ */
+ if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
+ true)) {
+ goto label_error;
+ }
+ /*
+ * Coalesce muzzy extents immediately, because operations on them are in
+ * the critical path much less often than for dirty extents.
+ */
+ if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
+ false)) {
+ goto label_error;
+ }
+ /*
+ * Coalesce retained extents immediately, in part because they will
+ * never be evicted (and therefore there's no opportunity for delayed
+ * coalescing), but also because operations on retained extents are not
+ * in the critical path.
+ */
+ if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
+ false)) {
+ goto label_error;
+ }
+
+ if (arena_decay_init(&arena->decay_dirty,
+ arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
+ goto label_error;
+ }
+ if (arena_decay_init(&arena->decay_muzzy,
+ arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
+ goto label_error;
+ }
+
+ arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
+ arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
+ if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
+ WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
+
+ extent_avail_new(&arena->extent_avail);
+ if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
+ WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
/* Initialize bins. */
- uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
- atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
- for (i = 0; i < SC_NBINS; i++) {
- unsigned nshards = bin_infos[i].n_shards;
- arena->bins[i].bin_shards = (bin_t *)bin_addr;
- bin_addr += nshards * sizeof(bin_t);
- for (unsigned j = 0; j < nshards; j++) {
- bool err = bin_init(&arena->bins[i].bin_shards[j]);
- if (err) {
- goto label_error;
- }
- }
- }
- assert(bin_addr == (uintptr_t)arena + arena_size);
-
- arena->base = base;
- /* Set arena before creating background threads. */
- arena_set(ind, arena);
-
- nstime_init(&arena->create_time, 0);
- nstime_update(&arena->create_time);
-
- /* We don't support reentrancy for arena 0 bootstrapping. */
- if (ind != 0) {
- /*
- * If we're here, then arena 0 already exists, so bootstrapping
- * is done enough that we should have tsd.
- */
- assert(!tsdn_null(tsdn));
- pre_reentrancy(tsdn_tsd(tsdn), arena);
- if (test_hooks_arena_new_hook) {
- test_hooks_arena_new_hook();
- }
- post_reentrancy(tsdn_tsd(tsdn));
- }
-
- return arena;
-label_error:
- if (ind != 0) {
- base_delete(tsdn, base);
- }
- return NULL;
-}
-
-arena_t *
-arena_choose_huge(tsd_t *tsd) {
- /* huge_arena_ind can be 0 during init (will use a0). */
- if (huge_arena_ind == 0) {
- assert(!malloc_initialized());
- }
-
- arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false);
- if (huge_arena == NULL) {
- /* Create the huge arena on demand. */
- assert(huge_arena_ind != 0);
- huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true);
- if (huge_arena == NULL) {
- return NULL;
+ uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
+ atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
+ for (i = 0; i < SC_NBINS; i++) {
+ unsigned nshards = bin_infos[i].n_shards;
+ arena->bins[i].bin_shards = (bin_t *)bin_addr;
+ bin_addr += nshards * sizeof(bin_t);
+ for (unsigned j = 0; j < nshards; j++) {
+ bool err = bin_init(&arena->bins[i].bin_shards[j]);
+ if (err) {
+ goto label_error;
+ }
+ }
+ }
+ assert(bin_addr == (uintptr_t)arena + arena_size);
+
+ arena->base = base;
+ /* Set arena before creating background threads. */
+ arena_set(ind, arena);
+
+ nstime_init(&arena->create_time, 0);
+ nstime_update(&arena->create_time);
+
+ /* We don't support reentrancy for arena 0 bootstrapping. */
+ if (ind != 0) {
+ /*
+ * If we're here, then arena 0 already exists, so bootstrapping
+ * is done enough that we should have tsd.
+ */
+ assert(!tsdn_null(tsdn));
+ pre_reentrancy(tsdn_tsd(tsdn), arena);
+ if (test_hooks_arena_new_hook) {
+ test_hooks_arena_new_hook();
}
+ post_reentrancy(tsdn_tsd(tsdn));
+ }
+
+ return arena;
+label_error:
+ if (ind != 0) {
+ base_delete(tsdn, base);
+ }
+ return NULL;
+}
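
arena_new sizes a single base allocation as the arena header plus every bin shard, then carves the shards off the tail by advancing a pointer and asserts that the cursor lands exactly at the end. The same carving pattern reduced to one malloc, with hypothetical stub types:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { unsigned long ndalloc; } bin_stub;    /* Hypothetical shard. */
    typedef struct {
        unsigned nbins_total;
        bin_stub *bins;     /* Points into the same allocation, after the header. */
    } arena_stub;

    int
    main(void) {
        unsigned nbins_total = 12;
        size_t total = sizeof(arena_stub) + sizeof(bin_stub) * nbins_total;

        arena_stub *arena = malloc(total);
        if (arena == NULL) {
            return 1;
        }
        arena->nbins_total = nbins_total;

        /* Carve the shard array off the tail of the same allocation. */
        uintptr_t addr = (uintptr_t)arena + sizeof(arena_stub);
        arena->bins = (bin_stub *)addr;
        addr += sizeof(bin_stub) * nbins_total;
        assert(addr == (uintptr_t)arena + total);

        for (unsigned i = 0; i < nbins_total; i++) {
            arena->bins[i].ndalloc = 0;
        }
        printf("arena uses %zu bytes for %u shards\n", total, nbins_total);
        free(arena);
        return 0;
    }
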
+
+arena_t *
+arena_choose_huge(tsd_t *tsd) {
+ /* huge_arena_ind can be 0 during init (will use a0). */
+ if (huge_arena_ind == 0) {
+ assert(!malloc_initialized());
+ }
+
+ arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false);
+ if (huge_arena == NULL) {
+ /* Create the huge arena on demand. */
+ assert(huge_arena_ind != 0);
+ huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true);
+ if (huge_arena == NULL) {
+ return NULL;
+ }
/*
- * Purge eagerly for huge allocations, because: 1) number of
- * huge allocations is usually small, which means ticker based
- * decay is not reliable; and 2) less immediate reuse is
- * expected for huge allocations.
+ * Purge eagerly for huge allocations, because: 1) number of
+ * huge allocations is usually small, which means ticker based
+ * decay is not reliable; and 2) less immediate reuse is
+ * expected for huge allocations.
*/
- if (arena_dirty_decay_ms_default_get() > 0) {
- arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+ if (arena_dirty_decay_ms_default_get() > 0) {
+ arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
}
- if (arena_muzzy_decay_ms_default_get() > 0) {
- arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
- }
- }
-
- return huge_arena;
-}
-
-bool
-arena_init_huge(void) {
- bool huge_enabled;
-
- /* The threshold should be large size class. */
- if (opt_oversize_threshold > SC_LARGE_MAXCLASS ||
- opt_oversize_threshold < SC_LARGE_MINCLASS) {
- opt_oversize_threshold = 0;
- oversize_threshold = SC_LARGE_MAXCLASS + PAGE;
- huge_enabled = false;
- } else {
- /* Reserve the index for the huge arena. */
- huge_arena_ind = narenas_total_get();
- oversize_threshold = opt_oversize_threshold;
- huge_enabled = true;
- }
-
- return huge_enabled;
-}
-
-bool
-arena_is_huge(unsigned arena_ind) {
- if (huge_arena_ind == 0) {
- return false;
- }
- return (arena_ind == huge_arena_ind);
-}
-
-void
-arena_boot(sc_data_t *sc_data) {
- arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
- arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
- for (unsigned i = 0; i < SC_NBINS; i++) {
- sc_t *sc = &sc_data->sc[i];
- div_init(&arena_binind_div_info[i],
- (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
- }
+ if (arena_muzzy_decay_ms_default_get() > 0) {
+ arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+ }
+ }
+
+ return huge_arena;
+}
+
+bool
+arena_init_huge(void) {
+ bool huge_enabled;
+
+ /* The threshold should be large size class. */
+ if (opt_oversize_threshold > SC_LARGE_MAXCLASS ||
+ opt_oversize_threshold < SC_LARGE_MINCLASS) {
+ opt_oversize_threshold = 0;
+ oversize_threshold = SC_LARGE_MAXCLASS + PAGE;
+ huge_enabled = false;
+ } else {
+ /* Reserve the index for the huge arena. */
+ huge_arena_ind = narenas_total_get();
+ oversize_threshold = opt_oversize_threshold;
+ huge_enabled = true;
+ }
+
+ return huge_enabled;
+}
+
+bool
+arena_is_huge(unsigned arena_ind) {
+ if (huge_arena_ind == 0) {
+ return false;
+ }
+ return (arena_ind == huge_arena_ind);
+}
+
+void
+arena_boot(sc_data_t *sc_data) {
+ arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
+ arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
+ for (unsigned i = 0; i < SC_NBINS; i++) {
+ sc_t *sc = &sc_data->sc[i];
+ div_init(&arena_binind_div_info[i],
+ (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
+ }
}
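
arena_boot precomputes a div_info per small bin so that turning an intra-slab offset into a region index never needs a hardware divide. The sketch below shows the general reciprocal-multiplication idea for exact multiples, with a hypothetical region size; jemalloc's div.c differs in its exact constants and bounds.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* For a fixed divisor d, precompute magic = ceil(2^32 / d). Then, for any
     * n that is an exact multiple of d with n < 2^32,
     * (n * magic) >> 32 == n / d. */
    typedef struct {
        uint64_t magic;
        uint32_t d;
    } div_info;

    static div_info
    div_precompute(uint32_t d) {
        div_info info;
        info.d = d;
        info.magic = ((1ULL << 32) + d - 1) / d;    /* ceil(2^32 / d) */
        return info;
    }

    static uint32_t
    div_exact(const div_info *info, uint32_t n) {
        return (uint32_t)(((uint64_t)n * info->magic) >> 32);
    }

    int
    main(void) {
        uint32_t region_size = 48;  /* Hypothetical bin region size. */
        div_info info = div_precompute(region_size);
        /* Verify against real division for the first few thousand regions. */
        for (uint32_t k = 0; k < 10000; k++) {
            uint32_t offset = k * region_size;
            assert(div_exact(&info, offset) == offset / region_size);
        }
        puts("ok");
        return 0;
    }
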
void
-arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
-}
+arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
+}
-void
-arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
- if (config_stats) {
- malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
+void
+arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
+ if (config_stats) {
+ malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
}
-}
+}
-void
-arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
-}
-
-void
-arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
- extents_prefork(tsdn, &arena->extents_dirty);
- extents_prefork(tsdn, &arena->extents_muzzy);
- extents_prefork(tsdn, &arena->extents_retained);
-}
+void
+arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
+}
-void
-arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
+void
+arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
+ extents_prefork(tsdn, &arena->extents_dirty);
+ extents_prefork(tsdn, &arena->extents_muzzy);
+ extents_prefork(tsdn, &arena->extents_retained);
}
void
-arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
- base_prefork(tsdn, arena->base);
-}
+arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
+}
-void
-arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->large_mtx);
+void
+arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
+ base_prefork(tsdn, arena->base);
}
void
-arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
- for (unsigned i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
- }
- }
-}
-
-void
-arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
+arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->large_mtx);
+}
+
+void
+arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
+ for (unsigned i = 0; i < SC_NBINS; i++) {
+ for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+ bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
+ }
+ }
+}
+
+void
+arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
- for (i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_postfork_parent(tsdn,
- &arena->bins[i].bin_shards[j]);
- }
- }
- malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
- base_postfork_parent(tsdn, arena->base);
- malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
- extents_postfork_parent(tsdn, &arena->extents_dirty);
- extents_postfork_parent(tsdn, &arena->extents_muzzy);
- extents_postfork_parent(tsdn, &arena->extents_retained);
- malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
- if (config_stats) {
- malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
- }
+ for (i = 0; i < SC_NBINS; i++) {
+ for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+ bin_postfork_parent(tsdn,
+ &arena->bins[i].bin_shards[j]);
+ }
+ }
+ malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
+ base_postfork_parent(tsdn, arena->base);
+ malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
+ extents_postfork_parent(tsdn, &arena->extents_dirty);
+ extents_postfork_parent(tsdn, &arena->extents_muzzy);
+ extents_postfork_parent(tsdn, &arena->extents_retained);
+ malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
+ if (config_stats) {
+ malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
+ }
}
void
-arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
+arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
- atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
- atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
- if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
- arena_nthreads_inc(arena, false);
- }
- if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
- arena_nthreads_inc(arena, true);
- }
- if (config_stats) {
- ql_new(&arena->tcache_ql);
- ql_new(&arena->cache_bin_array_descriptor_ql);
- tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
- if (tcache != NULL && tcache->arena == arena) {
- ql_elm_new(tcache, link);
- ql_tail_insert(&arena->tcache_ql, tcache, link);
- cache_bin_array_descriptor_init(
- &tcache->cache_bin_array_descriptor,
- tcache->bins_small, tcache->bins_large);
- ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
- &tcache->cache_bin_array_descriptor, link);
- }
- }
-
- for (i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
- }
- }
- malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
- base_postfork_child(tsdn, arena->base);
- malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
- extents_postfork_child(tsdn, &arena->extents_dirty);
- extents_postfork_child(tsdn, &arena->extents_muzzy);
- extents_postfork_child(tsdn, &arena->extents_retained);
- malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
- malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
- if (config_stats) {
- malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
- }
+ atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
+ atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
+ if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
+ arena_nthreads_inc(arena, false);
+ }
+ if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
+ arena_nthreads_inc(arena, true);
+ }
+ if (config_stats) {
+ ql_new(&arena->tcache_ql);
+ ql_new(&arena->cache_bin_array_descriptor_ql);
+ tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
+ if (tcache != NULL && tcache->arena == arena) {
+ ql_elm_new(tcache, link);
+ ql_tail_insert(&arena->tcache_ql, tcache, link);
+ cache_bin_array_descriptor_init(
+ &tcache->cache_bin_array_descriptor,
+ tcache->bins_small, tcache->bins_large);
+ ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
+ &tcache->cache_bin_array_descriptor, link);
+ }
+ }
+
+ for (i = 0; i < SC_NBINS; i++) {
+ for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+ bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
+ }
+ }
+ malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
+ base_postfork_child(tsdn, arena->base);
+ malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
+ extents_postfork_child(tsdn, &arena->extents_dirty);
+ extents_postfork_child(tsdn, &arena->extents_muzzy);
+ extents_postfork_child(tsdn, &arena->extents_retained);
+ malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
+ if (config_stats) {
+ malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
+ }
}
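
The prefork/postfork functions above follow the usual fork-safety recipe: take every allocator mutex in a fixed rank order before fork(), release them in the parent, and reinitialize them in the child, where only the forking thread survives. A minimal pthread_atfork sketch of the same recipe with two hypothetical locks (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pthread_mutex_t lock_hi = PTHREAD_MUTEX_INITIALIZER;    /* Acquired first. */
    static pthread_mutex_t lock_lo = PTHREAD_MUTEX_INITIALIZER;    /* Acquired second. */

    static void
    prefork(void) {
        /* Take every lock in rank order so no other thread holds one across fork. */
        pthread_mutex_lock(&lock_hi);
        pthread_mutex_lock(&lock_lo);
    }

    static void
    postfork_parent(void) {
        /* Parent still has live threads: just release in reverse order. */
        pthread_mutex_unlock(&lock_lo);
        pthread_mutex_unlock(&lock_hi);
    }

    static void
    postfork_child(void) {
        /* Child has a single thread: reinitialize rather than unlock. */
        pthread_mutex_init(&lock_lo, NULL);
        pthread_mutex_init(&lock_hi, NULL);
    }

    int
    main(void) {
        pthread_atfork(prefork, postfork_parent, postfork_child);
        pid_t pid = fork();
        if (pid == 0) {
            /* Locks are usable immediately in the child. */
            pthread_mutex_lock(&lock_hi);
            pthread_mutex_unlock(&lock_hi);
            _exit(0);
        }
        waitpid(pid, NULL, 0);
        puts("ok");
        return 0;
    }
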
diff --git a/contrib/libs/jemalloc/src/background_thread.c b/contrib/libs/jemalloc/src/background_thread.c
index 57b9b256bb..fc412d3b48 100644
--- a/contrib/libs/jemalloc/src/background_thread.c
+++ b/contrib/libs/jemalloc/src/background_thread.c
@@ -1,939 +1,939 @@
-#define JEMALLOC_BACKGROUND_THREAD_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-
-JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-
-/******************************************************************************/
-/* Data. */
-
-/* This option should be opt-in only. */
-#define BACKGROUND_THREAD_DEFAULT false
-/* Read-only after initialization. */
-bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
-size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1;
-
-/* Used for thread creation, termination and stats. */
-malloc_mutex_t background_thread_lock;
-/* Indicates global state. Atomic because decay reads this w/o locking. */
-atomic_b_t background_thread_enabled_state;
-size_t n_background_threads;
-size_t max_background_threads;
-/* Thread info per-index. */
-background_thread_info_t *background_thread_info;
-
-/******************************************************************************/
-
-#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
-
-static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
- void *(*)(void *), void *__restrict);
-
-static void
-pthread_create_wrapper_init(void) {
-#ifdef JEMALLOC_LAZY_LOCK
- if (!isthreaded) {
- isthreaded = true;
- }
-#endif
-}
-
-int
-pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
- void *(*start_routine)(void *), void *__restrict arg) {
- pthread_create_wrapper_init();
-
- return pthread_create_fptr(thread, attr, start_routine, arg);
-}
-#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
-
-#ifndef JEMALLOC_BACKGROUND_THREAD
-#define NOT_REACHED { not_reached(); }
-bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
-bool background_threads_enable(tsd_t *tsd) NOT_REACHED
-bool background_threads_disable(tsd_t *tsd) NOT_REACHED
-void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, size_t npages_new) NOT_REACHED
-void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
-void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
-void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
-void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
-bool background_thread_stats_read(tsdn_t *tsdn,
- background_thread_stats_t *stats) NOT_REACHED
-void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
-#undef NOT_REACHED
-#else
-
-static bool background_thread_enabled_at_fork;
-
-static void
-background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
- background_thread_wakeup_time_set(tsdn, info, 0);
- info->npages_to_purge_new = 0;
- if (config_stats) {
- info->tot_n_runs = 0;
- nstime_init(&info->tot_sleep_time, 0);
- }
-}
-
-static inline bool
-set_current_thread_affinity(int cpu) {
-#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
- cpu_set_t cpuset;
- CPU_ZERO(&cpuset);
- CPU_SET(cpu, &cpuset);
- int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
-
- return (ret != 0);
-#else
- return false;
-#endif
-}
-
-/* Threshold for determining when to wake up the background thread. */
-#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
-#define BILLION UINT64_C(1000000000)
-/* Minimal sleep interval 100 ms. */
-#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
-
-static inline size_t
-decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
- size_t i;
- uint64_t sum = 0;
- for (i = 0; i < interval; i++) {
- sum += decay->backlog[i] * h_steps[i];
- }
- for (; i < SMOOTHSTEP_NSTEPS; i++) {
- sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
- }
-
- return (size_t)(sum >> SMOOTHSTEP_BFP);
-}
-
-static uint64_t
-arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
- extents_t *extents) {
- if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
- /* Use minimal interval if decay is contended. */
- return BACKGROUND_THREAD_MIN_INTERVAL_NS;
- }
-
- uint64_t interval;
- ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
- if (decay_time <= 0) {
- /* Purging is eagerly done or disabled currently. */
- interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
- goto label_done;
- }
-
- uint64_t decay_interval_ns = nstime_ns(&decay->interval);
- assert(decay_interval_ns > 0);
- size_t npages = extents_npages_get(extents);
- if (npages == 0) {
- unsigned i;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
- if (decay->backlog[i] > 0) {
- break;
- }
- }
- if (i == SMOOTHSTEP_NSTEPS) {
- /* No dirty pages recorded. Sleep indefinitely. */
- interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
- goto label_done;
- }
- }
- if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- /* Use max interval. */
- interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
- goto label_done;
- }
-
- size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
- size_t ub = SMOOTHSTEP_NSTEPS;
- /* Minimal 2 intervals to ensure reaching next epoch deadline. */
- lb = (lb < 2) ? 2 : lb;
- if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
- (lb + 2 > ub)) {
- interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
- goto label_done;
- }
-
- assert(lb + 2 <= ub);
- size_t npurge_lb, npurge_ub;
- npurge_lb = decay_npurge_after_interval(decay, lb);
- if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- interval = decay_interval_ns * lb;
- goto label_done;
- }
- npurge_ub = decay_npurge_after_interval(decay, ub);
- if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- interval = decay_interval_ns * ub;
- goto label_done;
- }
-
- unsigned n_search = 0;
- size_t target, npurge;
- while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
- && (lb + 2 < ub)) {
- target = (lb + ub) / 2;
- npurge = decay_npurge_after_interval(decay, target);
- if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- ub = target;
- npurge_ub = npurge;
- } else {
- lb = target;
- npurge_lb = npurge;
- }
- assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
- }
- interval = decay_interval_ns * (ub + lb) / 2;
-label_done:
- interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
- BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- return interval;
-}
-
-/* Compute purge interval for background threads. */
-static uint64_t
-arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
- uint64_t i1, i2;
- i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
- &arena->extents_dirty);
- if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
- return i1;
- }
- i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
- &arena->extents_muzzy);
-
- return i1 < i2 ? i1 : i2;
-}
-
-static void
-background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
- uint64_t interval) {
- if (config_stats) {
- info->tot_n_runs++;
- }
- info->npages_to_purge_new = 0;
-
- struct timeval tv;
- /* Specific clock required by timedwait. */
- gettimeofday(&tv, NULL);
- nstime_t before_sleep;
- nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);
-
- int ret;
- if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
- assert(background_thread_indefinite_sleep(info));
- ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
- assert(ret == 0);
- } else {
- assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
- interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
- /* We need malloc clock (can be different from tv). */
- nstime_t next_wakeup;
- nstime_init(&next_wakeup, 0);
- nstime_update(&next_wakeup);
- nstime_iadd(&next_wakeup, interval);
- assert(nstime_ns(&next_wakeup) <
- BACKGROUND_THREAD_INDEFINITE_SLEEP);
- background_thread_wakeup_time_set(tsdn, info,
- nstime_ns(&next_wakeup));
-
- nstime_t ts_wakeup;
- nstime_copy(&ts_wakeup, &before_sleep);
- nstime_iadd(&ts_wakeup, interval);
- struct timespec ts;
- ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
- ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);
-
- assert(!background_thread_indefinite_sleep(info));
- ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
- assert(ret == ETIMEDOUT || ret == 0);
- background_thread_wakeup_time_set(tsdn, info,
- BACKGROUND_THREAD_INDEFINITE_SLEEP);
- }
- if (config_stats) {
- gettimeofday(&tv, NULL);
- nstime_t after_sleep;
- nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
- if (nstime_compare(&after_sleep, &before_sleep) > 0) {
- nstime_subtract(&after_sleep, &before_sleep);
- nstime_add(&info->tot_sleep_time, &after_sleep);
- }
- }
-}
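
background_thread_sleep has to turn a relative interval into the absolute deadline that pthread_cond_timedwait expects, using the realtime clock that default condition variables are tied to (hence the gettimeofday call). A stripped-down sketch of that conversion (compile with -pthread):

    #include <errno.h>
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/time.h>
    #include <time.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

    /* Sleep on cond for up to interval_ns, unless signaled earlier. */
    static void
    sleep_interval(uint64_t interval_ns) {
        struct timeval tv;
        gettimeofday(&tv, NULL);    /* timedwait deadlines are absolute. */

        uint64_t deadline_ns = (uint64_t)tv.tv_sec * 1000000000ULL +
            (uint64_t)tv.tv_usec * 1000ULL + interval_ns;

        struct timespec ts;
        ts.tv_sec = (time_t)(deadline_ns / 1000000000ULL);
        ts.tv_nsec = (long)(deadline_ns % 1000000000ULL);

        pthread_mutex_lock(&mtx);
        int ret = pthread_cond_timedwait(&cond, &mtx, &ts);
        pthread_mutex_unlock(&mtx);
        printf("woke up: %s\n", ret == ETIMEDOUT ? "timeout" : "signaled");
    }

    int
    main(void) {
        sleep_interval(100ULL * 1000 * 1000);   /* 100 ms, the minimal interval above. */
        return 0;
    }
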
-
-static bool
-background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
- if (unlikely(info->state == background_thread_paused)) {
- malloc_mutex_unlock(tsdn, &info->mtx);
- /* Wait on global lock to update status. */
- malloc_mutex_lock(tsdn, &background_thread_lock);
- malloc_mutex_unlock(tsdn, &background_thread_lock);
- malloc_mutex_lock(tsdn, &info->mtx);
- return true;
- }
-
- return false;
-}
-
-static inline void
-background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) {
- uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
- unsigned narenas = narenas_total_get();
-
- for (unsigned i = ind; i < narenas; i += max_background_threads) {
- arena_t *arena = arena_get(tsdn, i, false);
- if (!arena) {
- continue;
- }
- arena_decay(tsdn, arena, true, false);
- if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
- /* Min interval will be used. */
- continue;
- }
- uint64_t interval = arena_decay_compute_purge_interval(tsdn,
- arena);
- assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
- if (min_interval > interval) {
- min_interval = interval;
- }
- }
- background_thread_sleep(tsdn, info, min_interval);
-}
-
-static bool
-background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
- if (info == &background_thread_info[0]) {
- malloc_mutex_assert_owner(tsd_tsdn(tsd),
- &background_thread_lock);
- } else {
- malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
- &background_thread_lock);
- }
-
- pre_reentrancy(tsd, NULL);
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- bool has_thread;
- assert(info->state != background_thread_paused);
- if (info->state == background_thread_started) {
- has_thread = true;
- info->state = background_thread_stopped;
- pthread_cond_signal(&info->cond);
- } else {
- has_thread = false;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
-
- if (!has_thread) {
- post_reentrancy(tsd);
- return false;
- }
- void *ret;
- if (pthread_join(info->thread, &ret)) {
- post_reentrancy(tsd);
- return true;
- }
- assert(ret == NULL);
- n_background_threads--;
- post_reentrancy(tsd);
-
- return false;
-}
-
-static void *background_thread_entry(void *ind_arg);
-
-static int
-background_thread_create_signals_masked(pthread_t *thread,
- const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
- /*
- * Mask signals during thread creation so that the thread inherits
- * an empty signal set.
- */
- sigset_t set;
- sigfillset(&set);
- sigset_t oldset;
- int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
- if (mask_err != 0) {
- return mask_err;
- }
- int create_err = pthread_create_wrapper(thread, attr, start_routine,
- arg);
- /*
- * Restore the signal mask. Failure to restore the signal mask here
- * changes program behavior.
- */
- int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
- if (restore_err != 0) {
- malloc_printf("<jemalloc>: background thread creation "
- "failed (%d), and signal mask restoration failed "
- "(%d)\n", create_err, restore_err);
- if (opt_abort) {
- abort();
- }
- }
- return create_err;
-}
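
Masking all signals around pthread_create, as background_thread_create_signals_masked does, is the portable way to hand a new thread an empty signal mask, since a thread inherits its creator's mask at creation time. A self-contained sketch of the same sequence (compile with -pthread):

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>

    static void *
    worker(void *arg) {
        (void)arg;
        /* Runs with every blockable signal masked, inherited from creation. */
        return NULL;
    }

    int
    main(void) {
        sigset_t all, old;
        sigfillset(&all);

        /* Block everything, create the thread, then restore our own mask. */
        if (pthread_sigmask(SIG_SETMASK, &all, &old) != 0) {
            return 1;
        }
        pthread_t thd;
        int err = pthread_create(&thd, NULL, worker, NULL);
        pthread_sigmask(SIG_SETMASK, &old, NULL);

        if (err != 0) {
            fprintf(stderr, "pthread_create failed (%d)\n", err);
            return 1;
        }
        pthread_join(thd, NULL);
        puts("ok");
        return 0;
    }
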
-
-static bool
-check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
- bool *created_threads) {
- bool ret = false;
- if (likely(*n_created == n_background_threads)) {
- return ret;
- }
-
- tsdn_t *tsdn = tsd_tsdn(tsd);
- malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
- for (unsigned i = 1; i < max_background_threads; i++) {
- if (created_threads[i]) {
- continue;
- }
- background_thread_info_t *info = &background_thread_info[i];
- malloc_mutex_lock(tsdn, &info->mtx);
- /*
- * In case of the background_thread_paused state because of
- * arena reset, delay the creation.
- */
- bool create = (info->state == background_thread_started);
- malloc_mutex_unlock(tsdn, &info->mtx);
- if (!create) {
- continue;
- }
-
- pre_reentrancy(tsd, NULL);
- int err = background_thread_create_signals_masked(&info->thread,
- NULL, background_thread_entry, (void *)(uintptr_t)i);
- post_reentrancy(tsd);
-
- if (err == 0) {
- (*n_created)++;
- created_threads[i] = true;
- } else {
- malloc_printf("<jemalloc>: background thread "
- "creation failed (%d)\n", err);
- if (opt_abort) {
- abort();
- }
- }
- /* Return to restart the loop since we unlocked. */
- ret = true;
- break;
- }
- malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);
-
- return ret;
-}
-
-static void
-background_thread0_work(tsd_t *tsd) {
- /* Thread0 is also responsible for launching / terminating threads. */
- VARIABLE_ARRAY(bool, created_threads, max_background_threads);
- unsigned i;
- for (i = 1; i < max_background_threads; i++) {
- created_threads[i] = false;
- }
- /* Start working, and create more threads when asked. */
- unsigned n_created = 1;
- while (background_thread_info[0].state != background_thread_stopped) {
- if (background_thread_pause_check(tsd_tsdn(tsd),
- &background_thread_info[0])) {
- continue;
- }
- if (check_background_thread_creation(tsd, &n_created,
- (bool *)&created_threads)) {
- continue;
- }
- background_work_sleep_once(tsd_tsdn(tsd),
- &background_thread_info[0], 0);
- }
-
- /*
- * Shut down other threads at exit. Note that the ctl thread is holding
- * the global background_thread mutex (and is waiting) for us.
- */
- assert(!background_thread_enabled());
- for (i = 1; i < max_background_threads; i++) {
- background_thread_info_t *info = &background_thread_info[i];
- assert(info->state != background_thread_paused);
- if (created_threads[i]) {
- background_threads_disable_single(tsd, info);
- } else {
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- if (info->state != background_thread_stopped) {
- /* The thread was not created. */
- assert(info->state ==
- background_thread_started);
- n_background_threads--;
- info->state = background_thread_stopped;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
- }
- }
- background_thread_info[0].state = background_thread_stopped;
- assert(n_background_threads == 1);
-}
-
-static void
-background_work(tsd_t *tsd, unsigned ind) {
- background_thread_info_t *info = &background_thread_info[ind];
-
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
- BACKGROUND_THREAD_INDEFINITE_SLEEP);
- if (ind == 0) {
- background_thread0_work(tsd);
- } else {
- while (info->state != background_thread_stopped) {
- if (background_thread_pause_check(tsd_tsdn(tsd),
- info)) {
- continue;
- }
- background_work_sleep_once(tsd_tsdn(tsd), info, ind);
- }
- }
- assert(info->state == background_thread_stopped);
- background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
-}
-
-static void *
-background_thread_entry(void *ind_arg) {
- unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
- assert(thread_ind < max_background_threads);
-#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
- pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
-#elif defined(__FreeBSD__)
- pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
-#endif
- if (opt_percpu_arena != percpu_arena_disabled) {
- set_current_thread_affinity((int)thread_ind);
- }
- /*
- * Start periodic background work. We use internal tsd which avoids
- * side effects, for example triggering new arena creation (which in
- * turn triggers another background thread creation).
- */
- background_work(tsd_internal_fetch(), thread_ind);
- assert(pthread_equal(pthread_self(),
- background_thread_info[thread_ind].thread));
-
- return NULL;
-}
-
-static void
-background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
- info->state = background_thread_started;
- background_thread_info_init(tsd_tsdn(tsd), info);
- n_background_threads++;
-}
-
-static bool
-background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
- assert(have_background_thread);
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
-
- /* We create at most NCPUs threads. */
- size_t thread_ind = arena_ind % max_background_threads;
- background_thread_info_t *info = &background_thread_info[thread_ind];
-
- bool need_new_thread;
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- need_new_thread = background_thread_enabled() &&
- (info->state == background_thread_stopped);
- if (need_new_thread) {
- background_thread_init(tsd, info);
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
- if (!need_new_thread) {
- return false;
- }
- if (arena_ind != 0) {
- /* Threads are created asynchronously by Thread 0. */
- background_thread_info_t *t0 = &background_thread_info[0];
- malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
- assert(t0->state == background_thread_started);
- pthread_cond_signal(&t0->cond);
- malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);
-
- return false;
- }
-
- pre_reentrancy(tsd, NULL);
- /*
- * To avoid complications (besides reentrancy), create internal
- * background threads with the underlying pthread_create.
- */
- int err = background_thread_create_signals_masked(&info->thread, NULL,
- background_thread_entry, (void *)thread_ind);
- post_reentrancy(tsd);
-
- if (err != 0) {
- malloc_printf("<jemalloc>: arena 0 background thread creation "
- "failed (%d)\n", err);
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- info->state = background_thread_stopped;
- n_background_threads--;
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
-
- return true;
- }
-
- return false;
-}
-
-/* Create a new background thread if needed. */
-bool
-background_thread_create(tsd_t *tsd, unsigned arena_ind) {
- assert(have_background_thread);
-
- bool ret;
- malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
- ret = background_thread_create_locked(tsd, arena_ind);
- malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
-
- return ret;
-}
-
-bool
-background_threads_enable(tsd_t *tsd) {
- assert(n_background_threads == 0);
- assert(background_thread_enabled());
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
-
- VARIABLE_ARRAY(bool, marked, max_background_threads);
- unsigned i, nmarked;
- for (i = 0; i < max_background_threads; i++) {
- marked[i] = false;
- }
- nmarked = 0;
- /* Thread 0 is required and created at the end. */
- marked[0] = true;
- /* Mark the threads we need to create for thread 0. */
- unsigned n = narenas_total_get();
- for (i = 1; i < n; i++) {
- if (marked[i % max_background_threads] ||
- arena_get(tsd_tsdn(tsd), i, false) == NULL) {
- continue;
- }
- background_thread_info_t *info = &background_thread_info[
- i % max_background_threads];
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- assert(info->state == background_thread_stopped);
- background_thread_init(tsd, info);
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
- marked[i % max_background_threads] = true;
- if (++nmarked == max_background_threads) {
- break;
- }
- }
-
- return background_thread_create_locked(tsd, 0);
-}
-
-bool
-background_threads_disable(tsd_t *tsd) {
- assert(!background_thread_enabled());
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
-
- /* Thread 0 will be responsible for terminating other threads. */
- if (background_threads_disable_single(tsd,
- &background_thread_info[0])) {
- return true;
- }
- assert(n_background_threads == 0);
-
- return false;
-}
-
-/* Check if we need to signal the background thread early. */
-void
-background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, size_t npages_new) {
- background_thread_info_t *info = arena_background_thread_info_get(
- arena);
- if (malloc_mutex_trylock(tsdn, &info->mtx)) {
- /*
- * Background thread may hold the mutex for a long period of
- * time. We'd like to avoid the variance on application
- * threads. So keep this non-blocking, and leave the work to a
- * future epoch.
- */
- return;
- }
-
- if (info->state != background_thread_started) {
- goto label_done;
- }
- if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
- goto label_done;
- }
-
- ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
- if (decay_time <= 0) {
- /* Purging is eagerly done or disabled currently. */
- goto label_done_unlock2;
- }
- uint64_t decay_interval_ns = nstime_ns(&decay->interval);
- assert(decay_interval_ns > 0);
-
- nstime_t diff;
- nstime_init(&diff, background_thread_wakeup_time_get(info));
- if (nstime_compare(&diff, &decay->epoch) <= 0) {
- goto label_done_unlock2;
- }
- nstime_subtract(&diff, &decay->epoch);
- if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
- goto label_done_unlock2;
- }
-
- if (npages_new > 0) {
- size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
- /*
- * Compute how many new pages we would need to purge by the next
- * wakeup, which is used to determine if we should signal the
- * background thread.
- */
- uint64_t npurge_new;
- if (n_epoch >= SMOOTHSTEP_NSTEPS) {
- npurge_new = npages_new;
- } else {
- uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
- assert(h_steps_max >=
- h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
- npurge_new = npages_new * (h_steps_max -
- h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
- npurge_new >>= SMOOTHSTEP_BFP;
- }
- info->npages_to_purge_new += npurge_new;
- }
-
- bool should_signal;
- if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- should_signal = true;
- } else if (unlikely(background_thread_indefinite_sleep(info)) &&
- (extents_npages_get(&arena->extents_dirty) > 0 ||
- extents_npages_get(&arena->extents_muzzy) > 0 ||
- info->npages_to_purge_new > 0)) {
- should_signal = true;
- } else {
- should_signal = false;
- }
-
- if (should_signal) {
- info->npages_to_purge_new = 0;
- pthread_cond_signal(&info->cond);
- }
-label_done_unlock2:
- malloc_mutex_unlock(tsdn, &decay->mtx);
-label_done:
- malloc_mutex_unlock(tsdn, &info->mtx);
-}
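
background_thread_interval_check uses trylock twice and simply bails out on contention, so an application thread never blocks behind the background thread just to update a purge hint. The same non-blocking pattern in isolation, with hypothetical names (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hint_mtx = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long pending_hint;  /* Hypothetical stand-in for npages_to_purge_new. */

    /* Called on a hot path: update the hint only if the lock is free right now. */
    static void
    maybe_update_hint(unsigned long delta) {
        if (pthread_mutex_trylock(&hint_mtx) != 0) {
            /* Contended: skip; a later call will carry the update instead. */
            return;
        }
        pending_hint += delta;
        pthread_mutex_unlock(&hint_mtx);
    }

    int
    main(void) {
        maybe_update_hint(128);
        printf("pending_hint=%lu\n", pending_hint);
        return 0;
    }
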
-
-void
-background_thread_prefork0(tsdn_t *tsdn) {
- malloc_mutex_prefork(tsdn, &background_thread_lock);
- background_thread_enabled_at_fork = background_thread_enabled();
-}
-
-void
-background_thread_prefork1(tsdn_t *tsdn) {
- for (unsigned i = 0; i < max_background_threads; i++) {
- malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
- }
-}
-
-void
-background_thread_postfork_parent(tsdn_t *tsdn) {
- for (unsigned i = 0; i < max_background_threads; i++) {
- malloc_mutex_postfork_parent(tsdn,
- &background_thread_info[i].mtx);
- }
- malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
-}
-
-void
-background_thread_postfork_child(tsdn_t *tsdn) {
- for (unsigned i = 0; i < max_background_threads; i++) {
- malloc_mutex_postfork_child(tsdn,
- &background_thread_info[i].mtx);
- }
- malloc_mutex_postfork_child(tsdn, &background_thread_lock);
- if (!background_thread_enabled_at_fork) {
- return;
- }
-
- /* Clear background_thread state (reset to disabled for child). */
- malloc_mutex_lock(tsdn, &background_thread_lock);
- n_background_threads = 0;
- background_thread_enabled_set(tsdn, false);
- for (unsigned i = 0; i < max_background_threads; i++) {
- background_thread_info_t *info = &background_thread_info[i];
- malloc_mutex_lock(tsdn, &info->mtx);
- info->state = background_thread_stopped;
- int ret = pthread_cond_init(&info->cond, NULL);
- assert(ret == 0);
- background_thread_info_init(tsdn, info);
- malloc_mutex_unlock(tsdn, &info->mtx);
- }
- malloc_mutex_unlock(tsdn, &background_thread_lock);
-}
-
-bool
-background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
- assert(config_stats);
- malloc_mutex_lock(tsdn, &background_thread_lock);
- if (!background_thread_enabled()) {
- malloc_mutex_unlock(tsdn, &background_thread_lock);
- return true;
- }
-
- stats->num_threads = n_background_threads;
- uint64_t num_runs = 0;
- nstime_init(&stats->run_interval, 0);
- for (unsigned i = 0; i < max_background_threads; i++) {
- background_thread_info_t *info = &background_thread_info[i];
- if (malloc_mutex_trylock(tsdn, &info->mtx)) {
- /*
- * Each background thread run may take a long time;
- * avoid waiting on the stats if the thread is active.
- */
- continue;
- }
- if (info->state != background_thread_stopped) {
- num_runs += info->tot_n_runs;
- nstime_add(&stats->run_interval, &info->tot_sleep_time);
- }
- malloc_mutex_unlock(tsdn, &info->mtx);
- }
- stats->num_runs = num_runs;
- if (num_runs > 0) {
- nstime_idivide(&stats->run_interval, num_runs);
- }
- malloc_mutex_unlock(tsdn, &background_thread_lock);
-
- return false;
-}
-
-#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
-#undef BILLION
-#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
-
-#ifdef JEMALLOC_HAVE_DLSYM
-#include <dlfcn.h>
-#endif
-
-static bool
-pthread_create_fptr_init(void) {
- if (pthread_create_fptr != NULL) {
- return false;
- }
- /*
-	 * Try the next symbol first, because 1) when lazy_lock is used we have
-	 * a wrapper for pthread_create; and 2) the application may define its
-	 * own wrapper as well (and may call malloc within the wrapper).
- */
-#ifdef JEMALLOC_HAVE_DLSYM
- pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
-#else
- pthread_create_fptr = NULL;
-#endif
- if (pthread_create_fptr == NULL) {
- if (config_lazy_lock) {
- malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
- "\"pthread_create\")\n");
- abort();
- } else {
- /* Fall back to the default symbol. */
- pthread_create_fptr = pthread_create;
- }
- }
-
- return false;
-}
-
-/*
- * When lazy lock is enabled, we need to make sure isthreaded is set before
- * taking any background_thread locks. This is called early in ctl (instead of
- * waiting for the pthread_create calls to trigger) because the mutex is
- * required before creating background threads.
- */
-void
-background_thread_ctl_init(tsdn_t *tsdn) {
- malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
-#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
- pthread_create_fptr_init();
- pthread_create_wrapper_init();
-#endif
-}
-
-#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
-
-bool
-background_thread_boot0(void) {
- if (!have_background_thread && opt_background_thread) {
- malloc_printf("<jemalloc>: option background_thread currently "
- "supports pthread only\n");
- return true;
- }
-#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
- if ((config_lazy_lock || opt_background_thread) &&
- pthread_create_fptr_init()) {
- return true;
- }
-#endif
- return false;
-}
-
-bool
-background_thread_boot1(tsdn_t *tsdn) {
-#ifdef JEMALLOC_BACKGROUND_THREAD
- assert(have_background_thread);
- assert(narenas_total_get() > 0);
-
- if (opt_max_background_threads > MAX_BACKGROUND_THREAD_LIMIT) {
- opt_max_background_threads = DEFAULT_NUM_BACKGROUND_THREAD;
- }
- max_background_threads = opt_max_background_threads;
-
- background_thread_enabled_set(tsdn, opt_background_thread);
- if (malloc_mutex_init(&background_thread_lock,
- "background_thread_global",
- WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
- b0get(), opt_max_background_threads *
- sizeof(background_thread_info_t), CACHELINE);
- if (background_thread_info == NULL) {
- return true;
- }
-
- for (unsigned i = 0; i < max_background_threads; i++) {
- background_thread_info_t *info = &background_thread_info[i];
- /* Thread mutex is rank_inclusive because of thread0. */
- if (malloc_mutex_init(&info->mtx, "background_thread",
- WITNESS_RANK_BACKGROUND_THREAD,
- malloc_mutex_address_ordered)) {
- return true;
- }
- if (pthread_cond_init(&info->cond, NULL)) {
- return true;
- }
- malloc_mutex_lock(tsdn, &info->mtx);
- info->state = background_thread_stopped;
- background_thread_info_init(tsdn, info);
- malloc_mutex_unlock(tsdn, &info->mtx);
- }
-#endif
-
- return false;
-}
+#define JEMALLOC_BACKGROUND_THREAD_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+
+JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
+
+/******************************************************************************/
+/* Data. */
+
+/* This option should be opt-in only. */
+#define BACKGROUND_THREAD_DEFAULT false
+/* Read-only after initialization. */
+bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
+size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1;
+
+/* Used for thread creation, termination and stats. */
+malloc_mutex_t background_thread_lock;
+/* Indicates global state. Atomic because decay reads this w/o locking. */
+atomic_b_t background_thread_enabled_state;
+size_t n_background_threads;
+size_t max_background_threads;
+/* Thread info per-index. */
+background_thread_info_t *background_thread_info;
+
+/******************************************************************************/
+
+#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
+
+static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
+ void *(*)(void *), void *__restrict);
+
+static void
+pthread_create_wrapper_init(void) {
+#ifdef JEMALLOC_LAZY_LOCK
+ if (!isthreaded) {
+ isthreaded = true;
+ }
+#endif
+}
+
+int
+pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
+ void *(*start_routine)(void *), void *__restrict arg) {
+ pthread_create_wrapper_init();
+
+ return pthread_create_fptr(thread, attr, start_routine, arg);
+}
+#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
+
+#ifndef JEMALLOC_BACKGROUND_THREAD
+#define NOT_REACHED { not_reached(); }
+bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
+bool background_threads_enable(tsd_t *tsd) NOT_REACHED
+bool background_threads_disable(tsd_t *tsd) NOT_REACHED
+void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
+ arena_decay_t *decay, size_t npages_new) NOT_REACHED
+void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
+void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
+void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
+void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
+bool background_thread_stats_read(tsdn_t *tsdn,
+ background_thread_stats_t *stats) NOT_REACHED
+void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
+#undef NOT_REACHED
+#else
+
+static bool background_thread_enabled_at_fork;
+
+static void
+background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
+ background_thread_wakeup_time_set(tsdn, info, 0);
+ info->npages_to_purge_new = 0;
+ if (config_stats) {
+ info->tot_n_runs = 0;
+ nstime_init(&info->tot_sleep_time, 0);
+ }
+}
+
+static inline bool
+set_current_thread_affinity(int cpu) {
+#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
+
+ return (ret != 0);
+#else
+ return false;
+#endif
+}
+
+/* Threshold for determining when to wake up the background thread. */
+#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
+#define BILLION UINT64_C(1000000000)
+/* Minimum sleep interval of 100 ms. */
+#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
+
+static inline size_t
+decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
+ size_t i;
+ uint64_t sum = 0;
+ for (i = 0; i < interval; i++) {
+ sum += decay->backlog[i] * h_steps[i];
+ }
+ for (; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
+ }
+
+ return (size_t)(sum >> SMOOTHSTEP_BFP);
+}
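
The helper above estimates, in fixed point, how many dirty pages the decay curve will release over the next `interval` epochs: backlog slots that slide out of the window contribute their whole remaining allowance, while the rest contribute only the drop in their weight. The following standalone sketch shows that arithmetic; NSTEPS, BFP and the step table are made-up stand-ins for the SMOOTHSTEP_* values, not jemalloc's actual tables.

#include <stdint.h>
#include <stdio.h>

#define NSTEPS 8			/* stand-in for SMOOTHSTEP_NSTEPS */
#define BFP    24			/* stand-in for SMOOTHSTEP_BFP */

/* Toy monotone step table scaled to 2^BFP; index 0 is the oldest epoch. */
static const uint64_t steps[NSTEPS] = {
	2097152, 4194304, 6291456, 8388608,
	10485760, 12582912, 14680064, 16777216
};

/* Estimate how many pages would be purged after `interval` more epochs. */
static size_t
npurge_after(const size_t backlog[NSTEPS], size_t interval) {
	uint64_t sum = 0;
	size_t i;
	/* Slots at index < interval slide out of the window entirely. */
	for (i = 0; i < interval; i++) {
		sum += (uint64_t)backlog[i] * steps[i];
	}
	/* Remaining slots drop from weight steps[i] to steps[i - interval]. */
	for (; i < NSTEPS; i++) {
		sum += (uint64_t)backlog[i] * (steps[i] - steps[i - interval]);
	}
	return (size_t)(sum >> BFP);	/* remove the fixed-point scale */
}

int main(void) {
	size_t backlog[NSTEPS] = {0, 0, 0, 0, 100, 200, 400, 800};
	for (size_t ival = 1; ival <= NSTEPS; ival++) {
		printf("interval %zu -> ~%zu pages\n", ival,
		    npurge_after(backlog, ival));
	}
	return 0;
}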
+
+static uint64_t
+arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
+ extents_t *extents) {
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ /* Use minimal interval if decay is contended. */
+ return BACKGROUND_THREAD_MIN_INTERVAL_NS;
+ }
+
+ uint64_t interval;
+ ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
+ if (decay_time <= 0) {
+ /* Purging is eagerly done or disabled currently. */
+ interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+ goto label_done;
+ }
+
+ uint64_t decay_interval_ns = nstime_ns(&decay->interval);
+ assert(decay_interval_ns > 0);
+ size_t npages = extents_npages_get(extents);
+ if (npages == 0) {
+ unsigned i;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ if (decay->backlog[i] > 0) {
+ break;
+ }
+ }
+ if (i == SMOOTHSTEP_NSTEPS) {
+ /* No dirty pages recorded. Sleep indefinitely. */
+ interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+ goto label_done;
+ }
+ }
+ if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
+ /* Use max interval. */
+ interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
+ goto label_done;
+ }
+
+ size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
+ size_t ub = SMOOTHSTEP_NSTEPS;
+	/* At least 2 intervals to ensure reaching the next epoch deadline. */
+ lb = (lb < 2) ? 2 : lb;
+ if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
+ (lb + 2 > ub)) {
+ interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
+ goto label_done;
+ }
+
+ assert(lb + 2 <= ub);
+ size_t npurge_lb, npurge_ub;
+ npurge_lb = decay_npurge_after_interval(decay, lb);
+ if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
+ interval = decay_interval_ns * lb;
+ goto label_done;
+ }
+ npurge_ub = decay_npurge_after_interval(decay, ub);
+ if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
+ interval = decay_interval_ns * ub;
+ goto label_done;
+ }
+
+ unsigned n_search = 0;
+ size_t target, npurge;
+ while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
+ && (lb + 2 < ub)) {
+ target = (lb + ub) / 2;
+ npurge = decay_npurge_after_interval(decay, target);
+ if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
+ ub = target;
+ npurge_ub = npurge;
+ } else {
+ lb = target;
+ npurge_lb = npurge;
+ }
+ assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
+ }
+ interval = decay_interval_ns * (ub + lb) / 2;
+label_done:
+ interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
+ BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ return interval;
+}
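
The function above bounds the candidate sleep interval and then bisects over it, looking for the shortest sleep after which roughly BACKGROUND_THREAD_NPAGES_THRESHOLD pages become purgeable. The sketch below shows only that bisection pattern over an assumed monotone estimate; `npurge_estimate` and `pick_interval` are invented names and the cost function is arbitrary.

#include <stddef.h>
#include <stdio.h>

/* Toy monotone "pages purgeable after sleeping `interval` epochs". */
static size_t
npurge_estimate(size_t interval) {
	return 37 * interval * interval;	/* any non-decreasing function works */
}

/* Bounded bisection for an interval whose estimate lands near `threshold`. */
static size_t
pick_interval(size_t lb, size_t ub, size_t threshold) {
	size_t lo = npurge_estimate(lb), hi = npurge_estimate(ub);
	if (lo > threshold) {
		return lb;	/* even the shortest sleep purges enough */
	}
	if (hi < threshold) {
		return ub;	/* even the longest sleep stays below the threshold */
	}
	while (lo + threshold < hi && lb + 2 < ub) {
		size_t mid = (lb + ub) / 2;
		size_t est = npurge_estimate(mid);
		if (est > threshold) {
			ub = mid; hi = est;
		} else {
			lb = mid; lo = est;
		}
	}
	return (lb + ub) / 2;
}

int main(void) {
	printf("chosen interval: %zu\n", pick_interval(2, 200, 1024));
	return 0;
}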
+
+/* Compute purge interval for background threads. */
+static uint64_t
+arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
+ uint64_t i1, i2;
+ i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
+ &arena->extents_dirty);
+ if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
+ return i1;
+ }
+ i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
+ &arena->extents_muzzy);
+
+ return i1 < i2 ? i1 : i2;
+}
+
+static void
+background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
+ uint64_t interval) {
+ if (config_stats) {
+ info->tot_n_runs++;
+ }
+ info->npages_to_purge_new = 0;
+
+ struct timeval tv;
+ /* Specific clock required by timedwait. */
+ gettimeofday(&tv, NULL);
+ nstime_t before_sleep;
+ nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);
+
+ int ret;
+ if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
+ assert(background_thread_indefinite_sleep(info));
+ ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
+ assert(ret == 0);
+ } else {
+ assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
+ interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
+ /* We need malloc clock (can be different from tv). */
+ nstime_t next_wakeup;
+ nstime_init(&next_wakeup, 0);
+ nstime_update(&next_wakeup);
+ nstime_iadd(&next_wakeup, interval);
+ assert(nstime_ns(&next_wakeup) <
+ BACKGROUND_THREAD_INDEFINITE_SLEEP);
+ background_thread_wakeup_time_set(tsdn, info,
+ nstime_ns(&next_wakeup));
+
+ nstime_t ts_wakeup;
+ nstime_copy(&ts_wakeup, &before_sleep);
+ nstime_iadd(&ts_wakeup, interval);
+ struct timespec ts;
+ ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
+ ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);
+
+ assert(!background_thread_indefinite_sleep(info));
+ ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
+ assert(ret == ETIMEDOUT || ret == 0);
+ background_thread_wakeup_time_set(tsdn, info,
+ BACKGROUND_THREAD_INDEFINITE_SLEEP);
+ }
+ if (config_stats) {
+ gettimeofday(&tv, NULL);
+ nstime_t after_sleep;
+ nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
+ if (nstime_compare(&after_sleep, &before_sleep) > 0) {
+ nstime_subtract(&after_sleep, &before_sleep);
+ nstime_add(&info->tot_sleep_time, &after_sleep);
+ }
+ }
+}
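
The sleep above builds an absolute deadline from gettimeofday() because pthread_cond_timedwait() measures the deadline against the condition variable's clock (CLOCK_REALTIME unless configured otherwise). Here is a minimal standalone sketch of that pattern, with invented names and no predicate loop; a real caller would re-check its condition after every wakeup since spurious wakeups are possible.

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Sleep on `cond` for at most `interval_ns`, or until signaled. */
static int
bounded_sleep(uint64_t interval_ns) {
	struct timeval tv;
	gettimeofday(&tv, NULL);	/* realtime clock, as timedwait expects */

	struct timespec deadline;
	deadline.tv_sec = tv.tv_sec + (time_t)(interval_ns / 1000000000);
	uint64_t nsec = (uint64_t)tv.tv_usec * 1000 + interval_ns % 1000000000;
	deadline.tv_sec += (time_t)(nsec / 1000000000);
	deadline.tv_nsec = (long)(nsec % 1000000000);

	pthread_mutex_lock(&mtx);
	int ret = pthread_cond_timedwait(&cond, &mtx, &deadline);
	pthread_mutex_unlock(&mtx);
	return ret;	/* 0 if signaled, ETIMEDOUT if the deadline passed */
}

int main(void) {
	int ret = bounded_sleep(100 * 1000 * 1000);	/* 100 ms minimum interval */
	printf("timedwait returned %s\n", ret == ETIMEDOUT ? "ETIMEDOUT" : "0");
	return 0;
}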
+
+static bool
+background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
+ if (unlikely(info->state == background_thread_paused)) {
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ /* Wait on global lock to update status. */
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+ malloc_mutex_lock(tsdn, &info->mtx);
+ return true;
+ }
+
+ return false;
+}
+
+static inline void
+background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
+    unsigned ind) {
+ uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+ unsigned narenas = narenas_total_get();
+
+ for (unsigned i = ind; i < narenas; i += max_background_threads) {
+ arena_t *arena = arena_get(tsdn, i, false);
+ if (!arena) {
+ continue;
+ }
+ arena_decay(tsdn, arena, true, false);
+ if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
+ /* Min interval will be used. */
+ continue;
+ }
+ uint64_t interval = arena_decay_compute_purge_interval(tsdn,
+ arena);
+ assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
+ if (min_interval > interval) {
+ min_interval = interval;
+ }
+ }
+ background_thread_sleep(tsdn, info, min_interval);
+}
+
+static bool
+background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
+ if (info == &background_thread_info[0]) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd),
+ &background_thread_lock);
+ } else {
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
+ &background_thread_lock);
+ }
+
+ pre_reentrancy(tsd, NULL);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ bool has_thread;
+ assert(info->state != background_thread_paused);
+ if (info->state == background_thread_started) {
+ has_thread = true;
+ info->state = background_thread_stopped;
+ pthread_cond_signal(&info->cond);
+ } else {
+ has_thread = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+
+ if (!has_thread) {
+ post_reentrancy(tsd);
+ return false;
+ }
+ void *ret;
+ if (pthread_join(info->thread, &ret)) {
+ post_reentrancy(tsd);
+ return true;
+ }
+ assert(ret == NULL);
+ n_background_threads--;
+ post_reentrancy(tsd);
+
+ return false;
+}
+
+static void *background_thread_entry(void *ind_arg);
+
+static int
+background_thread_create_signals_masked(pthread_t *thread,
+ const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
+ /*
+ * Mask signals during thread creation so that the thread inherits
+ * an empty signal set.
+ */
+ sigset_t set;
+ sigfillset(&set);
+ sigset_t oldset;
+ int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
+ if (mask_err != 0) {
+ return mask_err;
+ }
+ int create_err = pthread_create_wrapper(thread, attr, start_routine,
+ arg);
+ /*
+ * Restore the signal mask. Failure to restore the signal mask here
+ * changes program behavior.
+ */
+ int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+ if (restore_err != 0) {
+ malloc_printf("<jemalloc>: background thread creation "
+ "failed (%d), and signal mask restoration failed "
+ "(%d)\n", create_err, restore_err);
+ if (opt_abort) {
+ abort();
+ }
+ }
+ return create_err;
+}
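
The wrapper above exists so background threads start with every signal blocked, leaving signal delivery to application threads. A self-contained sketch of the same mask, create, restore sequence using only plain pthreads follows; `spawn_signals_masked` and `worker` are made-up names.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *
worker(void *arg) {
	(void)arg;
	return NULL;	/* real work goes here; all signals arrive blocked */
}

/* Block every signal around pthread_create(), then restore the caller's mask. */
static int
spawn_signals_masked(pthread_t *thd) {
	sigset_t all, old;
	sigfillset(&all);
	int err = pthread_sigmask(SIG_SETMASK, &all, &old);
	if (err != 0) {
		return err;
	}
	int create_err = pthread_create(thd, NULL, worker, NULL);
	/* Restore unconditionally; the caller's signal handling must not change. */
	(void)pthread_sigmask(SIG_SETMASK, &old, NULL);
	return create_err;
}

int main(void) {
	pthread_t thd;
	if (spawn_signals_masked(&thd) == 0) {
		pthread_join(thd, NULL);
		puts("worker created with signals masked");
	}
	return 0;
}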
+
+static bool
+check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
+ bool *created_threads) {
+ bool ret = false;
+ if (likely(*n_created == n_background_threads)) {
+ return ret;
+ }
+
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+ malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
+ for (unsigned i = 1; i < max_background_threads; i++) {
+ if (created_threads[i]) {
+ continue;
+ }
+ background_thread_info_t *info = &background_thread_info[i];
+ malloc_mutex_lock(tsdn, &info->mtx);
+ /*
+		 * If the thread is in the background_thread_paused state
+		 * (because of an arena reset), delay its creation.
+ */
+ bool create = (info->state == background_thread_started);
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ if (!create) {
+ continue;
+ }
+
+ pre_reentrancy(tsd, NULL);
+ int err = background_thread_create_signals_masked(&info->thread,
+ NULL, background_thread_entry, (void *)(uintptr_t)i);
+ post_reentrancy(tsd);
+
+ if (err == 0) {
+ (*n_created)++;
+ created_threads[i] = true;
+ } else {
+ malloc_printf("<jemalloc>: background thread "
+ "creation failed (%d)\n", err);
+ if (opt_abort) {
+ abort();
+ }
+ }
+ /* Return to restart the loop since we unlocked. */
+ ret = true;
+ break;
+ }
+ malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);
+
+ return ret;
+}
+
+static void
+background_thread0_work(tsd_t *tsd) {
+ /* Thread0 is also responsible for launching / terminating threads. */
+ VARIABLE_ARRAY(bool, created_threads, max_background_threads);
+ unsigned i;
+ for (i = 1; i < max_background_threads; i++) {
+ created_threads[i] = false;
+ }
+ /* Start working, and create more threads when asked. */
+ unsigned n_created = 1;
+ while (background_thread_info[0].state != background_thread_stopped) {
+ if (background_thread_pause_check(tsd_tsdn(tsd),
+ &background_thread_info[0])) {
+ continue;
+ }
+ if (check_background_thread_creation(tsd, &n_created,
+ (bool *)&created_threads)) {
+ continue;
+ }
+ background_work_sleep_once(tsd_tsdn(tsd),
+ &background_thread_info[0], 0);
+ }
+
+ /*
+ * Shut down other threads at exit. Note that the ctl thread is holding
+ * the global background_thread mutex (and is waiting) for us.
+ */
+ assert(!background_thread_enabled());
+ for (i = 1; i < max_background_threads; i++) {
+ background_thread_info_t *info = &background_thread_info[i];
+ assert(info->state != background_thread_paused);
+ if (created_threads[i]) {
+ background_threads_disable_single(tsd, info);
+ } else {
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ if (info->state != background_thread_stopped) {
+ /* The thread was not created. */
+ assert(info->state ==
+ background_thread_started);
+ n_background_threads--;
+ info->state = background_thread_stopped;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+ }
+ background_thread_info[0].state = background_thread_stopped;
+ assert(n_background_threads == 1);
+}
+
+static void
+background_work(tsd_t *tsd, unsigned ind) {
+ background_thread_info_t *info = &background_thread_info[ind];
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
+ BACKGROUND_THREAD_INDEFINITE_SLEEP);
+ if (ind == 0) {
+ background_thread0_work(tsd);
+ } else {
+ while (info->state != background_thread_stopped) {
+ if (background_thread_pause_check(tsd_tsdn(tsd),
+ info)) {
+ continue;
+ }
+ background_work_sleep_once(tsd_tsdn(tsd), info, ind);
+ }
+ }
+ assert(info->state == background_thread_stopped);
+ background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+}
+
+static void *
+background_thread_entry(void *ind_arg) {
+ unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
+ assert(thread_ind < max_background_threads);
+#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
+ pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
+#elif defined(__FreeBSD__)
+ pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
+#endif
+ if (opt_percpu_arena != percpu_arena_disabled) {
+ set_current_thread_affinity((int)thread_ind);
+ }
+ /*
+ * Start periodic background work. We use internal tsd which avoids
+ * side effects, for example triggering new arena creation (which in
+ * turn triggers another background thread creation).
+ */
+ background_work(tsd_internal_fetch(), thread_ind);
+ assert(pthread_equal(pthread_self(),
+ background_thread_info[thread_ind].thread));
+
+ return NULL;
+}
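
The entry point names the thread and, for per-CPU arenas, pins it to a CPU before entering the work loop; on Linux, thread names are limited to 15 visible characters, which is exactly the length of "jemalloc_bg_thd". A small standalone sketch of those two steps follows; it assumes the glibc pthread_setname_np() and sched_setaffinity() extensions and uses invented names, so treat it as illustrative only.

#define _GNU_SOURCE	/* pthread_setname_np, CPU_SET (glibc) */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

struct worker_arg { unsigned ind; };

static void *
worker_entry(void *arg) {
	unsigned ind = ((struct worker_arg *)arg)->ind;

	/* Linux thread names are limited to 15 visible characters. */
	pthread_setname_np(pthread_self(), "bg_worker");

	/* Pin this thread to CPU `ind`; pid 0 means the calling thread. */
	cpu_set_t set;
	CPU_ZERO(&set);
	CPU_SET(ind, &set);
	sched_setaffinity(0, sizeof(set), &set);

	printf("worker %u running\n", ind);
	return NULL;
}

int main(void) {
	struct worker_arg arg = { 0 };
	pthread_t thd;
	if (pthread_create(&thd, NULL, worker_entry, &arg) == 0) {
		pthread_join(thd, NULL);
	}
	return 0;
}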
+
+static void
+background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
+ info->state = background_thread_started;
+ background_thread_info_init(tsd_tsdn(tsd), info);
+ n_background_threads++;
+}
+
+static bool
+background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
+ assert(have_background_thread);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
+
+ /* We create at most NCPUs threads. */
+ size_t thread_ind = arena_ind % max_background_threads;
+ background_thread_info_t *info = &background_thread_info[thread_ind];
+
+ bool need_new_thread;
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ need_new_thread = background_thread_enabled() &&
+ (info->state == background_thread_stopped);
+ if (need_new_thread) {
+ background_thread_init(tsd, info);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ if (!need_new_thread) {
+ return false;
+ }
+ if (arena_ind != 0) {
+ /* Threads are created asynchronously by Thread 0. */
+ background_thread_info_t *t0 = &background_thread_info[0];
+ malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
+ assert(t0->state == background_thread_started);
+ pthread_cond_signal(&t0->cond);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);
+
+ return false;
+ }
+
+ pre_reentrancy(tsd, NULL);
+ /*
+ * To avoid complications (besides reentrancy), create internal
+ * background threads with the underlying pthread_create.
+ */
+ int err = background_thread_create_signals_masked(&info->thread, NULL,
+ background_thread_entry, (void *)thread_ind);
+ post_reentrancy(tsd);
+
+ if (err != 0) {
+ malloc_printf("<jemalloc>: arena 0 background thread creation "
+ "failed (%d)\n", err);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ info->state = background_thread_stopped;
+ n_background_threads--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Create a new background thread if needed. */
+bool
+background_thread_create(tsd_t *tsd, unsigned arena_ind) {
+ assert(have_background_thread);
+
+ bool ret;
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ ret = background_thread_create_locked(tsd, arena_ind);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+
+ return ret;
+}
+
+bool
+background_threads_enable(tsd_t *tsd) {
+ assert(n_background_threads == 0);
+ assert(background_thread_enabled());
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
+
+ VARIABLE_ARRAY(bool, marked, max_background_threads);
+ unsigned i, nmarked;
+ for (i = 0; i < max_background_threads; i++) {
+ marked[i] = false;
+ }
+ nmarked = 0;
+ /* Thread 0 is required and created at the end. */
+ marked[0] = true;
+ /* Mark the threads we need to create for thread 0. */
+ unsigned n = narenas_total_get();
+ for (i = 1; i < n; i++) {
+ if (marked[i % max_background_threads] ||
+ arena_get(tsd_tsdn(tsd), i, false) == NULL) {
+ continue;
+ }
+ background_thread_info_t *info = &background_thread_info[
+ i % max_background_threads];
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ assert(info->state == background_thread_stopped);
+ background_thread_init(tsd, info);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ marked[i % max_background_threads] = true;
+ if (++nmarked == max_background_threads) {
+ break;
+ }
+ }
+
+ return background_thread_create_locked(tsd, 0);
+}
+
+bool
+background_threads_disable(tsd_t *tsd) {
+ assert(!background_thread_enabled());
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
+
+ /* Thread 0 will be responsible for terminating other threads. */
+ if (background_threads_disable_single(tsd,
+ &background_thread_info[0])) {
+ return true;
+ }
+ assert(n_background_threads == 0);
+
+ return false;
+}
+
+/* Check if we need to signal the background thread early. */
+void
+background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
+ arena_decay_t *decay, size_t npages_new) {
+ background_thread_info_t *info = arena_background_thread_info_get(
+ arena);
+ if (malloc_mutex_trylock(tsdn, &info->mtx)) {
+ /*
+		 * The background thread may hold the mutex for a long period of
+		 * time.  We'd like to avoid that variance on application
+		 * threads, so keep this non-blocking and leave the work to a
+		 * future epoch.
+ */
+ return;
+ }
+
+ if (info->state != background_thread_started) {
+ goto label_done;
+ }
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ goto label_done;
+ }
+
+ ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
+ if (decay_time <= 0) {
+ /* Purging is eagerly done or disabled currently. */
+ goto label_done_unlock2;
+ }
+ uint64_t decay_interval_ns = nstime_ns(&decay->interval);
+ assert(decay_interval_ns > 0);
+
+ nstime_t diff;
+ nstime_init(&diff, background_thread_wakeup_time_get(info));
+ if (nstime_compare(&diff, &decay->epoch) <= 0) {
+ goto label_done_unlock2;
+ }
+ nstime_subtract(&diff, &decay->epoch);
+ if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
+ goto label_done_unlock2;
+ }
+
+ if (npages_new > 0) {
+ size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
+ /*
+ * Compute how many new pages we would need to purge by the next
+ * wakeup, which is used to determine if we should signal the
+ * background thread.
+ */
+ uint64_t npurge_new;
+ if (n_epoch >= SMOOTHSTEP_NSTEPS) {
+ npurge_new = npages_new;
+ } else {
+ uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
+ assert(h_steps_max >=
+ h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
+ npurge_new = npages_new * (h_steps_max -
+ h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
+ npurge_new >>= SMOOTHSTEP_BFP;
+ }
+ info->npages_to_purge_new += npurge_new;
+ }
+
+ bool should_signal;
+ if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
+ should_signal = true;
+ } else if (unlikely(background_thread_indefinite_sleep(info)) &&
+ (extents_npages_get(&arena->extents_dirty) > 0 ||
+ extents_npages_get(&arena->extents_muzzy) > 0 ||
+ info->npages_to_purge_new > 0)) {
+ should_signal = true;
+ } else {
+ should_signal = false;
+ }
+
+ if (should_signal) {
+ info->npages_to_purge_new = 0;
+ pthread_cond_signal(&info->cond);
+ }
+label_done_unlock2:
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+label_done:
+ malloc_mutex_unlock(tsdn, &info->mtx);
+}
+
+void
+background_thread_prefork0(tsdn_t *tsdn) {
+ malloc_mutex_prefork(tsdn, &background_thread_lock);
+ background_thread_enabled_at_fork = background_thread_enabled();
+}
+
+void
+background_thread_prefork1(tsdn_t *tsdn) {
+ for (unsigned i = 0; i < max_background_threads; i++) {
+ malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
+ }
+}
+
+void
+background_thread_postfork_parent(tsdn_t *tsdn) {
+ for (unsigned i = 0; i < max_background_threads; i++) {
+ malloc_mutex_postfork_parent(tsdn,
+ &background_thread_info[i].mtx);
+ }
+ malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
+}
+
+void
+background_thread_postfork_child(tsdn_t *tsdn) {
+ for (unsigned i = 0; i < max_background_threads; i++) {
+ malloc_mutex_postfork_child(tsdn,
+ &background_thread_info[i].mtx);
+ }
+ malloc_mutex_postfork_child(tsdn, &background_thread_lock);
+ if (!background_thread_enabled_at_fork) {
+ return;
+ }
+
+ /* Clear background_thread state (reset to disabled for child). */
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ n_background_threads = 0;
+ background_thread_enabled_set(tsdn, false);
+ for (unsigned i = 0; i < max_background_threads; i++) {
+ background_thread_info_t *info = &background_thread_info[i];
+ malloc_mutex_lock(tsdn, &info->mtx);
+ info->state = background_thread_stopped;
+ int ret = pthread_cond_init(&info->cond, NULL);
+ assert(ret == 0);
+ background_thread_info_init(tsdn, info);
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ }
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+}
+
+bool
+background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
+ assert(config_stats);
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ if (!background_thread_enabled()) {
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+ return true;
+ }
+
+ stats->num_threads = n_background_threads;
+ uint64_t num_runs = 0;
+ nstime_init(&stats->run_interval, 0);
+ for (unsigned i = 0; i < max_background_threads; i++) {
+ background_thread_info_t *info = &background_thread_info[i];
+ if (malloc_mutex_trylock(tsdn, &info->mtx)) {
+ /*
+ * Each background thread run may take a long time;
+ * avoid waiting on the stats if the thread is active.
+ */
+ continue;
+ }
+ if (info->state != background_thread_stopped) {
+ num_runs += info->tot_n_runs;
+ nstime_add(&stats->run_interval, &info->tot_sleep_time);
+ }
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ }
+ stats->num_runs = num_runs;
+ if (num_runs > 0) {
+ nstime_idivide(&stats->run_interval, num_runs);
+ }
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+
+ return false;
+}
+
+#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
+#undef BILLION
+#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
+
+#ifdef JEMALLOC_HAVE_DLSYM
+#include <dlfcn.h>
+#endif
+
+static bool
+pthread_create_fptr_init(void) {
+ if (pthread_create_fptr != NULL) {
+ return false;
+ }
+ /*
+	 * Try the next symbol first, because 1) when lazy_lock is used we have
+	 * a wrapper for pthread_create; and 2) the application may define its
+	 * own wrapper as well (and may call malloc within the wrapper).
+ */
+#ifdef JEMALLOC_HAVE_DLSYM
+ pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
+#else
+ pthread_create_fptr = NULL;
+#endif
+ if (pthread_create_fptr == NULL) {
+ if (config_lazy_lock) {
+ malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
+ "\"pthread_create\")\n");
+ abort();
+ } else {
+ /* Fall back to the default symbol. */
+ pthread_create_fptr = pthread_create;
+ }
+ }
+
+ return false;
+}
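
The lookup above prefers dlsym(RTLD_NEXT, ...) so that any same-named wrapper earlier in link order (including the lazy_lock wrapper defined in this file) is skipped. A standalone sketch of that interposition idiom, assuming glibc (_GNU_SOURCE for RTLD_NEXT) and using invented names:

#define _GNU_SOURCE	/* RTLD_NEXT (glibc) */
#include <dlfcn.h>
#include <pthread.h>
#include <stdio.h>

typedef int (*pthread_create_t)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);

static pthread_create_t real_pthread_create;

/* Resolve the "next" pthread_create in link order, skipping local wrappers. */
static int
resolve_real_pthread_create(void) {
	if (real_pthread_create != NULL) {
		return 0;
	}
	real_pthread_create =
	    (pthread_create_t)dlsym(RTLD_NEXT, "pthread_create");
	if (real_pthread_create == NULL) {
		fprintf(stderr, "dlsym(RTLD_NEXT, \"pthread_create\") failed\n");
		return -1;
	}
	return 0;
}

static void *
noop(void *arg) { return arg; }

int main(void) {
	pthread_t thd;
	if (resolve_real_pthread_create() == 0 &&
	    real_pthread_create(&thd, NULL, noop, NULL) == 0) {
		pthread_join(thd, NULL);
		puts("created via dlsym-resolved pthread_create");
	}
	return 0;
}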
+
+/*
+ * When lazy lock is enabled, we need to make sure isthreaded is set before
+ * taking any background_thread locks. This is called early in ctl (instead of
+ * waiting for the pthread_create calls to trigger) because the mutex is
+ * required before creating background threads.
+ */
+void
+background_thread_ctl_init(tsdn_t *tsdn) {
+ malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
+#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
+ pthread_create_fptr_init();
+ pthread_create_wrapper_init();
+#endif
+}
+
+#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
+
+bool
+background_thread_boot0(void) {
+ if (!have_background_thread && opt_background_thread) {
+ malloc_printf("<jemalloc>: option background_thread currently "
+ "supports pthread only\n");
+ return true;
+ }
+#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
+ if ((config_lazy_lock || opt_background_thread) &&
+ pthread_create_fptr_init()) {
+ return true;
+ }
+#endif
+ return false;
+}
+
+bool
+background_thread_boot1(tsdn_t *tsdn) {
+#ifdef JEMALLOC_BACKGROUND_THREAD
+ assert(have_background_thread);
+ assert(narenas_total_get() > 0);
+
+ if (opt_max_background_threads > MAX_BACKGROUND_THREAD_LIMIT) {
+ opt_max_background_threads = DEFAULT_NUM_BACKGROUND_THREAD;
+ }
+ max_background_threads = opt_max_background_threads;
+
+ background_thread_enabled_set(tsdn, opt_background_thread);
+ if (malloc_mutex_init(&background_thread_lock,
+ "background_thread_global",
+ WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
+ b0get(), opt_max_background_threads *
+ sizeof(background_thread_info_t), CACHELINE);
+ if (background_thread_info == NULL) {
+ return true;
+ }
+
+ for (unsigned i = 0; i < max_background_threads; i++) {
+ background_thread_info_t *info = &background_thread_info[i];
+ /* Thread mutex is rank_inclusive because of thread0. */
+ if (malloc_mutex_init(&info->mtx, "background_thread",
+ WITNESS_RANK_BACKGROUND_THREAD,
+ malloc_mutex_address_ordered)) {
+ return true;
+ }
+ if (pthread_cond_init(&info->cond, NULL)) {
+ return true;
+ }
+ malloc_mutex_lock(tsdn, &info->mtx);
+ info->state = background_thread_stopped;
+ background_thread_info_init(tsdn, info);
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ }
+#endif
+
+ return false;
+}
diff --git a/contrib/libs/jemalloc/src/base.c b/contrib/libs/jemalloc/src/base.c
index f3c61661a2..3145a8c855 100644
--- a/contrib/libs/jemalloc/src/base.c
+++ b/contrib/libs/jemalloc/src/base.c
@@ -1,514 +1,514 @@
-#define JEMALLOC_BASE_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/extent_mmap.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/sz.h"
-
+#define JEMALLOC_BASE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/sz.h"
+
/******************************************************************************/
/* Data. */
-static base_t *b0;
-
-metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
+static base_t *b0;
-const char *metadata_thp_mode_names[] = {
- "disabled",
- "auto",
- "always"
-};
+metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
+const char *metadata_thp_mode_names[] = {
+ "disabled",
+ "auto",
+ "always"
+};
+
/******************************************************************************/
-static inline bool
-metadata_thp_madvise(void) {
- return (metadata_thp_enabled() &&
- (init_system_thp_mode == thp_mode_default));
-}
-
-static void *
-base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
- void *addr;
- bool zero = true;
- bool commit = true;
-
- /* Use huge page sizes and alignment regardless of opt_metadata_thp. */
- assert(size == HUGEPAGE_CEILING(size));
- size_t alignment = HUGEPAGE;
- if (extent_hooks == &extent_hooks_default) {
- addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
- } else {
- /* No arena context as we are creating new arenas. */
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- pre_reentrancy(tsd, NULL);
- addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
- &zero, &commit, ind);
- post_reentrancy(tsd);
- }
-
- return addr;
-}
-
-static void
-base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
- size_t size) {
- /*
- * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
- * stopping at first success. This cascade is performed for consistency
- * with the cascade in extent_dalloc_wrapper() because an application's
- * custom hooks may not support e.g. dalloc. This function is only ever
- * called as a side effect of arena destruction, so although it might
- * seem pointless to do anything besides dalloc here, the application
- * may in fact want the end state of all associated virtual memory to be
- * in some consistent-but-allocated state.
- */
- if (extent_hooks == &extent_hooks_default) {
- if (!extent_dalloc_mmap(addr, size)) {
- goto label_done;
- }
- if (!pages_decommit(addr, size)) {
- goto label_done;
- }
- if (!pages_purge_forced(addr, size)) {
- goto label_done;
- }
- if (!pages_purge_lazy(addr, size)) {
- goto label_done;
- }
- /* Nothing worked. This should never happen. */
- not_reached();
- } else {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- pre_reentrancy(tsd, NULL);
- if (extent_hooks->dalloc != NULL &&
- !extent_hooks->dalloc(extent_hooks, addr, size, true,
- ind)) {
- goto label_post_reentrancy;
- }
- if (extent_hooks->decommit != NULL &&
- !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
- ind)) {
- goto label_post_reentrancy;
- }
- if (extent_hooks->purge_forced != NULL &&
- !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
- size, ind)) {
- goto label_post_reentrancy;
- }
- if (extent_hooks->purge_lazy != NULL &&
- !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
- ind)) {
- goto label_post_reentrancy;
- }
- /* Nothing worked. That's the application's problem. */
- label_post_reentrancy:
- post_reentrancy(tsd);
- }
-label_done:
- if (metadata_thp_madvise()) {
- /* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
- assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
- (size & HUGEPAGE_MASK) == 0);
- pages_nohuge(addr, size);
- }
-}
-
-static void
-base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
- size_t size) {
- size_t sn;
-
- sn = *extent_sn_next;
- (*extent_sn_next)++;
-
- extent_binit(extent, addr, size, sn);
-}
-
-static size_t
-base_get_num_blocks(base_t *base, bool with_new_block) {
- base_block_t *b = base->blocks;
- assert(b != NULL);
-
- size_t n_blocks = with_new_block ? 2 : 1;
- while (b->next != NULL) {
- n_blocks++;
- b = b->next;
- }
-
- return n_blocks;
-}
-
-static void
-base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
- assert(opt_metadata_thp == metadata_thp_auto);
- malloc_mutex_assert_owner(tsdn, &base->mtx);
- if (base->auto_thp_switched) {
- return;
- }
- /* Called when adding a new block. */
- bool should_switch;
- if (base_ind_get(base) != 0) {
- should_switch = (base_get_num_blocks(base, true) ==
- BASE_AUTO_THP_THRESHOLD);
- } else {
- should_switch = (base_get_num_blocks(base, true) ==
- BASE_AUTO_THP_THRESHOLD_A0);
- }
- if (!should_switch) {
- return;
- }
-
- base->auto_thp_switched = true;
- assert(!config_stats || base->n_thp == 0);
- /* Make the initial blocks THP lazily. */
- base_block_t *block = base->blocks;
- while (block != NULL) {
- assert((block->size & HUGEPAGE_MASK) == 0);
- pages_huge(block, block->size);
- if (config_stats) {
- base->n_thp += HUGEPAGE_CEILING(block->size -
- extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
- }
- block = block->next;
- assert(block == NULL || (base_ind_get(base) == 0));
- }
-}
-
-static void *
-base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
- size_t alignment) {
- void *ret;
-
- assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
- assert(size == ALIGNMENT_CEILING(size, alignment));
-
- *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
- alignment) - (uintptr_t)extent_addr_get(extent);
- ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
- assert(extent_bsize_get(extent) >= *gap_size + size);
- extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
- *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
- extent_sn_get(extent));
- return ret;
-}
-
-static void
-base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
- void *addr, size_t size) {
- if (extent_bsize_get(extent) > 0) {
- /*
- * Compute the index for the largest size class that does not
- * exceed extent's size.
- */
- szind_t index_floor =
- sz_size2index(extent_bsize_get(extent) + 1) - 1;
- extent_heap_insert(&base->avail[index_floor], extent);
- }
-
- if (config_stats) {
- base->allocated += size;
- /*
- * Add one PAGE to base_resident for every page boundary that is
- * crossed by the new allocation. Adjust n_thp similarly when
- * metadata_thp is enabled.
- */
- base->resident += PAGE_CEILING((uintptr_t)addr + size) -
- PAGE_CEILING((uintptr_t)addr - gap_size);
- assert(base->allocated <= base->resident);
- assert(base->resident <= base->mapped);
- if (metadata_thp_madvise() && (opt_metadata_thp ==
- metadata_thp_always || base->auto_thp_switched)) {
- base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
- - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
- LG_HUGEPAGE;
- assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
- }
- }
+static inline bool
+metadata_thp_madvise(void) {
+ return (metadata_thp_enabled() &&
+ (init_system_thp_mode == thp_mode_default));
+}
+
+static void *
+base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
+ void *addr;
+ bool zero = true;
+ bool commit = true;
+
+ /* Use huge page sizes and alignment regardless of opt_metadata_thp. */
+ assert(size == HUGEPAGE_CEILING(size));
+ size_t alignment = HUGEPAGE;
+ if (extent_hooks == &extent_hooks_default) {
+ addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
+ } else {
+ /* No arena context as we are creating new arenas. */
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+ pre_reentrancy(tsd, NULL);
+ addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
+ &zero, &commit, ind);
+ post_reentrancy(tsd);
+ }
+
+ return addr;
+}
+
+static void
+base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
+ size_t size) {
+ /*
+ * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
+ * stopping at first success. This cascade is performed for consistency
+ * with the cascade in extent_dalloc_wrapper() because an application's
+ * custom hooks may not support e.g. dalloc. This function is only ever
+ * called as a side effect of arena destruction, so although it might
+ * seem pointless to do anything besides dalloc here, the application
+ * may in fact want the end state of all associated virtual memory to be
+ * in some consistent-but-allocated state.
+ */
+ if (extent_hooks == &extent_hooks_default) {
+ if (!extent_dalloc_mmap(addr, size)) {
+ goto label_done;
+ }
+ if (!pages_decommit(addr, size)) {
+ goto label_done;
+ }
+ if (!pages_purge_forced(addr, size)) {
+ goto label_done;
+ }
+ if (!pages_purge_lazy(addr, size)) {
+ goto label_done;
+ }
+ /* Nothing worked. This should never happen. */
+ not_reached();
+ } else {
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+ pre_reentrancy(tsd, NULL);
+ if (extent_hooks->dalloc != NULL &&
+ !extent_hooks->dalloc(extent_hooks, addr, size, true,
+ ind)) {
+ goto label_post_reentrancy;
+ }
+ if (extent_hooks->decommit != NULL &&
+ !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
+ ind)) {
+ goto label_post_reentrancy;
+ }
+ if (extent_hooks->purge_forced != NULL &&
+ !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
+ size, ind)) {
+ goto label_post_reentrancy;
+ }
+ if (extent_hooks->purge_lazy != NULL &&
+ !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
+ ind)) {
+ goto label_post_reentrancy;
+ }
+ /* Nothing worked. That's the application's problem. */
+ label_post_reentrancy:
+ post_reentrancy(tsd);
+ }
+label_done:
+ if (metadata_thp_madvise()) {
+ /* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
+ assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
+ (size & HUGEPAGE_MASK) == 0);
+ pages_nohuge(addr, size);
+ }
}
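
base_unmap() cascades from the strongest release operation to progressively weaker ones, stopping at the first success. The sketch below shows only the cascade shape with ordinary mmap-family calls (munmap, then PROT_NONE, then MADV_DONTNEED); the specific fallbacks are illustrative Linux/BSD-flavored choices, not the hook sequence used above.

#include <stdio.h>
#include <sys/mman.h>

/* Try each release strategy in order of strength; stop at the first success. */
static void
release_cascade(void *addr, size_t size) {
	if (munmap(addr, size) == 0) {
		return;
	}
	if (mprotect(addr, size, PROT_NONE) == 0) {
		return;
	}
	if (madvise(addr, size, MADV_DONTNEED) == 0) {
		return;
	}
	perror("no release strategy worked");
}

int main(void) {
	size_t size = 1 << 20;
	void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		return 1;
	}
	release_cascade(addr, size);
	return 0;
}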
-static void *
-base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
- size_t alignment) {
+static void
+base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
+ size_t size) {
+ size_t sn;
+
+ sn = *extent_sn_next;
+ (*extent_sn_next)++;
+
+ extent_binit(extent, addr, size, sn);
+}
+
+static size_t
+base_get_num_blocks(base_t *base, bool with_new_block) {
+ base_block_t *b = base->blocks;
+ assert(b != NULL);
+
+ size_t n_blocks = with_new_block ? 2 : 1;
+ while (b->next != NULL) {
+ n_blocks++;
+ b = b->next;
+ }
+
+ return n_blocks;
+}
+
+static void
+base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
+ assert(opt_metadata_thp == metadata_thp_auto);
+ malloc_mutex_assert_owner(tsdn, &base->mtx);
+ if (base->auto_thp_switched) {
+ return;
+ }
+ /* Called when adding a new block. */
+ bool should_switch;
+ if (base_ind_get(base) != 0) {
+ should_switch = (base_get_num_blocks(base, true) ==
+ BASE_AUTO_THP_THRESHOLD);
+ } else {
+ should_switch = (base_get_num_blocks(base, true) ==
+ BASE_AUTO_THP_THRESHOLD_A0);
+ }
+ if (!should_switch) {
+ return;
+ }
+
+ base->auto_thp_switched = true;
+ assert(!config_stats || base->n_thp == 0);
+ /* Make the initial blocks THP lazily. */
+ base_block_t *block = base->blocks;
+ while (block != NULL) {
+ assert((block->size & HUGEPAGE_MASK) == 0);
+ pages_huge(block, block->size);
+ if (config_stats) {
+ base->n_thp += HUGEPAGE_CEILING(block->size -
+ extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
+ }
+ block = block->next;
+ assert(block == NULL || (base_ind_get(base) == 0));
+ }
+}
+
+static void *
+base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
+ size_t alignment) {
void *ret;
- size_t gap_size;
-
- ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
- base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
- return ret;
-}
-
-/*
- * Allocate a block of virtual memory that is large enough to start with a
- * base_block_t header, followed by an object of specified size and alignment.
- * On success a pointer to the initialized base_block_t header is returned.
- */
-static base_block_t *
-base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
- unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
- size_t alignment) {
- alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
- size_t usize = ALIGNMENT_CEILING(size, alignment);
- size_t header_size = sizeof(base_block_t);
- size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
- header_size;
- /*
- * Create increasingly larger blocks in order to limit the total number
- * of disjoint virtual memory ranges. Choose the next size in the page
- * size class series (skipping size classes that are not a multiple of
- * HUGEPAGE), or a size large enough to satisfy the requested size and
- * alignment, whichever is larger.
- */
- size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
- + usize));
- pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
- *pind_last + 1 : *pind_last;
- size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
- size_t block_size = (min_block_size > next_block_size) ? min_block_size
- : next_block_size;
- base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
- block_size);
- if (block == NULL) {
- return NULL;
- }
-
- if (metadata_thp_madvise()) {
- void *addr = (void *)block;
- assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
- (block_size & HUGEPAGE_MASK) == 0);
- if (opt_metadata_thp == metadata_thp_always) {
- pages_huge(addr, block_size);
- } else if (opt_metadata_thp == metadata_thp_auto &&
- base != NULL) {
- /* base != NULL indicates this is not a new base. */
- malloc_mutex_lock(tsdn, &base->mtx);
- base_auto_thp_switch(tsdn, base);
- if (base->auto_thp_switched) {
- pages_huge(addr, block_size);
- }
- malloc_mutex_unlock(tsdn, &base->mtx);
- }
- }
-
- *pind_last = sz_psz2ind(block_size);
- block->size = block_size;
- block->next = NULL;
- assert(block_size >= header_size);
- base_extent_init(extent_sn_next, &block->extent,
- (void *)((uintptr_t)block + header_size), block_size - header_size);
- return block;
-}
-/*
- * Allocate an extent that is at least as large as specified size, with
- * specified alignment.
- */
-static extent_t *
-base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
- malloc_mutex_assert_owner(tsdn, &base->mtx);
-
- extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
- /*
- * Drop mutex during base_block_alloc(), because an extent hook will be
- * called.
- */
- malloc_mutex_unlock(tsdn, &base->mtx);
- base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
- base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
- alignment);
- malloc_mutex_lock(tsdn, &base->mtx);
- if (block == NULL) {
- return NULL;
- }
- block->next = base->blocks;
- base->blocks = block;
- if (config_stats) {
- base->allocated += sizeof(base_block_t);
- base->resident += PAGE_CEILING(sizeof(base_block_t));
- base->mapped += block->size;
- if (metadata_thp_madvise() &&
- !(opt_metadata_thp == metadata_thp_auto
- && !base->auto_thp_switched)) {
- assert(base->n_thp > 0);
- base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
- LG_HUGEPAGE;
+ assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
+ assert(size == ALIGNMENT_CEILING(size, alignment));
+
+ *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
+ alignment) - (uintptr_t)extent_addr_get(extent);
+ ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
+ assert(extent_bsize_get(extent) >= *gap_size + size);
+ extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
+ *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
+ extent_sn_get(extent));
+ return ret;
+}
+
+static void
+base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
+ void *addr, size_t size) {
+ if (extent_bsize_get(extent) > 0) {
+ /*
+ * Compute the index for the largest size class that does not
+ * exceed extent's size.
+ */
+ szind_t index_floor =
+ sz_size2index(extent_bsize_get(extent) + 1) - 1;
+ extent_heap_insert(&base->avail[index_floor], extent);
+ }
+
+ if (config_stats) {
+ base->allocated += size;
+ /*
+ * Add one PAGE to base_resident for every page boundary that is
+ * crossed by the new allocation. Adjust n_thp similarly when
+ * metadata_thp is enabled.
+ */
+ base->resident += PAGE_CEILING((uintptr_t)addr + size) -
+ PAGE_CEILING((uintptr_t)addr - gap_size);
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ if (metadata_thp_madvise() && (opt_metadata_thp ==
+ metadata_thp_always || base->auto_thp_switched)) {
+ base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
+ - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
+ LG_HUGEPAGE;
+ assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
}
- assert(base->allocated <= base->resident);
- assert(base->resident <= base->mapped);
- assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
- return &block->extent;
+}
+
+static void *
+base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
+ size_t alignment) {
+ void *ret;
+ size_t gap_size;
+
+ ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
+ base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
+ return ret;
}
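
Bump allocation out of the current block is the fast path here: align the cursor, hand out the object, and keep the trimmed remainder. A toy, self-contained version of that split follows, with an invented `struct span` and `bump_alloc` and a power-of-two alignment assumed.

#include <stdint.h>
#include <stdio.h>

struct span {
	uintptr_t addr;	/* first free byte */
	size_t len;	/* bytes remaining in the block */
};

/* Carve `size` bytes at `alignment` (power of two) out of the span. */
static void *
bump_alloc(struct span *s, size_t size, size_t alignment) {
	uintptr_t aligned = (s->addr + alignment - 1) &
	    ~(uintptr_t)(alignment - 1);
	size_t gap = aligned - s->addr;
	if (gap + size > s->len) {
		return NULL;	/* not enough room left in this block */
	}
	s->addr = aligned + size;	/* trim the span past the new object */
	s->len -= gap + size;
	return (void *)aligned;
}

int main(void) {
	static char block[4096];
	struct span s = { (uintptr_t)block, sizeof(block) };
	void *a = bump_alloc(&s, 100, 64);
	void *b = bump_alloc(&s, 200, 16);
	printf("a=%p b=%p remaining=%zu\n", a, b, s.len);
	return 0;
}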
-base_t *
-b0get(void) {
- return b0;
+/*
+ * Allocate a block of virtual memory that is large enough to start with a
+ * base_block_t header, followed by an object of specified size and alignment.
+ * On success a pointer to the initialized base_block_t header is returned.
+ */
+static base_block_t *
+base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
+ unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
+ size_t alignment) {
+ alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
+ size_t usize = ALIGNMENT_CEILING(size, alignment);
+ size_t header_size = sizeof(base_block_t);
+ size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
+ header_size;
+ /*
+ * Create increasingly larger blocks in order to limit the total number
+ * of disjoint virtual memory ranges. Choose the next size in the page
+ * size class series (skipping size classes that are not a multiple of
+ * HUGEPAGE), or a size large enough to satisfy the requested size and
+ * alignment, whichever is larger.
+ */
+ size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ + usize));
+ pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
+ *pind_last + 1 : *pind_last;
+ size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
+ size_t block_size = (min_block_size > next_block_size) ? min_block_size
+ : next_block_size;
+ base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
+ block_size);
+ if (block == NULL) {
+ return NULL;
+ }
+
+ if (metadata_thp_madvise()) {
+ void *addr = (void *)block;
+ assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
+ (block_size & HUGEPAGE_MASK) == 0);
+ if (opt_metadata_thp == metadata_thp_always) {
+ pages_huge(addr, block_size);
+ } else if (opt_metadata_thp == metadata_thp_auto &&
+ base != NULL) {
+ /* base != NULL indicates this is not a new base. */
+ malloc_mutex_lock(tsdn, &base->mtx);
+ base_auto_thp_switch(tsdn, base);
+ if (base->auto_thp_switched) {
+ pages_huge(addr, block_size);
+ }
+ malloc_mutex_unlock(tsdn, &base->mtx);
+ }
+ }
+
+ *pind_last = sz_psz2ind(block_size);
+ block->size = block_size;
+ block->next = NULL;
+ assert(block_size >= header_size);
+ base_extent_init(extent_sn_next, &block->extent,
+ (void *)((uintptr_t)block + header_size), block_size - header_size);
+ return block;
}
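
The comment in base_block_alloc() explains the sizing policy: each new block is large enough for the request, but never smaller than a geometrically growing floor, which keeps the number of disjoint mappings small. A standalone sketch of just that arithmetic, with plain doubling standing in for the page-size-class walk and a hypothetical 2 MiB huge page size:

#include <stddef.h>
#include <stdio.h>

#define HUGEPAGE ((size_t)1 << 21)	/* assume 2 MiB huge pages */

static size_t
hugepage_ceiling(size_t s) {
	return (s + HUGEPAGE - 1) & ~(HUGEPAGE - 1);
}

/*
 * Pick the next metadata block size: at least the (rounded) request, but
 * no smaller than a geometrically growing floor, so the mapping count
 * stays logarithmic in the total metadata footprint.
 */
static size_t
next_block_size(size_t request, size_t *grow_floor) {
	size_t min_size = hugepage_ceiling(request);
	size_t next = hugepage_ceiling(*grow_floor * 2);
	size_t block = (min_size > next) ? min_size : next;
	*grow_floor = block;
	return block;
}

int main(void) {
	size_t grow_floor = HUGEPAGE;
	for (int i = 0; i < 5; i++) {
		printf("block %d: %zu MiB\n", i,
		    next_block_size(4096, &grow_floor) >> 20);
	}
	return 0;
}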
-base_t *
-base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
- pszind_t pind_last = 0;
- size_t extent_sn_next = 0;
- base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
- &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
- if (block == NULL) {
- return NULL;
+/*
+ * Allocate an extent that is at least as large as specified size, with
+ * specified alignment.
+ */
+static extent_t *
+base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
+ malloc_mutex_assert_owner(tsdn, &base->mtx);
+
+ extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ /*
+ * Drop mutex during base_block_alloc(), because an extent hook will be
+ * called.
+ */
+ malloc_mutex_unlock(tsdn, &base->mtx);
+ base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
+ base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
+ alignment);
+ malloc_mutex_lock(tsdn, &base->mtx);
+ if (block == NULL) {
+ return NULL;
}
-
- size_t gap_size;
- size_t base_alignment = CACHELINE;
- size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
- base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
- &gap_size, base_size, base_alignment);
- base->ind = ind;
- atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
- if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
- malloc_mutex_rank_exclusive)) {
- base_unmap(tsdn, extent_hooks, ind, block, block->size);
- return NULL;
- }
- base->pind_last = pind_last;
- base->extent_sn_next = extent_sn_next;
- base->blocks = block;
- base->auto_thp_switched = false;
- for (szind_t i = 0; i < SC_NSIZES; i++) {
- extent_heap_new(&base->avail[i]);
- }
- if (config_stats) {
- base->allocated = sizeof(base_block_t);
- base->resident = PAGE_CEILING(sizeof(base_block_t));
- base->mapped = block->size;
- base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
- metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
- >> LG_HUGEPAGE : 0;
- assert(base->allocated <= base->resident);
- assert(base->resident <= base->mapped);
- assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
- }
- base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
- base_size);
-
- return base;
+ block->next = base->blocks;
+ base->blocks = block;
+ if (config_stats) {
+ base->allocated += sizeof(base_block_t);
+ base->resident += PAGE_CEILING(sizeof(base_block_t));
+ base->mapped += block->size;
+ if (metadata_thp_madvise() &&
+ !(opt_metadata_thp == metadata_thp_auto
+ && !base->auto_thp_switched)) {
+ assert(base->n_thp > 0);
+ base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
+ LG_HUGEPAGE;
+ }
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
+ }
+ return &block->extent;
+}
+
+base_t *
+b0get(void) {
+ return b0;
}
+base_t *
+base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+ pszind_t pind_last = 0;
+ size_t extent_sn_next = 0;
+ base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
+ &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
+ if (block == NULL) {
+ return NULL;
+ }
+
+ size_t gap_size;
+ size_t base_alignment = CACHELINE;
+ size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
+ base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
+ &gap_size, base_size, base_alignment);
+ base->ind = ind;
+ atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
+ if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
+ malloc_mutex_rank_exclusive)) {
+ base_unmap(tsdn, extent_hooks, ind, block, block->size);
+ return NULL;
+ }
+ base->pind_last = pind_last;
+ base->extent_sn_next = extent_sn_next;
+ base->blocks = block;
+ base->auto_thp_switched = false;
+ for (szind_t i = 0; i < SC_NSIZES; i++) {
+ extent_heap_new(&base->avail[i]);
+ }
+ if (config_stats) {
+ base->allocated = sizeof(base_block_t);
+ base->resident = PAGE_CEILING(sizeof(base_block_t));
+ base->mapped = block->size;
+ base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
+ metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
+ >> LG_HUGEPAGE : 0;
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
+ }
+ base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
+ base_size);
+
+ return base;
+}
+
void
-base_delete(tsdn_t *tsdn, base_t *base) {
- extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
- base_block_t *next = base->blocks;
- do {
- base_block_t *block = next;
- next = block->next;
- base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
- block->size);
- } while (next != NULL);
+base_delete(tsdn_t *tsdn, base_t *base) {
+ extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ base_block_t *next = base->blocks;
+ do {
+ base_block_t *block = next;
+ next = block->next;
+ base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
+ block->size);
+ } while (next != NULL);
+}
+
+extent_hooks_t *
+base_extent_hooks_get(base_t *base) {
+ return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
+ ATOMIC_ACQUIRE);
}
-extent_hooks_t *
-base_extent_hooks_get(base_t *base) {
- return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
- ATOMIC_ACQUIRE);
-}
-
-extent_hooks_t *
-base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
- extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
- atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
- return old_extent_hooks;
-}
-
-static void *
-base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
- size_t *esn) {
- alignment = QUANTUM_CEILING(alignment);
- size_t usize = ALIGNMENT_CEILING(size, alignment);
- size_t asize = usize + alignment - QUANTUM;
-
- extent_t *extent = NULL;
- malloc_mutex_lock(tsdn, &base->mtx);
- for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
- extent = extent_heap_remove_first(&base->avail[i]);
- if (extent != NULL) {
- /* Use existing space. */
- break;
- }
- }
- if (extent == NULL) {
- /* Try to allocate more space. */
- extent = base_extent_alloc(tsdn, base, usize, alignment);
- }
- void *ret;
- if (extent == NULL) {
- ret = NULL;
- goto label_return;
- }
-
- ret = base_extent_bump_alloc(base, extent, usize, alignment);
- if (esn != NULL) {
- *esn = extent_sn_get(extent);
- }
-label_return:
- malloc_mutex_unlock(tsdn, &base->mtx);
- return ret;
-}
-
-/*
- * base_alloc() returns zeroed memory, which is always demand-zeroed for the
- * auto arenas, in order to make multi-page sparse data structures such as radix
- * tree nodes efficient with respect to physical memory usage. Upon success a
- * pointer to at least size bytes with specified alignment is returned. Note
- * that size is rounded up to the nearest multiple of alignment to avoid false
- * sharing.
- */
-void *
-base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
- return base_alloc_impl(tsdn, base, size, alignment, NULL);
-}
-
-extent_t *
-base_alloc_extent(tsdn_t *tsdn, base_t *base) {
- size_t esn;
- extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
- CACHELINE, &esn);
- if (extent == NULL) {
- return NULL;
- }
- extent_esn_set(extent, esn);
- return extent;
+extent_hooks_t *
+base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
+ extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
+ atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
+ return old_extent_hooks;
+}
+
+static void *
+base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
+ size_t *esn) {
+ alignment = QUANTUM_CEILING(alignment);
+ size_t usize = ALIGNMENT_CEILING(size, alignment);
+ size_t asize = usize + alignment - QUANTUM;
+
+ extent_t *extent = NULL;
+ malloc_mutex_lock(tsdn, &base->mtx);
+ for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
+ extent = extent_heap_remove_first(&base->avail[i]);
+ if (extent != NULL) {
+ /* Use existing space. */
+ break;
+ }
+ }
+ if (extent == NULL) {
+ /* Try to allocate more space. */
+ extent = base_extent_alloc(tsdn, base, usize, alignment);
+ }
+ void *ret;
+ if (extent == NULL) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = base_extent_bump_alloc(base, extent, usize, alignment);
+ if (esn != NULL) {
+ *esn = extent_sn_get(extent);
+ }
+label_return:
+ malloc_mutex_unlock(tsdn, &base->mtx);
+ return ret;
}
+/*
+ * base_alloc() returns zeroed memory, which is always demand-zeroed for the
+ * auto arenas, in order to make multi-page sparse data structures such as radix
+ * tree nodes efficient with respect to physical memory usage. Upon success a
+ * pointer to at least size bytes with specified alignment is returned. Note
+ * that size is rounded up to the nearest multiple of alignment to avoid false
+ * sharing.
+ */
+void *
+base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
+ return base_alloc_impl(tsdn, base, size, alignment, NULL);
+}
+
+extent_t *
+base_alloc_extent(tsdn_t *tsdn, base_t *base) {
+ size_t esn;
+ extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
+ CACHELINE, &esn);
+ if (extent == NULL) {
+ return NULL;
+ }
+ extent_esn_set(extent, esn);
+ return extent;
+}
+
void
-base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
- size_t *mapped, size_t *n_thp) {
- cassert(config_stats);
-
- malloc_mutex_lock(tsdn, &base->mtx);
- assert(base->allocated <= base->resident);
- assert(base->resident <= base->mapped);
- *allocated = base->allocated;
- *resident = base->resident;
- *mapped = base->mapped;
- *n_thp = base->n_thp;
- malloc_mutex_unlock(tsdn, &base->mtx);
+base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
+ size_t *mapped, size_t *n_thp) {
+ cassert(config_stats);
+
+ malloc_mutex_lock(tsdn, &base->mtx);
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ *allocated = base->allocated;
+ *resident = base->resident;
+ *mapped = base->mapped;
+ *n_thp = base->n_thp;
+ malloc_mutex_unlock(tsdn, &base->mtx);
}
void
-base_prefork(tsdn_t *tsdn, base_t *base) {
- malloc_mutex_prefork(tsdn, &base->mtx);
-}
+base_prefork(tsdn_t *tsdn, base_t *base) {
+ malloc_mutex_prefork(tsdn, &base->mtx);
+}
-void
-base_postfork_parent(tsdn_t *tsdn, base_t *base) {
- malloc_mutex_postfork_parent(tsdn, &base->mtx);
+void
+base_postfork_parent(tsdn_t *tsdn, base_t *base) {
+ malloc_mutex_postfork_parent(tsdn, &base->mtx);
}
void
-base_postfork_child(tsdn_t *tsdn, base_t *base) {
- malloc_mutex_postfork_child(tsdn, &base->mtx);
-}
-
-bool
-base_boot(tsdn_t *tsdn) {
- b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
- return (b0 == NULL);
+base_postfork_child(tsdn_t *tsdn, base_t *base) {
+ malloc_mutex_postfork_child(tsdn, &base->mtx);
+}
+
+bool
+base_boot(tsdn_t *tsdn) {
+ b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
+ return (b0 == NULL);
}
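
The base_alloc() comment above notes that the requested size is rounded up to a multiple of the (quantum-ceiled) alignment before the bump allocation, which is exactly what base_alloc_impl() computes at its top. A minimal standalone sketch of that rounding arithmetic, using a hypothetical align_ceil() stand-in for jemalloc's ALIGNMENT_CEILING/QUANTUM_CEILING macros and an assumed quantum of 16 bytes (the real value is platform-dependent):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define QUANTUM 16	/* assumed quantum for illustration only */

/* Round s up to the next multiple of the power-of-two alignment a. */
static size_t
align_ceil(size_t s, size_t a) {
	return (s + a - 1) & ~(a - 1);
}

int
main(void) {
	size_t size = 100, alignment = 8;
	/* Mirrors base_alloc_impl(): alignment is first raised to the quantum. */
	alignment = align_ceil(alignment, QUANTUM);	/* -> 16 */
	size_t usize = align_ceil(size, alignment);	/* -> 112 */
	size_t asize = usize + alignment - QUANTUM;	/* worst-case search size */
	assert(usize % alignment == 0);
	printf("usize=%zu asize=%zu\n", usize, asize);
	return 0;
}

Rounding usize to the alignment (rather than handing back the raw size) is what the comment means by avoiding false sharing: two successive base allocations can never share a cache line when the alignment is at least the cache-line size.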
diff --git a/contrib/libs/jemalloc/src/bin.c b/contrib/libs/jemalloc/src/bin.c
index bca6b12c35..26b1cb6120 100644
--- a/contrib/libs/jemalloc/src/bin.c
+++ b/contrib/libs/jemalloc/src/bin.c
@@ -1,95 +1,95 @@
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/bin.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/witness.h"
-
-bin_info_t bin_infos[SC_NBINS];
-
-static void
-bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
- bin_info_t bin_infos[SC_NBINS]) {
- for (unsigned i = 0; i < SC_NBINS; i++) {
- bin_info_t *bin_info = &bin_infos[i];
- sc_t *sc = &sc_data->sc[i];
- bin_info->reg_size = ((size_t)1U << sc->lg_base)
- + ((size_t)sc->ndelta << sc->lg_delta);
- bin_info->slab_size = (sc->pgs << LG_PAGE);
- bin_info->nregs =
- (uint32_t)(bin_info->slab_size / bin_info->reg_size);
- bin_info->n_shards = bin_shard_sizes[i];
- bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
- bin_info->nregs);
- bin_info->bitmap_info = bitmap_info;
- }
-}
-
-bool
-bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
- size_t end_size, size_t nshards) {
- if (nshards > BIN_SHARDS_MAX || nshards == 0) {
- return true;
- }
-
- if (start_size > SC_SMALL_MAXCLASS) {
- return false;
- }
- if (end_size > SC_SMALL_MAXCLASS) {
- end_size = SC_SMALL_MAXCLASS;
- }
-
- /* Compute the index since this may happen before sz init. */
- szind_t ind1 = sz_size2index_compute(start_size);
- szind_t ind2 = sz_size2index_compute(end_size);
- for (unsigned i = ind1; i <= ind2; i++) {
- bin_shard_sizes[i] = (unsigned)nshards;
- }
-
- return false;
-}
-
-void
-bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
- /* Load the default number of shards. */
- for (unsigned i = 0; i < SC_NBINS; i++) {
- bin_shard_sizes[i] = N_BIN_SHARDS_DEFAULT;
- }
-}
-
-void
-bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
- assert(sc_data->initialized);
- bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
-}
-
-bool
-bin_init(bin_t *bin) {
- if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
- bin->slabcur = NULL;
- extent_heap_new(&bin->slabs_nonfull);
- extent_list_init(&bin->slabs_full);
- if (config_stats) {
- memset(&bin->stats, 0, sizeof(bin_stats_t));
- }
- return false;
-}
-
-void
-bin_prefork(tsdn_t *tsdn, bin_t *bin) {
- malloc_mutex_prefork(tsdn, &bin->lock);
-}
-
-void
-bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) {
- malloc_mutex_postfork_parent(tsdn, &bin->lock);
-}
-
-void
-bin_postfork_child(tsdn_t *tsdn, bin_t *bin) {
- malloc_mutex_postfork_child(tsdn, &bin->lock);
-}
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/bin.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/witness.h"
+
+bin_info_t bin_infos[SC_NBINS];
+
+static void
+bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
+ bin_info_t bin_infos[SC_NBINS]) {
+ for (unsigned i = 0; i < SC_NBINS; i++) {
+ bin_info_t *bin_info = &bin_infos[i];
+ sc_t *sc = &sc_data->sc[i];
+ bin_info->reg_size = ((size_t)1U << sc->lg_base)
+ + ((size_t)sc->ndelta << sc->lg_delta);
+ bin_info->slab_size = (sc->pgs << LG_PAGE);
+ bin_info->nregs =
+ (uint32_t)(bin_info->slab_size / bin_info->reg_size);
+ bin_info->n_shards = bin_shard_sizes[i];
+ bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
+ bin_info->nregs);
+ bin_info->bitmap_info = bitmap_info;
+ }
+}
+
+bool
+bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
+ size_t end_size, size_t nshards) {
+ if (nshards > BIN_SHARDS_MAX || nshards == 0) {
+ return true;
+ }
+
+ if (start_size > SC_SMALL_MAXCLASS) {
+ return false;
+ }
+ if (end_size > SC_SMALL_MAXCLASS) {
+ end_size = SC_SMALL_MAXCLASS;
+ }
+
+ /* Compute the index since this may happen before sz init. */
+ szind_t ind1 = sz_size2index_compute(start_size);
+ szind_t ind2 = sz_size2index_compute(end_size);
+ for (unsigned i = ind1; i <= ind2; i++) {
+ bin_shard_sizes[i] = (unsigned)nshards;
+ }
+
+ return false;
+}
+
+void
+bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
+ /* Load the default number of shards. */
+ for (unsigned i = 0; i < SC_NBINS; i++) {
+ bin_shard_sizes[i] = N_BIN_SHARDS_DEFAULT;
+ }
+}
+
+void
+bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
+ assert(sc_data->initialized);
+ bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
+}
+
+bool
+bin_init(bin_t *bin) {
+ if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ bin->slabcur = NULL;
+ extent_heap_new(&bin->slabs_nonfull);
+ extent_list_init(&bin->slabs_full);
+ if (config_stats) {
+ memset(&bin->stats, 0, sizeof(bin_stats_t));
+ }
+ return false;
+}
+
+void
+bin_prefork(tsdn_t *tsdn, bin_t *bin) {
+ malloc_mutex_prefork(tsdn, &bin->lock);
+}
+
+void
+bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) {
+ malloc_mutex_postfork_parent(tsdn, &bin->lock);
+}
+
+void
+bin_postfork_child(tsdn_t *tsdn, bin_t *bin) {
+ malloc_mutex_postfork_child(tsdn, &bin->lock);
+}
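
bin_infos_init() above derives each bin's region size from the size-class parameters and then fits as many whole regions as possible into the slab. A short sketch of that arithmetic with hypothetical size-class values (the real parameters come from sc_data_t at boot; LG_PAGE of 12, i.e. 4 KiB pages, is assumed here):

#include <stdint.h>
#include <stdio.h>

#define LG_PAGE 12	/* assumed 4 KiB pages */

int
main(void) {
	/* Hypothetical size-class parameters, e.g. a 48-byte class. */
	unsigned lg_base = 5, ndelta = 1, lg_delta = 4, pgs = 3;

	size_t reg_size = ((size_t)1 << lg_base) + ((size_t)ndelta << lg_delta);
	size_t slab_size = (size_t)pgs << LG_PAGE;
	uint32_t nregs = (uint32_t)(slab_size / reg_size);

	/* 48-byte regions in a 12 KiB slab -> 256 regions, no wasted space. */
	printf("reg_size=%zu slab_size=%zu nregs=%u\n",
	    reg_size, slab_size, nregs);
	return 0;
}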
diff --git a/contrib/libs/jemalloc/src/bitmap.c b/contrib/libs/jemalloc/src/bitmap.c
index 468b3178eb..8d9beff032 100644
--- a/contrib/libs/jemalloc/src/bitmap.c
+++ b/contrib/libs/jemalloc/src/bitmap.c
@@ -1,15 +1,15 @@
-#define JEMALLOC_BITMAP_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
+#define JEMALLOC_BITMAP_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
-#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/assert.h"
/******************************************************************************/
-#ifdef BITMAP_USE_TREE
+#ifdef BITMAP_USE_TREE
void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
unsigned i;
size_t group_count;
@@ -22,100 +22,100 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
* that requires only one group.
*/
binfo->levels[0].group_offset = 0;
- group_count = BITMAP_BITS2GROUPS(nbits);
+ group_count = BITMAP_BITS2GROUPS(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
- group_count = BITMAP_BITS2GROUPS(group_count);
+ group_count = BITMAP_BITS2GROUPS(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
- assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
+ assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
binfo->nlevels = i;
binfo->nbits = nbits;
}
-static size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo) {
- return binfo->levels[binfo->nlevels].group_offset;
+static size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo) {
+ return binfo->levels[binfo->nlevels].group_offset;
}
void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
size_t extra;
unsigned i;
/*
* Bits are actually inverted with regard to the external bitmap
- * interface.
+ * interface.
*/
-
- if (fill) {
- /* The "filled" bitmap starts out with all 0 bits. */
- memset(bitmap, 0, bitmap_size(binfo));
- return;
- }
-
- /*
- * The "empty" bitmap starts out with all 1 bits, except for trailing
- * unused bits (if any). Note that each group uses bit 0 to correspond
- * to the first logical bit in the group, so extra bits are the most
- * significant bits of the last group.
- */
- memset(bitmap, 0xffU, bitmap_size(binfo));
+
+ if (fill) {
+ /* The "filled" bitmap starts out with all 0 bits. */
+ memset(bitmap, 0, bitmap_size(binfo));
+ return;
+ }
+
+ /*
+ * The "empty" bitmap starts out with all 1 bits, except for trailing
+ * unused bits (if any). Note that each group uses bit 0 to correspond
+ * to the first logical bit in the group, so extra bits are the most
+ * significant bits of the last group.
+ */
+ memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
- if (extra != 0) {
+ if (extra != 0) {
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
- }
+ }
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
- if (extra != 0) {
+ if (extra != 0) {
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
- }
+ }
}
}
-
-#else /* BITMAP_USE_TREE */
-
-void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
- assert(nbits > 0);
- assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
-
- binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
- binfo->nbits = nbits;
-}
-
-static size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo) {
- return binfo->ngroups;
-}
-
-void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
- size_t extra;
-
- if (fill) {
- memset(bitmap, 0, bitmap_size(binfo));
- return;
- }
-
- memset(bitmap, 0xffU, bitmap_size(binfo));
- extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
- & BITMAP_GROUP_NBITS_MASK;
- if (extra != 0) {
- bitmap[binfo->ngroups - 1] >>= extra;
- }
-}
-
-#endif /* BITMAP_USE_TREE */
-
-size_t
-bitmap_size(const bitmap_info_t *binfo) {
- return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
-}
+
+#else /* BITMAP_USE_TREE */
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
+ assert(nbits > 0);
+ assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+ binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
+ binfo->nbits = nbits;
+}
+
+static size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo) {
+ return binfo->ngroups;
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
+ size_t extra;
+
+ if (fill) {
+ memset(bitmap, 0, bitmap_size(binfo));
+ return;
+ }
+
+ memset(bitmap, 0xffU, bitmap_size(binfo));
+ extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+ & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0) {
+ bitmap[binfo->ngroups - 1] >>= extra;
+ }
+}
+
+#endif /* BITMAP_USE_TREE */
+
+size_t
+bitmap_size(const bitmap_info_t *binfo) {
+ return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
+}
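
bitmap_init() above seeds an "empty" bitmap with all 1 bits and then clears the unused high bits of the last group, since bit 0 of each group maps to the first logical bit. Assuming 64-bit groups (the usual LP64 configuration), the trailing-bits shift works out as in this simplified sketch, where the bitmap_info_t bookkeeping is reduced to a bare array:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GROUP_NBITS 64	/* assumed: one group per 64 bits */
#define GROUP_MASK  (GROUP_NBITS - 1)

int
main(void) {
	size_t nbits = 100;
	size_t ngroups = (nbits + GROUP_NBITS - 1) / GROUP_NBITS;	/* -> 2 */
	uint64_t bitmap[2];

	memset(bitmap, 0xff, sizeof(bitmap));
	size_t extra = (GROUP_NBITS - (nbits & GROUP_MASK)) & GROUP_MASK; /* 28 */
	if (extra != 0) {
		/* Drop the 28 high bits that don't map to logical bits. */
		bitmap[ngroups - 1] >>= extra;
	}
	/* Last group keeps its low 36 bits set (logical bits 64..99). */
	printf("last group = 0x%016llx\n",
	    (unsigned long long)bitmap[ngroups - 1]);
	return 0;
}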
diff --git a/contrib/libs/jemalloc/src/ckh.c b/contrib/libs/jemalloc/src/ckh.c
index 1bf6df5a11..3c48134b75 100644
--- a/contrib/libs/jemalloc/src/ckh.c
+++ b/contrib/libs/jemalloc/src/ckh.c
@@ -34,24 +34,24 @@
* respectively.
*
******************************************************************************/
-#define JEMALLOC_CKH_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-
-#include "jemalloc/internal/ckh.h"
-
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/util.h"
-
+#define JEMALLOC_CKH_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+#include "jemalloc/internal/ckh.h"
+
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/util.h"
+
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
-static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
+static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
+static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
/******************************************************************************/
@@ -59,26 +59,26 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
-static size_t
-ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
+static size_t
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
- if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
- return (bucket << LG_CKH_BUCKET_CELLS) + i;
- }
+ if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
+ return (bucket << LG_CKH_BUCKET_CELLS) + i;
+ }
}
- return SIZE_T_MAX;
+ return SIZE_T_MAX;
}
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
-static size_t
-ckh_isearch(ckh_t *ckh, const void *key) {
+static size_t
+ckh_isearch(ckh_t *ckh, const void *key) {
size_t hashes[2], bucket, cell;
assert(ckh != NULL);
@@ -88,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key) {
/* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
- if (cell != SIZE_T_MAX) {
- return cell;
- }
+ if (cell != SIZE_T_MAX) {
+ return cell;
+ }
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
- return cell;
+ return cell;
}
-static bool
+static bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
- const void *data) {
+ const void *data) {
ckhc_t *cell;
unsigned offset, i;
@@ -108,8 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
- offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
- LG_CKH_BUCKET_CELLS);
+ offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+ LG_CKH_BUCKET_CELLS);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -117,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
cell->key = key;
cell->data = data;
ckh->count++;
- return false;
+ return false;
}
}
- return true;
+ return true;
}
/*
@@ -130,9 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
-static bool
+static bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
- void const **argdata) {
+ void const **argdata) {
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
@@ -150,8 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same
* bucket.
*/
- i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
- LG_CKH_BUCKET_CELLS);
+ i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+ LG_CKH_BUCKET_CELLS);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
@@ -191,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
if (tbucket == argbucket) {
*argkey = key;
*argdata = data;
- return true;
+ return true;
}
bucket = tbucket;
- if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
- return false;
- }
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+ return false;
+ }
}
}
-static bool
-ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
+static bool
+ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
@@ -211,28 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
- return false;
- }
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+ return false;
+ }
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
- return false;
- }
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+ return false;
+ }
/*
* Try to find a place for this item via iterative eviction/relocation.
*/
- return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
+ return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
}
/*
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
-static bool
-ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
+static bool
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
size_t count, i, nins;
const void *key, *data;
@@ -244,20 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count;
- return true;
+ return true;
}
nins++;
}
}
- return false;
+ return false;
}
static bool
-ckh_grow(tsd_t *tsd, ckh_t *ckh) {
+ckh_grow(tsd_t *tsd, ckh_t *ckh) {
bool ret;
ckhc_t *tab, *ttab;
- unsigned lg_prevbuckets, lg_curcells;
+ unsigned lg_prevbuckets, lg_curcells;
#ifdef CKH_COUNT
ckh->ngrows++;
@@ -274,14 +274,14 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) {
size_t usize;
lg_curcells++;
- usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (unlikely(usize == 0
- || usize > SC_LARGE_MAXCLASS)) {
+ usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (unlikely(usize == 0
+ || usize > SC_LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
- tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
- true, NULL, true, arena_ichoose(tsd, NULL));
+ tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
+ true, NULL, true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
ret = true;
goto label_return;
@@ -292,27 +292,27 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) {
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
- if (!ckh_rebuild(ckh, tab)) {
- idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
ret = false;
label_return:
- return ret;
+ return ret;
}
static void
-ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
+ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
ckhc_t *tab, *ttab;
- size_t usize;
- unsigned lg_prevbuckets, lg_curcells;
+ size_t usize;
+ unsigned lg_prevbuckets, lg_curcells;
/*
* It is possible (though unlikely, given well behaved hashes) that the
@@ -320,12 +320,12 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
- usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
+ usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
return;
- }
- tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
- true, arena_ichoose(tsd, NULL));
+ }
+ tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
+ true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -339,8 +339,8 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
- if (!ckh_rebuild(ckh, tab)) {
- idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -348,7 +348,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -357,8 +357,8 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
}
bool
-ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
- ckh_keycomp_t *keycomp) {
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ ckh_keycomp_t *keycomp) {
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
@@ -378,31 +378,31 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh->count = 0;
/*
- * Find the minimum power of 2 that is large enough to fit minitems
+ * Find the minimum power of 2 that is large enough to fit minitems
* entries. We are using (2+,2) cuckoo hashing, which has an expected
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
- * factor that will typically allow mincells items to fit without ever
+ * factor that will typically allow mincells items to fit without ever
* growing the table.
*/
assert(LG_CKH_BUCKET_CELLS > 0);
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
- lg_mincells++) {
- /* Do nothing. */
- }
+ lg_mincells++) {
+ /* Do nothing. */
+ }
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash;
ckh->keycomp = keycomp;
- usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
- if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
+ usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+ if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
- ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
- NULL, true, arena_ichoose(tsd, NULL));
+ ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
+ NULL, true, arena_ichoose(tsd, NULL));
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@@ -410,18 +410,18 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ret = false;
label_return:
- return ret;
+ return ret;
}
void
-ckh_delete(tsd_t *tsd, ckh_t *ckh) {
+ckh_delete(tsd_t *tsd, ckh_t *ckh) {
assert(ckh != NULL);
#ifdef CKH_VERBOSE
malloc_printf(
- "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
- " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
- " nrelocs: %"FMTu64"\n", __func__, ckh,
+ "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
+ " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
+ " nrelocs: %"FMTu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
@@ -429,42 +429,42 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh) {
(unsigned long long)ckh->nrelocs);
#endif
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
- if (config_debug) {
- memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
- }
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
+ if (config_debug) {
+ memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+ }
}
size_t
-ckh_count(ckh_t *ckh) {
+ckh_count(ckh_t *ckh) {
assert(ckh != NULL);
- return ckh->count;
+ return ckh->count;
}
bool
-ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
- if (key != NULL) {
+ if (key != NULL) {
*key = (void *)ckh->tab[i].key;
- }
- if (data != NULL) {
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[i].data;
- }
+ }
*tabind = i + 1;
- return false;
+ return false;
}
}
- return true;
+ return true;
}
bool
-ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
bool ret;
assert(ckh != NULL);
@@ -475,7 +475,7 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
#endif
while (ckh_try_insert(ckh, &key, &data)) {
- if (ckh_grow(tsd, ckh)) {
+ if (ckh_grow(tsd, ckh)) {
ret = true;
goto label_return;
}
@@ -483,24 +483,24 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
ret = false;
label_return:
- return ret;
+ return ret;
}
bool
-ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
- void **data) {
+ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
- if (key != NULL) {
+ if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
- }
- if (data != NULL) {
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
- }
+ }
ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */
@@ -510,50 +510,50 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
- ckh_shrink(tsd, ckh);
+ ckh_shrink(tsd, ckh);
}
- return false;
+ return false;
}
- return true;
+ return true;
}
bool
-ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
- if (key != NULL) {
+ if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
- }
- if (data != NULL) {
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
- }
- return false;
+ }
+ return false;
}
- return true;
+ return true;
}
void
-ckh_string_hash(const void *key, size_t r_hash[2]) {
+ckh_string_hash(const void *key, size_t r_hash[2]) {
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
-ckh_string_keycomp(const void *k1, const void *k2) {
- assert(k1 != NULL);
- assert(k2 != NULL);
+ckh_string_keycomp(const void *k1, const void *k2) {
+ assert(k1 != NULL);
+ assert(k2 != NULL);
- return !strcmp((char *)k1, (char *)k2);
+ return !strcmp((char *)k1, (char *)k2);
}
void
-ckh_pointer_hash(const void *key, size_t r_hash[2]) {
+ckh_pointer_hash(const void *key, size_t r_hash[2]) {
union {
const void *v;
size_t i;
@@ -565,6 +565,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2]) {
}
bool
-ckh_pointer_keycomp(const void *k1, const void *k2) {
- return (k1 == k2);
+ckh_pointer_keycomp(const void *k1, const void *k2) {
+ return (k1 == k2);
}
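
ckh_new() above sizes the table from minitems using the conservative 0.75 load factor described in its comment: the ((minitems + (3 - minitems % 3)) / 3) << 2 expression yields at least minitems / 0.75 cells, which the loop then rounds up to a power of two. A worked sketch of that sizing, assuming LG_CKH_BUCKET_CELLS = 2 (four cells per logical bucket):

#include <stddef.h>
#include <stdio.h>

#define LG_CKH_BUCKET_CELLS 2	/* assumed: 4 cells per bucket */

int
main(void) {
	size_t minitems = 10;

	/* At least minitems/0.75 cells, per the load-factor comment. */
	size_t mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; /* 16 */

	/* Round the cell count up to a power of two. */
	unsigned lg_mincells = LG_CKH_BUCKET_CELLS;
	while (((size_t)1 << lg_mincells) < mincells) {
		lg_mincells++;
	}

	printf("mincells=%zu lg_mincells=%u buckets=%zu\n", mincells,
	    lg_mincells, (size_t)1 << (lg_mincells - LG_CKH_BUCKET_CELLS));
	return 0;
}

For minitems = 10 this gives 16 cells in 4 buckets, comfortably above the 13.3 cells that a strict 0.75 load factor would require, so the table can normally absorb minitems insertions without growing.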
diff --git a/contrib/libs/jemalloc/src/ctl.c b/contrib/libs/jemalloc/src/ctl.c
index 48afaa61f4..b53d15fd0d 100644
--- a/contrib/libs/jemalloc/src/ctl.c
+++ b/contrib/libs/jemalloc/src/ctl.c
@@ -1,78 +1,78 @@
-#define JEMALLOC_CTL_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/extent_dss.h"
-#include "jemalloc/internal/extent_mmap.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/util.h"
-
+#define JEMALLOC_CTL_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/util.h"
+
/******************************************************************************/
/* Data. */
/*
* ctl_mtx protects the following:
- * - ctl_stats->*
+ * - ctl_stats->*
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
-static ctl_stats_t *ctl_stats;
-static ctl_arenas_t *ctl_arenas;
+static ctl_stats_t *ctl_stats;
+static ctl_arenas_t *ctl_arenas;
/******************************************************************************/
/* Helpers for named and indexed nodes. */
-static const ctl_named_node_t *
-ctl_named_node(const ctl_node_t *node) {
+static const ctl_named_node_t *
+ctl_named_node(const ctl_node_t *node) {
return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}
-static const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, size_t index) {
+static const ctl_named_node_t *
+ctl_named_children(const ctl_named_node_t *node, size_t index) {
const ctl_named_node_t *children = ctl_named_node(node->children);
return (children ? &children[index] : NULL);
}
-static const ctl_indexed_node_t *
-ctl_indexed_node(const ctl_node_t *node) {
- return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
+static const ctl_indexed_node_t *
+ctl_indexed_node(const ctl_node_t *node) {
+ return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-#define CTL_PROTO(n) \
-static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+#define CTL_PROTO(n) \
+static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-#define INDEX_PROTO(n) \
-static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
- const size_t *mib, size_t miblen, size_t i);
+#define INDEX_PROTO(n) \
+static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
+ const size_t *mib, size_t miblen, size_t i);
CTL_PROTO(version)
CTL_PROTO(epoch)
-CTL_PROTO(background_thread)
-CTL_PROTO(max_background_threads)
+CTL_PROTO(background_thread)
+CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
-CTL_PROTO(thread_prof_name)
-CTL_PROTO(thread_prof_active)
+CTL_PROTO(thread_prof_name)
+CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
-CTL_PROTO(config_cache_oblivious)
+CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_malloc_conf)
-CTL_PROTO(config_opt_safety_checks)
+CTL_PROTO(config_malloc_conf)
+CTL_PROTO(config_opt_safety_checks)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
@@ -80,287 +80,287 @@ CTL_PROTO(config_stats)
CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
-CTL_PROTO(opt_abort_conf)
-CTL_PROTO(opt_confirm_conf)
-CTL_PROTO(opt_metadata_thp)
-CTL_PROTO(opt_retain)
+CTL_PROTO(opt_abort_conf)
+CTL_PROTO(opt_confirm_conf)
+CTL_PROTO(opt_metadata_thp)
+CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
-CTL_PROTO(opt_percpu_arena)
-CTL_PROTO(opt_oversize_threshold)
-CTL_PROTO(opt_background_thread)
-CTL_PROTO(opt_max_background_threads)
-CTL_PROTO(opt_dirty_decay_ms)
-CTL_PROTO(opt_muzzy_decay_ms)
+CTL_PROTO(opt_percpu_arena)
+CTL_PROTO(opt_oversize_threshold)
+CTL_PROTO(opt_background_thread)
+CTL_PROTO(opt_max_background_threads)
+CTL_PROTO(opt_dirty_decay_ms)
+CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
-CTL_PROTO(opt_stats_print_opts)
+CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
-CTL_PROTO(opt_thp)
-CTL_PROTO(opt_lg_extent_max_active_fit)
+CTL_PROTO(opt_thp)
+CTL_PROTO(opt_lg_extent_max_active_fit)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
-CTL_PROTO(opt_prof_thread_active_init)
+CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
-CTL_PROTO(tcache_create)
-CTL_PROTO(tcache_flush)
-CTL_PROTO(tcache_destroy)
-CTL_PROTO(arena_i_initialized)
-CTL_PROTO(arena_i_decay)
+CTL_PROTO(tcache_create)
+CTL_PROTO(tcache_flush)
+CTL_PROTO(tcache_destroy)
+CTL_PROTO(arena_i_initialized)
+CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_purge)
-CTL_PROTO(arena_i_reset)
-CTL_PROTO(arena_i_destroy)
+CTL_PROTO(arena_i_reset)
+CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
-CTL_PROTO(arena_i_dirty_decay_ms)
-CTL_PROTO(arena_i_muzzy_decay_ms)
-CTL_PROTO(arena_i_extent_hooks)
-CTL_PROTO(arena_i_retain_grow_limit)
+CTL_PROTO(arena_i_dirty_decay_ms)
+CTL_PROTO(arena_i_muzzy_decay_ms)
+CTL_PROTO(arena_i_extent_hooks)
+CTL_PROTO(arena_i_retain_grow_limit)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
-CTL_PROTO(arenas_bin_i_slab_size)
-CTL_PROTO(arenas_bin_i_nshards)
+CTL_PROTO(arenas_bin_i_slab_size)
+CTL_PROTO(arenas_bin_i_nshards)
INDEX_PROTO(arenas_bin_i)
-CTL_PROTO(arenas_lextent_i_size)
-INDEX_PROTO(arenas_lextent_i)
+CTL_PROTO(arenas_lextent_i_size)
+INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_narenas)
-CTL_PROTO(arenas_dirty_decay_ms)
-CTL_PROTO(arenas_muzzy_decay_ms)
+CTL_PROTO(arenas_dirty_decay_ms)
+CTL_PROTO(arenas_muzzy_decay_ms)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
-CTL_PROTO(arenas_nlextents)
-CTL_PROTO(arenas_create)
-CTL_PROTO(arenas_lookup)
-CTL_PROTO(prof_thread_active_init)
+CTL_PROTO(arenas_nlextents)
+CTL_PROTO(arenas_create)
+CTL_PROTO(arenas_lookup)
+CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
-CTL_PROTO(prof_gdump)
-CTL_PROTO(prof_reset)
+CTL_PROTO(prof_gdump)
+CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
-CTL_PROTO(lg_prof_sample)
-CTL_PROTO(prof_log_start)
-CTL_PROTO(prof_log_stop)
+CTL_PROTO(lg_prof_sample)
+CTL_PROTO(prof_log_start)
+CTL_PROTO(prof_log_stop)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
-CTL_PROTO(stats_arenas_i_small_nfills)
-CTL_PROTO(stats_arenas_i_small_nflushes)
+CTL_PROTO(stats_arenas_i_small_nfills)
+CTL_PROTO(stats_arenas_i_small_nflushes)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
-CTL_PROTO(stats_arenas_i_large_nfills)
-CTL_PROTO(stats_arenas_i_large_nflushes)
+CTL_PROTO(stats_arenas_i_large_nfills)
+CTL_PROTO(stats_arenas_i_large_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
-CTL_PROTO(stats_arenas_i_bins_j_curregs)
+CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-CTL_PROTO(stats_arenas_i_bins_j_nslabs)
-CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
-CTL_PROTO(stats_arenas_i_bins_j_curslabs)
-CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs)
+CTL_PROTO(stats_arenas_i_bins_j_nslabs)
+CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
+CTL_PROTO(stats_arenas_i_bins_j_curslabs)
+CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs)
INDEX_PROTO(stats_arenas_i_bins_j)
-CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
-CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
-CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
-CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
-INDEX_PROTO(stats_arenas_i_lextents_j)
-CTL_PROTO(stats_arenas_i_extents_j_ndirty)
-CTL_PROTO(stats_arenas_i_extents_j_nmuzzy)
-CTL_PROTO(stats_arenas_i_extents_j_nretained)
-CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
-CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
-CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
-INDEX_PROTO(stats_arenas_i_extents_j)
+CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
+CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
+CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
+CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
+INDEX_PROTO(stats_arenas_i_lextents_j)
+CTL_PROTO(stats_arenas_i_extents_j_ndirty)
+CTL_PROTO(stats_arenas_i_extents_j_nmuzzy)
+CTL_PROTO(stats_arenas_i_extents_j_nretained)
+CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
+CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
+CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
+INDEX_PROTO(stats_arenas_i_extents_j)
CTL_PROTO(stats_arenas_i_nthreads)
-CTL_PROTO(stats_arenas_i_uptime)
+CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
-CTL_PROTO(stats_arenas_i_dirty_decay_ms)
-CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
+CTL_PROTO(stats_arenas_i_dirty_decay_ms)
+CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
-CTL_PROTO(stats_arenas_i_pmuzzy)
+CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped)
-CTL_PROTO(stats_arenas_i_retained)
-CTL_PROTO(stats_arenas_i_extent_avail)
-CTL_PROTO(stats_arenas_i_dirty_npurge)
-CTL_PROTO(stats_arenas_i_dirty_nmadvise)
-CTL_PROTO(stats_arenas_i_dirty_purged)
-CTL_PROTO(stats_arenas_i_muzzy_npurge)
-CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
-CTL_PROTO(stats_arenas_i_muzzy_purged)
-CTL_PROTO(stats_arenas_i_base)
-CTL_PROTO(stats_arenas_i_internal)
-CTL_PROTO(stats_arenas_i_metadata_thp)
-CTL_PROTO(stats_arenas_i_tcache_bytes)
-CTL_PROTO(stats_arenas_i_resident)
-CTL_PROTO(stats_arenas_i_abandoned_vm)
+CTL_PROTO(stats_arenas_i_retained)
+CTL_PROTO(stats_arenas_i_extent_avail)
+CTL_PROTO(stats_arenas_i_dirty_npurge)
+CTL_PROTO(stats_arenas_i_dirty_nmadvise)
+CTL_PROTO(stats_arenas_i_dirty_purged)
+CTL_PROTO(stats_arenas_i_muzzy_npurge)
+CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
+CTL_PROTO(stats_arenas_i_muzzy_purged)
+CTL_PROTO(stats_arenas_i_base)
+CTL_PROTO(stats_arenas_i_internal)
+CTL_PROTO(stats_arenas_i_metadata_thp)
+CTL_PROTO(stats_arenas_i_tcache_bytes)
+CTL_PROTO(stats_arenas_i_resident)
+CTL_PROTO(stats_arenas_i_abandoned_vm)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
-CTL_PROTO(stats_background_thread_num_threads)
-CTL_PROTO(stats_background_thread_num_runs)
-CTL_PROTO(stats_background_thread_run_interval)
-CTL_PROTO(stats_metadata)
-CTL_PROTO(stats_metadata_thp)
-CTL_PROTO(stats_resident)
+CTL_PROTO(stats_background_thread_num_threads)
+CTL_PROTO(stats_background_thread_num_runs)
+CTL_PROTO(stats_background_thread_run_interval)
+CTL_PROTO(stats_metadata)
+CTL_PROTO(stats_metadata_thp)
+CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
-CTL_PROTO(stats_retained)
-CTL_PROTO(experimental_hooks_install)
-CTL_PROTO(experimental_hooks_remove)
-CTL_PROTO(experimental_utilization_query)
-CTL_PROTO(experimental_utilization_batch_query)
-CTL_PROTO(experimental_arenas_i_pactivep)
-INDEX_PROTO(experimental_arenas_i)
-
-#define MUTEX_STATS_CTL_PROTO_GEN(n) \
-CTL_PROTO(stats_##n##_num_ops) \
-CTL_PROTO(stats_##n##_num_wait) \
-CTL_PROTO(stats_##n##_num_spin_acq) \
-CTL_PROTO(stats_##n##_num_owner_switch) \
-CTL_PROTO(stats_##n##_total_wait_time) \
-CTL_PROTO(stats_##n##_max_wait_time) \
-CTL_PROTO(stats_##n##_max_num_thds)
-
-/* Global mutexes. */
-#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
-MUTEX_PROF_GLOBAL_MUTEXES
-#undef OP
-
-/* Per arena mutexes. */
-#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
-MUTEX_PROF_ARENA_MUTEXES
-#undef OP
-
-/* Arena bin mutexes. */
-MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
-#undef MUTEX_STATS_CTL_PROTO_GEN
-
-CTL_PROTO(stats_mutexes_reset)
-
+CTL_PROTO(stats_retained)
+CTL_PROTO(experimental_hooks_install)
+CTL_PROTO(experimental_hooks_remove)
+CTL_PROTO(experimental_utilization_query)
+CTL_PROTO(experimental_utilization_batch_query)
+CTL_PROTO(experimental_arenas_i_pactivep)
+INDEX_PROTO(experimental_arenas_i)
+
+#define MUTEX_STATS_CTL_PROTO_GEN(n) \
+CTL_PROTO(stats_##n##_num_ops) \
+CTL_PROTO(stats_##n##_num_wait) \
+CTL_PROTO(stats_##n##_num_spin_acq) \
+CTL_PROTO(stats_##n##_num_owner_switch) \
+CTL_PROTO(stats_##n##_total_wait_time) \
+CTL_PROTO(stats_##n##_max_wait_time) \
+CTL_PROTO(stats_##n##_max_num_thds)
+
+/* Global mutexes. */
+#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+
+/* Per arena mutexes. */
+#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+
+/* Arena bin mutexes. */
+MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
+#undef MUTEX_STATS_CTL_PROTO_GEN
+
+CTL_PROTO(stats_mutexes_reset)
+
/******************************************************************************/
/* mallctl tree. */
-#define NAME(n) {true}, n
-#define CHILD(t, c) \
+#define NAME(n) {true}, n
+#define CHILD(t, c) \
sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
(ctl_node_t *)c##_node, \
NULL
-#define CTL(c) 0, NULL, c##_ctl
+#define CTL(c) 0, NULL, c##_ctl
/*
* Only handles internal indexed nodes, since there are currently no external
* ones.
*/
-#define INDEX(i) {false}, i##_index
+#define INDEX(i) {false}, i##_index
-static const ctl_named_node_t thread_tcache_node[] = {
+static const ctl_named_node_t thread_tcache_node[] = {
{NAME("enabled"), CTL(thread_tcache_enabled)},
{NAME("flush"), CTL(thread_tcache_flush)}
};
-static const ctl_named_node_t thread_prof_node[] = {
- {NAME("name"), CTL(thread_prof_name)},
- {NAME("active"), CTL(thread_prof_active)}
-};
-
+static const ctl_named_node_t thread_prof_node[] = {
+ {NAME("name"), CTL(thread_prof_name)},
+ {NAME("active"), CTL(thread_prof_active)}
+};
+
static const ctl_named_node_t thread_node[] = {
{NAME("arena"), CTL(thread_arena)},
{NAME("allocated"), CTL(thread_allocated)},
{NAME("allocatedp"), CTL(thread_allocatedp)},
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
- {NAME("tcache"), CHILD(named, thread_tcache)},
- {NAME("prof"), CHILD(named, thread_prof)}
+ {NAME("tcache"), CHILD(named, thread_tcache)},
+ {NAME("prof"), CHILD(named, thread_prof)}
};
static const ctl_named_node_t config_node[] = {
- {NAME("cache_oblivious"), CTL(config_cache_oblivious)},
- {NAME("debug"), CTL(config_debug)},
- {NAME("fill"), CTL(config_fill)},
- {NAME("lazy_lock"), CTL(config_lazy_lock)},
- {NAME("malloc_conf"), CTL(config_malloc_conf)},
- {NAME("opt_safety_checks"), CTL(config_opt_safety_checks)},
- {NAME("prof"), CTL(config_prof)},
- {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
- {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
- {NAME("stats"), CTL(config_stats)},
- {NAME("utrace"), CTL(config_utrace)},
- {NAME("xmalloc"), CTL(config_xmalloc)}
+ {NAME("cache_oblivious"), CTL(config_cache_oblivious)},
+ {NAME("debug"), CTL(config_debug)},
+ {NAME("fill"), CTL(config_fill)},
+ {NAME("lazy_lock"), CTL(config_lazy_lock)},
+ {NAME("malloc_conf"), CTL(config_malloc_conf)},
+ {NAME("opt_safety_checks"), CTL(config_opt_safety_checks)},
+ {NAME("prof"), CTL(config_prof)},
+ {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
+ {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
+ {NAME("stats"), CTL(config_stats)},
+ {NAME("utrace"), CTL(config_utrace)},
+ {NAME("xmalloc"), CTL(config_xmalloc)}
};
static const ctl_named_node_t opt_node[] = {
- {NAME("abort"), CTL(opt_abort)},
- {NAME("abort_conf"), CTL(opt_abort_conf)},
- {NAME("confirm_conf"), CTL(opt_confirm_conf)},
- {NAME("metadata_thp"), CTL(opt_metadata_thp)},
- {NAME("retain"), CTL(opt_retain)},
- {NAME("dss"), CTL(opt_dss)},
- {NAME("narenas"), CTL(opt_narenas)},
- {NAME("percpu_arena"), CTL(opt_percpu_arena)},
- {NAME("oversize_threshold"), CTL(opt_oversize_threshold)},
- {NAME("background_thread"), CTL(opt_background_thread)},
- {NAME("max_background_threads"), CTL(opt_max_background_threads)},
- {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
- {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
- {NAME("stats_print"), CTL(opt_stats_print)},
- {NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
- {NAME("junk"), CTL(opt_junk)},
- {NAME("zero"), CTL(opt_zero)},
- {NAME("utrace"), CTL(opt_utrace)},
- {NAME("xmalloc"), CTL(opt_xmalloc)},
- {NAME("tcache"), CTL(opt_tcache)},
- {NAME("thp"), CTL(opt_thp)},
- {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
- {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
- {NAME("prof"), CTL(opt_prof)},
- {NAME("prof_prefix"), CTL(opt_prof_prefix)},
- {NAME("prof_active"), CTL(opt_prof_active)},
- {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
- {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
- {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
- {NAME("prof_gdump"), CTL(opt_prof_gdump)},
- {NAME("prof_final"), CTL(opt_prof_final)},
- {NAME("prof_leak"), CTL(opt_prof_leak)},
- {NAME("prof_accum"), CTL(opt_prof_accum)}
-};
-
-static const ctl_named_node_t tcache_node[] = {
- {NAME("create"), CTL(tcache_create)},
- {NAME("flush"), CTL(tcache_flush)},
- {NAME("destroy"), CTL(tcache_destroy)}
+ {NAME("abort"), CTL(opt_abort)},
+ {NAME("abort_conf"), CTL(opt_abort_conf)},
+ {NAME("confirm_conf"), CTL(opt_confirm_conf)},
+ {NAME("metadata_thp"), CTL(opt_metadata_thp)},
+ {NAME("retain"), CTL(opt_retain)},
+ {NAME("dss"), CTL(opt_dss)},
+ {NAME("narenas"), CTL(opt_narenas)},
+ {NAME("percpu_arena"), CTL(opt_percpu_arena)},
+ {NAME("oversize_threshold"), CTL(opt_oversize_threshold)},
+ {NAME("background_thread"), CTL(opt_background_thread)},
+ {NAME("max_background_threads"), CTL(opt_max_background_threads)},
+ {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
+ {NAME("stats_print"), CTL(opt_stats_print)},
+ {NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
+ {NAME("junk"), CTL(opt_junk)},
+ {NAME("zero"), CTL(opt_zero)},
+ {NAME("utrace"), CTL(opt_utrace)},
+ {NAME("xmalloc"), CTL(opt_xmalloc)},
+ {NAME("tcache"), CTL(opt_tcache)},
+ {NAME("thp"), CTL(opt_thp)},
+ {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
+ {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
+ {NAME("prof"), CTL(opt_prof)},
+ {NAME("prof_prefix"), CTL(opt_prof_prefix)},
+ {NAME("prof_active"), CTL(opt_prof_active)},
+ {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
+ {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
+ {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
+ {NAME("prof_gdump"), CTL(opt_prof_gdump)},
+ {NAME("prof_final"), CTL(opt_prof_final)},
+ {NAME("prof_leak"), CTL(opt_prof_leak)},
+ {NAME("prof_accum"), CTL(opt_prof_accum)}
};
+static const ctl_named_node_t tcache_node[] = {
+ {NAME("create"), CTL(tcache_create)},
+ {NAME("flush"), CTL(tcache_flush)},
+ {NAME("destroy"), CTL(tcache_destroy)}
+};
+
static const ctl_named_node_t arena_i_node[] = {
- {NAME("initialized"), CTL(arena_i_initialized)},
- {NAME("decay"), CTL(arena_i_decay)},
- {NAME("purge"), CTL(arena_i_purge)},
- {NAME("reset"), CTL(arena_i_reset)},
- {NAME("destroy"), CTL(arena_i_destroy)},
- {NAME("dss"), CTL(arena_i_dss)},
- {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
- {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
- {NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
- {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}
+ {NAME("initialized"), CTL(arena_i_initialized)},
+ {NAME("decay"), CTL(arena_i_decay)},
+ {NAME("purge"), CTL(arena_i_purge)},
+ {NAME("reset"), CTL(arena_i_reset)},
+ {NAME("destroy"), CTL(arena_i_destroy)},
+ {NAME("dss"), CTL(arena_i_dss)},
+ {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
+ {NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
+ {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}
};
static const ctl_named_node_t super_arena_i_node[] = {
- {NAME(""), CHILD(named, arena_i)}
+ {NAME(""), CHILD(named, arena_i)}
};
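
The node tables above back the hierarchical mallctl() namespace: each dotted component of a name selects a child in one of these arrays (NAME entries for named children, INDEX entries for numbered ones) until a leaf *_ctl handler is reached. A minimal caller-side example using the public API; note that depending on how jemalloc was built the symbol may carry a je_ prefix, and the program must be linked against jemalloc:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void) {
	/* "opt.narenas" resolves through opt_node to the opt_narenas_ctl leaf. */
	unsigned narenas;
	size_t len = sizeof(narenas);
	int err = mallctl("opt.narenas", &narenas, &len, NULL, 0);
	if (err != 0) {
		fprintf(stderr, "mallctl failed: %d\n", err);
		return 1;
	}
	printf("opt.narenas = %u\n", narenas);
	return 0;
}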
static const ctl_indexed_node_t arena_node[] = {
@@ -368,271 +368,271 @@ static const ctl_indexed_node_t arena_node[] = {
};
static const ctl_named_node_t arenas_bin_i_node[] = {
- {NAME("size"), CTL(arenas_bin_i_size)},
- {NAME("nregs"), CTL(arenas_bin_i_nregs)},
- {NAME("slab_size"), CTL(arenas_bin_i_slab_size)},
- {NAME("nshards"), CTL(arenas_bin_i_nshards)}
+ {NAME("size"), CTL(arenas_bin_i_size)},
+ {NAME("nregs"), CTL(arenas_bin_i_nregs)},
+ {NAME("slab_size"), CTL(arenas_bin_i_slab_size)},
+ {NAME("nshards"), CTL(arenas_bin_i_nshards)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
- {NAME(""), CHILD(named, arenas_bin_i)}
+ {NAME(""), CHILD(named, arenas_bin_i)}
};
static const ctl_indexed_node_t arenas_bin_node[] = {
{INDEX(arenas_bin_i)}
};
-static const ctl_named_node_t arenas_lextent_i_node[] = {
- {NAME("size"), CTL(arenas_lextent_i_size)}
+static const ctl_named_node_t arenas_lextent_i_node[] = {
+ {NAME("size"), CTL(arenas_lextent_i_size)}
};
-static const ctl_named_node_t super_arenas_lextent_i_node[] = {
- {NAME(""), CHILD(named, arenas_lextent_i)}
+static const ctl_named_node_t super_arenas_lextent_i_node[] = {
+ {NAME(""), CHILD(named, arenas_lextent_i)}
};
-static const ctl_indexed_node_t arenas_lextent_node[] = {
- {INDEX(arenas_lextent_i)}
+static const ctl_indexed_node_t arenas_lextent_node[] = {
+ {INDEX(arenas_lextent_i)}
};
static const ctl_named_node_t arenas_node[] = {
- {NAME("narenas"), CTL(arenas_narenas)},
- {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
- {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
- {NAME("quantum"), CTL(arenas_quantum)},
- {NAME("page"), CTL(arenas_page)},
- {NAME("tcache_max"), CTL(arenas_tcache_max)},
- {NAME("nbins"), CTL(arenas_nbins)},
- {NAME("nhbins"), CTL(arenas_nhbins)},
- {NAME("bin"), CHILD(indexed, arenas_bin)},
- {NAME("nlextents"), CTL(arenas_nlextents)},
- {NAME("lextent"), CHILD(indexed, arenas_lextent)},
- {NAME("create"), CTL(arenas_create)},
- {NAME("lookup"), CTL(arenas_lookup)}
+ {NAME("narenas"), CTL(arenas_narenas)},
+ {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
+ {NAME("quantum"), CTL(arenas_quantum)},
+ {NAME("page"), CTL(arenas_page)},
+ {NAME("tcache_max"), CTL(arenas_tcache_max)},
+ {NAME("nbins"), CTL(arenas_nbins)},
+ {NAME("nhbins"), CTL(arenas_nhbins)},
+ {NAME("bin"), CHILD(indexed, arenas_bin)},
+ {NAME("nlextents"), CTL(arenas_nlextents)},
+ {NAME("lextent"), CHILD(indexed, arenas_lextent)},
+ {NAME("create"), CTL(arenas_create)},
+ {NAME("lookup"), CTL(arenas_lookup)}
};
static const ctl_named_node_t prof_node[] = {
- {NAME("thread_active_init"), CTL(prof_thread_active_init)},
+ {NAME("thread_active_init"), CTL(prof_thread_active_init)},
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
- {NAME("gdump"), CTL(prof_gdump)},
- {NAME("reset"), CTL(prof_reset)},
- {NAME("interval"), CTL(prof_interval)},
- {NAME("lg_sample"), CTL(lg_prof_sample)},
- {NAME("log_start"), CTL(prof_log_start)},
- {NAME("log_stop"), CTL(prof_log_stop)}
+ {NAME("gdump"), CTL(prof_gdump)},
+ {NAME("reset"), CTL(prof_reset)},
+ {NAME("interval"), CTL(prof_interval)},
+ {NAME("lg_sample"), CTL(lg_prof_sample)},
+ {NAME("log_start"), CTL(prof_log_start)},
+ {NAME("log_stop"), CTL(prof_log_stop)}
};
-static const ctl_named_node_t stats_arenas_i_small_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)},
- {NAME("nfills"), CTL(stats_arenas_i_small_nfills)},
- {NAME("nflushes"), CTL(stats_arenas_i_small_nflushes)}
+static const ctl_named_node_t stats_arenas_i_small_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)},
+ {NAME("nfills"), CTL(stats_arenas_i_small_nfills)},
+ {NAME("nflushes"), CTL(stats_arenas_i_small_nflushes)}
};
-static const ctl_named_node_t stats_arenas_i_large_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)},
- {NAME("nfills"), CTL(stats_arenas_i_large_nfills)},
- {NAME("nflushes"), CTL(stats_arenas_i_large_nflushes)}
+static const ctl_named_node_t stats_arenas_i_large_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)},
+ {NAME("nfills"), CTL(stats_arenas_i_large_nfills)},
+ {NAME("nflushes"), CTL(stats_arenas_i_large_nflushes)}
};
-#define MUTEX_PROF_DATA_NODE(prefix) \
-static const ctl_named_node_t stats_##prefix##_node[] = { \
- {NAME("num_ops"), \
- CTL(stats_##prefix##_num_ops)}, \
- {NAME("num_wait"), \
- CTL(stats_##prefix##_num_wait)}, \
- {NAME("num_spin_acq"), \
- CTL(stats_##prefix##_num_spin_acq)}, \
- {NAME("num_owner_switch"), \
- CTL(stats_##prefix##_num_owner_switch)}, \
- {NAME("total_wait_time"), \
- CTL(stats_##prefix##_total_wait_time)}, \
- {NAME("max_wait_time"), \
- CTL(stats_##prefix##_max_wait_time)}, \
- {NAME("max_num_thds"), \
- CTL(stats_##prefix##_max_num_thds)} \
- /* Note that # of current waiting thread not provided. */ \
+#define MUTEX_PROF_DATA_NODE(prefix) \
+static const ctl_named_node_t stats_##prefix##_node[] = { \
+ {NAME("num_ops"), \
+ CTL(stats_##prefix##_num_ops)}, \
+ {NAME("num_wait"), \
+ CTL(stats_##prefix##_num_wait)}, \
+ {NAME("num_spin_acq"), \
+ CTL(stats_##prefix##_num_spin_acq)}, \
+ {NAME("num_owner_switch"), \
+ CTL(stats_##prefix##_num_owner_switch)}, \
+ {NAME("total_wait_time"), \
+ CTL(stats_##prefix##_total_wait_time)}, \
+ {NAME("max_wait_time"), \
+ CTL(stats_##prefix##_max_wait_time)}, \
+ {NAME("max_num_thds"), \
+ CTL(stats_##prefix##_max_num_thds)} \
+ /* Note that # of current waiting thread not provided. */ \
};
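/*
 * Illustrative aside (not part of this commit): for a hypothetical prefix
 * `foo`, MUTEX_PROF_DATA_NODE(foo) expands to a node table along the lines of
 *
 *     static const ctl_named_node_t stats_foo_node[] = {
 *         {NAME("num_ops"),        CTL(stats_foo_num_ops)},
 *         {NAME("num_wait"),       CTL(stats_foo_num_wait)},
 *         ...
 *         {NAME("max_num_thds"),   CTL(stats_foo_max_num_thds)}
 *     };
 *
 * i.e. one "stats.foo.<counter>" leaf per mutex profiling counter.
 */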
-MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)
+MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
- {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
- {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
- {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
- {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
- {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
- {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
- {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
- {NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)},
- {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
+ {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
+ {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
+ {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
+ {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
+ {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
+ {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
+ {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
+ {NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)},
+ {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
};
-
+
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
+ {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};
static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
{INDEX(stats_arenas_i_bins_j)}
};
-static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
- {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
- {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
+static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
+ {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
};
-static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
+static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
};
-static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
- {INDEX(stats_arenas_i_lextents_j)}
-};
-
-static const ctl_named_node_t stats_arenas_i_extents_j_node[] = {
- {NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)},
- {NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)},
- {NAME("nretained"), CTL(stats_arenas_i_extents_j_nretained)},
- {NAME("dirty_bytes"), CTL(stats_arenas_i_extents_j_dirty_bytes)},
- {NAME("muzzy_bytes"), CTL(stats_arenas_i_extents_j_muzzy_bytes)},
- {NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)}
-};
-
-static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_extents_j)}
-};
-
-static const ctl_indexed_node_t stats_arenas_i_extents_node[] = {
- {INDEX(stats_arenas_i_extents_j)}
-};
-
-#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
-MUTEX_PROF_ARENA_MUTEXES
-#undef OP
-
-static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
-#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
-MUTEX_PROF_ARENA_MUTEXES
-#undef OP
+static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
+ {INDEX(stats_arenas_i_lextents_j)}
};
+static const ctl_named_node_t stats_arenas_i_extents_j_node[] = {
+ {NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)},
+ {NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)},
+ {NAME("nretained"), CTL(stats_arenas_i_extents_j_nretained)},
+ {NAME("dirty_bytes"), CTL(stats_arenas_i_extents_j_dirty_bytes)},
+ {NAME("muzzy_bytes"), CTL(stats_arenas_i_extents_j_muzzy_bytes)},
+ {NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)}
+};
+
+static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_extents_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_extents_node[] = {
+ {INDEX(stats_arenas_i_extents_j)}
+};
+
+#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+
+static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
+#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+};
+
static const ctl_named_node_t stats_arenas_i_node[] = {
- {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
- {NAME("uptime"), CTL(stats_arenas_i_uptime)},
- {NAME("dss"), CTL(stats_arenas_i_dss)},
- {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
- {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
- {NAME("pactive"), CTL(stats_arenas_i_pactive)},
- {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
- {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
- {NAME("mapped"), CTL(stats_arenas_i_mapped)},
- {NAME("retained"), CTL(stats_arenas_i_retained)},
- {NAME("extent_avail"), CTL(stats_arenas_i_extent_avail)},
- {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
- {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
- {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
- {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
- {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
- {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
- {NAME("base"), CTL(stats_arenas_i_base)},
- {NAME("internal"), CTL(stats_arenas_i_internal)},
- {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
- {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
- {NAME("resident"), CTL(stats_arenas_i_resident)},
- {NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)},
- {NAME("small"), CHILD(named, stats_arenas_i_small)},
- {NAME("large"), CHILD(named, stats_arenas_i_large)},
- {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
- {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
- {NAME("extents"), CHILD(indexed, stats_arenas_i_extents)},
- {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
+ {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("uptime"), CTL(stats_arenas_i_uptime)},
+ {NAME("dss"), CTL(stats_arenas_i_dss)},
+ {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
+ {NAME("pactive"), CTL(stats_arenas_i_pactive)},
+ {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
+ {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
+ {NAME("mapped"), CTL(stats_arenas_i_mapped)},
+ {NAME("retained"), CTL(stats_arenas_i_retained)},
+ {NAME("extent_avail"), CTL(stats_arenas_i_extent_avail)},
+ {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
+ {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
+ {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
+ {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
+ {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
+ {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
+ {NAME("base"), CTL(stats_arenas_i_base)},
+ {NAME("internal"), CTL(stats_arenas_i_internal)},
+ {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
+ {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
+ {NAME("resident"), CTL(stats_arenas_i_resident)},
+ {NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)},
+ {NAME("small"), CHILD(named, stats_arenas_i_small)},
+ {NAME("large"), CHILD(named, stats_arenas_i_large)},
+ {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
+ {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
+ {NAME("extents"), CHILD(indexed, stats_arenas_i_extents)},
+ {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i)}
+ {NAME(""), CHILD(named, stats_arenas_i)}
};
static const ctl_indexed_node_t stats_arenas_node[] = {
{INDEX(stats_arenas_i)}
};
-static const ctl_named_node_t stats_background_thread_node[] = {
- {NAME("num_threads"), CTL(stats_background_thread_num_threads)},
- {NAME("num_runs"), CTL(stats_background_thread_num_runs)},
- {NAME("run_interval"), CTL(stats_background_thread_run_interval)}
-};
-
-#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
-MUTEX_PROF_GLOBAL_MUTEXES
-#undef OP
-
-static const ctl_named_node_t stats_mutexes_node[] = {
-#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
-MUTEX_PROF_GLOBAL_MUTEXES
-#undef OP
- {NAME("reset"), CTL(stats_mutexes_reset)}
-};
-#undef MUTEX_PROF_DATA_NODE
-
+static const ctl_named_node_t stats_background_thread_node[] = {
+ {NAME("num_threads"), CTL(stats_background_thread_num_threads)},
+ {NAME("num_runs"), CTL(stats_background_thread_num_runs)},
+ {NAME("run_interval"), CTL(stats_background_thread_run_interval)}
+};
+
+#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+
+static const ctl_named_node_t stats_mutexes_node[] = {
+#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+ {NAME("reset"), CTL(stats_mutexes_reset)}
+};
+#undef MUTEX_PROF_DATA_NODE
+
static const ctl_named_node_t stats_node[] = {
- {NAME("allocated"), CTL(stats_allocated)},
- {NAME("active"), CTL(stats_active)},
- {NAME("metadata"), CTL(stats_metadata)},
- {NAME("metadata_thp"), CTL(stats_metadata_thp)},
- {NAME("resident"), CTL(stats_resident)},
- {NAME("mapped"), CTL(stats_mapped)},
- {NAME("retained"), CTL(stats_retained)},
- {NAME("background_thread"),
- CHILD(named, stats_background_thread)},
- {NAME("mutexes"), CHILD(named, stats_mutexes)},
- {NAME("arenas"), CHILD(indexed, stats_arenas)}
-};
-
-static const ctl_named_node_t experimental_hooks_node[] = {
- {NAME("install"), CTL(experimental_hooks_install)},
- {NAME("remove"), CTL(experimental_hooks_remove)}
-};
-
-static const ctl_named_node_t experimental_utilization_node[] = {
- {NAME("query"), CTL(experimental_utilization_query)},
- {NAME("batch_query"), CTL(experimental_utilization_batch_query)}
-};
-
-static const ctl_named_node_t experimental_arenas_i_node[] = {
- {NAME("pactivep"), CTL(experimental_arenas_i_pactivep)}
-};
-static const ctl_named_node_t super_experimental_arenas_i_node[] = {
- {NAME(""), CHILD(named, experimental_arenas_i)}
-};
-
-static const ctl_indexed_node_t experimental_arenas_node[] = {
- {INDEX(experimental_arenas_i)}
-};
-
-static const ctl_named_node_t experimental_node[] = {
- {NAME("hooks"), CHILD(named, experimental_hooks)},
- {NAME("utilization"), CHILD(named, experimental_utilization)},
- {NAME("arenas"), CHILD(indexed, experimental_arenas)}
+ {NAME("allocated"), CTL(stats_allocated)},
+ {NAME("active"), CTL(stats_active)},
+ {NAME("metadata"), CTL(stats_metadata)},
+ {NAME("metadata_thp"), CTL(stats_metadata_thp)},
+ {NAME("resident"), CTL(stats_resident)},
+ {NAME("mapped"), CTL(stats_mapped)},
+ {NAME("retained"), CTL(stats_retained)},
+ {NAME("background_thread"),
+ CHILD(named, stats_background_thread)},
+ {NAME("mutexes"), CHILD(named, stats_mutexes)},
+ {NAME("arenas"), CHILD(indexed, stats_arenas)}
};
+static const ctl_named_node_t experimental_hooks_node[] = {
+ {NAME("install"), CTL(experimental_hooks_install)},
+ {NAME("remove"), CTL(experimental_hooks_remove)}
+};
+
+static const ctl_named_node_t experimental_utilization_node[] = {
+ {NAME("query"), CTL(experimental_utilization_query)},
+ {NAME("batch_query"), CTL(experimental_utilization_batch_query)}
+};
+
+static const ctl_named_node_t experimental_arenas_i_node[] = {
+ {NAME("pactivep"), CTL(experimental_arenas_i_pactivep)}
+};
+static const ctl_named_node_t super_experimental_arenas_i_node[] = {
+ {NAME(""), CHILD(named, experimental_arenas_i)}
+};
+
+static const ctl_indexed_node_t experimental_arenas_node[] = {
+ {INDEX(experimental_arenas_i)}
+};
+
+static const ctl_named_node_t experimental_node[] = {
+ {NAME("hooks"), CHILD(named, experimental_hooks)},
+ {NAME("utilization"), CHILD(named, experimental_utilization)},
+ {NAME("arenas"), CHILD(indexed, experimental_arenas)}
+};
+
static const ctl_named_node_t root_node[] = {
{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
- {NAME("background_thread"), CTL(background_thread)},
- {NAME("max_background_threads"), CTL(max_background_threads)},
+ {NAME("background_thread"), CTL(background_thread)},
+ {NAME("max_background_threads"), CTL(max_background_threads)},
{NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)},
- {NAME("tcache"), CHILD(named, tcache)},
+ {NAME("tcache"), CHILD(named, tcache)},
{NAME("arena"), CHILD(indexed, arena)},
{NAME("arenas"), CHILD(named, arenas)},
{NAME("prof"), CHILD(named, prof)},
- {NAME("stats"), CHILD(named, stats)},
- {NAME("experimental"), CHILD(named, experimental)}
+ {NAME("stats"), CHILD(named, stats)},
+ {NAME("experimental"), CHILD(named, experimental)}
};
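/*
 * Illustrative usage sketch (not part of this commit): the node tables above
 * implement the public mallctl() namespace.  A dotted name is resolved one
 * component at a time starting from super_root_node, so "stats.allocated"
 * walks root_node -> stats_node -> CTL(stats_allocated).  From application
 * code, assuming <jemalloc/jemalloc.h>, that read looks like:
 */
#include <jemalloc/jemalloc.h>

static size_t
read_total_allocated(void) {
    size_t allocated = 0;
    size_t sz = sizeof(allocated);

    /* Reads the value merged by the most recent "epoch" refresh. */
    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0) {
        return 0;    /* e.g. ENOENT if stats support is disabled. */
    }
    return allocated;
}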
static const ctl_named_node_t super_root_node[] = {
{NAME(""), CHILD(named, root)}
@@ -645,554 +645,554 @@ static const ctl_named_node_t super_root_node[] = {
/******************************************************************************/
-/*
- * Sets *dst + *src non-atomically. This is safe, since everything is
- * synchronized by the ctl mutex.
- */
-static void
-ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
-#ifdef JEMALLOC_ATOMIC_U64
- uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
- uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
- atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
-#else
- *dst += *src;
-#endif
-}
-
-/* Likewise: with ctl mutex synchronization, reading is simple. */
-static uint64_t
-ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
- return atomic_load_u64(p, ATOMIC_RELAXED);
-#else
- return *p;
-#endif
-}
-
-static void
-accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
- size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
- size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
- atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
-}
-
-/******************************************************************************/
-
-static unsigned
-arenas_i2a_impl(size_t i, bool compat, bool validate) {
- unsigned a;
-
- switch (i) {
- case MALLCTL_ARENAS_ALL:
- a = 0;
- break;
- case MALLCTL_ARENAS_DESTROYED:
- a = 1;
- break;
- default:
- if (compat && i == ctl_arenas->narenas) {
- /*
- * Provide deprecated backward compatibility for
- * accessing the merged stats at index narenas rather
- * than via MALLCTL_ARENAS_ALL. This is scheduled for
- * removal in 6.0.0.
- */
- a = 0;
- } else if (validate && i >= ctl_arenas->narenas) {
- a = UINT_MAX;
- } else {
- /*
- * This function should never be called for an index
- * more than one past the range of indices that have
- * initialized ctl data.
- */
- assert(i < ctl_arenas->narenas || (!validate && i ==
- ctl_arenas->narenas));
- a = (unsigned)i + 2;
- }
- break;
- }
-
- return a;
-}
-
-static unsigned
-arenas_i2a(size_t i) {
- return arenas_i2a_impl(i, true, false);
-}
-
-static ctl_arena_t *
-arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
- ctl_arena_t *ret;
-
- assert(!compat || !init);
-
- ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
- if (init && ret == NULL) {
- if (config_stats) {
- struct container_s {
- ctl_arena_t ctl_arena;
- ctl_arena_stats_t astats;
- };
- struct container_s *cont =
- (struct container_s *)base_alloc(tsd_tsdn(tsd),
- b0get(), sizeof(struct container_s), QUANTUM);
- if (cont == NULL) {
- return NULL;
- }
- ret = &cont->ctl_arena;
- ret->astats = &cont->astats;
- } else {
- ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
- sizeof(ctl_arena_t), QUANTUM);
- if (ret == NULL) {
- return NULL;
- }
- }
- ret->arena_ind = (unsigned)i;
- ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
- }
-
- assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
- return ret;
-}
-
-static ctl_arena_t *
-arenas_i(size_t i) {
- ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
- assert(ret != NULL);
- return ret;
-}
-
+/*
+ * Sets *dst + *src non-atomically. This is safe, since everything is
+ * synchronized by the ctl mutex.
+ */
+static void
+ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+ uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
+ atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
+#else
+ *dst += *src;
+#endif
+}
+
+/* Likewise: with ctl mutex synchronization, reading is simple. */
+static uint64_t
+ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_u64(p, ATOMIC_RELAXED);
+#else
+ return *p;
+#endif
+}
+
+static void
+accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
+ size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
+ size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
+ atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
+}
+
+/******************************************************************************/
+
+static unsigned
+arenas_i2a_impl(size_t i, bool compat, bool validate) {
+ unsigned a;
+
+ switch (i) {
+ case MALLCTL_ARENAS_ALL:
+ a = 0;
+ break;
+ case MALLCTL_ARENAS_DESTROYED:
+ a = 1;
+ break;
+ default:
+ if (compat && i == ctl_arenas->narenas) {
+ /*
+ * Provide deprecated backward compatibility for
+ * accessing the merged stats at index narenas rather
+ * than via MALLCTL_ARENAS_ALL. This is scheduled for
+ * removal in 6.0.0.
+ */
+ a = 0;
+ } else if (validate && i >= ctl_arenas->narenas) {
+ a = UINT_MAX;
+ } else {
+ /*
+ * This function should never be called for an index
+ * more than one past the range of indices that have
+ * initialized ctl data.
+ */
+ assert(i < ctl_arenas->narenas || (!validate && i ==
+ ctl_arenas->narenas));
+ a = (unsigned)i + 2;
+ }
+ break;
+ }
+
+ return a;
+}
+
+static unsigned
+arenas_i2a(size_t i) {
+ return arenas_i2a_impl(i, true, false);
+}
+
+static ctl_arena_t *
+arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
+ ctl_arena_t *ret;
+
+ assert(!compat || !init);
+
+ ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
+ if (init && ret == NULL) {
+ if (config_stats) {
+ struct container_s {
+ ctl_arena_t ctl_arena;
+ ctl_arena_stats_t astats;
+ };
+ struct container_s *cont =
+ (struct container_s *)base_alloc(tsd_tsdn(tsd),
+ b0get(), sizeof(struct container_s), QUANTUM);
+ if (cont == NULL) {
+ return NULL;
+ }
+ ret = &cont->ctl_arena;
+ ret->astats = &cont->astats;
+ } else {
+ ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
+ sizeof(ctl_arena_t), QUANTUM);
+ if (ret == NULL) {
+ return NULL;
+ }
+ }
+ ret->arena_ind = (unsigned)i;
+ ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
+ }
+
+ assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
+ return ret;
+}
+
+static ctl_arena_t *
+arenas_i(size_t i) {
+ ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
+ assert(ret != NULL);
+ return ret;
+}
+
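/*
 * Illustrative note (not part of this commit): arenas_i2a_impl() above maps
 * the mallctl arena index space onto ctl_arenas->arenas[] slots as
 *
 *     MALLCTL_ARENAS_ALL       -> 0   (merged stats across all arenas)
 *     MALLCTL_ARENAS_DESTROYED -> 1   (stats of destroyed arenas)
 *     real arena index i       -> i + 2
 *
 * with the deprecated compat case (i == narenas) also aliasing slot 0.
 */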
static void
-ctl_arena_clear(ctl_arena_t *ctl_arena) {
- ctl_arena->nthreads = 0;
- ctl_arena->dss = dss_prec_names[dss_prec_limit];
- ctl_arena->dirty_decay_ms = -1;
- ctl_arena->muzzy_decay_ms = -1;
- ctl_arena->pactive = 0;
- ctl_arena->pdirty = 0;
- ctl_arena->pmuzzy = 0;
+ctl_arena_clear(ctl_arena_t *ctl_arena) {
+ ctl_arena->nthreads = 0;
+ ctl_arena->dss = dss_prec_names[dss_prec_limit];
+ ctl_arena->dirty_decay_ms = -1;
+ ctl_arena->muzzy_decay_ms = -1;
+ ctl_arena->pactive = 0;
+ ctl_arena->pdirty = 0;
+ ctl_arena->pmuzzy = 0;
if (config_stats) {
- memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
- ctl_arena->astats->allocated_small = 0;
- ctl_arena->astats->nmalloc_small = 0;
- ctl_arena->astats->ndalloc_small = 0;
- ctl_arena->astats->nrequests_small = 0;
- ctl_arena->astats->nfills_small = 0;
- ctl_arena->astats->nflushes_small = 0;
- memset(ctl_arena->astats->bstats, 0, SC_NBINS *
- sizeof(bin_stats_t));
- memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
- sizeof(arena_stats_large_t));
- memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
- sizeof(arena_stats_extents_t));
+ memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
+ ctl_arena->astats->allocated_small = 0;
+ ctl_arena->astats->nmalloc_small = 0;
+ ctl_arena->astats->ndalloc_small = 0;
+ ctl_arena->astats->nrequests_small = 0;
+ ctl_arena->astats->nfills_small = 0;
+ ctl_arena->astats->nflushes_small = 0;
+ memset(ctl_arena->astats->bstats, 0, SC_NBINS *
+ sizeof(bin_stats_t));
+ memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
+ sizeof(arena_stats_large_t));
+ memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
+ sizeof(arena_stats_extents_t));
}
}
static void
-ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
+ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
unsigned i;
- if (config_stats) {
- arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
- &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
- &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
- &ctl_arena->pdirty, &ctl_arena->pmuzzy,
- &ctl_arena->astats->astats, ctl_arena->astats->bstats,
- ctl_arena->astats->lstats, ctl_arena->astats->estats);
-
- for (i = 0; i < SC_NBINS; i++) {
- ctl_arena->astats->allocated_small +=
- ctl_arena->astats->bstats[i].curregs *
- sz_index2size(i);
- ctl_arena->astats->nmalloc_small +=
- ctl_arena->astats->bstats[i].nmalloc;
- ctl_arena->astats->ndalloc_small +=
- ctl_arena->astats->bstats[i].ndalloc;
- ctl_arena->astats->nrequests_small +=
- ctl_arena->astats->bstats[i].nrequests;
- ctl_arena->astats->nfills_small +=
- ctl_arena->astats->bstats[i].nfills;
- ctl_arena->astats->nflushes_small +=
- ctl_arena->astats->bstats[i].nflushes;
- }
- } else {
- arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
- &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
- &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
- &ctl_arena->pdirty, &ctl_arena->pmuzzy);
+ if (config_stats) {
+ arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
+ &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
+ &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
+ &ctl_arena->pdirty, &ctl_arena->pmuzzy,
+ &ctl_arena->astats->astats, ctl_arena->astats->bstats,
+ ctl_arena->astats->lstats, ctl_arena->astats->estats);
+
+ for (i = 0; i < SC_NBINS; i++) {
+ ctl_arena->astats->allocated_small +=
+ ctl_arena->astats->bstats[i].curregs *
+ sz_index2size(i);
+ ctl_arena->astats->nmalloc_small +=
+ ctl_arena->astats->bstats[i].nmalloc;
+ ctl_arena->astats->ndalloc_small +=
+ ctl_arena->astats->bstats[i].ndalloc;
+ ctl_arena->astats->nrequests_small +=
+ ctl_arena->astats->bstats[i].nrequests;
+ ctl_arena->astats->nfills_small +=
+ ctl_arena->astats->bstats[i].nfills;
+ ctl_arena->astats->nflushes_small +=
+ ctl_arena->astats->bstats[i].nflushes;
+ }
+ } else {
+ arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
+ &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
+ &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
+ &ctl_arena->pdirty, &ctl_arena->pmuzzy);
}
}
static void
-ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
- bool destroyed) {
+ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
+ bool destroyed) {
unsigned i;
- if (!destroyed) {
- ctl_sdarena->nthreads += ctl_arena->nthreads;
- ctl_sdarena->pactive += ctl_arena->pactive;
- ctl_sdarena->pdirty += ctl_arena->pdirty;
- ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
- } else {
- assert(ctl_arena->nthreads == 0);
- assert(ctl_arena->pactive == 0);
- assert(ctl_arena->pdirty == 0);
- assert(ctl_arena->pmuzzy == 0);
- }
-
- if (config_stats) {
- ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
- ctl_arena_stats_t *astats = ctl_arena->astats;
-
- if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.mapped,
- &astats->astats.mapped);
- accum_atomic_zu(&sdstats->astats.retained,
- &astats->astats.retained);
- accum_atomic_zu(&sdstats->astats.extent_avail,
- &astats->astats.extent_avail);
- }
-
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
- &astats->astats.decay_dirty.npurge);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
- &astats->astats.decay_dirty.nmadvise);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
- &astats->astats.decay_dirty.purged);
-
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
- &astats->astats.decay_muzzy.npurge);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
- &astats->astats.decay_muzzy.nmadvise);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
- &astats->astats.decay_muzzy.purged);
-
-#define OP(mtx) malloc_mutex_prof_merge( \
- &(sdstats->astats.mutex_prof_data[ \
- arena_prof_mutex_##mtx]), \
- &(astats->astats.mutex_prof_data[ \
- arena_prof_mutex_##mtx]));
-MUTEX_PROF_ARENA_MUTEXES
-#undef OP
- if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.base,
- &astats->astats.base);
- accum_atomic_zu(&sdstats->astats.internal,
- &astats->astats.internal);
- accum_atomic_zu(&sdstats->astats.resident,
- &astats->astats.resident);
- accum_atomic_zu(&sdstats->astats.metadata_thp,
- &astats->astats.metadata_thp);
- } else {
- assert(atomic_load_zu(
- &astats->astats.internal, ATOMIC_RELAXED) == 0);
- }
-
- if (!destroyed) {
- sdstats->allocated_small += astats->allocated_small;
- } else {
- assert(astats->allocated_small == 0);
- }
- sdstats->nmalloc_small += astats->nmalloc_small;
- sdstats->ndalloc_small += astats->ndalloc_small;
- sdstats->nrequests_small += astats->nrequests_small;
- sdstats->nfills_small += astats->nfills_small;
- sdstats->nflushes_small += astats->nflushes_small;
-
- if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.allocated_large,
- &astats->astats.allocated_large);
- } else {
- assert(atomic_load_zu(&astats->astats.allocated_large,
- ATOMIC_RELAXED) == 0);
- }
- ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
- &astats->astats.nmalloc_large);
- ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
- &astats->astats.ndalloc_large);
- ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
- &astats->astats.nrequests_large);
- accum_atomic_zu(&sdstats->astats.abandoned_vm,
- &astats->astats.abandoned_vm);
-
- accum_atomic_zu(&sdstats->astats.tcache_bytes,
- &astats->astats.tcache_bytes);
-
- if (ctl_arena->arena_ind == 0) {
- sdstats->astats.uptime = astats->astats.uptime;
- }
-
- /* Merge bin stats. */
- for (i = 0; i < SC_NBINS; i++) {
- sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
- sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
- sdstats->bstats[i].nrequests +=
- astats->bstats[i].nrequests;
- if (!destroyed) {
- sdstats->bstats[i].curregs +=
- astats->bstats[i].curregs;
- } else {
- assert(astats->bstats[i].curregs == 0);
- }
- sdstats->bstats[i].nfills += astats->bstats[i].nfills;
- sdstats->bstats[i].nflushes +=
+ if (!destroyed) {
+ ctl_sdarena->nthreads += ctl_arena->nthreads;
+ ctl_sdarena->pactive += ctl_arena->pactive;
+ ctl_sdarena->pdirty += ctl_arena->pdirty;
+ ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
+ } else {
+ assert(ctl_arena->nthreads == 0);
+ assert(ctl_arena->pactive == 0);
+ assert(ctl_arena->pdirty == 0);
+ assert(ctl_arena->pmuzzy == 0);
+ }
+
+ if (config_stats) {
+ ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
+ ctl_arena_stats_t *astats = ctl_arena->astats;
+
+ if (!destroyed) {
+ accum_atomic_zu(&sdstats->astats.mapped,
+ &astats->astats.mapped);
+ accum_atomic_zu(&sdstats->astats.retained,
+ &astats->astats.retained);
+ accum_atomic_zu(&sdstats->astats.extent_avail,
+ &astats->astats.extent_avail);
+ }
+
+ ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
+ &astats->astats.decay_dirty.npurge);
+ ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
+ &astats->astats.decay_dirty.nmadvise);
+ ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
+ &astats->astats.decay_dirty.purged);
+
+ ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
+ &astats->astats.decay_muzzy.npurge);
+ ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
+ &astats->astats.decay_muzzy.nmadvise);
+ ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
+ &astats->astats.decay_muzzy.purged);
+
+#define OP(mtx) malloc_mutex_prof_merge( \
+ &(sdstats->astats.mutex_prof_data[ \
+ arena_prof_mutex_##mtx]), \
+ &(astats->astats.mutex_prof_data[ \
+ arena_prof_mutex_##mtx]));
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+ if (!destroyed) {
+ accum_atomic_zu(&sdstats->astats.base,
+ &astats->astats.base);
+ accum_atomic_zu(&sdstats->astats.internal,
+ &astats->astats.internal);
+ accum_atomic_zu(&sdstats->astats.resident,
+ &astats->astats.resident);
+ accum_atomic_zu(&sdstats->astats.metadata_thp,
+ &astats->astats.metadata_thp);
+ } else {
+ assert(atomic_load_zu(
+ &astats->astats.internal, ATOMIC_RELAXED) == 0);
+ }
+
+ if (!destroyed) {
+ sdstats->allocated_small += astats->allocated_small;
+ } else {
+ assert(astats->allocated_small == 0);
+ }
+ sdstats->nmalloc_small += astats->nmalloc_small;
+ sdstats->ndalloc_small += astats->ndalloc_small;
+ sdstats->nrequests_small += astats->nrequests_small;
+ sdstats->nfills_small += astats->nfills_small;
+ sdstats->nflushes_small += astats->nflushes_small;
+
+ if (!destroyed) {
+ accum_atomic_zu(&sdstats->astats.allocated_large,
+ &astats->astats.allocated_large);
+ } else {
+ assert(atomic_load_zu(&astats->astats.allocated_large,
+ ATOMIC_RELAXED) == 0);
+ }
+ ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
+ &astats->astats.nmalloc_large);
+ ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
+ &astats->astats.ndalloc_large);
+ ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
+ &astats->astats.nrequests_large);
+ accum_atomic_zu(&sdstats->astats.abandoned_vm,
+ &astats->astats.abandoned_vm);
+
+ accum_atomic_zu(&sdstats->astats.tcache_bytes,
+ &astats->astats.tcache_bytes);
+
+ if (ctl_arena->arena_ind == 0) {
+ sdstats->astats.uptime = astats->astats.uptime;
+ }
+
+ /* Merge bin stats. */
+ for (i = 0; i < SC_NBINS; i++) {
+ sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+ sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+ sdstats->bstats[i].nrequests +=
+ astats->bstats[i].nrequests;
+ if (!destroyed) {
+ sdstats->bstats[i].curregs +=
+ astats->bstats[i].curregs;
+ } else {
+ assert(astats->bstats[i].curregs == 0);
+ }
+ sdstats->bstats[i].nfills += astats->bstats[i].nfills;
+ sdstats->bstats[i].nflushes +=
astats->bstats[i].nflushes;
- sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
- sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
- if (!destroyed) {
- sdstats->bstats[i].curslabs +=
- astats->bstats[i].curslabs;
- sdstats->bstats[i].nonfull_slabs +=
- astats->bstats[i].nonfull_slabs;
- } else {
- assert(astats->bstats[i].curslabs == 0);
- assert(astats->bstats[i].nonfull_slabs == 0);
- }
- malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
- &astats->bstats[i].mutex_data);
- }
-
- /* Merge stats for large allocations. */
- for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
- &astats->lstats[i].nmalloc);
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
- &astats->lstats[i].ndalloc);
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
- &astats->lstats[i].nrequests);
- if (!destroyed) {
- sdstats->lstats[i].curlextents +=
- astats->lstats[i].curlextents;
- } else {
- assert(astats->lstats[i].curlextents == 0);
- }
- }
-
- /* Merge extents stats. */
- for (i = 0; i < SC_NPSIZES; i++) {
- accum_atomic_zu(&sdstats->estats[i].ndirty,
- &astats->estats[i].ndirty);
- accum_atomic_zu(&sdstats->estats[i].nmuzzy,
- &astats->estats[i].nmuzzy);
- accum_atomic_zu(&sdstats->estats[i].nretained,
- &astats->estats[i].nretained);
- accum_atomic_zu(&sdstats->estats[i].dirty_bytes,
- &astats->estats[i].dirty_bytes);
- accum_atomic_zu(&sdstats->estats[i].muzzy_bytes,
- &astats->estats[i].muzzy_bytes);
- accum_atomic_zu(&sdstats->estats[i].retained_bytes,
- &astats->estats[i].retained_bytes);
+ sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
+ sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
+ if (!destroyed) {
+ sdstats->bstats[i].curslabs +=
+ astats->bstats[i].curslabs;
+ sdstats->bstats[i].nonfull_slabs +=
+ astats->bstats[i].nonfull_slabs;
+ } else {
+ assert(astats->bstats[i].curslabs == 0);
+ assert(astats->bstats[i].nonfull_slabs == 0);
+ }
+ malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
+ &astats->bstats[i].mutex_data);
}
+
+ /* Merge stats for large allocations. */
+ for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
+ ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
+ &astats->lstats[i].nmalloc);
+ ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
+ &astats->lstats[i].ndalloc);
+ ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
+ &astats->lstats[i].nrequests);
+ if (!destroyed) {
+ sdstats->lstats[i].curlextents +=
+ astats->lstats[i].curlextents;
+ } else {
+ assert(astats->lstats[i].curlextents == 0);
+ }
+ }
+
+ /* Merge extents stats. */
+ for (i = 0; i < SC_NPSIZES; i++) {
+ accum_atomic_zu(&sdstats->estats[i].ndirty,
+ &astats->estats[i].ndirty);
+ accum_atomic_zu(&sdstats->estats[i].nmuzzy,
+ &astats->estats[i].nmuzzy);
+ accum_atomic_zu(&sdstats->estats[i].nretained,
+ &astats->estats[i].nretained);
+ accum_atomic_zu(&sdstats->estats[i].dirty_bytes,
+ &astats->estats[i].dirty_bytes);
+ accum_atomic_zu(&sdstats->estats[i].muzzy_bytes,
+ &astats->estats[i].muzzy_bytes);
+ accum_atomic_zu(&sdstats->estats[i].retained_bytes,
+ &astats->estats[i].retained_bytes);
+ }
}
}
static void
-ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
- unsigned i, bool destroyed) {
- ctl_arena_t *ctl_arena = arenas_i(i);
-
- ctl_arena_clear(ctl_arena);
- ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
- /* Merge into sum stats as well. */
- ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
-}
-
-static unsigned
-ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
- unsigned arena_ind;
- ctl_arena_t *ctl_arena;
-
- if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
- NULL) {
- ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
- arena_ind = ctl_arena->arena_ind;
+ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
+ unsigned i, bool destroyed) {
+ ctl_arena_t *ctl_arena = arenas_i(i);
+
+ ctl_arena_clear(ctl_arena);
+ ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
+ /* Merge into sum stats as well. */
+ ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
+}
+
+static unsigned
+ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
+ unsigned arena_ind;
+ ctl_arena_t *ctl_arena;
+
+ if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
+ NULL) {
+ ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
+ arena_ind = ctl_arena->arena_ind;
} else {
- arena_ind = ctl_arenas->narenas;
+ arena_ind = ctl_arenas->narenas;
}
- /* Trigger stats allocation. */
- if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
- return UINT_MAX;
- }
+ /* Trigger stats allocation. */
+ if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
+ return UINT_MAX;
+ }
- /* Initialize new arena. */
- if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
- return UINT_MAX;
+ /* Initialize new arena. */
+ if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
+ return UINT_MAX;
}
- if (arena_ind == ctl_arenas->narenas) {
- ctl_arenas->narenas++;
+ if (arena_ind == ctl_arenas->narenas) {
+ ctl_arenas->narenas++;
}
- return arena_ind;
-}
-
-static void
-ctl_background_thread_stats_read(tsdn_t *tsdn) {
- background_thread_stats_t *stats = &ctl_stats->background_thread;
- if (!have_background_thread ||
- background_thread_stats_read(tsdn, stats)) {
- memset(stats, 0, sizeof(background_thread_stats_t));
- nstime_init(&stats->run_interval, 0);
- }
+ return arena_ind;
}
static void
-ctl_refresh(tsdn_t *tsdn) {
+ctl_background_thread_stats_read(tsdn_t *tsdn) {
+ background_thread_stats_t *stats = &ctl_stats->background_thread;
+ if (!have_background_thread ||
+ background_thread_stats_read(tsdn, stats)) {
+ memset(stats, 0, sizeof(background_thread_stats_t));
+ nstime_init(&stats->run_interval, 0);
+ }
+}
+
+static void
+ctl_refresh(tsdn_t *tsdn) {
unsigned i;
- ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
- VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);
+ ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);
/*
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
*/
- ctl_arena_clear(ctl_sarena);
+ ctl_arena_clear(ctl_sarena);
- for (i = 0; i < ctl_arenas->narenas; i++) {
- tarenas[i] = arena_get(tsdn, i, false);
+ for (i = 0; i < ctl_arenas->narenas; i++) {
+ tarenas[i] = arena_get(tsdn, i, false);
}
-
- for (i = 0; i < ctl_arenas->narenas; i++) {
- ctl_arena_t *ctl_arena = arenas_i(i);
+
+ for (i = 0; i < ctl_arenas->narenas; i++) {
+ ctl_arena_t *ctl_arena = arenas_i(i);
bool initialized = (tarenas[i] != NULL);
- ctl_arena->initialized = initialized;
- if (initialized) {
- ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
- false);
- }
+ ctl_arena->initialized = initialized;
+ if (initialized) {
+ ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
+ false);
+ }
}
if (config_stats) {
- ctl_stats->allocated = ctl_sarena->astats->allocated_small +
- atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
- ATOMIC_RELAXED);
- ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
- ctl_stats->metadata = atomic_load_zu(
- &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
- atomic_load_zu(&ctl_sarena->astats->astats.internal,
- ATOMIC_RELAXED);
- ctl_stats->metadata_thp = atomic_load_zu(
- &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
- ctl_stats->resident = atomic_load_zu(
- &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
- ctl_stats->mapped = atomic_load_zu(
- &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
- ctl_stats->retained = atomic_load_zu(
- &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);
-
- ctl_background_thread_stats_read(tsdn);
-
-#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
- malloc_mutex_lock(tsdn, &mtx); \
- malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
- malloc_mutex_unlock(tsdn, &mtx);
-
- if (config_prof && opt_prof) {
- READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
- bt2gctx_mtx);
- }
- if (have_background_thread) {
- READ_GLOBAL_MUTEX_PROF_DATA(
- global_prof_mutex_background_thread,
- background_thread_lock);
- } else {
- memset(&ctl_stats->mutex_prof_data[
- global_prof_mutex_background_thread], 0,
- sizeof(mutex_prof_data_t));
- }
- /* We own ctl mutex already. */
- malloc_mutex_prof_read(tsdn,
- &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
- &ctl_mtx);
-#undef READ_GLOBAL_MUTEX_PROF_DATA
- }
- ctl_arenas->epoch++;
+ ctl_stats->allocated = ctl_sarena->astats->allocated_small +
+ atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
+ ATOMIC_RELAXED);
+ ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
+ ctl_stats->metadata = atomic_load_zu(
+ &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
+ atomic_load_zu(&ctl_sarena->astats->astats.internal,
+ ATOMIC_RELAXED);
+ ctl_stats->metadata_thp = atomic_load_zu(
+ &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
+ ctl_stats->resident = atomic_load_zu(
+ &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
+ ctl_stats->mapped = atomic_load_zu(
+ &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
+ ctl_stats->retained = atomic_load_zu(
+ &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);
+
+ ctl_background_thread_stats_read(tsdn);
+
+#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
+ malloc_mutex_lock(tsdn, &mtx); \
+ malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
+ malloc_mutex_unlock(tsdn, &mtx);
+
+ if (config_prof && opt_prof) {
+ READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
+ bt2gctx_mtx);
+ }
+ if (have_background_thread) {
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_background_thread,
+ background_thread_lock);
+ } else {
+ memset(&ctl_stats->mutex_prof_data[
+ global_prof_mutex_background_thread], 0,
+ sizeof(mutex_prof_data_t));
+ }
+ /* We own ctl mutex already. */
+ malloc_mutex_prof_read(tsdn,
+ &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
+ &ctl_mtx);
+#undef READ_GLOBAL_MUTEX_PROF_DATA
+ }
+ ctl_arenas->epoch++;
}
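/*
 * Illustrative usage sketch (not part of this commit): ctl_refresh() above is
 * driven by writes to the "epoch" mallctl (see epoch_ctl() further down),
 * which is how applications request a fresh stats snapshot:
 */
#include <jemalloc/jemalloc.h>

static void
refresh_jemalloc_stats(void) {
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);

    /* Any write bumps the epoch and re-merges the per-arena stats. */
    mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));
}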
static bool
-ctl_init(tsd_t *tsd) {
+ctl_init(tsd_t *tsd) {
bool ret;
- tsdn_t *tsdn = tsd_tsdn(tsd);
-
- malloc_mutex_lock(tsdn, &ctl_mtx);
- if (!ctl_initialized) {
- ctl_arena_t *ctl_sarena, *ctl_darena;
- unsigned i;
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ if (!ctl_initialized) {
+ ctl_arena_t *ctl_sarena, *ctl_darena;
+ unsigned i;
+
/*
- * Allocate demand-zeroed space for pointers to the full
- * range of supported arena indices.
+ * Allocate demand-zeroed space for pointers to the full
+ * range of supported arena indices.
*/
- if (ctl_arenas == NULL) {
- ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
- b0get(), sizeof(ctl_arenas_t), QUANTUM);
- if (ctl_arenas == NULL) {
- ret = true;
- goto label_return;
- }
- }
-
- if (config_stats && ctl_stats == NULL) {
- ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
- sizeof(ctl_stats_t), QUANTUM);
- if (ctl_stats == NULL) {
- ret = true;
- goto label_return;
- }
- }
-
- /*
- * Allocate space for the current full range of arenas
- * here rather than doing it lazily elsewhere, in order
- * to limit when OOM-caused errors can occur.
- */
- if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
- true)) == NULL) {
- ret = true;
- goto label_return;
- }
- ctl_sarena->initialized = true;
-
- if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
- false, true)) == NULL) {
+ if (ctl_arenas == NULL) {
+ ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
+ b0get(), sizeof(ctl_arenas_t), QUANTUM);
+ if (ctl_arenas == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
+ if (config_stats && ctl_stats == NULL) {
+ ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
+ sizeof(ctl_stats_t), QUANTUM);
+ if (ctl_stats == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
+ /*
+ * Allocate space for the current full range of arenas
+ * here rather than doing it lazily elsewhere, in order
+ * to limit when OOM-caused errors can occur.
+ */
+ if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
+ true)) == NULL) {
ret = true;
goto label_return;
}
- ctl_arena_clear(ctl_darena);
+ ctl_sarena->initialized = true;
+
+ if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
+ false, true)) == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ ctl_arena_clear(ctl_darena);
/*
- * Don't toggle ctl_darena to initialized until an arena is
- * actually destroyed, so that arena.<i>.initialized can be used
- * to query whether the stats are relevant.
+ * Don't toggle ctl_darena to initialized until an arena is
+ * actually destroyed, so that arena.<i>.initialized can be used
+ * to query whether the stats are relevant.
*/
-
- ctl_arenas->narenas = narenas_total_get();
- for (i = 0; i < ctl_arenas->narenas; i++) {
- if (arenas_i_impl(tsd, i, false, true) == NULL) {
- ret = true;
- goto label_return;
+
+ ctl_arenas->narenas = narenas_total_get();
+ for (i = 0; i < ctl_arenas->narenas; i++) {
+ if (arenas_i_impl(tsd, i, false, true) == NULL) {
+ ret = true;
+ goto label_return;
}
}
- ql_new(&ctl_arenas->destroyed);
- ctl_refresh(tsdn);
-
+ ql_new(&ctl_arenas->destroyed);
+ ctl_refresh(tsdn);
+
ctl_initialized = true;
}
ret = false;
label_return:
- malloc_mutex_unlock(tsdn, &ctl_mtx);
- return ret;
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return ret;
}
static int
-ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp) {
+ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
+ size_t *mibp, size_t *depthp) {
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
@@ -1220,10 +1220,10 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) {
node = child;
- if (nodesp != NULL) {
+ if (nodesp != NULL) {
nodesp[i] =
(const ctl_node_t *)node;
- }
+ }
mibp[i] = j;
break;
}
@@ -1244,15 +1244,15 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
}
inode = ctl_indexed_node(node->children);
- node = inode->index(tsdn, mibp, *depthp, (size_t)index);
+ node = inode->index(tsdn, mibp, *depthp, (size_t)index);
if (node == NULL) {
ret = ENOENT;
goto label_return;
}
- if (nodesp != NULL) {
+ if (nodesp != NULL) {
nodesp[i] = (const ctl_node_t *)node;
- }
+ }
mibp[i] = (size_t)index;
}
@@ -1285,33 +1285,33 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
ret = 0;
label_return:
- return ret;
+ return ret;
}
int
-ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen) {
+ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen) {
int ret;
size_t depth;
ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
- if (!ctl_initialized && ctl_init(tsd)) {
+ if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
depth = CTL_MAX_DEPTH;
- ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
- if (ret != 0) {
+ ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
+ if (ret != 0) {
goto label_return;
- }
+ }
node = ctl_named_node(nodes[depth-1]);
- if (node != NULL && node->ctl) {
- ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
- } else {
+ if (node != NULL && node->ctl) {
+ ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
+ } else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
}
@@ -1321,27 +1321,27 @@ label_return:
}
int
-ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
+ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
int ret;
- if (!ctl_initialized && ctl_init(tsd)) {
+ if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
- ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
+ ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
label_return:
return(ret);
}
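/*
 * Illustrative usage sketch (not part of this commit): ctl_nametomib() and
 * ctl_bymib() back the public mallctlnametomib()/mallctlbymib() pair, which
 * lets a caller resolve a dotted name once and then re-read it cheaply:
 */
#include <jemalloc/jemalloc.h>

static size_t
read_arena0_pactive(void) {
    size_t mib[4];    /* "stats.arenas.0.pactive" has four components. */
    size_t miblen = sizeof(mib) / sizeof(mib[0]);
    size_t pactive = 0;
    size_t sz = sizeof(pactive);

    /* Resolve the name to a MIB once... */
    if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) != 0) {
        return 0;
    }
    /* ...then read through ctl_bymib() as often as needed. */
    mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0);
    return pactive;
}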
int
-ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
+ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const ctl_named_node_t *node;
size_t i;
- if (!ctl_initialized && ctl_init(tsd)) {
+ if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
@@ -1363,7 +1363,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
- node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
+ node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -1372,9 +1372,9 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
/* Call the ctl function. */
- if (node && node->ctl) {
- ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
- } else {
+ if (node && node->ctl) {
+ ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ } else {
/* Partial MIB. */
ret = ENOENT;
}
@@ -1384,58 +1384,58 @@ label_return:
}
bool
-ctl_boot(void) {
- if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
+ctl_boot(void) {
+ if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
ctl_initialized = false;
- return false;
+ return false;
}
void
-ctl_prefork(tsdn_t *tsdn) {
- malloc_mutex_prefork(tsdn, &ctl_mtx);
+ctl_prefork(tsdn_t *tsdn) {
+ malloc_mutex_prefork(tsdn, &ctl_mtx);
}
void
-ctl_postfork_parent(tsdn_t *tsdn) {
- malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
+ctl_postfork_parent(tsdn_t *tsdn) {
+ malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}
void
-ctl_postfork_child(tsdn_t *tsdn) {
- malloc_mutex_postfork_child(tsdn, &ctl_mtx);
+ctl_postfork_child(tsdn_t *tsdn) {
+ malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
/******************************************************************************/
/* *_ctl() functions. */
-#define READONLY() do { \
+#define READONLY() do { \
if (newp != NULL || newlen != 0) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
-#define WRITEONLY() do { \
+#define WRITEONLY() do { \
if (oldp != NULL || oldlenp != NULL) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
-#define READ_XOR_WRITE() do { \
- if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
- newlen != 0)) { \
- ret = EPERM; \
- goto label_return; \
- } \
-} while (0)
-
-#define READ(v, t) do { \
+#define READ_XOR_WRITE() do { \
+ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
+ newlen != 0)) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
@@ -1443,12 +1443,12 @@ ctl_postfork_child(tsdn_t *tsdn) {
memcpy(oldp, (void *)&(v), copylen); \
ret = EINVAL; \
goto label_return; \
- } \
- *(t *)oldp = (v); \
+ } \
+ *(t *)oldp = (v); \
} \
} while (0)
-#define WRITE(v, t) do { \
+#define WRITE(v, t) do { \
if (newp != NULL) { \
if (newlen != sizeof(t)) { \
ret = EINVAL; \
@@ -1458,109 +1458,109 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
-#define MIB_UNSIGNED(v, i) do { \
- if (mib[i] > UINT_MAX) { \
- ret = EFAULT; \
- goto label_return; \
- } \
- v = (unsigned)mib[i]; \
-} while (0)
-
+#define MIB_UNSIGNED(v, i) do { \
+ if (mib[i] > UINT_MAX) { \
+ ret = EFAULT; \
+ goto label_return; \
+ } \
+ v = (unsigned)mib[i]; \
+} while (0)
+
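/*
 * Illustrative note (not part of this commit): a typical leaf handler built
 * from these helpers follows the pattern
 *
 *     WRITE(newval, uint64_t);      (copy in the new value, if any; EINVAL on size mismatch)
 *     READ(oldval, uint64_t);       (copy the old value out, if requested)
 *     MIB_UNSIGNED(arena_ind, 1);   (extract an index component from the MIB; EFAULT if too large)
 *
 * with all errors funnelled through the shared label_return exit.
 */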
/*
* There's a lot of code duplication in the following macros due to limitations
* in how nested cpp macros are expanded.
*/
-#define CTL_RO_CLGEN(c, l, n, v, t) \
+#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) { \
- return ENOENT; \
- } \
- if (l) { \
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
- } \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
+ if (l) { \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ } \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- if (l) { \
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
- } \
- return ret; \
+ if (l) { \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ } \
+ return ret; \
}
-#define CTL_RO_CGEN(c, n, v, t) \
+#define CTL_RO_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) { \
- return ENOENT; \
- } \
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
- return ret; \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ return ret; \
}
-#define CTL_RO_GEN(n, v, t) \
+#define CTL_RO_GEN(n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
- return ret; \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ return ret; \
}
/*
* ctl_mtx is not acquired, under the assumption that no pertinent data will
* mutate during the call.
*/
-#define CTL_RO_NL_CGEN(c, n, v, t) \
+#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) { \
- return ENOENT; \
- } \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- return ret; \
+ return ret; \
}
-#define CTL_RO_NL_GEN(n, v, t) \
+#define CTL_RO_NL_GEN(n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1570,42 +1570,42 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
\
ret = 0; \
label_return: \
- return ret; \
-}
-
-#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
-static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) { \
- int ret; \
- t oldval; \
- \
- if (!(c)) { \
- return ENOENT; \
- } \
- READONLY(); \
- oldval = (m(tsd)); \
- READ(oldval, t); \
- \
- ret = 0; \
-label_return: \
- return ret; \
+ return ret; \
}
-#define CTL_RO_CONFIG_GEN(n, t) \
+#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
- t oldval; \
+ t oldval; \
\
+ if (!(c)) { \
+ return ENOENT; \
+ } \
READONLY(); \
+ oldval = (m(tsd)); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return ret; \
+}
+
+#define CTL_RO_CONFIG_GEN(n, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+ int ret; \
+ t oldval; \
+ \
+ READONLY(); \
oldval = n; \
- READ(oldval, t); \
+ READ(oldval, t); \
\
ret = 0; \
label_return: \
- return ret; \
+ return ret; \
}
/******************************************************************************/
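The CTL_RO_*GEN macros above stamp out one n##_ctl handler per read-only node; those handlers are wired into the static ctl tree and reached either by name or, for repeated lookups, through a pre-translated MIB. A sketch of the by-MIB path using the documented mallctlnametomib/mallctlbymib companions:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	size_t mib[2], miblen = 2;   /* capacity in, actual length out */
	const char *version;
	size_t len = sizeof(version);

	/* Translate the name once... */
	if (mallctlnametomib("version", mib, &miblen) != 0)
		return 1;
	/* ...then read through the cached MIB; this lands in the
	 * CTL_RO_NL_GEN(version, ...) handler generated above. */
	if (mallctlbymib(mib, miblen, &version, &len, NULL, 0) == 0)
		printf("jemalloc %s\n", version);
	return 0;
}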
@@ -1613,183 +1613,183 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
-epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
UNUSED uint64_t newval;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(newval, uint64_t);
- if (newp != NULL) {
- ctl_refresh(tsd_tsdn(tsd));
- }
- READ(ctl_arenas->epoch, uint64_t);
+ if (newp != NULL) {
+ ctl_refresh(tsd_tsdn(tsd));
+ }
+ READ(ctl_arenas->epoch, uint64_t);
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return ret;
-}
-
-static int
-background_thread_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen) {
- int ret;
- bool oldval;
-
- if (!have_background_thread) {
- return ENOENT;
- }
- background_thread_ctl_init(tsd_tsdn(tsd));
-
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
- if (newp == NULL) {
- oldval = background_thread_enabled();
- READ(oldval, bool);
- } else {
- if (newlen != sizeof(bool)) {
- ret = EINVAL;
- goto label_return;
- }
- oldval = background_thread_enabled();
- READ(oldval, bool);
-
- bool newval = *(bool *)newp;
- if (newval == oldval) {
- ret = 0;
- goto label_return;
- }
-
- background_thread_enabled_set(tsd_tsdn(tsd), newval);
- if (newval) {
- if (background_threads_enable(tsd)) {
- ret = EFAULT;
- goto label_return;
- }
- } else {
- if (background_threads_disable(tsd)) {
- ret = EFAULT;
- goto label_return;
- }
- }
- }
- ret = 0;
-label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
-
- return ret;
-}
-
-static int
-max_background_threads_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen) {
- int ret;
- size_t oldval;
-
- if (!have_background_thread) {
- return ENOENT;
- }
- background_thread_ctl_init(tsd_tsdn(tsd));
-
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
- if (newp == NULL) {
- oldval = max_background_threads;
- READ(oldval, size_t);
- } else {
- if (newlen != sizeof(size_t)) {
- ret = EINVAL;
- goto label_return;
- }
- oldval = max_background_threads;
- READ(oldval, size_t);
-
- size_t newval = *(size_t *)newp;
- if (newval == oldval) {
- ret = 0;
- goto label_return;
- }
- if (newval > opt_max_background_threads) {
- ret = EINVAL;
- goto label_return;
- }
-
- if (background_thread_enabled()) {
- background_thread_enabled_set(tsd_tsdn(tsd), false);
- if (background_threads_disable(tsd)) {
- ret = EFAULT;
- goto label_return;
- }
- max_background_threads = newval;
- background_thread_enabled_set(tsd_tsdn(tsd), true);
- if (background_threads_enable(tsd)) {
- ret = EFAULT;
- goto label_return;
- }
- } else {
- max_background_threads = newval;
- }
- }
- ret = 0;
-label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
-
- return ret;
-}
-
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
+static int
+background_thread_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen) {
+ int ret;
+ bool oldval;
+
+ if (!have_background_thread) {
+ return ENOENT;
+ }
+ background_thread_ctl_init(tsd_tsdn(tsd));
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ if (newp == NULL) {
+ oldval = background_thread_enabled();
+ READ(oldval, bool);
+ } else {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = background_thread_enabled();
+ READ(oldval, bool);
+
+ bool newval = *(bool *)newp;
+ if (newval == oldval) {
+ ret = 0;
+ goto label_return;
+ }
+
+ background_thread_enabled_set(tsd_tsdn(tsd), newval);
+ if (newval) {
+ if (background_threads_enable(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ } else {
+ if (background_threads_disable(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+ }
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+
+ return ret;
+}
+
+static int
+max_background_threads_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+ size_t oldval;
+
+ if (!have_background_thread) {
+ return ENOENT;
+ }
+ background_thread_ctl_init(tsd_tsdn(tsd));
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ if (newp == NULL) {
+ oldval = max_background_threads;
+ READ(oldval, size_t);
+ } else {
+ if (newlen != sizeof(size_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = max_background_threads;
+ READ(oldval, size_t);
+
+ size_t newval = *(size_t *)newp;
+ if (newval == oldval) {
+ ret = 0;
+ goto label_return;
+ }
+ if (newval > opt_max_background_threads) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ if (background_thread_enabled()) {
+ background_thread_enabled_set(tsd_tsdn(tsd), false);
+ if (background_threads_disable(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ max_background_threads = newval;
+ background_thread_enabled_set(tsd_tsdn(tsd), true);
+ if (background_threads_enable(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ } else {
+ max_background_threads = newval;
+ }
+ }
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+
+ return ret;
+}
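background_thread_ctl and max_background_threads_ctl back the "background_thread" and "max_background_threads" mallctl names; both take the new value by pointer with an exact-size newlen, and epoch_ctl above is the usual way to force a stats refresh. A hedged usage sketch (the cap must not exceed opt.max_background_threads or EINVAL is returned):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void enable_background_threads(void) {
	size_t max = 2;
	bool on = true;

	/* Cap the worker count first, then enable the feature. */
	mallctl("max_background_threads", NULL, NULL, &max, sizeof(max));
	mallctl("background_thread", NULL, NULL, &on, sizeof(on));

	/* Writing any value to "epoch" refreshes the cached stats
	 * (see epoch_ctl above); reading it back returns the epoch. */
	uint64_t epoch = 1;
	size_t len = sizeof(epoch);
	mallctl("epoch", &epoch, &len, &epoch, sizeof(epoch));
}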
+
/******************************************************************************/
-CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
-CTL_RO_CONFIG_GEN(config_debug, bool)
-CTL_RO_CONFIG_GEN(config_fill, bool)
-CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
-CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
-CTL_RO_CONFIG_GEN(config_opt_safety_checks, bool)
-CTL_RO_CONFIG_GEN(config_prof, bool)
-CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
-CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
-CTL_RO_CONFIG_GEN(config_stats, bool)
-CTL_RO_CONFIG_GEN(config_utrace, bool)
-CTL_RO_CONFIG_GEN(config_xmalloc, bool)
+CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
+CTL_RO_CONFIG_GEN(config_debug, bool)
+CTL_RO_CONFIG_GEN(config_fill, bool)
+CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
+CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
+CTL_RO_CONFIG_GEN(config_opt_safety_checks, bool)
+CTL_RO_CONFIG_GEN(config_prof, bool)
+CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
+CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
+CTL_RO_CONFIG_GEN(config_stats, bool)
+CTL_RO_CONFIG_GEN(config_utrace, bool)
+CTL_RO_CONFIG_GEN(config_xmalloc, bool)
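Each CTL_RO_CONFIG_GEN line exposes one compile-time feature flag under the "config." prefix; they are read-only booleans (plus the config.malloc_conf string). A short read sketch:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	bool debug, stats;
	size_t len = sizeof(bool);
	mallctl("config.debug", &debug, &len, NULL, 0);
	len = sizeof(bool);
	mallctl("config.stats", &stats, &len, NULL, 0);
	printf("config.debug=%d config.stats=%d\n", debug, stats);
	return 0;
}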
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
-CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
-CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
-CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
- const char *)
-CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
+CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
+CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
+CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
+ const char *)
+CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
-CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
-CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
- const char *)
-CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
-CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
-CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
-CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
-CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
+CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
+ const char *)
+CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
+CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
+CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
+CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
+CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
+CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
-CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
-CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
-CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
- size_t)
-CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
+CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
+CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
+ size_t)
+CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
- opt_prof_thread_active_init, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
+ opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
@@ -1800,1636 +1800,1636 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/
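The opt.* readers generated above report option values fixed at startup (via MALLOC_CONF and related mechanisms); note the per-entry types, e.g. opt.narenas is unsigned while the decay times are ssize_t. A small read sketch before the per-thread handlers that follow:

#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	unsigned narenas;
	ssize_t dirty_ms;
	size_t len;

	len = sizeof(narenas);
	mallctl("opt.narenas", &narenas, &len, NULL, 0);
	len = sizeof(dirty_ms);
	mallctl("opt.dirty_decay_ms", &dirty_ms, &len, NULL, 0);
	printf("opt.narenas=%u opt.dirty_decay_ms=%zd\n", narenas, dirty_ms);
	return 0;
}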
static int
-thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- arena_t *oldarena;
+ arena_t *oldarena;
unsigned newind, oldind;
- oldarena = arena_choose(tsd, NULL);
- if (oldarena == NULL) {
- return EAGAIN;
- }
- newind = oldind = arena_ind_get(oldarena);
+ oldarena = arena_choose(tsd, NULL);
+ if (oldarena == NULL) {
+ return EAGAIN;
+ }
+ newind = oldind = arena_ind_get(oldarena);
WRITE(newind, unsigned);
READ(oldind, unsigned);
-
+
if (newind != oldind) {
- arena_t *newarena;
+ arena_t *newarena;
- if (newind >= narenas_total_get()) {
+ if (newind >= narenas_total_get()) {
/* New arena index is out of range. */
ret = EFAULT;
goto label_return;
}
- if (have_percpu_arena &&
- PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
- if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
- /*
- * If perCPU arena is enabled, thread_arena
- * control is not allowed for the auto arena
- * range.
- */
- ret = EPERM;
- goto label_return;
- }
- }
-
+ if (have_percpu_arena &&
+ PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
+ if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
+ /*
+ * If perCPU arena is enabled, thread_arena
+ * control is not allowed for the auto arena
+ * range.
+ */
+ ret = EPERM;
+ goto label_return;
+ }
+ }
+
/* Initialize arena if necessary. */
- newarena = arena_get(tsd_tsdn(tsd), newind, true);
- if (newarena == NULL) {
+ newarena = arena_get(tsd_tsdn(tsd), newind, true);
+ if (newarena == NULL) {
ret = EAGAIN;
goto label_return;
}
- /* Set new arena/tcache associations. */
- arena_migrate(tsd, oldind, newind);
- if (tcache_available(tsd)) {
- tcache_arena_reassociate(tsd_tsdn(tsd),
- tsd_tcachep_get(tsd), newarena);
- }
- }
-
- ret = 0;
-label_return:
- return ret;
-}
-
-CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
- uint64_t)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
- uint64_t *)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
- uint64_t)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
- tsd_thread_deallocatedp_get, uint64_t *)
-
-static int
-thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen) {
- int ret;
- bool oldval;
-
- oldval = tcache_enabled_get(tsd);
- if (newp != NULL) {
- if (newlen != sizeof(bool)) {
- ret = EINVAL;
- goto label_return;
- }
- tcache_enabled_set(tsd, *(bool *)newp);
- }
- READ(oldval, bool);
-
- ret = 0;
-label_return:
- return ret;
-}
-
-static int
-thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen) {
- int ret;
-
- if (!tcache_available(tsd)) {
- ret = EFAULT;
- goto label_return;
- }
-
- READONLY();
- WRITEONLY();
-
- tcache_flush(tsd);
-
- ret = 0;
-label_return:
- return ret;
-}
-
-static int
-thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen) {
- int ret;
-
- if (!config_prof) {
- return ENOENT;
- }
-
- READ_XOR_WRITE();
-
- if (newp != NULL) {
- if (newlen != sizeof(const char *)) {
- ret = EINVAL;
- goto label_return;
- }
-
- if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
- 0) {
- goto label_return;
+ /* Set new arena/tcache associations. */
+ arena_migrate(tsd, oldind, newind);
+ if (tcache_available(tsd)) {
+ tcache_arena_reassociate(tsd_tsdn(tsd),
+ tsd_tcachep_get(tsd), newarena);
+ }
+ }
+
+ ret = 0;
+label_return:
+ return ret;
+}
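thread_arena_ctl is the handler behind "thread.arena": reading returns the calling thread's current arena index, writing migrates the thread (and its tcache) to another explicit arena, and the auto per-CPU range is rejected with EPERM as the comment above explains. A sketch that does both in one call:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void pin_thread_to_arena(unsigned target) {
	unsigned old;
	size_t len = sizeof(old);
	/* Read the current binding and request a new one in one call. */
	if (mallctl("thread.arena", &old, &len, &target, sizeof(target)) == 0)
		printf("moved thread from arena %u to %u\n", old, target);
}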
+
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
+ uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
+ tsd_thread_deallocatedp_get, uint64_t *)
+
+static int
+thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+ bool oldval;
+
+ oldval = tcache_enabled_get(tsd);
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
}
- } else {
- const char *oldname = prof_thread_name_get(tsd);
- READ(oldname, const char *);
+ tcache_enabled_set(tsd, *(bool *)newp);
}
+ READ(oldval, bool);
ret = 0;
label_return:
- return ret;
-}
-
+ return ret;
+}
+
+static int
+thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+
+ if (!tcache_available(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ READONLY();
+ WRITEONLY();
+
+ tcache_flush(tsd);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
static int
-thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen) {
+thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
int ret;
+
+ if (!config_prof) {
+ return ENOENT;
+ }
+
+ READ_XOR_WRITE();
+
+ if (newp != NULL) {
+ if (newlen != sizeof(const char *)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
+ 0) {
+ goto label_return;
+ }
+ } else {
+ const char *oldname = prof_thread_name_get(tsd);
+ READ(oldname, const char *);
+ }
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
bool oldval;
- if (!config_prof) {
- return ENOENT;
- }
+ if (!config_prof) {
+ return ENOENT;
+ }
- oldval = prof_thread_active_get(tsd);
+ oldval = prof_thread_active_get(tsd);
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
- if (prof_thread_active_set(tsd, *(bool *)newp)) {
- ret = EAGAIN;
- goto label_return;
- }
+ if (prof_thread_active_set(tsd, *(bool *)newp)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
}
READ(oldval, bool);
ret = 0;
label_return:
- return ret;
+ return ret;
}
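The remaining thread.* handlers above follow the same shape: "thread.tcache.enabled" is a read-write bool, "thread.tcache.flush" is a pure action (both READONLY() and WRITEONLY(), i.e. no buffers accepted), and "thread.prof.name" is read-xor-write on a string. A sketch, assuming profiling was built in for the second call:

#include <jemalloc/jemalloc.h>

static void thread_ctl_examples(void) {
	/* Flush this thread's tcache: no old or new buffer is accepted. */
	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);

	/* Name the thread for heap profiles; newlen must be sizeof(const char *). */
	const char *name = "worker-0";
	mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));
}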
-/******************************************************************************/
-
+/******************************************************************************/
+
static int
-tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- unsigned tcache_ind;
-
- READONLY();
- if (tcaches_create(tsd, &tcache_ind)) {
- ret = EFAULT;
- goto label_return;
- }
- READ(tcache_ind, unsigned);
-
- ret = 0;
-label_return:
- return ret;
-}
-
-static int
-tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- unsigned tcache_ind;
-
- WRITEONLY();
- tcache_ind = UINT_MAX;
- WRITE(tcache_ind, unsigned);
- if (tcache_ind == UINT_MAX) {
- ret = EFAULT;
- goto label_return;
- }
- tcaches_flush(tsd, tcache_ind);
-
- ret = 0;
-label_return:
- return ret;
-}
-
-static int
-tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- unsigned tcache_ind;
-
+ unsigned tcache_ind;
+
+ READONLY();
+ if (tcaches_create(tsd, &tcache_ind)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ READ(tcache_ind, unsigned);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned tcache_ind;
+
WRITEONLY();
- tcache_ind = UINT_MAX;
- WRITE(tcache_ind, unsigned);
- if (tcache_ind == UINT_MAX) {
- ret = EFAULT;
- goto label_return;
- }
- tcaches_destroy(tsd, tcache_ind);
-
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_flush(tsd, tcache_ind);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned tcache_ind;
+
+ WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_destroy(tsd, tcache_ind);
+
ret = 0;
label_return:
- return ret;
+ return ret;
}
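tcache_create_ctl/tcache_flush_ctl/tcache_destroy_ctl manage explicit caches: "tcache.create" is read-only and returns an index, the other two are write-only and take that index back. The index is then passed to mallocx()/dallocx() through MALLOCX_TCACHE(). A sketch of the lifecycle:

#include <jemalloc/jemalloc.h>

static void explicit_tcache_demo(void) {
	unsigned tc;
	size_t len = sizeof(tc);

	if (mallctl("tcache.create", &tc, &len, NULL, 0) != 0)
		return;

	/* Allocate and free through the explicit cache. */
	void *p = mallocx(64, MALLOCX_TCACHE(tc));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tc));

	mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
}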
/******************************************************************************/
-static int
-arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- tsdn_t *tsdn = tsd_tsdn(tsd);
- unsigned arena_ind;
- bool initialized;
-
- READONLY();
- MIB_UNSIGNED(arena_ind, 1);
-
- malloc_mutex_lock(tsdn, &ctl_mtx);
- initialized = arenas_i(arena_ind)->initialized;
- malloc_mutex_unlock(tsdn, &ctl_mtx);
-
- READ(initialized, bool);
-
- ret = 0;
-label_return:
- return ret;
-}
-
+static int
+arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+ unsigned arena_ind;
+ bool initialized;
+
+ READONLY();
+ MIB_UNSIGNED(arena_ind, 1);
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ initialized = arenas_i(arena_ind)->initialized;
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ READ(initialized, bool);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
static void
-arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
- malloc_mutex_lock(tsdn, &ctl_mtx);
- {
- unsigned narenas = ctl_arenas->narenas;
-
- /*
- * Access via index narenas is deprecated, and scheduled for
- * removal in 6.0.0.
- */
- if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
- unsigned i;
- VARIABLE_ARRAY(arena_t *, tarenas, narenas);
-
- for (i = 0; i < narenas; i++) {
- tarenas[i] = arena_get(tsdn, i, false);
- }
-
- /*
- * No further need to hold ctl_mtx, since narenas and
- * tarenas contain everything needed below.
- */
- malloc_mutex_unlock(tsdn, &ctl_mtx);
-
- for (i = 0; i < narenas; i++) {
- if (tarenas[i] != NULL) {
- arena_decay(tsdn, tarenas[i], false,
- all);
- }
- }
- } else {
- arena_t *tarena;
-
- assert(arena_ind < narenas);
-
- tarena = arena_get(tsdn, arena_ind, false);
-
- /* No further need to hold ctl_mtx. */
- malloc_mutex_unlock(tsdn, &ctl_mtx);
-
- if (tarena != NULL) {
- arena_decay(tsdn, tarena, false, all);
- }
+arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ {
+ unsigned narenas = ctl_arenas->narenas;
+
+ /*
+ * Access via index narenas is deprecated, and scheduled for
+ * removal in 6.0.0.
+ */
+ if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
+ unsigned i;
+ VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+
+ for (i = 0; i < narenas; i++) {
+ tarenas[i] = arena_get(tsdn, i, false);
+ }
+
+ /*
+ * No further need to hold ctl_mtx, since narenas and
+ * tarenas contain everything needed below.
+ */
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ for (i = 0; i < narenas; i++) {
+ if (tarenas[i] != NULL) {
+ arena_decay(tsdn, tarenas[i], false,
+ all);
+ }
+ }
+ } else {
+ arena_t *tarena;
+
+ assert(arena_ind < narenas);
+
+ tarena = arena_get(tsdn, arena_ind, false);
+
+ /* No further need to hold ctl_mtx. */
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ if (tarena != NULL) {
+ arena_decay(tsdn, tarena, false, all);
+ }
}
}
}
static int
-arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- unsigned arena_ind;
-
- READONLY();
- WRITEONLY();
- MIB_UNSIGNED(arena_ind, 1);
- arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
-
- ret = 0;
-label_return:
- return ret;
-}
-
-static int
-arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
+arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- unsigned arena_ind;
+ unsigned arena_ind;
READONLY();
WRITEONLY();
- MIB_UNSIGNED(arena_ind, 1);
- arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
+ MIB_UNSIGNED(arena_ind, 1);
+ arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
ret = 0;
label_return:
- return ret;
+ return ret;
}
static int
-arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
- arena_t **arena) {
- int ret;
-
- READONLY();
- WRITEONLY();
- MIB_UNSIGNED(*arena_ind, 1);
-
- *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
- if (*arena == NULL || arena_is_auto(*arena)) {
- ret = EFAULT;
- goto label_return;
- }
-
- ret = 0;
-label_return:
- return ret;
-}
-
-static void
-arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
- /* Temporarily disable the background thread during arena reset. */
- if (have_background_thread) {
- malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
- if (background_thread_enabled()) {
- background_thread_info_t *info =
- background_thread_info_get(arena_ind);
- assert(info->state == background_thread_started);
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- info->state = background_thread_paused;
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
- }
- }
-}
-
-static void
-arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
- if (have_background_thread) {
- if (background_thread_enabled()) {
- background_thread_info_t *info =
- background_thread_info_get(arena_ind);
- assert(info->state == background_thread_paused);
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- info->state = background_thread_started;
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
- }
-}
-
-static int
-arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- unsigned arena_ind;
- arena_t *arena;
-
- ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
- newp, newlen, &arena_ind, &arena);
- if (ret != 0) {
- return ret;
- }
-
- arena_reset_prepare_background_thread(tsd, arena_ind);
- arena_reset(tsd, arena);
- arena_reset_finish_background_thread(tsd, arena_ind);
-
- return ret;
-}
-
-static int
-arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- unsigned arena_ind;
- arena_t *arena;
- ctl_arena_t *ctl_darena, *ctl_arena;
-
- ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
- newp, newlen, &arena_ind, &arena);
- if (ret != 0) {
- goto label_return;
- }
-
- if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
- true) != 0) {
- ret = EFAULT;
- goto label_return;
- }
-
- arena_reset_prepare_background_thread(tsd, arena_ind);
- /* Merge stats after resetting and purging arena. */
- arena_reset(tsd, arena);
- arena_decay(tsd_tsdn(tsd), arena, false, true);
- ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
- ctl_darena->initialized = true;
- ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
- /* Destroy arena. */
- arena_destroy(tsd, arena);
- ctl_arena = arenas_i(arena_ind);
- ctl_arena->initialized = false;
- /* Record arena index for later recycling via arenas.create. */
- ql_elm_new(ctl_arena, destroyed_link);
- ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
- arena_reset_finish_background_thread(tsd, arena_ind);
-
- assert(ret == 0);
-label_return:
- return ret;
-}
-
-static int
-arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- const char *dss = NULL;
- unsigned arena_ind;
+arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+
+ READONLY();
+ WRITEONLY();
+ MIB_UNSIGNED(arena_ind, 1);
+ arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
+ arena_t **arena) {
+ int ret;
+
+ READONLY();
+ WRITEONLY();
+ MIB_UNSIGNED(*arena_ind, 1);
+
+ *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
+ if (*arena == NULL || arena_is_auto(*arena)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static void
+arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
+ /* Temporarily disable the background thread during arena reset. */
+ if (have_background_thread) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ if (background_thread_enabled()) {
+ background_thread_info_t *info =
+ background_thread_info_get(arena_ind);
+ assert(info->state == background_thread_started);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ info->state = background_thread_paused;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+ }
+}
+
+static void
+arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
+ if (have_background_thread) {
+ if (background_thread_enabled()) {
+ background_thread_info_t *info =
+ background_thread_info_get(arena_ind);
+ assert(info->state == background_thread_paused);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ info->state = background_thread_started;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+ }
+}
+
+static int
+arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+
+ ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
+ newp, newlen, &arena_ind, &arena);
+ if (ret != 0) {
+ return ret;
+ }
+
+ arena_reset_prepare_background_thread(tsd, arena_ind);
+ arena_reset(tsd, arena);
+ arena_reset_finish_background_thread(tsd, arena_ind);
+
+ return ret;
+}
+
+static int
+arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+ ctl_arena_t *ctl_darena, *ctl_arena;
+
+ ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
+ newp, newlen, &arena_ind, &arena);
+ if (ret != 0) {
+ goto label_return;
+ }
+
+ if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
+ true) != 0) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ arena_reset_prepare_background_thread(tsd, arena_ind);
+ /* Merge stats after resetting and purging arena. */
+ arena_reset(tsd, arena);
+ arena_decay(tsd_tsdn(tsd), arena, false, true);
+ ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
+ ctl_darena->initialized = true;
+ ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
+ /* Destroy arena. */
+ arena_destroy(tsd, arena);
+ ctl_arena = arenas_i(arena_ind);
+ ctl_arena->initialized = false;
+ /* Record arena index for later recycling via arenas.create. */
+ ql_elm_new(ctl_arena, destroyed_link);
+ ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
+ arena_reset_finish_background_thread(tsd, arena_ind);
+
+ assert(ret == 0);
+label_return:
+ return ret;
+}
+
+static int
+arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const char *dss = NULL;
+ unsigned arena_ind;
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(dss, const char *);
- MIB_UNSIGNED(arena_ind, 1);
- if (dss != NULL) {
- int i;
- bool match = false;
-
- for (i = 0; i < dss_prec_limit; i++) {
- if (strcmp(dss_prec_names[i], dss) == 0) {
- dss_prec = i;
- match = true;
- break;
- }
+ MIB_UNSIGNED(arena_ind, 1);
+ if (dss != NULL) {
+ int i;
+ bool match = false;
+
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strcmp(dss_prec_names[i], dss) == 0) {
+ dss_prec = i;
+ match = true;
+ break;
+ }
}
-
- if (!match) {
- ret = EINVAL;
- goto label_return;
- }
- }
-
- /*
- * Access via index narenas is deprecated, and scheduled for removal in
- * 6.0.0.
- */
- if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
- ctl_arenas->narenas) {
- if (dss_prec != dss_prec_limit &&
- extent_dss_prec_set(dss_prec)) {
- ret = EFAULT;
- goto label_return;
- }
- dss_prec_old = extent_dss_prec_get();
+
+ if (!match) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ }
+
+ /*
+ * Access via index narenas is deprecated, and scheduled for removal in
+ * 6.0.0.
+ */
+ if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
+ ctl_arenas->narenas) {
+ if (dss_prec != dss_prec_limit &&
+ extent_dss_prec_set(dss_prec)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ dss_prec_old = extent_dss_prec_get();
} else {
- arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
- if (arena == NULL || (dss_prec != dss_prec_limit &&
- arena_dss_prec_set(arena, dss_prec))) {
- ret = EFAULT;
- goto label_return;
- }
- dss_prec_old = arena_dss_prec_get(arena);
- }
-
+ arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL || (dss_prec != dss_prec_limit &&
+ arena_dss_prec_set(arena, dss_prec))) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ dss_prec_old = arena_dss_prec_get(arena);
+ }
+
dss = dss_prec_names[dss_prec_old];
READ(dss, const char *);
-
- ret = 0;
-label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return ret;
-}
-
-static int
-arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
- int ret;
- unsigned arena_ind;
- arena_t *arena;
-
- MIB_UNSIGNED(arena_ind, 1);
- arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
- if (arena == NULL) {
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
+static int
+arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+
+ MIB_UNSIGNED(arena_ind, 1);
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
- if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
- arena_muzzy_decay_ms_get(arena);
- READ(oldval, ssize_t);
- }
- if (newp != NULL) {
- if (newlen != sizeof(ssize_t)) {
- ret = EINVAL;
- goto label_return;
- }
- if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) {
- /*
- * By default the huge arena purges eagerly. If it is
- * set to non-zero decay time afterwards, background
- * thread might be needed.
- */
- if (background_thread_create(tsd, arena_ind)) {
- ret = EFAULT;
- goto label_return;
- }
- }
- if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
- *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
- arena, *(ssize_t *)newp)) {
- ret = EFAULT;
- goto label_return;
- }
- }
-
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
+ arena_muzzy_decay_ms_get(arena);
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) {
+ /*
+ * By default the huge arena purges eagerly. If it is
+ * set to non-zero decay time afterwards, background
+ * thread might be needed.
+ */
+ if (background_thread_create(tsd, arena_ind)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+ if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
+ *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
+ arena, *(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
ret = 0;
label_return:
- return ret;
-}
-
-static int
-arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
- newlen, true);
-}
-
-static int
-arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
- newlen, false);
-}
-
-static int
-arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- unsigned arena_ind;
- arena_t *arena;
-
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- MIB_UNSIGNED(arena_ind, 1);
- if (arena_ind < narenas_total_get()) {
- extent_hooks_t *old_extent_hooks;
- arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
- if (arena == NULL) {
- if (arena_ind >= narenas_auto) {
- ret = EFAULT;
- goto label_return;
- }
- old_extent_hooks =
- (extent_hooks_t *)&extent_hooks_default;
- READ(old_extent_hooks, extent_hooks_t *);
- if (newp != NULL) {
- /* Initialize a new arena as a side effect. */
- extent_hooks_t *new_extent_hooks
- JEMALLOC_CC_SILENCE_INIT(NULL);
- WRITE(new_extent_hooks, extent_hooks_t *);
- arena = arena_init(tsd_tsdn(tsd), arena_ind,
- new_extent_hooks);
- if (arena == NULL) {
- ret = EFAULT;
- goto label_return;
- }
- }
- } else {
- if (newp != NULL) {
- extent_hooks_t *new_extent_hooks
- JEMALLOC_CC_SILENCE_INIT(NULL);
- WRITE(new_extent_hooks, extent_hooks_t *);
- old_extent_hooks = extent_hooks_set(tsd, arena,
- new_extent_hooks);
- READ(old_extent_hooks, extent_hooks_t *);
- } else {
- old_extent_hooks = extent_hooks_get(arena);
- READ(old_extent_hooks, extent_hooks_t *);
- }
- }
- } else {
- ret = EFAULT;
+ return ret;
+}
+
+static int
+arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, true);
+}
+
+static int
+arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, false);
+}
+
+static int
+arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ MIB_UNSIGNED(arena_ind, 1);
+ if (arena_ind < narenas_total_get()) {
+ extent_hooks_t *old_extent_hooks;
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL) {
+ if (arena_ind >= narenas_auto) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ old_extent_hooks =
+ (extent_hooks_t *)&extent_hooks_default;
+ READ(old_extent_hooks, extent_hooks_t *);
+ if (newp != NULL) {
+ /* Initialize a new arena as a side effect. */
+ extent_hooks_t *new_extent_hooks
+ JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(new_extent_hooks, extent_hooks_t *);
+ arena = arena_init(tsd_tsdn(tsd), arena_ind,
+ new_extent_hooks);
+ if (arena == NULL) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+ } else {
+ if (newp != NULL) {
+ extent_hooks_t *new_extent_hooks
+ JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(new_extent_hooks, extent_hooks_t *);
+ old_extent_hooks = extent_hooks_set(tsd, arena,
+ new_extent_hooks);
+ READ(old_extent_hooks, extent_hooks_t *);
+ } else {
+ old_extent_hooks = extent_hooks_get(arena);
+ READ(old_extent_hooks, extent_hooks_t *);
+ }
+ }
+ } else {
+ ret = EFAULT;
goto label_return;
}
- ret = 0;
-label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return ret;
-}
-
-static int
-arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen) {
- int ret;
- unsigned arena_ind;
- arena_t *arena;
-
- if (!opt_retain) {
- /* Only relevant when retain is enabled. */
- return ENOENT;
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- MIB_UNSIGNED(arena_ind, 1);
- if (arena_ind < narenas_total_get() && (arena =
- arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
- size_t old_limit, new_limit;
- if (newp != NULL) {
- WRITE(new_limit, size_t);
- }
- bool err = arena_retain_grow_limit_get_set(tsd, arena,
- &old_limit, newp != NULL ? &new_limit : NULL);
- if (!err) {
- READ(old_limit, size_t);
- ret = 0;
- } else {
- ret = EFAULT;
- }
- } else {
- ret = EFAULT;
- }
-label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return ret;
-}
-
-static const ctl_named_node_t *
-arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t i) {
- const ctl_named_node_t *ret;
-
- malloc_mutex_lock(tsdn, &ctl_mtx);
- switch (i) {
- case MALLCTL_ARENAS_ALL:
- case MALLCTL_ARENAS_DESTROYED:
- break;
- default:
- if (i > ctl_arenas->narenas) {
- ret = NULL;
- goto label_return;
- }
- break;
- }
-
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
+static int
+arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+
+ if (!opt_retain) {
+ /* Only relevant when retain is enabled. */
+ return ENOENT;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ MIB_UNSIGNED(arena_ind, 1);
+ if (arena_ind < narenas_total_get() && (arena =
+ arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
+ size_t old_limit, new_limit;
+ if (newp != NULL) {
+ WRITE(new_limit, size_t);
+ }
+ bool err = arena_retain_grow_limit_get_set(tsd, arena,
+ &old_limit, newp != NULL ? &new_limit : NULL);
+ if (!err) {
+ READ(old_limit, size_t);
+ ret = 0;
+ } else {
+ ret = EFAULT;
+ }
+ } else {
+ ret = EFAULT;
+ }
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
+static const ctl_named_node_t *
+arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t i) {
+ const ctl_named_node_t *ret;
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ switch (i) {
+ case MALLCTL_ARENAS_ALL:
+ case MALLCTL_ARENAS_DESTROYED:
+ break;
+ default:
+ if (i > ctl_arenas->narenas) {
+ ret = NULL;
+ goto label_return;
+ }
+ break;
+ }
+
ret = super_arena_i_node;
label_return:
- malloc_mutex_unlock(tsdn, &ctl_mtx);
- return ret;
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return ret;
}
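The arena.<i>.* handlers above are addressed by index through the MIB (hence MIB_UNSIGNED(arena_ind, 1)); MALLCTL_ARENAS_ALL selects every arena, e.g. to decay or purge everything at once. A sketch using the by-MIB interface so the index component can be substituted:

#include <jemalloc/jemalloc.h>

static void purge_all_arenas(void) {
	size_t mib[3], miblen = 3;

	/* "arena.0.purge" only resolves the node layout; the index
	 * component is overwritten with the real target below. */
	if (mallctlnametomib("arena.0.purge", mib, &miblen) != 0)
		return;
	mib[1] = MALLCTL_ARENAS_ALL;
	/* Pure action: neither an old nor a new buffer is accepted. */
	mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);
}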
/******************************************************************************/
static int
-arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned narenas;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
goto label_return;
}
- narenas = ctl_arenas->narenas;
+ narenas = ctl_arenas->narenas;
READ(narenas, unsigned);
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return ret;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
}
static int
-arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen, bool dirty) {
+arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen, bool dirty) {
int ret;
- if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
- arena_muzzy_decay_ms_default_get());
- READ(oldval, ssize_t);
- }
- if (newp != NULL) {
- if (newlen != sizeof(ssize_t)) {
- ret = EINVAL;
- goto label_return;
- }
- if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
- : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
- ret = EFAULT;
- goto label_return;
- }
- }
-
- ret = 0;
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
+ arena_muzzy_decay_ms_default_get());
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
+ : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
label_return:
- return ret;
-}
-
-static int
-arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
- newlen, true);
-}
-
-static int
-arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
- newlen, false);
-}
-
+ return ret;
+}
+
+static int
+arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, true);
+}
+
+static int
+arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, false);
+}
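arenas_dirty_decay_ms_ctl/arenas_muzzy_decay_ms_ctl adjust the defaults inherited by newly created arenas, as opposed to the per-arena arena.<i>.*_decay_ms handlers earlier. Both read and write an ssize_t (-1 disables decay). A minimal sketch:

#include <sys/types.h>
#include <jemalloc/jemalloc.h>

static void set_default_dirty_decay(ssize_t ms) {
	ssize_t old;
	size_t len = sizeof(old);
	/* Read the old default and install the new one in one call. */
	mallctl("arenas.dirty_decay_ms", &old, &len, &ms, sizeof(ms));
}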
+
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
-CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
-CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
-CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
-CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
-CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
-CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t)
+CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
+CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
+CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
+CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t)
static const ctl_named_node_t *
-arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib,
- size_t miblen, size_t i) {
- if (i > SC_NBINS) {
- return NULL;
- }
- return super_arenas_bin_i_node;
+arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib,
+ size_t miblen, size_t i) {
+ if (i > SC_NBINS) {
+ return NULL;
+ }
+ return super_arenas_bin_i_node;
}
-CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned)
-CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]),
- size_t)
+CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned)
+CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]),
+ size_t)
static const ctl_named_node_t *
-arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib,
- size_t miblen, size_t i) {
- if (i > SC_NSIZES - SC_NBINS) {
- return NULL;
- }
- return super_arenas_lextent_i_node;
+arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib,
+ size_t miblen, size_t i) {
+ if (i > SC_NSIZES - SC_NBINS) {
+ return NULL;
+ }
+ return super_arenas_lextent_i_node;
}
static int
-arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- extent_hooks_t *extent_hooks;
+ extent_hooks_t *extent_hooks;
unsigned arena_ind;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
-
- extent_hooks = (extent_hooks_t *)&extent_hooks_default;
- WRITE(extent_hooks, extent_hooks_t *);
- if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
- ret = EAGAIN;
- goto label_return;
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+
+ extent_hooks = (extent_hooks_t *)&extent_hooks_default;
+ WRITE(extent_hooks, extent_hooks_t *);
+ if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
+ ret = EAGAIN;
+ goto label_return;
}
- READ(arena_ind, unsigned);
+ READ(arena_ind, unsigned);
- ret = 0;
+ ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return ret;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
}
static int
-arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen) {
+arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
int ret;
- unsigned arena_ind;
- void *ptr;
- extent_t *extent;
- arena_t *arena;
-
- ptr = NULL;
- ret = EINVAL;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- WRITE(ptr, void *);
- extent = iealloc(tsd_tsdn(tsd), ptr);
- if (extent == NULL)
+ unsigned arena_ind;
+ void *ptr;
+ extent_t *extent;
+ arena_t *arena;
+
+ ptr = NULL;
+ ret = EINVAL;
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ WRITE(ptr, void *);
+ extent = iealloc(tsd_tsdn(tsd), ptr);
+ if (extent == NULL)
goto label_return;
- arena = extent_arena_get(extent);
- if (arena == NULL)
- goto label_return;
-
- arena_ind = arena_ind_get(arena);
- READ(arena_ind, unsigned);
-
+ arena = extent_arena_get(extent);
+ if (arena == NULL)
+ goto label_return;
+
+ arena_ind = arena_ind_get(arena);
+ READ(arena_ind, unsigned);
+
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return ret;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
}
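arenas_create_ctl returns a fresh arena index (optionally taking custom extent_hooks through the new buffer) and arenas_lookup_ctl maps an allocation back to its owning arena. A sketch that creates an arena, allocates from it with MALLOCX_ARENA(), and looks the pointer up again:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void arena_roundtrip(void) {
	unsigned ind, found;
	size_t len = sizeof(ind);

	if (mallctl("arenas.create", &ind, &len, NULL, 0) != 0)
		return;

	void *p = mallocx(128, MALLOCX_ARENA(ind));
	if (p == NULL)
		return;

	/* "arenas.lookup": write the pointer, read back the arena index. */
	len = sizeof(found);
	if (mallctl("arenas.lookup", &found, &len, &p, sizeof(p)) == 0)
		printf("allocation lives in arena %u (created %u)\n", found, ind);

	dallocx(p, 0);
}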
/******************************************************************************/
static int
-prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen) {
+prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
int ret;
bool oldval;
- if (!config_prof) {
- return ENOENT;
- }
-
- if (newp != NULL) {
- if (newlen != sizeof(bool)) {
- ret = EINVAL;
- goto label_return;
- }
- oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
- *(bool *)newp);
- } else {
- oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
- }
- READ(oldval, bool);
-
- ret = 0;
-label_return:
- return ret;
-}
-
-static int
-prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- bool oldval;
-
- if (!config_prof) {
- return ENOENT;
- }
+ if (!config_prof) {
+ return ENOENT;
+ }
if (newp != NULL) {
- if (newlen != sizeof(bool)) {
- ret = EINVAL;
- goto label_return;
- }
- oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
- } else {
- oldval = prof_active_get(tsd_tsdn(tsd));
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
+ *(bool *)newp);
+ } else {
+ oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
}
READ(oldval, bool);
ret = 0;
label_return:
- return ret;
+ return ret;
}
static int
-prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
+ bool oldval;
+
+ if (!config_prof) {
+ return ENOENT;
+ }
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
+ } else {
+ oldval = prof_active_get(tsd_tsdn(tsd));
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
const char *filename = NULL;
- if (!config_prof) {
- return ENOENT;
- }
+ if (!config_prof) {
+ return ENOENT;
+ }
WRITEONLY();
WRITE(filename, const char *);
- if (prof_mdump(tsd, filename)) {
+ if (prof_mdump(tsd, filename)) {
ret = EFAULT;
goto label_return;
}
ret = 0;
label_return:
- return ret;
-}
-
-static int
-prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- bool oldval;
-
- if (!config_prof) {
- return ENOENT;
- }
-
- if (newp != NULL) {
- if (newlen != sizeof(bool)) {
- ret = EINVAL;
- goto label_return;
- }
- oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
- } else {
- oldval = prof_gdump_get(tsd_tsdn(tsd));
- }
- READ(oldval, bool);
-
- ret = 0;
-label_return:
- return ret;
-}
-
-static int
-prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- size_t lg_sample = lg_prof_sample;
-
- if (!config_prof) {
- return ENOENT;
- }
-
- WRITEONLY();
- WRITE(lg_sample, size_t);
- if (lg_sample >= (sizeof(uint64_t) << 3)) {
- lg_sample = (sizeof(uint64_t) << 3) - 1;
- }
-
- prof_reset(tsd, lg_sample);
-
- ret = 0;
-label_return:
- return ret;
-}
-
+ return ret;
+}
+
+static int
+prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ bool oldval;
+
+ if (!config_prof) {
+ return ENOENT;
+ }
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
+ } else {
+ oldval = prof_gdump_get(tsd_tsdn(tsd));
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ size_t lg_sample = lg_prof_sample;
+
+ if (!config_prof) {
+ return ENOENT;
+ }
+
+ WRITEONLY();
+ WRITE(lg_sample, size_t);
+ if (lg_sample >= (sizeof(uint64_t) << 3)) {
+ lg_sample = (sizeof(uint64_t) << 3) - 1;
+ }
+
+ prof_reset(tsd, lg_sample);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
-CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
-
-static int
-prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
-
- const char *filename = NULL;
-
- if (!config_prof) {
- return ENOENT;
- }
-
- WRITEONLY();
- WRITE(filename, const char *);
-
- if (prof_log_start(tsd_tsdn(tsd), filename)) {
- ret = EFAULT;
- goto label_return;
- }
-
- ret = 0;
-label_return:
- return ret;
-}
-
-static int
-prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
- if (!config_prof) {
- return ENOENT;
- }
-
- if (prof_log_stop(tsd_tsdn(tsd))) {
- return EFAULT;
- }
-
- return 0;
-}
-
+CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
+
+static int
+prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ const char *filename = NULL;
+
+ if (!config_prof) {
+ return ENOENT;
+ }
+
+ WRITEONLY();
+ WRITE(filename, const char *);
+
+ if (prof_log_start(tsd_tsdn(tsd), filename)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ if (!config_prof) {
+ return ENOENT;
+ }
+
+ if (prof_log_stop(tsd_tsdn(tsd))) {
+ return EFAULT;
+ }
+
+ return 0;
+}
+
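The prof.* handlers above are all gated on config_prof; the common pattern is to toggle "prof.active" and request dumps explicitly via "prof.dump" (optionally with a filename) or through the gdump/log facilities. A hedged sketch:

#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static void dump_heap_profile(const char *path) {
	bool active = true;
	mallctl("prof.active", NULL, NULL, &active, sizeof(active));

	/* A NULL filename lets jemalloc derive one from opt.prof_prefix. */
	mallctl("prof.dump", NULL, NULL, &path, sizeof(path));
}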
/******************************************************************************/
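The stats_* generators that follow read from the ctl_stats snapshot, so a caller normally bumps "epoch" first (as in the sketch after the background-thread handlers) and then reads e.g. stats.allocated/stats.resident as size_t. A minimal read, assuming the statistics were compiled in (config.stats):

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void print_stats(void) {
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

	size_t allocated, resident;
	sz = sizeof(size_t);
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	sz = sizeof(size_t);
	mallctl("stats.resident", &resident, &sz, NULL, 0);
	printf("allocated=%zu resident=%zu\n", allocated, resident);
}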
-CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
-CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
-CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
-CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
-CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
-
-CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
- ctl_stats->background_thread.num_threads, size_t)
-CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
- ctl_stats->background_thread.num_runs, uint64_t)
-CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
- nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
-
-CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
-CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
- ssize_t)
-CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
- ssize_t)
-CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
-CTL_RO_GEN(stats_arenas_i_uptime,
- nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
-CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
-CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
-CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
+CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
+
+CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
+ ctl_stats->background_thread.num_threads, size_t)
+CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
+ ctl_stats->background_thread.num_runs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
+ nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
+
+CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
+CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
+ ssize_t)
+CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
+ ssize_t)
+CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_uptime,
+ nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
+CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
+CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
- size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
- size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
- ATOMIC_RELAXED),
- size_t)
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_base,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
- size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
- size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
- ATOMIC_RELAXED), size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
- ATOMIC_RELAXED), size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
- size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm,
- ATOMIC_RELAXED), size_t)
-
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
+ size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
+ size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
+ ATOMIC_RELAXED),
+ size_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_base,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
+ size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
+ size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
+ ATOMIC_RELAXED), size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
+ ATOMIC_RELAXED), size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
+ size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm,
+ ATOMIC_RELAXED), size_t)
+
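The generators above publish these counters under the "stats." and "stats.arenas.<i>." mallctl names. A short read-side sketch; writing "epoch" first refreshes the cached snapshot that these reads come from, and arena index 0 is just an example:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void print_stats_snapshot(void) {
        /* Advance the epoch so the counters below reflect current state. */
        uint64_t epoch = 1;
        size_t len = sizeof(epoch);
        mallctl("epoch", &epoch, &len, &epoch, len);

        size_t allocated, resident, pdirty;
        size_t sz = sizeof(size_t);
        mallctl("stats.allocated", &allocated, &sz, NULL, 0);
        mallctl("stats.resident", &resident, &sz, NULL, 0);
        mallctl("stats.arenas.0.pdirty", &pdirty, &sz, NULL, 0);

        printf("allocated=%zu resident=%zu arena0.pdirty=%zu\n",
            allocated, resident, pdirty);
    }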
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
- arenas_i(mib[2])->astats->allocated_small, size_t)
+ arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
- arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
+ arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
- arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
+ arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
- arenas_i(mib[2])->astats->nrequests_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
- arenas_i(mib[2])->astats->nfills_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
- arenas_i(mib[2])->astats->nflushes_small, uint64_t)
+ arenas_i(mib[2])->astats->nrequests_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
+ arenas_i(mib[2])->astats->nfills_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
+ arenas_i(mib[2])->astats->nflushes_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
- ATOMIC_RELAXED), size_t)
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
+ ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t)
-/*
- * Note: "nmalloc_large" here instead of "nfills" in the read. This is
- * intentional (large has no batch fill).
- */
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t)
-
-/* Lock profiling related APIs below. */
-#define RO_MUTEX_CTL_GEN(n, l) \
-CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \
- l.n_lock_ops, uint64_t) \
-CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \
- l.n_wait_times, uint64_t) \
-CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \
- l.n_spin_acquired, uint64_t) \
-CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \
- l.n_owner_switches, uint64_t) \
-CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \
- nstime_ns(&l.tot_wait_time), uint64_t) \
-CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \
- nstime_ns(&l.max_wait_time), uint64_t) \
-CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
- l.max_n_thds, uint32_t)
-
-/* Global mutexes. */
-#define OP(mtx) \
- RO_MUTEX_CTL_GEN(mutexes_##mtx, \
- ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
-MUTEX_PROF_GLOBAL_MUTEXES
-#undef OP
-
-/* Per arena mutexes */
-#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \
- arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
-MUTEX_PROF_ARENA_MUTEXES
-#undef OP
-
-/* tcache bin mutex */
-RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
- arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
-#undef RO_MUTEX_CTL_GEN
-
-/* Resets all mutex stats, including global, arena and bin mutexes. */
-static int
-stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen) {
- if (!config_stats) {
- return ENOENT;
- }
-
- tsdn_t *tsdn = tsd_tsdn(tsd);
-
-#define MUTEX_PROF_RESET(mtx) \
- malloc_mutex_lock(tsdn, &mtx); \
- malloc_mutex_prof_data_reset(tsdn, &mtx); \
- malloc_mutex_unlock(tsdn, &mtx);
-
- /* Global mutexes: ctl and prof. */
- MUTEX_PROF_RESET(ctl_mtx);
- if (have_background_thread) {
- MUTEX_PROF_RESET(background_thread_lock);
- }
- if (config_prof && opt_prof) {
- MUTEX_PROF_RESET(bt2gctx_mtx);
- }
-
-
- /* Per arena mutexes. */
- unsigned n = narenas_total_get();
-
- for (unsigned i = 0; i < n; i++) {
- arena_t *arena = arena_get(tsdn, i, false);
- if (!arena) {
- continue;
- }
- MUTEX_PROF_RESET(arena->large_mtx);
- MUTEX_PROF_RESET(arena->extent_avail_mtx);
- MUTEX_PROF_RESET(arena->extents_dirty.mtx);
- MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
- MUTEX_PROF_RESET(arena->extents_retained.mtx);
- MUTEX_PROF_RESET(arena->decay_dirty.mtx);
- MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
- MUTEX_PROF_RESET(arena->tcache_ql_mtx);
- MUTEX_PROF_RESET(arena->base->mtx);
-
- for (szind_t i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_t *bin = &arena->bins[i].bin_shards[j];
- MUTEX_PROF_RESET(bin->lock);
- }
- }
- }
-#undef MUTEX_PROF_RESET
- return 0;
-}
-
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t)
+/*
+ * Note: "nmalloc_large" here instead of "nfills" in the read. This is
+ * intentional (large has no batch fill).
+ */
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t)
+
+/* Lock profiling related APIs below. */
+#define RO_MUTEX_CTL_GEN(n, l) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \
+ l.n_lock_ops, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \
+ l.n_wait_times, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \
+ l.n_spin_acquired, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \
+ l.n_owner_switches, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \
+ nstime_ns(&l.tot_wait_time), uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \
+ nstime_ns(&l.max_wait_time), uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
+ l.max_n_thds, uint32_t)
+
+/* Global mutexes. */
+#define OP(mtx) \
+ RO_MUTEX_CTL_GEN(mutexes_##mtx, \
+ ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+
+/* Per arena mutexes */
+#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \
+ arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+
+/* tcache bin mutex */
+RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
+ arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
+#undef RO_MUTEX_CTL_GEN
+
+/* Resets all mutex stats, including global, arena and bin mutexes. */
+static int
+stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen) {
+ if (!config_stats) {
+ return ENOENT;
+ }
+
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+
+#define MUTEX_PROF_RESET(mtx) \
+ malloc_mutex_lock(tsdn, &mtx); \
+ malloc_mutex_prof_data_reset(tsdn, &mtx); \
+ malloc_mutex_unlock(tsdn, &mtx);
+
+ /* Global mutexes: ctl and prof. */
+ MUTEX_PROF_RESET(ctl_mtx);
+ if (have_background_thread) {
+ MUTEX_PROF_RESET(background_thread_lock);
+ }
+ if (config_prof && opt_prof) {
+ MUTEX_PROF_RESET(bt2gctx_mtx);
+ }
+
+
+ /* Per arena mutexes. */
+ unsigned n = narenas_total_get();
+
+ for (unsigned i = 0; i < n; i++) {
+ arena_t *arena = arena_get(tsdn, i, false);
+ if (!arena) {
+ continue;
+ }
+ MUTEX_PROF_RESET(arena->large_mtx);
+ MUTEX_PROF_RESET(arena->extent_avail_mtx);
+ MUTEX_PROF_RESET(arena->extents_dirty.mtx);
+ MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->extents_retained.mtx);
+ MUTEX_PROF_RESET(arena->decay_dirty.mtx);
+ MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->tcache_ql_mtx);
+ MUTEX_PROF_RESET(arena->base->mtx);
+
+ for (szind_t i = 0; i < SC_NBINS; i++) {
+ for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+ bin_t *bin = &arena->bins[i].bin_shards[j];
+ MUTEX_PROF_RESET(bin->lock);
+ }
+ }
+ }
+#undef MUTEX_PROF_RESET
+ return 0;
+}
+
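The reset handler above is reachable as "stats.mutexes.reset". It reads and writes no value, so a bare mallctl invocation is enough; as a sketch (it returns ENOENT when stats support is compiled out):

    #include <jemalloc/jemalloc.h>

    /* Clear accumulated lock-profiling counters for all tracked mutexes. */
    static int reset_mutex_stats(void) {
        return mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
    }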
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
- arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
- arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
- arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
- arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
- arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
- arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
+ arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
+ arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t)
static const ctl_named_node_t *
-stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
- size_t miblen, size_t j) {
- if (j > SC_NBINS) {
- return NULL;
- }
- return super_stats_arenas_i_bins_j_node;
-}
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
- arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
-
-static const ctl_named_node_t *
-stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
- size_t miblen, size_t j) {
- if (j > SC_NSIZES - SC_NBINS) {
- return NULL;
- }
- return super_stats_arenas_i_lextents_j_node;
-}
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].ndirty,
- ATOMIC_RELAXED), size_t);
-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy,
- ATOMIC_RELAXED), size_t);
-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].nretained,
- ATOMIC_RELAXED), size_t);
-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes,
- ATOMIC_RELAXED), size_t);
-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes,
- ATOMIC_RELAXED), size_t);
-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes,
- ATOMIC_RELAXED), size_t);
+stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
+ size_t miblen, size_t j) {
+ if (j > SC_NBINS) {
+ return NULL;
+ }
+ return super_stats_arenas_i_bins_j_node;
+}
+
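The index handler above is what lets the numeric path components ("stats.arenas.<i>.bins.<j>. ...") resolve, so per-bin counters are typically read through the MIB interface instead of re-parsing a name each time. A hedged sketch; arena 0 and bin 0 are illustrative:

    #include <stddef.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void print_bin0_curregs(void) {
        size_t mib[7];
        size_t miblen = sizeof(mib) / sizeof(mib[0]);
        if (mallctlnametomib("stats.arenas.0.bins.0.curregs",
            mib, &miblen) != 0) {
            return;
        }
        /* mib[2] is the arena index and mib[4] the bin index; both can be
         * rewritten to walk other arenas/bins without re-parsing the name. */
        size_t curregs;
        size_t sz = sizeof(curregs);
        if (mallctlbymib(mib, miblen, &curregs, &sz, NULL, 0) == 0) {
            printf("arena 0, bin 0: curregs=%zu\n", curregs);
        }
    }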
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
+ ctl_arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
+ arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
+ size_t miblen, size_t j) {
+ if (j > SC_NSIZES - SC_NBINS) {
+ return NULL;
+ }
+ return super_stats_arenas_i_lextents_j_node;
+}
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
+ atomic_load_zu(
+ &arenas_i(mib[2])->astats->estats[mib[4]].ndirty,
+ ATOMIC_RELAXED), size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
+ atomic_load_zu(
+ &arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy,
+ ATOMIC_RELAXED), size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
+ atomic_load_zu(
+ &arenas_i(mib[2])->astats->estats[mib[4]].nretained,
+ ATOMIC_RELAXED), size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
+ atomic_load_zu(
+ &arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes,
+ ATOMIC_RELAXED), size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
+ atomic_load_zu(
+ &arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes,
+ ATOMIC_RELAXED), size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
+ atomic_load_zu(
+ &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes,
+ ATOMIC_RELAXED), size_t);
static const ctl_named_node_t *
-stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
- size_t miblen, size_t j) {
- if (j >= SC_NPSIZES) {
- return NULL;
- }
- return super_stats_arenas_i_extents_j_node;
-}
-
-static bool
-ctl_arenas_i_verify(size_t i) {
- size_t a = arenas_i2a_impl(i, true, true);
- if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
- return true;
- }
-
- return false;
+stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
+ size_t miblen, size_t j) {
+ if (j >= SC_NPSIZES) {
+ return NULL;
+ }
+ return super_stats_arenas_i_extents_j_node;
+}
+
+static bool
+ctl_arenas_i_verify(size_t i) {
+ size_t a = arenas_i2a_impl(i, true, true);
+ if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
+ return true;
+ }
+
+ return false;
}
static const ctl_named_node_t *
-stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
- size_t miblen, size_t i) {
- const ctl_named_node_t *ret;
+stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
+ size_t miblen, size_t i) {
+ const ctl_named_node_t *ret;
- malloc_mutex_lock(tsdn, &ctl_mtx);
- if (ctl_arenas_i_verify(i)) {
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ if (ctl_arenas_i_verify(i)) {
ret = NULL;
goto label_return;
}
ret = super_stats_arenas_i_node;
label_return:
- malloc_mutex_unlock(tsdn, &ctl_mtx);
- return ret;
-}
-
-static int
-experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- if (oldp == NULL || oldlenp == NULL|| newp == NULL) {
- ret = EINVAL;
- goto label_return;
- }
- /*
- * Note: this is a *private* struct. This is an experimental interface;
- * forcing the user to know the jemalloc internals well enough to
- * extract the ABI hopefully ensures nobody gets too comfortable with
- * this API, which can change at a moment's notice.
- */
- hooks_t hooks;
- WRITE(hooks, hooks_t);
- void *handle = hook_install(tsd_tsdn(tsd), &hooks);
- if (handle == NULL) {
- ret = EAGAIN;
- goto label_return;
- }
- READ(handle, void *);
-
- ret = 0;
-label_return:
- return ret;
-}
-
-static int
-experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
- WRITEONLY();
- void *handle = NULL;
- WRITE(handle, void *);
- if (handle == NULL) {
- ret = EINVAL;
- goto label_return;
- }
- hook_remove(tsd_tsdn(tsd), handle);
- ret = 0;
-label_return:
- return ret;
-}
-
-/*
- * Output six memory utilization entries for an input pointer, the first one of
- * type (void *) and the remaining five of type size_t, describing the following
- * (in the same order):
- *
- * (a) memory address of the extent a potential reallocation would go into,
- * == the five fields below describe about the extent the pointer resides in ==
- * (b) number of free regions in the extent,
- * (c) number of regions in the extent,
- * (d) size of the extent in terms of bytes,
- * (e) total number of free regions in the bin the extent belongs to, and
- * (f) total number of regions in the bin the extent belongs to.
- *
- * Note that "(e)" and "(f)" are only available when stats are enabled;
- * otherwise their values are undefined.
- *
- * This API is mainly intended for small class allocations, where extents are
- * used as slab.
- *
- * In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
- * will be zero (if stats are enabled; otherwise undefined). The other three
- * fields will be properly set though the values are trivial: "(b)" will be 0,
- * "(c)" will be 1, and "(d)" will be the usable size.
- *
- * The input pointer and size are respectively passed in by newp and newlen,
- * and the output fields and size are respectively oldp and *oldlenp.
- *
- * It can be beneficial to define the following macros to make it easier to
- * access the output:
- *
- * #define SLABCUR_READ(out) (*(void **)out)
- * #define COUNTS(out) ((size_t *)((void **)out + 1))
- * #define NFREE_READ(out) COUNTS(out)[0]
- * #define NREGS_READ(out) COUNTS(out)[1]
- * #define SIZE_READ(out) COUNTS(out)[2]
- * #define BIN_NFREE_READ(out) COUNTS(out)[3]
- * #define BIN_NREGS_READ(out) COUNTS(out)[4]
- *
- * and then write e.g. NFREE_READ(oldp) to fetch the output. See the unit test
- * test_query in test/unit/extent_util.c for an example.
- *
- * For a typical defragmentation workflow making use of this API for
- * understanding the fragmentation level, please refer to the comment for
- * experimental_utilization_batch_query_ctl.
- *
- * It's up to the application how to determine the significance of
- * fragmentation relying on the outputs returned. Possible choices are:
- *
- * (a) if extent utilization ratio is below certain threshold,
- * (b) if extent memory consumption is above certain threshold,
- * (c) if extent utilization ratio is significantly below bin utilization ratio,
- * (d) if input pointer deviates a lot from potential reallocation address, or
- * (e) some selection/combination of the above.
- *
- * The caller needs to make sure that the input/output arguments are valid,
- * in particular, that the size of the output is correct, i.e.:
- *
- * *oldlenp = sizeof(void *) + sizeof(size_t) * 5
- *
- * Otherwise, the function immediately returns EINVAL without touching anything.
- *
- * In the rare case where there's no associated extent found for the input
- * pointer, the function zeros out all output fields and return. Please refer
- * to the comment for experimental_utilization_batch_query_ctl to understand the
- * motivation from C++.
- */
-static int
-experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
-
- assert(sizeof(extent_util_stats_verbose_t)
- == sizeof(void *) + sizeof(size_t) * 5);
-
- if (oldp == NULL || oldlenp == NULL
- || *oldlenp != sizeof(extent_util_stats_verbose_t)
- || newp == NULL) {
- ret = EINVAL;
- goto label_return;
- }
-
- void *ptr = NULL;
- WRITE(ptr, void *);
- extent_util_stats_verbose_t *util_stats
- = (extent_util_stats_verbose_t *)oldp;
- extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
- &util_stats->nfree, &util_stats->nregs, &util_stats->size,
- &util_stats->bin_nfree, &util_stats->bin_nregs,
- &util_stats->slabcur_addr);
- ret = 0;
-
-label_return:
- return ret;
-}
-
-/*
- * Given an input array of pointers, output three memory utilization entries of
- * type size_t for each input pointer about the extent it resides in:
- *
- * (a) number of free regions in the extent,
- * (b) number of regions in the extent, and
- * (c) size of the extent in terms of bytes.
- *
- * This API is mainly intended for small class allocations, where extents are
- * used as slab. In case of large class allocations, the outputs are trivial:
- * "(a)" will be 0, "(b)" will be 1, and "(c)" will be the usable size.
- *
- * Note that multiple input pointers may reside on a same extent so the output
- * fields may contain duplicates.
- *
- * The format of the input/output looks like:
- *
- * input[0]: 1st_pointer_to_query | output[0]: 1st_extent_n_free_regions
- * | output[1]: 1st_extent_n_regions
- * | output[2]: 1st_extent_size
- * input[1]: 2nd_pointer_to_query | output[3]: 2nd_extent_n_free_regions
- * | output[4]: 2nd_extent_n_regions
- * | output[5]: 2nd_extent_size
- * ... | ...
- *
- * The input array and size are respectively passed in by newp and newlen, and
- * the output array and size are respectively oldp and *oldlenp.
- *
- * It can be beneficial to define the following macros to make it easier to
- * access the output:
- *
- * #define NFREE_READ(out, i) out[(i) * 3]
- * #define NREGS_READ(out, i) out[(i) * 3 + 1]
- * #define SIZE_READ(out, i) out[(i) * 3 + 2]
- *
- * and then write e.g. NFREE_READ(oldp, i) to fetch the output. See the unit
- * test test_batch in test/unit/extent_util.c for a concrete example.
- *
- * A typical workflow would be composed of the following steps:
- *
- * (1) flush tcache: mallctl("thread.tcache.flush", ...)
- * (2) initialize input array of pointers to query fragmentation
- * (3) allocate output array to hold utilization statistics
- * (4) query utilization: mallctl("experimental.utilization.batch_query", ...)
- * (5) (optional) decide if it's worthwhile to defragment; otherwise stop here
- * (6) disable tcache: mallctl("thread.tcache.enabled", ...)
- * (7) defragment allocations with significant fragmentation, e.g.:
- * for each allocation {
- * if it's fragmented {
- * malloc(...);
- * memcpy(...);
- * free(...);
- * }
- * }
- * (8) enable tcache: mallctl("thread.tcache.enabled", ...)
- *
- * The application can determine the significance of fragmentation themselves
- * relying on the statistics returned, both at the overall level i.e. step "(5)"
- * and at individual allocation level i.e. within step "(7)". Possible choices
- * are:
- *
- * (a) whether memory utilization ratio is below certain threshold,
- * (b) whether memory consumption is above certain threshold, or
- * (c) some combination of the two.
- *
- * The caller needs to make sure that the input/output arrays are valid and
- * their sizes are proper as well as matched, meaning:
- *
- * (a) newlen = n_pointers * sizeof(const void *)
- * (b) *oldlenp = n_pointers * sizeof(size_t) * 3
- * (c) n_pointers > 0
- *
- * Otherwise, the function immediately returns EINVAL without touching anything.
- *
- * In the rare case where there's no associated extent found for some pointers,
- * rather than immediately terminating the computation and raising an error,
- * the function simply zeros out the corresponding output fields and continues
- * the computation until all input pointers are handled. The motivations of
- * such a design are as follows:
- *
- * (a) The function always either processes nothing or processes everything, and
- * never leaves the output half touched and half untouched.
- *
- * (b) It facilitates usage needs especially common in C++. A vast variety of
- * C++ objects are instantiated with multiple dynamic memory allocations. For
- * example, std::string and std::vector typically use at least two allocations,
- * one for the metadata and one for the actual content. Other types may use
- * even more allocations. When inquiring about utilization statistics, the
- * caller often wants to examine into all such allocations, especially internal
- * one(s), rather than just the topmost one. The issue comes when some
- * implementations do certain optimizations to reduce/aggregate some internal
- * allocations, e.g. putting short strings directly into the metadata, and such
- * decisions are not known to the caller. Therefore, we permit pointers to
- * memory usages that may not be returned by previous malloc calls, and we
- * provide the caller a convenient way to identify such cases.
- */
-static int
-experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- int ret;
-
- assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3);
-
- const size_t len = newlen / sizeof(const void *);
- if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
- || newlen != len * sizeof(const void *)
- || *oldlenp != len * sizeof(extent_util_stats_t)) {
- ret = EINVAL;
- goto label_return;
- }
-
- void **ptrs = (void **)newp;
- extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp;
- size_t i;
- for (i = 0; i < len; ++i) {
- extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
- &util_stats[i].nfree, &util_stats[i].nregs,
- &util_stats[i].size);
- }
- ret = 0;
-
-label_return:
- return ret;
-}
-
-static const ctl_named_node_t *
-experimental_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
- size_t miblen, size_t i) {
- const ctl_named_node_t *ret;
-
- malloc_mutex_lock(tsdn, &ctl_mtx);
- if (ctl_arenas_i_verify(i)) {
- ret = NULL;
- goto label_return;
- }
- ret = super_experimental_arenas_i_node;
-label_return:
- malloc_mutex_unlock(tsdn, &ctl_mtx);
- return ret;
-}
-
-static int
-experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
- if (!config_stats) {
- return ENOENT;
- }
- if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(size_t *)) {
- return EINVAL;
- }
-
- unsigned arena_ind;
- arena_t *arena;
- int ret;
- size_t *pactivep;
-
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- READONLY();
- MIB_UNSIGNED(arena_ind, 2);
- if (arena_ind < narenas_total_get() && (arena =
- arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
-#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
- defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
- /* Expose the underlying counter for fast read. */
- pactivep = (size_t *)&(arena->nactive.repr);
- READ(pactivep, size_t *);
- ret = 0;
-#else
- ret = EFAULT;
-#endif
- } else {
- ret = EFAULT;
- }
-label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return ret;
-}
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return ret;
+}
+
+static int
+experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+	if (oldp == NULL || oldlenp == NULL || newp == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ /*
+ * Note: this is a *private* struct. This is an experimental interface;
+ * forcing the user to know the jemalloc internals well enough to
+ * extract the ABI hopefully ensures nobody gets too comfortable with
+ * this API, which can change at a moment's notice.
+ */
+ hooks_t hooks;
+ WRITE(hooks, hooks_t);
+ void *handle = hook_install(tsd_tsdn(tsd), &hooks);
+ if (handle == NULL) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ READ(handle, void *);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ WRITEONLY();
+ void *handle = NULL;
+ WRITE(handle, void *);
+ if (handle == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ hook_remove(tsd_tsdn(tsd), handle);
+ ret = 0;
+label_return:
+ return ret;
+}
+
+/*
+ * Output six memory utilization entries for an input pointer, the first one of
+ * type (void *) and the remaining five of type size_t, describing the following
+ * (in the same order):
+ *
+ * (a) memory address of the extent a potential reallocation would go into,
+ * == the five fields below describe the extent the pointer resides in ==
+ * (b) number of free regions in the extent,
+ * (c) number of regions in the extent,
+ * (d) size of the extent in terms of bytes,
+ * (e) total number of free regions in the bin the extent belongs to, and
+ * (f) total number of regions in the bin the extent belongs to.
+ *
+ * Note that "(e)" and "(f)" are only available when stats are enabled;
+ * otherwise their values are undefined.
+ *
+ * This API is mainly intended for small class allocations, where extents are
+ * used as slab.
+ *
+ * In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
+ * will be zero (if stats are enabled; otherwise undefined). The other three
+ * fields will be properly set though the values are trivial: "(b)" will be 0,
+ * "(c)" will be 1, and "(d)" will be the usable size.
+ *
+ * The input pointer and size are respectively passed in by newp and newlen,
+ * and the output fields and size are respectively oldp and *oldlenp.
+ *
+ * It can be beneficial to define the following macros to make it easier to
+ * access the output:
+ *
+ * #define SLABCUR_READ(out) (*(void **)out)
+ * #define COUNTS(out) ((size_t *)((void **)out + 1))
+ * #define NFREE_READ(out) COUNTS(out)[0]
+ * #define NREGS_READ(out) COUNTS(out)[1]
+ * #define SIZE_READ(out) COUNTS(out)[2]
+ * #define BIN_NFREE_READ(out) COUNTS(out)[3]
+ * #define BIN_NREGS_READ(out) COUNTS(out)[4]
+ *
+ * and then write e.g. NFREE_READ(oldp) to fetch the output. See the unit test
+ * test_query in test/unit/extent_util.c for an example.
+ *
+ * For a typical defragmentation workflow making use of this API for
+ * understanding the fragmentation level, please refer to the comment for
+ * experimental_utilization_batch_query_ctl.
+ *
+ * It's up to the application how to determine the significance of
+ * fragmentation relying on the outputs returned. Possible choices are:
+ *
+ * (a) if extent utilization ratio is below certain threshold,
+ * (b) if extent memory consumption is above certain threshold,
+ * (c) if extent utilization ratio is significantly below bin utilization ratio,
+ * (d) if input pointer deviates a lot from potential reallocation address, or
+ * (e) some selection/combination of the above.
+ *
+ * The caller needs to make sure that the input/output arguments are valid,
+ * in particular, that the size of the output is correct, i.e.:
+ *
+ * *oldlenp = sizeof(void *) + sizeof(size_t) * 5
+ *
+ * Otherwise, the function immediately returns EINVAL without touching anything.
+ *
+ * In the rare case where there's no associated extent found for the input
+ * pointer, the function zeros out all output fields and returns. Please refer
+ * to the comment for experimental_utilization_batch_query_ctl to understand the
+ * motivation from C++.
+ */
+static int
+experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ assert(sizeof(extent_util_stats_verbose_t)
+ == sizeof(void *) + sizeof(size_t) * 5);
+
+ if (oldp == NULL || oldlenp == NULL
+ || *oldlenp != sizeof(extent_util_stats_verbose_t)
+ || newp == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ void *ptr = NULL;
+ WRITE(ptr, void *);
+ extent_util_stats_verbose_t *util_stats
+ = (extent_util_stats_verbose_t *)oldp;
+ extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
+ &util_stats->nfree, &util_stats->nregs, &util_stats->size,
+ &util_stats->bin_nfree, &util_stats->bin_nregs,
+ &util_stats->slabcur_addr);
+ ret = 0;
+
+label_return:
+ return ret;
+}
+
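Putting the comment above into practice: a hedged sketch of a single-pointer query that follows the suggested macro layout (one void * followed by five size_t). The buffer trick below assumes sizeof(size_t) == sizeof(void *), which holds on the usual 64-bit targets:

    #include <stddef.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    #define SLABCUR_READ(out)   (*(void **)(out))
    #define COUNTS(out)         ((size_t *)((void **)(out) + 1))
    #define NFREE_READ(out)     COUNTS(out)[0]
    #define NREGS_READ(out)     COUNTS(out)[1]
    #define SIZE_READ(out)      COUNTS(out)[2]

    static void query_utilization(void *ptr) {
        /* One pointer plus five size_t fields; sized per the assert above. */
        void *out[6];
        size_t outlen = sizeof(void *) + sizeof(size_t) * 5;
        if (mallctl("experimental.utilization.query",
            out, &outlen, &ptr, sizeof(ptr)) != 0) {
            return;
        }
        printf("slabcur=%p nfree=%zu nregs=%zu size=%zu\n",
            SLABCUR_READ(out), NFREE_READ(out), NREGS_READ(out),
            SIZE_READ(out));
    }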
+/*
+ * Given an input array of pointers, output three memory utilization entries of
+ * type size_t for each input pointer about the extent it resides in:
+ *
+ * (a) number of free regions in the extent,
+ * (b) number of regions in the extent, and
+ * (c) size of the extent in terms of bytes.
+ *
+ * This API is mainly intended for small class allocations, where extents are
+ * used as slab. In case of large class allocations, the outputs are trivial:
+ * "(a)" will be 0, "(b)" will be 1, and "(c)" will be the usable size.
+ *
+ * Note that multiple input pointers may reside in the same extent, so the output
+ * fields may contain duplicates.
+ *
+ * The format of the input/output looks like:
+ *
+ * input[0]: 1st_pointer_to_query | output[0]: 1st_extent_n_free_regions
+ * | output[1]: 1st_extent_n_regions
+ * | output[2]: 1st_extent_size
+ * input[1]: 2nd_pointer_to_query | output[3]: 2nd_extent_n_free_regions
+ * | output[4]: 2nd_extent_n_regions
+ * | output[5]: 2nd_extent_size
+ * ... | ...
+ *
+ * The input array and size are respectively passed in by newp and newlen, and
+ * the output array and size are respectively oldp and *oldlenp.
+ *
+ * It can be beneficial to define the following macros to make it easier to
+ * access the output:
+ *
+ * #define NFREE_READ(out, i) out[(i) * 3]
+ * #define NREGS_READ(out, i) out[(i) * 3 + 1]
+ * #define SIZE_READ(out, i) out[(i) * 3 + 2]
+ *
+ * and then write e.g. NFREE_READ(oldp, i) to fetch the output. See the unit
+ * test test_batch in test/unit/extent_util.c for a concrete example.
+ *
+ * A typical workflow would be composed of the following steps:
+ *
+ * (1) flush tcache: mallctl("thread.tcache.flush", ...)
+ * (2) initialize input array of pointers to query fragmentation
+ * (3) allocate output array to hold utilization statistics
+ * (4) query utilization: mallctl("experimental.utilization.batch_query", ...)
+ * (5) (optional) decide if it's worthwhile to defragment; otherwise stop here
+ * (6) disable tcache: mallctl("thread.tcache.enabled", ...)
+ * (7) defragment allocations with significant fragmentation, e.g.:
+ * for each allocation {
+ * if it's fragmented {
+ * malloc(...);
+ * memcpy(...);
+ * free(...);
+ * }
+ * }
+ * (8) enable tcache: mallctl("thread.tcache.enabled", ...)
+ *
+ * The application can determine the significance of fragmentation themselves
+ * relying on the statistics returned, both at the overall level i.e. step "(5)"
+ * and at individual allocation level i.e. within step "(7)". Possible choices
+ * are:
+ *
+ * (a) whether memory utilization ratio is below certain threshold,
+ * (b) whether memory consumption is above certain threshold, or
+ * (c) some combination of the two.
+ *
+ * The caller needs to make sure that the input/output arrays are valid and
+ * their sizes are proper as well as matched, meaning:
+ *
+ * (a) newlen = n_pointers * sizeof(const void *)
+ * (b) *oldlenp = n_pointers * sizeof(size_t) * 3
+ * (c) n_pointers > 0
+ *
+ * Otherwise, the function immediately returns EINVAL without touching anything.
+ *
+ * In the rare case where there's no associated extent found for some pointers,
+ * rather than immediately terminating the computation and raising an error,
+ * the function simply zeros out the corresponding output fields and continues
+ * the computation until all input pointers are handled. The motivations of
+ * such a design are as follows:
+ *
+ * (a) The function always either processes nothing or processes everything, and
+ * never leaves the output half touched and half untouched.
+ *
+ * (b) It facilitates usage needs especially common in C++. A vast variety of
+ * C++ objects are instantiated with multiple dynamic memory allocations. For
+ * example, std::string and std::vector typically use at least two allocations,
+ * one for the metadata and one for the actual content. Other types may use
+ * even more allocations. When inquiring about utilization statistics, the
+ * caller often wants to examine all such allocations, especially internal
+ * one(s), rather than just the topmost one. The issue comes when some
+ * implementations do certain optimizations to reduce/aggregate some internal
+ * allocations, e.g. putting short strings directly into the metadata, and such
+ * decisions are not known to the caller. Therefore, we permit pointers to
+ * memory usages that may not be returned by previous malloc calls, and we
+ * provide the caller a convenient way to identify such cases.
+ */
+static int
+experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3);
+
+ const size_t len = newlen / sizeof(const void *);
+ if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
+ || newlen != len * sizeof(const void *)
+ || *oldlenp != len * sizeof(extent_util_stats_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ void **ptrs = (void **)newp;
+ extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp;
+ size_t i;
+ for (i = 0; i < len; ++i) {
+ extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
+ &util_stats[i].nfree, &util_stats[i].nregs,
+ &util_stats[i].size);
+ }
+ ret = 0;
+
+label_return:
+ return ret;
+}
+
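A matching sketch for the batch variant, using the three-size_t-per-pointer layout and the accessor macros described in the comment above; the fixed-size output buffer and the 8-pointer cap are illustrative simplifications:

    #include <stddef.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    #define NFREE_READ(out, i)  (out)[(i) * 3]
    #define NREGS_READ(out, i)  (out)[(i) * 3 + 1]
    #define SIZE_READ(out, i)   (out)[(i) * 3 + 2]

    static void batch_query(const void **ptrs, size_t n_pointers) {
        size_t out[3 * 8];              /* room for up to 8 pointers here */
        if (n_pointers == 0 || n_pointers > 8) {
            return;
        }
        size_t outlen = n_pointers * sizeof(size_t) * 3;
        size_t newlen = n_pointers * sizeof(const void *);
        if (mallctl("experimental.utilization.batch_query",
            out, &outlen, ptrs, newlen) != 0) {
            return;
        }
        for (size_t i = 0; i < n_pointers; i++) {
            printf("ptr[%zu]: nfree=%zu nregs=%zu size=%zu\n", i,
                NFREE_READ(out, i), NREGS_READ(out, i), SIZE_READ(out, i));
        }
    }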
+static const ctl_named_node_t *
+experimental_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
+ size_t miblen, size_t i) {
+ const ctl_named_node_t *ret;
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ if (ctl_arenas_i_verify(i)) {
+ ret = NULL;
+ goto label_return;
+ }
+ ret = super_experimental_arenas_i_node;
+label_return:
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return ret;
+}
+
+static int
+experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ if (!config_stats) {
+ return ENOENT;
+ }
+ if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(size_t *)) {
+ return EINVAL;
+ }
+
+ unsigned arena_ind;
+ arena_t *arena;
+ int ret;
+ size_t *pactivep;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ READONLY();
+ MIB_UNSIGNED(arena_ind, 2);
+ if (arena_ind < narenas_total_get() && (arena =
+ arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
+#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
+ defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
+ /* Expose the underlying counter for fast read. */
+ pactivep = (size_t *)&(arena->nactive.repr);
+ READ(pactivep, size_t *);
+ ret = 0;
+#else
+ ret = EFAULT;
+#endif
+ } else {
+ ret = EFAULT;
+ }
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
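The handler above exposes a raw pointer to the arena's active-page counter, so callers can poll it without repeated mallctl traffic. A hedged read-side sketch; arena index 0 is illustrative, and the control reports EFAULT on builds without the atomics required by the #if above:

    #include <stddef.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void watch_arena0_pages(void) {
        size_t *pactivep = NULL;
        size_t len = sizeof(pactivep);
        if (mallctl("experimental.arenas.0.pactivep",
            &pactivep, &len, NULL, 0) != 0) {
            return;             /* ENOENT/EINVAL/EFAULT: not available here */
        }
        /* The pointer stays valid for the arena's lifetime; reads are racy
         * but cheap, which is the point of exposing the raw counter. */
        printf("arena 0 active pages: %zu\n", *pactivep);
    }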
diff --git a/contrib/libs/jemalloc/src/div.c b/contrib/libs/jemalloc/src/div.c
index 808892a133..a8c574fdd4 100644
--- a/contrib/libs/jemalloc/src/div.c
+++ b/contrib/libs/jemalloc/src/div.c
@@ -1,55 +1,55 @@
-#include "jemalloc/internal/jemalloc_preamble.h"
-
-#include "jemalloc/internal/div.h"
-
-#include "jemalloc/internal/assert.h"
-
-/*
- * Suppose we have n = q * d, all integers. We know n and d, and want q = n / d.
- *
- * For any k, we have (here, all division is exact; not C-style rounding):
- * floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where
- * r = (-2^k) mod d.
- *
- * Expanding this out:
- * ... = floor(2^k / d * n / 2^k + r / d * n / 2^k)
- * = floor(n / d + (r / d) * (n / 2^k)).
- *
- * The fractional part of n / d is 0 (because of the assumption that d divides n
- * exactly), so we have:
- * ... = n / d + floor((r / d) * (n / 2^k))
- *
- * So that our initial expression is equal to the quantity we seek, so long as
- * (r / d) * (n / 2^k) < 1.
- *
- * r is a remainder mod d, so r < d and r / d < 1 always. We can make
- * n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works.
- */
-
-void
-div_init(div_info_t *div_info, size_t d) {
- /* Nonsensical. */
- assert(d != 0);
- /*
- * This would make the value of magic too high to fit into a uint32_t
- * (we would want magic = 2^32 exactly). This would mess with code gen
- * on 32-bit machines.
- */
- assert(d != 1);
-
- uint64_t two_to_k = ((uint64_t)1 << 32);
- uint32_t magic = (uint32_t)(two_to_k / d);
-
- /*
- * We want magic = ceil(2^k / d), but C gives us floor. We have to
- * increment it unless the result was exact (i.e. unless d is a power of
- * two).
- */
- if (two_to_k % d != 0) {
- magic++;
- }
- div_info->magic = magic;
-#ifdef JEMALLOC_DEBUG
- div_info->d = d;
-#endif
-}
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+#include "jemalloc/internal/div.h"
+
+#include "jemalloc/internal/assert.h"
+
+/*
+ * Suppose we have n = q * d, all integers. We know n and d, and want q = n / d.
+ *
+ * For any k, we have (here, all division is exact; not C-style rounding):
+ * floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where
+ * r = (-2^k) mod d.
+ *
+ * Expanding this out:
+ * ... = floor(2^k / d * n / 2^k + r / d * n / 2^k)
+ * = floor(n / d + (r / d) * (n / 2^k)).
+ *
+ * The fractional part of n / d is 0 (because of the assumption that d divides n
+ * exactly), so we have:
+ * ... = n / d + floor((r / d) * (n / 2^k))
+ *
+ * So that our initial expression is equal to the quantity we seek, so long as
+ * (r / d) * (n / 2^k) < 1.
+ *
+ * r is a remainder mod d, so r < d and r / d < 1 always. We can make
+ * n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works.
+ */
+
+void
+div_init(div_info_t *div_info, size_t d) {
+ /* Nonsensical. */
+ assert(d != 0);
+ /*
+ * This would make the value of magic too high to fit into a uint32_t
+ * (we would want magic = 2^32 exactly). This would mess with code gen
+ * on 32-bit machines.
+ */
+ assert(d != 1);
+
+ uint64_t two_to_k = ((uint64_t)1 << 32);
+ uint32_t magic = (uint32_t)(two_to_k / d);
+
+ /*
+ * We want magic = ceil(2^k / d), but C gives us floor. We have to
+ * increment it unless the result was exact (i.e. unless d is a power of
+ * two).
+ */
+ if (two_to_k % d != 0) {
+ magic++;
+ }
+ div_info->magic = magic;
+#ifdef JEMALLOC_DEBUG
+ div_info->d = d;
+#endif
+}
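To make the derivation above concrete: with k = 32 the precomputed constant is magic = ceil(2^32 / d), and for any n that d divides exactly the quotient comes back from one multiply and one shift. A standalone sketch of the same trick (div_magic and div_check are illustrative helpers, not jemalloc API):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* magic = ceil(2^32 / d), exactly as div_init computes it. */
    static uint32_t div_magic(uint32_t d) {
        assert(d > 1);
        uint64_t two_to_k = (uint64_t)1 << 32;
        uint32_t magic = (uint32_t)(two_to_k / d);
        if (two_to_k % d != 0) {
            magic++;
        }
        return magic;
    }

    /* q = floor(n * magic / 2^32) == n / d whenever d divides n exactly. */
    static uint32_t div_check(uint32_t n, uint32_t d) {
        return (uint32_t)(((uint64_t)n * div_magic(d)) >> 32);
    }

    int main(void) {
        /* e.g. region size d = 96 dividing a slab offset n = 96 * 123. */
        printf("%u\n", div_check(96u * 123u, 96u));     /* prints 123 */
        return 0;
    }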
diff --git a/contrib/libs/jemalloc/src/extent.c b/contrib/libs/jemalloc/src/extent.c
index 9237f903dc..9042caa7ba 100644
--- a/contrib/libs/jemalloc/src/extent.c
+++ b/contrib/libs/jemalloc/src/extent.c
@@ -1,2403 +1,2403 @@
-#define JEMALLOC_EXTENT_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/extent_dss.h"
-#include "jemalloc/internal/extent_mmap.h"
-#include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
-
+#define JEMALLOC_EXTENT_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+
/******************************************************************************/
-/* Data. */
-
-rtree_t extents_rtree;
-/* Keyed by the address of the extent_t being protected. */
-mutex_pool_t extent_mutex_pool;
-
-size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
-
-static const bitmap_info_t extents_bitmap_info =
- BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
-
-static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit,
- unsigned arena_ind);
-static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, bool committed, unsigned arena_ind);
-static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, bool committed, unsigned arena_ind);
-static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t offset, size_t length, unsigned arena_ind);
-static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained);
-static bool extent_decommit_default(extent_hooks_t *extent_hooks,
- void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
-#ifdef PAGES_CAN_PURGE_LAZY
-static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t offset, size_t length, unsigned arena_ind);
-#endif
-static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained);
-#ifdef PAGES_CAN_PURGE_FORCED
-static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
- void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
-#endif
-static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained);
-static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t size_a, size_t size_b, bool committed,
- unsigned arena_ind);
-static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
- bool growing_retained);
-static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
- size_t size_a, void *addr_b, size_t size_b, bool committed,
- unsigned arena_ind);
-static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
- bool growing_retained);
-
-const extent_hooks_t extent_hooks_default = {
- extent_alloc_default,
- extent_dalloc_default,
- extent_destroy_default,
- extent_commit_default,
- extent_decommit_default
-#ifdef PAGES_CAN_PURGE_LAZY
- ,
- extent_purge_lazy_default
-#else
- ,
- NULL
-#endif
-#ifdef PAGES_CAN_PURGE_FORCED
- ,
- extent_purge_forced_default
-#else
- ,
- NULL
-#endif
- ,
- extent_split_default,
- extent_merge_default
-};
-
-/* Used exclusively for gdump triggering. */
-static atomic_zu_t curpages;
-static atomic_zu_t highpages;
-
-/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
-
-static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
-static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
- size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
- bool *zero, bool *commit, bool growing_retained);
-static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained);
-static void extent_record(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
- bool growing_retained);
-
-/******************************************************************************/
-
-#define ATTR_NONE /* does nothing */
-
-ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
- extent_esnead_comp)
-
-#undef ATTR_NONE
-
-typedef enum {
- lock_result_success,
- lock_result_failure,
- lock_result_no_extent
-} lock_result_t;
-
-static lock_result_t
-extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
- extent_t **result, bool inactive_only) {
- extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
- elm, true);
-
- /* Slab implies active extents and should be skipped. */
- if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
- &extents_rtree, elm, true))) {
- return lock_result_no_extent;
- }
-
- /*
- * It's possible that the extent changed out from under us, and with it
- * the leaf->extent mapping. We have to recheck while holding the lock.
- */
- extent_lock(tsdn, extent1);
- extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
- &extents_rtree, elm, true);
-
- if (extent1 == extent2) {
- *result = extent1;
- return lock_result_success;
- } else {
- extent_unlock(tsdn, extent1);
- return lock_result_failure;
- }
-}
-
-/*
- * Returns a pool-locked extent_t * if there's one associated with the given
- * address, and NULL otherwise.
- */
-static extent_t *
-extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
- bool inactive_only) {
- extent_t *ret = NULL;
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)addr, false, false);
- if (elm == NULL) {
- return NULL;
- }
- lock_result_t lock_result;
- do {
- lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
- inactive_only);
- } while (lock_result == lock_result_failure);
- return ret;
-}
-
-extent_t *
-extent_alloc(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
- extent_t *extent = extent_avail_first(&arena->extent_avail);
- if (extent == NULL) {
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
- return base_alloc_extent(tsdn, arena->base);
- }
- extent_avail_remove(&arena->extent_avail, extent);
- atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
- return extent;
-}
-
-void
-extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
- extent_avail_insert(&arena->extent_avail, extent);
- atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
-}
-
-extent_hooks_t *
-extent_hooks_get(arena_t *arena) {
- return base_extent_hooks_get(arena->base);
-}
-
-extent_hooks_t *
-extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
- background_thread_info_t *info;
- if (have_background_thread) {
- info = arena_background_thread_info_get(arena);
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- }
- extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
- if (have_background_thread) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
- }
-
- return ret;
-}
-
-static void
-extent_hooks_assure_initialized(arena_t *arena,
- extent_hooks_t **r_extent_hooks) {
- if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
- *r_extent_hooks = extent_hooks_get(arena);
- }
-}
-
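-/*
- * Size quantization bins extent sizes into page-size classes (psz):
- * extent_size_quantize_floor() rounds a size down to the nearest class and is
- * used when inserting into an extents_t's heaps, while
- * extent_size_quantize_ceil() rounds up and is used when searching for a fit.
- */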
-#ifndef JEMALLOC_JET
-static
-#endif
-size_t
-extent_size_quantize_floor(size_t size) {
- size_t ret;
- pszind_t pind;
-
- assert(size > 0);
- assert((size & PAGE_MASK) == 0);
-
- pind = sz_psz2ind(size - sz_large_pad + 1);
- if (pind == 0) {
- /*
- * Avoid underflow. This short-circuit would also do the right
- * thing for all sizes in the range for which there are
- * PAGE-spaced size classes, but it's simplest to just handle
- * the one case that would cause erroneous results.
- */
- return size;
- }
- ret = sz_pind2sz(pind - 1) + sz_large_pad;
- assert(ret <= size);
- return ret;
-}
-
-#ifndef JEMALLOC_JET
-static
-#endif
-size_t
-extent_size_quantize_ceil(size_t size) {
- size_t ret;
-
- assert(size > 0);
- assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
-
- ret = extent_size_quantize_floor(size);
- if (ret < size) {
- /*
- * Skip a quantization that may have an adequately large extent,
- * because under-sized extents may be mixed in. This only
- * happens when an unusual size is requested, i.e. for aligned
- * allocation, and is just one of several places where linear
- * search would potentially find sufficiently aligned available
- * memory somewhere lower.
- */
- ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
- sz_large_pad;
- }
- return ret;
-}
-
-/* Generate pairing heap functions. */
-ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
-
-bool
-extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
- bool delay_coalesce) {
- if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
- for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
- extent_heap_new(&extents->heaps[i]);
- }
- bitmap_init(extents->bitmap, &extents_bitmap_info, true);
- extent_list_init(&extents->lru);
- atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
- extents->state = state;
- extents->delay_coalesce = delay_coalesce;
- return false;
-}
-
-extent_state_t
-extents_state_get(const extents_t *extents) {
- return extents->state;
-}
-
-size_t
-extents_npages_get(extents_t *extents) {
- return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
-}
-
-size_t
-extents_nextents_get(extents_t *extents, pszind_t pind) {
- return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
-}
-
-size_t
-extents_nbytes_get(extents_t *extents, pszind_t pind) {
- return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
-}
-
-static void
-extents_stats_add(extents_t *extents, pszind_t pind, size_t sz) {
- size_t cur = atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extents->nextents[pind], cur + 1, ATOMIC_RELAXED);
- cur = atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extents->nbytes[pind], cur + sz, ATOMIC_RELAXED);
-}
-
-static void
-extents_stats_sub(extents_t *extents, pszind_t pind, size_t sz) {
- size_t cur = atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extents->nextents[pind], cur - 1, ATOMIC_RELAXED);
- cur = atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extents->nbytes[pind], cur - sz, ATOMIC_RELAXED);
-}
-
-static void
-extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
- malloc_mutex_assert_owner(tsdn, &extents->mtx);
- assert(extent_state_get(extent) == extents->state);
-
- size_t size = extent_size_get(extent);
- size_t psz = extent_size_quantize_floor(size);
- pszind_t pind = sz_psz2ind(psz);
- if (extent_heap_empty(&extents->heaps[pind])) {
- bitmap_unset(extents->bitmap, &extents_bitmap_info,
- (size_t)pind);
- }
- extent_heap_insert(&extents->heaps[pind], extent);
-
- if (config_stats) {
- extents_stats_add(extents, pind, size);
- }
-
- extent_list_append(&extents->lru, extent);
- size_t npages = size >> LG_PAGE;
- /*
- * All modifications to npages hold the mutex (as asserted above), so we
- * don't need an atomic fetch-add; we can get by with a load followed by
- * a store.
- */
- size_t cur_extents_npages =
- atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
- atomic_store_zu(&extents->npages, cur_extents_npages + npages,
- ATOMIC_RELAXED);
-}
-
-static void
-extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
- malloc_mutex_assert_owner(tsdn, &extents->mtx);
- assert(extent_state_get(extent) == extents->state);
-
- size_t size = extent_size_get(extent);
- size_t psz = extent_size_quantize_floor(size);
- pszind_t pind = sz_psz2ind(psz);
- extent_heap_remove(&extents->heaps[pind], extent);
-
- if (config_stats) {
- extents_stats_sub(extents, pind, size);
- }
-
- if (extent_heap_empty(&extents->heaps[pind])) {
- bitmap_set(extents->bitmap, &extents_bitmap_info,
- (size_t)pind);
- }
- extent_list_remove(&extents->lru, extent);
- size_t npages = size >> LG_PAGE;
- /*
- * As in extents_insert_locked, we hold extents->mtx and so don't need
- * atomic operations for updating extents->npages.
- */
- size_t cur_extents_npages =
- atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
- assert(cur_extents_npages >= npages);
- atomic_store_zu(&extents->npages, cur_extents_npages - npages,
- ATOMIC_RELAXED);
-}
-
-/*
- * Find an extent with size [min_size, max_size) to satisfy the alignment
- * requirement. For each size, try only the first extent in the heap.
- */
-static extent_t *
-extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
- size_t alignment) {
- pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
- pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
-
- for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
- &extents_bitmap_info, (size_t)pind); i < pind_max; i =
- (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
- (size_t)i+1)) {
- assert(i < SC_NPSIZES);
- assert(!extent_heap_empty(&extents->heaps[i]));
- extent_t *extent = extent_heap_first(&extents->heaps[i]);
- uintptr_t base = (uintptr_t)extent_base_get(extent);
- size_t candidate_size = extent_size_get(extent);
- assert(candidate_size >= min_size);
-
- uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
- PAGE_CEILING(alignment));
- if (base > next_align || base + candidate_size <= next_align) {
- /* Overflow or not crossing the next alignment. */
- continue;
- }
-
- size_t leadsize = next_align - base;
- if (candidate_size - leadsize >= min_size) {
- return extent;
- }
- }
-
- return NULL;
-}
-
-/*
- * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
- * large enough.
- */
-static extent_t *
-extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- size_t size) {
- extent_t *ret = NULL;
-
- pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
-
- if (!maps_coalesce && !opt_retain) {
- /*
- * No split / merge allowed (Windows w/o retain). Try exact fit
- * only.
- */
- return extent_heap_empty(&extents->heaps[pind]) ? NULL :
- extent_heap_first(&extents->heaps[pind]);
- }
-
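- /*
- * The bitmap tracks which heaps are non-empty (bits are cleared in
- * extents_insert_locked when a heap gains its first extent), so
- * bitmap_ffu() visits the non-empty size classes at or above pind.
- * Among the first extents of those heaps, keep the one that compares
- * lowest by (sn, addr) to approximate oldest/lowest first-fit.
- */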
- for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
- &extents_bitmap_info, (size_t)pind);
- i < SC_NPSIZES + 1;
- i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
- (size_t)i+1)) {
- assert(!extent_heap_empty(&extents->heaps[i]));
- extent_t *extent = extent_heap_first(&extents->heaps[i]);
- assert(extent_size_get(extent) >= size);
- /*
- * In order to reduce fragmentation, avoid reusing and splitting
- * large extents for much smaller sizes.
- *
- * Only perform this check for dirty extents (delay_coalesce).
- */
- if (extents->delay_coalesce &&
- (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
- break;
- }
- if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
- ret = extent;
- }
- if (i == SC_NPSIZES) {
- break;
- }
- assert(i < SC_NPSIZES);
- }
-
- return ret;
-}
-
-/*
- * Do first-fit extent selection, where the selection policy choice is
- * based on extents->delay_coalesce.
- */
-static extent_t *
-extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- size_t esize, size_t alignment) {
- malloc_mutex_assert_owner(tsdn, &extents->mtx);
-
- size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
- /* Beware size_t wrap-around. */
- if (max_size < esize) {
- return NULL;
- }
-
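- /*
- * An extent of at least esize + PAGE_CEILING(alignment) - PAGE bytes
- * always contains an alignment-aligned subrange of esize bytes (the
- * first aligned address lies at most PAGE_CEILING(alignment) - PAGE
- * past the page-aligned base), so a plain size-based first fit of
- * max_size also satisfies the alignment requirement.
- */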
- extent_t *extent =
- extents_first_fit_locked(tsdn, arena, extents, max_size);
-
- if (alignment > PAGE && extent == NULL) {
- /*
- * max_size guarantees the alignment requirement but is rather
- * pessimistic. Next we try to satisfy the aligned allocation
- * with sizes in [esize, max_size).
- */
- extent = extents_fit_alignment(extents, esize, max_size,
- alignment);
- }
-
- return extent;
-}
-
-static bool
-extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent) {
- extent_state_set(extent, extent_state_active);
- bool coalesced;
- extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, extent, &coalesced, false);
- extent_state_set(extent, extents_state_get(extents));
-
- if (!coalesced) {
- return true;
- }
- extents_insert_locked(tsdn, extents, extent);
- return false;
-}
-
-extent_t *
-extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
- assert(size + pad != 0);
- assert(alignment != 0);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
- new_addr, size, pad, alignment, slab, szind, zero, commit, false);
- assert(extent == NULL || extent_dumpable_get(extent));
- return extent;
-}
-
-void
-extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *extent) {
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
- assert(extent_dumpable_get(extent));
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extent_addr_set(extent, extent_base_get(extent));
- extent_zeroed_set(extent, false);
-
- extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
-}
-
-extent_t *
-extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, size_t npages_min) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- malloc_mutex_lock(tsdn, &extents->mtx);
-
- /*
- * Get the LRU coalesced extent, if any. If coalescing was delayed,
- * the loop will iterate until the LRU extent is fully coalesced.
- */
- extent_t *extent;
- while (true) {
- /* Get the LRU extent, if any. */
- extent = extent_list_first(&extents->lru);
- if (extent == NULL) {
- goto label_return;
- }
- /* Check the eviction limit. */
- size_t extents_npages = atomic_load_zu(&extents->npages,
- ATOMIC_RELAXED);
- if (extents_npages <= npages_min) {
- extent = NULL;
- goto label_return;
- }
- extents_remove_locked(tsdn, extents, extent);
- if (!extents->delay_coalesce) {
- break;
- }
- /* Try to coalesce. */
- if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
- rtree_ctx, extents, extent)) {
- break;
- }
- /*
- * The LRU extent was just coalesced and the result placed in
- * the LRU at its neighbor's position. Start over.
- */
- }
-
- /*
- * Either mark the extent active or deregister it to protect against
- * concurrent operations.
- */
- switch (extents_state_get(extents)) {
- case extent_state_active:
- not_reached();
- case extent_state_dirty:
- case extent_state_muzzy:
- extent_state_set(extent, extent_state_active);
- break;
- case extent_state_retained:
- extent_deregister(tsdn, extent);
- break;
- default:
- not_reached();
- }
-
-label_return:
- malloc_mutex_unlock(tsdn, &extents->mtx);
- return extent;
-}
-
-/*
- * This can only happen when we fail to allocate a new extent struct (which
- * indicates OOM), e.g. when trying to split an existing extent.
- */
-static void
-extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *extent, bool growing_retained) {
- size_t sz = extent_size_get(extent);
- if (config_stats) {
- arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
- }
- /*
- * Leak extent after making sure its pages have already been purged, so
- * that this is only a virtual memory leak.
- */
- if (extents_state_get(extents) == extent_state_dirty) {
- if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
- extent, 0, sz, growing_retained)) {
- extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
- extent, 0, extent_size_get(extent),
- growing_retained);
- }
- }
- extent_dalloc(tsdn, arena, extent);
-}
-
-void
-extents_prefork(tsdn_t *tsdn, extents_t *extents) {
- malloc_mutex_prefork(tsdn, &extents->mtx);
-}
-
-void
-extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
- malloc_mutex_postfork_parent(tsdn, &extents->mtx);
-}
-
-void
-extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
- malloc_mutex_postfork_child(tsdn, &extents->mtx);
-}
-
-static void
-extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- extent_t *extent) {
- assert(extent_arena_get(extent) == arena);
- assert(extent_state_get(extent) == extent_state_active);
-
- extent_state_set(extent, extents_state_get(extents));
- extents_insert_locked(tsdn, extents, extent);
-}
-
-static void
-extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- extent_t *extent) {
- malloc_mutex_lock(tsdn, &extents->mtx);
- extent_deactivate_locked(tsdn, arena, extents, extent);
- malloc_mutex_unlock(tsdn, &extents->mtx);
-}
-
-static void
-extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- extent_t *extent) {
- assert(extent_arena_get(extent) == arena);
- assert(extent_state_get(extent) == extents_state_get(extents));
-
- extents_remove_locked(tsdn, extents, extent);
- extent_state_set(extent, extent_state_active);
-}
-
-static bool
-extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
- const extent_t *extent, bool dependent, bool init_missing,
- rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
- *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent), dependent, init_missing);
- if (!dependent && *r_elm_a == NULL) {
- return true;
- }
- assert(*r_elm_a != NULL);
-
- *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_last_get(extent), dependent, init_missing);
- if (!dependent && *r_elm_b == NULL) {
- return true;
- }
- assert(*r_elm_b != NULL);
-
- return false;
-}
-
-static void
-extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
- rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
- if (elm_b != NULL) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
- slab);
- }
-}
-
-static void
-extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
- szind_t szind) {
- assert(extent_slab_get(extent));
-
- /* Register interior. */
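- /*
- * The boundary (first and last) pages were already mapped when the
- * extent was registered, via extent_rtree_write_acquired(); only the
- * pages strictly in between need explicit rtree writes here.
- */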
- for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
- rtree_write(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
- LG_PAGE), extent, szind, true);
- }
-}
-
-static void
-extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
- cassert(config_prof);
- /* prof_gdump() requirement. */
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- if (opt_prof && extent_state_get(extent) == extent_state_active) {
- size_t nadd = extent_size_get(extent) >> LG_PAGE;
- size_t cur = atomic_fetch_add_zu(&curpages, nadd,
- ATOMIC_RELAXED) + nadd;
- size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
- while (cur > high && !atomic_compare_exchange_weak_zu(
- &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
- /*
- * Don't refresh cur, because it may have decreased
- * since this thread lost the highpages update race.
- * Note that high is updated in case of CAS failure.
- */
- }
- if (cur > high && prof_gdump_get_unlocked()) {
- prof_gdump(tsdn);
- }
- }
-}
-
-static void
-extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
- cassert(config_prof);
-
- if (opt_prof && extent_state_get(extent) == extent_state_active) {
- size_t nsub = extent_size_get(extent) >> LG_PAGE;
- assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
- atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
- }
-}
-
-static bool
-extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *elm_a, *elm_b;
-
- /*
- * We need to hold the lock to protect against a concurrent coalesce
- * operation that sees us in a partial state.
- */
- extent_lock(tsdn, extent);
-
- if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
- &elm_a, &elm_b)) {
- extent_unlock(tsdn, extent);
- return true;
- }
-
- szind_t szind = extent_szind_get_maybe_invalid(extent);
- bool slab = extent_slab_get(extent);
- extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
- if (slab) {
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
- }
-
- extent_unlock(tsdn, extent);
-
- if (config_prof && gdump_add) {
- extent_gdump_add(tsdn, extent);
- }
-
- return false;
-}
-
-static bool
-extent_register(tsdn_t *tsdn, extent_t *extent) {
- return extent_register_impl(tsdn, extent, true);
-}
-
-static bool
-extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
- return extent_register_impl(tsdn, extent, false);
-}
-
-static void
-extent_reregister(tsdn_t *tsdn, extent_t *extent) {
- bool err = extent_register(tsdn, extent);
- assert(!err);
-}
-
-/*
- * Removes all pointers to the given extent from the global rtree indices for
- * its interior. This is relevant for slab extents, for which we need to do
- * metadata lookups at places other than the head of the extent. We therefore
- * deregister the interior when an extent moves from being an active slab to an
- * inactive state.
- */
-static void
-extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
- extent_t *extent) {
- size_t i;
-
- assert(extent_slab_get(extent));
-
- for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
- rtree_clear(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
- LG_PAGE));
- }
-}
-
-/*
- * Removes all pointers to the given extent from the global rtree.
- */
-static void
-extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *elm_a, *elm_b;
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
- &elm_a, &elm_b);
-
- extent_lock(tsdn, extent);
-
- extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
- if (extent_slab_get(extent)) {
- extent_interior_deregister(tsdn, rtree_ctx, extent);
- extent_slab_set(extent, false);
- }
-
- extent_unlock(tsdn, extent);
-
- if (config_prof && gdump) {
- extent_gdump_sub(tsdn, extent);
- }
-}
-
-static void
-extent_deregister(tsdn_t *tsdn, extent_t *extent) {
- extent_deregister_impl(tsdn, extent, true);
-}
-
-static void
-extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
- extent_deregister_impl(tsdn, extent, false);
-}
-
-/*
- * Tries to find and remove an extent from extents that can be used for the
- * given allocation request.
- */
-static extent_t *
-extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- bool growing_retained) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- assert(alignment > 0);
- if (config_debug && new_addr != NULL) {
- /*
- * Non-NULL new_addr has two use cases:
- *
- * 1) Recycle a known-extant extent, e.g. during purging.
- * 2) Perform in-place expanding reallocation.
- *
- * Regardless of use case, new_addr must either refer to a
- * non-existing extent, or to the base of an extant extent,
- * since only active slabs support interior lookups (which of
- * course cannot be recycled).
- */
- assert(PAGE_ADDR2BASE(new_addr) == new_addr);
- assert(pad == 0);
- assert(alignment <= PAGE);
- }
-
- size_t esize = size + pad;
- malloc_mutex_lock(tsdn, &extents->mtx);
- extent_hooks_assure_initialized(arena, r_extent_hooks);
- extent_t *extent;
- if (new_addr != NULL) {
- extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
- false);
- if (extent != NULL) {
- /*
- * We might null-out extent to report an error, but we
- * still need to unlock the associated mutex after.
- */
- extent_t *unlock_extent = extent;
- assert(extent_base_get(extent) == new_addr);
- if (extent_arena_get(extent) != arena ||
- extent_size_get(extent) < esize ||
- extent_state_get(extent) !=
- extents_state_get(extents)) {
- extent = NULL;
- }
- extent_unlock(tsdn, unlock_extent);
- }
- } else {
- extent = extents_fit_locked(tsdn, arena, extents, esize,
- alignment);
- }
- if (extent == NULL) {
- malloc_mutex_unlock(tsdn, &extents->mtx);
- return NULL;
- }
-
- extent_activate_locked(tsdn, arena, extents, extent);
- malloc_mutex_unlock(tsdn, &extents->mtx);
-
- return extent;
-}
-
-/*
- * Given an allocation request and an extent guaranteed to be able to satisfy
- * it, this splits off lead and trail extents, leaving extent pointing to an
- * extent satisfying the allocation.
- * This function doesn't put lead or trail into any extents_t; it's the caller's
- * job to ensure that they can be reused.
- */
-typedef enum {
- /*
- * Split successfully. lead, extent, and trail, are modified to extents
- * describing the ranges before, in, and after the given allocation.
- */
- extent_split_interior_ok,
- /*
- * The extent can't satisfy the given allocation request. None of the
- * input extent_t *s are touched.
- */
- extent_split_interior_cant_alloc,
- /*
- * In a potentially invalid state. Must leak (if *to_leak is non-NULL),
- * and salvage what's still salvageable (if *to_salvage is non-NULL).
- * None of lead, extent, or trail are valid.
- */
- extent_split_interior_error
-} extent_split_interior_result_t;
-
-static extent_split_interior_result_t
-extent_split_interior(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
- /* The result of splitting, in case of success. */
- extent_t **extent, extent_t **lead, extent_t **trail,
- /* The mess to clean up, in case of error. */
- extent_t **to_leak, extent_t **to_salvage,
- void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- szind_t szind, bool growing_retained) {
- size_t esize = size + pad;
- size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
- PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
- assert(new_addr == NULL || leadsize == 0);
- if (extent_size_get(*extent) < leadsize + esize) {
- return extent_split_interior_cant_alloc;
- }
- size_t trailsize = extent_size_get(*extent) - leadsize - esize;
-
- *lead = NULL;
- *trail = NULL;
- *to_leak = NULL;
- *to_salvage = NULL;
-
- /* Split the lead. */
- if (leadsize != 0) {
- *lead = *extent;
- *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
- *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
- slab, growing_retained);
- if (*extent == NULL) {
- *to_leak = *lead;
- *lead = NULL;
- return extent_split_interior_error;
- }
- }
-
- /* Split the trail. */
- if (trailsize != 0) {
- *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
- esize, szind, slab, trailsize, SC_NSIZES, false,
- growing_retained);
- if (*trail == NULL) {
- *to_leak = *extent;
- *to_salvage = *lead;
- *lead = NULL;
- *extent = NULL;
- return extent_split_interior_error;
- }
- }
-
- if (leadsize == 0 && trailsize == 0) {
- /*
- * Splitting causes szind to be set as a side effect, but no
- * splitting occurred.
- */
- extent_szind_set(*extent, szind);
- if (szind != SC_NSIZES) {
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_addr_get(*extent), szind, slab);
- if (slab && extent_size_get(*extent) > PAGE) {
- rtree_szind_slab_update(tsdn, &extents_rtree,
- rtree_ctx,
- (uintptr_t)extent_past_get(*extent) -
- (uintptr_t)PAGE, szind, slab);
- }
- }
- }
-
- return extent_split_interior_ok;
-}
-
-/*
- * This fulfills the indicated allocation request out of the given extent (which
- * the caller should have ensured was big enough). If there's any unused space
- * before or after the resulting allocation, that space is given its own extent
- * and put back into extents.
- */
-static extent_t *
-extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- szind_t szind, extent_t *extent, bool growing_retained) {
- extent_t *lead;
- extent_t *trail;
- extent_t *to_leak;
- extent_t *to_salvage;
-
- extent_split_interior_result_t result = extent_split_interior(
- tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
- &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
- growing_retained);
-
- if (!maps_coalesce && result != extent_split_interior_ok
- && !opt_retain) {
- /*
- * Split isn't supported (implies Windows w/o retain). Avoid
- * leaking the extents.
- */
- assert(to_leak != NULL && lead == NULL && trail == NULL);
- extent_deactivate(tsdn, arena, extents, to_leak);
- return NULL;
- }
-
- if (result == extent_split_interior_ok) {
- if (lead != NULL) {
- extent_deactivate(tsdn, arena, extents, lead);
- }
- if (trail != NULL) {
- extent_deactivate(tsdn, arena, extents, trail);
- }
- return extent;
- } else {
- /*
- * We should have picked an extent that was large enough to
- * fulfill our allocation request.
- */
- assert(result == extent_split_interior_error);
- if (to_salvage != NULL) {
- extent_deregister(tsdn, to_salvage);
- }
- if (to_leak != NULL) {
- void *leak = extent_base_get(to_leak);
- extent_deregister_no_gdump_sub(tsdn, to_leak);
- extents_abandon_vm(tsdn, arena, r_extent_hooks, extents,
- to_leak, growing_retained);
- assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
- false) == NULL);
- }
- return NULL;
- }
- unreachable();
-}
-
-static bool
-extent_need_manual_zero(arena_t *arena) {
- /*
- * Need to manually zero the extent on repopulating if either: 1) non-default
- * extent hooks are installed (in which case the purge semantics may change);
- * or 2) transparent huge pages are enabled.
- */
- return (!arena_has_default_hooks(arena) ||
- (opt_thp == thp_mode_always));
-}
-
-/*
- * Tries to satisfy the given allocation request by reusing one of the extents
- * in the given extents_t.
- */
-static extent_t *
-extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
- bool growing_retained) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- assert(new_addr == NULL || !slab);
- assert(pad == 0 || !slab);
- assert(!*zero || !slab);
-
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
- rtree_ctx, extents, new_addr, size, pad, alignment, slab,
- growing_retained);
- if (extent == NULL) {
- return NULL;
- }
-
- extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, new_addr, size, pad, alignment, slab, szind, extent,
- growing_retained);
- if (extent == NULL) {
- return NULL;
- }
-
- if (*commit && !extent_committed_get(extent)) {
- if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
- 0, extent_size_get(extent), growing_retained)) {
- extent_record(tsdn, arena, r_extent_hooks, extents,
- extent, growing_retained);
- return NULL;
- }
- if (!extent_need_manual_zero(arena)) {
- extent_zeroed_set(extent, true);
- }
- }
-
- if (extent_committed_get(extent)) {
- *commit = true;
- }
- if (extent_zeroed_get(extent)) {
- *zero = true;
- }
-
- if (pad != 0) {
- extent_addr_randomize(tsdn, extent, alignment);
- }
- assert(extent_state_get(extent) == extent_state_active);
- if (slab) {
- extent_slab_set(extent, slab);
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
- }
-
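- /*
- * Satisfy a zeroed request: if the extent is not known to be zeroed,
- * zero it now, via memset() when manual zeroing is required or the
- * forced purge reports failure. In debug builds, spot-check the first
- * page of extents that claim to be zeroed already.
- */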
- if (*zero) {
- void *addr = extent_base_get(extent);
- if (!extent_zeroed_get(extent)) {
- size_t size = extent_size_get(extent);
- if (extent_need_manual_zero(arena) ||
- pages_purge_forced(addr, size)) {
- memset(addr, 0, size);
- }
- } else if (config_debug) {
- size_t *p = (size_t *)(uintptr_t)addr;
- /* Check the first page only. */
- for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
- assert(p[i] == 0);
- }
- }
- }
- return extent;
-}
-
-/*
- * If the caller specifies (!*zero), it is still possible to receive zeroed
- * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
- * advantage of this to avoid demanding zeroed extents, but taking advantage of
- * them if they are returned.
- */
-static void *
-extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
- void *ret;
-
- assert(size != 0);
- assert(alignment != 0);
-
- /* "primary" dss. */
- if (have_dss && dss_prec == dss_prec_primary && (ret =
- extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL) {
- return ret;
- }
- /* mmap. */
- if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
- != NULL) {
- return ret;
- }
- /* "secondary" dss. */
- if (have_dss && dss_prec == dss_prec_secondary && (ret =
- extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL) {
- return ret;
- }
-
- /* All strategies for allocation failed. */
- return NULL;
-}
-
-static void *
-extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit) {
- void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
- commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
- ATOMIC_RELAXED));
- if (have_madvise_huge && ret) {
- pages_set_thp_state(ret, size);
- }
- return ret;
-}
-
-static void *
-extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
- tsdn_t *tsdn;
- arena_t *arena;
-
- tsdn = tsdn_fetch();
- arena = arena_get(tsdn, arena_ind, false);
- /*
- * The arena we're allocating on behalf of must have been initialized
- * already.
- */
- assert(arena != NULL);
-
- return extent_alloc_default_impl(tsdn, arena, new_addr, size,
- ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
-}
-
-static void
-extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
- /*
- * The only legitimate case of customized extent hooks for a0 is
- * hooks with no allocation activities. One such example is to
- * place metadata on pre-allocated resources such as huge pages.
- * In that case, rely on reentrancy_level checks to catch
- * infinite recursions.
- */
- pre_reentrancy(tsd, NULL);
- } else {
- pre_reentrancy(tsd, arena);
- }
-}
-
-static void
-extent_hook_post_reentrancy(tsdn_t *tsdn) {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- post_reentrancy(tsd);
-}
-
-/*
- * If virtual memory is retained, create increasingly larger extents from which
- * to split requested extents in order to limit the total number of disjoint
- * virtual memory ranges retained by each arena.
- */
-static extent_t *
-extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
- bool slab, szind_t szind, bool *zero, bool *commit) {
- malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
- assert(pad == 0 || !slab);
- assert(!*zero || !slab);
-
- size_t esize = size + pad;
- size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
- /* Beware size_t wrap-around. */
- if (alloc_size_min < esize) {
- goto label_err;
- }
- /*
- * Find the next extent size in the series that would be large enough to
- * satisfy this request.
- */
- pszind_t egn_skip = 0;
- size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
- while (alloc_size < alloc_size_min) {
- egn_skip++;
- if (arena->extent_grow_next + egn_skip >=
- sz_psz2ind(SC_LARGE_MAXCLASS)) {
- /* Outside legal range. */
- goto label_err;
- }
- alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
- }
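- /*
- * extent_grow_next indexes the psz series, so successive retained
- * allocations come from an increasing series of sizes; egn_skip counts
- * how many classes had to be skipped to fit this request, and
- * extent_grow_next is advanced past them (capped at retain_grow_limit)
- * once the allocation can no longer fail.
- */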
-
- extent_t *extent = extent_alloc(tsdn, arena);
- if (extent == NULL) {
- goto label_err;
- }
- bool zeroed = false;
- bool committed = false;
-
- void *ptr;
- if (*r_extent_hooks == &extent_hooks_default) {
- ptr = extent_alloc_default_impl(tsdn, arena, NULL,
- alloc_size, PAGE, &zeroed, &committed);
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
- alloc_size, PAGE, &zeroed, &committed,
- arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
- }
-
- extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
- arena_extent_sn_next(arena), extent_state_active, zeroed,
- committed, true, EXTENT_IS_HEAD);
- if (ptr == NULL) {
- extent_dalloc(tsdn, arena, extent);
- goto label_err;
- }
-
- if (extent_register_no_gdump_add(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
- goto label_err;
- }
-
- if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
- *zero = true;
- }
- if (extent_committed_get(extent)) {
- *commit = true;
- }
-
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- extent_t *lead;
- extent_t *trail;
- extent_t *to_leak;
- extent_t *to_salvage;
- extent_split_interior_result_t result = extent_split_interior(
- tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
- &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
- true);
-
- if (result == extent_split_interior_ok) {
- if (lead != NULL) {
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, lead, true);
- }
- if (trail != NULL) {
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, trail, true);
- }
- } else {
- /*
- * We should have allocated a sufficiently large extent; the
- * cant_alloc case should not occur.
- */
- assert(result == extent_split_interior_error);
- if (to_salvage != NULL) {
- if (config_prof) {
- extent_gdump_add(tsdn, to_salvage);
- }
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, to_salvage, true);
- }
- if (to_leak != NULL) {
- extent_deregister_no_gdump_sub(tsdn, to_leak);
- extents_abandon_vm(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, to_leak, true);
- }
- goto label_err;
- }
-
- if (*commit && !extent_committed_get(extent)) {
- if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
- extent_size_get(extent), true)) {
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, extent, true);
- goto label_err;
- }
- if (!extent_need_manual_zero(arena)) {
- extent_zeroed_set(extent, true);
- }
- }
-
- /*
- * Increment extent_grow_next if doing so wouldn't exceed the allowed
- * range.
- */
- if (arena->extent_grow_next + egn_skip + 1 <=
- arena->retain_grow_limit) {
- arena->extent_grow_next += egn_skip + 1;
- } else {
- arena->extent_grow_next = arena->retain_grow_limit;
- }
- /* All opportunities for failure are past. */
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
-
- if (config_prof) {
- /* Adjust gdump stats now that extent is final size. */
- extent_gdump_add(tsdn, extent);
- }
- if (pad != 0) {
- extent_addr_randomize(tsdn, extent, alignment);
- }
- if (slab) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
- &rtree_ctx_fallback);
-
- extent_slab_set(extent, true);
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
- }
- if (*zero && !extent_zeroed_get(extent)) {
- void *addr = extent_base_get(extent);
- size_t size = extent_size_get(extent);
- if (extent_need_manual_zero(arena) ||
- pages_purge_forced(addr, size)) {
- memset(addr, 0, size);
- }
- }
-
- return extent;
-label_err:
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
- return NULL;
-}
-
-static extent_t *
-extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
- assert(size != 0);
- assert(alignment != 0);
-
- malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
-
- extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, new_addr, size, pad, alignment, slab,
- szind, zero, commit, true);
- if (extent != NULL) {
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
- if (config_prof) {
- extent_gdump_add(tsdn, extent);
- }
- } else if (opt_retain && new_addr == NULL) {
- extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
- pad, alignment, slab, szind, zero, commit);
- /* extent_grow_retained() always releases extent_grow_mtx. */
- } else {
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
- }
- malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
-
- return extent;
-}
-
-static extent_t *
-extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
- size_t esize = size + pad;
- extent_t *extent = extent_alloc(tsdn, arena);
- if (extent == NULL) {
- return NULL;
- }
- void *addr;
- size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
- palignment, zero, commit);
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
- esize, palignment, zero, commit, arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
- }
- if (addr == NULL) {
- extent_dalloc(tsdn, arena, extent);
- return NULL;
- }
- extent_init(extent, arena, addr, esize, slab, szind,
- arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
- true, EXTENT_NOT_HEAD);
- if (pad != 0) {
- extent_addr_randomize(tsdn, extent, alignment);
- }
- if (extent_register(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
- return NULL;
- }
-
- return extent;
-}
-
-extent_t *
-extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
- new_addr, size, pad, alignment, slab, szind, zero, commit);
- if (extent == NULL) {
- if (opt_retain && new_addr != NULL) {
- /*
- * When retain is enabled and new_addr is set, we do not
- * attempt extent_alloc_wrapper_hard, whose mmap at new_addr
- * is very unlikely to succeed (unless new_addr happens to be
- * at the end of an existing mapping).
- */
- return NULL;
- }
- extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
- new_addr, size, pad, alignment, slab, szind, zero, commit);
- }
-
- assert(extent == NULL || extent_dumpable_get(extent));
- return extent;
-}
-
-static bool
-extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
- const extent_t *outer) {
- assert(extent_arena_get(inner) == arena);
- if (extent_arena_get(outer) != arena) {
- return false;
- }
-
- assert(extent_state_get(inner) == extent_state_active);
- if (extent_state_get(outer) != extents->state) {
- return false;
- }
-
- if (extent_committed_get(inner) != extent_committed_get(outer)) {
- return false;
- }
-
- return true;
-}
-
-static bool
-extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
- bool growing_retained) {
- assert(extent_can_coalesce(arena, extents, inner, outer));
-
- extent_activate_locked(tsdn, arena, extents, outer);
-
- malloc_mutex_unlock(tsdn, &extents->mtx);
- bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
- forward ? inner : outer, forward ? outer : inner, growing_retained);
- malloc_mutex_lock(tsdn, &extents->mtx);
-
- if (err) {
- extent_deactivate_locked(tsdn, arena, extents, outer);
- }
-
- return err;
-}
-
-static extent_t *
-extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained,
- bool inactive_only) {
- /*
- * We avoid checking / locking inactive neighbors for large size
- * classes, since they are eagerly coalesced on deallocation which can
- * cause lock contention.
- */
- /*
- * Continue attempting to coalesce until failure, to protect against
- * races with other threads that are thwarted by this one.
- */
- bool again;
- do {
- again = false;
-
- /* Try to coalesce forward. */
- extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
- extent_past_get(extent), inactive_only);
- if (next != NULL) {
- /*
- * extents->mtx only protects against races for
- * like-state extents, so call extent_can_coalesce()
- * before releasing next's pool lock.
- */
- bool can_coalesce = extent_can_coalesce(arena, extents,
- extent, next);
-
- extent_unlock(tsdn, next);
-
- if (can_coalesce && !extent_coalesce(tsdn, arena,
- r_extent_hooks, extents, extent, next, true,
- growing_retained)) {
- if (extents->delay_coalesce) {
- /* Do minimal coalescing. */
- *coalesced = true;
- return extent;
- }
- again = true;
- }
- }
-
- /* Try to coalesce backward. */
- extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
- extent_before_get(extent), inactive_only);
- if (prev != NULL) {
- bool can_coalesce = extent_can_coalesce(arena, extents,
- extent, prev);
- extent_unlock(tsdn, prev);
-
- if (can_coalesce && !extent_coalesce(tsdn, arena,
- r_extent_hooks, extents, extent, prev, false,
- growing_retained)) {
- extent = prev;
- if (extents->delay_coalesce) {
- /* Do minimal coalescing. */
- *coalesced = true;
- return extent;
- }
- again = true;
- }
- }
- } while (again);
-
- if (extents->delay_coalesce) {
- *coalesced = false;
- }
- return extent;
-}
-
-static extent_t *
-extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained) {
- return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, extent, coalesced, growing_retained, false);
-}
-
-static extent_t *
-extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained) {
- return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, extent, coalesced, growing_retained, true);
-}
-
-/*
- * Handles the metadata management portion of putting an unused extent into the
- * given extents_t (coalescing, deregistering slab interiors, the heap
- * operations).
- */
-static void
-extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *extent, bool growing_retained) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- assert((extents_state_get(extents) != extent_state_dirty &&
- extents_state_get(extents) != extent_state_muzzy) ||
- !extent_zeroed_get(extent));
-
- malloc_mutex_lock(tsdn, &extents->mtx);
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- extent_szind_set(extent, SC_NSIZES);
- if (extent_slab_get(extent)) {
- extent_interior_deregister(tsdn, rtree_ctx, extent);
- extent_slab_set(extent, false);
- }
-
- assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent), true) == extent);
-
- if (!extents->delay_coalesce) {
- extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
- rtree_ctx, extents, extent, NULL, growing_retained);
- } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
- assert(extents == &arena->extents_dirty);
- /* Always coalesce large extents eagerly. */
- bool coalesced;
- do {
- assert(extent_state_get(extent) == extent_state_active);
- extent = extent_try_coalesce_large(tsdn, arena,
- r_extent_hooks, rtree_ctx, extents, extent,
- &coalesced, growing_retained);
- } while (coalesced);
- if (extent_size_get(extent) >= oversize_threshold) {
- /* Shortcut to purge the oversize extent eagerly. */
- malloc_mutex_unlock(tsdn, &extents->mtx);
- arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
- return;
- }
- }
- extent_deactivate_locked(tsdn, arena, extents, extent);
-
- malloc_mutex_unlock(tsdn, &extents->mtx);
-}
-
-void
-extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- if (extent_register(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
- return;
- }
- extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
-}
-
-static bool
-extent_may_dalloc(void) {
- /* With retain enabled, the default dalloc always fails. */
- return !opt_retain;
-}
-
-static bool
-extent_dalloc_default_impl(void *addr, size_t size) {
- if (!have_dss || !extent_in_dss(addr)) {
- return extent_dalloc_mmap(addr, size);
- }
- return true;
-}
-
-static bool
-extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- bool committed, unsigned arena_ind) {
- return extent_dalloc_default_impl(addr, size);
-}
-
-static bool
-extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
- bool err;
-
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extent_addr_set(extent, extent_base_get(extent));
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
- /* Try to deallocate. */
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- err = extent_dalloc_default_impl(extent_base_get(extent),
- extent_size_get(extent));
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- err = ((*r_extent_hooks)->dalloc == NULL ||
- (*r_extent_hooks)->dalloc(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent),
- extent_committed_get(extent), arena_ind_get(arena)));
- extent_hook_post_reentrancy(tsdn);
- }
-
- if (!err) {
- extent_dalloc(tsdn, arena, extent);
- }
-
- return err;
-}
-
-void
-extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
- assert(extent_dumpable_get(extent));
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- /* Avoid calling the default extent_dalloc unless we have to. */
- if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
- /*
- * Deregister first to avoid a race with other allocating
- * threads, and reregister if deallocation fails.
- */
- extent_deregister(tsdn, extent);
- if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
- extent)) {
- return;
- }
- extent_reregister(tsdn, extent);
- }
-
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- /* Try to decommit; purge if that fails. */
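- /*
- * Track whether the pages will read back as zero: the extent is
- * recorded as zeroed when it is uncommitted, decommit succeeds, or a
- * forced purge succeeds; lazy purge and the muzzy state give no such
- * guarantee.
- */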
- bool zeroed;
- if (!extent_committed_get(extent)) {
- zeroed = true;
- } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
- 0, extent_size_get(extent))) {
- zeroed = true;
- } else if ((*r_extent_hooks)->purge_forced != NULL &&
- !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), 0,
- extent_size_get(extent), arena_ind_get(arena))) {
- zeroed = true;
- } else if (extent_state_get(extent) == extent_state_muzzy ||
- ((*r_extent_hooks)->purge_lazy != NULL &&
- !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), 0,
- extent_size_get(extent), arena_ind_get(arena)))) {
- zeroed = false;
- } else {
- zeroed = false;
- }
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
- extent_zeroed_set(extent, zeroed);
-
- if (config_prof) {
- extent_gdump_sub(tsdn, extent);
- }
-
- extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
- extent, false);
-}
-
-static void
-extent_destroy_default_impl(void *addr, size_t size) {
- if (!have_dss || !extent_in_dss(addr)) {
- pages_unmap(addr, size);
- }
-}
-
-static void
-extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- bool committed, unsigned arena_ind) {
- extent_destroy_default_impl(addr, size);
-}
-
-void
-extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- /* Deregister first to avoid a race with other allocating threads. */
- extent_deregister(tsdn, extent);
-
- extent_addr_set(extent, extent_base_get(extent));
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
- /* Try to destroy; silently fail otherwise. */
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- extent_destroy_default_impl(extent_base_get(extent),
- extent_size_get(extent));
- } else if ((*r_extent_hooks)->destroy != NULL) {
- extent_hook_pre_reentrancy(tsdn, arena);
- (*r_extent_hooks)->destroy(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent),
- extent_committed_get(extent), arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
- }
-
- extent_dalloc(tsdn, arena, extent);
-}
-
-static bool
-extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind) {
- return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
- length);
-}
-
-static bool
-extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = ((*r_extent_hooks)->commit == NULL ||
- (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
- extent_size_get(extent), offset, length, arena_ind_get(arena)));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
- extent_committed_set(extent, extent_committed_get(extent) || !err);
- return err;
-}
-
-bool
-extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
- return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
- length, false);
-}
-
-static bool
-extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind) {
- return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
- length);
-}
-
-bool
-extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = ((*r_extent_hooks)->decommit == NULL ||
- (*r_extent_hooks)->decommit(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), offset, length,
- arena_ind_get(arena)));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
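- /*
- * A successful decommit clears the committed flag; on failure the
- * committed state is left unchanged.
- */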
- extent_committed_set(extent, extent_committed_get(extent) && err);
- return err;
-}
-
-#ifdef PAGES_CAN_PURGE_LAZY
-static bool
-extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind) {
- assert(addr != NULL);
- assert((offset & PAGE_MASK) == 0);
- assert(length != 0);
- assert((length & PAGE_MASK) == 0);
-
- return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
- length);
-}
-#endif
-
-static bool
-extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if ((*r_extent_hooks)->purge_lazy == NULL) {
- return true;
- }
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), offset, length,
- arena_ind_get(arena));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
-
- return err;
-}
-
-bool
-extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
- return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
- offset, length, false);
-}
-
-#ifdef PAGES_CAN_PURGE_FORCED
-static bool
-extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t offset, size_t length, unsigned arena_ind) {
- assert(addr != NULL);
- assert((offset & PAGE_MASK) == 0);
- assert(length != 0);
- assert((length & PAGE_MASK) == 0);
-
- return pages_purge_forced((void *)((uintptr_t)addr +
- (uintptr_t)offset), length);
-}
-#endif
-
-static bool
-extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if ((*r_extent_hooks)->purge_forced == NULL) {
- return true;
- }
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), offset, length,
- arena_ind_get(arena));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
- return err;
-}
-
-bool
-extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
- return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
- offset, length, false);
-}
-
-static bool
-extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
- if (!maps_coalesce) {
- /*
- * Without retain, only whole regions can be purged (required by
- * MEM_RELEASE on Windows) -- therefore disallow splitting. See
- * comments in extent_head_no_merge().
- */
- return !opt_retain;
- }
-
- return false;
-}
-
-/*
- * Accepts the extent to split, and the characteristics of each side of the
- * split. The 'a' parameters go with the 'lead' of the resulting pair of
- * extents (the lower addressed portion of the split), and the 'b' parameters go
- * with the trail (the higher addressed portion). This makes 'extent' the lead,
- * and returns the trail (except in case of error).
- */
-static extent_t *
-extent_split_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
- bool growing_retained) {
- assert(extent_size_get(extent) == size_a + size_b);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if ((*r_extent_hooks)->split == NULL) {
- return NULL;
- }
-
- extent_t *trail = extent_alloc(tsdn, arena);
- if (trail == NULL) {
- goto label_error_a;
- }
-
- extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
- size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
- extent_state_get(extent), extent_zeroed_get(extent),
- extent_committed_get(extent), extent_dumpable_get(extent),
- EXTENT_NOT_HEAD);
-
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
- {
- extent_t lead;
-
- extent_init(&lead, arena, extent_addr_get(extent), size_a,
- slab_a, szind_a, extent_sn_get(extent),
- extent_state_get(extent), extent_zeroed_get(extent),
- extent_committed_get(extent), extent_dumpable_get(extent),
- EXTENT_NOT_HEAD);
-
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
- true, &lead_elm_a, &lead_elm_b);
- }
- rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
- &trail_elm_a, &trail_elm_b);
-
- if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
- || trail_elm_b == NULL) {
- goto label_error_b;
- }
-
- extent_lock2(tsdn, extent, trail);
-
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
- size_a + size_b, size_a, size_b, extent_committed_get(extent),
- arena_ind_get(arena));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
- if (err) {
- goto label_error_c;
- }
-
- extent_size_set(extent, size_a);
- extent_szind_set(extent, szind_a);
-
- extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
- szind_a, slab_a);
- extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
- szind_b, slab_b);
-
- extent_unlock2(tsdn, extent, trail);
-
- return trail;
-label_error_c:
- extent_unlock2(tsdn, extent, trail);
-label_error_b:
- extent_dalloc(tsdn, arena, trail);
-label_error_a:
- return NULL;
-}
-
-extent_t *
-extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
- return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
- szind_a, slab_a, size_b, szind_b, slab_b, false);
-}
-
-static bool
-extent_merge_default_impl(void *addr_a, void *addr_b) {
- if (!maps_coalesce && !opt_retain) {
- return true;
- }
- if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
- return true;
- }
-
- return false;
-}
-
-/*
- * Returns true if the given extents can't be merged because of their head bit
- * settings. Assumes the second extent has the higher address.
- */
-static bool
-extent_head_no_merge(extent_t *a, extent_t *b) {
- assert(extent_base_get(a) < extent_base_get(b));
- /*
- * When coalesce is not always allowed (Windows), only merge extents
- * from the same VirtualAlloc region under opt.retain (in which case
- * MEM_DECOMMIT is utilized for purging).
- */
- if (maps_coalesce) {
- return false;
- }
- if (!opt_retain) {
- return true;
- }
- /* If b is a head extent, disallow the cross-region merge. */
- if (extent_is_head_get(b)) {
- /*
- * Additionally, sn should not overflow with retain; sanity
- * check that different regions have unique sn.
- */
- assert(extent_sn_comp(a, b) != 0);
- return true;
- }
- assert(extent_sn_comp(a, b) == 0);
-
- return false;
-}
-
-static bool
-extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
- if (!maps_coalesce) {
- tsdn_t *tsdn = tsdn_fetch();
- extent_t *a = iealloc(tsdn, addr_a);
- extent_t *b = iealloc(tsdn, addr_b);
- if (extent_head_no_merge(a, b)) {
- return true;
- }
- }
- return extent_merge_default_impl(addr_a, addr_b);
-}
-
-static bool
-extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
- bool growing_retained) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- assert(extent_base_get(a) < extent_base_get(b));
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
- return true;
- }
-
- bool err;
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- err = extent_merge_default_impl(extent_base_get(a),
- extent_base_get(b));
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- err = (*r_extent_hooks)->merge(*r_extent_hooks,
- extent_base_get(a), extent_size_get(a), extent_base_get(b),
- extent_size_get(b), extent_committed_get(a),
- arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
- }
-
- if (err) {
- return true;
- }
-
- /*
- * The rtree writes must happen while all the relevant elements are
- * owned, so the following code uses decomposed helper functions rather
- * than extent_{,de}register() to do things in the right order.
- */
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
- &a_elm_b);
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
- &b_elm_b);
-
- extent_lock2(tsdn, a, b);
-
- if (a_elm_b != NULL) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
- SC_NSIZES, false);
- }
- if (b_elm_b != NULL) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
- SC_NSIZES, false);
- } else {
- b_elm_b = b_elm_a;
- }
-
- extent_size_set(a, extent_size_get(a) + extent_size_get(b));
- extent_szind_set(a, SC_NSIZES);
- extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
- extent_sn_get(a) : extent_sn_get(b));
- extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
-
- extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
- false);
-
- extent_unlock2(tsdn, a, b);
-
- extent_dalloc(tsdn, extent_arena_get(b), b);
-
- return false;
-}
-
-bool
-extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
- return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
-}
-
-bool
-extent_boot(void) {
- if (rtree_new(&extents_rtree, true)) {
- return true;
- }
-
- if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
- WITNESS_RANK_EXTENT_POOL)) {
- return true;
- }
-
- if (have_dss) {
- extent_dss_boot();
- }
-
- return false;
-}
-
-void
-extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size) {
- assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
-
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(extent == NULL)) {
- *nfree = *nregs = *size = 0;
- return;
- }
-
- *size = extent_size_get(extent);
- if (!extent_slab_get(extent)) {
- *nfree = 0;
- *nregs = 1;
- } else {
- *nfree = extent_nfree_get(extent);
- *nregs = bin_infos[extent_szind_get(extent)].nregs;
- assert(*nfree <= *nregs);
- assert(*nfree * extent_usize_get(extent) <= *size);
- }
-}
-
-void
-extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size,
- size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
- assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
- && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
-
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(extent == NULL)) {
- *nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
- *slabcur_addr = NULL;
- return;
- }
-
- *size = extent_size_get(extent);
- if (!extent_slab_get(extent)) {
- *nfree = *bin_nfree = *bin_nregs = 0;
- *nregs = 1;
- *slabcur_addr = NULL;
- return;
- }
-
- *nfree = extent_nfree_get(extent);
- const szind_t szind = extent_szind_get(extent);
- *nregs = bin_infos[szind].nregs;
- assert(*nfree <= *nregs);
- assert(*nfree * extent_usize_get(extent) <= *size);
-
- const arena_t *arena = extent_arena_get(extent);
- assert(arena != NULL);
- const unsigned binshard = extent_binshard_get(extent);
- bin_t *bin = &arena->bins[szind].bin_shards[binshard];
-
- malloc_mutex_lock(tsdn, &bin->lock);
- if (config_stats) {
- *bin_nregs = *nregs * bin->stats.curslabs;
- assert(*bin_nregs >= bin->stats.curregs);
- *bin_nfree = *bin_nregs - bin->stats.curregs;
- } else {
- *bin_nfree = *bin_nregs = 0;
- }
- *slabcur_addr = extent_addr_get(bin->slabcur);
- assert(*slabcur_addr != NULL);
- malloc_mutex_unlock(tsdn, &bin->lock);
-}
+/* Data. */
+
+rtree_t extents_rtree;
+/* Keyed by the address of the extent_t being protected. */
+mutex_pool_t extent_mutex_pool;
+
+size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
+
+static const bitmap_info_t extents_bitmap_info =
+ BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
+
+static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit,
+ unsigned arena_ind);
+static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, bool committed, unsigned arena_ind);
+static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, bool committed, unsigned arena_ind);
+static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained);
+static bool extent_decommit_default(extent_hooks_t *extent_hooks,
+ void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
+#ifdef PAGES_CAN_PURGE_LAZY
+static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+#endif
+static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained);
+#ifdef PAGES_CAN_PURGE_FORCED
+static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
+ void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
+#endif
+static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained);
+static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t size_a, size_t size_b, bool committed,
+ unsigned arena_ind);
+static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
+ bool growing_retained);
+static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
+ size_t size_a, void *addr_b, size_t size_b, bool committed,
+ unsigned arena_ind);
+static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
+ bool growing_retained);
+
+const extent_hooks_t extent_hooks_default = {
+ extent_alloc_default,
+ extent_dalloc_default,
+ extent_destroy_default,
+ extent_commit_default,
+ extent_decommit_default
+#ifdef PAGES_CAN_PURGE_LAZY
+ ,
+ extent_purge_lazy_default
+#else
+ ,
+ NULL
+#endif
+#ifdef PAGES_CAN_PURGE_FORCED
+ ,
+ extent_purge_forced_default
+#else
+ ,
+ NULL
+#endif
+ ,
+ extent_split_default,
+ extent_merge_default
+};
+
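+/*
+ * Usage sketch (hypothetical consumer code, not part of this file): custom
+ * hooks can be installed for an arena through the public
+ * "arena.<i>.extent_hooks" mallctl, supplying callbacks with the signatures
+ * declared above; my_alloc, my_dalloc, etc. are placeholder user functions.
+ *
+ *	static extent_hooks_t my_hooks = {
+ *		my_alloc, my_dalloc, my_destroy, my_commit, my_decommit,
+ *		my_purge_lazy, my_purge_forced, my_split, my_merge
+ *	};
+ *	extent_hooks_t *new_hooks = &my_hooks, *old_hooks;
+ *	size_t sz = sizeof(old_hooks);
+ *	mallctl("arena.0.extent_hooks", &old_hooks, &sz, &new_hooks,
+ *	    sizeof(new_hooks));
+ */
+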
+/* Used exclusively for gdump triggering. */
+static atomic_zu_t curpages;
+static atomic_zu_t highpages;
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
+static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
+ size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
+ bool *zero, bool *commit, bool growing_retained);
+static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent, bool *coalesced, bool growing_retained);
+static void extent_record(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
+ bool growing_retained);
+
+/******************************************************************************/
+
+#define ATTR_NONE /* does nothing */
+
+ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
+ extent_esnead_comp)
+
+#undef ATTR_NONE
+
+typedef enum {
+ lock_result_success,
+ lock_result_failure,
+ lock_result_no_extent
+} lock_result_t;
+
+static lock_result_t
+extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
+ extent_t **result, bool inactive_only) {
+ extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
+ elm, true);
+
+	/* A slab bit implies an active extent; skip when inactive_only. */
+ if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
+ &extents_rtree, elm, true))) {
+ return lock_result_no_extent;
+ }
+
+ /*
+ * It's possible that the extent changed out from under us, and with it
+ * the leaf->extent mapping. We have to recheck while holding the lock.
+ */
+ extent_lock(tsdn, extent1);
+ extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
+ &extents_rtree, elm, true);
+
+ if (extent1 == extent2) {
+ *result = extent1;
+ return lock_result_success;
+ } else {
+ extent_unlock(tsdn, extent1);
+ return lock_result_failure;
+ }
+}
+
+/*
+ * Returns a pool-locked extent_t * if there's one associated with the given
+ * address, and NULL otherwise.
+ */
+static extent_t *
+extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
+ bool inactive_only) {
+ extent_t *ret = NULL;
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
+ rtree_ctx, (uintptr_t)addr, false, false);
+ if (elm == NULL) {
+ return NULL;
+ }
+ lock_result_t lock_result;
+ do {
+ lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
+ inactive_only);
+ } while (lock_result == lock_result_failure);
+ return ret;
+}
+
+extent_t *
+extent_alloc(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
+ extent_t *extent = extent_avail_first(&arena->extent_avail);
+ if (extent == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
+ return base_alloc_extent(tsdn, arena->base);
+ }
+ extent_avail_remove(&arena->extent_avail, extent);
+ atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
+ malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
+ return extent;
+}
+
+void
+extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
+ extent_avail_insert(&arena->extent_avail, extent);
+ atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
+ malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
+}
+
+extent_hooks_t *
+extent_hooks_get(arena_t *arena) {
+ return base_extent_hooks_get(arena->base);
+}
+
+extent_hooks_t *
+extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
+ background_thread_info_t *info;
+ if (have_background_thread) {
+ info = arena_background_thread_info_get(arena);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ }
+ extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
+ if (have_background_thread) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+
+ return ret;
+}
+
+static void
+extent_hooks_assure_initialized(arena_t *arena,
+ extent_hooks_t **r_extent_hooks) {
+ if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
+ *r_extent_hooks = extent_hooks_get(arena);
+ }
+}
+
+#ifndef JEMALLOC_JET
+static
+#endif
+size_t
+extent_size_quantize_floor(size_t size) {
+ size_t ret;
+ pszind_t pind;
+
+ assert(size > 0);
+ assert((size & PAGE_MASK) == 0);
+
+ pind = sz_psz2ind(size - sz_large_pad + 1);
+ if (pind == 0) {
+ /*
+ * Avoid underflow. This short-circuit would also do the right
+ * thing for all sizes in the range for which there are
+ * PAGE-spaced size classes, but it's simplest to just handle
+ * the one case that would cause erroneous results.
+ */
+ return size;
+ }
+ ret = sz_pind2sz(pind - 1) + sz_large_pad;
+ assert(ret <= size);
+ return ret;
+}
+
+#ifndef JEMALLOC_JET
+static
+#endif
+size_t
+extent_size_quantize_ceil(size_t size) {
+ size_t ret;
+
+ assert(size > 0);
+ assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
+
+ ret = extent_size_quantize_floor(size);
+ if (ret < size) {
+ /*
+ * Skip a quantization that may have an adequately large extent,
+ * because under-sized extents may be mixed in. This only
+ * happens when an unusual size is requested, i.e. for aligned
+ * allocation, and is just one of several places where linear
+ * search would potentially find sufficiently aligned available
+ * memory somewhere lower.
+ */
+ ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
+ sz_large_pad;
+ }
+ return ret;
+}
+
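+/*
+ * Worked example (a sketch assuming 4 KiB pages, sz_large_pad == 0, and the
+ * default page-size classes, which run ..., 32K, 40K, 48K, 56K, 64K, ...):
+ *
+ *	extent_size_quantize_floor(36 << 10) == 32 << 10
+ *	extent_size_quantize_ceil(36 << 10)  == 40 << 10
+ *	extent_size_quantize_floor(48 << 10) == 48 << 10	(exact class)
+ *	extent_size_quantize_ceil(48 << 10)  == 48 << 10
+ */
+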
+/* Generate pairing heap functions. */
+ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
+
+bool
+extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
+ bool delay_coalesce) {
+ if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
+ extent_heap_new(&extents->heaps[i]);
+ }
+ bitmap_init(extents->bitmap, &extents_bitmap_info, true);
+ extent_list_init(&extents->lru);
+ atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
+ extents->state = state;
+ extents->delay_coalesce = delay_coalesce;
+ return false;
+}
+
+extent_state_t
+extents_state_get(const extents_t *extents) {
+ return extents->state;
+}
+
+size_t
+extents_npages_get(extents_t *extents) {
+ return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
+}
+
+size_t
+extents_nextents_get(extents_t *extents, pszind_t pind) {
+ return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
+}
+
+size_t
+extents_nbytes_get(extents_t *extents, pszind_t pind) {
+ return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
+}
+
+static void
+extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
+ size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
+ atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
+ cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
+ atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
+}
+
+static void
+extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
+ size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
+ atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
+ cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
+ atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
+}
+
+static void
+extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
+ malloc_mutex_assert_owner(tsdn, &extents->mtx);
+ assert(extent_state_get(extent) == extents->state);
+
+ size_t size = extent_size_get(extent);
+ size_t psz = extent_size_quantize_floor(size);
+ pszind_t pind = sz_psz2ind(psz);
+ if (extent_heap_empty(&extents->heaps[pind])) {
+ bitmap_unset(extents->bitmap, &extents_bitmap_info,
+ (size_t)pind);
+ }
+ extent_heap_insert(&extents->heaps[pind], extent);
+
+ if (config_stats) {
+ extents_stats_add(extents, pind, size);
+ }
+
+ extent_list_append(&extents->lru, extent);
+ size_t npages = size >> LG_PAGE;
+ /*
+ * All modifications to npages hold the mutex (as asserted above), so we
+ * don't need an atomic fetch-add; we can get by with a load followed by
+ * a store.
+ */
+ size_t cur_extents_npages =
+ atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
+ atomic_store_zu(&extents->npages, cur_extents_npages + npages,
+ ATOMIC_RELAXED);
+}
+
+static void
+extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
+ malloc_mutex_assert_owner(tsdn, &extents->mtx);
+ assert(extent_state_get(extent) == extents->state);
+
+ size_t size = extent_size_get(extent);
+ size_t psz = extent_size_quantize_floor(size);
+ pszind_t pind = sz_psz2ind(psz);
+ extent_heap_remove(&extents->heaps[pind], extent);
+
+ if (config_stats) {
+ extents_stats_sub(extents, pind, size);
+ }
+
+ if (extent_heap_empty(&extents->heaps[pind])) {
+ bitmap_set(extents->bitmap, &extents_bitmap_info,
+ (size_t)pind);
+ }
+ extent_list_remove(&extents->lru, extent);
+ size_t npages = size >> LG_PAGE;
+ /*
+ * As in extents_insert_locked, we hold extents->mtx and so don't need
+ * atomic operations for updating extents->npages.
+ */
+ size_t cur_extents_npages =
+ atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
+ assert(cur_extents_npages >= npages);
+ atomic_store_zu(&extents->npages,
+ cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
+}
+
+/*
+ * Find an extent with size [min_size, max_size) to satisfy the alignment
+ * requirement. For each size, try only the first extent in the heap.
+ */
+static extent_t *
+extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
+ size_t alignment) {
+ pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
+ pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
+
+ for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
+ &extents_bitmap_info, (size_t)pind); i < pind_max; i =
+ (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
+ (size_t)i+1)) {
+ assert(i < SC_NPSIZES);
+ assert(!extent_heap_empty(&extents->heaps[i]));
+ extent_t *extent = extent_heap_first(&extents->heaps[i]);
+ uintptr_t base = (uintptr_t)extent_base_get(extent);
+ size_t candidate_size = extent_size_get(extent);
+ assert(candidate_size >= min_size);
+
+ uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
+ PAGE_CEILING(alignment));
+ if (base > next_align || base + candidate_size <= next_align) {
+ /* Overflow or not crossing the next alignment. */
+ continue;
+ }
+
+ size_t leadsize = next_align - base;
+ if (candidate_size - leadsize >= min_size) {
+ return extent;
+ }
+ }
+
+ return NULL;
+}
+
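+/*
+ * Arithmetic sketch for the fit test above, with made-up addresses: for an
+ * extent at base 0x201000 of size 0x6000 and alignment 0x4000, next_align is
+ * 0x204000 and leadsize is 0x3000, so the extent is usable iff
+ * min_size <= 0x6000 - 0x3000 = 0x3000.
+ */
+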
+/*
+ * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
+ * large enough.
+ */
+static extent_t *
+extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ size_t size) {
+ extent_t *ret = NULL;
+
+ pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
+
+ if (!maps_coalesce && !opt_retain) {
+ /*
+ * No split / merge allowed (Windows w/o retain). Try exact fit
+ * only.
+ */
+ return extent_heap_empty(&extents->heaps[pind]) ? NULL :
+ extent_heap_first(&extents->heaps[pind]);
+ }
+
+ for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
+ &extents_bitmap_info, (size_t)pind);
+ i < SC_NPSIZES + 1;
+ i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
+ (size_t)i+1)) {
+ assert(!extent_heap_empty(&extents->heaps[i]));
+ extent_t *extent = extent_heap_first(&extents->heaps[i]);
+ assert(extent_size_get(extent) >= size);
+ /*
+ * In order to reduce fragmentation, avoid reusing and splitting
+ * large extents for much smaller sizes.
+ *
+		 * Only perform this check for dirty extents (delay_coalesce).
+ */
+ if (extents->delay_coalesce &&
+ (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
+ break;
+ }
+ if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
+ ret = extent;
+ }
+ if (i == SC_NPSIZES) {
+ break;
+ }
+ assert(i < SC_NPSIZES);
+ }
+
+ return ret;
+}
+
+/*
+ * Do first-fit extent selection, where the selection policy choice is
+ * based on extents->delay_coalesce.
+ */
+static extent_t *
+extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ size_t esize, size_t alignment) {
+ malloc_mutex_assert_owner(tsdn, &extents->mtx);
+
+ size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
+ /* Beware size_t wrap-around. */
+ if (max_size < esize) {
+ return NULL;
+ }
+
+ extent_t *extent =
+ extents_first_fit_locked(tsdn, arena, extents, max_size);
+
+ if (alignment > PAGE && extent == NULL) {
+ /*
+ * max_size guarantees the alignment requirement but is rather
+ * pessimistic. Next we try to satisfy the aligned allocation
+ * with sizes in [esize, max_size).
+ */
+ extent = extents_fit_alignment(extents, esize, max_size,
+ alignment);
+ }
+
+ return extent;
+}
+
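+/*
+ * Sketch of the max_size bound computed above: for esize of 8 pages and an
+ * alignment of 4 pages, max_size is 8 + 4 - 1 = 11 pages; any 11-page extent
+ * necessarily contains an 8-page range that starts at a 4-page-aligned
+ * address, so a plain first fit of max_size already guarantees the alignment.
+ */
+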
+static bool
+extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent) {
+ extent_state_set(extent, extent_state_active);
+ bool coalesced;
+ extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
+ extents, extent, &coalesced, false);
+ extent_state_set(extent, extents_state_get(extents));
+
+ if (!coalesced) {
+ return true;
+ }
+ extents_insert_locked(tsdn, extents, extent);
+ return false;
+}
+
+extent_t *
+extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ assert(size + pad != 0);
+ assert(alignment != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
+ new_addr, size, pad, alignment, slab, szind, zero, commit, false);
+ assert(extent == NULL || extent_dumpable_get(extent));
+ return extent;
+}
+
+void
+extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *extent) {
+ assert(extent_base_get(extent) != NULL);
+ assert(extent_size_get(extent) != 0);
+ assert(extent_dumpable_get(extent));
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_addr_set(extent, extent_base_get(extent));
+ extent_zeroed_set(extent, false);
+
+ extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
+}
+
+extent_t *
+extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, size_t npages_min) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ malloc_mutex_lock(tsdn, &extents->mtx);
+
+ /*
+ * Get the LRU coalesced extent, if any. If coalescing was delayed,
+ * the loop will iterate until the LRU extent is fully coalesced.
+ */
+ extent_t *extent;
+ while (true) {
+ /* Get the LRU extent, if any. */
+ extent = extent_list_first(&extents->lru);
+ if (extent == NULL) {
+ goto label_return;
+ }
+ /* Check the eviction limit. */
+ size_t extents_npages = atomic_load_zu(&extents->npages,
+ ATOMIC_RELAXED);
+ if (extents_npages <= npages_min) {
+ extent = NULL;
+ goto label_return;
+ }
+ extents_remove_locked(tsdn, extents, extent);
+ if (!extents->delay_coalesce) {
+ break;
+ }
+ /* Try to coalesce. */
+ if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
+ rtree_ctx, extents, extent)) {
+ break;
+ }
+ /*
+ * The LRU extent was just coalesced and the result placed in
+ * the LRU at its neighbor's position. Start over.
+ */
+ }
+
+ /*
+ * Either mark the extent active or deregister it to protect against
+ * concurrent operations.
+ */
+ switch (extents_state_get(extents)) {
+ case extent_state_active:
+ not_reached();
+ case extent_state_dirty:
+ case extent_state_muzzy:
+ extent_state_set(extent, extent_state_active);
+ break;
+ case extent_state_retained:
+ extent_deregister(tsdn, extent);
+ break;
+ default:
+ not_reached();
+ }
+
+label_return:
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+ return extent;
+}
+
+/*
+ * This can only happen when we fail to allocate a new extent struct (which
+ * indicates OOM), e.g. when trying to split an existing extent.
+ */
+static void
+extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *extent, bool growing_retained) {
+ size_t sz = extent_size_get(extent);
+ if (config_stats) {
+ arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
+ }
+ /*
+ * Leak extent after making sure its pages have already been purged, so
+ * that this is only a virtual memory leak.
+ */
+ if (extents_state_get(extents) == extent_state_dirty) {
+ if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
+ extent, 0, sz, growing_retained)) {
+ extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
+ extent, 0, extent_size_get(extent),
+ growing_retained);
+ }
+ }
+ extent_dalloc(tsdn, arena, extent);
+}
+
+void
+extents_prefork(tsdn_t *tsdn, extents_t *extents) {
+ malloc_mutex_prefork(tsdn, &extents->mtx);
+}
+
+void
+extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
+ malloc_mutex_postfork_parent(tsdn, &extents->mtx);
+}
+
+void
+extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
+ malloc_mutex_postfork_child(tsdn, &extents->mtx);
+}
+
+static void
+extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ extent_t *extent) {
+ assert(extent_arena_get(extent) == arena);
+ assert(extent_state_get(extent) == extent_state_active);
+
+ extent_state_set(extent, extents_state_get(extents));
+ extents_insert_locked(tsdn, extents, extent);
+}
+
+static void
+extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ extent_t *extent) {
+ malloc_mutex_lock(tsdn, &extents->mtx);
+ extent_deactivate_locked(tsdn, arena, extents, extent);
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+}
+
+static void
+extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ extent_t *extent) {
+ assert(extent_arena_get(extent) == arena);
+ assert(extent_state_get(extent) == extents_state_get(extents));
+
+ extents_remove_locked(tsdn, extents, extent);
+ extent_state_set(extent, extent_state_active);
+}
+
+static bool
+extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
+ const extent_t *extent, bool dependent, bool init_missing,
+ rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
+ *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent), dependent, init_missing);
+ if (!dependent && *r_elm_a == NULL) {
+ return true;
+ }
+ assert(*r_elm_a != NULL);
+
+ *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_last_get(extent), dependent, init_missing);
+ if (!dependent && *r_elm_b == NULL) {
+ return true;
+ }
+ assert(*r_elm_b != NULL);
+
+ return false;
+}
+
+static void
+extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
+ rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
+ if (elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
+ slab);
+ }
+}
+
+static void
+extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
+ szind_t szind) {
+ assert(extent_slab_get(extent));
+
+ /* Register interior. */
+ for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+ rtree_write(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+ LG_PAGE), extent, szind, true);
+ }
+}
+
+static void
+extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
+ cassert(config_prof);
+ /* prof_gdump() requirement. */
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ if (opt_prof && extent_state_get(extent) == extent_state_active) {
+ size_t nadd = extent_size_get(extent) >> LG_PAGE;
+ size_t cur = atomic_fetch_add_zu(&curpages, nadd,
+ ATOMIC_RELAXED) + nadd;
+ size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
+ while (cur > high && !atomic_compare_exchange_weak_zu(
+ &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
+ /*
+ * Don't refresh cur, because it may have decreased
+ * since this thread lost the highpages update race.
+ * Note that high is updated in case of CAS failure.
+ */
+ }
+ if (cur > high && prof_gdump_get_unlocked()) {
+ prof_gdump(tsdn);
+ }
+ }
+}
+
+static void
+extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
+ cassert(config_prof);
+
+ if (opt_prof && extent_state_get(extent) == extent_state_active) {
+ size_t nsub = extent_size_get(extent) >> LG_PAGE;
+ assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
+ atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
+ }
+}
+
+static bool
+extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *elm_a, *elm_b;
+
+ /*
+ * We need to hold the lock to protect against a concurrent coalesce
+ * operation that sees us in a partial state.
+ */
+ extent_lock(tsdn, extent);
+
+ if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
+ &elm_a, &elm_b)) {
+ extent_unlock(tsdn, extent);
+ return true;
+ }
+
+ szind_t szind = extent_szind_get_maybe_invalid(extent);
+ bool slab = extent_slab_get(extent);
+ extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
+ if (slab) {
+ extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ }
+
+ extent_unlock(tsdn, extent);
+
+ if (config_prof && gdump_add) {
+ extent_gdump_add(tsdn, extent);
+ }
+
+ return false;
+}
+
+static bool
+extent_register(tsdn_t *tsdn, extent_t *extent) {
+ return extent_register_impl(tsdn, extent, true);
+}
+
+static bool
+extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
+ return extent_register_impl(tsdn, extent, false);
+}
+
+static void
+extent_reregister(tsdn_t *tsdn, extent_t *extent) {
+ bool err = extent_register(tsdn, extent);
+ assert(!err);
+}
+
+/*
+ * Removes all pointers to the given extent from the global rtree indices for
+ * its interior. This is relevant for slab extents, for which we need to do
+ * metadata lookups at places other than the head of the extent. We therefore
+ * deregister the interior when an extent moves from being an active slab to an
+ * inactive state.
+ */
+static void
+extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
+ extent_t *extent) {
+ size_t i;
+
+ assert(extent_slab_get(extent));
+
+ for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+ rtree_clear(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+ LG_PAGE));
+ }
+}
+
+/*
+ * Removes all pointers to the given extent from the global rtree.
+ */
+static void
+extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *elm_a, *elm_b;
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
+ &elm_a, &elm_b);
+
+ extent_lock(tsdn, extent);
+
+ extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
+ if (extent_slab_get(extent)) {
+ extent_interior_deregister(tsdn, rtree_ctx, extent);
+ extent_slab_set(extent, false);
+ }
+
+ extent_unlock(tsdn, extent);
+
+ if (config_prof && gdump) {
+ extent_gdump_sub(tsdn, extent);
+ }
+}
+
+static void
+extent_deregister(tsdn_t *tsdn, extent_t *extent) {
+ extent_deregister_impl(tsdn, extent, true);
+}
+
+static void
+extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
+ extent_deregister_impl(tsdn, extent, false);
+}
+
+/*
+ * Tries to find and remove an extent from extents that can be used for the
+ * given allocation request.
+ */
+static extent_t *
+extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
+ bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+ assert(alignment > 0);
+ if (config_debug && new_addr != NULL) {
+ /*
+ * Non-NULL new_addr has two use cases:
+ *
+ * 1) Recycle a known-extant extent, e.g. during purging.
+ * 2) Perform in-place expanding reallocation.
+ *
+ * Regardless of use case, new_addr must either refer to a
+ * non-existing extent, or to the base of an extant extent,
+ * since only active slabs support interior lookups (which of
+ * course cannot be recycled).
+ */
+ assert(PAGE_ADDR2BASE(new_addr) == new_addr);
+ assert(pad == 0);
+ assert(alignment <= PAGE);
+ }
+
+ size_t esize = size + pad;
+ malloc_mutex_lock(tsdn, &extents->mtx);
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ extent_t *extent;
+ if (new_addr != NULL) {
+ extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
+ false);
+ if (extent != NULL) {
+ /*
+ * We might null-out extent to report an error, but we
+ * still need to unlock the associated mutex after.
+ */
+ extent_t *unlock_extent = extent;
+ assert(extent_base_get(extent) == new_addr);
+ if (extent_arena_get(extent) != arena ||
+ extent_size_get(extent) < esize ||
+ extent_state_get(extent) !=
+ extents_state_get(extents)) {
+ extent = NULL;
+ }
+ extent_unlock(tsdn, unlock_extent);
+ }
+ } else {
+ extent = extents_fit_locked(tsdn, arena, extents, esize,
+ alignment);
+ }
+ if (extent == NULL) {
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+ return NULL;
+ }
+
+ extent_activate_locked(tsdn, arena, extents, extent);
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+
+ return extent;
+}
+
+/*
+ * Given an allocation request and an extent guaranteed to be able to satisfy
+ * it, this splits off lead and trail extents, leaving extent pointing to an
+ * extent satisfying the allocation.
+ * This function doesn't put lead or trail into any extents_t; it's the caller's
+ * job to ensure that they can be reused.
+ */
+typedef enum {
+ /*
+	 * Split successfully. lead, extent, and trail are modified to extents
+ * describing the ranges before, in, and after the given allocation.
+ */
+ extent_split_interior_ok,
+ /*
+ * The extent can't satisfy the given allocation request. None of the
+ * input extent_t *s are touched.
+ */
+ extent_split_interior_cant_alloc,
+ /*
+ * In a potentially invalid state. Must leak (if *to_leak is non-NULL),
+ * and salvage what's still salvageable (if *to_salvage is non-NULL).
+ * None of lead, extent, or trail are valid.
+ */
+ extent_split_interior_error
+} extent_split_interior_result_t;
+
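+/*
+ * Layout produced by a successful extent_split_interior() (sketch):
+ *
+ *	|<---------------- original *extent ---------------->|
+ *	|<--- *lead --->|<--- size + pad --->|<--- *trail --->|
+ *	                ^ aligned start of the returned *extent
+ *
+ * Either or both of *lead and *trail are NULL when there is no leading or
+ * trailing space to split off.
+ */
+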
+static extent_split_interior_result_t
+extent_split_interior(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
+ /* The result of splitting, in case of success. */
+ extent_t **extent, extent_t **lead, extent_t **trail,
+ /* The mess to clean up, in case of error. */
+ extent_t **to_leak, extent_t **to_salvage,
+ void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
+ szind_t szind, bool growing_retained) {
+ size_t esize = size + pad;
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
+ PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
+ assert(new_addr == NULL || leadsize == 0);
+ if (extent_size_get(*extent) < leadsize + esize) {
+ return extent_split_interior_cant_alloc;
+ }
+ size_t trailsize = extent_size_get(*extent) - leadsize - esize;
+
+ *lead = NULL;
+ *trail = NULL;
+ *to_leak = NULL;
+ *to_salvage = NULL;
+
+ /* Split the lead. */
+ if (leadsize != 0) {
+ *lead = *extent;
+ *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
+ *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
+ slab, growing_retained);
+ if (*extent == NULL) {
+ *to_leak = *lead;
+ *lead = NULL;
+ return extent_split_interior_error;
+ }
+ }
+
+ /* Split the trail. */
+ if (trailsize != 0) {
+ *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
+ esize, szind, slab, trailsize, SC_NSIZES, false,
+ growing_retained);
+ if (*trail == NULL) {
+ *to_leak = *extent;
+ *to_salvage = *lead;
+ *lead = NULL;
+ *extent = NULL;
+ return extent_split_interior_error;
+ }
+ }
+
+ if (leadsize == 0 && trailsize == 0) {
+ /*
+ * Splitting causes szind to be set as a side effect, but no
+ * splitting occurred.
+ */
+ extent_szind_set(*extent, szind);
+ if (szind != SC_NSIZES) {
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_addr_get(*extent), szind, slab);
+ if (slab && extent_size_get(*extent) > PAGE) {
+ rtree_szind_slab_update(tsdn, &extents_rtree,
+ rtree_ctx,
+ (uintptr_t)extent_past_get(*extent) -
+ (uintptr_t)PAGE, szind, slab);
+ }
+ }
+ }
+
+ return extent_split_interior_ok;
+}
+
+/*
+ * This fulfills the indicated allocation request out of the given extent (which
+ * the caller should have ensured was big enough). If there's any unused space
+ * before or after the resulting allocation, that space is given its own extent
+ * and put back into extents.
+ */
+static extent_t *
+extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
+ szind_t szind, extent_t *extent, bool growing_retained) {
+ extent_t *lead;
+ extent_t *trail;
+ extent_t *to_leak;
+ extent_t *to_salvage;
+
+ extent_split_interior_result_t result = extent_split_interior(
+ tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
+ &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
+ growing_retained);
+
+ if (!maps_coalesce && result != extent_split_interior_ok
+ && !opt_retain) {
+ /*
+ * Split isn't supported (implies Windows w/o retain). Avoid
+ * leaking the extents.
+ */
+ assert(to_leak != NULL && lead == NULL && trail == NULL);
+ extent_deactivate(tsdn, arena, extents, to_leak);
+ return NULL;
+ }
+
+ if (result == extent_split_interior_ok) {
+ if (lead != NULL) {
+ extent_deactivate(tsdn, arena, extents, lead);
+ }
+ if (trail != NULL) {
+ extent_deactivate(tsdn, arena, extents, trail);
+ }
+ return extent;
+ } else {
+ /*
+ * We should have picked an extent that was large enough to
+ * fulfill our allocation request.
+ */
+ assert(result == extent_split_interior_error);
+ if (to_salvage != NULL) {
+ extent_deregister(tsdn, to_salvage);
+ }
+ if (to_leak != NULL) {
+ void *leak = extent_base_get(to_leak);
+ extent_deregister_no_gdump_sub(tsdn, to_leak);
+ extents_abandon_vm(tsdn, arena, r_extent_hooks, extents,
+ to_leak, growing_retained);
+ assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
+ false) == NULL);
+ }
+ return NULL;
+ }
+ unreachable();
+}
+
+static bool
+extent_need_manual_zero(arena_t *arena) {
+ /*
+	 * Need to manually zero the extent on repopulating if either: 1) non-
+	 * default extent hooks are installed (in which case the purge semantics
+	 * may change); or 2) transparent huge pages are enabled.
+ */
+ return (!arena_has_default_hooks(arena) ||
+ (opt_thp == thp_mode_always));
+}
+
+/*
+ * Tries to satisfy the given allocation request by reusing one of the extents
+ * in the given extents_t.
+ */
+static extent_t *
+extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
+ bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+ assert(new_addr == NULL || !slab);
+ assert(pad == 0 || !slab);
+ assert(!*zero || !slab);
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
+ rtree_ctx, extents, new_addr, size, pad, alignment, slab,
+ growing_retained);
+ if (extent == NULL) {
+ return NULL;
+ }
+
+ extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
+ extents, new_addr, size, pad, alignment, slab, szind, extent,
+ growing_retained);
+ if (extent == NULL) {
+ return NULL;
+ }
+
+ if (*commit && !extent_committed_get(extent)) {
+ if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
+ 0, extent_size_get(extent), growing_retained)) {
+ extent_record(tsdn, arena, r_extent_hooks, extents,
+ extent, growing_retained);
+ return NULL;
+ }
+ if (!extent_need_manual_zero(arena)) {
+ extent_zeroed_set(extent, true);
+ }
+ }
+
+ if (extent_committed_get(extent)) {
+ *commit = true;
+ }
+ if (extent_zeroed_get(extent)) {
+ *zero = true;
+ }
+
+ if (pad != 0) {
+ extent_addr_randomize(tsdn, extent, alignment);
+ }
+ assert(extent_state_get(extent) == extent_state_active);
+ if (slab) {
+ extent_slab_set(extent, slab);
+ extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ }
+
+ if (*zero) {
+ void *addr = extent_base_get(extent);
+ if (!extent_zeroed_get(extent)) {
+ size_t size = extent_size_get(extent);
+ if (extent_need_manual_zero(arena) ||
+ pages_purge_forced(addr, size)) {
+ memset(addr, 0, size);
+ }
+ } else if (config_debug) {
+ size_t *p = (size_t *)(uintptr_t)addr;
+ /* Check the first page only. */
+ for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
+ assert(p[i] == 0);
+ }
+ }
+ }
+ return extent;
+}
+
+/*
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
+ * advantage of this to avoid demanding zeroed extents, but taking advantage of
+ * them if they are returned.
+ */
+static void *
+extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
+ void *ret;
+
+ assert(size != 0);
+ assert(alignment != 0);
+
+ /* "primary" dss. */
+ if (have_dss && dss_prec == dss_prec_primary && (ret =
+ extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL) {
+ return ret;
+ }
+ /* mmap. */
+ if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
+ != NULL) {
+ return ret;
+ }
+ /* "secondary" dss. */
+ if (have_dss && dss_prec == dss_prec_secondary && (ret =
+ extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL) {
+ return ret;
+ }
+
+ /* All strategies for allocation failed. */
+ return NULL;
+}
+
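+/*
+ * Ordering sketch for the strategies above: under dss_prec_primary the
+ * sbrk-based dss allocator is tried before mmap; under the default
+ * dss_prec_secondary, mmap is tried first and dss only as a fallback.
+ */
+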
+static void *
+extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit) {
+ void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
+ commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
+ ATOMIC_RELAXED));
+ if (have_madvise_huge && ret) {
+ pages_set_thp_state(ret, size);
+ }
+ return ret;
+}
+
+static void *
+extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ tsdn_t *tsdn;
+ arena_t *arena;
+
+ tsdn = tsdn_fetch();
+ arena = arena_get(tsdn, arena_ind, false);
+ /*
+ * The arena we're allocating on behalf of must have been initialized
+ * already.
+ */
+ assert(arena != NULL);
+
+ return extent_alloc_default_impl(tsdn, arena, new_addr, size,
+ ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
+}
+
+static void
+extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+ if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
+ /*
+ * The only legitimate case of customized extent hooks for a0 is
+ * hooks with no allocation activities. One such example is to
+ * place metadata on pre-allocated resources such as huge pages.
+ * In that case, rely on reentrancy_level checks to catch
+ * infinite recursions.
+ */
+ pre_reentrancy(tsd, NULL);
+ } else {
+ pre_reentrancy(tsd, arena);
+ }
+}
+
+static void
+extent_hook_post_reentrancy(tsdn_t *tsdn) {
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+ post_reentrancy(tsd);
+}
+
+/*
+ * If virtual memory is retained, create increasingly larger extents from which
+ * to split requested extents in order to limit the total number of disjoint
+ * virtual memory ranges retained by each arena.
+ */
+static extent_t *
+extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
+ bool slab, szind_t szind, bool *zero, bool *commit) {
+ malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
+ assert(pad == 0 || !slab);
+ assert(!*zero || !slab);
+
+ size_t esize = size + pad;
+ size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
+ /* Beware size_t wrap-around. */
+ if (alloc_size_min < esize) {
+ goto label_err;
+ }
+ /*
+ * Find the next extent size in the series that would be large enough to
+ * satisfy this request.
+ */
+ pszind_t egn_skip = 0;
+ size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
+ while (alloc_size < alloc_size_min) {
+ egn_skip++;
+ if (arena->extent_grow_next + egn_skip >=
+ sz_psz2ind(SC_LARGE_MAXCLASS)) {
+ /* Outside legal range. */
+ goto label_err;
+ }
+ alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
+ }
+
+ extent_t *extent = extent_alloc(tsdn, arena);
+ if (extent == NULL) {
+ goto label_err;
+ }
+ bool zeroed = false;
+ bool committed = false;
+
+ void *ptr;
+ if (*r_extent_hooks == &extent_hooks_default) {
+ ptr = extent_alloc_default_impl(tsdn, arena, NULL,
+ alloc_size, PAGE, &zeroed, &committed);
+ } else {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
+ alloc_size, PAGE, &zeroed, &committed,
+ arena_ind_get(arena));
+ extent_hook_post_reentrancy(tsdn);
+ }
+
+ extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
+ arena_extent_sn_next(arena), extent_state_active, zeroed,
+ committed, true, EXTENT_IS_HEAD);
+ if (ptr == NULL) {
+ extent_dalloc(tsdn, arena, extent);
+ goto label_err;
+ }
+
+ if (extent_register_no_gdump_add(tsdn, extent)) {
+ extent_dalloc(tsdn, arena, extent);
+ goto label_err;
+ }
+
+ if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
+ *zero = true;
+ }
+ if (extent_committed_get(extent)) {
+ *commit = true;
+ }
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ extent_t *lead;
+ extent_t *trail;
+ extent_t *to_leak;
+ extent_t *to_salvage;
+ extent_split_interior_result_t result = extent_split_interior(
+ tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
+ &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
+ true);
+
+ if (result == extent_split_interior_ok) {
+ if (lead != NULL) {
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, lead, true);
+ }
+ if (trail != NULL) {
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, trail, true);
+ }
+ } else {
+ /*
+ * We should have allocated a sufficiently large extent; the
+ * cant_alloc case should not occur.
+ */
+ assert(result == extent_split_interior_error);
+ if (to_salvage != NULL) {
+ if (config_prof) {
+ extent_gdump_add(tsdn, to_salvage);
+ }
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, to_salvage, true);
+ }
+ if (to_leak != NULL) {
+ extent_deregister_no_gdump_sub(tsdn, to_leak);
+ extents_abandon_vm(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, to_leak, true);
+ }
+ goto label_err;
+ }
+
+ if (*commit && !extent_committed_get(extent)) {
+ if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
+ extent_size_get(extent), true)) {
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, extent, true);
+ goto label_err;
+ }
+ if (!extent_need_manual_zero(arena)) {
+ extent_zeroed_set(extent, true);
+ }
+ }
+
+ /*
+ * Increment extent_grow_next if doing so wouldn't exceed the allowed
+ * range.
+ */
+ if (arena->extent_grow_next + egn_skip + 1 <=
+ arena->retain_grow_limit) {
+ arena->extent_grow_next += egn_skip + 1;
+ } else {
+ arena->extent_grow_next = arena->retain_grow_limit;
+ }
+ /* All opportunities for failure are past. */
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+
+ if (config_prof) {
+ /* Adjust gdump stats now that extent is final size. */
+ extent_gdump_add(tsdn, extent);
+ }
+ if (pad != 0) {
+ extent_addr_randomize(tsdn, extent, alignment);
+ }
+ if (slab) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+ &rtree_ctx_fallback);
+
+ extent_slab_set(extent, true);
+ extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ }
+ if (*zero && !extent_zeroed_get(extent)) {
+ void *addr = extent_base_get(extent);
+ size_t size = extent_size_get(extent);
+ if (extent_need_manual_zero(arena) ||
+ pages_purge_forced(addr, size)) {
+ memset(addr, 0, size);
+ }
+ }
+
+ return extent;
+label_err:
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ return NULL;
+}
+
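+/*
+ * Growth-series sketch for extent_grow_retained() (assumes the default
+ * page-size classes): if extent_grow_next currently indexes the 4 MiB class
+ * and a 5 MiB request arrives, egn_skip stops at the 5 MiB class, a 5 MiB
+ * extent is mapped, and on success extent_grow_next advances past it to the
+ * 6 MiB class (capped at retain_grow_limit), so each mapping in the retained
+ * series is at least as large as the previous one.
+ */
+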
+static extent_t *
+extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ assert(size != 0);
+ assert(alignment != 0);
+
+ malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
+
+ extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, new_addr, size, pad, alignment, slab,
+ szind, zero, commit, true);
+ if (extent != NULL) {
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ if (config_prof) {
+ extent_gdump_add(tsdn, extent);
+ }
+ } else if (opt_retain && new_addr == NULL) {
+ extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
+ pad, alignment, slab, szind, zero, commit);
+ /* extent_grow_retained() always releases extent_grow_mtx. */
+ } else {
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ }
+ malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
+
+ return extent;
+}
+
+static extent_t *
+extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ size_t esize = size + pad;
+ extent_t *extent = extent_alloc(tsdn, arena);
+ if (extent == NULL) {
+ return NULL;
+ }
+ void *addr;
+ size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
+ palignment, zero, commit);
+ } else {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
+ esize, palignment, zero, commit, arena_ind_get(arena));
+ extent_hook_post_reentrancy(tsdn);
+ }
+ if (addr == NULL) {
+ extent_dalloc(tsdn, arena, extent);
+ return NULL;
+ }
+ extent_init(extent, arena, addr, esize, slab, szind,
+ arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
+ true, EXTENT_NOT_HEAD);
+ if (pad != 0) {
+ extent_addr_randomize(tsdn, extent, alignment);
+ }
+ if (extent_register(tsdn, extent)) {
+ extent_dalloc(tsdn, arena, extent);
+ return NULL;
+ }
+
+ return extent;
+}
+
+extent_t *
+extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
+ new_addr, size, pad, alignment, slab, szind, zero, commit);
+ if (extent == NULL) {
+ if (opt_retain && new_addr != NULL) {
+ /*
+			 * When retain is enabled and a fixed new_addr is set,
+			 * we do not attempt extent_alloc_wrapper_hard: the
+			 * mmap it performs is very unlikely to return the
+			 * requested address (unless that address happens to be
+			 * at the end of the current mappings).
+ */
+ return NULL;
+ }
+ extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
+ new_addr, size, pad, alignment, slab, szind, zero, commit);
+ }
+
+ assert(extent == NULL || extent_dumpable_get(extent));
+ return extent;
+}
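+
+/*
+ * Illustrative summary of extent_alloc_wrapper() above (informal, reusing the
+ * parameter names from the function): it tries sources in order -- recycle
+ * from extents_retained, then (when opt_retain is on and no fixed new_addr was
+ * requested) grow the retained region, and only then fall back to
+ * extent_alloc_wrapper_hard(), i.e. a fresh mapping via the alloc hook or
+ * mmap.  Roughly:
+ *
+ *	extent_t *e = extent_alloc_retained(tsdn, arena, r_extent_hooks,
+ *	    new_addr, size, pad, alignment, slab, szind, zero, commit);
+ *	if (e == NULL && !(opt_retain && new_addr != NULL)) {
+ *		e = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
+ *		    new_addr, size, pad, alignment, slab, szind, zero, commit);
+ *	}
+ */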
+
+static bool
+extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
+ const extent_t *outer) {
+ assert(extent_arena_get(inner) == arena);
+ if (extent_arena_get(outer) != arena) {
+ return false;
+ }
+
+ assert(extent_state_get(inner) == extent_state_active);
+ if (extent_state_get(outer) != extents->state) {
+ return false;
+ }
+
+ if (extent_committed_get(inner) != extent_committed_get(outer)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
+ bool growing_retained) {
+ assert(extent_can_coalesce(arena, extents, inner, outer));
+
+ extent_activate_locked(tsdn, arena, extents, outer);
+
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+ bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
+ forward ? inner : outer, forward ? outer : inner, growing_retained);
+ malloc_mutex_lock(tsdn, &extents->mtx);
+
+ if (err) {
+ extent_deactivate_locked(tsdn, arena, extents, outer);
+ }
+
+ return err;
+}
+
+static extent_t *
+extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent, bool *coalesced, bool growing_retained,
+ bool inactive_only) {
+ /*
+ * We avoid checking / locking inactive neighbors for large size
+	 * classes, since they are eagerly coalesced on deallocation, which can
+ * cause lock contention.
+ */
+ /*
+ * Continue attempting to coalesce until failure, to protect against
+ * races with other threads that are thwarted by this one.
+ */
+ bool again;
+ do {
+ again = false;
+
+ /* Try to coalesce forward. */
+ extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
+ extent_past_get(extent), inactive_only);
+ if (next != NULL) {
+ /*
+ * extents->mtx only protects against races for
+ * like-state extents, so call extent_can_coalesce()
+ * before releasing next's pool lock.
+ */
+ bool can_coalesce = extent_can_coalesce(arena, extents,
+ extent, next);
+
+ extent_unlock(tsdn, next);
+
+ if (can_coalesce && !extent_coalesce(tsdn, arena,
+ r_extent_hooks, extents, extent, next, true,
+ growing_retained)) {
+ if (extents->delay_coalesce) {
+ /* Do minimal coalescing. */
+ *coalesced = true;
+ return extent;
+ }
+ again = true;
+ }
+ }
+
+ /* Try to coalesce backward. */
+ extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
+ extent_before_get(extent), inactive_only);
+ if (prev != NULL) {
+ bool can_coalesce = extent_can_coalesce(arena, extents,
+ extent, prev);
+ extent_unlock(tsdn, prev);
+
+ if (can_coalesce && !extent_coalesce(tsdn, arena,
+ r_extent_hooks, extents, extent, prev, false,
+ growing_retained)) {
+ extent = prev;
+ if (extents->delay_coalesce) {
+ /* Do minimal coalescing. */
+ *coalesced = true;
+ return extent;
+ }
+ again = true;
+ }
+ }
+ } while (again);
+
+ if (extents->delay_coalesce) {
+ *coalesced = false;
+ }
+ return extent;
+}
+
+static extent_t *
+extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent, bool *coalesced, bool growing_retained) {
+ return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
+ extents, extent, coalesced, growing_retained, false);
+}
+
+static extent_t *
+extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent, bool *coalesced, bool growing_retained) {
+ return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
+ extents, extent, coalesced, growing_retained, true);
+}
+
+/*
+ * Handles the metadata-management portion of putting an unused extent into
+ * the given extents_t: coalescing, deregistering slab interiors, and the heap
+ * operations.
+ */
+static void
+extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *extent, bool growing_retained) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ assert((extents_state_get(extents) != extent_state_dirty &&
+ extents_state_get(extents) != extent_state_muzzy) ||
+ !extent_zeroed_get(extent));
+
+ malloc_mutex_lock(tsdn, &extents->mtx);
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ extent_szind_set(extent, SC_NSIZES);
+ if (extent_slab_get(extent)) {
+ extent_interior_deregister(tsdn, rtree_ctx, extent);
+ extent_slab_set(extent, false);
+ }
+
+ assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent), true) == extent);
+
+ if (!extents->delay_coalesce) {
+ extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
+ rtree_ctx, extents, extent, NULL, growing_retained);
+ } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
+ assert(extents == &arena->extents_dirty);
+ /* Always coalesce large extents eagerly. */
+ bool coalesced;
+ do {
+ assert(extent_state_get(extent) == extent_state_active);
+ extent = extent_try_coalesce_large(tsdn, arena,
+ r_extent_hooks, rtree_ctx, extents, extent,
+ &coalesced, growing_retained);
+ } while (coalesced);
+ if (extent_size_get(extent) >= oversize_threshold) {
+ /* Shortcut to purge the oversize extent eagerly. */
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+ arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
+ return;
+ }
+ }
+ extent_deactivate_locked(tsdn, arena, extents, extent);
+
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+}
+
+void
+extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ if (extent_register(tsdn, extent)) {
+ extent_dalloc(tsdn, arena, extent);
+ return;
+ }
+ extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
+}
+
+static bool
+extent_may_dalloc(void) {
+ /* With retain enabled, the default dalloc always fails. */
+ return !opt_retain;
+}
+
+static bool
+extent_dalloc_default_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
+ return extent_dalloc_mmap(addr, size);
+ }
+ return true;
+}
+
+static bool
+extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ return extent_dalloc_default_impl(addr, size);
+}
+
+static bool
+extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ bool err;
+
+ assert(extent_base_get(extent) != NULL);
+ assert(extent_size_get(extent) != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_addr_set(extent, extent_base_get(extent));
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ /* Try to deallocate. */
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ err = extent_dalloc_default_impl(extent_base_get(extent),
+ extent_size_get(extent));
+ } else {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ err = ((*r_extent_hooks)->dalloc == NULL ||
+ (*r_extent_hooks)->dalloc(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent),
+ extent_committed_get(extent), arena_ind_get(arena)));
+ extent_hook_post_reentrancy(tsdn);
+ }
+
+ if (!err) {
+ extent_dalloc(tsdn, arena, extent);
+ }
+
+ return err;
+}
+
+void
+extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ assert(extent_dumpable_get(extent));
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+	/* Avoid calling the default extent_dalloc unless we have to. */
+ if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
+ /*
+ * Deregister first to avoid a race with other allocating
+ * threads, and reregister if deallocation fails.
+ */
+ extent_deregister(tsdn, extent);
+ if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
+ extent)) {
+ return;
+ }
+ extent_reregister(tsdn, extent);
+ }
+
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ /* Try to decommit; purge if that fails. */
+ bool zeroed;
+ if (!extent_committed_get(extent)) {
+ zeroed = true;
+ } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
+ 0, extent_size_get(extent))) {
+ zeroed = true;
+ } else if ((*r_extent_hooks)->purge_forced != NULL &&
+ !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), 0,
+ extent_size_get(extent), arena_ind_get(arena))) {
+ zeroed = true;
+ } else if (extent_state_get(extent) == extent_state_muzzy ||
+ ((*r_extent_hooks)->purge_lazy != NULL &&
+ !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), 0,
+ extent_size_get(extent), arena_ind_get(arena)))) {
+ zeroed = false;
+ } else {
+ zeroed = false;
+ }
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+ extent_zeroed_set(extent, zeroed);
+
+ if (config_prof) {
+ extent_gdump_sub(tsdn, extent);
+ }
+
+ extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
+ extent, false);
+}
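+
+/*
+ * A rough summary of the fallback chain above (informal, for readability): if
+ * the extent cannot actually be deallocated -- which with the default hooks is
+ * always the case under opt_retain -- the mapping is kept and the pages are
+ * instead decommitted, force-purged, or lazily purged, in that order of
+ * preference.  Decommit and forced purge leave the range effectively zeroed;
+ * lazy purge does not, which is what the zeroed flag records:
+ *
+ *	zeroed = !committed
+ *	    || decommit succeeded
+ *	    || purge_forced succeeded;	// otherwise false, even if
+ *					// purge_lazy released the pages
+ *
+ * The extent then lands in extents_retained via extent_record().
+ */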
+
+static void
+extent_destroy_default_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
+ pages_unmap(addr, size);
+ }
+}
+
+static void
+extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ extent_destroy_default_impl(addr, size);
+}
+
+void
+extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ assert(extent_base_get(extent) != NULL);
+ assert(extent_size_get(extent) != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ /* Deregister first to avoid a race with other allocating threads. */
+ extent_deregister(tsdn, extent);
+
+ extent_addr_set(extent, extent_base_get(extent));
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ /* Try to destroy; silently fail otherwise. */
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ extent_destroy_default_impl(extent_base_get(extent),
+ extent_size_get(extent));
+ } else if ((*r_extent_hooks)->destroy != NULL) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ (*r_extent_hooks)->destroy(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent),
+ extent_committed_get(extent), arena_ind_get(arena));
+ extent_hook_post_reentrancy(tsdn);
+ }
+
+ extent_dalloc(tsdn, arena, extent);
+}
+
+static bool
+extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+static bool
+extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ bool err = ((*r_extent_hooks)->commit == NULL ||
+ (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
+ extent_size_get(extent), offset, length, arena_ind_get(arena)));
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+ extent_committed_set(extent, extent_committed_get(extent) || !err);
+ return err;
+}
+
+bool
+extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
+ length, false);
+}
+
+static bool
+extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+bool
+extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ bool err = ((*r_extent_hooks)->decommit == NULL ||
+ (*r_extent_hooks)->decommit(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), offset, length,
+ arena_ind_get(arena)));
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+ extent_committed_set(extent, extent_committed_get(extent) && err);
+ return err;
+}
+
+#ifdef PAGES_CAN_PURGE_LAZY
+static bool
+extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ assert(addr != NULL);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+
+ return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+#endif
+
+static bool
+extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if ((*r_extent_hooks)->purge_lazy == NULL) {
+ return true;
+ }
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), offset, length,
+ arena_ind_get(arena));
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+
+ return err;
+}
+
+bool
+extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
+ offset, length, false);
+}
+
+#ifdef PAGES_CAN_PURGE_FORCED
+static bool
+extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind) {
+ assert(addr != NULL);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+
+ return pages_purge_forced((void *)((uintptr_t)addr +
+ (uintptr_t)offset), length);
+}
+#endif
+
+static bool
+extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if ((*r_extent_hooks)->purge_forced == NULL) {
+ return true;
+ }
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), offset, length,
+ arena_ind_get(arena));
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+ return err;
+}
+
+bool
+extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
+ offset, length, false);
+}
+
+static bool
+extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
+ if (!maps_coalesce) {
+ /*
+ * Without retain, only whole regions can be purged (required by
+ * MEM_RELEASE on Windows) -- therefore disallow splitting. See
+ * comments in extent_head_no_merge().
+ */
+ return !opt_retain;
+ }
+
+ return false;
+}
+
+/*
+ * Accepts the extent to split, and the characteristics of each side of the
+ * split. The 'a' parameters go with the 'lead' of the resulting pair of
+ * extents (the lower addressed portion of the split), and the 'b' parameters go
+ * with the trail (the higher addressed portion). This makes 'extent' the lead,
+ * and returns the trail (except in case of error).
+ */
+static extent_t *
+extent_split_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
+ bool growing_retained) {
+ assert(extent_size_get(extent) == size_a + size_b);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if ((*r_extent_hooks)->split == NULL) {
+ return NULL;
+ }
+
+ extent_t *trail = extent_alloc(tsdn, arena);
+ if (trail == NULL) {
+ goto label_error_a;
+ }
+
+ extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
+ size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
+ extent_state_get(extent), extent_zeroed_get(extent),
+ extent_committed_get(extent), extent_dumpable_get(extent),
+ EXTENT_NOT_HEAD);
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
+ {
+ extent_t lead;
+
+ extent_init(&lead, arena, extent_addr_get(extent), size_a,
+ slab_a, szind_a, extent_sn_get(extent),
+ extent_state_get(extent), extent_zeroed_get(extent),
+ extent_committed_get(extent), extent_dumpable_get(extent),
+ EXTENT_NOT_HEAD);
+
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
+ true, &lead_elm_a, &lead_elm_b);
+ }
+ rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
+ &trail_elm_a, &trail_elm_b);
+
+ if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
+ || trail_elm_b == NULL) {
+ goto label_error_b;
+ }
+
+ extent_lock2(tsdn, extent, trail);
+
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
+ size_a + size_b, size_a, size_b, extent_committed_get(extent),
+ arena_ind_get(arena));
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+ if (err) {
+ goto label_error_c;
+ }
+
+ extent_size_set(extent, size_a);
+ extent_szind_set(extent, szind_a);
+
+ extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
+ szind_a, slab_a);
+ extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
+ szind_b, slab_b);
+
+ extent_unlock2(tsdn, extent, trail);
+
+ return trail;
+label_error_c:
+ extent_unlock2(tsdn, extent, trail);
+label_error_b:
+ extent_dalloc(tsdn, arena, trail);
+label_error_a:
+ return NULL;
+}
+
+extent_t *
+extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
+ return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
+ szind_a, slab_a, size_b, szind_b, slab_b, false);
+}
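+
+/*
+ * For illustration (call contract only, restating the comment above): after a
+ * successful
+ *
+ *	extent_t *trail = extent_split_wrapper(tsdn, arena, &hooks, extent,
+ *	    size_a, szind_a, slab_a, size_b, szind_b, slab_b);
+ *
+ * 'extent' has been shrunk in place to the lower-addressed size_a bytes and
+ * 'trail' covers the remaining size_b bytes starting at base + size_a; on
+ * failure the function returns NULL and leaves 'extent' unmodified.
+ */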
+
+static bool
+extent_merge_default_impl(void *addr_a, void *addr_b) {
+ if (!maps_coalesce && !opt_retain) {
+ return true;
+ }
+ if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Returns true if the given extents can't be merged because of their head bit
+ * settings. Assumes the second extent has the higher address.
+ */
+static bool
+extent_head_no_merge(extent_t *a, extent_t *b) {
+ assert(extent_base_get(a) < extent_base_get(b));
+ /*
+ * When coalesce is not always allowed (Windows), only merge extents
+ * from the same VirtualAlloc region under opt.retain (in which case
+ * MEM_DECOMMIT is utilized for purging).
+ */
+ if (maps_coalesce) {
+ return false;
+ }
+ if (!opt_retain) {
+ return true;
+ }
+ /* If b is a head extent, disallow the cross-region merge. */
+ if (extent_is_head_get(b)) {
+ /*
+ * Additionally, sn should not overflow with retain; sanity
+ * check that different regions have unique sn.
+ */
+ assert(extent_sn_comp(a, b) != 0);
+ return true;
+ }
+ assert(extent_sn_comp(a, b) == 0);
+
+ return false;
+}
+
+static bool
+extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
+ if (!maps_coalesce) {
+ tsdn_t *tsdn = tsdn_fetch();
+ extent_t *a = iealloc(tsdn, addr_a);
+ extent_t *b = iealloc(tsdn, addr_b);
+ if (extent_head_no_merge(a, b)) {
+ return true;
+ }
+ }
+ return extent_merge_default_impl(addr_a, addr_b);
+}
+
+static bool
+extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
+ bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+ assert(extent_base_get(a) < extent_base_get(b));
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
+ return true;
+ }
+
+ bool err;
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ err = extent_merge_default_impl(extent_base_get(a),
+ extent_base_get(b));
+ } else {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ err = (*r_extent_hooks)->merge(*r_extent_hooks,
+ extent_base_get(a), extent_size_get(a), extent_base_get(b),
+ extent_size_get(b), extent_committed_get(a),
+ arena_ind_get(arena));
+ extent_hook_post_reentrancy(tsdn);
+ }
+
+ if (err) {
+ return true;
+ }
+
+ /*
+ * The rtree writes must happen while all the relevant elements are
+ * owned, so the following code uses decomposed helper functions rather
+ * than extent_{,de}register() to do things in the right order.
+ */
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
+ &a_elm_b);
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
+ &b_elm_b);
+
+ extent_lock2(tsdn, a, b);
+
+ if (a_elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
+ SC_NSIZES, false);
+ }
+ if (b_elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
+ SC_NSIZES, false);
+ } else {
+ b_elm_b = b_elm_a;
+ }
+
+ extent_size_set(a, extent_size_get(a) + extent_size_get(b));
+ extent_szind_set(a, SC_NSIZES);
+ extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
+ extent_sn_get(a) : extent_sn_get(b));
+ extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+
+ extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
+ false);
+
+ extent_unlock2(tsdn, a, b);
+
+ extent_dalloc(tsdn, extent_arena_get(b), b);
+
+ return false;
+}
+
+bool
+extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
+ return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
+}
+
+bool
+extent_boot(void) {
+ if (rtree_new(&extents_rtree, true)) {
+ return true;
+ }
+
+ if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
+ WITNESS_RANK_EXTENT_POOL)) {
+ return true;
+ }
+
+ if (have_dss) {
+ extent_dss_boot();
+ }
+
+ return false;
+}
+
+void
+extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
+ size_t *nfree, size_t *nregs, size_t *size) {
+ assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
+
+ const extent_t *extent = iealloc(tsdn, ptr);
+ if (unlikely(extent == NULL)) {
+ *nfree = *nregs = *size = 0;
+ return;
+ }
+
+ *size = extent_size_get(extent);
+ if (!extent_slab_get(extent)) {
+ *nfree = 0;
+ *nregs = 1;
+ } else {
+ *nfree = extent_nfree_get(extent);
+ *nregs = bin_infos[extent_szind_get(extent)].nregs;
+ assert(*nfree <= *nregs);
+ assert(*nfree * extent_usize_get(extent) <= *size);
+ }
+}
+
+void
+extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
+ size_t *nfree, size_t *nregs, size_t *size,
+ size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
+ assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
+ && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
+
+ const extent_t *extent = iealloc(tsdn, ptr);
+ if (unlikely(extent == NULL)) {
+ *nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
+ *slabcur_addr = NULL;
+ return;
+ }
+
+ *size = extent_size_get(extent);
+ if (!extent_slab_get(extent)) {
+ *nfree = *bin_nfree = *bin_nregs = 0;
+ *nregs = 1;
+ *slabcur_addr = NULL;
+ return;
+ }
+
+ *nfree = extent_nfree_get(extent);
+ const szind_t szind = extent_szind_get(extent);
+ *nregs = bin_infos[szind].nregs;
+ assert(*nfree <= *nregs);
+ assert(*nfree * extent_usize_get(extent) <= *size);
+
+ const arena_t *arena = extent_arena_get(extent);
+ assert(arena != NULL);
+ const unsigned binshard = extent_binshard_get(extent);
+ bin_t *bin = &arena->bins[szind].bin_shards[binshard];
+
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (config_stats) {
+ *bin_nregs = *nregs * bin->stats.curslabs;
+ assert(*bin_nregs >= bin->stats.curregs);
+ *bin_nfree = *bin_nregs - bin->stats.curregs;
+ } else {
+ *bin_nfree = *bin_nregs = 0;
+ }
+ *slabcur_addr = extent_addr_get(bin->slabcur);
+ assert(*slabcur_addr != NULL);
+ malloc_mutex_unlock(tsdn, &bin->lock);
+}
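+
+/*
+ * A small usage sketch of the introspection helpers above (internal API only;
+ * any public mallctl plumbing on top of them is outside this file):
+ *
+ *	size_t nfree, nregs, size;
+ *	extent_util_stats_get(tsdn, ptr, &nfree, &nregs, &size);
+ *	// Small (slab-backed) ptr: nregs is the slab's region count and
+ *	// nfree <= nregs.  Large ptr: nregs == 1 and nfree == 0.
+ */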
diff --git a/contrib/libs/jemalloc/src/extent_dss.c b/contrib/libs/jemalloc/src/extent_dss.c
index 8581789110..90622d0cb9 100644
--- a/contrib/libs/jemalloc/src/extent_dss.c
+++ b/contrib/libs/jemalloc/src/extent_dss.c
@@ -1,271 +1,271 @@
-#define JEMALLOC_EXTENT_DSS_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/extent_dss.h"
-#include "jemalloc/internal/spin.h"
-
-/******************************************************************************/
-/* Data. */
-
-const char *opt_dss = DSS_DEFAULT;
-
-const char *dss_prec_names[] = {
- "disabled",
- "primary",
- "secondary",
- "N/A"
-};
-
-/*
- * Current dss precedence default, used when creating new arenas. NB: This is
- * stored as unsigned rather than dss_prec_t because in principle there's no
- * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
- * atomic operations to synchronize the setting.
- */
-static atomic_u_t dss_prec_default = ATOMIC_INIT(
- (unsigned)DSS_PREC_DEFAULT);
-
-/* Base address of the DSS. */
-static void *dss_base;
-/* Atomic boolean indicating whether a thread is currently extending DSS. */
-static atomic_b_t dss_extending;
-/* Atomic boolean indicating whether the DSS is exhausted. */
-static atomic_b_t dss_exhausted;
-/* Atomic current upper limit on DSS addresses. */
-static atomic_p_t dss_max;
-
-/******************************************************************************/
-
-static void *
-extent_dss_sbrk(intptr_t increment) {
-#ifdef JEMALLOC_DSS
- return sbrk(increment);
-#else
- not_implemented();
- return NULL;
-#endif
-}
-
-dss_prec_t
-extent_dss_prec_get(void) {
- dss_prec_t ret;
-
- if (!have_dss) {
- return dss_prec_disabled;
- }
- ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
- return ret;
-}
-
-bool
-extent_dss_prec_set(dss_prec_t dss_prec) {
- if (!have_dss) {
- return (dss_prec != dss_prec_disabled);
- }
- atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
- return false;
-}
-
-static void
-extent_dss_extending_start(void) {
- spin_t spinner = SPIN_INITIALIZER;
- while (true) {
- bool expected = false;
- if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
- true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
- break;
- }
- spin_adaptive(&spinner);
- }
-}
-
-static void
-extent_dss_extending_finish(void) {
- assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
-
- atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
-}
-
-static void *
-extent_dss_max_update(void *new_addr) {
- /*
- * Get the current end of the DSS as max_cur and assure that dss_max is
- * up to date.
- */
- void *max_cur = extent_dss_sbrk(0);
- if (max_cur == (void *)-1) {
- return NULL;
- }
- atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
- /* Fixed new_addr can only be supported if it is at the edge of DSS. */
- if (new_addr != NULL && max_cur != new_addr) {
- return NULL;
- }
- return max_cur;
-}
-
-void *
-extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit) {
- extent_t *gap;
-
- cassert(have_dss);
- assert(size > 0);
- assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
-
- /*
- * sbrk() uses a signed increment argument, so take care not to
- * interpret a large allocation request as a negative increment.
- */
- if ((intptr_t)size < 0) {
- return NULL;
- }
-
- gap = extent_alloc(tsdn, arena);
- if (gap == NULL) {
- return NULL;
- }
-
- extent_dss_extending_start();
- if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
- /*
- * The loop is necessary to recover from races with other
- * threads that are using the DSS for something other than
- * malloc.
- */
- while (true) {
- void *max_cur = extent_dss_max_update(new_addr);
- if (max_cur == NULL) {
- goto label_oom;
- }
-
- /*
- * Compute how much page-aligned gap space (if any) is
- * necessary to satisfy alignment. This space can be
- * recycled for later use.
- */
- void *gap_addr_page = (void *)(PAGE_CEILING(
- (uintptr_t)max_cur));
- void *ret = (void *)ALIGNMENT_CEILING(
- (uintptr_t)gap_addr_page, alignment);
- size_t gap_size_page = (uintptr_t)ret -
- (uintptr_t)gap_addr_page;
- if (gap_size_page != 0) {
- extent_init(gap, arena, gap_addr_page,
- gap_size_page, false, SC_NSIZES,
- arena_extent_sn_next(arena),
- extent_state_active, false, true, true,
- EXTENT_NOT_HEAD);
- }
- /*
- * Compute the address just past the end of the desired
- * allocation space.
- */
- void *dss_next = (void *)((uintptr_t)ret + size);
- if ((uintptr_t)ret < (uintptr_t)max_cur ||
- (uintptr_t)dss_next < (uintptr_t)max_cur) {
- goto label_oom; /* Wrap-around. */
- }
- /* Compute the increment, including subpage bytes. */
- void *gap_addr_subpage = max_cur;
- size_t gap_size_subpage = (uintptr_t)ret -
- (uintptr_t)gap_addr_subpage;
- intptr_t incr = gap_size_subpage + size;
-
- assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
- size);
-
- /* Try to allocate. */
- void *dss_prev = extent_dss_sbrk(incr);
- if (dss_prev == max_cur) {
- /* Success. */
- atomic_store_p(&dss_max, dss_next,
- ATOMIC_RELEASE);
- extent_dss_extending_finish();
-
- if (gap_size_page != 0) {
- extent_dalloc_gap(tsdn, arena, gap);
- } else {
- extent_dalloc(tsdn, arena, gap);
- }
- if (!*commit) {
- *commit = pages_decommit(ret, size);
- }
- if (*zero && *commit) {
- extent_hooks_t *extent_hooks =
- EXTENT_HOOKS_INITIALIZER;
- extent_t extent;
-
- extent_init(&extent, arena, ret, size,
- size, false, SC_NSIZES,
- extent_state_active, false, true,
- true, EXTENT_NOT_HEAD);
- if (extent_purge_forced_wrapper(tsdn,
- arena, &extent_hooks, &extent, 0,
- size)) {
- memset(ret, 0, size);
- }
- }
- return ret;
- }
- /*
- * Failure, whether due to OOM or a race with a raw
- * sbrk() call from outside the allocator.
- */
- if (dss_prev == (void *)-1) {
- /* OOM. */
- atomic_store_b(&dss_exhausted, true,
- ATOMIC_RELEASE);
- goto label_oom;
- }
- }
- }
-label_oom:
- extent_dss_extending_finish();
- extent_dalloc(tsdn, arena, gap);
- return NULL;
-}
-
-static bool
-extent_in_dss_helper(void *addr, void *max) {
- return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
- (uintptr_t)max);
-}
-
-bool
-extent_in_dss(void *addr) {
- cassert(have_dss);
-
- return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
- ATOMIC_ACQUIRE));
-}
-
-bool
-extent_dss_mergeable(void *addr_a, void *addr_b) {
- void *max;
-
- cassert(have_dss);
-
- if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
- (uintptr_t)dss_base) {
- return true;
- }
-
- max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
- return (extent_in_dss_helper(addr_a, max) ==
- extent_in_dss_helper(addr_b, max));
-}
-
-void
-extent_dss_boot(void) {
- cassert(have_dss);
-
- dss_base = extent_dss_sbrk(0);
- atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
- atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
- atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
-}
-
-/******************************************************************************/
+#define JEMALLOC_EXTENT_DSS_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/spin.h"
+
+/******************************************************************************/
+/* Data. */
+
+const char *opt_dss = DSS_DEFAULT;
+
+const char *dss_prec_names[] = {
+ "disabled",
+ "primary",
+ "secondary",
+ "N/A"
+};
+
+/*
+ * Current dss precedence default, used when creating new arenas. NB: This is
+ * stored as unsigned rather than dss_prec_t because in principle there's no
+ * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
+ * atomic operations to synchronize the setting.
+ */
+static atomic_u_t dss_prec_default = ATOMIC_INIT(
+ (unsigned)DSS_PREC_DEFAULT);
+
+/* Base address of the DSS. */
+static void *dss_base;
+/* Atomic boolean indicating whether a thread is currently extending DSS. */
+static atomic_b_t dss_extending;
+/* Atomic boolean indicating whether the DSS is exhausted. */
+static atomic_b_t dss_exhausted;
+/* Atomic current upper limit on DSS addresses. */
+static atomic_p_t dss_max;
+
+/******************************************************************************/
+
+static void *
+extent_dss_sbrk(intptr_t increment) {
+#ifdef JEMALLOC_DSS
+ return sbrk(increment);
+#else
+ not_implemented();
+ return NULL;
+#endif
+}
+
+dss_prec_t
+extent_dss_prec_get(void) {
+ dss_prec_t ret;
+
+ if (!have_dss) {
+ return dss_prec_disabled;
+ }
+ ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
+ return ret;
+}
+
+bool
+extent_dss_prec_set(dss_prec_t dss_prec) {
+ if (!have_dss) {
+ return (dss_prec != dss_prec_disabled);
+ }
+ atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
+ return false;
+}
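+
+/*
+ * Usage sketch (the option plumbing lives outside this file): the precedence
+ * set here corresponds to jemalloc's "dss" option, so preferring sbrk()-backed
+ * extents over mmap() is typically expressed as
+ *
+ *	MALLOC_CONF="dss:primary" ./prog
+ *
+ * or per arena via the "arena.<i>.dss" mallctl, using the names listed in
+ * dss_prec_names above.
+ */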
+
+static void
+extent_dss_extending_start(void) {
+ spin_t spinner = SPIN_INITIALIZER;
+ while (true) {
+ bool expected = false;
+ if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
+ true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
+ break;
+ }
+ spin_adaptive(&spinner);
+ }
+}
+
+static void
+extent_dss_extending_finish(void) {
+ assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
+
+ atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
+}
+
+static void *
+extent_dss_max_update(void *new_addr) {
+ /*
+ * Get the current end of the DSS as max_cur and assure that dss_max is
+ * up to date.
+ */
+ void *max_cur = extent_dss_sbrk(0);
+ if (max_cur == (void *)-1) {
+ return NULL;
+ }
+ atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
+ /* Fixed new_addr can only be supported if it is at the edge of DSS. */
+ if (new_addr != NULL && max_cur != new_addr) {
+ return NULL;
+ }
+ return max_cur;
+}
+
+void *
+extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit) {
+ extent_t *gap;
+
+ cassert(have_dss);
+ assert(size > 0);
+ assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
+
+ /*
+ * sbrk() uses a signed increment argument, so take care not to
+ * interpret a large allocation request as a negative increment.
+ */
+ if ((intptr_t)size < 0) {
+ return NULL;
+ }
+
+ gap = extent_alloc(tsdn, arena);
+ if (gap == NULL) {
+ return NULL;
+ }
+
+ extent_dss_extending_start();
+ if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
+ /*
+ * The loop is necessary to recover from races with other
+ * threads that are using the DSS for something other than
+ * malloc.
+ */
+ while (true) {
+ void *max_cur = extent_dss_max_update(new_addr);
+ if (max_cur == NULL) {
+ goto label_oom;
+ }
+
+ /*
+ * Compute how much page-aligned gap space (if any) is
+ * necessary to satisfy alignment. This space can be
+ * recycled for later use.
+ */
+ void *gap_addr_page = (void *)(PAGE_CEILING(
+ (uintptr_t)max_cur));
+ void *ret = (void *)ALIGNMENT_CEILING(
+ (uintptr_t)gap_addr_page, alignment);
+ size_t gap_size_page = (uintptr_t)ret -
+ (uintptr_t)gap_addr_page;
+ if (gap_size_page != 0) {
+ extent_init(gap, arena, gap_addr_page,
+ gap_size_page, false, SC_NSIZES,
+ arena_extent_sn_next(arena),
+ extent_state_active, false, true, true,
+ EXTENT_NOT_HEAD);
+ }
+ /*
+ * Compute the address just past the end of the desired
+ * allocation space.
+ */
+ void *dss_next = (void *)((uintptr_t)ret + size);
+ if ((uintptr_t)ret < (uintptr_t)max_cur ||
+ (uintptr_t)dss_next < (uintptr_t)max_cur) {
+ goto label_oom; /* Wrap-around. */
+ }
+ /* Compute the increment, including subpage bytes. */
+ void *gap_addr_subpage = max_cur;
+ size_t gap_size_subpage = (uintptr_t)ret -
+ (uintptr_t)gap_addr_subpage;
+ intptr_t incr = gap_size_subpage + size;
+
+ assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
+ size);
+
+ /* Try to allocate. */
+ void *dss_prev = extent_dss_sbrk(incr);
+ if (dss_prev == max_cur) {
+ /* Success. */
+ atomic_store_p(&dss_max, dss_next,
+ ATOMIC_RELEASE);
+ extent_dss_extending_finish();
+
+ if (gap_size_page != 0) {
+ extent_dalloc_gap(tsdn, arena, gap);
+ } else {
+ extent_dalloc(tsdn, arena, gap);
+ }
+ if (!*commit) {
+ *commit = pages_decommit(ret, size);
+ }
+ if (*zero && *commit) {
+ extent_hooks_t *extent_hooks =
+ EXTENT_HOOKS_INITIALIZER;
+ extent_t extent;
+
+ extent_init(&extent, arena, ret, size,
+ size, false, SC_NSIZES,
+ extent_state_active, false, true,
+ true, EXTENT_NOT_HEAD);
+ if (extent_purge_forced_wrapper(tsdn,
+ arena, &extent_hooks, &extent, 0,
+ size)) {
+ memset(ret, 0, size);
+ }
+ }
+ return ret;
+ }
+ /*
+ * Failure, whether due to OOM or a race with a raw
+ * sbrk() call from outside the allocator.
+ */
+ if (dss_prev == (void *)-1) {
+ /* OOM. */
+ atomic_store_b(&dss_exhausted, true,
+ ATOMIC_RELEASE);
+ goto label_oom;
+ }
+ }
+ }
+label_oom:
+ extent_dss_extending_finish();
+ extent_dalloc(tsdn, arena, gap);
+ return NULL;
+}
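+
+/*
+ * A worked example of the gap arithmetic above, assuming 4 KiB pages and a
+ * 64 KiB alignment request (addresses are illustrative):
+ *
+ *	max_cur       = 0x10000100  current break, not page-aligned
+ *	gap_addr_page = 0x10001000  PAGE_CEILING(max_cur)
+ *	ret           = 0x10010000  ALIGNMENT_CEILING(gap_addr_page, 64 KiB)
+ *	gap_size_page = 0x0000f000  ret - gap_addr_page, recycled as 'gap'
+ *	incr          = 0x0000ff00 + size  ret - max_cur, the sbrk() increment
+ *
+ * The sub-page bytes between max_cur and gap_addr_page are simply skipped,
+ * while the page-aligned gap is handed back through extent_dalloc_gap().
+ */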
+
+static bool
+extent_in_dss_helper(void *addr, void *max) {
+ return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
+ (uintptr_t)max);
+}
+
+bool
+extent_in_dss(void *addr) {
+ cassert(have_dss);
+
+ return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
+ ATOMIC_ACQUIRE));
+}
+
+bool
+extent_dss_mergeable(void *addr_a, void *addr_b) {
+ void *max;
+
+ cassert(have_dss);
+
+ if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
+ (uintptr_t)dss_base) {
+ return true;
+ }
+
+ max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
+ return (extent_in_dss_helper(addr_a, max) ==
+ extent_in_dss_helper(addr_b, max));
+}
+
+void
+extent_dss_boot(void) {
+ cassert(have_dss);
+
+ dss_base = extent_dss_sbrk(0);
+ atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
+ atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
+ atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
+}
+
+/******************************************************************************/
diff --git a/contrib/libs/jemalloc/src/extent_mmap.c b/contrib/libs/jemalloc/src/extent_mmap.c
index 17fd1c8f95..ca42b1353b 100644
--- a/contrib/libs/jemalloc/src/extent_mmap.c
+++ b/contrib/libs/jemalloc/src/extent_mmap.c
@@ -1,42 +1,42 @@
-#define JEMALLOC_EXTENT_MMAP_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/extent_mmap.h"
-
-/******************************************************************************/
-/* Data. */
-
-bool opt_retain =
-#ifdef JEMALLOC_RETAIN
- true
-#else
- false
-#endif
- ;
-
-/******************************************************************************/
-
-void *
-extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
- bool *commit) {
- assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
- void *ret = pages_map(new_addr, size, alignment, commit);
- if (ret == NULL) {
- return NULL;
- }
- assert(ret != NULL);
- if (*commit) {
- *zero = true;
- }
- return ret;
-}
-
-bool
-extent_dalloc_mmap(void *addr, size_t size) {
- if (!opt_retain) {
- pages_unmap(addr, size);
- }
- return opt_retain;
-}
+#define JEMALLOC_EXTENT_MMAP_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_mmap.h"
+
+/******************************************************************************/
+/* Data. */
+
+bool opt_retain =
+#ifdef JEMALLOC_RETAIN
+ true
+#else
+ false
+#endif
+ ;
+
+/******************************************************************************/
+
+void *
+extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit) {
+ assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
+ void *ret = pages_map(new_addr, size, alignment, commit);
+ if (ret == NULL) {
+ return NULL;
+ }
+ assert(ret != NULL);
+ if (*commit) {
+ *zero = true;
+ }
+ return ret;
+}
+
+bool
+extent_dalloc_mmap(void *addr, size_t size) {
+ if (!opt_retain) {
+ pages_unmap(addr, size);
+ }
+ return opt_retain;
+}
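+
+/*
+ * Usage sketch: opt_retain above backs jemalloc's "retain" option, so with the
+ * default hooks extent_dalloc_mmap() only ever unmaps when retention is
+ * disabled, e.g.
+ *
+ *	MALLOC_CONF="retain:false" ./prog
+ *
+ * With retention enabled (the JEMALLOC_RETAIN build default on most 64-bit
+ * Linux configurations), unused ranges stay mapped and are parked in the
+ * arena's retained extents for reuse instead of being returned to the kernel.
+ */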
diff --git a/contrib/libs/jemalloc/src/hash.c b/contrib/libs/jemalloc/src/hash.c
index 7b2bdc2bd6..5b7f7cf3d7 100644
--- a/contrib/libs/jemalloc/src/hash.c
+++ b/contrib/libs/jemalloc/src/hash.c
@@ -1,3 +1,3 @@
-#define JEMALLOC_HASH_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
+#define JEMALLOC_HASH_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/contrib/libs/jemalloc/src/hook.c b/contrib/libs/jemalloc/src/hook.c
index 9ac703cf9f..5ab979f59b 100644
--- a/contrib/libs/jemalloc/src/hook.c
+++ b/contrib/libs/jemalloc/src/hook.c
@@ -1,195 +1,195 @@
-#include "jemalloc/internal/jemalloc_preamble.h"
-
-#include "jemalloc/internal/hook.h"
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/seq.h"
-
-typedef struct hooks_internal_s hooks_internal_t;
-struct hooks_internal_s {
- hooks_t hooks;
- bool in_use;
-};
-
-seq_define(hooks_internal_t, hooks)
-
-static atomic_u_t nhooks = ATOMIC_INIT(0);
-static seq_hooks_t hooks[HOOK_MAX];
-static malloc_mutex_t hooks_mu;
-
-bool
-hook_boot() {
- return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
- malloc_mutex_rank_exclusive);
-}
-
-static void *
-hook_install_locked(hooks_t *to_install) {
- hooks_internal_t hooks_internal;
- for (int i = 0; i < HOOK_MAX; i++) {
- bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
- /* We hold mu; no concurrent access. */
- assert(success);
- if (!hooks_internal.in_use) {
- hooks_internal.hooks = *to_install;
- hooks_internal.in_use = true;
- seq_store_hooks(&hooks[i], &hooks_internal);
- atomic_store_u(&nhooks,
- atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
- ATOMIC_RELAXED);
- return &hooks[i];
- }
- }
- return NULL;
-}
-
-void *
-hook_install(tsdn_t *tsdn, hooks_t *to_install) {
- malloc_mutex_lock(tsdn, &hooks_mu);
- void *ret = hook_install_locked(to_install);
- if (ret != NULL) {
- tsd_global_slow_inc(tsdn);
- }
- malloc_mutex_unlock(tsdn, &hooks_mu);
- return ret;
-}
-
-static void
-hook_remove_locked(seq_hooks_t *to_remove) {
- hooks_internal_t hooks_internal;
- bool success = seq_try_load_hooks(&hooks_internal, to_remove);
- /* We hold mu; no concurrent access. */
- assert(success);
- /* Should only remove hooks that were added. */
- assert(hooks_internal.in_use);
- hooks_internal.in_use = false;
- seq_store_hooks(to_remove, &hooks_internal);
- atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
- ATOMIC_RELAXED);
-}
-
-void
-hook_remove(tsdn_t *tsdn, void *opaque) {
- if (config_debug) {
- char *hooks_begin = (char *)&hooks[0];
- char *hooks_end = (char *)&hooks[HOOK_MAX];
- char *hook = (char *)opaque;
- assert(hooks_begin <= hook && hook < hooks_end
- && (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
- }
- malloc_mutex_lock(tsdn, &hooks_mu);
- hook_remove_locked((seq_hooks_t *)opaque);
- tsd_global_slow_dec(tsdn);
- malloc_mutex_unlock(tsdn, &hooks_mu);
-}
-
-#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \
-for (int for_each_hook_counter = 0; \
- for_each_hook_counter < HOOK_MAX; \
- for_each_hook_counter++) { \
- bool for_each_hook_success = seq_try_load_hooks( \
- (hooks_internal_ptr), &hooks[for_each_hook_counter]); \
- if (!for_each_hook_success) { \
- continue; \
- } \
- if (!(hooks_internal_ptr)->in_use) { \
- continue; \
- }
-#define FOR_EACH_HOOK_END \
-}
-
-static bool *
-hook_reentrantp() {
- /*
- * We prevent user reentrancy within hooks. This is basically just a
- * thread-local bool that triggers an early-exit.
- *
- * We don't fold in_hook into reentrancy. There are two reasons for
- * this:
- * - Right now, we turn on reentrancy during things like extent hook
- * execution. Allocating during extent hooks is not officially
- * supported, but we don't want to break it for the time being. These
- * sorts of allocations should probably still be hooked, though.
- * - If a hook allocates, we may want it to be relatively fast (after
- * all, it executes on every allocator operation). Turning on
- * reentrancy is a fairly heavyweight mode (disabling tcache,
- * redirecting to arena 0, etc.). It's possible we may one day want
- * to turn on reentrant mode here, if it proves too difficult to keep
- * this working. But that's fairly easy for us to see; OTOH, people
- * not using hooks because they're too slow is easy for us to miss.
- *
- * The tricky part is
- * that this code might get invoked even if we don't have access to tsd.
- * This function mimics getting a pointer to thread-local data, except
- * that it might secretly return a pointer to some global data if we
- * know that the caller will take the early-exit path.
- * If we return a bool that indicates that we are reentrant, then the
- * caller will go down the early exit path, leaving the global
- * untouched.
- */
- static bool in_hook_global = true;
- tsdn_t *tsdn = tsdn_fetch();
- tcache_t *tcache = tsdn_tcachep_get(tsdn);
- if (tcache != NULL) {
- return &tcache->in_hook;
- }
- return &in_hook_global;
-}
-
-#define HOOK_PROLOGUE \
- if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
- return; \
- } \
- bool *in_hook = hook_reentrantp(); \
- if (*in_hook) { \
- return; \
- } \
- *in_hook = true;
-
-#define HOOK_EPILOGUE \
- *in_hook = false;
-
-void
-hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
- uintptr_t args_raw[3]) {
- HOOK_PROLOGUE
-
- hooks_internal_t hook;
- FOR_EACH_HOOK_BEGIN(&hook)
- hook_alloc h = hook.hooks.alloc_hook;
- if (h != NULL) {
- h(hook.hooks.extra, type, result, result_raw, args_raw);
- }
- FOR_EACH_HOOK_END
-
- HOOK_EPILOGUE
-}
-
-void
-hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
- HOOK_PROLOGUE
- hooks_internal_t hook;
- FOR_EACH_HOOK_BEGIN(&hook)
- hook_dalloc h = hook.hooks.dalloc_hook;
- if (h != NULL) {
- h(hook.hooks.extra, type, address, args_raw);
- }
- FOR_EACH_HOOK_END
- HOOK_EPILOGUE
-}
-
-void
-hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
- size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
- HOOK_PROLOGUE
- hooks_internal_t hook;
- FOR_EACH_HOOK_BEGIN(&hook)
- hook_expand h = hook.hooks.expand_hook;
- if (h != NULL) {
- h(hook.hooks.extra, type, address, old_usize, new_usize,
- result_raw, args_raw);
- }
- FOR_EACH_HOOK_END
- HOOK_EPILOGUE
-}
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+#include "jemalloc/internal/hook.h"
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/seq.h"
+
+typedef struct hooks_internal_s hooks_internal_t;
+struct hooks_internal_s {
+ hooks_t hooks;
+ bool in_use;
+};
+
+seq_define(hooks_internal_t, hooks)
+
+static atomic_u_t nhooks = ATOMIC_INIT(0);
+static seq_hooks_t hooks[HOOK_MAX];
+static malloc_mutex_t hooks_mu;
+
+bool
+hook_boot() {
+ return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
+ malloc_mutex_rank_exclusive);
+}
+
+static void *
+hook_install_locked(hooks_t *to_install) {
+ hooks_internal_t hooks_internal;
+ for (int i = 0; i < HOOK_MAX; i++) {
+ bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
+ /* We hold mu; no concurrent access. */
+ assert(success);
+ if (!hooks_internal.in_use) {
+ hooks_internal.hooks = *to_install;
+ hooks_internal.in_use = true;
+ seq_store_hooks(&hooks[i], &hooks_internal);
+ atomic_store_u(&nhooks,
+ atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
+ ATOMIC_RELAXED);
+ return &hooks[i];
+ }
+ }
+ return NULL;
+}
+
+void *
+hook_install(tsdn_t *tsdn, hooks_t *to_install) {
+ malloc_mutex_lock(tsdn, &hooks_mu);
+ void *ret = hook_install_locked(to_install);
+ if (ret != NULL) {
+ tsd_global_slow_inc(tsdn);
+ }
+ malloc_mutex_unlock(tsdn, &hooks_mu);
+ return ret;
+}
+
+static void
+hook_remove_locked(seq_hooks_t *to_remove) {
+ hooks_internal_t hooks_internal;
+ bool success = seq_try_load_hooks(&hooks_internal, to_remove);
+ /* We hold mu; no concurrent access. */
+ assert(success);
+ /* Should only remove hooks that were added. */
+ assert(hooks_internal.in_use);
+ hooks_internal.in_use = false;
+ seq_store_hooks(to_remove, &hooks_internal);
+ atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
+ ATOMIC_RELAXED);
+}
+
+void
+hook_remove(tsdn_t *tsdn, void *opaque) {
+ if (config_debug) {
+ char *hooks_begin = (char *)&hooks[0];
+ char *hooks_end = (char *)&hooks[HOOK_MAX];
+ char *hook = (char *)opaque;
+ assert(hooks_begin <= hook && hook < hooks_end
+ && (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
+ }
+ malloc_mutex_lock(tsdn, &hooks_mu);
+ hook_remove_locked((seq_hooks_t *)opaque);
+ tsd_global_slow_dec(tsdn);
+ malloc_mutex_unlock(tsdn, &hooks_mu);
+}
+
+#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \
+for (int for_each_hook_counter = 0; \
+ for_each_hook_counter < HOOK_MAX; \
+ for_each_hook_counter++) { \
+ bool for_each_hook_success = seq_try_load_hooks( \
+ (hooks_internal_ptr), &hooks[for_each_hook_counter]); \
+ if (!for_each_hook_success) { \
+ continue; \
+ } \
+ if (!(hooks_internal_ptr)->in_use) { \
+ continue; \
+ }
+#define FOR_EACH_HOOK_END \
+}
+
+static bool *
+hook_reentrantp() {
+ /*
+ * We prevent user reentrancy within hooks. This is basically just a
+ * thread-local bool that triggers an early-exit.
+ *
+ * We don't fold in_hook into reentrancy. There are two reasons for
+ * this:
+ * - Right now, we turn on reentrancy during things like extent hook
+ * execution. Allocating during extent hooks is not officially
+ * supported, but we don't want to break it for the time being. These
+ * sorts of allocations should probably still be hooked, though.
+ * - If a hook allocates, we may want it to be relatively fast (after
+ * all, it executes on every allocator operation). Turning on
+ * reentrancy is a fairly heavyweight mode (disabling tcache,
+ * redirecting to arena 0, etc.). It's possible we may one day want
+ * to turn on reentrant mode here, if it proves too difficult to keep
+ * this working. But that's fairly easy for us to see; OTOH, people
+ * not using hooks because they're too slow is easy for us to miss.
+ *
+ * The tricky part is
+ * that this code might get invoked even if we don't have access to tsd.
+ * This function mimics getting a pointer to thread-local data, except
+ * that it might secretly return a pointer to some global data if we
+ * know that the caller will take the early-exit path.
+ * If we return a bool that indicates that we are reentrant, then the
+ * caller will go down the early exit path, leaving the global
+ * untouched.
+ */
+ static bool in_hook_global = true;
+ tsdn_t *tsdn = tsdn_fetch();
+ tcache_t *tcache = tsdn_tcachep_get(tsdn);
+ if (tcache != NULL) {
+ return &tcache->in_hook;
+ }
+ return &in_hook_global;
+}
+
+#define HOOK_PROLOGUE \
+ if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
+ return; \
+ } \
+ bool *in_hook = hook_reentrantp(); \
+ if (*in_hook) { \
+ return; \
+ } \
+ *in_hook = true;
+
+#define HOOK_EPILOGUE \
+ *in_hook = false;
+
+void
+hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
+ uintptr_t args_raw[3]) {
+ HOOK_PROLOGUE
+
+ hooks_internal_t hook;
+ FOR_EACH_HOOK_BEGIN(&hook)
+ hook_alloc h = hook.hooks.alloc_hook;
+ if (h != NULL) {
+ h(hook.hooks.extra, type, result, result_raw, args_raw);
+ }
+ FOR_EACH_HOOK_END
+
+ HOOK_EPILOGUE
+}
+
+void
+hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
+ HOOK_PROLOGUE
+ hooks_internal_t hook;
+ FOR_EACH_HOOK_BEGIN(&hook)
+ hook_dalloc h = hook.hooks.dalloc_hook;
+ if (h != NULL) {
+ h(hook.hooks.extra, type, address, args_raw);
+ }
+ FOR_EACH_HOOK_END
+ HOOK_EPILOGUE
+}
+
+void
+hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
+ size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
+ HOOK_PROLOGUE
+ hooks_internal_t hook;
+ FOR_EACH_HOOK_BEGIN(&hook)
+ hook_expand h = hook.hooks.expand_hook;
+ if (h != NULL) {
+ h(hook.hooks.extra, type, address, old_usize, new_usize,
+ result_raw, args_raw);
+ }
+ FOR_EACH_HOOK_END
+ HOOK_EPILOGUE
+}
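+
+/*
+ * A minimal installation sketch using only the symbols defined above (the
+ * hooks_t field names and callback signatures mirror the invocations in
+ * hook_invoke_*; anything beyond that, such as the public mallctl entry point,
+ * is not defined here):
+ *
+ *	static void
+ *	my_alloc_hook(void *extra, hook_alloc_t type, void *result,
+ *	    uintptr_t result_raw, uintptr_t args_raw[3]) {
+ *		// observe the allocation; keep this fast and non-reentrant
+ *	}
+ *
+ *	hooks_t h = {0};
+ *	h.alloc_hook = my_alloc_hook;	// dalloc_hook / expand_hook may be NULL
+ *	void *handle = hook_install(tsdn, &h);
+ *	...
+ *	hook_remove(tsdn, handle);
+ */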
diff --git a/contrib/libs/jemalloc/src/jemalloc.c b/contrib/libs/jemalloc/src/jemalloc.c
index 63ff26b656..94d3c6da71 100644
--- a/contrib/libs/jemalloc/src/jemalloc.c
+++ b/contrib/libs/jemalloc/src/jemalloc.c
@@ -1,34 +1,34 @@
-#define JEMALLOC_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/extent_dss.h"
-#include "jemalloc/internal/extent_mmap.h"
-#include "jemalloc/internal/hook.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/log.h"
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/safety_check.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/spin.h"
-#include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/ticker.h"
-#include "jemalloc/internal/util.h"
-
+#define JEMALLOC_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/hook.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/log.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/safety_check.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/util.h"
+
/******************************************************************************/
/* Data. */
/* Runtime configuration options. */
-const char *je_malloc_conf
-#ifndef _WIN32
- JEMALLOC_ATTR(weak)
-#endif
- ;
+const char *je_malloc_conf
+#ifndef _WIN32
+ JEMALLOC_ATTR(weak)
+#endif
+ ;
bool opt_abort =
#ifdef JEMALLOC_DEBUG
true
@@ -36,85 +36,85 @@ bool opt_abort =
false
#endif
;
-bool opt_abort_conf =
-#ifdef JEMALLOC_DEBUG
- true
-#else
- false
-#endif
- ;
-/* Intentionally default off, even with debug builds. */
-bool opt_confirm_conf = false;
-const char *opt_junk =
-#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
- "true"
-#else
- "false"
-#endif
- ;
-bool opt_junk_alloc =
-#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
- true
-#else
- false
-#endif
- ;
-bool opt_junk_free =
+bool opt_abort_conf =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+/* Intentionally default off, even with debug builds. */
+bool opt_confirm_conf = false;
+const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ "true"
+#else
+ "false"
+#endif
+ ;
+bool opt_junk_alloc =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
true
#else
false
#endif
;
-
+bool opt_junk_free =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
+#else
+ false
+#endif
+ ;
+
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
-unsigned opt_narenas = 0;
+unsigned opt_narenas = 0;
unsigned ncpus;
-/* Protects arenas initialization. */
-malloc_mutex_t arenas_lock;
-/*
- * Arenas that are used to service external requests. Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- *
- * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
- * arenas. arenas[narenas_auto..narenas_total) are only used if the application
- * takes some action to create them and allocate from them.
- *
- * Points to an arena_t.
- */
-JEMALLOC_ALIGNED(CACHELINE)
-atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
-static atomic_u_t narenas_total; /* Use narenas_total_*(). */
-/* Below three are read-only after initialization. */
-static arena_t *a0; /* arenas[0]. */
+/* Protects arenas initialization. */
+malloc_mutex_t arenas_lock;
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas. arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
+ *
+ * Points to an arena_t.
+ */
+JEMALLOC_ALIGNED(CACHELINE)
+atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
+static atomic_u_t narenas_total; /* Use narenas_total_*(). */
+/* Below three are read-only after initialization. */
+static arena_t *a0; /* arenas[0]. */
unsigned narenas_auto;
-unsigned manual_arena_base;
-
-typedef enum {
- malloc_init_uninitialized = 3,
- malloc_init_a0_initialized = 2,
- malloc_init_recursible = 1,
- malloc_init_initialized = 0 /* Common case --> jnz. */
-} malloc_init_t;
-static malloc_init_t malloc_init_state = malloc_init_uninitialized;
-
-/* False should be the common case. Set to true to trigger initialization. */
-bool malloc_slow = true;
-
-/* When malloc_slow is true, set the corresponding bits for sanity check. */
-enum {
- flag_opt_junk_alloc = (1U),
- flag_opt_junk_free = (1U << 1),
- flag_opt_zero = (1U << 2),
- flag_opt_utrace = (1U << 3),
- flag_opt_xmalloc = (1U << 4)
-};
-static uint8_t malloc_slow_flags;
-
+unsigned manual_arena_base;
+
+typedef enum {
+ malloc_init_uninitialized = 3,
+ malloc_init_a0_initialized = 2,
+ malloc_init_recursible = 1,
+ malloc_init_initialized = 0 /* Common case --> jnz. */
+} malloc_init_t;
+static malloc_init_t malloc_init_state = malloc_init_uninitialized;
+
+/* False should be the common case. Set to true to trigger initialization. */
+bool malloc_slow = true;
+
+/* When malloc_slow is true, set the corresponding bits for sanity check. */
+enum {
+ flag_opt_junk_alloc = (1U),
+ flag_opt_junk_free = (1U << 1),
+ flag_opt_zero = (1U << 2),
+ flag_opt_utrace = (1U << 3),
+ flag_opt_xmalloc = (1U << 4)
+};
+static uint8_t malloc_slow_flags;
+
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
@@ -130,30 +130,30 @@ static bool malloc_initializer = NO_INITIALIZER;
/* Used to avoid initialization races. */
#ifdef _WIN32
-#if _WIN32_WINNT >= 0x0600
-static malloc_mutex_t init_lock = SRWLOCK_INIT;
-#else
+#if _WIN32_WINNT >= 0x0600
+static malloc_mutex_t init_lock = SRWLOCK_INIT;
+#else
static malloc_mutex_t init_lock;
-static bool init_lock_initialized = false;
+static bool init_lock_initialized = false;
JEMALLOC_ATTR(constructor)
static void WINAPI
-_init_init_lock(void) {
- /*
- * If another constructor in the same binary is using mallctl to e.g.
- * set up extent hooks, it may end up running before this one, and
- * malloc_init_hard will crash trying to lock the uninitialized lock. So
- * we force an initialization of the lock in malloc_init_hard as well.
- * We don't try to care about atomicity of the accessed to the
- * init_lock_initialized boolean, since it really only matters early in
- * the process creation, before any separate thread normally starts
- * doing anything.
- */
- if (!init_lock_initialized) {
- malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
- malloc_mutex_rank_exclusive);
- }
- init_lock_initialized = true;
+_init_init_lock(void) {
+ /*
+ * If another constructor in the same binary is using mallctl to e.g.
+ * set up extent hooks, it may end up running before this one, and
+ * malloc_init_hard will crash trying to lock the uninitialized lock. So
+ * we force an initialization of the lock in malloc_init_hard as well.
+ * We don't try to care about atomicity of the accessed to the
+ * init_lock_initialized boolean, since it really only matters early in
+ * the process creation, before any separate thread normally starts
+ * doing anything.
+ */
+ if (!init_lock_initialized) {
+ malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
+ malloc_mutex_rank_exclusive);
+ }
+ init_lock_initialized = true;
}
#ifdef _MSC_VER
@@ -161,7 +161,7 @@ _init_init_lock(void) {
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
-#endif
+#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
@@ -174,7 +174,7 @@ typedef struct {
#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
- if (unlikely(opt_utrace)) { \
+ if (unlikely(opt_utrace)) { \
int utrace_serrno = errno; \
malloc_utrace_t ut; \
ut.p = (a); \
@@ -188,16 +188,16 @@ typedef struct {
# define UTRACE(a, b, c)
#endif
-/* Whether encountered any invalid config options. */
-static bool had_conf_error = false;
-
+/* Whether encountered any invalid config options. */
+static bool had_conf_error = false;
+
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
-static bool malloc_init_hard_a0(void);
+static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);
/******************************************************************************/
@@ -205,345 +205,345 @@ static bool malloc_init_hard(void);
* Begin miscellaneous support functions.
*/
-bool
-malloc_initialized(void) {
- return (malloc_init_state == malloc_init_initialized);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-malloc_init_a0(void) {
- if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
- return malloc_init_hard_a0();
- }
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-malloc_init(void) {
- if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
- return true;
- }
- return false;
-}
-
-/*
- * The a0*() functions are used instead of i{d,}alloc() in situations that
- * cannot tolerate TLS variable access.
- */
-
-static void *
-a0ialloc(size_t size, bool zero, bool is_internal) {
- if (unlikely(malloc_init_a0())) {
- return NULL;
- }
-
- return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
- is_internal, arena_get(TSDN_NULL, 0, true), true);
-}
-
-static void
-a0idalloc(void *ptr, bool is_internal) {
- idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
-}
-
-void *
-a0malloc(size_t size) {
- return a0ialloc(size, false, true);
-}
-
-void
-a0dalloc(void *ptr) {
- a0idalloc(ptr, true);
-}
-
-/*
- * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive
- * situations that cannot tolerate TLS variable access (TLS allocation and very
- * early internal data structure initialization).
- */
-
-void *
-bootstrap_malloc(size_t size) {
- if (unlikely(size == 0)) {
- size = 1;
- }
-
- return a0ialloc(size, false, false);
-}
-
-void *
-bootstrap_calloc(size_t num, size_t size) {
- size_t num_size;
-
- num_size = num * size;
- if (unlikely(num_size == 0)) {
- assert(num == 0 || size == 0);
- num_size = 1;
- }
-
- return a0ialloc(num_size, true, false);
-}
-
-void
-bootstrap_free(void *ptr) {
- if (unlikely(ptr == NULL)) {
- return;
- }
-
- a0idalloc(ptr, false);
-}
-
-void
-arena_set(unsigned ind, arena_t *arena) {
- atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
-}
-
-static void
-narenas_total_set(unsigned narenas) {
- atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
-}
-
-static void
-narenas_total_inc(void) {
- atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
-}
-
-unsigned
-narenas_total_get(void) {
- return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
-}
-
+bool
+malloc_initialized(void) {
+ return (malloc_init_state == malloc_init_initialized);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+malloc_init_a0(void) {
+ if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
+ return malloc_init_hard_a0();
+ }
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+malloc_init(void) {
+ if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
+ return true;
+ }
+ return false;
+}
+
+/*
+ * The a0*() functions are used instead of i{d,}alloc() in situations that
+ * cannot tolerate TLS variable access.
+ */
+
+static void *
+a0ialloc(size_t size, bool zero, bool is_internal) {
+ if (unlikely(malloc_init_a0())) {
+ return NULL;
+ }
+
+ return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
+ is_internal, arena_get(TSDN_NULL, 0, true), true);
+}
+
+static void
+a0idalloc(void *ptr, bool is_internal) {
+ idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
+}
+
+void *
+a0malloc(size_t size) {
+ return a0ialloc(size, false, true);
+}
+
+void
+a0dalloc(void *ptr) {
+ a0idalloc(ptr, true);
+}
+
+/*
+ * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive
+ * situations that cannot tolerate TLS variable access (TLS allocation and very
+ * early internal data structure initialization).
+ */
+
+void *
+bootstrap_malloc(size_t size) {
+ if (unlikely(size == 0)) {
+ size = 1;
+ }
+
+ return a0ialloc(size, false, false);
+}
+
+void *
+bootstrap_calloc(size_t num, size_t size) {
+ size_t num_size;
+
+ num_size = num * size;
+ if (unlikely(num_size == 0)) {
+ assert(num == 0 || size == 0);
+ num_size = 1;
+ }
+
+ return a0ialloc(num_size, true, false);
+}
+
+void
+bootstrap_free(void *ptr) {
+ if (unlikely(ptr == NULL)) {
+ return;
+ }
+
+ a0idalloc(ptr, false);
+}
+
+void
+arena_set(unsigned ind, arena_t *arena) {
+ atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
+}
+
+static void
+narenas_total_set(unsigned narenas) {
+ atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
+}
+
+static void
+narenas_total_inc(void) {
+ atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
+}
+
+unsigned
+narenas_total_get(void) {
+ return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
+}
+
/* Create a new arena and insert it into the arenas array at index ind. */
-static arena_t *
-arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
- arena_t *arena;
-
- assert(ind <= narenas_total_get());
- if (ind >= MALLOCX_ARENA_LIMIT) {
- return NULL;
- }
- if (ind == narenas_total_get()) {
- narenas_total_inc();
- }
-
- /*
- * Another thread may have already initialized arenas[ind] if it's an
- * auto arena.
- */
- arena = arena_get(tsdn, ind, false);
- if (arena != NULL) {
- assert(arena_is_auto(arena));
- return arena;
- }
-
- /* Actually initialize the arena. */
- arena = arena_new(tsdn, ind, extent_hooks);
-
- return arena;
-}
-
-static void
-arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
- if (ind == 0) {
- return;
- }
- /*
- * Avoid creating a new background thread just for the huge arena, which
- * purges eagerly by default.
- */
- if (have_background_thread && !arena_is_huge(ind)) {
- if (background_thread_create(tsdn_tsd(tsdn), ind)) {
- malloc_printf("<jemalloc>: error in background thread "
- "creation for arena %u. Abort.\n", ind);
- abort();
- }
- }
-}
-
+static arena_t *
+arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+ arena_t *arena;
+
+ assert(ind <= narenas_total_get());
+ if (ind >= MALLOCX_ARENA_LIMIT) {
+ return NULL;
+ }
+ if (ind == narenas_total_get()) {
+ narenas_total_inc();
+ }
+
+ /*
+ * Another thread may have already initialized arenas[ind] if it's an
+ * auto arena.
+ */
+ arena = arena_get(tsdn, ind, false);
+ if (arena != NULL) {
+ assert(arena_is_auto(arena));
+ return arena;
+ }
+
+ /* Actually initialize the arena. */
+ arena = arena_new(tsdn, ind, extent_hooks);
+
+ return arena;
+}
+
+static void
+arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
+ if (ind == 0) {
+ return;
+ }
+ /*
+ * Avoid creating a new background thread just for the huge arena, which
+ * purges eagerly by default.
+ */
+ if (have_background_thread && !arena_is_huge(ind)) {
+ if (background_thread_create(tsdn_tsd(tsdn), ind)) {
+ malloc_printf("<jemalloc>: error in background thread "
+ "creation for arena %u. Abort.\n", ind);
+ abort();
+ }
+ }
+}
+
arena_t *
-arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
- arena_t *arena;
-
- malloc_mutex_lock(tsdn, &arenas_lock);
- arena = arena_init_locked(tsdn, ind, extent_hooks);
- malloc_mutex_unlock(tsdn, &arenas_lock);
-
- arena_new_create_background_thread(tsdn, ind);
-
- return arena;
-}
-
-static void
-arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
- arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
- arena_nthreads_inc(arena, internal);
-
- if (internal) {
- tsd_iarena_set(tsd, arena);
- } else {
- tsd_arena_set(tsd, arena);
- unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1,
- ATOMIC_RELAXED);
- tsd_binshards_t *bins = tsd_binshardsp_get(tsd);
- for (unsigned i = 0; i < SC_NBINS; i++) {
- assert(bin_infos[i].n_shards > 0 &&
- bin_infos[i].n_shards <= BIN_SHARDS_MAX);
- bins->binshard[i] = shard % bin_infos[i].n_shards;
- }
- }
-}
-
-void
-arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
- arena_t *oldarena, *newarena;
-
- oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
- newarena = arena_get(tsd_tsdn(tsd), newind, false);
- arena_nthreads_dec(oldarena, false);
- arena_nthreads_inc(newarena, false);
- tsd_arena_set(tsd, newarena);
-}
-
-static void
-arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
- arena_t *arena;
-
- arena = arena_get(tsd_tsdn(tsd), ind, false);
- arena_nthreads_dec(arena, internal);
-
- if (internal) {
- tsd_iarena_set(tsd, NULL);
- } else {
- tsd_arena_set(tsd, NULL);
- }
-}
-
-arena_tdata_t *
-arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
- arena_tdata_t *tdata, *arenas_tdata_old;
- arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
- unsigned narenas_tdata_old, i;
- unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
- unsigned narenas_actual = narenas_total_get();
-
+arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+ arena_t *arena;
+
+ malloc_mutex_lock(tsdn, &arenas_lock);
+ arena = arena_init_locked(tsdn, ind, extent_hooks);
+ malloc_mutex_unlock(tsdn, &arenas_lock);
+
+ arena_new_create_background_thread(tsdn, ind);
+
+ return arena;
+}
+
+static void
+arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
+ arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
+ arena_nthreads_inc(arena, internal);
+
+ if (internal) {
+ tsd_iarena_set(tsd, arena);
+ } else {
+ tsd_arena_set(tsd, arena);
+ unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1,
+ ATOMIC_RELAXED);
+ tsd_binshards_t *bins = tsd_binshardsp_get(tsd);
+ for (unsigned i = 0; i < SC_NBINS; i++) {
+ assert(bin_infos[i].n_shards > 0 &&
+ bin_infos[i].n_shards <= BIN_SHARDS_MAX);
+ bins->binshard[i] = shard % bin_infos[i].n_shards;
+ }
+ }
+}
+
+void
+arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
+ arena_t *oldarena, *newarena;
+
+ oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
+ newarena = arena_get(tsd_tsdn(tsd), newind, false);
+ arena_nthreads_dec(oldarena, false);
+ arena_nthreads_inc(newarena, false);
+ tsd_arena_set(tsd, newarena);
+}
+
+static void
+arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
+ arena_t *arena;
+
+ arena = arena_get(tsd_tsdn(tsd), ind, false);
+ arena_nthreads_dec(arena, internal);
+
+ if (internal) {
+ tsd_iarena_set(tsd, NULL);
+ } else {
+ tsd_arena_set(tsd, NULL);
+ }
+}
+
+arena_tdata_t *
+arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
+ arena_tdata_t *tdata, *arenas_tdata_old;
+ arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
+ unsigned narenas_tdata_old, i;
+ unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
+ unsigned narenas_actual = narenas_total_get();
+
/*
- * Dissociate old tdata array (and set up for deallocation upon return)
- * if it's too small.
+ * Dissociate old tdata array (and set up for deallocation upon return)
+ * if it's too small.
*/
- if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
- arenas_tdata_old = arenas_tdata;
- narenas_tdata_old = narenas_tdata;
- arenas_tdata = NULL;
- narenas_tdata = 0;
- tsd_arenas_tdata_set(tsd, arenas_tdata);
- tsd_narenas_tdata_set(tsd, narenas_tdata);
- } else {
- arenas_tdata_old = NULL;
- narenas_tdata_old = 0;
- }
-
- /* Allocate tdata array if it's missing. */
- if (arenas_tdata == NULL) {
- bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
- narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
-
- if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
- *arenas_tdata_bypassp = true;
- arenas_tdata = (arena_tdata_t *)a0malloc(
- sizeof(arena_tdata_t) * narenas_tdata);
- *arenas_tdata_bypassp = false;
- }
- if (arenas_tdata == NULL) {
- tdata = NULL;
- goto label_return;
- }
- assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
- tsd_arenas_tdata_set(tsd, arenas_tdata);
- tsd_narenas_tdata_set(tsd, narenas_tdata);
- }
-
- /*
- * Copy to tdata array. It's possible that the actual number of arenas
- * has increased since narenas_total_get() was called above, but that
- * causes no correctness issues unless two threads concurrently execute
- * the arenas.create mallctl, which we trust mallctl synchronization to
- * prevent.
- */
-
- /* Copy/initialize tickers. */
- for (i = 0; i < narenas_actual; i++) {
- if (i < narenas_tdata_old) {
- ticker_copy(&arenas_tdata[i].decay_ticker,
- &arenas_tdata_old[i].decay_ticker);
- } else {
- ticker_init(&arenas_tdata[i].decay_ticker,
- DECAY_NTICKS_PER_UPDATE);
- }
- }
- if (narenas_tdata > narenas_actual) {
- memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
- * (narenas_tdata - narenas_actual));
- }
-
- /* Read the refreshed tdata array. */
- tdata = &arenas_tdata[ind];
-label_return:
- if (arenas_tdata_old != NULL) {
- a0dalloc(arenas_tdata_old);
- }
- return tdata;
-}
-
-/* Slow path, called only by arena_choose(). */
+ if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
+ arenas_tdata_old = arenas_tdata;
+ narenas_tdata_old = narenas_tdata;
+ arenas_tdata = NULL;
+ narenas_tdata = 0;
+ tsd_arenas_tdata_set(tsd, arenas_tdata);
+ tsd_narenas_tdata_set(tsd, narenas_tdata);
+ } else {
+ arenas_tdata_old = NULL;
+ narenas_tdata_old = 0;
+ }
+
+ /* Allocate tdata array if it's missing. */
+ if (arenas_tdata == NULL) {
+ bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
+ narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
+
+ if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
+ *arenas_tdata_bypassp = true;
+ arenas_tdata = (arena_tdata_t *)a0malloc(
+ sizeof(arena_tdata_t) * narenas_tdata);
+ *arenas_tdata_bypassp = false;
+ }
+ if (arenas_tdata == NULL) {
+ tdata = NULL;
+ goto label_return;
+ }
+ assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
+ tsd_arenas_tdata_set(tsd, arenas_tdata);
+ tsd_narenas_tdata_set(tsd, narenas_tdata);
+ }
+
+ /*
+ * Copy to tdata array. It's possible that the actual number of arenas
+ * has increased since narenas_total_get() was called above, but that
+ * causes no correctness issues unless two threads concurrently execute
+ * the arenas.create mallctl, which we trust mallctl synchronization to
+ * prevent.
+ */
+
+ /* Copy/initialize tickers. */
+ for (i = 0; i < narenas_actual; i++) {
+ if (i < narenas_tdata_old) {
+ ticker_copy(&arenas_tdata[i].decay_ticker,
+ &arenas_tdata_old[i].decay_ticker);
+ } else {
+ ticker_init(&arenas_tdata[i].decay_ticker,
+ DECAY_NTICKS_PER_UPDATE);
+ }
+ }
+ if (narenas_tdata > narenas_actual) {
+ memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
+ * (narenas_tdata - narenas_actual));
+ }
+
+ /* Read the refreshed tdata array. */
+ tdata = &arenas_tdata[ind];
+label_return:
+ if (arenas_tdata_old != NULL) {
+ a0dalloc(arenas_tdata_old);
+ }
+ return tdata;
+}
+
+/* Slow path, called only by arena_choose(). */
arena_t *
-arena_choose_hard(tsd_t *tsd, bool internal) {
- arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
-
- if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
- unsigned choose = percpu_arena_choose();
- ret = arena_get(tsd_tsdn(tsd), choose, true);
- assert(ret != NULL);
- arena_bind(tsd, arena_ind_get(ret), false);
- arena_bind(tsd, arena_ind_get(ret), true);
-
- return ret;
- }
-
+arena_choose_hard(tsd_t *tsd, bool internal) {
+ arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+
+ if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
+ unsigned choose = percpu_arena_choose();
+ ret = arena_get(tsd_tsdn(tsd), choose, true);
+ assert(ret != NULL);
+ arena_bind(tsd, arena_ind_get(ret), false);
+ arena_bind(tsd, arena_ind_get(ret), true);
+
+ return ret;
+ }
+
if (narenas_auto > 1) {
- unsigned i, j, choose[2], first_null;
- bool is_new_arena[2];
-
- /*
- * Determine binding for both non-internal and internal
- * allocation.
- *
- * choose[0]: For application allocation.
- * choose[1]: For internal metadata allocation.
- */
-
- for (j = 0; j < 2; j++) {
- choose[j] = 0;
- is_new_arena[j] = false;
- }
-
+ unsigned i, j, choose[2], first_null;
+ bool is_new_arena[2];
+
+ /*
+ * Determine binding for both non-internal and internal
+ * allocation.
+ *
+ * choose[0]: For application allocation.
+ * choose[1]: For internal metadata allocation.
+ */
+
+ for (j = 0; j < 2; j++) {
+ choose[j] = 0;
+ is_new_arena[j] = false;
+ }
+
first_null = narenas_auto;
- malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
- assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
+ assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
for (i = 1; i < narenas_auto; i++) {
- if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
+ if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
/*
* Choose the first arena that has the lowest
* number of threads assigned to it.
*/
- for (j = 0; j < 2; j++) {
- if (arena_nthreads_get(arena_get(
- tsd_tsdn(tsd), i, false), !!j) <
- arena_nthreads_get(arena_get(
- tsd_tsdn(tsd), choose[j], false),
- !!j)) {
- choose[j] = i;
- }
- }
+ for (j = 0; j < 2; j++) {
+ if (arena_nthreads_get(arena_get(
+ tsd_tsdn(tsd), i, false), !!j) <
+ arena_nthreads_get(arena_get(
+ tsd_tsdn(tsd), choose[j], false),
+ !!j)) {
+ choose[j] = i;
+ }
+ }
} else if (first_null == narenas_auto) {
/*
* Record the index of the first uninitialized
@@ -558,99 +558,99 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
}
}
- for (j = 0; j < 2; j++) {
- if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
- choose[j], false), !!j) == 0 || first_null ==
- narenas_auto) {
- /*
- * Use an unloaded arena, or the least loaded
- * arena if all arenas are already initialized.
- */
- if (!!j == internal) {
- ret = arena_get(tsd_tsdn(tsd),
- choose[j], false);
- }
- } else {
- arena_t *arena;
-
- /* Initialize a new arena. */
- choose[j] = first_null;
- arena = arena_init_locked(tsd_tsdn(tsd),
- choose[j],
- (extent_hooks_t *)&extent_hooks_default);
- if (arena == NULL) {
- malloc_mutex_unlock(tsd_tsdn(tsd),
- &arenas_lock);
- return NULL;
- }
- is_new_arena[j] = true;
- if (!!j == internal) {
- ret = arena;
- }
- }
- arena_bind(tsd, choose[j], !!j);
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
-
- for (j = 0; j < 2; j++) {
- if (is_new_arena[j]) {
- assert(choose[j] > 0);
- arena_new_create_background_thread(
- tsd_tsdn(tsd), choose[j]);
- }
- }
-
+ for (j = 0; j < 2; j++) {
+ if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
+ choose[j], false), !!j) == 0 || first_null ==
+ narenas_auto) {
+ /*
+ * Use an unloaded arena, or the least loaded
+ * arena if all arenas are already initialized.
+ */
+ if (!!j == internal) {
+ ret = arena_get(tsd_tsdn(tsd),
+ choose[j], false);
+ }
+ } else {
+ arena_t *arena;
+
+ /* Initialize a new arena. */
+ choose[j] = first_null;
+ arena = arena_init_locked(tsd_tsdn(tsd),
+ choose[j],
+ (extent_hooks_t *)&extent_hooks_default);
+ if (arena == NULL) {
+ malloc_mutex_unlock(tsd_tsdn(tsd),
+ &arenas_lock);
+ return NULL;
+ }
+ is_new_arena[j] = true;
+ if (!!j == internal) {
+ ret = arena;
+ }
+ }
+ arena_bind(tsd, choose[j], !!j);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
+
+ for (j = 0; j < 2; j++) {
+ if (is_new_arena[j]) {
+ assert(choose[j] > 0);
+ arena_new_create_background_thread(
+ tsd_tsdn(tsd), choose[j]);
+ }
+ }
+
} else {
- ret = arena_get(tsd_tsdn(tsd), 0, false);
- arena_bind(tsd, 0, false);
- arena_bind(tsd, 0, true);
- }
-
- return ret;
-}
-
-void
-iarena_cleanup(tsd_t *tsd) {
- arena_t *iarena;
-
- iarena = tsd_iarena_get(tsd);
- if (iarena != NULL) {
- arena_unbind(tsd, arena_ind_get(iarena), true);
- }
-}
-
-void
-arena_cleanup(tsd_t *tsd) {
- arena_t *arena;
-
- arena = tsd_arena_get(tsd);
- if (arena != NULL) {
- arena_unbind(tsd, arena_ind_get(arena), false);
- }
-}
-
-void
-arenas_tdata_cleanup(tsd_t *tsd) {
- arena_tdata_t *arenas_tdata;
-
- /* Prevent tsd->arenas_tdata from being (re)created. */
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
-
- arenas_tdata = tsd_arenas_tdata_get(tsd);
- if (arenas_tdata != NULL) {
- tsd_arenas_tdata_set(tsd, NULL);
- a0dalloc(arenas_tdata);
- }
-}
-
+ ret = arena_get(tsd_tsdn(tsd), 0, false);
+ arena_bind(tsd, 0, false);
+ arena_bind(tsd, 0, true);
+ }
+
+ return ret;
+}
+
+void
+iarena_cleanup(tsd_t *tsd) {
+ arena_t *iarena;
+
+ iarena = tsd_iarena_get(tsd);
+ if (iarena != NULL) {
+ arena_unbind(tsd, arena_ind_get(iarena), true);
+ }
+}
+
+void
+arena_cleanup(tsd_t *tsd) {
+ arena_t *arena;
+
+ arena = tsd_arena_get(tsd);
+ if (arena != NULL) {
+ arena_unbind(tsd, arena_ind_get(arena), false);
+ }
+}
+
+void
+arenas_tdata_cleanup(tsd_t *tsd) {
+ arena_tdata_t *arenas_tdata;
+
+ /* Prevent tsd->arenas_tdata from being (re)created. */
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+
+ arenas_tdata = tsd_arenas_tdata_get(tsd);
+ if (arenas_tdata != NULL) {
+ tsd_arenas_tdata_set(tsd, NULL);
+ a0dalloc(arenas_tdata);
+ }
+}
+
static void
-stats_print_atexit(void) {
- if (config_stats) {
- tsdn_t *tsdn;
+stats_print_atexit(void) {
+ if (config_stats) {
+ tsdn_t *tsdn;
unsigned narenas, i;
- tsdn = tsdn_fetch();
-
+ tsdn = tsdn_fetch();
+
/*
* Merge stats from extant threads. This is racy, since
* individual threads do not lock when recording tcache stats
@@ -659,48 +659,48 @@ stats_print_atexit(void) {
* continue to allocate.
*/
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena = arena_get(tsdn, i, false);
+ arena_t *arena = arena_get(tsdn, i, false);
if (arena != NULL) {
tcache_t *tcache;
- malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
ql_foreach(tcache, &arena->tcache_ql, link) {
- tcache_stats_merge(tsdn, tcache, arena);
+ tcache_stats_merge(tsdn, tcache, arena);
}
- malloc_mutex_unlock(tsdn,
- &arena->tcache_ql_mtx);
+ malloc_mutex_unlock(tsdn,
+ &arena->tcache_ql_mtx);
}
}
}
- je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
-}
-
-/*
- * Ensure that we don't hold any locks upon entry to or exit from allocator
- * code (in a "broad" sense that doesn't count a reentrant allocation as an
- * entrance or exit).
- */
-JEMALLOC_ALWAYS_INLINE void
-check_entry_exit_locking(tsdn_t *tsdn) {
- if (!config_debug) {
- return;
- }
- if (tsdn_null(tsdn)) {
- return;
- }
- tsd_t *tsd = tsdn_tsd(tsdn);
- /*
- * It's possible we hold locks at entry/exit if we're in a nested
- * allocation.
- */
- int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
- if (reentrancy_level != 0) {
- return;
- }
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
}
/*
+ * Ensure that we don't hold any locks upon entry to or exit from allocator
+ * code (in a "broad" sense that doesn't count a reentrant allocation as an
+ * entrance or exit).
+ */
+JEMALLOC_ALWAYS_INLINE void
+check_entry_exit_locking(tsdn_t *tsdn) {
+ if (!config_debug) {
+ return;
+ }
+ if (tsdn_null(tsdn)) {
+ return;
+ }
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ /*
+ * It's possible we hold locks at entry/exit if we're in a nested
+ * allocation.
+ */
+ int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
+ if (reentrancy_level != 0) {
+ return;
+ }
+ witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+}
+
+/*
* End miscellaneous support functions.
*/
/******************************************************************************/
@@ -708,126 +708,126 @@ check_entry_exit_locking(tsdn_t *tsdn) {
* Begin initialization functions.
*/
-static char *
-jemalloc_secure_getenv(const char *name) {
-#ifdef JEMALLOC_HAVE_SECURE_GETENV
- return secure_getenv(name);
-#else
-# ifdef JEMALLOC_HAVE_ISSETUGID
- if (issetugid() != 0) {
- return NULL;
- }
-# endif
- return getenv(name);
-#endif
-}
-
+static char *
+jemalloc_secure_getenv(const char *name) {
+#ifdef JEMALLOC_HAVE_SECURE_GETENV
+ return secure_getenv(name);
+#else
+# ifdef JEMALLOC_HAVE_ISSETUGID
+ if (issetugid() != 0) {
+ return NULL;
+ }
+# endif
+ return getenv(name);
+#endif
+}
+
static unsigned
-malloc_ncpus(void) {
+malloc_ncpus(void) {
long result;
#ifdef _WIN32
SYSTEM_INFO si;
GetSystemInfo(&si);
result = si.dwNumberOfProcessors;
-#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
- /*
- * glibc >= 2.6 has the CPU_COUNT macro.
- *
- * glibc's sysconf() uses isspace(). glibc allocates for the first time
- * *before* setting up the isspace tables. Therefore we need a
- * different method to get the number of CPUs.
- */
- {
- cpu_set_t set;
-
- pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
- result = CPU_COUNT(&set);
- }
+#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
+ /*
+ * glibc >= 2.6 has the CPU_COUNT macro.
+ *
+ * glibc's sysconf() uses isspace(). glibc allocates for the first time
+ * *before* setting up the isspace tables. Therefore we need a
+ * different method to get the number of CPUs.
+ */
+ {
+ cpu_set_t set;
+
+ pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
+ result = CPU_COUNT(&set);
+ }
#else
result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
return ((result == -1) ? 1 : (unsigned)result);
}
-static void
-init_opt_stats_print_opts(const char *v, size_t vlen) {
- size_t opts_len = strlen(opt_stats_print_opts);
- assert(opts_len <= stats_print_tot_num_options);
-
- for (size_t i = 0; i < vlen; i++) {
- switch (v[i]) {
-#define OPTION(o, v, d, s) case o: break;
- STATS_PRINT_OPTIONS
-#undef OPTION
- default: continue;
- }
-
- if (strchr(opt_stats_print_opts, v[i]) != NULL) {
- /* Ignore repeated. */
- continue;
- }
-
- opt_stats_print_opts[opts_len++] = v[i];
- opt_stats_print_opts[opts_len] = '\0';
- assert(opts_len <= stats_print_tot_num_options);
- }
- assert(opts_len == strlen(opt_stats_print_opts));
-}
-
-/* Reads the next size pair in a multi-sized option. */
-static bool
-malloc_conf_multi_sizes_next(const char **slab_size_segment_cur,
- size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) {
- const char *cur = *slab_size_segment_cur;
- char *end;
- uintmax_t um;
-
- set_errno(0);
-
- /* First number, then '-' */
- um = malloc_strtoumax(cur, &end, 0);
- if (get_errno() != 0 || *end != '-') {
- return true;
- }
- *slab_start = (size_t)um;
- cur = end + 1;
-
- /* Second number, then ':' */
- um = malloc_strtoumax(cur, &end, 0);
- if (get_errno() != 0 || *end != ':') {
- return true;
- }
- *slab_end = (size_t)um;
- cur = end + 1;
-
- /* Last number */
- um = malloc_strtoumax(cur, &end, 0);
- if (get_errno() != 0) {
- return true;
- }
- *new_size = (size_t)um;
-
- /* Consume the separator if there is one. */
- if (*end == '|') {
- end++;
- }
-
- *vlen_left -= end - *slab_size_segment_cur;
- *slab_size_segment_cur = end;
-
- return false;
+static void
+init_opt_stats_print_opts(const char *v, size_t vlen) {
+ size_t opts_len = strlen(opt_stats_print_opts);
+ assert(opts_len <= stats_print_tot_num_options);
+
+ for (size_t i = 0; i < vlen; i++) {
+ switch (v[i]) {
+#define OPTION(o, v, d, s) case o: break;
+ STATS_PRINT_OPTIONS
+#undef OPTION
+ default: continue;
+ }
+
+ if (strchr(opt_stats_print_opts, v[i]) != NULL) {
+ /* Ignore repeated. */
+ continue;
+ }
+
+ opt_stats_print_opts[opts_len++] = v[i];
+ opt_stats_print_opts[opts_len] = '\0';
+ assert(opts_len <= stats_print_tot_num_options);
+ }
+ assert(opts_len == strlen(opt_stats_print_opts));
+}
+
+/* Reads the next size pair in a multi-sized option. */
+static bool
+malloc_conf_multi_sizes_next(const char **slab_size_segment_cur,
+ size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) {
+ const char *cur = *slab_size_segment_cur;
+ char *end;
+ uintmax_t um;
+
+ set_errno(0);
+
+ /* First number, then '-' */
+ um = malloc_strtoumax(cur, &end, 0);
+ if (get_errno() != 0 || *end != '-') {
+ return true;
+ }
+ *slab_start = (size_t)um;
+ cur = end + 1;
+
+ /* Second number, then ':' */
+ um = malloc_strtoumax(cur, &end, 0);
+ if (get_errno() != 0 || *end != ':') {
+ return true;
+ }
+ *slab_end = (size_t)um;
+ cur = end + 1;
+
+ /* Last number */
+ um = malloc_strtoumax(cur, &end, 0);
+ if (get_errno() != 0) {
+ return true;
+ }
+ *new_size = (size_t)um;
+
+ /* Consume the separator if there is one. */
+ if (*end == '|') {
+ end++;
+ }
+
+ *vlen_left -= end - *slab_size_segment_cur;
+ *slab_size_segment_cur = end;
+
+ return false;
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
- char const **v_p, size_t *vlen_p) {
+ char const **v_p, size_t *vlen_p) {
bool accept;
const char *opts = *opts_p;
*k_p = opts;
- for (accept = false; !accept;) {
+ for (accept = false; !accept;) {
switch (*opts) {
case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
@@ -855,14 +855,14 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
malloc_write("<jemalloc>: Conf string ends "
"with key\n");
}
- return true;
+ return true;
default:
malloc_write("<jemalloc>: Malformed conf string\n");
- return true;
+ return true;
}
}
- for (accept = false; !accept;) {
+ for (accept = false; !accept;) {
switch (*opts) {
case ',':
opts++;
@@ -891,211 +891,211 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
}
*opts_p = opts;
- return false;
-}
-
-static void
-malloc_abort_invalid_conf(void) {
- assert(opt_abort_conf);
- malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
- "value (see above).\n");
- abort();
+ return false;
}
static void
+malloc_abort_invalid_conf(void) {
+ assert(opt_abort_conf);
+ malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
+ "value (see above).\n");
+ abort();
+}
+
+static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
- size_t vlen) {
+ size_t vlen) {
malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
(int)vlen, v);
- /* If abort_conf is set, error out after processing all options. */
- const char *experimental = "experimental_";
- if (strncmp(k, experimental, strlen(experimental)) == 0) {
- /* However, tolerate experimental features. */
- return;
- }
- had_conf_error = true;
+ /* If abort_conf is set, error out after processing all options. */
+ const char *experimental = "experimental_";
+ if (strncmp(k, experimental, strlen(experimental)) == 0) {
+ /* However, tolerate experimental features. */
+ return;
+ }
+ had_conf_error = true;
}
static void
-malloc_slow_flag_init(void) {
+malloc_slow_flag_init(void) {
/*
- * Combine the runtime options into malloc_slow for fast path. Called
- * after processing all the options.
+ * Combine the runtime options into malloc_slow for fast path. Called
+ * after processing all the options.
*/
- malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
- | (opt_junk_free ? flag_opt_junk_free : 0)
- | (opt_zero ? flag_opt_zero : 0)
- | (opt_utrace ? flag_opt_utrace : 0)
- | (opt_xmalloc ? flag_opt_xmalloc : 0);
-
- malloc_slow = (malloc_slow_flags != 0);
-}
-
-/* Number of sources for initializing malloc_conf */
-#define MALLOC_CONF_NSOURCES 4
-
-static const char *
-obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
- if (config_debug) {
- static unsigned read_source = 0;
- /*
- * Each source should only be read once, to minimize # of
- * syscalls on init.
- */
- assert(read_source++ == which_source);
- }
- assert(which_source < MALLOC_CONF_NSOURCES);
-
- const char *ret;
- switch (which_source) {
- case 0:
- ret = config_malloc_conf;
- break;
- case 1:
- if (je_malloc_conf != NULL) {
- /* Use options that were compiled into the program. */
- ret = je_malloc_conf;
- } else {
- /* No configuration specified. */
- ret = NULL;
- }
- break;
- case 2: {
- ssize_t linklen = 0;
+ malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
+ | (opt_junk_free ? flag_opt_junk_free : 0)
+ | (opt_zero ? flag_opt_zero : 0)
+ | (opt_utrace ? flag_opt_utrace : 0)
+ | (opt_xmalloc ? flag_opt_xmalloc : 0);
+
+ malloc_slow = (malloc_slow_flags != 0);
+}
+
+/* Number of sources for initializing malloc_conf */
+#define MALLOC_CONF_NSOURCES 4
+
+static const char *
+obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
+ if (config_debug) {
+ static unsigned read_source = 0;
+ /*
+ * Each source should only be read once, to minimize # of
+ * syscalls on init.
+ */
+ assert(read_source++ == which_source);
+ }
+ assert(which_source < MALLOC_CONF_NSOURCES);
+
+ const char *ret;
+ switch (which_source) {
+ case 0:
+ ret = config_malloc_conf;
+ break;
+ case 1:
+ if (je_malloc_conf != NULL) {
+ /* Use options that were compiled into the program. */
+ ret = je_malloc_conf;
+ } else {
+ /* No configuration specified. */
+ ret = NULL;
+ }
+ break;
+ case 2: {
+ ssize_t linklen = 0;
#ifndef _WIN32
- int saved_errno = errno;
- const char *linkname =
+ int saved_errno = errno;
+ const char *linkname =
# ifdef JEMALLOC_PREFIX
- "/etc/"JEMALLOC_PREFIX"malloc.conf"
+ "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
- "/etc/malloc.conf"
+ "/etc/malloc.conf"
# endif
- ;
-
- /*
- * Try to use the contents of the "/etc/malloc.conf" symbolic
- * link's name.
- */
-#ifndef JEMALLOC_READLINKAT
- linklen = readlink(linkname, buf, PATH_MAX);
-#else
- linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX);
-#endif
- if (linklen == -1) {
- /* No configuration specified. */
- linklen = 0;
- /* Restore errno. */
- set_errno(saved_errno);
- }
+ ;
+
+ /*
+ * Try to use the contents of the "/etc/malloc.conf" symbolic
+ * link's name.
+ */
+#ifndef JEMALLOC_READLINKAT
+ linklen = readlink(linkname, buf, PATH_MAX);
+#else
+ linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX);
#endif
- buf[linklen] = '\0';
- ret = buf;
- break;
- } case 3: {
- const char *envname =
+ if (linklen == -1) {
+ /* No configuration specified. */
+ linklen = 0;
+ /* Restore errno. */
+ set_errno(saved_errno);
+ }
+#endif
+ buf[linklen] = '\0';
+ ret = buf;
+ break;
+ } case 3: {
+ const char *envname =
#ifdef JEMALLOC_PREFIX
- JEMALLOC_CPREFIX"MALLOC_CONF"
+ JEMALLOC_CPREFIX"MALLOC_CONF"
#else
- "MALLOC_CONF"
+ "MALLOC_CONF"
#endif
- ;
-
- if ((ret = jemalloc_secure_getenv(envname)) != NULL) {
- /*
- * Do nothing; opts is already initialized to the value
- * of the MALLOC_CONF environment variable.
- */
- } else {
- /* No configuration specified. */
- ret = NULL;
- }
- break;
- } default:
- not_reached();
- ret = NULL;
- }
- return ret;
-}
-
-static void
-malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
- bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES],
- char buf[PATH_MAX + 1]) {
- static const char *opts_explain[MALLOC_CONF_NSOURCES] = {
- "string specified via --with-malloc-conf",
- "string pointed to by the global variable malloc_conf",
- "\"name\" of the file referenced by the symbolic link named "
- "/etc/malloc.conf",
- "value of the environment variable MALLOC_CONF"
- };
- unsigned i;
- const char *opts, *k, *v;
- size_t klen, vlen;
-
- for (i = 0; i < MALLOC_CONF_NSOURCES; i++) {
- /* Get runtime configuration. */
- if (initial_call) {
- opts_cache[i] = obtain_malloc_conf(i, buf);
- }
- opts = opts_cache[i];
- if (!initial_call && opt_confirm_conf) {
- malloc_printf(
- "<jemalloc>: malloc_conf #%u (%s): \"%s\"\n",
- i + 1, opts_explain[i], opts != NULL ? opts : "");
- }
- if (opts == NULL) {
- continue;
- }
-
- while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
- &vlen)) {
-
-#define CONF_ERROR(msg, k, klen, v, vlen) \
- if (!initial_call) { \
- malloc_conf_error( \
- msg, k, klen, v, vlen); \
- cur_opt_valid = false; \
- }
-#define CONF_CONTINUE { \
- if (!initial_call && opt_confirm_conf \
- && cur_opt_valid) { \
- malloc_printf("<jemalloc>: -- " \
- "Set conf value: %.*s:%.*s" \
- "\n", (int)klen, k, \
- (int)vlen, v); \
- } \
- continue; \
- }
-#define CONF_MATCH(n) \
- (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
-#define CONF_MATCH_VALUE(n) \
- (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
-#define CONF_HANDLE_BOOL(o, n) \
- if (CONF_MATCH(n)) { \
- if (CONF_MATCH_VALUE("true")) { \
+ ;
+
+ if ((ret = jemalloc_secure_getenv(envname)) != NULL) {
+ /*
+ * Do nothing; opts is already initialized to the value
+ * of the MALLOC_CONF environment variable.
+ */
+ } else {
+ /* No configuration specified. */
+ ret = NULL;
+ }
+ break;
+ } default:
+ not_reached();
+ ret = NULL;
+ }
+ return ret;
+}
+
+static void
+malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
+ bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES],
+ char buf[PATH_MAX + 1]) {
+ static const char *opts_explain[MALLOC_CONF_NSOURCES] = {
+ "string specified via --with-malloc-conf",
+ "string pointed to by the global variable malloc_conf",
+ "\"name\" of the file referenced by the symbolic link named "
+ "/etc/malloc.conf",
+ "value of the environment variable MALLOC_CONF"
+ };
+ unsigned i;
+ const char *opts, *k, *v;
+ size_t klen, vlen;
+
+ for (i = 0; i < MALLOC_CONF_NSOURCES; i++) {
+ /* Get runtime configuration. */
+ if (initial_call) {
+ opts_cache[i] = obtain_malloc_conf(i, buf);
+ }
+ opts = opts_cache[i];
+ if (!initial_call && opt_confirm_conf) {
+ malloc_printf(
+ "<jemalloc>: malloc_conf #%u (%s): \"%s\"\n",
+ i + 1, opts_explain[i], opts != NULL ? opts : "");
+ }
+ if (opts == NULL) {
+ continue;
+ }
+
+ while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
+ &vlen)) {
+
+#define CONF_ERROR(msg, k, klen, v, vlen) \
+ if (!initial_call) { \
+ malloc_conf_error( \
+ msg, k, klen, v, vlen); \
+ cur_opt_valid = false; \
+ }
+#define CONF_CONTINUE { \
+ if (!initial_call && opt_confirm_conf \
+ && cur_opt_valid) { \
+ malloc_printf("<jemalloc>: -- " \
+ "Set conf value: %.*s:%.*s" \
+ "\n", (int)klen, k, \
+ (int)vlen, v); \
+ } \
+ continue; \
+ }
+#define CONF_MATCH(n) \
+ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
+#define CONF_MATCH_VALUE(n) \
+ (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
+#define CONF_HANDLE_BOOL(o, n) \
+ if (CONF_MATCH(n)) { \
+ if (CONF_MATCH_VALUE("true")) { \
o = true; \
- } else if (CONF_MATCH_VALUE("false")) { \
+ } else if (CONF_MATCH_VALUE("false")) { \
o = false; \
- } else { \
- CONF_ERROR("Invalid conf value",\
+ } else { \
+ CONF_ERROR("Invalid conf value",\
k, klen, v, vlen); \
} \
- CONF_CONTINUE; \
+ CONF_CONTINUE; \
}
- /*
- * One of the CONF_MIN macros below expands, in one of the use points,
- * to "unsigned integer < 0", which is always false, triggering the
- * GCC -Wtype-limits warning, which we disable here and re-enable below.
- */
- JEMALLOC_DIAGNOSTIC_PUSH
- JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
-
-#define CONF_DONT_CHECK_MIN(um, min) false
-#define CONF_CHECK_MIN(um, min) ((um) < (min))
-#define CONF_DONT_CHECK_MAX(um, max) false
-#define CONF_CHECK_MAX(um, max) ((um) > (max))
-#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
- if (CONF_MATCH(n)) { \
+ /*
+ * One of the CONF_MIN macros below expands, in one of the use points,
+ * to "unsigned integer < 0", which is always false, triggering the
+ * GCC -Wtype-limits warning, which we disable here and re-enable below.
+ */
+ JEMALLOC_DIAGNOSTIC_PUSH
+ JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
+
+#define CONF_DONT_CHECK_MIN(um, min) false
+#define CONF_CHECK_MIN(um, min) ((um) < (min))
+#define CONF_DONT_CHECK_MAX(um, max) false
+#define CONF_CHECK_MAX(um, max) ((um) > (max))
+#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+ if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
\
@@ -1103,39 +1103,39 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
um = malloc_strtoumax(v, &end, 0); \
if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \
- CONF_ERROR("Invalid conf value",\
+ CONF_ERROR("Invalid conf value",\
k, klen, v, vlen); \
} else if (clip) { \
- if (check_min(um, (t)(min))) { \
- o = (t)(min); \
- } else if ( \
- check_max(um, (t)(max))) { \
- o = (t)(max); \
- } else { \
- o = (t)um; \
- } \
+ if (check_min(um, (t)(min))) { \
+ o = (t)(min); \
+ } else if ( \
+ check_max(um, (t)(max))) { \
+ o = (t)(max); \
+ } else { \
+ o = (t)um; \
+ } \
} else { \
- if (check_min(um, (t)(min)) || \
- check_max(um, (t)(max))) { \
- CONF_ERROR( \
+ if (check_min(um, (t)(min)) || \
+ check_max(um, (t)(max))) { \
+ CONF_ERROR( \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
- } else { \
- o = (t)um; \
- } \
+ } else { \
+ o = (t)um; \
+ } \
} \
- CONF_CONTINUE; \
+ CONF_CONTINUE; \
}
-#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
- clip) \
- CONF_HANDLE_T_U(unsigned, o, n, min, max, \
- check_min, check_max, clip)
-#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
- CONF_HANDLE_T_U(size_t, o, n, min, max, \
- check_min, check_max, clip)
-#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
- if (CONF_MATCH(n)) { \
+#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
+ clip) \
+ CONF_HANDLE_T_U(unsigned, o, n, min, max, \
+ check_min, check_max, clip)
+#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
+ CONF_HANDLE_T_U(size_t, o, n, min, max, \
+ check_min, check_max, clip)
+#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
+ if (CONF_MATCH(n)) { \
long l; \
char *end; \
\
@@ -1143,63 +1143,63 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
l = strtol(v, &end, 0); \
if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \
- CONF_ERROR("Invalid conf value",\
+ CONF_ERROR("Invalid conf value",\
k, klen, v, vlen); \
- } else if (l < (ssize_t)(min) || l > \
- (ssize_t)(max)) { \
- CONF_ERROR( \
+ } else if (l < (ssize_t)(min) || l > \
+ (ssize_t)(max)) { \
+ CONF_ERROR( \
"Out-of-range conf value", \
k, klen, v, vlen); \
- } else { \
+ } else { \
o = l; \
- } \
- CONF_CONTINUE; \
+ } \
+ CONF_CONTINUE; \
}
-#define CONF_HANDLE_CHAR_P(o, n, d) \
- if (CONF_MATCH(n)) { \
+#define CONF_HANDLE_CHAR_P(o, n, d) \
+ if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
sizeof(o)-1) ? vlen : \
sizeof(o)-1; \
strncpy(o, v, cpylen); \
o[cpylen] = '\0'; \
- CONF_CONTINUE; \
- }
-
- bool cur_opt_valid = true;
-
- CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf")
- if (initial_call) {
- continue;
+ CONF_CONTINUE; \
}
+ bool cur_opt_valid = true;
+
+ CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf")
+ if (initial_call) {
+ continue;
+ }
+
CONF_HANDLE_BOOL(opt_abort, "abort")
- CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
- if (strncmp("metadata_thp", k, klen) == 0) {
- int i;
- bool match = false;
- for (i = 0; i < metadata_thp_mode_limit; i++) {
- if (strncmp(metadata_thp_mode_names[i],
- v, vlen) == 0) {
- opt_metadata_thp = i;
- match = true;
- break;
- }
- }
- if (!match) {
- CONF_ERROR("Invalid conf value",
- k, klen, v, vlen);
- }
- CONF_CONTINUE;
- }
- CONF_HANDLE_BOOL(opt_retain, "retain")
+ CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
+ if (strncmp("metadata_thp", k, klen) == 0) {
+ int i;
+ bool match = false;
+ for (i = 0; i < metadata_thp_mode_limit; i++) {
+ if (strncmp(metadata_thp_mode_names[i],
+ v, vlen) == 0) {
+ opt_metadata_thp = i;
+ match = true;
+ break;
+ }
+ }
+ if (!match) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ CONF_CONTINUE;
+ }
+ CONF_HANDLE_BOOL(opt_retain, "retain")
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
for (i = 0; i < dss_prec_limit; i++) {
if (strncmp(dss_prec_names[i], v, vlen)
== 0) {
- if (extent_dss_prec_set(i)) {
- CONF_ERROR(
+ if (extent_dss_prec_set(i)) {
+ CONF_ERROR(
"Error setting dss",
k, klen, v, vlen);
} else {
@@ -1210,75 +1210,75 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
}
}
}
- if (!match) {
- CONF_ERROR("Invalid conf value",
+ if (!match) {
+ CONF_ERROR("Invalid conf value",
k, klen, v, vlen);
}
- CONF_CONTINUE;
+ CONF_CONTINUE;
}
- CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
- UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
- false)
- if (CONF_MATCH("bin_shards")) {
- const char *bin_shards_segment_cur = v;
- size_t vlen_left = vlen;
- do {
- size_t size_start;
- size_t size_end;
- size_t nshards;
- bool err = malloc_conf_multi_sizes_next(
- &bin_shards_segment_cur, &vlen_left,
- &size_start, &size_end, &nshards);
- if (err || bin_update_shard_size(
- bin_shard_sizes, size_start,
- size_end, nshards)) {
- CONF_ERROR(
- "Invalid settings for "
- "bin_shards", k, klen, v,
- vlen);
- break;
- }
- } while (vlen_left > 0);
- CONF_CONTINUE;
- }
- CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
- "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
- QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
- SSIZE_MAX);
- CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
- "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
- QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
- SSIZE_MAX);
+ CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
+ UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
+ false)
+ if (CONF_MATCH("bin_shards")) {
+ const char *bin_shards_segment_cur = v;
+ size_t vlen_left = vlen;
+ do {
+ size_t size_start;
+ size_t size_end;
+ size_t nshards;
+ bool err = malloc_conf_multi_sizes_next(
+ &bin_shards_segment_cur, &vlen_left,
+ &size_start, &size_end, &nshards);
+ if (err || bin_update_shard_size(
+ bin_shard_sizes, size_start,
+ size_end, nshards)) {
+ CONF_ERROR(
+ "Invalid settings for "
+ "bin_shards", k, klen, v,
+ vlen);
+ break;
+ }
+ } while (vlen_left > 0);
+ CONF_CONTINUE;
+ }
+ CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
+ "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
+ QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
+ SSIZE_MAX);
+ CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
+ "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
+ QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
+ SSIZE_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
- if (CONF_MATCH("stats_print_opts")) {
- init_opt_stats_print_opts(v, vlen);
- CONF_CONTINUE;
- }
+ if (CONF_MATCH("stats_print_opts")) {
+ init_opt_stats_print_opts(v, vlen);
+ CONF_CONTINUE;
+ }
if (config_fill) {
- if (CONF_MATCH("junk")) {
- if (CONF_MATCH_VALUE("true")) {
- opt_junk = "true";
- opt_junk_alloc = opt_junk_free =
- true;
- } else if (CONF_MATCH_VALUE("false")) {
- opt_junk = "false";
- opt_junk_alloc = opt_junk_free =
- false;
- } else if (CONF_MATCH_VALUE("alloc")) {
- opt_junk = "alloc";
- opt_junk_alloc = true;
- opt_junk_free = false;
- } else if (CONF_MATCH_VALUE("free")) {
- opt_junk = "free";
- opt_junk_alloc = false;
- opt_junk_free = true;
- } else {
- CONF_ERROR(
- "Invalid conf value",
- k, klen, v, vlen);
- }
- CONF_CONTINUE;
- }
+ if (CONF_MATCH("junk")) {
+ if (CONF_MATCH_VALUE("true")) {
+ opt_junk = "true";
+ opt_junk_alloc = opt_junk_free =
+ true;
+ } else if (CONF_MATCH_VALUE("false")) {
+ opt_junk = "false";
+ opt_junk_alloc = opt_junk_free =
+ false;
+ } else if (CONF_MATCH_VALUE("alloc")) {
+ opt_junk = "alloc";
+ opt_junk_alloc = true;
+ opt_junk_free = false;
+ } else if (CONF_MATCH_VALUE("free")) {
+ opt_junk = "free";
+ opt_junk_alloc = false;
+ opt_junk_free = true;
+ } else {
+ CONF_ERROR(
+ "Invalid conf value",
+ k, klen, v, vlen);
+ }
+ CONF_CONTINUE;
+ }
CONF_HANDLE_BOOL(opt_zero, "zero")
}
if (config_utrace) {
@@ -1287,90 +1287,90 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
}
- CONF_HANDLE_BOOL(opt_tcache, "tcache")
- CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
- -1, (sizeof(size_t) << 3) - 1)
-
- /*
- * The runtime option of oversize_threshold remains
- * undocumented. It may be tweaked in the next major
- * release (6.0). The default value 8M is rather
- * conservative / safe. Tuning it further down may
- * improve fragmentation a bit more, but may also cause
- * contention on the huge arena.
- */
- CONF_HANDLE_SIZE_T(opt_oversize_threshold,
- "oversize_threshold", 0, SC_LARGE_MAXCLASS,
- CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false)
- CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
- "lg_extent_max_active_fit", 0,
- (sizeof(size_t) << 3), CONF_DONT_CHECK_MIN,
- CONF_CHECK_MAX, false)
-
- if (strncmp("percpu_arena", k, klen) == 0) {
- bool match = false;
- for (int i = percpu_arena_mode_names_base; i <
- percpu_arena_mode_names_limit; i++) {
- if (strncmp(percpu_arena_mode_names[i],
- v, vlen) == 0) {
- if (!have_percpu_arena) {
- CONF_ERROR(
- "No getcpu support",
- k, klen, v, vlen);
- }
- opt_percpu_arena = i;
- match = true;
- break;
- }
- }
- if (!match) {
- CONF_ERROR("Invalid conf value",
- k, klen, v, vlen);
- }
- CONF_CONTINUE;
- }
- CONF_HANDLE_BOOL(opt_background_thread,
- "background_thread");
- CONF_HANDLE_SIZE_T(opt_max_background_threads,
- "max_background_threads", 1,
- opt_max_background_threads,
- CONF_CHECK_MIN, CONF_CHECK_MAX,
- true);
- if (CONF_MATCH("slab_sizes")) {
- bool err;
- const char *slab_size_segment_cur = v;
- size_t vlen_left = vlen;
- do {
- size_t slab_start;
- size_t slab_end;
- size_t pgs;
- err = malloc_conf_multi_sizes_next(
- &slab_size_segment_cur,
- &vlen_left, &slab_start, &slab_end,
- &pgs);
- if (!err) {
- sc_data_update_slab_size(
- sc_data, slab_start,
- slab_end, (int)pgs);
- } else {
- CONF_ERROR("Invalid settings "
- "for slab_sizes",
- k, klen, v, vlen);
- }
- } while (!err && vlen_left > 0);
- CONF_CONTINUE;
+ CONF_HANDLE_BOOL(opt_tcache, "tcache")
+ CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
+ -1, (sizeof(size_t) << 3) - 1)
+
+ /*
+ * The runtime option of oversize_threshold remains
+ * undocumented. It may be tweaked in the next major
+ * release (6.0). The default value 8M is rather
+ * conservative / safe. Tuning it further down may
+ * improve fragmentation a bit more, but may also cause
+ * contention on the huge arena.
+ */
+ CONF_HANDLE_SIZE_T(opt_oversize_threshold,
+ "oversize_threshold", 0, SC_LARGE_MAXCLASS,
+ CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false)
+ CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
+ "lg_extent_max_active_fit", 0,
+ (sizeof(size_t) << 3), CONF_DONT_CHECK_MIN,
+ CONF_CHECK_MAX, false)
+
+ if (strncmp("percpu_arena", k, klen) == 0) {
+ bool match = false;
+ for (int i = percpu_arena_mode_names_base; i <
+ percpu_arena_mode_names_limit; i++) {
+ if (strncmp(percpu_arena_mode_names[i],
+ v, vlen) == 0) {
+ if (!have_percpu_arena) {
+ CONF_ERROR(
+ "No getcpu support",
+ k, klen, v, vlen);
+ }
+ opt_percpu_arena = i;
+ match = true;
+ break;
+ }
+ }
+ if (!match) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ CONF_CONTINUE;
}
+ CONF_HANDLE_BOOL(opt_background_thread,
+ "background_thread");
+ CONF_HANDLE_SIZE_T(opt_max_background_threads,
+ "max_background_threads", 1,
+ opt_max_background_threads,
+ CONF_CHECK_MIN, CONF_CHECK_MAX,
+ true);
+ if (CONF_MATCH("slab_sizes")) {
+ bool err;
+ const char *slab_size_segment_cur = v;
+ size_t vlen_left = vlen;
+ do {
+ size_t slab_start;
+ size_t slab_end;
+ size_t pgs;
+ err = malloc_conf_multi_sizes_next(
+ &slab_size_segment_cur,
+ &vlen_left, &slab_start, &slab_end,
+ &pgs);
+ if (!err) {
+ sc_data_update_slab_size(
+ sc_data, slab_start,
+ slab_end, (int)pgs);
+ } else {
+ CONF_ERROR("Invalid settings "
+ "for slab_sizes",
+ k, klen, v, vlen);
+ }
+ } while (!err && vlen_left > 0);
+ CONF_CONTINUE;
+ }
if (config_prof) {
CONF_HANDLE_BOOL(opt_prof, "prof")
CONF_HANDLE_CHAR_P(opt_prof_prefix,
"prof_prefix", "jeprof")
CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
- CONF_HANDLE_BOOL(opt_prof_thread_active_init,
- "prof_thread_active_init")
- CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
- "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
- - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX,
- true)
+ CONF_HANDLE_BOOL(opt_prof_thread_active_init,
+ "prof_thread_active_init")
+ CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
+ "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
+ - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX,
+ true)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
"lg_prof_interval", -1,
@@ -1378,1506 +1378,1506 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
- CONF_HANDLE_BOOL(opt_prof_log, "prof_log")
- }
- if (config_log) {
- if (CONF_MATCH("log")) {
- size_t cpylen = (
- vlen <= sizeof(log_var_names) ?
- vlen : sizeof(log_var_names) - 1);
- strncpy(log_var_names, v, cpylen);
- log_var_names[cpylen] = '\0';
- CONF_CONTINUE;
- }
- }
- if (CONF_MATCH("thp")) {
- bool match = false;
- for (int i = 0; i < thp_mode_names_limit; i++) {
- if (strncmp(thp_mode_names[i],v, vlen)
- == 0) {
- if (!have_madvise_huge) {
- CONF_ERROR(
- "No THP support",
- k, klen, v, vlen);
- }
- opt_thp = i;
- match = true;
- break;
- }
- }
- if (!match) {
- CONF_ERROR("Invalid conf value",
- k, klen, v, vlen);
- }
- CONF_CONTINUE;
+ CONF_HANDLE_BOOL(opt_prof_log, "prof_log")
}
- CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
-#undef CONF_ERROR
-#undef CONF_CONTINUE
-#undef CONF_MATCH
-#undef CONF_MATCH_VALUE
+ if (config_log) {
+ if (CONF_MATCH("log")) {
+ size_t cpylen = (
+ vlen <= sizeof(log_var_names) ?
+ vlen : sizeof(log_var_names) - 1);
+ strncpy(log_var_names, v, cpylen);
+ log_var_names[cpylen] = '\0';
+ CONF_CONTINUE;
+ }
+ }
+ if (CONF_MATCH("thp")) {
+ bool match = false;
+ for (int i = 0; i < thp_mode_names_limit; i++) {
+					if (strncmp(thp_mode_names[i], v, vlen)
+ == 0) {
+ if (!have_madvise_huge) {
+ CONF_ERROR(
+ "No THP support",
+ k, klen, v, vlen);
+ }
+ opt_thp = i;
+ match = true;
+ break;
+ }
+ }
+ if (!match) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ CONF_CONTINUE;
+ }
+ CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
+#undef CONF_ERROR
+#undef CONF_CONTINUE
+#undef CONF_MATCH
+#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
-#undef CONF_DONT_CHECK_MIN
-#undef CONF_CHECK_MIN
-#undef CONF_DONT_CHECK_MAX
-#undef CONF_CHECK_MAX
-#undef CONF_HANDLE_T_U
-#undef CONF_HANDLE_UNSIGNED
+#undef CONF_DONT_CHECK_MIN
+#undef CONF_CHECK_MIN
+#undef CONF_DONT_CHECK_MAX
+#undef CONF_CHECK_MAX
+#undef CONF_HANDLE_T_U
+#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
- /* Re-enable diagnostic "-Wtype-limits" */
- JEMALLOC_DIAGNOSTIC_POP
- }
- if (opt_abort_conf && had_conf_error) {
- malloc_abort_invalid_conf();
- }
- }
- atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
-}
-
-static void
-malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
- const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL};
- char buf[PATH_MAX + 1];
-
- /* The first call only set the confirm_conf option and opts_cache */
- malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf);
- malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache,
- NULL);
-}
-
-#undef MALLOC_CONF_NSOURCES
-
+ /* Re-enable diagnostic "-Wtype-limits" */
+ JEMALLOC_DIAGNOSTIC_POP
+ }
+ if (opt_abort_conf && had_conf_error) {
+ malloc_abort_invalid_conf();
+ }
+ }
+ atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
+}
+
+static void
+malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
+ const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL};
+ char buf[PATH_MAX + 1];
+
+	/* The first call only sets the confirm_conf option and opts_cache. */
+ malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf);
+ malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache,
+ NULL);
+}
+
+#undef MALLOC_CONF_NSOURCES
+
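/*
 * Illustrative sketch (not part of this diff): the parser above consumes
 * option strings from several sources (MALLOC_CONF_NSOURCES). Two common
 * ways for an application to supply one are the MALLOC_CONF environment
 * variable and the application-provided malloc_conf symbol; the option
 * values below are examples only, using names handled by the parser above
 * (tcache, background_thread) plus the documented narenas option.
 */
#include <stdlib.h>

/* Compile-time options string picked up during malloc_conf_init(). */
const char *malloc_conf = "narenas:4,tcache:true,background_thread:true";

int
main(void) {
	/* Equivalently: run with MALLOC_CONF="narenas:4" in the environment. */
	void *p = malloc(64);
	free(p);
	return 0;
}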
static bool
-malloc_init_hard_needed(void) {
- if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
- malloc_init_recursible)) {
+malloc_init_hard_needed(void) {
+ if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
+ malloc_init_recursible)) {
/*
* Another thread initialized the allocator before this one
* acquired init_lock, or this thread is the initializing
* thread, and it is recursively allocating.
*/
- return false;
+ return false;
}
#ifdef JEMALLOC_THREADED_INIT
- if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
+ if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
/* Busy-wait until the initializing thread completes. */
- spin_t spinner = SPIN_INITIALIZER;
+ spin_t spinner = SPIN_INITIALIZER;
do {
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
- spin_adaptive(&spinner);
- malloc_mutex_lock(TSDN_NULL, &init_lock);
- } while (!malloc_initialized());
- return false;
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ spin_adaptive(&spinner);
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+ } while (!malloc_initialized());
+ return false;
}
#endif
- return true;
-}
-
-static bool
-malloc_init_hard_a0_locked() {
+ return true;
+}
+
+static bool
+malloc_init_hard_a0_locked() {
malloc_initializer = INITIALIZER;
- JEMALLOC_DIAGNOSTIC_PUSH
- JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
- sc_data_t sc_data = {0};
- JEMALLOC_DIAGNOSTIC_POP
-
- /*
- * Ordering here is somewhat tricky; we need sc_boot() first, since that
- * determines what the size classes will be, and then
- * malloc_conf_init(), since any slab size tweaking will need to be done
- * before sz_boot and bin_boot, which assume that the values they read
- * out of sc_data_global are final.
- */
- sc_boot(&sc_data);
- unsigned bin_shard_sizes[SC_NBINS];
- bin_shard_sizes_boot(bin_shard_sizes);
- /*
- * prof_boot0 only initializes opt_prof_prefix. We need to do it before
- * we parse malloc_conf options, in case malloc_conf parsing overwrites
- * it.
- */
- if (config_prof) {
+ JEMALLOC_DIAGNOSTIC_PUSH
+ JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
+ sc_data_t sc_data = {0};
+ JEMALLOC_DIAGNOSTIC_POP
+
+ /*
+ * Ordering here is somewhat tricky; we need sc_boot() first, since that
+ * determines what the size classes will be, and then
+ * malloc_conf_init(), since any slab size tweaking will need to be done
+ * before sz_boot and bin_boot, which assume that the values they read
+ * out of sc_data_global are final.
+ */
+ sc_boot(&sc_data);
+ unsigned bin_shard_sizes[SC_NBINS];
+ bin_shard_sizes_boot(bin_shard_sizes);
+ /*
+ * prof_boot0 only initializes opt_prof_prefix. We need to do it before
+ * we parse malloc_conf options, in case malloc_conf parsing overwrites
+ * it.
+ */
+ if (config_prof) {
prof_boot0();
- }
- malloc_conf_init(&sc_data, bin_shard_sizes);
- sz_boot(&sc_data);
- bin_boot(&sc_data, bin_shard_sizes);
+ }
+ malloc_conf_init(&sc_data, bin_shard_sizes);
+ sz_boot(&sc_data);
+ bin_boot(&sc_data, bin_shard_sizes);
if (opt_stats_print) {
/* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
- if (opt_abort) {
+ if (opt_abort) {
abort();
- }
+ }
}
}
- if (pages_boot()) {
- return true;
+ if (pages_boot()) {
+ return true;
}
- if (base_boot(TSDN_NULL)) {
- return true;
- }
- if (extent_boot()) {
- return true;
+ if (base_boot(TSDN_NULL)) {
+ return true;
}
+ if (extent_boot()) {
+ return true;
+ }
if (ctl_boot()) {
- return true;
+ return true;
}
- if (config_prof) {
+ if (config_prof) {
prof_boot1();
}
- arena_boot(&sc_data);
- if (tcache_boot(TSDN_NULL)) {
- return true;
+ arena_boot(&sc_data);
+ if (tcache_boot(TSDN_NULL)) {
+ return true;
}
- if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
- malloc_mutex_rank_exclusive)) {
- return true;
+ if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
+ malloc_mutex_rank_exclusive)) {
+ return true;
}
- hook_boot();
+ hook_boot();
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
- narenas_auto = 1;
- manual_arena_base = narenas_auto + 1;
+ narenas_auto = 1;
+ manual_arena_base = narenas_auto + 1;
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/*
* Initialize one arena here. The rest are lazily created in
- * arena_choose_hard().
+ * arena_choose_hard().
*/
- if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
- == NULL) {
- return true;
+ if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
+ == NULL) {
+ return true;
}
- a0 = arena_get(TSDN_NULL, 0, false);
- malloc_init_state = malloc_init_a0_initialized;
+ a0 = arena_get(TSDN_NULL, 0, false);
+ malloc_init_state = malloc_init_a0_initialized;
- return false;
-}
+ return false;
+}
-static bool
-malloc_init_hard_a0(void) {
- bool ret;
+static bool
+malloc_init_hard_a0(void) {
+ bool ret;
- malloc_mutex_lock(TSDN_NULL, &init_lock);
- ret = malloc_init_hard_a0_locked();
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
- return ret;
-}
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+ ret = malloc_init_hard_a0_locked();
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ return ret;
+}
-/* Initialize data structures which may trigger recursive allocation. */
-static bool
-malloc_init_hard_recursible(void) {
- malloc_init_state = malloc_init_recursible;
+/* Initialize data structures which may trigger recursive allocation. */
+static bool
+malloc_init_hard_recursible(void) {
+ malloc_init_state = malloc_init_recursible;
ncpus = malloc_ncpus();
-#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
- && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
- !defined(__native_client__))
- /* LinuxThreads' pthread_atfork() allocates. */
+#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
+ && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
+ !defined(__native_client__))
+ /* LinuxThreads' pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
- if (opt_abort) {
+ if (opt_abort) {
abort();
- }
- return true;
+ }
+ return true;
}
#endif
- if (background_thread_boot0()) {
- return true;
- }
-
- return false;
-}
-
-static unsigned
-malloc_narenas_default(void) {
- assert(ncpus > 0);
- /*
- * For SMP systems, create more than one arena per CPU by
- * default.
- */
- if (ncpus > 1) {
- return ncpus << 2;
- } else {
- return 1;
- }
-}
-
-static percpu_arena_mode_t
-percpu_arena_as_initialized(percpu_arena_mode_t mode) {
- assert(!malloc_initialized());
- assert(mode <= percpu_arena_disabled);
-
- if (mode != percpu_arena_disabled) {
- mode += percpu_arena_mode_enabled_base;
- }
-
- return mode;
-}
-
-static bool
-malloc_init_narenas(void) {
- assert(ncpus > 0);
-
- if (opt_percpu_arena != percpu_arena_disabled) {
- if (!have_percpu_arena || malloc_getcpu() < 0) {
- opt_percpu_arena = percpu_arena_disabled;
- malloc_printf("<jemalloc>: perCPU arena getcpu() not "
- "available. Setting narenas to %u.\n", opt_narenas ?
- opt_narenas : malloc_narenas_default());
- if (opt_abort) {
- abort();
- }
- } else {
- if (ncpus >= MALLOCX_ARENA_LIMIT) {
- malloc_printf("<jemalloc>: narenas w/ percpu"
- "arena beyond limit (%d)\n", ncpus);
- if (opt_abort) {
- abort();
- }
- return true;
- }
- /* NB: opt_percpu_arena isn't fully initialized yet. */
- if (percpu_arena_as_initialized(opt_percpu_arena) ==
- per_phycpu_arena && ncpus % 2 != 0) {
- malloc_printf("<jemalloc>: invalid "
- "configuration -- per physical CPU arena "
- "with odd number (%u) of CPUs (no hyper "
- "threading?).\n", ncpus);
- if (opt_abort)
- abort();
- }
- unsigned n = percpu_arena_ind_limit(
- percpu_arena_as_initialized(opt_percpu_arena));
- if (opt_narenas < n) {
- /*
- * If narenas is specified with percpu_arena
- * enabled, actual narenas is set as the greater
- * of the two. percpu_arena_choose will be free
- * to use any of the arenas based on CPU
- * id. This is conservative (at a small cost)
- * but ensures correctness.
- *
- * If for some reason the ncpus determined at
- * boot is not the actual number (e.g. because
- * of affinity setting from numactl), reserving
- * narenas this way provides a workaround for
- * percpu_arena.
- */
- opt_narenas = n;
- }
- }
- }
+ if (background_thread_boot0()) {
+ return true;
+ }
+
+ return false;
+}
+
+static unsigned
+malloc_narenas_default(void) {
+ assert(ncpus > 0);
+ /*
+ * For SMP systems, create more than one arena per CPU by
+ * default.
+ */
+ if (ncpus > 1) {
+ return ncpus << 2;
+ } else {
+ return 1;
+ }
+}
+
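/*
 * Worked example (illustration only, mirroring malloc_narenas_default()
 * above): with automatic arena selection the default is four arenas per CPU
 * on SMP systems, e.g. 8 CPUs -> 32 arenas, and a single arena on 1 CPU.
 */
#include <stdio.h>

static unsigned
narenas_default_example(unsigned ncpus) {
	return ncpus > 1 ? ncpus << 2 : 1;
}

int
main(void) {
	printf("%u\n", narenas_default_example(8));	/* prints 32 */
	printf("%u\n", narenas_default_example(1));	/* prints 1 */
	return 0;
}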
+static percpu_arena_mode_t
+percpu_arena_as_initialized(percpu_arena_mode_t mode) {
+ assert(!malloc_initialized());
+ assert(mode <= percpu_arena_disabled);
+
+ if (mode != percpu_arena_disabled) {
+ mode += percpu_arena_mode_enabled_base;
+ }
+
+ return mode;
+}
+
+static bool
+malloc_init_narenas(void) {
+ assert(ncpus > 0);
+
+ if (opt_percpu_arena != percpu_arena_disabled) {
+ if (!have_percpu_arena || malloc_getcpu() < 0) {
+ opt_percpu_arena = percpu_arena_disabled;
+ malloc_printf("<jemalloc>: perCPU arena getcpu() not "
+ "available. Setting narenas to %u.\n", opt_narenas ?
+ opt_narenas : malloc_narenas_default());
+ if (opt_abort) {
+ abort();
+ }
+ } else {
+ if (ncpus >= MALLOCX_ARENA_LIMIT) {
+				malloc_printf("<jemalloc>: narenas w/ percpu "
+ "arena beyond limit (%d)\n", ncpus);
+ if (opt_abort) {
+ abort();
+ }
+ return true;
+ }
+ /* NB: opt_percpu_arena isn't fully initialized yet. */
+ if (percpu_arena_as_initialized(opt_percpu_arena) ==
+ per_phycpu_arena && ncpus % 2 != 0) {
+ malloc_printf("<jemalloc>: invalid "
+ "configuration -- per physical CPU arena "
+ "with odd number (%u) of CPUs (no hyper "
+ "threading?).\n", ncpus);
+ if (opt_abort)
+ abort();
+ }
+ unsigned n = percpu_arena_ind_limit(
+ percpu_arena_as_initialized(opt_percpu_arena));
+ if (opt_narenas < n) {
+ /*
+ * If narenas is specified with percpu_arena
+ * enabled, actual narenas is set as the greater
+ * of the two. percpu_arena_choose will be free
+ * to use any of the arenas based on CPU
+ * id. This is conservative (at a small cost)
+ * but ensures correctness.
+ *
+ * If for some reason the ncpus determined at
+ * boot is not the actual number (e.g. because
+ * of affinity setting from numactl), reserving
+ * narenas this way provides a workaround for
+ * percpu_arena.
+ */
+ opt_narenas = n;
+ }
+ }
+ }
if (opt_narenas == 0) {
- opt_narenas = malloc_narenas_default();
+ opt_narenas = malloc_narenas_default();
}
- assert(opt_narenas > 0);
-
+ assert(opt_narenas > 0);
+
narenas_auto = opt_narenas;
/*
- * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
+ * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
*/
- if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
- narenas_auto = MALLOCX_ARENA_LIMIT - 1;
+ if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
+ narenas_auto = MALLOCX_ARENA_LIMIT - 1;
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
narenas_auto);
}
- narenas_total_set(narenas_auto);
- if (arena_init_huge()) {
- narenas_total_inc();
- }
- manual_arena_base = narenas_total_get();
-
- return false;
-}
-
-static void
-malloc_init_percpu(void) {
- opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
-}
-
-static bool
-malloc_init_hard_finish(void) {
- if (malloc_mutex_boot()) {
- return true;
- }
-
- malloc_init_state = malloc_init_initialized;
- malloc_slow_flag_init();
-
- return false;
-}
-
-static void
-malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
- malloc_mutex_assert_owner(tsdn, &init_lock);
- malloc_mutex_unlock(tsdn, &init_lock);
- if (reentrancy_set) {
- assert(!tsdn_null(tsdn));
- tsd_t *tsd = tsdn_tsd(tsdn);
- assert(tsd_reentrancy_level_get(tsd) > 0);
- post_reentrancy(tsd);
- }
-}
-
-static bool
-malloc_init_hard(void) {
- tsd_t *tsd;
-
-#if defined(_WIN32) && _WIN32_WINNT < 0x0600
- _init_init_lock();
-#endif
- malloc_mutex_lock(TSDN_NULL, &init_lock);
-
-#define UNLOCK_RETURN(tsdn, ret, reentrancy) \
- malloc_init_hard_cleanup(tsdn, reentrancy); \
- return ret;
-
- if (!malloc_init_hard_needed()) {
- UNLOCK_RETURN(TSDN_NULL, false, false)
- }
-
- if (malloc_init_state != malloc_init_a0_initialized &&
- malloc_init_hard_a0_locked()) {
- UNLOCK_RETURN(TSDN_NULL, true, false)
- }
-
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
- /* Recursive allocation relies on functional tsd. */
- tsd = malloc_tsd_boot0();
- if (tsd == NULL) {
- return true;
- }
- if (malloc_init_hard_recursible()) {
- return true;
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
- /* Set reentrancy level to 1 during init. */
- pre_reentrancy(tsd, NULL);
- /* Initialize narenas before prof_boot2 (for allocation). */
- if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
- UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
- }
- if (config_prof && prof_boot2(tsd)) {
- UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
- }
-
- malloc_init_percpu();
-
- if (malloc_init_hard_finish()) {
- UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
- }
- post_reentrancy(tsd);
- malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
-
- witness_assert_lockless(witness_tsd_tsdn(
- tsd_witness_tsdp_get_unsafe(tsd)));
- malloc_tsd_boot1();
- /* Update TSD after tsd_boot1. */
- tsd = tsd_fetch();
- if (opt_background_thread) {
- assert(have_background_thread);
- /*
- * Need to finish init & unlock first before creating background
- * threads (pthread_create depends on malloc). ctl_init (which
- * sets isthreaded) needs to be called without holding any lock.
- */
- background_thread_ctl_init(tsd_tsdn(tsd));
- if (background_thread_create(tsd, 0)) {
- return true;
- }
- }
-#undef UNLOCK_RETURN
- return false;
-}
-
+ narenas_total_set(narenas_auto);
+ if (arena_init_huge()) {
+ narenas_total_inc();
+ }
+ manual_arena_base = narenas_total_get();
+
+ return false;
+}
+
+static void
+malloc_init_percpu(void) {
+ opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
+}
+
+static bool
+malloc_init_hard_finish(void) {
+ if (malloc_mutex_boot()) {
+ return true;
+ }
+
+ malloc_init_state = malloc_init_initialized;
+ malloc_slow_flag_init();
+
+ return false;
+}
+
+static void
+malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
+ malloc_mutex_assert_owner(tsdn, &init_lock);
+ malloc_mutex_unlock(tsdn, &init_lock);
+ if (reentrancy_set) {
+ assert(!tsdn_null(tsdn));
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ assert(tsd_reentrancy_level_get(tsd) > 0);
+ post_reentrancy(tsd);
+ }
+}
+
+static bool
+malloc_init_hard(void) {
+ tsd_t *tsd;
+
+#if defined(_WIN32) && _WIN32_WINNT < 0x0600
+ _init_init_lock();
+#endif
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+
+#define UNLOCK_RETURN(tsdn, ret, reentrancy) \
+ malloc_init_hard_cleanup(tsdn, reentrancy); \
+ return ret;
+
+ if (!malloc_init_hard_needed()) {
+ UNLOCK_RETURN(TSDN_NULL, false, false)
+ }
+
+ if (malloc_init_state != malloc_init_a0_initialized &&
+ malloc_init_hard_a0_locked()) {
+ UNLOCK_RETURN(TSDN_NULL, true, false)
+ }
+
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ /* Recursive allocation relies on functional tsd. */
+ tsd = malloc_tsd_boot0();
+ if (tsd == NULL) {
+ return true;
+ }
+ if (malloc_init_hard_recursible()) {
+ return true;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
+ /* Set reentrancy level to 1 during init. */
+ pre_reentrancy(tsd, NULL);
+ /* Initialize narenas before prof_boot2 (for allocation). */
+ if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
+ UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
+ }
+ if (config_prof && prof_boot2(tsd)) {
+ UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
+ }
+
+ malloc_init_percpu();
+
+ if (malloc_init_hard_finish()) {
+ UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
+ }
+ post_reentrancy(tsd);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+
+ witness_assert_lockless(witness_tsd_tsdn(
+ tsd_witness_tsdp_get_unsafe(tsd)));
+ malloc_tsd_boot1();
+ /* Update TSD after tsd_boot1. */
+ tsd = tsd_fetch();
+ if (opt_background_thread) {
+ assert(have_background_thread);
+ /*
+ * Need to finish init & unlock first before creating background
+ * threads (pthread_create depends on malloc). ctl_init (which
+ * sets isthreaded) needs to be called without holding any lock.
+ */
+ background_thread_ctl_init(tsd_tsdn(tsd));
+ if (background_thread_create(tsd, 0)) {
+ return true;
+ }
+ }
+#undef UNLOCK_RETURN
+ return false;
+}
+
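/*
 * Illustrative sketch (assumes a jemalloc build where background threads are
 * available, i.e. have_background_thread): besides the opt_background_thread
 * option parsed above, background threads can be toggled at runtime through
 * the writable "background_thread" mallctl. Error handling is intentionally
 * minimal here.
 */
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static void
enable_background_threads(void) {
	bool enable = true;
	/* Returns 0 on success, an errno-style value otherwise. */
	(void)mallctl("background_thread", NULL, NULL, &enable,
	    sizeof(enable));
}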
/*
* End initialization functions.
*/
/******************************************************************************/
/*
- * Begin allocation-path internal functions and data structures.
- */
-
-/*
- * Settings determined by the documented behavior of the allocation functions.
+ * Begin allocation-path internal functions and data structures.
*/
-typedef struct static_opts_s static_opts_t;
-struct static_opts_s {
- /* Whether or not allocation size may overflow. */
- bool may_overflow;
-
- /*
- * Whether or not allocations (with alignment) of size 0 should be
- * treated as size 1.
- */
- bool bump_empty_aligned_alloc;
- /*
- * Whether to assert that allocations are not of size 0 (after any
- * bumping).
- */
- bool assert_nonempty_alloc;
- /*
- * Whether or not to modify the 'result' argument to malloc in case of
- * error.
- */
- bool null_out_result_on_error;
- /* Whether to set errno when we encounter an error condition. */
- bool set_errno_on_error;
-
- /*
- * The minimum valid alignment for functions requesting aligned storage.
- */
- size_t min_alignment;
-
- /* The error string to use if we oom. */
- const char *oom_string;
- /* The error string to use if the passed-in alignment is invalid. */
- const char *invalid_alignment_string;
-
- /*
- * False if we're configured to skip some time-consuming operations.
- *
- * This isn't really a malloc "behavior", but it acts as a useful
- * summary of several other static (or at least, static after program
- * initialization) options.
- */
- bool slow;
- /*
- * Return size.
- */
- bool usize;
-};
-
-JEMALLOC_ALWAYS_INLINE void
-static_opts_init(static_opts_t *static_opts) {
- static_opts->may_overflow = false;
- static_opts->bump_empty_aligned_alloc = false;
- static_opts->assert_nonempty_alloc = false;
- static_opts->null_out_result_on_error = false;
- static_opts->set_errno_on_error = false;
- static_opts->min_alignment = 0;
- static_opts->oom_string = "";
- static_opts->invalid_alignment_string = "";
- static_opts->slow = false;
- static_opts->usize = false;
+/*
+ * Settings determined by the documented behavior of the allocation functions.
+ */
+typedef struct static_opts_s static_opts_t;
+struct static_opts_s {
+ /* Whether or not allocation size may overflow. */
+ bool may_overflow;
+
+ /*
+ * Whether or not allocations (with alignment) of size 0 should be
+ * treated as size 1.
+ */
+ bool bump_empty_aligned_alloc;
+ /*
+ * Whether to assert that allocations are not of size 0 (after any
+ * bumping).
+ */
+ bool assert_nonempty_alloc;
+
+ /*
+ * Whether or not to modify the 'result' argument to malloc in case of
+ * error.
+ */
+ bool null_out_result_on_error;
+ /* Whether to set errno when we encounter an error condition. */
+ bool set_errno_on_error;
+
+ /*
+ * The minimum valid alignment for functions requesting aligned storage.
+ */
+ size_t min_alignment;
+
+ /* The error string to use if we oom. */
+ const char *oom_string;
+ /* The error string to use if the passed-in alignment is invalid. */
+ const char *invalid_alignment_string;
+
+ /*
+ * False if we're configured to skip some time-consuming operations.
+ *
+ * This isn't really a malloc "behavior", but it acts as a useful
+ * summary of several other static (or at least, static after program
+ * initialization) options.
+ */
+ bool slow;
+ /*
+ * Return size.
+ */
+ bool usize;
+};
+
+JEMALLOC_ALWAYS_INLINE void
+static_opts_init(static_opts_t *static_opts) {
+ static_opts->may_overflow = false;
+ static_opts->bump_empty_aligned_alloc = false;
+ static_opts->assert_nonempty_alloc = false;
+ static_opts->null_out_result_on_error = false;
+ static_opts->set_errno_on_error = false;
+ static_opts->min_alignment = 0;
+ static_opts->oom_string = "";
+ static_opts->invalid_alignment_string = "";
+ static_opts->slow = false;
+ static_opts->usize = false;
}
/*
- * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
- * should have one constant here per magic value there. Note however that the
- * representations need not be related.
+ * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
+ * should have one constant here per magic value there. Note however that the
+ * representations need not be related.
*/
-#define TCACHE_IND_NONE ((unsigned)-1)
-#define TCACHE_IND_AUTOMATIC ((unsigned)-2)
-#define ARENA_IND_AUTOMATIC ((unsigned)-1)
-
-typedef struct dynamic_opts_s dynamic_opts_t;
-struct dynamic_opts_s {
- void **result;
- size_t usize;
- size_t num_items;
- size_t item_size;
- size_t alignment;
- bool zero;
- unsigned tcache_ind;
- unsigned arena_ind;
-};
-
-JEMALLOC_ALWAYS_INLINE void
-dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
- dynamic_opts->result = NULL;
- dynamic_opts->usize = 0;
- dynamic_opts->num_items = 0;
- dynamic_opts->item_size = 0;
- dynamic_opts->alignment = 0;
- dynamic_opts->zero = false;
- dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
- dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
-}
-
-/* ind is ignored if dopts->alignment > 0. */
-JEMALLOC_ALWAYS_INLINE void *
-imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
- size_t size, size_t usize, szind_t ind) {
- tcache_t *tcache;
- arena_t *arena;
-
- /* Fill in the tcache. */
- if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
- if (likely(!sopts->slow)) {
- /* Getting tcache ptr unconditionally. */
- tcache = tsd_tcachep_get(tsd);
- assert(tcache == tcache_get(tsd));
- } else {
- tcache = tcache_get(tsd);
- }
- } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
- tcache = NULL;
- } else {
- tcache = tcaches_get(tsd, dopts->tcache_ind);
- }
-
- /* Fill in the arena. */
- if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
- /*
- * In case of automatic arena management, we defer arena
- * computation until as late as we can, hoping to fill the
- * allocation out of the tcache.
- */
- arena = NULL;
- } else {
- arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
- }
-
- if (unlikely(dopts->alignment != 0)) {
- return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
- dopts->zero, tcache, arena);
- }
-
- return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
- arena, sopts->slow);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
- size_t usize, szind_t ind) {
- void *ret;
-
- /*
- * For small allocations, sampling bumps the usize. If so, we allocate
- * from the ind_large bucket.
- */
- szind_t ind_large;
- size_t bumped_usize = usize;
-
- if (usize <= SC_SMALL_MAXCLASS) {
- assert(((dopts->alignment == 0) ?
- sz_s2u(SC_LARGE_MINCLASS) :
- sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment))
- == SC_LARGE_MINCLASS);
- ind_large = sz_size2index(SC_LARGE_MINCLASS);
- bumped_usize = sz_s2u(SC_LARGE_MINCLASS);
- ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
- bumped_usize, ind_large);
- if (unlikely(ret == NULL)) {
- return NULL;
- }
- arena_prof_promote(tsd_tsdn(tsd), ret, usize);
- } else {
- ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
- }
-
- return ret;
+#define TCACHE_IND_NONE ((unsigned)-1)
+#define TCACHE_IND_AUTOMATIC ((unsigned)-2)
+#define ARENA_IND_AUTOMATIC ((unsigned)-1)
+
+typedef struct dynamic_opts_s dynamic_opts_t;
+struct dynamic_opts_s {
+ void **result;
+ size_t usize;
+ size_t num_items;
+ size_t item_size;
+ size_t alignment;
+ bool zero;
+ unsigned tcache_ind;
+ unsigned arena_ind;
+};
+
+JEMALLOC_ALWAYS_INLINE void
+dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
+ dynamic_opts->result = NULL;
+ dynamic_opts->usize = 0;
+ dynamic_opts->num_items = 0;
+ dynamic_opts->item_size = 0;
+ dynamic_opts->alignment = 0;
+ dynamic_opts->zero = false;
+ dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
+ dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
+}
+
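/*
 * Illustrative sketch: the TCACHE_IND_* / ARENA_IND_* constants above mirror
 * the public MALLOCX_* macros from jemalloc/jemalloc_macros.h. A caller can
 * exercise the same paths through the non-standard mallocx()/dallocx() entry
 * points; the flags used below are documented jemalloc API, the size is
 * arbitrary.
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void *
alloc_bypassing_tcache(size_t size) {
	/*
	 * MALLOCX_TCACHE_NONE corresponds to tcache_ind == TCACHE_IND_NONE;
	 * MALLOCX_ZERO requests zeroed memory (dopts->zero).
	 */
	return mallocx(size, MALLOCX_TCACHE_NONE | MALLOCX_ZERO);
}

static void
free_bypassing_tcache(void *ptr) {
	dallocx(ptr, MALLOCX_TCACHE_NONE);
}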
+/* ind is ignored if dopts->alignment > 0. */
+JEMALLOC_ALWAYS_INLINE void *
+imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
+ size_t size, size_t usize, szind_t ind) {
+ tcache_t *tcache;
+ arena_t *arena;
+
+ /* Fill in the tcache. */
+ if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
+ if (likely(!sopts->slow)) {
+ /* Getting tcache ptr unconditionally. */
+ tcache = tsd_tcachep_get(tsd);
+ assert(tcache == tcache_get(tsd));
+ } else {
+ tcache = tcache_get(tsd);
+ }
+ } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, dopts->tcache_ind);
+ }
+
+ /* Fill in the arena. */
+ if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
+ /*
+ * In case of automatic arena management, we defer arena
+ * computation until as late as we can, hoping to fill the
+ * allocation out of the tcache.
+ */
+ arena = NULL;
+ } else {
+ arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
+ }
+
+ if (unlikely(dopts->alignment != 0)) {
+ return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
+ dopts->zero, tcache, arena);
+ }
+
+ return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
+ arena, sopts->slow);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
+ size_t usize, szind_t ind) {
+ void *ret;
+
+ /*
+ * For small allocations, sampling bumps the usize. If so, we allocate
+ * from the ind_large bucket.
+ */
+ szind_t ind_large;
+ size_t bumped_usize = usize;
+
+ if (usize <= SC_SMALL_MAXCLASS) {
+ assert(((dopts->alignment == 0) ?
+ sz_s2u(SC_LARGE_MINCLASS) :
+ sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment))
+ == SC_LARGE_MINCLASS);
+ ind_large = sz_size2index(SC_LARGE_MINCLASS);
+ bumped_usize = sz_s2u(SC_LARGE_MINCLASS);
+ ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
+ bumped_usize, ind_large);
+ if (unlikely(ret == NULL)) {
+ return NULL;
+ }
+ arena_prof_promote(tsd_tsdn(tsd), ret, usize);
+ } else {
+ ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
+ }
+
+ return ret;
}
/*
- * Returns true if the allocation will overflow, and false otherwise. Sets
- * *size to the product either way.
+ * Returns true if the allocation will overflow, and false otherwise. Sets
+ * *size to the product either way.
*/
-JEMALLOC_ALWAYS_INLINE bool
-compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
- size_t *size) {
- /*
- * This function is just num_items * item_size, except that we may have
- * to check for overflow.
- */
-
- if (!may_overflow) {
- assert(dopts->num_items == 1);
- *size = dopts->item_size;
- return false;
- }
-
- /* A size_t with its high-half bits all set to 1. */
- static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
-
- *size = dopts->item_size * dopts->num_items;
-
- if (unlikely(*size == 0)) {
- return (dopts->num_items != 0 && dopts->item_size != 0);
- }
-
- /*
- * We got a non-zero size, but we don't know if we overflowed to get
- * there. To avoid having to do a divide, we'll be clever and note that
- * if both A and B can be represented in N/2 bits, then their product
- * can be represented in N bits (without the possibility of overflow).
- */
- if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
- return false;
- }
- if (likely(*size / dopts->item_size == dopts->num_items)) {
- return false;
- }
- return true;
-}
-
-JEMALLOC_ALWAYS_INLINE int
-imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
- /* Where the actual allocated memory will live. */
- void *allocation = NULL;
- /* Filled in by compute_size_with_overflow below. */
- size_t size = 0;
- /*
- * For unaligned allocations, we need only ind. For aligned
- * allocations, or in case of stats or profiling we need usize.
- *
- * These are actually dead stores, in that their values are reset before
- * any branch on their value is taken. Sometimes though, it's
- * convenient to pass them as arguments before this point. To avoid
- * undefined behavior then, we initialize them with dummy stores.
- */
- szind_t ind = 0;
- size_t usize = 0;
-
- /* Reentrancy is only checked on slow path. */
- int8_t reentrancy_level;
-
- /* Compute the amount of memory the user wants. */
- if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
- &size))) {
+JEMALLOC_ALWAYS_INLINE bool
+compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
+ size_t *size) {
+ /*
+ * This function is just num_items * item_size, except that we may have
+ * to check for overflow.
+ */
+
+ if (!may_overflow) {
+ assert(dopts->num_items == 1);
+ *size = dopts->item_size;
+ return false;
+ }
+
+ /* A size_t with its high-half bits all set to 1. */
+ static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
+
+ *size = dopts->item_size * dopts->num_items;
+
+ if (unlikely(*size == 0)) {
+ return (dopts->num_items != 0 && dopts->item_size != 0);
+ }
+
+ /*
+ * We got a non-zero size, but we don't know if we overflowed to get
+ * there. To avoid having to do a divide, we'll be clever and note that
+ * if both A and B can be represented in N/2 bits, then their product
+ * can be represented in N bits (without the possibility of overflow).
+ */
+ if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
+ return false;
+ }
+ if (likely(*size / dopts->item_size == dopts->num_items)) {
+ return false;
+ }
+ return true;
+}
+
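/*
 * Standalone sketch (illustration only) of the overflow check used by
 * compute_size_with_overflow() above: if both factors fit in the low half of
 * a size_t, their product cannot overflow, so the divide-based check is only
 * needed otherwise. SIZE_MAX stands in for jemalloc's internal SIZE_T_MAX.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
mul_overflows(size_t num_items, size_t item_size, size_t *size) {
	/* A size_t with its high-half bits all set to 1. */
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) * 8 / 2);

	*size = num_items * item_size;
	if (*size == 0) {
		/* A zero product only indicates overflow if neither factor was 0. */
		return num_items != 0 && item_size != 0;
	}
	if ((high_bits & (num_items | item_size)) == 0) {
		return false;	/* Both factors fit in N/2 bits: no overflow. */
	}
	return *size / item_size != num_items;
}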
+JEMALLOC_ALWAYS_INLINE int
+imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
+ /* Where the actual allocated memory will live. */
+ void *allocation = NULL;
+ /* Filled in by compute_size_with_overflow below. */
+ size_t size = 0;
+ /*
+ * For unaligned allocations, we need only ind. For aligned
+ * allocations, or in case of stats or profiling we need usize.
+ *
+ * These are actually dead stores, in that their values are reset before
+ * any branch on their value is taken. Sometimes though, it's
+ * convenient to pass them as arguments before this point. To avoid
+ * undefined behavior then, we initialize them with dummy stores.
+ */
+ szind_t ind = 0;
+ size_t usize = 0;
+
+ /* Reentrancy is only checked on slow path. */
+ int8_t reentrancy_level;
+
+ /* Compute the amount of memory the user wants. */
+ if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
+ &size))) {
goto label_oom;
- }
-
- if (unlikely(dopts->alignment < sopts->min_alignment
- || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
- goto label_invalid_alignment;
- }
-
- /* This is the beginning of the "core" algorithm. */
-
- if (dopts->alignment == 0) {
- ind = sz_size2index(size);
- if (unlikely(ind >= SC_NSIZES)) {
- goto label_oom;
- }
- if (config_stats || (config_prof && opt_prof) || sopts->usize) {
- usize = sz_index2size(ind);
- dopts->usize = usize;
- assert(usize > 0 && usize
- <= SC_LARGE_MAXCLASS);
- }
+ }
+
+ if (unlikely(dopts->alignment < sopts->min_alignment
+ || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
+ goto label_invalid_alignment;
+ }
+
+ /* This is the beginning of the "core" algorithm. */
+
+ if (dopts->alignment == 0) {
+ ind = sz_size2index(size);
+ if (unlikely(ind >= SC_NSIZES)) {
+ goto label_oom;
+ }
+ if (config_stats || (config_prof && opt_prof) || sopts->usize) {
+ usize = sz_index2size(ind);
+ dopts->usize = usize;
+ assert(usize > 0 && usize
+ <= SC_LARGE_MAXCLASS);
+ }
} else {
- if (sopts->bump_empty_aligned_alloc) {
- if (unlikely(size == 0)) {
- size = 1;
+ if (sopts->bump_empty_aligned_alloc) {
+ if (unlikely(size == 0)) {
+ size = 1;
}
}
- usize = sz_sa2u(size, dopts->alignment);
- dopts->usize = usize;
- if (unlikely(usize == 0
- || usize > SC_LARGE_MAXCLASS)) {
- goto label_oom;
- }
- }
- /* Validate the user input. */
- if (sopts->assert_nonempty_alloc) {
- assert (size != 0);
- }
-
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- /*
- * If we need to handle reentrancy, we can do it out of a
- * known-initialized arena (i.e. arena 0).
- */
- reentrancy_level = tsd_reentrancy_level_get(tsd);
- if (sopts->slow && unlikely(reentrancy_level > 0)) {
- /*
- * We should never specify particular arenas or tcaches from
- * within our internal allocations.
- */
- assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
- dopts->tcache_ind == TCACHE_IND_NONE);
- assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
- dopts->tcache_ind = TCACHE_IND_NONE;
- /* We know that arena 0 has already been initialized. */
- dopts->arena_ind = 0;
- }
-
- /* If profiling is on, get our profiling context. */
- if (config_prof && opt_prof) {
- /*
- * Note that if we're going down this path, usize must have been
- * initialized in the previous if statement.
- */
- prof_tctx_t *tctx = prof_alloc_prep(
- tsd, usize, prof_active_get_unlocked(), true);
-
- alloc_ctx_t alloc_ctx;
- if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
- alloc_ctx.slab = (usize
- <= SC_SMALL_MAXCLASS);
- allocation = imalloc_no_sample(
- sopts, dopts, tsd, usize, usize, ind);
- } else if ((uintptr_t)tctx > (uintptr_t)1U) {
- /*
- * Note that ind might still be 0 here. This is fine;
- * imalloc_sample ignores ind if dopts->alignment > 0.
- */
- allocation = imalloc_sample(
- sopts, dopts, tsd, usize, ind);
- alloc_ctx.slab = false;
- } else {
- allocation = NULL;
- }
-
- if (unlikely(allocation == NULL)) {
- prof_alloc_rollback(tsd, tctx, true);
+ usize = sz_sa2u(size, dopts->alignment);
+ dopts->usize = usize;
+ if (unlikely(usize == 0
+ || usize > SC_LARGE_MAXCLASS)) {
goto label_oom;
}
- prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
- } else {
- /*
- * If dopts->alignment > 0, then ind is still 0, but usize was
- * computed in the previous if statement. Down the positive
- * alignment path, imalloc_no_sample ignores ind and size
- * (relying only on usize).
- */
- allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
- ind);
- if (unlikely(allocation == NULL)) {
+ }
+ /* Validate the user input. */
+ if (sopts->assert_nonempty_alloc) {
+		assert(size != 0);
+ }
+
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ /*
+ * If we need to handle reentrancy, we can do it out of a
+ * known-initialized arena (i.e. arena 0).
+ */
+ reentrancy_level = tsd_reentrancy_level_get(tsd);
+ if (sopts->slow && unlikely(reentrancy_level > 0)) {
+ /*
+ * We should never specify particular arenas or tcaches from
+ * within our internal allocations.
+ */
+ assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
+ dopts->tcache_ind == TCACHE_IND_NONE);
+ assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
+ dopts->tcache_ind = TCACHE_IND_NONE;
+ /* We know that arena 0 has already been initialized. */
+ dopts->arena_ind = 0;
+ }
+
+ /* If profiling is on, get our profiling context. */
+ if (config_prof && opt_prof) {
+ /*
+ * Note that if we're going down this path, usize must have been
+ * initialized in the previous if statement.
+ */
+ prof_tctx_t *tctx = prof_alloc_prep(
+ tsd, usize, prof_active_get_unlocked(), true);
+
+ alloc_ctx_t alloc_ctx;
+ if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
+ alloc_ctx.slab = (usize
+ <= SC_SMALL_MAXCLASS);
+ allocation = imalloc_no_sample(
+ sopts, dopts, tsd, usize, usize, ind);
+ } else if ((uintptr_t)tctx > (uintptr_t)1U) {
+ /*
+ * Note that ind might still be 0 here. This is fine;
+ * imalloc_sample ignores ind if dopts->alignment > 0.
+ */
+ allocation = imalloc_sample(
+ sopts, dopts, tsd, usize, ind);
+ alloc_ctx.slab = false;
+ } else {
+ allocation = NULL;
+ }
+
+ if (unlikely(allocation == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
goto label_oom;
- }
- }
-
- /*
- * Allocation has been done at this point. We still have some
- * post-allocation work to do though.
- */
- assert(dopts->alignment == 0
- || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
-
- if (config_stats) {
- assert(usize == isalloc(tsd_tsdn(tsd), allocation));
- *tsd_thread_allocatedp_get(tsd) += usize;
- }
-
- if (sopts->slow) {
- UTRACE(0, size, allocation);
- }
-
- /* Success! */
- check_entry_exit_locking(tsd_tsdn(tsd));
- *dopts->result = allocation;
- return 0;
-
+ }
+ prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
+ } else {
+ /*
+ * If dopts->alignment > 0, then ind is still 0, but usize was
+ * computed in the previous if statement. Down the positive
+ * alignment path, imalloc_no_sample ignores ind and size
+ * (relying only on usize).
+ */
+ allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
+ ind);
+ if (unlikely(allocation == NULL)) {
+ goto label_oom;
+ }
+ }
+
+ /*
+ * Allocation has been done at this point. We still have some
+ * post-allocation work to do though.
+ */
+ assert(dopts->alignment == 0
+ || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
+
+ if (config_stats) {
+ assert(usize == isalloc(tsd_tsdn(tsd), allocation));
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ }
+
+ if (sopts->slow) {
+ UTRACE(0, size, allocation);
+ }
+
+ /* Success! */
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ *dopts->result = allocation;
+ return 0;
+
label_oom:
- if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_write(sopts->oom_string);
+ if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write(sopts->oom_string);
abort();
}
- if (sopts->slow) {
- UTRACE(NULL, size, NULL);
- }
-
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- if (sopts->set_errno_on_error) {
- set_errno(ENOMEM);
- }
-
- if (sopts->null_out_result_on_error) {
- *dopts->result = NULL;
- }
-
- return ENOMEM;
-
- /*
- * This label is only jumped to by one goto; we move it out of line
- * anyways to avoid obscuring the non-error paths, and for symmetry with
- * the oom case.
- */
-label_invalid_alignment:
- if (config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_write(sopts->invalid_alignment_string);
- abort();
- }
-
- if (sopts->set_errno_on_error) {
- set_errno(EINVAL);
- }
-
- if (sopts->slow) {
- UTRACE(NULL, size, NULL);
- }
-
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- if (sopts->null_out_result_on_error) {
- *dopts->result = NULL;
- }
-
- return EINVAL;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) {
- if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
- if (config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_write(sopts->oom_string);
- abort();
- }
- UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
- set_errno(ENOMEM);
- *dopts->result = NULL;
-
- return false;
- }
-
- return true;
-}
-
-/* Returns the errno-style error code of the allocation. */
-JEMALLOC_ALWAYS_INLINE int
-imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
- if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
- return ENOMEM;
- }
-
- /* We always need the tsd. Let's grab it right away. */
- tsd_t *tsd = tsd_fetch();
- assert(tsd);
- if (likely(tsd_fast(tsd))) {
- /* Fast and common path. */
- tsd_assert_fast(tsd);
- sopts->slow = false;
- return imalloc_body(sopts, dopts, tsd);
- } else {
- if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
- return ENOMEM;
- }
-
- sopts->slow = true;
- return imalloc_body(sopts, dopts, tsd);
- }
-}
-
-JEMALLOC_NOINLINE
+ if (sopts->slow) {
+ UTRACE(NULL, size, NULL);
+ }
+
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ if (sopts->set_errno_on_error) {
+ set_errno(ENOMEM);
+ }
+
+ if (sopts->null_out_result_on_error) {
+ *dopts->result = NULL;
+ }
+
+ return ENOMEM;
+
+ /*
+ * This label is only jumped to by one goto; we move it out of line
+ * anyways to avoid obscuring the non-error paths, and for symmetry with
+ * the oom case.
+ */
+label_invalid_alignment:
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write(sopts->invalid_alignment_string);
+ abort();
+ }
+
+ if (sopts->set_errno_on_error) {
+ set_errno(EINVAL);
+ }
+
+ if (sopts->slow) {
+ UTRACE(NULL, size, NULL);
+ }
+
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ if (sopts->null_out_result_on_error) {
+ *dopts->result = NULL;
+ }
+
+ return EINVAL;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) {
+ if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write(sopts->oom_string);
+ abort();
+ }
+ UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
+ set_errno(ENOMEM);
+ *dopts->result = NULL;
+
+ return false;
+ }
+
+ return true;
+}
+
+/* Returns the errno-style error code of the allocation. */
+JEMALLOC_ALWAYS_INLINE int
+imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
+ if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
+ return ENOMEM;
+ }
+
+ /* We always need the tsd. Let's grab it right away. */
+ tsd_t *tsd = tsd_fetch();
+ assert(tsd);
+ if (likely(tsd_fast(tsd))) {
+ /* Fast and common path. */
+ tsd_assert_fast(tsd);
+ sopts->slow = false;
+ return imalloc_body(sopts, dopts, tsd);
+ } else {
+ if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
+ return ENOMEM;
+ }
+
+ sopts->slow = true;
+ return imalloc_body(sopts, dopts, tsd);
+ }
+}
+
+JEMALLOC_NOINLINE
void *
-malloc_default(size_t size) {
+malloc_default(size_t size) {
void *ret;
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- LOG("core.malloc.entry", "size: %zu", size);
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.null_out_result_on_error = true;
- sopts.set_errno_on_error = true;
- sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
-
- dopts.result = &ret;
- dopts.num_items = 1;
- dopts.item_size = size;
-
- imalloc(&sopts, &dopts);
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ LOG("core.malloc.entry", "size: %zu", size);
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+
+ imalloc(&sopts, &dopts);
/*
- * Note that this branch gets optimized away -- it immediately follows
- * the check on tsd_fast that sets sopts.slow.
+ * Note that this branch gets optimized away -- it immediately follows
+ * the check on tsd_fast that sets sopts.slow.
*/
- if (sopts.slow) {
- uintptr_t args[3] = {size};
- hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args);
- }
-
- LOG("core.malloc.exit", "result: %p", ret);
-
- return ret;
-}
-
-/******************************************************************************/
-/*
- * Begin malloc(3)-compatible functions.
- */
-
-#if defined(JEMALLOC_ZONE)
-extern void je_assure_zone_register();
-#endif
-
-/*
- * malloc() fastpath.
- *
- * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
- * tcache. If either of these is false, we tail-call to the slowpath,
- * malloc_default(). Tail-calling is used to avoid any caller-saved
- * registers.
- *
- * fastpath supports ticker and profiling, both of which will also
- * tail-call to the slowpath if they fire.
- */
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
-je_malloc(size_t size) {
-#if defined(JEMALLOC_ZONE)
- je_assure_zone_register();
-#endif
-
- LOG("core.malloc.entry", "size: %zu", size);
-
- if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
- return malloc_default(size);
- }
-
- tsd_t *tsd = tsd_get(false);
- if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) {
- return malloc_default(size);
- }
-
- tcache_t *tcache = tsd_tcachep_get(tsd);
-
- if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
- return malloc_default(size);
- }
-
- szind_t ind = sz_size2index_lookup(size);
- size_t usize;
- if (config_stats || config_prof) {
- usize = sz_index2size(ind);
- }
- /* Fast path relies on size being a bin. I.e. SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS */
- assert(ind < SC_NBINS);
- assert(size <= SC_SMALL_MAXCLASS);
-
- if (config_prof) {
- int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
- bytes_until_sample -= usize;
- tsd_bytes_until_sample_set(tsd, bytes_until_sample);
-
- if (unlikely(bytes_until_sample < 0)) {
- /*
- * Avoid a prof_active check on the fastpath.
- * If prof_active is false, set bytes_until_sample to
- * a large value. If prof_active is set to true,
- * bytes_until_sample will be reset.
- */
- if (!prof_active) {
- tsd_bytes_until_sample_set(tsd, SSIZE_MAX);
- }
- return malloc_default(size);
- }
- }
-
- cache_bin_t *bin = tcache_small_bin_get(tcache, ind);
- bool tcache_success;
- void* ret = cache_bin_alloc_easy(bin, &tcache_success);
-
- if (tcache_success) {
- if (config_stats) {
- *tsd_thread_allocatedp_get(tsd) += usize;
- bin->tstats.nrequests++;
- }
- if (config_prof) {
- tcache->prof_accumbytes += usize;
- }
-
- LOG("core.malloc.exit", "result: %p", ret);
-
- /* Fastpath success */
- return ret;
- }
-
- return malloc_default(size);
-}
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-JEMALLOC_ATTR(nonnull(1))
-je_posix_memalign(void **memptr, size_t alignment, size_t size) {
- int ret;
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
- "size: %zu", memptr, alignment, size);
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.bump_empty_aligned_alloc = true;
- sopts.min_alignment = sizeof(void *);
- sopts.oom_string =
- "<jemalloc>: Error allocating aligned memory: out of memory\n";
- sopts.invalid_alignment_string =
- "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
-
- dopts.result = memptr;
- dopts.num_items = 1;
- dopts.item_size = size;
- dopts.alignment = alignment;
-
- ret = imalloc(&sopts, &dopts);
- if (sopts.slow) {
- uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment,
- (uintptr_t)size};
- hook_invoke_alloc(hook_alloc_posix_memalign, *memptr,
- (uintptr_t)ret, args);
- }
-
- LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
- *memptr);
-
- return ret;
-}
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
-je_aligned_alloc(size_t alignment, size_t size) {
- void *ret;
-
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
- alignment, size);
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.bump_empty_aligned_alloc = true;
- sopts.null_out_result_on_error = true;
- sopts.set_errno_on_error = true;
- sopts.min_alignment = 1;
- sopts.oom_string =
- "<jemalloc>: Error allocating aligned memory: out of memory\n";
- sopts.invalid_alignment_string =
- "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
-
- dopts.result = &ret;
- dopts.num_items = 1;
- dopts.item_size = size;
- dopts.alignment = alignment;
-
- imalloc(&sopts, &dopts);
- if (sopts.slow) {
- uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size};
- hook_invoke_alloc(hook_alloc_aligned_alloc, ret,
- (uintptr_t)ret, args);
- }
-
- LOG("core.aligned_alloc.exit", "result: %p", ret);
-
- return ret;
-}
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
-je_calloc(size_t num, size_t size) {
- void *ret;
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.may_overflow = true;
- sopts.null_out_result_on_error = true;
- sopts.set_errno_on_error = true;
- sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
-
- dopts.result = &ret;
- dopts.num_items = num;
- dopts.item_size = size;
- dopts.zero = true;
-
- imalloc(&sopts, &dopts);
- if (sopts.slow) {
- uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size};
- hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args);
- }
-
- LOG("core.calloc.exit", "result: %p", ret);
-
- return ret;
-}
-
+ if (sopts.slow) {
+ uintptr_t args[3] = {size};
+ hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args);
+ }
+
+ LOG("core.malloc.exit", "result: %p", ret);
+
+ return ret;
+}
+
+/******************************************************************************/
+/*
+ * Begin malloc(3)-compatible functions.
+ */
+
+#if defined(JEMALLOC_ZONE)
+extern void je_assure_zone_register();
+#endif
+
+/*
+ * malloc() fastpath.
+ *
+ * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
+ * tcache. If either of these is false, we tail-call to the slowpath,
+ * malloc_default(). Tail-calling is used to avoid any caller-saved
+ * registers.
+ *
+ * fastpath supports ticker and profiling, both of which will also
+ * tail-call to the slowpath if they fire.
+ */
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_malloc(size_t size) {
+#if defined(JEMALLOC_ZONE)
+ je_assure_zone_register();
+#endif
+
+ LOG("core.malloc.entry", "size: %zu", size);
+
+ if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
+ return malloc_default(size);
+ }
+
+ tsd_t *tsd = tsd_get(false);
+ if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) {
+ return malloc_default(size);
+ }
+
+ tcache_t *tcache = tsd_tcachep_get(tsd);
+
+ if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
+ return malloc_default(size);
+ }
+
+ szind_t ind = sz_size2index_lookup(size);
+ size_t usize;
+ if (config_stats || config_prof) {
+ usize = sz_index2size(ind);
+ }
+	/* Fast path relies on size being a bin, i.e. SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS. */
+ assert(ind < SC_NBINS);
+ assert(size <= SC_SMALL_MAXCLASS);
+
+ if (config_prof) {
+ int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
+ bytes_until_sample -= usize;
+ tsd_bytes_until_sample_set(tsd, bytes_until_sample);
+
+ if (unlikely(bytes_until_sample < 0)) {
+ /*
+ * Avoid a prof_active check on the fastpath.
+ * If prof_active is false, set bytes_until_sample to
+ * a large value. If prof_active is set to true,
+ * bytes_until_sample will be reset.
+ */
+ if (!prof_active) {
+ tsd_bytes_until_sample_set(tsd, SSIZE_MAX);
+ }
+ return malloc_default(size);
+ }
+ }
+
+ cache_bin_t *bin = tcache_small_bin_get(tcache, ind);
+ bool tcache_success;
+ void* ret = cache_bin_alloc_easy(bin, &tcache_success);
+
+ if (tcache_success) {
+ if (config_stats) {
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ bin->tstats.nrequests++;
+ }
+ if (config_prof) {
+ tcache->prof_accumbytes += usize;
+ }
+
+ LOG("core.malloc.exit", "result: %p", ret);
+
+ /* Fastpath success */
+ return ret;
+ }
+
+ return malloc_default(size);
+}
+
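/*
 * Illustrative sketch (assumes a build with profiling compiled in, i.e.
 * config_prof): the fastpath above only decrements bytes_until_sample;
 * whether sampling happens at all is governed by the prof_* options parsed
 * earlier and by the writable "prof.active" mallctl shown here. The option
 * values are examples only.
 */
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

/* Matches the opt_prof / opt_lg_prof_sample handlers parsed above. */
const char *malloc_conf = "prof:true,lg_prof_sample:19";

static void
set_profiling_active(bool active) {
	/* Runtime counterpart of opt_prof_active. */
	(void)mallctl("prof.active", NULL, NULL, &active, sizeof(active));
}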
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+JEMALLOC_ATTR(nonnull(1))
+je_posix_memalign(void **memptr, size_t alignment, size_t size) {
+ int ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
+ "size: %zu", memptr, alignment, size);
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_aligned_alloc = true;
+ sopts.min_alignment = sizeof(void *);
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+
+ dopts.result = memptr;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = alignment;
+
+ ret = imalloc(&sopts, &dopts);
+ if (sopts.slow) {
+ uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment,
+ (uintptr_t)size};
+ hook_invoke_alloc(hook_alloc_posix_memalign, *memptr,
+ (uintptr_t)ret, args);
+ }
+
+ LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
+ *memptr);
+
+ return ret;
+}
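
je_posix_memalign() enforces a minimum alignment of sizeof(void *) and reports failure through the return value rather than through errno. A small standalone sketch of the same contract against the standard posix_memalign() follows; the helper name alignment_is_valid is ours, not jemalloc's:

#define _POSIX_C_SOURCE 200112L
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* posix_memalign() only accepts power-of-two alignments that are at
 * least sizeof(void *); this mirrors sopts.min_alignment above. */
static bool alignment_is_valid(size_t alignment) {
        return alignment >= sizeof(void *) &&
            (alignment & (alignment - 1)) == 0;
}

int main(void) {
        void *p = NULL;
        size_t alignment = 64;

        if (!alignment_is_valid(alignment)) {
                return 1;
        }
        /* Failure is reported via the return value, not errno. */
        int err = posix_memalign(&p, alignment, 1000);
        if (err != 0) {
                return 1;
        }
        printf("aligned: %d\n", (int)((uintptr_t)p % alignment == 0));
        free(p);
        return 0;
}
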
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
+je_aligned_alloc(size_t alignment, size_t size) {
+ void *ret;
+
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
+ alignment, size);
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_aligned_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.min_alignment = 1;
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = alignment;
+
+ imalloc(&sopts, &dopts);
+ if (sopts.slow) {
+ uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size};
+ hook_invoke_alloc(hook_alloc_aligned_alloc, ret,
+ (uintptr_t)ret, args);
+ }
+
+ LOG("core.aligned_alloc.exit", "result: %p", ret);
+
+ return ret;
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
+je_calloc(size_t num, size_t size) {
+ void *ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.may_overflow = true;
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
+
+ dopts.result = &ret;
+ dopts.num_items = num;
+ dopts.item_size = size;
+ dopts.zero = true;
+
+ imalloc(&sopts, &dopts);
+ if (sopts.slow) {
+ uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size};
+ hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args);
+ }
+
+ LOG("core.calloc.exit", "result: %p", ret);
+
+ return ret;
+}
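
je_calloc() sets sopts.may_overflow because num * size can wrap around in size_t arithmetic, and a wrapped product must result in a clean failure rather than a short buffer. A portable sketch of that check (checked_calloc is our own helper, not jemalloc API):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Fail with ENOMEM when num * size would overflow, otherwise behave
 * like calloc(): allocate and zero the product. */
void *checked_calloc(size_t num, size_t size) {
        if (size != 0 && num > SIZE_MAX / size) {
                errno = ENOMEM;
                return NULL;
        }
        void *p = malloc(num * size);
        if (p != NULL) {
                memset(p, 0, num * size);
        }
        return p;
}

For example, checked_calloc(SIZE_MAX / 2 + 1, 2) returns NULL instead of handing back an undersized buffer.
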
+
static void *
-irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
- prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
+irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
+ prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
void *p;
- if (tctx == NULL) {
- return NULL;
- }
- if (usize <= SC_SMALL_MAXCLASS) {
- p = iralloc(tsd, old_ptr, old_usize,
- SC_LARGE_MINCLASS, 0, false, hook_args);
- if (p == NULL) {
- return NULL;
- }
- arena_prof_promote(tsd_tsdn(tsd), p, usize);
- } else {
- p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
- hook_args);
- }
-
- return p;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
- alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
+ if (tctx == NULL) {
+ return NULL;
+ }
+ if (usize <= SC_SMALL_MAXCLASS) {
+ p = iralloc(tsd, old_ptr, old_usize,
+ SC_LARGE_MINCLASS, 0, false, hook_args);
+ if (p == NULL) {
+ return NULL;
+ }
+ arena_prof_promote(tsd_tsdn(tsd), p, usize);
+ } else {
+ p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
+ hook_args);
+ }
+
+ return p;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
+ alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
void *p;
- bool prof_active;
- prof_tctx_t *old_tctx, *tctx;
-
- prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
- tctx = prof_alloc_prep(tsd, usize, prof_active, true);
- if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
- p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx,
- hook_args);
- } else {
- p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
- hook_args);
- }
- if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, true);
- return NULL;
- }
- prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
- old_tctx);
-
- return p;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
- if (!slow_path) {
- tsd_assert_fast(tsd);
- }
- check_entry_exit_locking(tsd_tsdn(tsd));
- if (tsd_reentrancy_level_get(tsd) != 0) {
- assert(slow_path);
- }
-
- assert(ptr != NULL);
- assert(malloc_initialized() || IS_INITIALIZER);
-
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind != SC_NSIZES);
-
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
+ tctx = prof_alloc_prep(tsd, usize, prof_active, true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx,
+ hook_args);
+ } else {
+ p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
+ hook_args);
+ }
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return NULL;
+ }
+ prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
+ old_tctx);
+
+ return p;
+}
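
irealloc_prof() treats the value (uintptr_t)1U coming back from prof_alloc_prep() as a sentinel meaning "this allocation is not sampled", distinct from both NULL and a real prof_tctx_t pointer, which is why it compares the pointer bit pattern rather than testing for NULL. A generic sketch of that sentinel-pointer pattern (the demo_* types and names are illustrative, not jemalloc's):

#include <stdint.h>
#include <stdio.h>

typedef struct { int dummy; } demo_ctx_t;

/* Distinct from NULL and from any real object address. */
#define DEMO_CTX_NOT_SAMPLED ((demo_ctx_t *)(uintptr_t)1U)

static demo_ctx_t demo_real_ctx;

/* Returns the sentinel when sampling is off, or a real context when
 * this allocation should be sampled. */
static demo_ctx_t *demo_prep(int sample) {
        return sample ? &demo_real_ctx : DEMO_CTX_NOT_SAMPLED;
}

int main(void) {
        demo_ctx_t *ctx = demo_prep(0);
        if ((uintptr_t)ctx != (uintptr_t)1U) {
                printf("sampled path\n");
        } else {
                printf("fast, unsampled path\n");
        }
        return 0;
}
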
+
+JEMALLOC_ALWAYS_INLINE void
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
+ if (!slow_path) {
+ tsd_assert_fast(tsd);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ if (tsd_reentrancy_level_get(tsd) != 0) {
+ assert(slow_path);
+ }
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != SC_NSIZES);
+
size_t usize;
- if (config_prof && opt_prof) {
- usize = sz_index2size(alloc_ctx.szind);
- prof_free(tsd, ptr, usize, &alloc_ctx);
- } else if (config_stats) {
- usize = sz_index2size(alloc_ctx.szind);
- }
- if (config_stats) {
- *tsd_thread_deallocatedp_get(tsd) += usize;
- }
-
- if (likely(!slow_path)) {
- idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
- false);
- } else {
- idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
- true);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
- if (!slow_path) {
- tsd_assert_fast(tsd);
- }
- check_entry_exit_locking(tsd_tsdn(tsd));
- if (tsd_reentrancy_level_get(tsd) != 0) {
- assert(slow_path);
- }
-
+ if (config_prof && opt_prof) {
+ usize = sz_index2size(alloc_ctx.szind);
+ prof_free(tsd, ptr, usize, &alloc_ctx);
+ } else if (config_stats) {
+ usize = sz_index2size(alloc_ctx.szind);
+ }
+ if (config_stats) {
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ }
+
+ if (likely(!slow_path)) {
+ idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
+ false);
+ } else {
+ idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
+ true);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
+ if (!slow_path) {
+ tsd_assert_fast(tsd);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ if (tsd_reentrancy_level_get(tsd) != 0) {
+ assert(slow_path);
+ }
+
assert(ptr != NULL);
- assert(malloc_initialized() || IS_INITIALIZER);
-
- alloc_ctx_t alloc_ctx, *ctx;
- if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
- /*
- * When cache_oblivious is disabled and ptr is not page aligned,
- * the allocation was not sampled -- usize can be used to
- * determine szind directly.
- */
- alloc_ctx.szind = sz_size2index(usize);
- alloc_ctx.slab = true;
- ctx = &alloc_ctx;
- if (config_debug) {
- alloc_ctx_t dbg_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
- rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
- &dbg_ctx.slab);
- assert(dbg_ctx.szind == alloc_ctx.szind);
- assert(dbg_ctx.slab == alloc_ctx.slab);
- }
- } else if (config_prof && opt_prof) {
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind == sz_size2index(usize));
- ctx = &alloc_ctx;
- } else {
- ctx = NULL;
- }
-
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ alloc_ctx_t alloc_ctx, *ctx;
+ if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
+ /*
+ * When cache_oblivious is disabled and ptr is not page aligned,
+ * the allocation was not sampled -- usize can be used to
+ * determine szind directly.
+ */
+ alloc_ctx.szind = sz_size2index(usize);
+ alloc_ctx.slab = true;
+ ctx = &alloc_ctx;
+ if (config_debug) {
+ alloc_ctx_t dbg_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
+ rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
+ &dbg_ctx.slab);
+ assert(dbg_ctx.szind == alloc_ctx.szind);
+ assert(dbg_ctx.slab == alloc_ctx.slab);
+ }
+ } else if (config_prof && opt_prof) {
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind == sz_size2index(usize));
+ ctx = &alloc_ctx;
+ } else {
+ ctx = NULL;
+ }
+
if (config_prof && opt_prof) {
- prof_free(tsd, ptr, usize, ctx);
- }
- if (config_stats) {
- *tsd_thread_deallocatedp_get(tsd) += usize;
- }
-
- if (likely(!slow_path)) {
- isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
- } else {
- isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
- }
-}
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ALLOC_SIZE(2)
-je_realloc(void *ptr, size_t arg_size) {
+ prof_free(tsd, ptr, usize, ctx);
+ }
+ if (config_stats) {
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ }
+
+ if (likely(!slow_path)) {
+ isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
+ } else {
+ isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
+ }
+}
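
The first branch of isfree() relies on a page-alignment test to rule out sampled allocations when cache_oblivious is disabled. A standalone illustration of that test (the 4 KiB page size is an assumption for this sketch; jemalloc derives PAGE and PAGE_MASK at configure time):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_PAGE      ((uintptr_t)4096)
#define DEMO_PAGE_MASK (DEMO_PAGE - 1)

/* True iff ptr sits exactly on a page boundary. */
bool is_page_aligned(const void *ptr) {
        return ((uintptr_t)ptr & DEMO_PAGE_MASK) == 0;
}

Sampled allocations are promoted to a large size class and are therefore always page-aligned, so a pointer that fails this test cannot be sampled and its size class can be taken from usize directly; the config_debug branch above cross-checks exactly that against the rtree.
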
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_realloc(void *ptr, size_t arg_size) {
void *ret;
- tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
+ tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
- size_t size = arg_size;
-
- LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
-
- if (unlikely(size == 0)) {
+ size_t size = arg_size;
+
+ LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
+
+ if (unlikely(size == 0)) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
- tcache_t *tcache;
- tsd_t *tsd = tsd_fetch();
- if (tsd_reentrancy_level_get(tsd) == 0) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
-
- uintptr_t args[3] = {(uintptr_t)ptr, size};
- hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
-
- ifree(tsd, ptr, tcache, true);
-
- LOG("core.realloc.exit", "result: %p", NULL);
- return NULL;
+ tcache_t *tcache;
+ tsd_t *tsd = tsd_fetch();
+ if (tsd_reentrancy_level_get(tsd) == 0) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+
+ uintptr_t args[3] = {(uintptr_t)ptr, size};
+ hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
+
+ ifree(tsd, ptr, tcache, true);
+
+ LOG("core.realloc.exit", "result: %p", NULL);
+ return NULL;
}
size = 1;
}
- if (likely(ptr != NULL)) {
- assert(malloc_initialized() || IS_INITIALIZER);
- tsd_t *tsd = tsd_fetch();
+ if (likely(ptr != NULL)) {
+ assert(malloc_initialized() || IS_INITIALIZER);
+ tsd_t *tsd = tsd_fetch();
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr,
- (uintptr_t)arg_size, 0, 0}};
-
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind != SC_NSIZES);
- old_usize = sz_index2size(alloc_ctx.szind);
- assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr,
+ (uintptr_t)arg_size, 0, 0}};
+
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != SC_NSIZES);
+ old_usize = sz_index2size(alloc_ctx.szind);
+ assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) {
- usize = sz_s2u(size);
- if (unlikely(usize == 0
- || usize > SC_LARGE_MAXCLASS)) {
- ret = NULL;
- } else {
- ret = irealloc_prof(tsd, ptr, old_usize, usize,
- &alloc_ctx, &hook_args);
- }
+ usize = sz_s2u(size);
+ if (unlikely(usize == 0
+ || usize > SC_LARGE_MAXCLASS)) {
+ ret = NULL;
+ } else {
+ ret = irealloc_prof(tsd, ptr, old_usize, usize,
+ &alloc_ctx, &hook_args);
+ }
} else {
- if (config_stats) {
- usize = sz_s2u(size);
- }
- ret = iralloc(tsd, ptr, old_usize, size, 0, false,
- &hook_args);
+ if (config_stats) {
+ usize = sz_s2u(size);
+ }
+ ret = iralloc(tsd, ptr, old_usize, size, 0, false,
+ &hook_args);
}
- tsdn = tsd_tsdn(tsd);
+ tsdn = tsd_tsdn(tsd);
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.null_out_result_on_error = true;
- sopts.set_errno_on_error = true;
- sopts.oom_string =
- "<jemalloc>: Error in realloc(): out of memory\n";
-
- dopts.result = &ret;
- dopts.num_items = 1;
- dopts.item_size = size;
-
- imalloc(&sopts, &dopts);
- if (sopts.slow) {
- uintptr_t args[3] = {(uintptr_t)ptr, arg_size};
- hook_invoke_alloc(hook_alloc_realloc, ret,
- (uintptr_t)ret, args);
- }
-
- return ret;
- }
-
- if (unlikely(ret == NULL)) {
- if (config_xmalloc && unlikely(opt_xmalloc)) {
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.oom_string =
+ "<jemalloc>: Error in realloc(): out of memory\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+
+ imalloc(&sopts, &dopts);
+ if (sopts.slow) {
+ uintptr_t args[3] = {(uintptr_t)ptr, arg_size};
+ hook_invoke_alloc(hook_alloc_realloc, ret,
+ (uintptr_t)ret, args);
+ }
+
+ return ret;
+ }
+
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
- if (config_stats && likely(ret != NULL)) {
- tsd_t *tsd;
-
- assert(usize == isalloc(tsdn, ret));
- tsd = tsdn_tsd(tsdn);
- *tsd_thread_allocatedp_get(tsd) += usize;
- *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ if (config_stats && likely(ret != NULL)) {
+ tsd_t *tsd;
+
+ assert(usize == isalloc(tsdn, ret));
+ tsd = tsdn_tsd(tsdn);
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- check_entry_exit_locking(tsdn);
-
- LOG("core.realloc.exit", "result: %p", ret);
- return ret;
+ check_entry_exit_locking(tsdn);
+
+ LOG("core.realloc.exit", "result: %p", ret);
+ return ret;
}
-JEMALLOC_NOINLINE
+JEMALLOC_NOINLINE
void
-free_default(void *ptr) {
- UTRACE(ptr, 0, 0);
- if (likely(ptr != NULL)) {
- /*
- * We avoid setting up tsd fully (e.g. tcache, arena binding)
- * based on only free() calls -- other activities trigger the
- * minimal to full transition. This is because free() may
- * happen during thread shutdown after tls deallocation: if a
- * thread never had any malloc activities until then, a
- * fully-setup tsd won't be destructed properly.
- */
- tsd_t *tsd = tsd_fetch_min();
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- tcache_t *tcache;
- if (likely(tsd_fast(tsd))) {
- tsd_assert_fast(tsd);
- /* Unconditionally get tcache ptr on fast path. */
- tcache = tsd_tcachep_get(tsd);
- ifree(tsd, ptr, tcache, false);
- } else {
- if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
- uintptr_t args_raw[3] = {(uintptr_t)ptr};
- hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
- ifree(tsd, ptr, tcache, true);
- }
- check_entry_exit_locking(tsd_tsdn(tsd));
- }
-}
-
-JEMALLOC_ALWAYS_INLINE
-bool free_fastpath(void *ptr, size_t size, bool size_hint) {
- tsd_t *tsd = tsd_get(false);
- if (unlikely(!tsd || !tsd_fast(tsd))) {
- return false;
- }
-
- tcache_t *tcache = tsd_tcachep_get(tsd);
-
- alloc_ctx_t alloc_ctx;
- /*
- * If !config_cache_oblivious, we can check PAGE alignment to
- * detect sampled objects. Otherwise addresses are
- * randomized, and we have to look it up in the rtree anyway.
- * See also isfree().
- */
- if (!size_hint || config_cache_oblivious) {
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree,
- rtree_ctx, (uintptr_t)ptr,
- &alloc_ctx.szind, &alloc_ctx.slab);
-
- /* Note: profiled objects will have alloc_ctx.slab set */
- if (!res || !alloc_ctx.slab) {
- return false;
- }
- assert(alloc_ctx.szind != SC_NSIZES);
- } else {
- /*
- * Check for both sizes that are too large, and for sampled objects.
- * Sampled objects are always page-aligned. The sampled object check
- * will also check for null ptr.
- */
- if (size > SC_LOOKUP_MAXCLASS || (((uintptr_t)ptr & PAGE_MASK) == 0)) {
- return false;
- }
- alloc_ctx.szind = sz_size2index_lookup(size);
- }
-
- if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
- return false;
- }
-
- cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind);
- cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind];
- if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) {
- return false;
- }
-
- if (config_stats) {
- size_t usize = sz_index2size(alloc_ctx.szind);
- *tsd_thread_deallocatedp_get(tsd) += usize;
- }
-
- return true;
-}
-
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_free(void *ptr) {
- LOG("core.free.entry", "ptr: %p", ptr);
-
- if (!free_fastpath(ptr, 0, false)) {
- free_default(ptr);
- }
-
- LOG("core.free.exit", "");
-}
-
+free_default(void *ptr) {
+ UTRACE(ptr, 0, 0);
+ if (likely(ptr != NULL)) {
+ /*
+ * We avoid setting up tsd fully (e.g. tcache, arena binding)
+ * based on only free() calls -- other activities trigger the
+ * minimal to full transition. This is because free() may
+ * happen during thread shutdown after tls deallocation: if a
+ * thread never had any malloc activities until then, a
+ * fully-setup tsd won't be destructed properly.
+ */
+ tsd_t *tsd = tsd_fetch_min();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache;
+ if (likely(tsd_fast(tsd))) {
+ tsd_assert_fast(tsd);
+ /* Unconditionally get tcache ptr on fast path. */
+ tcache = tsd_tcachep_get(tsd);
+ ifree(tsd, ptr, tcache, false);
+ } else {
+ if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ uintptr_t args_raw[3] = {(uintptr_t)ptr};
+ hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
+ ifree(tsd, ptr, tcache, true);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE
+bool free_fastpath(void *ptr, size_t size, bool size_hint) {
+ tsd_t *tsd = tsd_get(false);
+ if (unlikely(!tsd || !tsd_fast(tsd))) {
+ return false;
+ }
+
+ tcache_t *tcache = tsd_tcachep_get(tsd);
+
+ alloc_ctx_t alloc_ctx;
+ /*
+ * If !config_cache_oblivious, we can check PAGE alignment to
+ * detect sampled objects. Otherwise addresses are
+ * randomized, and we have to look it up in the rtree anyway.
+ * See also isfree().
+ */
+ if (!size_hint || config_cache_oblivious) {
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree,
+ rtree_ctx, (uintptr_t)ptr,
+ &alloc_ctx.szind, &alloc_ctx.slab);
+
+ /* Note: profiled objects will have alloc_ctx.slab set */
+ if (!res || !alloc_ctx.slab) {
+ return false;
+ }
+ assert(alloc_ctx.szind != SC_NSIZES);
+ } else {
+ /*
+ * Check for both sizes that are too large, and for sampled objects.
+ * Sampled objects are always page-aligned. The sampled object check
+ * will also check for null ptr.
+ */
+ if (size > SC_LOOKUP_MAXCLASS || (((uintptr_t)ptr & PAGE_MASK) == 0)) {
+ return false;
+ }
+ alloc_ctx.szind = sz_size2index_lookup(size);
+ }
+
+ if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
+ return false;
+ }
+
+ cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind);
+ cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind];
+ if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) {
+ return false;
+ }
+
+ if (config_stats) {
+ size_t usize = sz_index2size(alloc_ctx.szind);
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ }
+
+ return true;
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_free(void *ptr) {
+ LOG("core.free.entry", "ptr: %p", ptr);
+
+ if (!free_fastpath(ptr, 0, false)) {
+ free_default(ptr);
+ }
+
+ LOG("core.free.exit", "");
+}
+
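
Both the allocation fastpath (cache_bin_alloc_easy in je_malloc) and the free fastpath (cache_bin_dalloc_easy above) reduce to popping and pushing pointers on a small per-size-class LIFO; any miss falls through to the slow path. A minimal stand-alone model of that structure follows; the field and function names are ours and the real cache_bin_t carries more state than this:

#include <stdbool.h>
#include <stddef.h>

#define DEMO_BIN_CAPACITY 64

typedef struct {
        void  *slots[DEMO_BIN_CAPACITY];
        size_t ncached;
} demo_bin_t;

/* Pop: succeeds only when the bin is non-empty (the "easy" case). */
void *demo_bin_alloc_easy(demo_bin_t *bin, bool *success) {
        if (bin->ncached == 0) {
                *success = false;
                return NULL;
        }
        *success = true;
        return bin->slots[--bin->ncached];
}

/* Push: succeeds only while there is room; a full bin forces the
 * caller onto the slow path (flush back to the arena). */
bool demo_bin_dalloc_easy(demo_bin_t *bin, void *ptr) {
        if (bin->ncached == DEMO_BIN_CAPACITY) {
                return false;
        }
        bin->slots[bin->ncached++] = ptr;
        return true;
}
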
/*
* End malloc(3)-compatible functions.
*/
@@ -2887,83 +2887,83 @@ je_free(void *ptr) {
*/
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc)
-je_memalign(size_t alignment, size_t size) {
- void *ret;
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
- size);
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.min_alignment = 1;
- sopts.oom_string =
- "<jemalloc>: Error allocating aligned memory: out of memory\n";
- sopts.invalid_alignment_string =
- "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
- sopts.null_out_result_on_error = true;
-
- dopts.result = &ret;
- dopts.num_items = 1;
- dopts.item_size = size;
- dopts.alignment = alignment;
-
- imalloc(&sopts, &dopts);
- if (sopts.slow) {
- uintptr_t args[3] = {alignment, size};
- hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret,
- args);
- }
-
- LOG("core.memalign.exit", "result: %p", ret);
- return ret;
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
+je_memalign(size_t alignment, size_t size) {
+ void *ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
+ size);
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.min_alignment = 1;
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+ sopts.null_out_result_on_error = true;
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = alignment;
+
+ imalloc(&sopts, &dopts);
+ if (sopts.slow) {
+ uintptr_t args[3] = {alignment, size};
+ hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret,
+ args);
+ }
+
+ LOG("core.memalign.exit", "result: %p", ret);
+ return ret;
}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc)
-je_valloc(size_t size) {
- void *ret;
-
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- LOG("core.valloc.entry", "size: %zu\n", size);
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.null_out_result_on_error = true;
- sopts.min_alignment = PAGE;
- sopts.oom_string =
- "<jemalloc>: Error allocating aligned memory: out of memory\n";
- sopts.invalid_alignment_string =
- "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
-
- dopts.result = &ret;
- dopts.num_items = 1;
- dopts.item_size = size;
- dopts.alignment = PAGE;
-
- imalloc(&sopts, &dopts);
- if (sopts.slow) {
- uintptr_t args[3] = {size};
- hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args);
- }
-
- LOG("core.valloc.exit", "result: %p\n", ret);
- return ret;
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
+je_valloc(size_t size) {
+ void *ret;
+
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ LOG("core.valloc.entry", "size: %zu\n", size);
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.null_out_result_on_error = true;
+ sopts.min_alignment = PAGE;
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = PAGE;
+
+ imalloc(&sopts, &dopts);
+ if (sopts.slow) {
+ uintptr_t args[3] = {size};
+ hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args);
+ }
+
+ LOG("core.valloc.exit", "result: %p\n", ret);
+ return ret;
}
#endif
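
je_valloc() is simply an aligned allocation with the alignment pinned to the page size (min_alignment and dopts.alignment are both PAGE). In portable code the same effect can be obtained with posix_memalign(); a short sketch using sysconf(_SC_PAGESIZE) (the wrapper name demo_valloc is ours):

#define _POSIX_C_SOURCE 200112L
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>

/* Page-aligned allocation, equivalent in spirit to valloc(size). */
void *demo_valloc(size_t size) {
        void *p = NULL;
        long page = sysconf(_SC_PAGESIZE);

        if (page <= 0 || posix_memalign(&p, (size_t)page, size) != 0) {
                return NULL;
        }
        return p;
}
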
-#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
+#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
/*
* glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
* to inconsistently reference libc's malloc(3)-compatible functions
@@ -2975,47 +2975,47 @@ je_valloc(size_t size) {
*/
#include <features.h> // defines __GLIBC__ if we are compiling against glibc
-JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
-JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
-JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
-# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
-JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
+JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
+# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
+JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
-# endif
-
+# endif
+
# ifdef __GLIBC__
-/*
- * To enable static linking with glibc, the libc specific malloc interface must
- * be implemented also, so none of glibc's malloc.o functions are added to the
- * link.
- */
-# define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
-/* To force macro expansion of je_ prefix before stringification. */
-# define PREALIAS(je_fn) ALIAS(je_fn)
-# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
-void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
-# endif
-# ifdef JEMALLOC_OVERRIDE___LIBC_FREE
-void __libc_free(void* ptr) PREALIAS(je_free);
-# endif
-# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
-void *__libc_malloc(size_t size) PREALIAS(je_malloc);
-# endif
-# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
-void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
-# endif
-# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
-void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
-# endif
-# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
-void *__libc_valloc(size_t size) PREALIAS(je_valloc);
-# endif
-# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
-int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
-# endif
-# undef PREALIAS
-# undef ALIAS
-# endif
+/*
+ * To enable static linking with glibc, the libc specific malloc interface must
+ * be implemented also, so none of glibc's malloc.o functions are added to the
+ * link.
+ */
+# define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
+/* To force macro expansion of je_ prefix before stringification. */
+# define PREALIAS(je_fn) ALIAS(je_fn)
+# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
+void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_FREE
+void __libc_free(void* ptr) PREALIAS(je_free);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
+void *__libc_malloc(size_t size) PREALIAS(je_malloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
+void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
+void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
+void *__libc_valloc(size_t size) PREALIAS(je_valloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
+int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
+# endif
+# undef PREALIAS
+# undef ALIAS
+# endif
#endif
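
The ALIAS/PREALIAS macros above use GCC's alias attribute so that the __libc_* entry points resolve to the je_* implementations when linking statically against glibc. A self-contained illustration of the same mechanism (foo and real_impl are placeholder names; the attribute is a GNU extension, so this needs GCC or Clang):

#include <stdio.h>

int real_impl(int x) {
        return x + 1;
}

/* "foo" becomes a second name for real_impl in the object file, so a
 * caller linked against foo ends up in real_impl. */
int foo(int x) __attribute__((alias("real_impl"), used));

int main(void) {
        printf("%d\n", foo(41));        /* prints 42 */
        return 0;
}
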
/*
@@ -3026,184 +3026,184 @@ int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
* Begin non-standard functions.
*/
-#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
-
-#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y
-#define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \
- JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y)
-
-typedef struct {
- void *ptr;
- size_t size;
-} smallocx_return_t;
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-smallocx_return_t JEMALLOC_NOTHROW
-/*
- * The attribute JEMALLOC_ATTR(malloc) cannot be used due to:
- * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488
- */
-JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)
- (size_t size, int flags) {
- /*
- * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be
- * used here because it makes writing beyond the `size`
- * of the `ptr` undefined behavior, but the objective
- * of this function is to allow writing beyond `size`
- * up to `smallocx_return_t::size`.
- */
- smallocx_return_t ret;
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags);
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.assert_nonempty_alloc = true;
- sopts.null_out_result_on_error = true;
- sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
- sopts.usize = true;
-
- dopts.result = &ret.ptr;
- dopts.num_items = 1;
- dopts.item_size = size;
- if (unlikely(flags != 0)) {
- if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
- dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
- }
-
- dopts.zero = MALLOCX_ZERO_GET(flags);
-
- if ((flags & MALLOCX_TCACHE_MASK) != 0) {
- if ((flags & MALLOCX_TCACHE_MASK)
- == MALLOCX_TCACHE_NONE) {
- dopts.tcache_ind = TCACHE_IND_NONE;
- } else {
- dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
- }
- } else {
- dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
- }
-
- if ((flags & MALLOCX_ARENA_MASK) != 0)
- dopts.arena_ind = MALLOCX_ARENA_GET(flags);
- }
-
- imalloc(&sopts, &dopts);
- assert(dopts.usize == je_nallocx(size, flags));
- ret.size = dopts.usize;
-
- LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size);
- return ret;
-}
-#undef JEMALLOC_SMALLOCX_CONCAT_HELPER
-#undef JEMALLOC_SMALLOCX_CONCAT_HELPER2
-#endif
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
-je_mallocx(size_t size, int flags) {
- void *ret;
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.assert_nonempty_alloc = true;
- sopts.null_out_result_on_error = true;
- sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
-
- dopts.result = &ret;
- dopts.num_items = 1;
- dopts.item_size = size;
- if (unlikely(flags != 0)) {
- if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
- dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
- }
-
- dopts.zero = MALLOCX_ZERO_GET(flags);
-
- if ((flags & MALLOCX_TCACHE_MASK) != 0) {
- if ((flags & MALLOCX_TCACHE_MASK)
- == MALLOCX_TCACHE_NONE) {
- dopts.tcache_ind = TCACHE_IND_NONE;
- } else {
- dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
- }
- } else {
- dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
- }
-
- if ((flags & MALLOCX_ARENA_MASK) != 0)
- dopts.arena_ind = MALLOCX_ARENA_GET(flags);
- }
-
- imalloc(&sopts, &dopts);
- if (sopts.slow) {
- uintptr_t args[3] = {size, flags};
- hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret,
- args);
- }
-
- LOG("core.mallocx.exit", "result: %p", ret);
- return ret;
+#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
+
+#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y
+#define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \
+ JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y)
+
+typedef struct {
+ void *ptr;
+ size_t size;
+} smallocx_return_t;
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+smallocx_return_t JEMALLOC_NOTHROW
+/*
+ * The attribute JEMALLOC_ATTR(malloc) cannot be used due to:
+ * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488
+ */
+JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)
+ (size_t size, int flags) {
+ /*
+ * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be
+ * used here because it makes writing beyond the `size`
+ * of the `ptr` undefined behavior, but the objective
+ * of this function is to allow writing beyond `size`
+ * up to `smallocx_return_t::size`.
+ */
+ smallocx_return_t ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags);
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.assert_nonempty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
+ sopts.usize = true;
+
+ dopts.result = &ret.ptr;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ if (unlikely(flags != 0)) {
+ if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
+ dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
+ }
+
+ dopts.zero = MALLOCX_ZERO_GET(flags);
+
+ if ((flags & MALLOCX_TCACHE_MASK) != 0) {
+ if ((flags & MALLOCX_TCACHE_MASK)
+ == MALLOCX_TCACHE_NONE) {
+ dopts.tcache_ind = TCACHE_IND_NONE;
+ } else {
+ dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
+ }
+ } else {
+ dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
+ }
+
+ if ((flags & MALLOCX_ARENA_MASK) != 0)
+ dopts.arena_ind = MALLOCX_ARENA_GET(flags);
+ }
+
+ imalloc(&sopts, &dopts);
+ assert(dopts.usize == je_nallocx(size, flags));
+ ret.size = dopts.usize;
+
+ LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size);
+ return ret;
+}
+#undef JEMALLOC_SMALLOCX_CONCAT_HELPER
+#undef JEMALLOC_SMALLOCX_CONCAT_HELPER2
+#endif
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_mallocx(size_t size, int flags) {
+ void *ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.assert_nonempty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ if (unlikely(flags != 0)) {
+ if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
+ dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
+ }
+
+ dopts.zero = MALLOCX_ZERO_GET(flags);
+
+ if ((flags & MALLOCX_TCACHE_MASK) != 0) {
+ if ((flags & MALLOCX_TCACHE_MASK)
+ == MALLOCX_TCACHE_NONE) {
+ dopts.tcache_ind = TCACHE_IND_NONE;
+ } else {
+ dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
+ }
+ } else {
+ dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
+ }
+
+ if ((flags & MALLOCX_ARENA_MASK) != 0)
+ dopts.arena_ind = MALLOCX_ARENA_GET(flags);
+ }
+
+ imalloc(&sopts, &dopts);
+ if (sopts.slow) {
+ uintptr_t args[3] = {size, flags};
+ hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret,
+ args);
+ }
+
+ LOG("core.mallocx.exit", "result: %p", ret);
+ return ret;
}
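
je_mallocx() unpacks alignment, zero-fill, tcache and arena choices from a single flags word via the MALLOCX_*_GET macros. The sketch below shows the general technique of packing such options into an int; the bit positions chosen here are illustrative and make no claim to match jemalloc's actual MALLOCX encoding:

#include <stddef.h>
#include <stdio.h>

/* Illustrative layout: bits 0-5 = lg(alignment), bit 6 = zero,
 * bits 7 and up = tcache index + 1 (0 meaning "automatic"). */
#define DEMO_LG_ALIGN_MASK 0x3f
#define DEMO_ZERO          0x40
#define DEMO_TCACHE_SHIFT  7

#define DEMO_ALIGN(lg)     ((int)(lg))
#define DEMO_TCACHE(ind)   (((ind) + 1) << DEMO_TCACHE_SHIFT)

static size_t demo_align_get(int flags) {
        int lg = flags & DEMO_LG_ALIGN_MASK;
        return lg == 0 ? 0 : ((size_t)1 << lg);
}

int main(void) {
        int flags = DEMO_ALIGN(6) | DEMO_ZERO | DEMO_TCACHE(3);
        printf("alignment=%zu zero=%d tcache=%d\n",
            demo_align_get(flags),
            (flags & DEMO_ZERO) != 0,
            (flags >> DEMO_TCACHE_SHIFT) - 1);
        /* prints: alignment=64 zero=1 tcache=3 */
        return 0;
}

Keeping everything in one word is what lets the zero-flag case in the code above skip all of the decoding with a single unlikely(flags != 0) test.
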
static void *
-irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
- size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
- prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
+irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
+ prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
void *p;
- if (tctx == NULL) {
- return NULL;
- }
- if (usize <= SC_SMALL_MAXCLASS) {
- p = iralloct(tsdn, old_ptr, old_usize,
- SC_LARGE_MINCLASS, alignment, zero, tcache,
- arena, hook_args);
- if (p == NULL) {
- return NULL;
- }
- arena_prof_promote(tsdn, p, usize);
+ if (tctx == NULL) {
+ return NULL;
+ }
+ if (usize <= SC_SMALL_MAXCLASS) {
+ p = iralloct(tsdn, old_ptr, old_usize,
+ SC_LARGE_MINCLASS, alignment, zero, tcache,
+ arena, hook_args);
+ if (p == NULL) {
+ return NULL;
+ }
+ arena_prof_promote(tsdn, p, usize);
} else {
- p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
- tcache, arena, hook_args);
+ p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
+ tcache, arena, hook_args);
}
- return p;
+ return p;
}
-JEMALLOC_ALWAYS_INLINE void *
-irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
- size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
- arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
+JEMALLOC_ALWAYS_INLINE void *
+irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
+ size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
+ arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
void *p;
- bool prof_active;
- prof_tctx_t *old_tctx, *tctx;
-
- prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
- tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
- if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
- p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
- *usize, alignment, zero, tcache, arena, tctx, hook_args);
- } else {
- p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
- zero, tcache, arena, hook_args);
- }
- if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, false);
- return NULL;
- }
-
- if (p == old_ptr && alignment != 0) {
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
+ tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
+ *usize, alignment, zero, tcache, arena, tctx, hook_args);
+ } else {
+ p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
+ zero, tcache, arena, hook_args);
+ }
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, false);
+ return NULL;
+ }
+
+ if (p == old_ptr && alignment != 0) {
/*
* The allocation did not move, so it is possible that the size
* class is smaller than would guarantee the requested
@@ -3212,552 +3212,552 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
* be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize.
*/
- *usize = isalloc(tsd_tsdn(tsd), p);
+ *usize = isalloc(tsd_tsdn(tsd), p);
}
- prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
- old_usize, old_tctx);
+ prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
+ old_usize, old_tctx);
- return p;
+ return p;
}
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ALLOC_SIZE(2)
-je_rallocx(void *ptr, size_t size, int flags) {
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_rallocx(void *ptr, size_t size, int flags) {
void *p;
- tsd_t *tsd;
- size_t usize;
- size_t old_usize;
- size_t alignment = MALLOCX_ALIGN_GET(flags);
+ tsd_t *tsd;
+ size_t usize;
+ size_t old_usize;
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
arena_t *arena;
- tcache_t *tcache;
-
- LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
- size, flags);
-
+ tcache_t *tcache;
+
+ LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
+ size, flags);
+
assert(ptr != NULL);
assert(size != 0);
- assert(malloc_initialized() || IS_INITIALIZER);
- tsd = tsd_fetch();
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
- unsigned arena_ind = MALLOCX_ARENA_GET(flags);
- arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
- if (unlikely(arena == NULL)) {
- goto label_oom;
- }
+ assert(malloc_initialized() || IS_INITIALIZER);
+ tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
+ unsigned arena_ind = MALLOCX_ARENA_GET(flags);
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ if (unlikely(arena == NULL)) {
+ goto label_oom;
+ }
} else {
arena = NULL;
}
- if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
- tcache = NULL;
- } else {
- tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- }
- } else {
- tcache = tcache_get(tsd);
- }
-
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind != SC_NSIZES);
- old_usize = sz_index2size(alloc_ctx.szind);
- assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
-
- hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags,
- 0}};
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ }
+ } else {
+ tcache = tcache_get(tsd);
+ }
+
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != SC_NSIZES);
+ old_usize = sz_index2size(alloc_ctx.szind);
+ assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
+
+ hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags,
+ 0}};
if (config_prof && opt_prof) {
- usize = (alignment == 0) ?
- sz_s2u(size) : sz_sa2u(size, alignment);
- if (unlikely(usize == 0
- || usize > SC_LARGE_MAXCLASS)) {
- goto label_oom;
- }
- p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
- zero, tcache, arena, &alloc_ctx, &hook_args);
- if (unlikely(p == NULL)) {
+ usize = (alignment == 0) ?
+ sz_s2u(size) : sz_sa2u(size, alignment);
+ if (unlikely(usize == 0
+ || usize > SC_LARGE_MAXCLASS)) {
goto label_oom;
- }
+ }
+ p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
+ zero, tcache, arena, &alloc_ctx, &hook_args);
+ if (unlikely(p == NULL)) {
+ goto label_oom;
+ }
} else {
- p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
- zero, tcache, arena, &hook_args);
- if (unlikely(p == NULL)) {
+ p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
+ zero, tcache, arena, &hook_args);
+ if (unlikely(p == NULL)) {
goto label_oom;
- }
- if (config_stats) {
- usize = isalloc(tsd_tsdn(tsd), p);
- }
+ }
+ if (config_stats) {
+ usize = isalloc(tsd_tsdn(tsd), p);
+ }
}
- assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
if (config_stats) {
- *tsd_thread_allocatedp_get(tsd) += usize;
- *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- LOG("core.rallocx.exit", "result: %p", p);
- return p;
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ LOG("core.rallocx.exit", "result: %p", p);
+ return p;
label_oom:
- if (config_xmalloc && unlikely(opt_xmalloc)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
abort();
}
UTRACE(ptr, size, 0);
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- LOG("core.rallocx.exit", "result: %p", NULL);
- return NULL;
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ LOG("core.rallocx.exit", "result: %p", NULL);
+ return NULL;
}
-JEMALLOC_ALWAYS_INLINE size_t
-ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero) {
- size_t newsize;
+JEMALLOC_ALWAYS_INLINE size_t
+ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero) {
+ size_t newsize;
- if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero,
- &newsize)) {
- return old_usize;
- }
+ if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero,
+ &newsize)) {
+ return old_usize;
+ }
- return newsize;
+ return newsize;
}
static size_t
-ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
+ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
size_t usize;
- if (tctx == NULL) {
- return old_usize;
- }
- usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
- zero);
-
- return usize;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
- size_t usize_max, usize;
- bool prof_active;
- prof_tctx_t *old_tctx, *tctx;
-
- prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
- /*
- * usize isn't knowable before ixalloc() returns when extra is non-zero.
- * Therefore, compute its maximum possible value and use that in
- * prof_alloc_prep() to decide whether to capture a backtrace.
- * prof_realloc() will use the actual usize to decide whether to sample.
- */
- if (alignment == 0) {
- usize_max = sz_s2u(size+extra);
- assert(usize_max > 0
- && usize_max <= SC_LARGE_MAXCLASS);
- } else {
- usize_max = sz_sa2u(size+extra, alignment);
- if (unlikely(usize_max == 0
- || usize_max > SC_LARGE_MAXCLASS)) {
- /*
- * usize_max is out of range, and chances are that
- * allocation will fail, but use the maximum possible
- * value and carry on with prof_alloc_prep(), just in
- * case allocation succeeds.
- */
- usize_max = SC_LARGE_MAXCLASS;
- }
- }
- tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
-
- if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
- usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
- size, extra, alignment, zero, tctx);
+ if (tctx == NULL) {
+ return old_usize;
+ }
+ usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
+ zero);
+
+ return usize;
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
+ size_t usize_max, usize;
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
+ /*
+ * usize isn't knowable before ixalloc() returns when extra is non-zero.
+ * Therefore, compute its maximum possible value and use that in
+ * prof_alloc_prep() to decide whether to capture a backtrace.
+ * prof_realloc() will use the actual usize to decide whether to sample.
+ */
+ if (alignment == 0) {
+ usize_max = sz_s2u(size+extra);
+ assert(usize_max > 0
+ && usize_max <= SC_LARGE_MAXCLASS);
} else {
- usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
- extra, alignment, zero);
- }
- if (usize == old_usize) {
- prof_alloc_rollback(tsd, tctx, false);
- return usize;
- }
- prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
- old_tctx);
-
- return usize;
-}
-
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
- tsd_t *tsd;
+ usize_max = sz_sa2u(size+extra, alignment);
+ if (unlikely(usize_max == 0
+ || usize_max > SC_LARGE_MAXCLASS)) {
+ /*
+ * usize_max is out of range, and chances are that
+ * allocation will fail, but use the maximum possible
+ * value and carry on with prof_alloc_prep(), just in
+ * case allocation succeeds.
+ */
+ usize_max = SC_LARGE_MAXCLASS;
+ }
+ }
+ tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
+
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
+ size, extra, alignment, zero, tctx);
+ } else {
+ usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
+ extra, alignment, zero);
+ }
+ if (usize == old_usize) {
+ prof_alloc_rollback(tsd, tctx, false);
+ return usize;
+ }
+ prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
+ old_tctx);
+
+ return usize;
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
+ tsd_t *tsd;
size_t usize, old_usize;
- size_t alignment = MALLOCX_ALIGN_GET(flags);
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
- LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
- "flags: %d", ptr, size, extra, flags);
-
+ LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
+ "flags: %d", ptr, size, extra, flags);
+
assert(ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
- assert(malloc_initialized() || IS_INITIALIZER);
- tsd = tsd_fetch();
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind != SC_NSIZES);
- old_usize = sz_index2size(alloc_ctx.szind);
- assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
- /*
- * The API explicitly absolves itself of protecting against (size +
- * extra) numerical overflow, but we may need to clamp extra to avoid
- * exceeding SC_LARGE_MAXCLASS.
- *
- * Ordinarily, size limit checking is handled deeper down, but here we
- * have to check as part of (size + extra) clamping, since we need the
- * clamped value in the above helper functions.
- */
- if (unlikely(size > SC_LARGE_MAXCLASS)) {
- usize = old_usize;
- goto label_not_resized;
- }
- if (unlikely(SC_LARGE_MAXCLASS - size < extra)) {
- extra = SC_LARGE_MAXCLASS - size;
- }
+ assert(malloc_initialized() || IS_INITIALIZER);
+ tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != SC_NSIZES);
+ old_usize = sz_index2size(alloc_ctx.szind);
+ assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
+ /*
+ * The API explicitly absolves itself of protecting against (size +
+ * extra) numerical overflow, but we may need to clamp extra to avoid
+ * exceeding SC_LARGE_MAXCLASS.
+ *
+ * Ordinarily, size limit checking is handled deeper down, but here we
+ * have to check as part of (size + extra) clamping, since we need the
+ * clamped value in the above helper functions.
+ */
+ if (unlikely(size > SC_LARGE_MAXCLASS)) {
+ usize = old_usize;
+ goto label_not_resized;
+ }
+ if (unlikely(SC_LARGE_MAXCLASS - size < extra)) {
+ extra = SC_LARGE_MAXCLASS - size;
+ }
if (config_prof && opt_prof) {
- usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
- alignment, zero, &alloc_ctx);
+ usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
+ alignment, zero, &alloc_ctx);
} else {
- usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
- extra, alignment, zero);
+ usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
+ extra, alignment, zero);
}
- if (unlikely(usize == old_usize)) {
+ if (unlikely(usize == old_usize)) {
goto label_not_resized;
- }
+ }
if (config_stats) {
- *tsd_thread_allocatedp_get(tsd) += usize;
- *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
label_not_resized:
- if (unlikely(!tsd_fast(tsd))) {
- uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags};
- hook_invoke_expand(hook_expand_xallocx, ptr, old_usize,
- usize, (uintptr_t)usize, args);
- }
-
+ if (unlikely(!tsd_fast(tsd))) {
+ uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags};
+ hook_invoke_expand(hook_expand_xallocx, ptr, old_usize,
+ usize, (uintptr_t)usize, args);
+ }
+
UTRACE(ptr, size, ptr);
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- LOG("core.xallocx.exit", "result: %zu", usize);
- return usize;
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ LOG("core.xallocx.exit", "result: %zu", usize);
+ return usize;
}
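
The (size + extra) clamping in je_xallocx() has to avoid overflowing size_t, which is why the code compares extra against SC_LARGE_MAXCLASS - size instead of computing size + extra first. A standalone version of that check (DEMO_LARGE_MAXCLASS is a stand-in constant, not jemalloc's value):

#include <stddef.h>
#include <stdio.h>

#define DEMO_LARGE_MAXCLASS ((size_t)1 << 30)  /* stand-in for SC_LARGE_MAXCLASS */

/* Clamp extra so that size + extra never exceeds the largest size class
 * and never wraps around, mirroring the logic in je_xallocx(). */
static size_t clamp_extra(size_t size, size_t extra) {
        if (size > DEMO_LARGE_MAXCLASS) {
                return 0;       /* caller bails out before using extra */
        }
        if (DEMO_LARGE_MAXCLASS - size < extra) {
                return DEMO_LARGE_MAXCLASS - size;
        }
        return extra;
}

int main(void) {
        printf("%zu\n", clamp_extra((size_t)1 << 29, (size_t)1 << 30));
        /* prints 536870912: extra clamped to DEMO_LARGE_MAXCLASS - size */
        return 0;
}
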
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-JEMALLOC_ATTR(pure)
-je_sallocx(const void *ptr, int flags) {
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
+je_sallocx(const void *ptr, int flags) {
size_t usize;
- tsdn_t *tsdn;
+ tsdn_t *tsdn;
- LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
+ LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
- assert(malloc_initialized() || IS_INITIALIZER);
- assert(ptr != NULL);
-
- tsdn = tsdn_fetch();
- check_entry_exit_locking(tsdn);
-
- if (config_debug || force_ivsalloc) {
- usize = ivsalloc(tsdn, ptr);
- assert(force_ivsalloc || usize != 0);
- } else {
- usize = isalloc(tsdn, ptr);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ assert(ptr != NULL);
+
+ tsdn = tsdn_fetch();
+ check_entry_exit_locking(tsdn);
+
+ if (config_debug || force_ivsalloc) {
+ usize = ivsalloc(tsdn, ptr);
+ assert(force_ivsalloc || usize != 0);
+ } else {
+ usize = isalloc(tsdn, ptr);
}
- check_entry_exit_locking(tsdn);
-
- LOG("core.sallocx.exit", "result: %zu", usize);
- return usize;
+ check_entry_exit_locking(tsdn);
+
+ LOG("core.sallocx.exit", "result: %zu", usize);
+ return usize;
}
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_dallocx(void *ptr, int flags) {
- LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_dallocx(void *ptr, int flags) {
+ LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
assert(ptr != NULL);
- assert(malloc_initialized() || IS_INITIALIZER);
-
- tsd_t *tsd = tsd_fetch();
- bool fast = tsd_fast(tsd);
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- tcache_t *tcache;
- if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- /* Not allowed to be reentrant and specify a custom tcache. */
- assert(tsd_reentrancy_level_get(tsd) == 0);
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
- tcache = NULL;
- } else {
- tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- }
- } else {
- if (likely(fast)) {
- tcache = tsd_tcachep_get(tsd);
- assert(tcache == tcache_get(tsd));
- } else {
- if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
- }
- }
-
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ tsd_t *tsd = tsd_fetch();
+ bool fast = tsd_fast(tsd);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache;
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ /* Not allowed to be reentrant and specify a custom tcache. */
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ }
+ } else {
+ if (likely(fast)) {
+ tcache = tsd_tcachep_get(tsd);
+ assert(tcache == tcache_get(tsd));
+ } else {
+ if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ }
+ }
+
UTRACE(ptr, 0, 0);
- if (likely(fast)) {
- tsd_assert_fast(tsd);
- ifree(tsd, ptr, tcache, false);
- } else {
- uintptr_t args_raw[3] = {(uintptr_t)ptr, flags};
- hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw);
- ifree(tsd, ptr, tcache, true);
- }
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- LOG("core.dallocx.exit", "");
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-inallocx(tsdn_t *tsdn, size_t size, int flags) {
- check_entry_exit_locking(tsdn);
-
+ if (likely(fast)) {
+ tsd_assert_fast(tsd);
+ ifree(tsd, ptr, tcache, false);
+ } else {
+ uintptr_t args_raw[3] = {(uintptr_t)ptr, flags};
+ hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw);
+ ifree(tsd, ptr, tcache, true);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ LOG("core.dallocx.exit", "");
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+inallocx(tsdn_t *tsdn, size_t size, int flags) {
+ check_entry_exit_locking(tsdn);
+
size_t usize;
- if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
- usize = sz_s2u(size);
- } else {
- usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
- }
- check_entry_exit_locking(tsdn);
- return usize;
-}
-
-JEMALLOC_NOINLINE void
-sdallocx_default(void *ptr, size_t size, int flags) {
- assert(ptr != NULL);
- assert(malloc_initialized() || IS_INITIALIZER);
-
- tsd_t *tsd = tsd_fetch();
- bool fast = tsd_fast(tsd);
- size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- tcache_t *tcache;
- if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- /* Not allowed to be reentrant and specify a custom tcache. */
- assert(tsd_reentrancy_level_get(tsd) == 0);
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
- tcache = NULL;
- } else {
- tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- }
- } else {
- if (likely(fast)) {
- tcache = tsd_tcachep_get(tsd);
- assert(tcache == tcache_get(tsd));
- } else {
- if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
- }
- }
-
- UTRACE(ptr, 0, 0);
- if (likely(fast)) {
- tsd_assert_fast(tsd);
- isfree(tsd, ptr, usize, tcache, false);
- } else {
- uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags};
- hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw);
- isfree(tsd, ptr, usize, tcache, true);
- }
- check_entry_exit_locking(tsd_tsdn(tsd));
-
-}
-
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_sdallocx(void *ptr, size_t size, int flags) {
- LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
- size, flags);
-
- if (flags !=0 || !free_fastpath(ptr, size, true)) {
- sdallocx_default(ptr, size, flags);
- }
-
- LOG("core.sdallocx.exit", "");
-}
-
-void JEMALLOC_NOTHROW
-je_sdallocx_noflags(void *ptr, size_t size) {
- LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr,
- size);
-
- if (!free_fastpath(ptr, size, true)) {
- sdallocx_default(ptr, size, 0);
- }
-
- LOG("core.sdallocx.exit", "");
-}
-
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-JEMALLOC_ATTR(pure)
-je_nallocx(size_t size, int flags) {
- size_t usize;
- tsdn_t *tsdn;
-
- assert(size != 0);
-
- if (unlikely(malloc_init())) {
- LOG("core.nallocx.exit", "result: %zu", ZU(0));
- return 0;
- }
-
- tsdn = tsdn_fetch();
- check_entry_exit_locking(tsdn);
-
- usize = inallocx(tsdn, size, flags);
- if (unlikely(usize > SC_LARGE_MAXCLASS)) {
- LOG("core.nallocx.exit", "result: %zu", ZU(0));
- return 0;
- }
-
- check_entry_exit_locking(tsdn);
- LOG("core.nallocx.exit", "result: %zu", usize);
- return usize;
-}
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen) {
- int ret;
- tsd_t *tsd;
-
- LOG("core.mallctl.entry", "name: %s", name);
-
- if (unlikely(malloc_init())) {
- LOG("core.mallctl.exit", "result: %d", EAGAIN);
- return EAGAIN;
- }
-
- tsd = tsd_fetch();
- check_entry_exit_locking(tsd_tsdn(tsd));
- ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- LOG("core.mallctl.exit", "result: %d", ret);
- return ret;
-}
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
- int ret;
-
- LOG("core.mallctlnametomib.entry", "name: %s", name);
-
- if (unlikely(malloc_init())) {
- LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
- return EAGAIN;
- }
-
- tsd_t *tsd = tsd_fetch();
- check_entry_exit_locking(tsd_tsdn(tsd));
- ret = ctl_nametomib(tsd, name, mibp, miblenp);
- check_entry_exit_locking(tsd_tsdn(tsd));
-
- LOG("core.mallctlnametomib.exit", "result: %d", ret);
- return ret;
+ if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
+ usize = sz_s2u(size);
+ } else {
+ usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
+ }
+ check_entry_exit_locking(tsdn);
+ return usize;
+}
+
+JEMALLOC_NOINLINE void
+sdallocx_default(void *ptr, size_t size, int flags) {
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ tsd_t *tsd = tsd_fetch();
+ bool fast = tsd_fast(tsd);
+ size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache;
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ /* Not allowed to be reentrant and specify a custom tcache. */
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ }
+ } else {
+ if (likely(fast)) {
+ tcache = tsd_tcachep_get(tsd);
+ assert(tcache == tcache_get(tsd));
+ } else {
+ if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ }
+ }
+
+ UTRACE(ptr, 0, 0);
+ if (likely(fast)) {
+ tsd_assert_fast(tsd);
+ isfree(tsd, ptr, usize, tcache, false);
+ } else {
+ uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags};
+ hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw);
+ isfree(tsd, ptr, usize, tcache, true);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_sdallocx(void *ptr, size_t size, int flags) {
+ LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
+ size, flags);
+
+ if (flags !=0 || !free_fastpath(ptr, size, true)) {
+ sdallocx_default(ptr, size, flags);
+ }
+
+ LOG("core.sdallocx.exit", "");
+}
+
+void JEMALLOC_NOTHROW
+je_sdallocx_noflags(void *ptr, size_t size) {
+ LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr,
+ size);
+
+ if (!free_fastpath(ptr, size, true)) {
+ sdallocx_default(ptr, size, 0);
+ }
+
+ LOG("core.sdallocx.exit", "");
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
+je_nallocx(size_t size, int flags) {
+ size_t usize;
+ tsdn_t *tsdn;
+
+ assert(size != 0);
+
+ if (unlikely(malloc_init())) {
+ LOG("core.nallocx.exit", "result: %zu", ZU(0));
+ return 0;
+ }
+
+ tsdn = tsdn_fetch();
+ check_entry_exit_locking(tsdn);
+
+ usize = inallocx(tsdn, size, flags);
+ if (unlikely(usize > SC_LARGE_MAXCLASS)) {
+ LOG("core.nallocx.exit", "result: %zu", ZU(0));
+ return 0;
+ }
+
+ check_entry_exit_locking(tsdn);
+ LOG("core.nallocx.exit", "result: %zu", usize);
+ return usize;
+}
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+ tsd_t *tsd;
+
+ LOG("core.mallctl.entry", "name: %s", name);
+
+ if (unlikely(malloc_init())) {
+ LOG("core.mallctl.exit", "result: %d", EAGAIN);
+ return EAGAIN;
+ }
+
+ tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ LOG("core.mallctl.exit", "result: %d", ret);
+ return ret;
+}
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
+ int ret;
+
+ LOG("core.mallctlnametomib.entry", "name: %s", name);
+
+ if (unlikely(malloc_init())) {
+ LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
+ return EAGAIN;
+ }
+
+ tsd_t *tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ ret = ctl_nametomib(tsd, name, mibp, miblenp);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ LOG("core.mallctlnametomib.exit", "result: %d", ret);
+ return ret;
}
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen) {
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen) {
int ret;
- tsd_t *tsd;
-
- LOG("core.mallctlbymib.entry", "");
-
- if (unlikely(malloc_init())) {
- LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
- return EAGAIN;
- }
-
- tsd = tsd_fetch();
- check_entry_exit_locking(tsd_tsdn(tsd));
- ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
- check_entry_exit_locking(tsd_tsdn(tsd));
- LOG("core.mallctlbymib.exit", "result: %d", ret);
- return ret;
-}
-
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts) {
- tsdn_t *tsdn;
-
- LOG("core.malloc_stats_print.entry", "");
-
- tsdn = tsdn_fetch();
- check_entry_exit_locking(tsdn);
- stats_print(write_cb, cbopaque, opts);
- check_entry_exit_locking(tsdn);
- LOG("core.malloc_stats_print.exit", "");
-}
-
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
- size_t ret;
- tsdn_t *tsdn;
-
- LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
-
- assert(malloc_initialized() || IS_INITIALIZER);
-
- tsdn = tsdn_fetch();
- check_entry_exit_locking(tsdn);
-
- if (unlikely(ptr == NULL)) {
- ret = 0;
- } else {
- if (config_debug || force_ivsalloc) {
- ret = ivsalloc(tsdn, ptr);
- assert(force_ivsalloc || ret != 0);
- } else {
- ret = isalloc(tsdn, ptr);
- }
- }
-
- check_entry_exit_locking(tsdn);
- LOG("core.malloc_usable_size.exit", "result: %zu", ret);
- return ret;
+ tsd_t *tsd;
+
+ LOG("core.mallctlbymib.entry", "");
+
+ if (unlikely(malloc_init())) {
+ LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
+ return EAGAIN;
+ }
+
+ tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ LOG("core.mallctlbymib.exit", "result: %d", ret);
+ return ret;
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts) {
+ tsdn_t *tsdn;
+
+ LOG("core.malloc_stats_print.entry", "");
+
+ tsdn = tsdn_fetch();
+ check_entry_exit_locking(tsdn);
+ stats_print(write_cb, cbopaque, opts);
+ check_entry_exit_locking(tsdn);
+ LOG("core.malloc_stats_print.exit", "");
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
+ size_t ret;
+ tsdn_t *tsdn;
+
+ LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
+
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ tsdn = tsdn_fetch();
+ check_entry_exit_locking(tsdn);
+
+ if (unlikely(ptr == NULL)) {
+ ret = 0;
+ } else {
+ if (config_debug || force_ivsalloc) {
+ ret = ivsalloc(tsdn, ptr);
+ assert(force_ivsalloc || ret != 0);
+ } else {
+ ret = isalloc(tsdn, ptr);
+ }
+ }
+
+ check_entry_exit_locking(tsdn);
+ LOG("core.malloc_usable_size.exit", "result: %zu", ret);
+ return ret;
}
/*
- * End non-standard functions.
+ * End non-standard functions.
*/
/******************************************************************************/
/*
@@ -3774,17 +3774,17 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
* fork/malloc races via the following functions it registers during
* initialization using pthread_atfork(), but of course that does no good if
* the allocator isn't fully initialized at fork time. The following library
- * constructor is a partial solution to this problem. It may still be possible
- * to trigger the deadlock described above, but doing so would involve forking
- * via a library constructor that runs before jemalloc's runs.
+ * constructor is a partial solution to this problem. It may still be possible
+ * to trigger the deadlock described above, but doing so would involve forking
+ * via a library constructor that runs before jemalloc's runs.
*/
-#ifndef JEMALLOC_JET
+#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
-jemalloc_constructor(void) {
+jemalloc_constructor(void) {
malloc_init();
}
-#endif
+#endif
#ifndef JEMALLOC_MUTEX_INIT_CB
void
@@ -3794,70 +3794,70 @@ JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
- tsd_t *tsd;
- unsigned i, j, narenas;
- arena_t *arena;
+ tsd_t *tsd;
+ unsigned i, j, narenas;
+ arena_t *arena;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (!malloc_initialized()) {
+ if (!malloc_initialized()) {
return;
- }
+ }
#endif
- assert(malloc_initialized());
-
- tsd = tsd_fetch();
+ assert(malloc_initialized());
- narenas = narenas_total_get();
-
- witness_prefork(tsd_witness_tsdp_get(tsd));
+ tsd = tsd_fetch();
+
+ narenas = narenas_total_get();
+
+ witness_prefork(tsd_witness_tsdp_get(tsd));
/* Acquire all mutexes in a safe order. */
- ctl_prefork(tsd_tsdn(tsd));
- tcache_prefork(tsd_tsdn(tsd));
- malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
- if (have_background_thread) {
- background_thread_prefork0(tsd_tsdn(tsd));
- }
- prof_prefork0(tsd_tsdn(tsd));
- if (have_background_thread) {
- background_thread_prefork1(tsd_tsdn(tsd));
- }
- /* Break arena prefork into stages to preserve lock order. */
- for (i = 0; i < 8; i++) {
- for (j = 0; j < narenas; j++) {
- if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
- NULL) {
- switch (i) {
- case 0:
- arena_prefork0(tsd_tsdn(tsd), arena);
- break;
- case 1:
- arena_prefork1(tsd_tsdn(tsd), arena);
- break;
- case 2:
- arena_prefork2(tsd_tsdn(tsd), arena);
- break;
- case 3:
- arena_prefork3(tsd_tsdn(tsd), arena);
- break;
- case 4:
- arena_prefork4(tsd_tsdn(tsd), arena);
- break;
- case 5:
- arena_prefork5(tsd_tsdn(tsd), arena);
- break;
- case 6:
- arena_prefork6(tsd_tsdn(tsd), arena);
- break;
- case 7:
- arena_prefork7(tsd_tsdn(tsd), arena);
- break;
- default: not_reached();
- }
- }
- }
- }
- prof_prefork1(tsd_tsdn(tsd));
- tsd_prefork(tsd);
+ ctl_prefork(tsd_tsdn(tsd));
+ tcache_prefork(tsd_tsdn(tsd));
+ malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
+ if (have_background_thread) {
+ background_thread_prefork0(tsd_tsdn(tsd));
+ }
+ prof_prefork0(tsd_tsdn(tsd));
+ if (have_background_thread) {
+ background_thread_prefork1(tsd_tsdn(tsd));
+ }
+ /* Break arena prefork into stages to preserve lock order. */
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < narenas; j++) {
+ if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
+ NULL) {
+ switch (i) {
+ case 0:
+ arena_prefork0(tsd_tsdn(tsd), arena);
+ break;
+ case 1:
+ arena_prefork1(tsd_tsdn(tsd), arena);
+ break;
+ case 2:
+ arena_prefork2(tsd_tsdn(tsd), arena);
+ break;
+ case 3:
+ arena_prefork3(tsd_tsdn(tsd), arena);
+ break;
+ case 4:
+ arena_prefork4(tsd_tsdn(tsd), arena);
+ break;
+ case 5:
+ arena_prefork5(tsd_tsdn(tsd), arena);
+ break;
+ case 6:
+ arena_prefork6(tsd_tsdn(tsd), arena);
+ break;
+ case 7:
+ arena_prefork7(tsd_tsdn(tsd), arena);
+ break;
+ default: not_reached();
+ }
+ }
+ }
+ }
+ prof_prefork1(tsd_tsdn(tsd));
+ tsd_prefork(tsd);
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -3868,65 +3868,65 @@ JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
- tsd_t *tsd;
- unsigned i, narenas;
+ tsd_t *tsd;
+ unsigned i, narenas;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (!malloc_initialized()) {
+ if (!malloc_initialized()) {
return;
- }
+ }
#endif
- assert(malloc_initialized());
+ assert(malloc_initialized());
- tsd = tsd_fetch();
+ tsd = tsd_fetch();
- tsd_postfork_parent(tsd);
+ tsd_postfork_parent(tsd);
- witness_postfork_parent(tsd_witness_tsdp_get(tsd));
- /* Release all mutexes, now that fork() has completed. */
- for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena;
+ witness_postfork_parent(tsd_witness_tsdp_get(tsd));
+ /* Release all mutexes, now that fork() has completed. */
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena;
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
- arena_postfork_parent(tsd_tsdn(tsd), arena);
- }
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
+ arena_postfork_parent(tsd_tsdn(tsd), arena);
+ }
}
- prof_postfork_parent(tsd_tsdn(tsd));
- if (have_background_thread) {
- background_thread_postfork_parent(tsd_tsdn(tsd));
- }
- malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
- tcache_postfork_parent(tsd_tsdn(tsd));
- ctl_postfork_parent(tsd_tsdn(tsd));
+ prof_postfork_parent(tsd_tsdn(tsd));
+ if (have_background_thread) {
+ background_thread_postfork_parent(tsd_tsdn(tsd));
+ }
+ malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
+ tcache_postfork_parent(tsd_tsdn(tsd));
+ ctl_postfork_parent(tsd_tsdn(tsd));
}
-void
-jemalloc_postfork_child(void) {
- tsd_t *tsd;
- unsigned i, narenas;
+void
+jemalloc_postfork_child(void) {
+ tsd_t *tsd;
+ unsigned i, narenas;
- assert(malloc_initialized());
+ assert(malloc_initialized());
- tsd = tsd_fetch();
+ tsd = tsd_fetch();
- tsd_postfork_child(tsd);
+ tsd_postfork_child(tsd);
- witness_postfork_child(tsd_witness_tsdp_get(tsd));
- /* Release all mutexes, now that fork() has completed. */
- for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena;
+ witness_postfork_child(tsd_witness_tsdp_get(tsd));
+ /* Release all mutexes, now that fork() has completed. */
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena;
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
- arena_postfork_child(tsd_tsdn(tsd), arena);
- }
- }
- prof_postfork_child(tsd_tsdn(tsd));
- if (have_background_thread) {
- background_thread_postfork_child(tsd_tsdn(tsd));
- }
- malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
- tcache_postfork_child(tsd_tsdn(tsd));
- ctl_postfork_child(tsd_tsdn(tsd));
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
+ arena_postfork_child(tsd_tsdn(tsd), arena);
+ }
+ }
+ prof_postfork_child(tsd_tsdn(tsd));
+ if (have_background_thread) {
+ background_thread_postfork_child(tsd_tsdn(tsd));
+ }
+ malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
+ tcache_postfork_child(tsd_tsdn(tsd));
+ ctl_postfork_child(tsd_tsdn(tsd));
}
/******************************************************************************/
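For context on the je_* entry points restored above: in a default build they back the un-prefixed public non-standard API (mallocx, sallocx, dallocx, sdallocx, nallocx, mallctl, malloc_usable_size). The sketch below is illustrative only and not part of the diff; it assumes such a build with <jemalloc/jemalloc.h> on the include path.

// Illustrative usage of the non-standard API whose implementation appears above.
#include <jemalloc/jemalloc.h>
#include <cstdio>

int main() {
    // nallocx() reports the usable size a request would receive, without allocating.
    size_t want = 1000;
    size_t real = nallocx(want, 0);

    // mallocx()/sallocx(): allocate, then query the actual usable size.
    void *p = mallocx(want, 0);
    if (p == nullptr) {
        return 1;
    }
    std::printf("requested %zu, nallocx %zu, sallocx %zu\n", want, real,
        sallocx(p, 0));

    // sdallocx() is the sized free; a valid size hint lets the free fast path
    // (je_sdallocx above) skip the size lookup.
    sdallocx(p, want, 0);

    // dallocx() with MALLOCX_TCACHE_NONE bypasses the thread cache, matching
    // the tcache-selection branch in je_dallocx above.
    void *q = mallocx(64, 0);
    if (q != nullptr) {
        dallocx(q, MALLOCX_TCACHE_NONE);
    }

    // mallctl() read of a statistics counter (epoch refresh omitted for
    // brevity, so the value may be stale).
    size_t allocated;
    size_t len = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &len, nullptr, 0) == 0) {
        std::printf("stats.allocated = %zu\n", allocated);
    }
    return 0;
}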
diff --git a/contrib/libs/jemalloc/src/jemalloc_cpp.cpp b/contrib/libs/jemalloc/src/jemalloc_cpp.cpp
index da0441a7c9..7469985729 100644
--- a/contrib/libs/jemalloc/src/jemalloc_cpp.cpp
+++ b/contrib/libs/jemalloc/src/jemalloc_cpp.cpp
@@ -1,141 +1,141 @@
-#include <mutex>
-#include <new>
-
-#define JEMALLOC_CPP_CPP_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-// All operators in this file are exported.
-
-// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
-// thunk?
-//
-// extern __typeof (sdallocx) sdallocx_int
-// __attribute ((alias ("sdallocx"),
-// visibility ("hidden")));
-//
-// ... but it needs to work with jemalloc namespaces.
-
-void *operator new(std::size_t size);
-void *operator new[](std::size_t size);
-void *operator new(std::size_t size, const std::nothrow_t &) noexcept;
-void *operator new[](std::size_t size, const std::nothrow_t &) noexcept;
-void operator delete(void *ptr) noexcept;
-void operator delete[](void *ptr) noexcept;
-void operator delete(void *ptr, const std::nothrow_t &) noexcept;
-void operator delete[](void *ptr, const std::nothrow_t &) noexcept;
-
-#if __cpp_sized_deallocation >= 201309
-/* C++14's sized-delete operators. */
-void operator delete(void *ptr, std::size_t size) noexcept;
-void operator delete[](void *ptr, std::size_t size) noexcept;
-#endif
-
-JEMALLOC_NOINLINE
-static void *
-handleOOM(std::size_t size, bool nothrow) {
- void *ptr = nullptr;
-
- while (ptr == nullptr) {
- std::new_handler handler;
- // GCC-4.8 and clang 4.0 do not have std::get_new_handler.
- {
- static std::mutex mtx;
- std::lock_guard<std::mutex> lock(mtx);
-
- handler = std::set_new_handler(nullptr);
- std::set_new_handler(handler);
- }
- if (handler == nullptr)
- break;
-
- try {
- handler();
- } catch (const std::bad_alloc &) {
- break;
- }
-
- ptr = je_malloc(size);
- }
-
- if (ptr == nullptr && !nothrow)
- std::__throw_bad_alloc();
- return ptr;
-}
-
-template <bool IsNoExcept>
-JEMALLOC_ALWAYS_INLINE
-void *
-newImpl(std::size_t size) noexcept(IsNoExcept) {
- void *ptr = je_malloc(size);
- if (likely(ptr != nullptr))
- return ptr;
-
- return handleOOM(size, IsNoExcept);
-}
-
-void *
-operator new(std::size_t size) {
- return newImpl<false>(size);
-}
-
-void *
-operator new[](std::size_t size) {
- return newImpl<false>(size);
-}
-
-void *
-operator new(std::size_t size, const std::nothrow_t &) noexcept {
- return newImpl<true>(size);
-}
-
-void *
-operator new[](std::size_t size, const std::nothrow_t &) noexcept {
- return newImpl<true>(size);
-}
-
-void
-operator delete(void *ptr) noexcept {
- je_free(ptr);
-}
-
-void
-operator delete[](void *ptr) noexcept {
- je_free(ptr);
-}
-
-void
-operator delete(void *ptr, const std::nothrow_t &) noexcept {
- je_free(ptr);
-}
-
-void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
- je_free(ptr);
-}
-
-#if __cpp_sized_deallocation >= 201309
-
-void
-operator delete(void *ptr, std::size_t size) noexcept {
- if (unlikely(ptr == nullptr)) {
- return;
- }
- je_sdallocx_noflags(ptr, size);
-}
-
-void operator delete[](void *ptr, std::size_t size) noexcept {
- if (unlikely(ptr == nullptr)) {
- return;
- }
- je_sdallocx_noflags(ptr, size);
-}
-
-#endif // __cpp_sized_deallocation
+#include <mutex>
+#include <new>
+
+#define JEMALLOC_CPP_CPP_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+// All operators in this file are exported.
+
+// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
+// thunk?
+//
+// extern __typeof (sdallocx) sdallocx_int
+// __attribute ((alias ("sdallocx"),
+// visibility ("hidden")));
+//
+// ... but it needs to work with jemalloc namespaces.
+
+void *operator new(std::size_t size);
+void *operator new[](std::size_t size);
+void *operator new(std::size_t size, const std::nothrow_t &) noexcept;
+void *operator new[](std::size_t size, const std::nothrow_t &) noexcept;
+void operator delete(void *ptr) noexcept;
+void operator delete[](void *ptr) noexcept;
+void operator delete(void *ptr, const std::nothrow_t &) noexcept;
+void operator delete[](void *ptr, const std::nothrow_t &) noexcept;
+
+#if __cpp_sized_deallocation >= 201309
+/* C++14's sized-delete operators. */
+void operator delete(void *ptr, std::size_t size) noexcept;
+void operator delete[](void *ptr, std::size_t size) noexcept;
+#endif
+
+JEMALLOC_NOINLINE
+static void *
+handleOOM(std::size_t size, bool nothrow) {
+ void *ptr = nullptr;
+
+ while (ptr == nullptr) {
+ std::new_handler handler;
+ // GCC-4.8 and clang 4.0 do not have std::get_new_handler.
+ {
+ static std::mutex mtx;
+ std::lock_guard<std::mutex> lock(mtx);
+
+ handler = std::set_new_handler(nullptr);
+ std::set_new_handler(handler);
+ }
+ if (handler == nullptr)
+ break;
+
+ try {
+ handler();
+ } catch (const std::bad_alloc &) {
+ break;
+ }
+
+ ptr = je_malloc(size);
+ }
+
+ if (ptr == nullptr && !nothrow)
+ std::__throw_bad_alloc();
+ return ptr;
+}
+
+template <bool IsNoExcept>
+JEMALLOC_ALWAYS_INLINE
+void *
+newImpl(std::size_t size) noexcept(IsNoExcept) {
+ void *ptr = je_malloc(size);
+ if (likely(ptr != nullptr))
+ return ptr;
+
+ return handleOOM(size, IsNoExcept);
+}
+
+void *
+operator new(std::size_t size) {
+ return newImpl<false>(size);
+}
+
+void *
+operator new[](std::size_t size) {
+ return newImpl<false>(size);
+}
+
+void *
+operator new(std::size_t size, const std::nothrow_t &) noexcept {
+ return newImpl<true>(size);
+}
+
+void *
+operator new[](std::size_t size, const std::nothrow_t &) noexcept {
+ return newImpl<true>(size);
+}
+
+void
+operator delete(void *ptr) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete[](void *ptr) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete(void *ptr, const std::nothrow_t &) noexcept {
+ je_free(ptr);
+}
+
+void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
+ je_free(ptr);
+}
+
+#if __cpp_sized_deallocation >= 201309
+
+void
+operator delete(void *ptr, std::size_t size) noexcept {
+ if (unlikely(ptr == nullptr)) {
+ return;
+ }
+ je_sdallocx_noflags(ptr, size);
+}
+
+void operator delete[](void *ptr, std::size_t size) noexcept {
+ if (unlikely(ptr == nullptr)) {
+ return;
+ }
+ je_sdallocx_noflags(ptr, size);
+}
+
+#endif // __cpp_sized_deallocation
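For context, jemalloc_cpp.cpp above replaces the global C++ allocation operators, so plain new/delete route through je_malloc, je_free and, for sized delete, je_sdallocx_noflags. The snippet below is an illustrative sketch and not part of the diff; it assumes the program links against this jemalloc build and that C++14 sized deallocation is enabled (e.g. -fsized-deallocation).

#include <cstdint>
#include <new>

// A trivially destructible type, so calling an operator delete overload
// directly (without a destructor call) is well-defined below.
struct Sample {
    std::uint64_t payload[8];
};

int main() {
    // Resolves to operator new(std::size_t) above, i.e. newImpl<false>().
    Sample *s = new Sample();

    // With sized deallocation enabled, the compiler may call
    // operator delete(void *, std::size_t), which forwards to
    // je_sdallocx_noflags(ptr, sizeof(Sample)).
    delete s;

    // The nothrow forms resolve to newImpl<true>() and return nullptr on OOM
    // instead of throwing std::bad_alloc.
    Sample *t = new (std::nothrow) Sample();
    if (t != nullptr) {
        // Exercises operator delete(void *, const std::nothrow_t &) above.
        operator delete(t, std::nothrow);
    }
    return 0;
}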
diff --git a/contrib/libs/jemalloc/src/large.c b/contrib/libs/jemalloc/src/large.c
index 8e7a781d33..ecc144f325 100644
--- a/contrib/libs/jemalloc/src/large.c
+++ b/contrib/libs/jemalloc/src/large.c
@@ -1,395 +1,395 @@
-#define JEMALLOC_LARGE_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/extent_mmap.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/util.h"
-
-/******************************************************************************/
-
-void *
-large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
- assert(usize == sz_s2u(usize));
-
- return large_palloc(tsdn, arena, usize, CACHELINE, zero);
-}
-
-void *
-large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero) {
- size_t ausize;
- extent_t *extent;
- bool is_zeroed;
- UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
-
- assert(!tsdn_null(tsdn) || arena != NULL);
-
- ausize = sz_sa2u(usize, alignment);
- if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) {
- return NULL;
- }
-
- if (config_fill && unlikely(opt_zero)) {
- zero = true;
- }
- /*
- * Copy zero into is_zeroed and pass the copy when allocating the
- * extent, so that it is possible to make correct junk/zero fill
- * decisions below, even if is_zeroed ends up true when zero is false.
- */
- is_zeroed = zero;
- if (likely(!tsdn_null(tsdn))) {
- arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
- }
- if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
- arena, usize, alignment, &is_zeroed)) == NULL) {
- return NULL;
- }
-
- /* See comments in arena_bin_slabs_full_insert(). */
- if (!arena_is_auto(arena)) {
- /* Insert extent into large. */
- malloc_mutex_lock(tsdn, &arena->large_mtx);
- extent_list_append(&arena->large, extent);
- malloc_mutex_unlock(tsdn, &arena->large_mtx);
- }
- if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
- prof_idump(tsdn);
- }
-
- if (zero) {
- assert(is_zeroed);
- } else if (config_fill && unlikely(opt_junk_alloc)) {
- memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
- extent_usize_get(extent));
- }
-
- arena_decay_tick(tsdn, arena);
- return extent_addr_get(extent);
-}
-
-static void
-large_dalloc_junk_impl(void *ptr, size_t size) {
- memset(ptr, JEMALLOC_FREE_JUNK, size);
-}
-large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;
-
-static void
-large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
- if (config_fill && have_dss && unlikely(opt_junk_free)) {
- /*
- * Only bother junk filling if the extent isn't about to be
- * unmapped.
- */
- if (opt_retain || (have_dss && extent_in_dss(ptr))) {
- large_dalloc_junk(ptr, size);
- }
- }
-}
-large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
- large_dalloc_maybe_junk_impl;
-
-static bool
-large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
- arena_t *arena = extent_arena_get(extent);
- size_t oldusize = extent_usize_get(extent);
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
-
- assert(oldusize > usize);
-
- if (extent_hooks->split == NULL) {
- return true;
- }
-
- /* Split excess pages. */
- if (diff != 0) {
- extent_t *trail = extent_split_wrapper(tsdn, arena,
- &extent_hooks, extent, usize + sz_large_pad,
- sz_size2index(usize), false, diff, SC_NSIZES, false);
- if (trail == NULL) {
- return true;
- }
-
- if (config_fill && unlikely(opt_junk_free)) {
- large_dalloc_maybe_junk(extent_addr_get(trail),
- extent_size_get(trail));
- }
-
- arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
- }
-
- arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
-
- return false;
-}
-
-static bool
-large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
- bool zero) {
- arena_t *arena = extent_arena_get(extent);
- size_t oldusize = extent_usize_get(extent);
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- size_t trailsize = usize - oldusize;
-
- if (extent_hooks->merge == NULL) {
- return true;
- }
-
- if (config_fill && unlikely(opt_zero)) {
- zero = true;
- }
- /*
- * Copy zero into is_zeroed_trail and pass the copy when allocating the
- * extent, so that it is possible to make correct junk/zero fill
- * decisions below, even if is_zeroed_trail ends up true when zero is
- * false.
- */
- bool is_zeroed_trail = zero;
- bool commit = true;
- extent_t *trail;
- bool new_mapping;
- if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
- CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL
- || (trail = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
- CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) {
- if (config_stats) {
- new_mapping = false;
- }
- } else {
- if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
- extent_past_get(extent), trailsize, 0, CACHELINE, false,
- SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
- return true;
- }
- if (config_stats) {
- new_mapping = true;
- }
- }
-
- if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
- extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
- return true;
- }
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- szind_t szind = sz_size2index(usize);
- extent_szind_set(extent, szind);
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_addr_get(extent), szind, false);
-
- if (config_stats && new_mapping) {
- arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
- }
-
- if (zero) {
- if (config_cache_oblivious) {
- /*
- * Zero the trailing bytes of the original allocation's
- * last page, since they are in an indeterminate state.
- * There will always be trailing bytes, because ptr's
- * offset from the beginning of the extent is a multiple
- * of CACHELINE in [0 .. PAGE).
- */
- void *zbase = (void *)
- ((uintptr_t)extent_addr_get(extent) + oldusize);
- void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
- PAGE));
- size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
- assert(nzero > 0);
- memset(zbase, 0, nzero);
- }
- assert(is_zeroed_trail);
- } else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
- JEMALLOC_ALLOC_JUNK, usize - oldusize);
- }
-
- arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
-
- return false;
-}
-
-bool
-large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
- size_t usize_max, bool zero) {
- size_t oldusize = extent_usize_get(extent);
-
- /* The following should have been caught by callers. */
- assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
- /* Both allocation sizes must be large to avoid a move. */
- assert(oldusize >= SC_LARGE_MINCLASS
- && usize_max >= SC_LARGE_MINCLASS);
-
- if (usize_max > oldusize) {
- /* Attempt to expand the allocation in-place. */
- if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
- zero)) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
- return false;
- }
- /* Try again, this time with usize_min. */
- if (usize_min < usize_max && usize_min > oldusize &&
- large_ralloc_no_move_expand(tsdn, extent, usize_min,
- zero)) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
- return false;
- }
- }
-
- /*
- * Avoid moving the allocation if the existing extent size accommodates
- * the new size.
- */
- if (oldusize >= usize_min && oldusize <= usize_max) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
- return false;
- }
-
- /* Attempt to shrink the allocation in-place. */
- if (oldusize > usize_max) {
- if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
- return false;
- }
- }
- return true;
-}
-
-static void *
-large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero) {
- if (alignment <= CACHELINE) {
- return large_malloc(tsdn, arena, usize, zero);
- }
- return large_palloc(tsdn, arena, usize, alignment, zero);
-}
-
-void *
-large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache,
- hook_ralloc_args_t *hook_args) {
- extent_t *extent = iealloc(tsdn, ptr);
-
- size_t oldusize = extent_usize_get(extent);
- /* The following should have been caught by callers. */
- assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
- /* Both allocation sizes must be large to avoid a move. */
- assert(oldusize >= SC_LARGE_MINCLASS
- && usize >= SC_LARGE_MINCLASS);
-
- /* Try to avoid moving the allocation. */
- if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
- hook_invoke_expand(hook_args->is_realloc
- ? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
- usize, (uintptr_t)ptr, hook_args->args);
- return extent_addr_get(extent);
- }
-
- /*
- * usize and old size are different enough that we need to use a
- * different size class. In that case, fall back to allocating new
- * space and copying.
- */
- void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
- zero);
- if (ret == NULL) {
- return NULL;
- }
-
- hook_invoke_alloc(hook_args->is_realloc
- ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
- hook_args->args);
- hook_invoke_dalloc(hook_args->is_realloc
- ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
-
- size_t copysize = (usize < oldusize) ? usize : oldusize;
- memcpy(ret, extent_addr_get(extent), copysize);
- isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
- return ret;
-}
-
-/*
- * junked_locked indicates whether the extent's data have been junk-filled, and
- * whether the arena's large_mtx is currently held.
- */
-static void
-large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- bool junked_locked) {
- if (!junked_locked) {
- /* See comments in arena_bin_slabs_full_insert(). */
- if (!arena_is_auto(arena)) {
- malloc_mutex_lock(tsdn, &arena->large_mtx);
- extent_list_remove(&arena->large, extent);
- malloc_mutex_unlock(tsdn, &arena->large_mtx);
- }
- large_dalloc_maybe_junk(extent_addr_get(extent),
- extent_usize_get(extent));
- } else {
- /* Only hold the large_mtx if necessary. */
- if (!arena_is_auto(arena)) {
- malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
- extent_list_remove(&arena->large, extent);
- }
- }
- arena_extent_dalloc_large_prep(tsdn, arena, extent);
-}
-
-static void
-large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
-}
-
-void
-large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
- large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
-}
-
-void
-large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
- large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
-}
-
-void
-large_dalloc(tsdn_t *tsdn, extent_t *extent) {
- arena_t *arena = extent_arena_get(extent);
- large_dalloc_prep_impl(tsdn, arena, extent, false);
- large_dalloc_finish_impl(tsdn, arena, extent);
- arena_decay_tick(tsdn, arena);
-}
-
-size_t
-large_salloc(tsdn_t *tsdn, const extent_t *extent) {
- return extent_usize_get(extent);
-}
-
-prof_tctx_t *
-large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
- return extent_prof_tctx_get(extent);
-}
-
-void
-large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
- extent_prof_tctx_set(extent, tctx);
-}
-
-void
-large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
- large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
-}
-
-nstime_t
-large_prof_alloc_time_get(const extent_t *extent) {
- return extent_prof_alloc_time_get(extent);
-}
-
-void
-large_prof_alloc_time_set(extent_t *extent, nstime_t t) {
- extent_prof_alloc_time_set(extent, t);
-}
+#define JEMALLOC_LARGE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/util.h"
+
+/******************************************************************************/
+
+void *
+large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
+ assert(usize == sz_s2u(usize));
+
+ return large_palloc(tsdn, arena, usize, CACHELINE, zero);
+}
+
+void *
+large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero) {
+ size_t ausize;
+ extent_t *extent;
+ bool is_zeroed;
+ UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ ausize = sz_sa2u(usize, alignment);
+ if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) {
+ return NULL;
+ }
+
+ if (config_fill && unlikely(opt_zero)) {
+ zero = true;
+ }
+ /*
+ * Copy zero into is_zeroed and pass the copy when allocating the
+ * extent, so that it is possible to make correct junk/zero fill
+ * decisions below, even if is_zeroed ends up true when zero is false.
+ */
+ is_zeroed = zero;
+ if (likely(!tsdn_null(tsdn))) {
+ arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
+ }
+ if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
+ arena, usize, alignment, &is_zeroed)) == NULL) {
+ return NULL;
+ }
+
+ /* See comments in arena_bin_slabs_full_insert(). */
+ if (!arena_is_auto(arena)) {
+ /* Insert extent into large. */
+ malloc_mutex_lock(tsdn, &arena->large_mtx);
+ extent_list_append(&arena->large, extent);
+ malloc_mutex_unlock(tsdn, &arena->large_mtx);
+ }
+ if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
+ prof_idump(tsdn);
+ }
+
+ if (zero) {
+ assert(is_zeroed);
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
+ extent_usize_get(extent));
+ }
+
+ arena_decay_tick(tsdn, arena);
+ return extent_addr_get(extent);
+}
+
+static void
+large_dalloc_junk_impl(void *ptr, size_t size) {
+ memset(ptr, JEMALLOC_FREE_JUNK, size);
+}
+large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;
+
+static void
+large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
+ if (config_fill && have_dss && unlikely(opt_junk_free)) {
+ /*
+ * Only bother junk filling if the extent isn't about to be
+ * unmapped.
+ */
+ if (opt_retain || (have_dss && extent_in_dss(ptr))) {
+ large_dalloc_junk(ptr, size);
+ }
+ }
+}
+large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
+ large_dalloc_maybe_junk_impl;
+
+static bool
+large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
+ arena_t *arena = extent_arena_get(extent);
+ size_t oldusize = extent_usize_get(extent);
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+ size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
+
+ assert(oldusize > usize);
+
+ if (extent_hooks->split == NULL) {
+ return true;
+ }
+
+ /* Split excess pages. */
+ if (diff != 0) {
+ extent_t *trail = extent_split_wrapper(tsdn, arena,
+ &extent_hooks, extent, usize + sz_large_pad,
+ sz_size2index(usize), false, diff, SC_NSIZES, false);
+ if (trail == NULL) {
+ return true;
+ }
+
+ if (config_fill && unlikely(opt_junk_free)) {
+ large_dalloc_maybe_junk(extent_addr_get(trail),
+ extent_size_get(trail));
+ }
+
+ arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
+ }
+
+ arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
+
+ return false;
+}
+
+static bool
+large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
+ bool zero) {
+ arena_t *arena = extent_arena_get(extent);
+ size_t oldusize = extent_usize_get(extent);
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+ size_t trailsize = usize - oldusize;
+
+ if (extent_hooks->merge == NULL) {
+ return true;
+ }
+
+ if (config_fill && unlikely(opt_zero)) {
+ zero = true;
+ }
+ /*
+ * Copy zero into is_zeroed_trail and pass the copy when allocating the
+ * extent, so that it is possible to make correct junk/zero fill
+ * decisions below, even if is_zeroed_trail ends up true when zero is
+ * false.
+ */
+ bool is_zeroed_trail = zero;
+ bool commit = true;
+ extent_t *trail;
+ bool new_mapping;
+ if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
+ CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL
+ || (trail = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
+ CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) {
+ if (config_stats) {
+ new_mapping = false;
+ }
+ } else {
+ if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
+ extent_past_get(extent), trailsize, 0, CACHELINE, false,
+ SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
+ return true;
+ }
+ if (config_stats) {
+ new_mapping = true;
+ }
+ }
+
+ if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
+ extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
+ return true;
+ }
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ szind_t szind = sz_size2index(usize);
+ extent_szind_set(extent, szind);
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_addr_get(extent), szind, false);
+
+ if (config_stats && new_mapping) {
+ arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
+ }
+
+ if (zero) {
+ if (config_cache_oblivious) {
+ /*
+ * Zero the trailing bytes of the original allocation's
+ * last page, since they are in an indeterminate state.
+ * There will always be trailing bytes, because ptr's
+ * offset from the beginning of the extent is a multiple
+ * of CACHELINE in [0 .. PAGE).
+ */
+ void *zbase = (void *)
+ ((uintptr_t)extent_addr_get(extent) + oldusize);
+ void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
+ PAGE));
+ size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
+ assert(nzero > 0);
+ memset(zbase, 0, nzero);
+ }
+ assert(is_zeroed_trail);
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
+ JEMALLOC_ALLOC_JUNK, usize - oldusize);
+ }
+
+ arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
+
+ return false;
+}
+
+bool
+large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+ size_t usize_max, bool zero) {
+ size_t oldusize = extent_usize_get(extent);
+
+ /* The following should have been caught by callers. */
+ assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
+ /* Both allocation sizes must be large to avoid a move. */
+ assert(oldusize >= SC_LARGE_MINCLASS
+ && usize_max >= SC_LARGE_MINCLASS);
+
+ if (usize_max > oldusize) {
+ /* Attempt to expand the allocation in-place. */
+ if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
+ zero)) {
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ }
+ /* Try again, this time with usize_min. */
+ if (usize_min < usize_max && usize_min > oldusize &&
+ large_ralloc_no_move_expand(tsdn, extent, usize_min,
+ zero)) {
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ }
+ }
+
+ /*
+ * Avoid moving the allocation if the existing extent size accommodates
+ * the new size.
+ */
+ if (oldusize >= usize_min && oldusize <= usize_max) {
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ }
+
+ /* Attempt to shrink the allocation in-place. */
+ if (oldusize > usize_max) {
+ if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ }
+ }
+ return true;
+}
+
+static void *
+large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero) {
+ if (alignment <= CACHELINE) {
+ return large_malloc(tsdn, arena, usize, zero);
+ }
+ return large_palloc(tsdn, arena, usize, alignment, zero);
+}
+
+void *
+large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache,
+ hook_ralloc_args_t *hook_args) {
+ extent_t *extent = iealloc(tsdn, ptr);
+
+ size_t oldusize = extent_usize_get(extent);
+ /* The following should have been caught by callers. */
+ assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
+ /* Both allocation sizes must be large to avoid a move. */
+ assert(oldusize >= SC_LARGE_MINCLASS
+ && usize >= SC_LARGE_MINCLASS);
+
+ /* Try to avoid moving the allocation. */
+ if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
+ hook_invoke_expand(hook_args->is_realloc
+ ? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
+ usize, (uintptr_t)ptr, hook_args->args);
+ return extent_addr_get(extent);
+ }
+
+ /*
+ * usize and old size are different enough that we need to use a
+ * different size class. In that case, fall back to allocating new
+ * space and copying.
+ */
+ void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
+ zero);
+ if (ret == NULL) {
+ return NULL;
+ }
+
+ hook_invoke_alloc(hook_args->is_realloc
+ ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
+ hook_args->args);
+ hook_invoke_dalloc(hook_args->is_realloc
+ ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
+
+ size_t copysize = (usize < oldusize) ? usize : oldusize;
+ memcpy(ret, extent_addr_get(extent), copysize);
+ isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
+ return ret;
+}
+
+/*
+ * junked_locked indicates whether the extent's data have been junk-filled, and
+ * whether the arena's large_mtx is currently held.
+ */
+static void
+large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ bool junked_locked) {
+ if (!junked_locked) {
+ /* See comments in arena_bin_slabs_full_insert(). */
+ if (!arena_is_auto(arena)) {
+ malloc_mutex_lock(tsdn, &arena->large_mtx);
+ extent_list_remove(&arena->large, extent);
+ malloc_mutex_unlock(tsdn, &arena->large_mtx);
+ }
+ large_dalloc_maybe_junk(extent_addr_get(extent),
+ extent_usize_get(extent));
+ } else {
+ /* Only hold the large_mtx if necessary. */
+ if (!arena_is_auto(arena)) {
+ malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
+ extent_list_remove(&arena->large, extent);
+ }
+ }
+ arena_extent_dalloc_large_prep(tsdn, arena, extent);
+}
+
+static void
+large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
+}
+
+void
+large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
+ large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
+}
+
+void
+large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
+ large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
+}
+
+void
+large_dalloc(tsdn_t *tsdn, extent_t *extent) {
+ arena_t *arena = extent_arena_get(extent);
+ large_dalloc_prep_impl(tsdn, arena, extent, false);
+ large_dalloc_finish_impl(tsdn, arena, extent);
+ arena_decay_tick(tsdn, arena);
+}
+
+size_t
+large_salloc(tsdn_t *tsdn, const extent_t *extent) {
+ return extent_usize_get(extent);
+}
+
+prof_tctx_t *
+large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
+ return extent_prof_tctx_get(extent);
+}
+
+void
+large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
+ extent_prof_tctx_set(extent, tctx);
+}
+
+void
+large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
+ large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
+}
+
+nstime_t
+large_prof_alloc_time_get(const extent_t *extent) {
+ return extent_prof_alloc_time_get(extent);
+}
+
+void
+large_prof_alloc_time_set(extent_t *extent, nstime_t t) {
+ extent_prof_alloc_time_set(extent, t);
+}
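For context, the shrink/expand/move paths in large.c above are what the public resizing entry points ultimately exercise for large allocations. The sketch below is illustrative only and not part of the diff; it assumes the un-prefixed mallocx/xallocx/rallocx/dallocx names from a default build.

#include <jemalloc/jemalloc.h>
#include <cstdio>

int main() {
    // A large allocation (well above SC_LARGE_MINCLASS on typical configurations).
    size_t oldsz = 4 << 20; // 4 MiB
    void *p = mallocx(oldsz, 0);
    if (p == nullptr) {
        return 1;
    }

    // xallocx() resizes in place and never moves the allocation; internally the
    // large case goes through large_ralloc_no_move() (expand/shrink paths above).
    size_t got = xallocx(p, 8 << 20, 0, 0);
    std::printf("in-place resize left %zu usable bytes\n", got);

    // rallocx() may move; when a large allocation has to move it goes through
    // large_ralloc(): allocate new space, copy, free the old extent.
    void *q = rallocx(p, 16 << 20, 0);
    if (q != nullptr) {
        p = q;
    }

    dallocx(p, 0);
    return 0;
}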
diff --git a/contrib/libs/jemalloc/src/log.c b/contrib/libs/jemalloc/src/log.c
index 778902fb9b..36262b76fc 100644
--- a/contrib/libs/jemalloc/src/log.c
+++ b/contrib/libs/jemalloc/src/log.c
@@ -1,78 +1,78 @@
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/log.h"
-
-char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
-atomic_b_t log_init_done = ATOMIC_INIT(false);
-
-/*
- * Returns a pointer to the first character past the end of the segment that
- * begins at segment_begin, i.e. the next '|' delimiter or the terminating NUL.
- */
-static const char *
-log_var_extract_segment(const char* segment_begin) {
- const char *end;
- for (end = segment_begin; *end != '\0' && *end != '|'; end++) {
- }
- return end;
-}
-
-static bool
-log_var_matches_segment(const char *segment_begin, const char *segment_end,
- const char *log_var_begin, const char *log_var_end) {
- assert(segment_begin <= segment_end);
- assert(log_var_begin < log_var_end);
-
- ptrdiff_t segment_len = segment_end - segment_begin;
- ptrdiff_t log_var_len = log_var_end - log_var_begin;
- /* The special '.' segment matches everything. */
- if (segment_len == 1 && *segment_begin == '.') {
- return true;
- }
- if (segment_len == log_var_len) {
- return strncmp(segment_begin, log_var_begin, segment_len) == 0;
- } else if (segment_len < log_var_len) {
- return strncmp(segment_begin, log_var_begin, segment_len) == 0
- && log_var_begin[segment_len] == '.';
- } else {
- return false;
- }
-}
-
-unsigned
-log_var_update_state(log_var_t *log_var) {
- const char *log_var_begin = log_var->name;
- const char *log_var_end = log_var->name + strlen(log_var->name);
-
- /* Pointer to one before the beginning of the current segment. */
- const char *segment_begin = log_var_names;
-
- /*
- * If log_init_done is false, we haven't parsed the malloc conf yet. To
- * avoid log-spew, we default to not displaying anything.
- */
- if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) {
- return LOG_INITIALIZED_NOT_ENABLED;
- }
-
- while (true) {
- const char *segment_end = log_var_extract_segment(
- segment_begin);
- assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE);
- if (log_var_matches_segment(segment_begin, segment_end,
- log_var_begin, log_var_end)) {
- atomic_store_u(&log_var->state, LOG_ENABLED,
- ATOMIC_RELAXED);
- return LOG_ENABLED;
- }
- if (*segment_end == '\0') {
- /* Hit the end of the segment string with no match. */
- atomic_store_u(&log_var->state,
- LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED);
- return LOG_INITIALIZED_NOT_ENABLED;
- }
- /* Otherwise, skip the delimiter and continue. */
- segment_begin = segment_end + 1;
- }
-}
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/log.h"
+
+char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
+atomic_b_t log_init_done = ATOMIC_INIT(false);
+
+/*
+ * Returns a pointer to the first character past the end of the segment that
+ * begins at segment_begin, i.e. the next '|' delimiter or the terminating NUL.
+ */
+static const char *
+log_var_extract_segment(const char* segment_begin) {
+ const char *end;
+ for (end = segment_begin; *end != '\0' && *end != '|'; end++) {
+ }
+ return end;
+}
+
+static bool
+log_var_matches_segment(const char *segment_begin, const char *segment_end,
+ const char *log_var_begin, const char *log_var_end) {
+ assert(segment_begin <= segment_end);
+ assert(log_var_begin < log_var_end);
+
+ ptrdiff_t segment_len = segment_end - segment_begin;
+ ptrdiff_t log_var_len = log_var_end - log_var_begin;
+ /* The special '.' segment matches everything. */
+ if (segment_len == 1 && *segment_begin == '.') {
+ return true;
+ }
+ if (segment_len == log_var_len) {
+ return strncmp(segment_begin, log_var_begin, segment_len) == 0;
+ } else if (segment_len < log_var_len) {
+ return strncmp(segment_begin, log_var_begin, segment_len) == 0
+ && log_var_begin[segment_len] == '.';
+ } else {
+ return false;
+ }
+}
+
+unsigned
+log_var_update_state(log_var_t *log_var) {
+ const char *log_var_begin = log_var->name;
+ const char *log_var_end = log_var->name + strlen(log_var->name);
+
+ /* Pointer to one before the beginning of the current segment. */
+ const char *segment_begin = log_var_names;
+
+ /*
+ * If log_init_done is false, we haven't parsed the malloc conf yet. To
+ * avoid log-spew, we default to not displaying anything.
+ */
+ if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) {
+ return LOG_INITIALIZED_NOT_ENABLED;
+ }
+
+ while (true) {
+ const char *segment_end = log_var_extract_segment(
+ segment_begin);
+ assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE);
+ if (log_var_matches_segment(segment_begin, segment_end,
+ log_var_begin, log_var_end)) {
+ atomic_store_u(&log_var->state, LOG_ENABLED,
+ ATOMIC_RELAXED);
+ return LOG_ENABLED;
+ }
+ if (*segment_end == '\0') {
+ /* Hit the end of the segment string with no match. */
+ atomic_store_u(&log_var->state,
+ LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED);
+ return LOG_INITIALIZED_NOT_ENABLED;
+ }
+ /* Otherwise, skip the delimiter and continue. */
+ segment_begin = segment_end + 1;
+ }
+}
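For context, log.c above implements the matcher behind jemalloc's debug logging: enabled log targets are '|'-separated name prefixes, and a lone '.' segment matches everything. The sketch below is illustrative only and not part of the diff; it assumes a build configured with --enable-log, and the MALLOC_CONF "log:" option syntax is taken from jemalloc's log.h comments and may vary by version.

// Possible invocations (assuming an --enable-log build):
//   MALLOC_CONF="log:core.sdallocx.entry|core.mallctl" ./a.out   // two prefixes
//   MALLOC_CONF="log:." ./a.out                                  // log everything
#include <cstdlib>

int main() {
    // With one of the settings above, these calls emit lines such as
    // core.malloc.entry / core.free.entry on stderr via the LOG() macros.
    void *p = std::malloc(256);
    std::free(p);
    return 0;
}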
diff --git a/contrib/libs/jemalloc/src/malloc_io.c b/contrib/libs/jemalloc/src/malloc_io.c
index d7cb0f5284..e50b5de6ac 100644
--- a/contrib/libs/jemalloc/src/malloc_io.c
+++ b/contrib/libs/jemalloc/src/malloc_io.c
@@ -1,675 +1,675 @@
-#define JEMALLOC_MALLOC_IO_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/util.h"
-
-#ifdef assert
-# undef assert
-#endif
-#ifdef not_reached
-# undef not_reached
-#endif
-#ifdef not_implemented
-# undef not_implemented
-#endif
-#ifdef assert_not_implemented
-# undef assert_not_implemented
-#endif
-
-/*
- * Define simple versions of assertion macros that won't recurse in case
- * of assertion failures in malloc_*printf().
- */
-#define assert(e) do { \
- if (config_debug && !(e)) { \
- malloc_write("<jemalloc>: Failed assertion\n"); \
- abort(); \
- } \
-} while (0)
-
-#define not_reached() do { \
- if (config_debug) { \
- malloc_write("<jemalloc>: Unreachable code reached\n"); \
- abort(); \
- } \
- unreachable(); \
-} while (0)
-
-#define not_implemented() do { \
- if (config_debug) { \
- malloc_write("<jemalloc>: Not implemented\n"); \
- abort(); \
- } \
-} while (0)
-
-#define assert_not_implemented(e) do { \
- if (unlikely(config_debug && !(e))) { \
- not_implemented(); \
- } \
-} while (0)
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void wrtmessage(void *cbopaque, const char *s);
-#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
-static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
- size_t *slen_p);
-#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
-static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
-#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
-static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
-#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
-static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
- size_t *slen_p);
-
-/******************************************************************************/
-
-/* malloc_message() setup. */
-static void
-wrtmessage(void *cbopaque, const char *s) {
- malloc_write_fd(STDERR_FILENO, s, strlen(s));
-}
-
-JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
-
-/*
- * Wrapper around malloc_message() that avoids the need for
- * je_malloc_message(...) throughout the code.
- */
-void
-malloc_write(const char *s) {
- if (je_malloc_message != NULL) {
- je_malloc_message(NULL, s);
- } else {
- wrtmessage(NULL, s);
- }
-}
-
-/*
- * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
- * provide a wrapper.
- */
-int
-buferror(int err, char *buf, size_t buflen) {
-#ifdef _WIN32
- FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
- (LPSTR)buf, (DWORD)buflen, NULL);
- return 0;
-#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE)
- char *b = strerror_r(err, buf, buflen);
- if (b != buf) {
- strncpy(buf, b, buflen);
- buf[buflen-1] = '\0';
- }
- return 0;
-#else
- return strerror_r(err, buf, buflen);
-#endif
-}
-
-uintmax_t
-malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
- uintmax_t ret, digit;
- unsigned b;
- bool neg;
- const char *p, *ns;
-
- p = nptr;
- if (base < 0 || base == 1 || base > 36) {
- ns = p;
- set_errno(EINVAL);
- ret = UINTMAX_MAX;
- goto label_return;
- }
- b = base;
-
- /* Swallow leading whitespace and get sign, if any. */
- neg = false;
- while (true) {
- switch (*p) {
- case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
- p++;
- break;
- case '-':
- neg = true;
- /* Fall through. */
- case '+':
- p++;
- /* Fall through. */
- default:
- goto label_prefix;
- }
- }
-
- /* Get prefix, if any. */
- label_prefix:
- /*
- * Note where the first non-whitespace/sign character is so that it is
- * possible to tell whether any digits are consumed (e.g., " 0" vs.
- * " -x").
- */
- ns = p;
- if (*p == '0') {
- switch (p[1]) {
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7':
- if (b == 0) {
- b = 8;
- }
- if (b == 8) {
- p++;
- }
- break;
- case 'X': case 'x':
- switch (p[2]) {
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- case 'A': case 'B': case 'C': case 'D': case 'E':
- case 'F':
- case 'a': case 'b': case 'c': case 'd': case 'e':
- case 'f':
- if (b == 0) {
- b = 16;
- }
- if (b == 16) {
- p += 2;
- }
- break;
- default:
- break;
- }
- break;
- default:
- p++;
- ret = 0;
- goto label_return;
- }
- }
- if (b == 0) {
- b = 10;
- }
-
- /* Convert. */
- ret = 0;
- while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
- || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
- || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
- uintmax_t pret = ret;
- ret *= b;
- ret += digit;
- if (ret < pret) {
- /* Overflow. */
- set_errno(ERANGE);
- ret = UINTMAX_MAX;
- goto label_return;
- }
- p++;
- }
- if (neg) {
- ret = (uintmax_t)(-((intmax_t)ret));
- }
-
- if (p == ns) {
- /* No conversion performed. */
- set_errno(EINVAL);
- ret = UINTMAX_MAX;
- goto label_return;
- }
-
-label_return:
- if (endptr != NULL) {
- if (p == ns) {
- /* No characters were converted. */
- *endptr = (char *)nptr;
- } else {
- *endptr = (char *)p;
- }
- }
- return ret;
-}
-
-static char *
-u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
- unsigned i;
-
- i = U2S_BUFSIZE - 1;
- s[i] = '\0';
- switch (base) {
- case 10:
- do {
- i--;
- s[i] = "0123456789"[x % (uint64_t)10];
- x /= (uint64_t)10;
- } while (x > 0);
- break;
- case 16: {
- const char *digits = (uppercase)
- ? "0123456789ABCDEF"
- : "0123456789abcdef";
-
- do {
- i--;
- s[i] = digits[x & 0xf];
- x >>= 4;
- } while (x > 0);
- break;
- } default: {
- const char *digits = (uppercase)
- ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- : "0123456789abcdefghijklmnopqrstuvwxyz";
-
- assert(base >= 2 && base <= 36);
- do {
- i--;
- s[i] = digits[x % (uint64_t)base];
- x /= (uint64_t)base;
- } while (x > 0);
- }}
-
- *slen_p = U2S_BUFSIZE - 1 - i;
- return &s[i];
-}
-
-static char *
-d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
- bool neg;
-
- if ((neg = (x < 0))) {
- x = -x;
- }
- s = u2s(x, 10, false, s, slen_p);
- if (neg) {
- sign = '-';
- }
- switch (sign) {
- case '-':
- if (!neg) {
- break;
- }
- /* Fall through. */
- case ' ':
- case '+':
- s--;
- (*slen_p)++;
- *s = sign;
- break;
- default: not_reached();
- }
- return s;
-}
-
-static char *
-o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) {
- s = u2s(x, 8, false, s, slen_p);
- if (alt_form && *s != '0') {
- s--;
- (*slen_p)++;
- *s = '0';
- }
- return s;
-}
-
-static char *
-x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
- s = u2s(x, 16, uppercase, s, slen_p);
- if (alt_form) {
- s -= 2;
- (*slen_p) += 2;
- memcpy(s, uppercase ? "0X" : "0x", 2);
- }
- return s;
-}
-
-size_t
-malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
- size_t i;
- const char *f;
-
-#define APPEND_C(c) do { \
- if (i < size) { \
- str[i] = (c); \
- } \
- i++; \
-} while (0)
-#define APPEND_S(s, slen) do { \
- if (i < size) { \
- size_t cpylen = (slen <= size - i) ? slen : size - i; \
- memcpy(&str[i], s, cpylen); \
- } \
- i += slen; \
-} while (0)
-#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
- /* Left padding. */ \
- size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
- (size_t)width - slen : 0); \
- if (!left_justify && pad_len != 0) { \
- size_t j; \
- for (j = 0; j < pad_len; j++) { \
- APPEND_C(' '); \
- } \
- } \
- /* Value. */ \
- APPEND_S(s, slen); \
- /* Right padding. */ \
- if (left_justify && pad_len != 0) { \
- size_t j; \
- for (j = 0; j < pad_len; j++) { \
- APPEND_C(' '); \
- } \
- } \
-} while (0)
-#define GET_ARG_NUMERIC(val, len) do { \
- switch ((unsigned char)len) { \
- case '?': \
- val = va_arg(ap, int); \
- break; \
- case '?' | 0x80: \
- val = va_arg(ap, unsigned int); \
- break; \
- case 'l': \
- val = va_arg(ap, long); \
- break; \
- case 'l' | 0x80: \
- val = va_arg(ap, unsigned long); \
- break; \
- case 'q': \
- val = va_arg(ap, long long); \
- break; \
- case 'q' | 0x80: \
- val = va_arg(ap, unsigned long long); \
- break; \
- case 'j': \
- val = va_arg(ap, intmax_t); \
- break; \
- case 'j' | 0x80: \
- val = va_arg(ap, uintmax_t); \
- break; \
- case 't': \
- val = va_arg(ap, ptrdiff_t); \
- break; \
- case 'z': \
- val = va_arg(ap, ssize_t); \
- break; \
- case 'z' | 0x80: \
- val = va_arg(ap, size_t); \
- break; \
- case 'p': /* Synthetic; used for %p. */ \
- val = va_arg(ap, uintptr_t); \
- break; \
- default: \
- not_reached(); \
- val = 0; \
- } \
-} while (0)
-
- i = 0;
- f = format;
- while (true) {
- switch (*f) {
- case '\0': goto label_out;
- case '%': {
- bool alt_form = false;
- bool left_justify = false;
- bool plus_space = false;
- bool plus_plus = false;
- int prec = -1;
- int width = -1;
- unsigned char len = '?';
- char *s;
- size_t slen;
-
- f++;
- /* Flags. */
- while (true) {
- switch (*f) {
- case '#':
- assert(!alt_form);
- alt_form = true;
- break;
- case '-':
- assert(!left_justify);
- left_justify = true;
- break;
- case ' ':
- assert(!plus_space);
- plus_space = true;
- break;
- case '+':
- assert(!plus_plus);
- plus_plus = true;
- break;
- default: goto label_width;
- }
- f++;
- }
- /* Width. */
- label_width:
- switch (*f) {
- case '*':
- width = va_arg(ap, int);
- f++;
- if (width < 0) {
- left_justify = true;
- width = -width;
- }
- break;
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9': {
- uintmax_t uwidth;
- set_errno(0);
- uwidth = malloc_strtoumax(f, (char **)&f, 10);
- assert(uwidth != UINTMAX_MAX || get_errno() !=
- ERANGE);
- width = (int)uwidth;
- break;
- } default:
- break;
- }
- /* Width/precision separator. */
- if (*f == '.') {
- f++;
- } else {
- goto label_length;
- }
- /* Precision. */
- switch (*f) {
- case '*':
- prec = va_arg(ap, int);
- f++;
- break;
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9': {
- uintmax_t uprec;
- set_errno(0);
- uprec = malloc_strtoumax(f, (char **)&f, 10);
- assert(uprec != UINTMAX_MAX || get_errno() !=
- ERANGE);
- prec = (int)uprec;
- break;
- }
- default: break;
- }
- /* Length. */
- label_length:
- switch (*f) {
- case 'l':
- f++;
- if (*f == 'l') {
- len = 'q';
- f++;
- } else {
- len = 'l';
- }
- break;
- case 'q': case 'j': case 't': case 'z':
- len = *f;
- f++;
- break;
- default: break;
- }
- /* Conversion specifier. */
- switch (*f) {
- case '%':
- /* %% */
- APPEND_C(*f);
- f++;
- break;
- case 'd': case 'i': {
- intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
- char buf[D2S_BUFSIZE];
-
- GET_ARG_NUMERIC(val, len);
- s = d2s(val, (plus_plus ? '+' : (plus_space ?
- ' ' : '-')), buf, &slen);
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- } case 'o': {
- uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
- char buf[O2S_BUFSIZE];
-
- GET_ARG_NUMERIC(val, len | 0x80);
- s = o2s(val, alt_form, buf, &slen);
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- } case 'u': {
- uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
- char buf[U2S_BUFSIZE];
-
- GET_ARG_NUMERIC(val, len | 0x80);
- s = u2s(val, 10, false, buf, &slen);
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- } case 'x': case 'X': {
- uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
- char buf[X2S_BUFSIZE];
-
- GET_ARG_NUMERIC(val, len | 0x80);
- s = x2s(val, alt_form, *f == 'X', buf, &slen);
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- } case 'c': {
- unsigned char val;
- char buf[2];
-
- assert(len == '?' || len == 'l');
- assert_not_implemented(len != 'l');
- val = va_arg(ap, int);
- buf[0] = val;
- buf[1] = '\0';
- APPEND_PADDED_S(buf, 1, width, left_justify);
- f++;
- break;
- } case 's':
- assert(len == '?' || len == 'l');
- assert_not_implemented(len != 'l');
- s = va_arg(ap, char *);
- slen = (prec < 0) ? strlen(s) : (size_t)prec;
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- case 'p': {
- uintmax_t val;
- char buf[X2S_BUFSIZE];
-
- GET_ARG_NUMERIC(val, 'p');
- s = x2s(val, true, false, buf, &slen);
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- } default: not_reached();
- }
- break;
- } default: {
- APPEND_C(*f);
- f++;
- break;
- }}
- }
- label_out:
- if (i < size) {
- str[i] = '\0';
- } else {
- str[size - 1] = '\0';
- }
-
-#undef APPEND_C
-#undef APPEND_S
-#undef APPEND_PADDED_S
-#undef GET_ARG_NUMERIC
- return i;
-}
-
-JEMALLOC_FORMAT_PRINTF(3, 4)
-size_t
-malloc_snprintf(char *str, size_t size, const char *format, ...) {
- size_t ret;
- va_list ap;
-
- va_start(ap, format);
- ret = malloc_vsnprintf(str, size, format, ap);
- va_end(ap);
-
- return ret;
-}
-
-void
-malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, va_list ap) {
- char buf[MALLOC_PRINTF_BUFSIZE];
-
- if (write_cb == NULL) {
- /*
- * The caller did not provide an alternate write_cb callback
- * function, so use the default one. malloc_write() is an
- * inline function, so use malloc_message() directly here.
- */
- write_cb = (je_malloc_message != NULL) ? je_malloc_message :
- wrtmessage;
- }
-
- malloc_vsnprintf(buf, sizeof(buf), format, ap);
- write_cb(cbopaque, buf);
-}
-
-/*
- * Print to a callback function in such a way as to (hopefully) avoid memory
- * allocation.
- */
-JEMALLOC_FORMAT_PRINTF(3, 4)
-void
-malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, ...) {
- va_list ap;
-
- va_start(ap, format);
- malloc_vcprintf(write_cb, cbopaque, format, ap);
- va_end(ap);
-}
-
-/* Print to stderr in such a way as to avoid memory allocation. */
-JEMALLOC_FORMAT_PRINTF(1, 2)
-void
-malloc_printf(const char *format, ...) {
- va_list ap;
-
- va_start(ap, format);
- malloc_vcprintf(NULL, NULL, format, ap);
- va_end(ap);
-}
-
-/*
- * Restore normal assertion macros, in order to make it possible to compile all
- * C files as a single concatenation.
- */
-#undef assert
-#undef not_reached
-#undef not_implemented
-#undef assert_not_implemented
-#include "jemalloc/internal/assert.h"
+#define JEMALLOC_MALLOC_IO_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/util.h"
+
+#ifdef assert
+# undef assert
+#endif
+#ifdef not_reached
+# undef not_reached
+#endif
+#ifdef not_implemented
+# undef not_implemented
+#endif
+#ifdef assert_not_implemented
+# undef assert_not_implemented
+#endif
+
+/*
+ * Define simple versions of assertion macros that won't recurse in case
+ * of assertion failures in malloc_*printf().
+ */
+#define assert(e) do { \
+ if (config_debug && !(e)) { \
+ malloc_write("<jemalloc>: Failed assertion\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_reached() do { \
+ if (config_debug) { \
+ malloc_write("<jemalloc>: Unreachable code reached\n"); \
+ abort(); \
+ } \
+ unreachable(); \
+} while (0)
+
+#define not_implemented() do { \
+ if (config_debug) { \
+ malloc_write("<jemalloc>: Not implemented\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define assert_not_implemented(e) do { \
+ if (unlikely(config_debug && !(e))) { \
+ not_implemented(); \
+ } \
+} while (0)
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void wrtmessage(void *cbopaque, const char *s);
+#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
+static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
+ size_t *slen_p);
+#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
+#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
+#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
+static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
+ size_t *slen_p);
+
+/******************************************************************************/
+
+/* malloc_message() setup. */
+static void
+wrtmessage(void *cbopaque, const char *s) {
+ malloc_write_fd(STDERR_FILENO, s, strlen(s));
+}
+
+JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
+
+/*
+ * Wrapper around malloc_message() that avoids the need for
+ * je_malloc_message(...) throughout the code.
+ */
+void
+malloc_write(const char *s) {
+ if (je_malloc_message != NULL) {
+ je_malloc_message(NULL, s);
+ } else {
+ wrtmessage(NULL, s);
+ }
+}
+
+/*
+ * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
+ * provide a wrapper.
+ */
+int
+buferror(int err, char *buf, size_t buflen) {
+#ifdef _WIN32
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
+ (LPSTR)buf, (DWORD)buflen, NULL);
+ return 0;
+#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE)
+ char *b = strerror_r(err, buf, buflen);
+ if (b != buf) {
+ strncpy(buf, b, buflen);
+ buf[buflen-1] = '\0';
+ }
+ return 0;
+#else
+ return strerror_r(err, buf, buflen);
+#endif
+}
+
+uintmax_t
+malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
+ uintmax_t ret, digit;
+ unsigned b;
+ bool neg;
+ const char *p, *ns;
+
+ p = nptr;
+ if (base < 0 || base == 1 || base > 36) {
+ ns = p;
+ set_errno(EINVAL);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+ b = base;
+
+ /* Swallow leading whitespace and get sign, if any. */
+ neg = false;
+ while (true) {
+ switch (*p) {
+ case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
+ p++;
+ break;
+ case '-':
+ neg = true;
+ /* Fall through. */
+ case '+':
+ p++;
+ /* Fall through. */
+ default:
+ goto label_prefix;
+ }
+ }
+
+ /* Get prefix, if any. */
+ label_prefix:
+ /*
+ * Note where the first non-whitespace/sign character is so that it is
+ * possible to tell whether any digits are consumed (e.g., " 0" vs.
+ * " -x").
+ */
+ ns = p;
+ if (*p == '0') {
+ switch (p[1]) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7':
+ if (b == 0) {
+ b = 8;
+ }
+ if (b == 8) {
+ p++;
+ }
+ break;
+ case 'X': case 'x':
+ switch (p[2]) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ if (b == 0) {
+ b = 16;
+ }
+ if (b == 16) {
+ p += 2;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ p++;
+ ret = 0;
+ goto label_return;
+ }
+ }
+ if (b == 0) {
+ b = 10;
+ }
+
+ /* Convert. */
+ ret = 0;
+ while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
+ || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
+ || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
+ uintmax_t pret = ret;
+ ret *= b;
+ ret += digit;
+ if (ret < pret) {
+ /* Overflow. */
+ set_errno(ERANGE);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+ p++;
+ }
+ if (neg) {
+ ret = (uintmax_t)(-((intmax_t)ret));
+ }
+
+ if (p == ns) {
+ /* No conversion performed. */
+ set_errno(EINVAL);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+
+label_return:
+ if (endptr != NULL) {
+ if (p == ns) {
+ /* No characters were converted. */
+ *endptr = (char *)nptr;
+ } else {
+ *endptr = (char *)p;
+ }
+ }
+ return ret;
+}
+
+static char *
+u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
+ unsigned i;
+
+ i = U2S_BUFSIZE - 1;
+ s[i] = '\0';
+ switch (base) {
+ case 10:
+ do {
+ i--;
+ s[i] = "0123456789"[x % (uint64_t)10];
+ x /= (uint64_t)10;
+ } while (x > 0);
+ break;
+ case 16: {
+ const char *digits = (uppercase)
+ ? "0123456789ABCDEF"
+ : "0123456789abcdef";
+
+ do {
+ i--;
+ s[i] = digits[x & 0xf];
+ x >>= 4;
+ } while (x > 0);
+ break;
+ } default: {
+ const char *digits = (uppercase)
+ ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ : "0123456789abcdefghijklmnopqrstuvwxyz";
+
+ assert(base >= 2 && base <= 36);
+ do {
+ i--;
+ s[i] = digits[x % (uint64_t)base];
+ x /= (uint64_t)base;
+ } while (x > 0);
+ }}
+
+ *slen_p = U2S_BUFSIZE - 1 - i;
+ return &s[i];
+}
+
+static char *
+d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
+ bool neg;
+
+ if ((neg = (x < 0))) {
+ x = -x;
+ }
+ s = u2s(x, 10, false, s, slen_p);
+ if (neg) {
+ sign = '-';
+ }
+ switch (sign) {
+ case '-':
+ if (!neg) {
+ break;
+ }
+ /* Fall through. */
+ case ' ':
+ case '+':
+ s--;
+ (*slen_p)++;
+ *s = sign;
+ break;
+ default: not_reached();
+ }
+ return s;
+}
+
+static char *
+o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) {
+ s = u2s(x, 8, false, s, slen_p);
+ if (alt_form && *s != '0') {
+ s--;
+ (*slen_p)++;
+ *s = '0';
+ }
+ return s;
+}
+
+static char *
+x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
+ s = u2s(x, 16, uppercase, s, slen_p);
+ if (alt_form) {
+ s -= 2;
+ (*slen_p) += 2;
+ memcpy(s, uppercase ? "0X" : "0x", 2);
+ }
+ return s;
+}
+
+size_t
+malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
+ size_t i;
+ const char *f;
+
+#define APPEND_C(c) do { \
+ if (i < size) { \
+ str[i] = (c); \
+ } \
+ i++; \
+} while (0)
+#define APPEND_S(s, slen) do { \
+ if (i < size) { \
+ size_t cpylen = (slen <= size - i) ? slen : size - i; \
+ memcpy(&str[i], s, cpylen); \
+ } \
+ i += slen; \
+} while (0)
+#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
+ /* Left padding. */ \
+ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
+ (size_t)width - slen : 0); \
+ if (!left_justify && pad_len != 0) { \
+ size_t j; \
+ for (j = 0; j < pad_len; j++) { \
+ APPEND_C(' '); \
+ } \
+ } \
+ /* Value. */ \
+ APPEND_S(s, slen); \
+ /* Right padding. */ \
+ if (left_justify && pad_len != 0) { \
+ size_t j; \
+ for (j = 0; j < pad_len; j++) { \
+ APPEND_C(' '); \
+ } \
+ } \
+} while (0)
+#define GET_ARG_NUMERIC(val, len) do { \
+ switch ((unsigned char)len) { \
+ case '?': \
+ val = va_arg(ap, int); \
+ break; \
+ case '?' | 0x80: \
+ val = va_arg(ap, unsigned int); \
+ break; \
+ case 'l': \
+ val = va_arg(ap, long); \
+ break; \
+ case 'l' | 0x80: \
+ val = va_arg(ap, unsigned long); \
+ break; \
+ case 'q': \
+ val = va_arg(ap, long long); \
+ break; \
+ case 'q' | 0x80: \
+ val = va_arg(ap, unsigned long long); \
+ break; \
+ case 'j': \
+ val = va_arg(ap, intmax_t); \
+ break; \
+ case 'j' | 0x80: \
+ val = va_arg(ap, uintmax_t); \
+ break; \
+ case 't': \
+ val = va_arg(ap, ptrdiff_t); \
+ break; \
+ case 'z': \
+ val = va_arg(ap, ssize_t); \
+ break; \
+ case 'z' | 0x80: \
+ val = va_arg(ap, size_t); \
+ break; \
+ case 'p': /* Synthetic; used for %p. */ \
+ val = va_arg(ap, uintptr_t); \
+ break; \
+ default: \
+ not_reached(); \
+ val = 0; \
+ } \
+} while (0)
+
+ i = 0;
+ f = format;
+ while (true) {
+ switch (*f) {
+ case '\0': goto label_out;
+ case '%': {
+ bool alt_form = false;
+ bool left_justify = false;
+ bool plus_space = false;
+ bool plus_plus = false;
+ int prec = -1;
+ int width = -1;
+ unsigned char len = '?';
+ char *s;
+ size_t slen;
+
+ f++;
+ /* Flags. */
+ while (true) {
+ switch (*f) {
+ case '#':
+ assert(!alt_form);
+ alt_form = true;
+ break;
+ case '-':
+ assert(!left_justify);
+ left_justify = true;
+ break;
+ case ' ':
+ assert(!plus_space);
+ plus_space = true;
+ break;
+ case '+':
+ assert(!plus_plus);
+ plus_plus = true;
+ break;
+ default: goto label_width;
+ }
+ f++;
+ }
+ /* Width. */
+ label_width:
+ switch (*f) {
+ case '*':
+ width = va_arg(ap, int);
+ f++;
+ if (width < 0) {
+ left_justify = true;
+ width = -width;
+ }
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ uintmax_t uwidth;
+ set_errno(0);
+ uwidth = malloc_strtoumax(f, (char **)&f, 10);
+ assert(uwidth != UINTMAX_MAX || get_errno() !=
+ ERANGE);
+ width = (int)uwidth;
+ break;
+ } default:
+ break;
+ }
+ /* Width/precision separator. */
+ if (*f == '.') {
+ f++;
+ } else {
+ goto label_length;
+ }
+ /* Precision. */
+ switch (*f) {
+ case '*':
+ prec = va_arg(ap, int);
+ f++;
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ uintmax_t uprec;
+ set_errno(0);
+ uprec = malloc_strtoumax(f, (char **)&f, 10);
+ assert(uprec != UINTMAX_MAX || get_errno() !=
+ ERANGE);
+ prec = (int)uprec;
+ break;
+ }
+ default: break;
+ }
+ /* Length. */
+ label_length:
+ switch (*f) {
+ case 'l':
+ f++;
+ if (*f == 'l') {
+ len = 'q';
+ f++;
+ } else {
+ len = 'l';
+ }
+ break;
+ case 'q': case 'j': case 't': case 'z':
+ len = *f;
+ f++;
+ break;
+ default: break;
+ }
+ /* Conversion specifier. */
+ switch (*f) {
+ case '%':
+ /* %% */
+ APPEND_C(*f);
+ f++;
+ break;
+ case 'd': case 'i': {
+ intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[D2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len);
+ s = d2s(val, (plus_plus ? '+' : (plus_space ?
+ ' ' : '-')), buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'o': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[O2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = o2s(val, alt_form, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'u': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[U2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = u2s(val, 10, false, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'x': case 'X': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[X2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = x2s(val, alt_form, *f == 'X', buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'c': {
+ unsigned char val;
+ char buf[2];
+
+ assert(len == '?' || len == 'l');
+ assert_not_implemented(len != 'l');
+ val = va_arg(ap, int);
+ buf[0] = val;
+ buf[1] = '\0';
+ APPEND_PADDED_S(buf, 1, width, left_justify);
+ f++;
+ break;
+ } case 's':
+ assert(len == '?' || len == 'l');
+ assert_not_implemented(len != 'l');
+ s = va_arg(ap, char *);
+ slen = (prec < 0) ? strlen(s) : (size_t)prec;
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ case 'p': {
+ uintmax_t val;
+ char buf[X2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, 'p');
+ s = x2s(val, true, false, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } default: not_reached();
+ }
+ break;
+ } default: {
+ APPEND_C(*f);
+ f++;
+ break;
+ }}
+ }
+ label_out:
+ if (i < size) {
+ str[i] = '\0';
+ } else {
+ str[size - 1] = '\0';
+ }
+
+#undef APPEND_C
+#undef APPEND_S
+#undef APPEND_PADDED_S
+#undef GET_ARG_NUMERIC
+ return i;
+}
+
+JEMALLOC_FORMAT_PRINTF(3, 4)
+size_t
+malloc_snprintf(char *str, size_t size, const char *format, ...) {
+ size_t ret;
+ va_list ap;
+
+ va_start(ap, format);
+ ret = malloc_vsnprintf(str, size, format, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+void
+malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, va_list ap) {
+ char buf[MALLOC_PRINTF_BUFSIZE];
+
+ if (write_cb == NULL) {
+ /*
+ * The caller did not provide an alternate write_cb callback
+ * function, so use the default one. malloc_write() is an
+ * inline function, so use malloc_message() directly here.
+ */
+ write_cb = (je_malloc_message != NULL) ? je_malloc_message :
+ wrtmessage;
+ }
+
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ write_cb(cbopaque, buf);
+}
+
+/*
+ * Print to a callback function in such a way as to (hopefully) avoid memory
+ * allocation.
+ */
+JEMALLOC_FORMAT_PRINTF(3, 4)
+void
+malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(write_cb, cbopaque, format, ap);
+ va_end(ap);
+}
+
+/* Print to stderr in such a way as to avoid memory allocation. */
+JEMALLOC_FORMAT_PRINTF(1, 2)
+void
+malloc_printf(const char *format, ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+}
+
+/*
+ * Restore normal assertion macros, in order to make it possible to compile all
+ * C files as a single concatenation.
+ */
+#undef assert
+#undef not_reached
+#undef not_implemented
+#undef assert_not_implemented
+#include "jemalloc/internal/assert.h"
diff --git a/contrib/libs/jemalloc/src/mutex.c b/contrib/libs/jemalloc/src/mutex.c
index 3f920f5b1c..b4852f6058 100644
--- a/contrib/libs/jemalloc/src/mutex.c
+++ b/contrib/libs/jemalloc/src/mutex.c
@@ -1,13 +1,13 @@
-#define JEMALLOC_MUTEX_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
+#define JEMALLOC_MUTEX_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/spin.h"
#ifndef _CRT_SPINCOUNT
-#define _CRT_SPINCOUNT 4000
+#define _CRT_SPINCOUNT 4000
#endif
/******************************************************************************/
@@ -31,8 +31,8 @@ static malloc_mutex_t *postponed_mutexes = NULL;
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
- void *__restrict arg) {
- return pthread_create_wrapper(thread, attr, start_routine, arg);
+ void *__restrict arg) {
+ return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif
@@ -43,181 +43,181 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
#endif
-void
-malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
- mutex_prof_data_t *data = &mutex->prof_data;
- nstime_t before = NSTIME_ZERO_INITIALIZER;
-
- if (ncpus == 1) {
- goto label_spin_done;
- }
-
- int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
- do {
- spin_cpu_spinwait();
- if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
- && !malloc_mutex_trylock_final(mutex)) {
- data->n_spin_acquired++;
- return;
- }
- } while (cnt++ < max_cnt);
-
- if (!config_stats) {
- /* Only spin is useful when stats is off. */
- malloc_mutex_lock_final(mutex);
- return;
- }
-label_spin_done:
- nstime_update(&before);
- /* Copy before to after to avoid clock skews. */
- nstime_t after;
- nstime_copy(&after, &before);
- uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
- ATOMIC_RELAXED) + 1;
- /* One last try as above two calls may take quite some cycles. */
- if (!malloc_mutex_trylock_final(mutex)) {
- atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
- data->n_spin_acquired++;
- return;
- }
-
- /* True slow path. */
- malloc_mutex_lock_final(mutex);
- /* Update more slow-path only counters. */
- atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
- nstime_update(&after);
-
- nstime_t delta;
- nstime_copy(&delta, &after);
- nstime_subtract(&delta, &before);
-
- data->n_wait_times++;
- nstime_add(&data->tot_wait_time, &delta);
- if (nstime_compare(&data->max_wait_time, &delta) < 0) {
- nstime_copy(&data->max_wait_time, &delta);
- }
- if (n_thds > data->max_n_thds) {
- data->max_n_thds = n_thds;
- }
-}
-
-static void
-mutex_prof_data_init(mutex_prof_data_t *data) {
- memset(data, 0, sizeof(mutex_prof_data_t));
- nstime_init(&data->max_wait_time, 0);
- nstime_init(&data->tot_wait_time, 0);
- data->prev_owner = NULL;
-}
-
-void
-malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- malloc_mutex_assert_owner(tsdn, mutex);
- mutex_prof_data_init(&mutex->prof_data);
-}
-
-static int
-mutex_addr_comp(const witness_t *witness1, void *mutex1,
- const witness_t *witness2, void *mutex2) {
- assert(mutex1 != NULL);
- assert(mutex2 != NULL);
- uintptr_t mu1int = (uintptr_t)mutex1;
- uintptr_t mu2int = (uintptr_t)mutex2;
- if (mu1int < mu2int) {
- return -1;
- } else if (mu1int == mu2int) {
- return 0;
- } else {
- return 1;
- }
-}
-
+void
+malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
+ mutex_prof_data_t *data = &mutex->prof_data;
+ nstime_t before = NSTIME_ZERO_INITIALIZER;
+
+ if (ncpus == 1) {
+ goto label_spin_done;
+ }
+
+ int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
+ do {
+ spin_cpu_spinwait();
+ if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
+ && !malloc_mutex_trylock_final(mutex)) {
+ data->n_spin_acquired++;
+ return;
+ }
+ } while (cnt++ < max_cnt);
+
+ if (!config_stats) {
+ /* Only spin is useful when stats is off. */
+ malloc_mutex_lock_final(mutex);
+ return;
+ }
+label_spin_done:
+ nstime_update(&before);
+ /* Copy before to after to avoid clock skews. */
+ nstime_t after;
+ nstime_copy(&after, &before);
+ uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
+ ATOMIC_RELAXED) + 1;
+ /* One last try as above two calls may take quite some cycles. */
+ if (!malloc_mutex_trylock_final(mutex)) {
+ atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
+ data->n_spin_acquired++;
+ return;
+ }
+
+ /* True slow path. */
+ malloc_mutex_lock_final(mutex);
+ /* Update more slow-path only counters. */
+ atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
+ nstime_update(&after);
+
+ nstime_t delta;
+ nstime_copy(&delta, &after);
+ nstime_subtract(&delta, &before);
+
+ data->n_wait_times++;
+ nstime_add(&data->tot_wait_time, &delta);
+ if (nstime_compare(&data->max_wait_time, &delta) < 0) {
+ nstime_copy(&data->max_wait_time, &delta);
+ }
+ if (n_thds > data->max_n_thds) {
+ data->max_n_thds = n_thds;
+ }
+}
+
+static void
+mutex_prof_data_init(mutex_prof_data_t *data) {
+ memset(data, 0, sizeof(mutex_prof_data_t));
+ nstime_init(&data->max_wait_time, 0);
+ nstime_init(&data->tot_wait_time, 0);
+ data->prev_owner = NULL;
+}
+
+void
+malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ malloc_mutex_assert_owner(tsdn, mutex);
+ mutex_prof_data_init(&mutex->prof_data);
+}
+
+static int
+mutex_addr_comp(const witness_t *witness1, void *mutex1,
+ const witness_t *witness2, void *mutex2) {
+ assert(mutex1 != NULL);
+ assert(mutex2 != NULL);
+ uintptr_t mu1int = (uintptr_t)mutex1;
+ uintptr_t mu2int = (uintptr_t)mutex2;
+ if (mu1int < mu2int) {
+ return -1;
+ } else if (mu1int == mu2int) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
bool
-malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
- witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
- mutex_prof_data_init(&mutex->prof_data);
+malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
+ mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
-# if _WIN32_WINNT >= 0x0600
- InitializeSRWLock(&mutex->lock);
-# else
+# if _WIN32_WINNT >= 0x0600
+ InitializeSRWLock(&mutex->lock);
+# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
- _CRT_SPINCOUNT)) {
- return true;
- }
-# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- mutex->lock = OS_UNFAIR_LOCK_INIT;
+ _CRT_SPINCOUNT)) {
+ return true;
+ }
+# endif
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
if (postpone_init) {
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
- if (_pthread_mutex_init_calloc_cb(&mutex->lock,
- bootstrap_calloc) != 0) {
- return true;
- }
+ if (_pthread_mutex_init_calloc_cb(&mutex->lock,
+ bootstrap_calloc) != 0) {
+ return true;
+ }
}
#else
pthread_mutexattr_t attr;
- if (pthread_mutexattr_init(&attr) != 0) {
- return true;
- }
+ if (pthread_mutexattr_init(&attr) != 0) {
+ return true;
+ }
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
- return true;
+ return true;
}
pthread_mutexattr_destroy(&attr);
#endif
- if (config_debug) {
- mutex->lock_order = lock_order;
- if (lock_order == malloc_mutex_address_ordered) {
- witness_init(&mutex->witness, name, rank,
- mutex_addr_comp, mutex);
- } else {
- witness_init(&mutex->witness, name, rank, NULL, NULL);
- }
- }
- return false;
+ if (config_debug) {
+ mutex->lock_order = lock_order;
+ if (lock_order == malloc_mutex_address_ordered) {
+ witness_init(&mutex->witness, name, rank,
+ mutex_addr_comp, mutex);
+ } else {
+ witness_init(&mutex->witness, name, rank, NULL, NULL);
+ }
+ }
+ return false;
}
void
-malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- malloc_mutex_lock(tsdn, mutex);
+malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ malloc_mutex_lock(tsdn, mutex);
}
void
-malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- malloc_mutex_unlock(tsdn, mutex);
+malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ malloc_mutex_unlock(tsdn, mutex);
}
void
-malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
- malloc_mutex_unlock(tsdn, mutex);
+ malloc_mutex_unlock(tsdn, mutex);
#else
- if (malloc_mutex_init(mutex, mutex->witness.name,
- mutex->witness.rank, mutex->lock_order)) {
+ if (malloc_mutex_init(mutex, mutex->witness.name,
+ mutex->witness.rank, mutex->lock_order)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
- if (opt_abort) {
+ if (opt_abort) {
abort();
- }
+ }
}
#endif
}
bool
-malloc_mutex_boot(void) {
+malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
- bootstrap_calloc) != 0) {
- return true;
- }
+ bootstrap_calloc) != 0) {
+ return true;
+ }
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
- return false;
+ return false;
}
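malloc_mutex_lock_slow() above spins a bounded number of times before taking the true blocking path and recording wait statistics. A minimal sketch of the same spin-then-block idea, assuming a plain pthread mutex, C11 atomics, and sched_yield() as a stand-in for the CPU pause hint (demo_lock_t, demo_lock_slow, and demo_unlock are invented names, not jemalloc's API):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_MAX_SPIN 250

typedef struct {
    atomic_bool locked;     /* advisory flag mirroring the mutex state */
    pthread_mutex_t lock;
} demo_lock_t;

static void
demo_lock_slow(demo_lock_t *l) {
    /* Bounded spin: cheap retries while the holder is likely to finish soon. */
    for (int cnt = 0; cnt < DEMO_MAX_SPIN; cnt++) {
        if (!atomic_load_explicit(&l->locked, memory_order_relaxed)
            && pthread_mutex_trylock(&l->lock) == 0) {
            atomic_store_explicit(&l->locked, true, memory_order_relaxed);
            return;
        }
        sched_yield();
    }
    /* True slow path: block until the mutex is available. */
    pthread_mutex_lock(&l->lock);
    atomic_store_explicit(&l->locked, true, memory_order_relaxed);
}

static void
demo_unlock(demo_lock_t *l) {
    atomic_store_explicit(&l->locked, false, memory_order_relaxed);
    pthread_mutex_unlock(&l->lock);
}

Checking the relaxed flag before every trylock keeps contended spinners from hammering the mutex itself, which is the same reason the code above tests mutex->locked before malloc_mutex_trylock_final().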
diff --git a/contrib/libs/jemalloc/src/mutex_pool.c b/contrib/libs/jemalloc/src/mutex_pool.c
index f24d10e44a..ecba865e3f 100644
--- a/contrib/libs/jemalloc/src/mutex_pool.c
+++ b/contrib/libs/jemalloc/src/mutex_pool.c
@@ -1,18 +1,18 @@
-#define JEMALLOC_MUTEX_POOL_C_
-
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
-
-bool
-mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) {
- for (int i = 0; i < MUTEX_POOL_SIZE; ++i) {
- if (malloc_mutex_init(&pool->mutexes[i], name, rank,
- malloc_mutex_address_ordered)) {
- return true;
- }
- }
- return false;
-}
+#define JEMALLOC_MUTEX_POOL_C_
+
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+
+bool
+mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) {
+ for (int i = 0; i < MUTEX_POOL_SIZE; ++i) {
+ if (malloc_mutex_init(&pool->mutexes[i], name, rank,
+ malloc_mutex_address_ordered)) {
+ return true;
+ }
+ }
+ return false;
+}
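mutex_pool.c above initializes a fixed array of address-ordered mutexes. Such a pool is typically consumed by hashing a key (often a pointer) into one slot so that an unbounded set of objects shares a bounded set of locks; a standalone sketch under that assumption (the pool size and mixing shift are illustrative, not jemalloc's):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_POOL_SIZE 16   /* power of two so we can mask instead of mod */

static pthread_mutex_t demo_pool[DEMO_POOL_SIZE];

static bool
demo_pool_init(void) {
    for (int i = 0; i < DEMO_POOL_SIZE; i++) {
        if (pthread_mutex_init(&demo_pool[i], NULL) != 0) {
            return true;    /* jemalloc convention: true means failure */
        }
    }
    return false;
}

static pthread_mutex_t *
demo_pool_mutex(const void *key) {
    /* Cheap pointer mix: drop low alignment bits, then mask into range. */
    uintptr_t k = (uintptr_t)key >> 4;
    return &demo_pool[k & (DEMO_POOL_SIZE - 1)];
}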
diff --git a/contrib/libs/jemalloc/src/nstime.c b/contrib/libs/jemalloc/src/nstime.c
index 71db353965..431ca7ca97 100644
--- a/contrib/libs/jemalloc/src/nstime.c
+++ b/contrib/libs/jemalloc/src/nstime.c
@@ -1,170 +1,170 @@
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/nstime.h"
-
-#include "jemalloc/internal/assert.h"
-
-#define BILLION UINT64_C(1000000000)
-#define MILLION UINT64_C(1000000)
-
-void
-nstime_init(nstime_t *time, uint64_t ns) {
- time->ns = ns;
-}
-
-void
-nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
- time->ns = sec * BILLION + nsec;
-}
-
-uint64_t
-nstime_ns(const nstime_t *time) {
- return time->ns;
-}
-
-uint64_t
-nstime_msec(const nstime_t *time) {
- return time->ns / MILLION;
-}
-
-uint64_t
-nstime_sec(const nstime_t *time) {
- return time->ns / BILLION;
-}
-
-uint64_t
-nstime_nsec(const nstime_t *time) {
- return time->ns % BILLION;
-}
-
-void
-nstime_copy(nstime_t *time, const nstime_t *source) {
- *time = *source;
-}
-
-int
-nstime_compare(const nstime_t *a, const nstime_t *b) {
- return (a->ns > b->ns) - (a->ns < b->ns);
-}
-
-void
-nstime_add(nstime_t *time, const nstime_t *addend) {
- assert(UINT64_MAX - time->ns >= addend->ns);
-
- time->ns += addend->ns;
-}
-
-void
-nstime_iadd(nstime_t *time, uint64_t addend) {
- assert(UINT64_MAX - time->ns >= addend);
-
- time->ns += addend;
-}
-
-void
-nstime_subtract(nstime_t *time, const nstime_t *subtrahend) {
- assert(nstime_compare(time, subtrahend) >= 0);
-
- time->ns -= subtrahend->ns;
-}
-
-void
-nstime_isubtract(nstime_t *time, uint64_t subtrahend) {
- assert(time->ns >= subtrahend);
-
- time->ns -= subtrahend;
-}
-
-void
-nstime_imultiply(nstime_t *time, uint64_t multiplier) {
- assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
- 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
-
- time->ns *= multiplier;
-}
-
-void
-nstime_idivide(nstime_t *time, uint64_t divisor) {
- assert(divisor != 0);
-
- time->ns /= divisor;
-}
-
-uint64_t
-nstime_divide(const nstime_t *time, const nstime_t *divisor) {
- assert(divisor->ns != 0);
-
- return time->ns / divisor->ns;
-}
-
-#ifdef _WIN32
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time) {
- FILETIME ft;
- uint64_t ticks_100ns;
-
- GetSystemTimeAsFileTime(&ft);
- ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-
- nstime_init(time, ticks_100ns * 100);
-}
-#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time) {
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
- nstime_init2(time, ts.tv_sec, ts.tv_nsec);
-}
-#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time) {
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC, &ts);
- nstime_init2(time, ts.tv_sec, ts.tv_nsec);
-}
-#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME)
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time) {
- nstime_init(time, mach_absolute_time());
-}
-#else
-# define NSTIME_MONOTONIC false
-static void
-nstime_get(nstime_t *time) {
- struct timeval tv;
-
- gettimeofday(&tv, NULL);
- nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
-}
-#endif
-
-static bool
-nstime_monotonic_impl(void) {
- return NSTIME_MONOTONIC;
-#undef NSTIME_MONOTONIC
-}
-nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl;
-
-static bool
-nstime_update_impl(nstime_t *time) {
- nstime_t old_time;
-
- nstime_copy(&old_time, time);
- nstime_get(time);
-
- /* Handle non-monotonic clocks. */
- if (unlikely(nstime_compare(&old_time, time) > 0)) {
- nstime_copy(time, &old_time);
- return true;
- }
-
- return false;
-}
-nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl;
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/nstime.h"
+
+#include "jemalloc/internal/assert.h"
+
+#define BILLION UINT64_C(1000000000)
+#define MILLION UINT64_C(1000000)
+
+void
+nstime_init(nstime_t *time, uint64_t ns) {
+ time->ns = ns;
+}
+
+void
+nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
+ time->ns = sec * BILLION + nsec;
+}
+
+uint64_t
+nstime_ns(const nstime_t *time) {
+ return time->ns;
+}
+
+uint64_t
+nstime_msec(const nstime_t *time) {
+ return time->ns / MILLION;
+}
+
+uint64_t
+nstime_sec(const nstime_t *time) {
+ return time->ns / BILLION;
+}
+
+uint64_t
+nstime_nsec(const nstime_t *time) {
+ return time->ns % BILLION;
+}
+
+void
+nstime_copy(nstime_t *time, const nstime_t *source) {
+ *time = *source;
+}
+
+int
+nstime_compare(const nstime_t *a, const nstime_t *b) {
+ return (a->ns > b->ns) - (a->ns < b->ns);
+}
+
+void
+nstime_add(nstime_t *time, const nstime_t *addend) {
+ assert(UINT64_MAX - time->ns >= addend->ns);
+
+ time->ns += addend->ns;
+}
+
+void
+nstime_iadd(nstime_t *time, uint64_t addend) {
+ assert(UINT64_MAX - time->ns >= addend);
+
+ time->ns += addend;
+}
+
+void
+nstime_subtract(nstime_t *time, const nstime_t *subtrahend) {
+ assert(nstime_compare(time, subtrahend) >= 0);
+
+ time->ns -= subtrahend->ns;
+}
+
+void
+nstime_isubtract(nstime_t *time, uint64_t subtrahend) {
+ assert(time->ns >= subtrahend);
+
+ time->ns -= subtrahend;
+}
+
+void
+nstime_imultiply(nstime_t *time, uint64_t multiplier) {
+ assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
+ 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
+
+ time->ns *= multiplier;
+}
+
+void
+nstime_idivide(nstime_t *time, uint64_t divisor) {
+ assert(divisor != 0);
+
+ time->ns /= divisor;
+}
+
+uint64_t
+nstime_divide(const nstime_t *time, const nstime_t *divisor) {
+ assert(divisor->ns != 0);
+
+ return time->ns / divisor->ns;
+}
+
+#ifdef _WIN32
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time) {
+ FILETIME ft;
+ uint64_t ticks_100ns;
+
+ GetSystemTimeAsFileTime(&ft);
+ ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+
+ nstime_init(time, ticks_100ns * 100);
+}
+#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time) {
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
+ nstime_init2(time, ts.tv_sec, ts.tv_nsec);
+}
+#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time) {
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ nstime_init2(time, ts.tv_sec, ts.tv_nsec);
+}
+#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME)
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time) {
+ nstime_init(time, mach_absolute_time());
+}
+#else
+# define NSTIME_MONOTONIC false
+static void
+nstime_get(nstime_t *time) {
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
+}
+#endif
+
+static bool
+nstime_monotonic_impl(void) {
+ return NSTIME_MONOTONIC;
+#undef NSTIME_MONOTONIC
+}
+nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl;
+
+static bool
+nstime_update_impl(nstime_t *time) {
+ nstime_t old_time;
+
+ nstime_copy(&old_time, time);
+ nstime_get(time);
+
+ /* Handle non-monotonic clocks. */
+ if (unlikely(nstime_compare(&old_time, time) > 0)) {
+ nstime_copy(time, &old_time);
+ return true;
+ }
+
+ return false;
+}
+nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl;
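nstime.c above stores time as a single uint64_t nanosecond count and, in nstime_update_impl(), refuses to let the stored value move backwards when the clock misbehaves. A standalone sketch of that guard using clock_gettime(CLOCK_MONOTONIC) directly (the demo_* names are invented; error handling for clock_gettime is elided):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define DEMO_BILLION UINT64_C(1000000000)

static uint64_t
demo_now_ns(void) {
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * DEMO_BILLION + (uint64_t)ts.tv_nsec;
}

/*
 * Refresh *time_ns, but never let it go backwards; returns true if the new
 * reading was rejected (i.e., the clock appeared non-monotonic).
 */
static bool
demo_update_ns(uint64_t *time_ns) {
    uint64_t now = demo_now_ns();

    if (now < *time_ns) {
        return true;
    }
    *time_ns = now;
    return false;
}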
diff --git a/contrib/libs/jemalloc/src/pages.c b/contrib/libs/jemalloc/src/pages.c
index 13de27a008..59912113be 100644
--- a/contrib/libs/jemalloc/src/pages.c
+++ b/contrib/libs/jemalloc/src/pages.c
@@ -1,649 +1,649 @@
-#define JEMALLOC_PAGES_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-
-#include "jemalloc/internal/pages.h"
-
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/malloc_io.h"
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-#include <sys/sysctl.h>
-#ifdef __FreeBSD__
-#include <vm/vm_param.h>
-#endif
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-/* Actual operating system page size, detected during bootstrap, <= PAGE. */
-static size_t os_page;
-
-#ifndef _WIN32
-# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
-# define PAGES_PROT_DECOMMIT (PROT_NONE)
-static int mmap_flags;
-#endif
-static bool os_overcommits;
-
-const char *thp_mode_names[] = {
- "default",
- "always",
- "never",
- "not supported"
-};
-thp_mode_t opt_thp = THP_MODE_DEFAULT;
-thp_mode_t init_system_thp_mode;
-
-/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
-static bool pages_can_purge_lazy_runtime = true;
-
-/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
-
-static void os_pages_unmap(void *addr, size_t size);
-
-/******************************************************************************/
-
-static void *
-os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
- assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
- assert(ALIGNMENT_CEILING(size, os_page) == size);
- assert(size != 0);
-
- if (os_overcommits) {
- *commit = true;
- }
-
- void *ret;
-#ifdef _WIN32
- /*
- * If VirtualAlloc can't allocate at the given address when one is
- * given, it fails and returns NULL.
- */
- ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
- PAGE_READWRITE);
-#else
- /*
- * We don't use MAP_FIXED here, because it can cause the *replacement*
- * of existing mappings, and we only want to create new mappings.
- */
- {
- int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
-
- ret = mmap(addr, size, prot, mmap_flags, -1, 0);
- }
- assert(ret != NULL);
-
- if (ret == MAP_FAILED) {
- ret = NULL;
- } else if (addr != NULL && ret != addr) {
- /*
- * We succeeded in mapping memory, but not in the right place.
- */
- os_pages_unmap(ret, size);
- ret = NULL;
- }
-#endif
- assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
- ret == addr));
- return ret;
-}
-
-static void *
-os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
- bool *commit) {
- void *ret = (void *)((uintptr_t)addr + leadsize);
-
- assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
- os_pages_unmap(addr, alloc_size);
- void *new_addr = os_pages_map(ret, size, PAGE, commit);
- if (new_addr == ret) {
- return ret;
- }
- if (new_addr != NULL) {
- os_pages_unmap(new_addr, size);
- }
- return NULL;
-#else
- size_t trailsize = alloc_size - leadsize - size;
-
- if (leadsize != 0) {
- os_pages_unmap(addr, leadsize);
- }
- if (trailsize != 0) {
- os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
- }
- return ret;
-#endif
-}
-
-static void
-os_pages_unmap(void *addr, size_t size) {
- assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
- assert(ALIGNMENT_CEILING(size, os_page) == size);
-
-#ifdef _WIN32
- if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
-#else
- if (munmap(addr, size) == -1)
-#endif
- {
- char buf[BUFERROR_BUF];
-
- buferror(get_errno(), buf, sizeof(buf));
- malloc_printf("<jemalloc>: Error in "
-#ifdef _WIN32
- "VirtualFree"
-#else
- "munmap"
-#endif
- "(): %s\n", buf);
- if (opt_abort) {
- abort();
- }
- }
-}
-
-static void *
-pages_map_slow(size_t size, size_t alignment, bool *commit) {
- size_t alloc_size = size + alignment - os_page;
- /* Beware size_t wrap-around. */
- if (alloc_size < size) {
- return NULL;
- }
-
- void *ret;
- do {
- void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
- if (pages == NULL) {
- return NULL;
- }
- size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
- - (uintptr_t)pages;
- ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
- } while (ret == NULL);
-
- assert(ret != NULL);
- assert(PAGE_ADDR2BASE(ret) == ret);
- return ret;
-}
-
-void *
-pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
- assert(alignment >= PAGE);
- assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);
-
-#if defined(__FreeBSD__) && defined(MAP_EXCL)
- /*
- * FreeBSD has mechanisms both to mmap at specific address without
- * touching existing mappings, and to mmap with specific alignment.
- */
- {
- if (os_overcommits) {
- *commit = true;
- }
-
- int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
- int flags = mmap_flags;
-
- if (addr != NULL) {
- flags |= MAP_FIXED | MAP_EXCL;
- } else {
- unsigned alignment_bits = ffs_zu(alignment);
- assert(alignment_bits > 1);
- flags |= MAP_ALIGNED(alignment_bits - 1);
- }
-
- void *ret = mmap(addr, size, prot, flags, -1, 0);
- if (ret == MAP_FAILED) {
- ret = NULL;
- }
-
- return ret;
- }
-#endif
- /*
- * Ideally, there would be a way to specify alignment to mmap() (like
- * NetBSD has), but in the absence of such a feature, we have to work
- * hard to efficiently create aligned mappings. The reliable, but
- * slow method is to create a mapping that is over-sized, then trim the
- * excess. However, that always results in one or two calls to
- * os_pages_unmap(), and it can leave holes in the process's virtual
- * memory map if memory grows downward.
- *
- * Optimistically try mapping precisely the right amount before falling
- * back to the slow method, with the expectation that the optimistic
- * approach works most of the time.
- */
-
- void *ret = os_pages_map(addr, size, os_page, commit);
- if (ret == NULL || ret == addr) {
- return ret;
- }
- assert(addr == NULL);
- if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
- os_pages_unmap(ret, size);
- return pages_map_slow(size, alignment, commit);
- }
-
- assert(PAGE_ADDR2BASE(ret) == ret);
- return ret;
-}
-
-void
-pages_unmap(void *addr, size_t size) {
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
-
- os_pages_unmap(addr, size);
-}
-
-static bool
-pages_commit_impl(void *addr, size_t size, bool commit) {
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
-
- if (os_overcommits) {
- return true;
- }
-
-#ifdef _WIN32
- return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
- PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
-#else
- {
- int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
- void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
- -1, 0);
- if (result == MAP_FAILED) {
- return true;
- }
- if (result != addr) {
- /*
- * We succeeded in mapping memory, but not in the right
- * place.
- */
- os_pages_unmap(result, size);
- return true;
- }
- return false;
- }
-#endif
-}
-
-bool
-pages_commit(void *addr, size_t size) {
- return pages_commit_impl(addr, size, true);
-}
-
-bool
-pages_decommit(void *addr, size_t size) {
- return pages_commit_impl(addr, size, false);
-}
-
-bool
-pages_purge_lazy(void *addr, size_t size) {
- assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
- assert(PAGE_CEILING(size) == size);
-
- if (!pages_can_purge_lazy) {
- return true;
- }
- if (!pages_can_purge_lazy_runtime) {
- /*
- * Built with lazy purge enabled, but detected it was not
- * supported on the current system.
- */
- return true;
- }
-
-#ifdef _WIN32
- VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
- return false;
-#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
- return (madvise(addr, size,
-# ifdef MADV_FREE
- MADV_FREE
-# else
- JEMALLOC_MADV_FREE
-# endif
- ) != 0);
-#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
- !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
- return (madvise(addr, size, MADV_DONTNEED) != 0);
-#else
- not_reached();
-#endif
-}
-
-bool
-pages_purge_forced(void *addr, size_t size) {
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
-
- if (!pages_can_purge_forced) {
- return true;
- }
-
-#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
- defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
- return (madvise(addr, size, MADV_DONTNEED) != 0);
-#elif defined(JEMALLOC_MAPS_COALESCE)
- /* Try to overlay a new demand-zeroed mapping. */
- return pages_commit(addr, size);
-#else
- not_reached();
-#endif
-}
-
-static bool
-pages_huge_impl(void *addr, size_t size, bool aligned) {
- if (aligned) {
- assert(HUGEPAGE_ADDR2BASE(addr) == addr);
- assert(HUGEPAGE_CEILING(size) == size);
- }
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
- return (madvise(addr, size, MADV_HUGEPAGE) != 0);
-#else
- return true;
-#endif
-}
-
-bool
-pages_huge(void *addr, size_t size) {
- return pages_huge_impl(addr, size, true);
-}
-
-static bool
-pages_huge_unaligned(void *addr, size_t size) {
- return pages_huge_impl(addr, size, false);
-}
-
-static bool
-pages_nohuge_impl(void *addr, size_t size, bool aligned) {
- if (aligned) {
- assert(HUGEPAGE_ADDR2BASE(addr) == addr);
- assert(HUGEPAGE_CEILING(size) == size);
- }
-
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
- return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
-#else
- return false;
-#endif
-}
-
-bool
-pages_nohuge(void *addr, size_t size) {
- return pages_nohuge_impl(addr, size, true);
-}
-
-static bool
-pages_nohuge_unaligned(void *addr, size_t size) {
- return pages_nohuge_impl(addr, size, false);
-}
-
-bool
-pages_dontdump(void *addr, size_t size) {
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
-#ifdef JEMALLOC_MADVISE_DONTDUMP
- return madvise(addr, size, MADV_DONTDUMP) != 0;
-#else
- return false;
-#endif
-}
-
-bool
-pages_dodump(void *addr, size_t size) {
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
-#ifdef JEMALLOC_MADVISE_DONTDUMP
- return madvise(addr, size, MADV_DODUMP) != 0;
-#else
- return false;
-#endif
-}
-
-
-static size_t
-os_page_detect(void) {
-#ifdef _WIN32
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- return si.dwPageSize;
-#elif defined(__FreeBSD__)
- /*
- * This returns the value obtained from
- * the auxv vector, avoiding a syscall.
- */
- return getpagesize();
-#else
- long result = sysconf(_SC_PAGESIZE);
- if (result == -1) {
- return LG_PAGE;
- }
- return (size_t)result;
-#endif
-}
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-static bool
-os_overcommits_sysctl(void) {
- int vm_overcommit;
- size_t sz;
-
- sz = sizeof(vm_overcommit);
-#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT)
- int mib[2];
-
- mib[0] = CTL_VM;
- mib[1] = VM_OVERCOMMIT;
- if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) {
- return false; /* Error. */
- }
-#else
- if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
- return false; /* Error. */
- }
-#endif
-
- return ((vm_overcommit & 0x3) == 0);
-}
-#endif
-
-#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-/*
- * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
- * reentry during bootstrapping if another library has interposed system call
- * wrappers.
- */
-static bool
-os_overcommits_proc(void) {
- int fd;
- char buf[1];
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
- #if defined(O_CLOEXEC)
- fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
- O_CLOEXEC);
- #else
- fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
- if (fd != -1) {
- fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
- }
- #endif
-#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
- #if defined(O_CLOEXEC)
- fd = (int)syscall(SYS_openat,
- AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
- #else
- fd = (int)syscall(SYS_openat,
- AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY);
- if (fd != -1) {
- fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
- }
- #endif
-#else
- #if defined(O_CLOEXEC)
- fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
- #else
- fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
- if (fd != -1) {
- fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
- }
- #endif
-#endif
-
- if (fd == -1) {
- return false; /* Error. */
- }
-
- ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
- syscall(SYS_close, fd);
-#else
- close(fd);
-#endif
-
- if (nread < 1) {
- return false; /* Error. */
- }
- /*
- * /proc/sys/vm/overcommit_memory meanings:
- * 0: Heuristic overcommit.
- * 1: Always overcommit.
- * 2: Never overcommit.
- */
- return (buf[0] == '0' || buf[0] == '1');
-}
-#endif
-
-void
-pages_set_thp_state (void *ptr, size_t size) {
- if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) {
- return;
- }
- assert(opt_thp != thp_mode_not_supported &&
- init_system_thp_mode != thp_mode_not_supported);
-
- if (opt_thp == thp_mode_always
- && init_system_thp_mode != thp_mode_never) {
- assert(init_system_thp_mode == thp_mode_default);
- pages_huge_unaligned(ptr, size);
- } else if (opt_thp == thp_mode_never) {
- assert(init_system_thp_mode == thp_mode_default ||
- init_system_thp_mode == thp_mode_always);
- pages_nohuge_unaligned(ptr, size);
- }
-}
-
-static void
-init_thp_state(void) {
- if (!have_madvise_huge) {
- if (metadata_thp_enabled() && opt_abort) {
- malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
- abort();
- }
- goto label_error;
- }
-
- static const char sys_state_madvise[] = "always [madvise] never\n";
- static const char sys_state_always[] = "[always] madvise never\n";
- static const char sys_state_never[] = "always madvise [never]\n";
- char buf[sizeof(sys_state_madvise)];
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
- int fd = (int)syscall(SYS_open,
- "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
-#else
- int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
-#endif
- if (fd == -1) {
- goto label_error;
- }
-
- ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
- syscall(SYS_close, fd);
-#else
- close(fd);
-#endif
-
- if (nread < 0) {
+#define JEMALLOC_PAGES_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+#include "jemalloc/internal/pages.h"
+
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/malloc_io.h"
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+#include <sys/sysctl.h>
+#ifdef __FreeBSD__
+#include <vm/vm_param.h>
+#endif
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+/* Actual operating system page size, detected during bootstrap, <= PAGE. */
+static size_t os_page;
+
+#ifndef _WIN32
+# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
+# define PAGES_PROT_DECOMMIT (PROT_NONE)
+static int mmap_flags;
+#endif
+static bool os_overcommits;
+
+const char *thp_mode_names[] = {
+ "default",
+ "always",
+ "never",
+ "not supported"
+};
+thp_mode_t opt_thp = THP_MODE_DEFAULT;
+thp_mode_t init_system_thp_mode;
+
+/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
+static bool pages_can_purge_lazy_runtime = true;
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void os_pages_unmap(void *addr, size_t size);
+
+/******************************************************************************/
+
+static void *
+os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
+ assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+ assert(ALIGNMENT_CEILING(size, os_page) == size);
+ assert(size != 0);
+
+ if (os_overcommits) {
+ *commit = true;
+ }
+
+ void *ret;
+#ifdef _WIN32
+ /*
+ * If VirtualAlloc can't allocate at the given address when one is
+ * given, it fails and returns NULL.
+ */
+ ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
+ PAGE_READWRITE);
+#else
+ /*
+ * We don't use MAP_FIXED here, because it can cause the *replacement*
+ * of existing mappings, and we only want to create new mappings.
+ */
+ {
+ int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+
+ ret = mmap(addr, size, prot, mmap_flags, -1, 0);
+ }
+ assert(ret != NULL);
+
+ if (ret == MAP_FAILED) {
+ ret = NULL;
+ } else if (addr != NULL && ret != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right place.
+ */
+ os_pages_unmap(ret, size);
+ ret = NULL;
+ }
+#endif
+ assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
+ ret == addr));
+ return ret;
+}
+
+static void *
+os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
+ bool *commit) {
+ void *ret = (void *)((uintptr_t)addr + leadsize);
+
+ assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+ os_pages_unmap(addr, alloc_size);
+ void *new_addr = os_pages_map(ret, size, PAGE, commit);
+ if (new_addr == ret) {
+ return ret;
+ }
+ if (new_addr != NULL) {
+ os_pages_unmap(new_addr, size);
+ }
+ return NULL;
+#else
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0) {
+ os_pages_unmap(addr, leadsize);
+ }
+ if (trailsize != 0) {
+ os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ }
+ return ret;
+#endif
+}
+
+static void
+os_pages_unmap(void *addr, size_t size) {
+ assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+ assert(ALIGNMENT_CEILING(size, os_page) == size);
+
+#ifdef _WIN32
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
+#else
+ if (munmap(addr, size) == -1)
+#endif
+ {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in "
+#ifdef _WIN32
+ "VirtualFree"
+#else
+ "munmap"
+#endif
+ "(): %s\n", buf);
+ if (opt_abort) {
+ abort();
+ }
+ }
+}
+
+static void *
+pages_map_slow(size_t size, size_t alignment, bool *commit) {
+ size_t alloc_size = size + alignment - os_page;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size) {
+ return NULL;
+ }
+
+ void *ret;
+ do {
+ void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
+ if (pages == NULL) {
+ return NULL;
+ }
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
+ - (uintptr_t)pages;
+ ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
+ } while (ret == NULL);
+
+ assert(ret != NULL);
+ assert(PAGE_ADDR2BASE(ret) == ret);
+ return ret;
+}
+
+void *
+pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
+ assert(alignment >= PAGE);
+ assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);
+
+#if defined(__FreeBSD__) && defined(MAP_EXCL)
+ /*
+	 * FreeBSD has mechanisms both to mmap at a specific address without
+	 * touching existing mappings, and to mmap with a specific alignment.
+ */
+ {
+ if (os_overcommits) {
+ *commit = true;
+ }
+
+ int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+ int flags = mmap_flags;
+
+ if (addr != NULL) {
+ flags |= MAP_FIXED | MAP_EXCL;
+ } else {
+ unsigned alignment_bits = ffs_zu(alignment);
+ assert(alignment_bits > 1);
+ flags |= MAP_ALIGNED(alignment_bits - 1);
+ }
+
+ void *ret = mmap(addr, size, prot, flags, -1, 0);
+ if (ret == MAP_FAILED) {
+ ret = NULL;
+ }
+
+ return ret;
+ }
+#endif
+ /*
+ * Ideally, there would be a way to specify alignment to mmap() (like
+ * NetBSD has), but in the absence of such a feature, we have to work
+ * hard to efficiently create aligned mappings. The reliable, but
+ * slow method is to create a mapping that is over-sized, then trim the
+ * excess. However, that always results in one or two calls to
+ * os_pages_unmap(), and it can leave holes in the process's virtual
+ * memory map if memory grows downward.
+ *
+ * Optimistically try mapping precisely the right amount before falling
+ * back to the slow method, with the expectation that the optimistic
+ * approach works most of the time.
+ */
+
+ void *ret = os_pages_map(addr, size, os_page, commit);
+ if (ret == NULL || ret == addr) {
+ return ret;
+ }
+ assert(addr == NULL);
+ if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
+ os_pages_unmap(ret, size);
+ return pages_map_slow(size, alignment, commit);
+ }
+
+ assert(PAGE_ADDR2BASE(ret) == ret);
+ return ret;
+}
+
+void
+pages_unmap(void *addr, size_t size) {
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+ os_pages_unmap(addr, size);
+}
+
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit) {
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+ if (os_overcommits) {
+ return true;
+ }
+
+#ifdef _WIN32
+ return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
+ PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
+#else
+ {
+ int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+ void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
+ -1, 0);
+ if (result == MAP_FAILED) {
+ return true;
+ }
+ if (result != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right
+ * place.
+ */
+ os_pages_unmap(result, size);
+ return true;
+ }
+ return false;
+ }
+#endif
+}
+
+bool
+pages_commit(void *addr, size_t size) {
+ return pages_commit_impl(addr, size, true);
+}
+
+bool
+pages_decommit(void *addr, size_t size) {
+ return pages_commit_impl(addr, size, false);
+}
+
+bool
+pages_purge_lazy(void *addr, size_t size) {
+ assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+ if (!pages_can_purge_lazy) {
+ return true;
+ }
+ if (!pages_can_purge_lazy_runtime) {
+ /*
+ * Built with lazy purge enabled, but detected it was not
+ * supported on the current system.
+ */
+ return true;
+ }
+
+#ifdef _WIN32
+ VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
+ return false;
+#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+ return (madvise(addr, size,
+# ifdef MADV_FREE
+ MADV_FREE
+# else
+ JEMALLOC_MADV_FREE
+# endif
+ ) != 0);
+#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
+ !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
+ return (madvise(addr, size, MADV_DONTNEED) != 0);
+#else
+ not_reached();
+#endif
+}
+
+bool
+pages_purge_forced(void *addr, size_t size) {
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+ if (!pages_can_purge_forced) {
+ return true;
+ }
+
+#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
+ defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
+ return (madvise(addr, size, MADV_DONTNEED) != 0);
+#elif defined(JEMALLOC_MAPS_COALESCE)
+ /* Try to overlay a new demand-zeroed mapping. */
+ return pages_commit(addr, size);
+#else
+ not_reached();
+#endif
+}
+
+static bool
+pages_huge_impl(void *addr, size_t size, bool aligned) {
+ if (aligned) {
+ assert(HUGEPAGE_ADDR2BASE(addr) == addr);
+ assert(HUGEPAGE_CEILING(size) == size);
+ }
+#ifdef JEMALLOC_HAVE_MADVISE_HUGE
+ return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#else
+ return true;
+#endif
+}
+
+bool
+pages_huge(void *addr, size_t size) {
+ return pages_huge_impl(addr, size, true);
+}
+
+static bool
+pages_huge_unaligned(void *addr, size_t size) {
+ return pages_huge_impl(addr, size, false);
+}
+
+static bool
+pages_nohuge_impl(void *addr, size_t size, bool aligned) {
+ if (aligned) {
+ assert(HUGEPAGE_ADDR2BASE(addr) == addr);
+ assert(HUGEPAGE_CEILING(size) == size);
+ }
+
+#ifdef JEMALLOC_HAVE_MADVISE_HUGE
+ return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
+#else
+ return false;
+#endif
+}
+
+bool
+pages_nohuge(void *addr, size_t size) {
+ return pages_nohuge_impl(addr, size, true);
+}
+
+static bool
+pages_nohuge_unaligned(void *addr, size_t size) {
+ return pages_nohuge_impl(addr, size, false);
+}
+
+bool
+pages_dontdump(void *addr, size_t size) {
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+#ifdef JEMALLOC_MADVISE_DONTDUMP
+ return madvise(addr, size, MADV_DONTDUMP) != 0;
+#else
+ return false;
+#endif
+}
+
+bool
+pages_dodump(void *addr, size_t size) {
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+#ifdef JEMALLOC_MADVISE_DONTDUMP
+ return madvise(addr, size, MADV_DODUMP) != 0;
+#else
+ return false;
+#endif
+}
+
+
+static size_t
+os_page_detect(void) {
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwPageSize;
+#elif defined(__FreeBSD__)
+	/*
+	 * getpagesize() returns the value obtained from the ELF auxiliary
+	 * vector (auxv), avoiding a syscall.
+	 */
+ return getpagesize();
+#else
+ long result = sysconf(_SC_PAGESIZE);
+ if (result == -1) {
+ return LG_PAGE;
+ }
+ return (size_t)result;
+#endif
+}
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+static bool
+os_overcommits_sysctl(void) {
+ int vm_overcommit;
+ size_t sz;
+
+ sz = sizeof(vm_overcommit);
+#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT)
+ int mib[2];
+
+ mib[0] = CTL_VM;
+ mib[1] = VM_OVERCOMMIT;
+ if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) {
+ return false; /* Error. */
+ }
+#else
+ if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
+ return false; /* Error. */
+ }
+#endif
+
+ return ((vm_overcommit & 0x3) == 0);
+}
+#endif
+
+#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+/*
+ * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
+ * reentry during bootstrapping if another library has interposed system call
+ * wrappers.
+ */
+static bool
+os_overcommits_proc(void) {
+ int fd;
+ char buf[1];
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
+ #if defined(O_CLOEXEC)
+ fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
+ O_CLOEXEC);
+ #else
+ fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
+ if (fd != -1) {
+ fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
+ }
+ #endif
+#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
+ #if defined(O_CLOEXEC)
+ fd = (int)syscall(SYS_openat,
+ AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
+ #else
+ fd = (int)syscall(SYS_openat,
+ AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY);
+ if (fd != -1) {
+ fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
+ }
+ #endif
+#else
+ #if defined(O_CLOEXEC)
+ fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
+ #else
+ fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+ if (fd != -1) {
+ fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
+ }
+ #endif
+#endif
+
+ if (fd == -1) {
+ return false; /* Error. */
+ }
+
+ ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
+ syscall(SYS_close, fd);
+#else
+ close(fd);
+#endif
+
+ if (nread < 1) {
+ return false; /* Error. */
+ }
+ /*
+ * /proc/sys/vm/overcommit_memory meanings:
+ * 0: Heuristic overcommit.
+ * 1: Always overcommit.
+ * 2: Never overcommit.
+ */
+ return (buf[0] == '0' || buf[0] == '1');
+}
+#endif
+
+void
+pages_set_thp_state (void *ptr, size_t size) {
+ if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) {
+ return;
+ }
+ assert(opt_thp != thp_mode_not_supported &&
+ init_system_thp_mode != thp_mode_not_supported);
+
+ if (opt_thp == thp_mode_always
+ && init_system_thp_mode != thp_mode_never) {
+ assert(init_system_thp_mode == thp_mode_default);
+ pages_huge_unaligned(ptr, size);
+ } else if (opt_thp == thp_mode_never) {
+ assert(init_system_thp_mode == thp_mode_default ||
+ init_system_thp_mode == thp_mode_always);
+ pages_nohuge_unaligned(ptr, size);
+ }
+}
+
+static void
+init_thp_state(void) {
+ if (!have_madvise_huge) {
+ if (metadata_thp_enabled() && opt_abort) {
+ malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
+ abort();
+ }
goto label_error;
- }
-
- if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
- init_system_thp_mode = thp_mode_default;
- } else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) {
- init_system_thp_mode = thp_mode_always;
- } else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) {
- init_system_thp_mode = thp_mode_never;
- } else {
- goto label_error;
- }
- return;
-label_error:
- opt_thp = init_system_thp_mode = thp_mode_not_supported;
-}
-
-bool
-pages_boot(void) {
- os_page = os_page_detect();
- if (os_page > PAGE) {
- malloc_write("<jemalloc>: Unsupported system page size\n");
- if (opt_abort) {
- abort();
- }
- return true;
- }
-
-#ifndef _WIN32
- mmap_flags = MAP_PRIVATE | MAP_ANON;
-#endif
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
- os_overcommits = os_overcommits_sysctl();
-#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
- os_overcommits = os_overcommits_proc();
-# ifdef MAP_NORESERVE
- if (os_overcommits) {
- mmap_flags |= MAP_NORESERVE;
- }
-# endif
-#else
- os_overcommits = false;
-#endif
-
- init_thp_state();
-
-#ifdef __FreeBSD__
- /*
- * FreeBSD doesn't need the check; madvise(2) is known to work.
- */
-#else
- /* Detect lazy purge runtime support. */
- if (pages_can_purge_lazy) {
- bool committed = false;
- void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed);
- if (madv_free_page == NULL) {
- return true;
- }
- assert(pages_can_purge_lazy_runtime);
- if (pages_purge_lazy(madv_free_page, PAGE)) {
- pages_can_purge_lazy_runtime = false;
- }
- os_pages_unmap(madv_free_page, PAGE);
- }
-#endif
-
- return false;
-}
+ }
+
+ static const char sys_state_madvise[] = "always [madvise] never\n";
+ static const char sys_state_always[] = "[always] madvise never\n";
+ static const char sys_state_never[] = "always madvise [never]\n";
+ char buf[sizeof(sys_state_madvise)];
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
+ int fd = (int)syscall(SYS_open,
+ "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
+#else
+ int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
+#endif
+ if (fd == -1) {
+ goto label_error;
+ }
+
+ ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
+ syscall(SYS_close, fd);
+#else
+ close(fd);
+#endif
+
+ if (nread < 0) {
+ goto label_error;
+ }
+
+ if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
+ init_system_thp_mode = thp_mode_default;
+ } else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) {
+ init_system_thp_mode = thp_mode_always;
+ } else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) {
+ init_system_thp_mode = thp_mode_never;
+ } else {
+ goto label_error;
+ }
+ return;
+label_error:
+ opt_thp = init_system_thp_mode = thp_mode_not_supported;
+}
+
+bool
+pages_boot(void) {
+ os_page = os_page_detect();
+ if (os_page > PAGE) {
+ malloc_write("<jemalloc>: Unsupported system page size\n");
+ if (opt_abort) {
+ abort();
+ }
+ return true;
+ }
+
+#ifndef _WIN32
+ mmap_flags = MAP_PRIVATE | MAP_ANON;
+#endif
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+ os_overcommits = os_overcommits_sysctl();
+#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
+ os_overcommits = os_overcommits_proc();
+# ifdef MAP_NORESERVE
+ if (os_overcommits) {
+ mmap_flags |= MAP_NORESERVE;
+ }
+# endif
+#else
+ os_overcommits = false;
+#endif
+
+ init_thp_state();
+
+#ifdef __FreeBSD__
+ /*
+ * FreeBSD doesn't need the check; madvise(2) is known to work.
+ */
+#else
+ /* Detect lazy purge runtime support. */
+ if (pages_can_purge_lazy) {
+ bool committed = false;
+ void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed);
+ if (madv_free_page == NULL) {
+ return true;
+ }
+ assert(pages_can_purge_lazy_runtime);
+ if (pages_purge_lazy(madv_free_page, PAGE)) {
+ pages_can_purge_lazy_runtime = false;
+ }
+ os_pages_unmap(madv_free_page, PAGE);
+ }
+#endif
+
+ return false;
+}
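
The comment inside pages_map() above explains the fallback used when mmap() cannot be told an alignment directly: create an over-sized mapping, then trim the unaligned head and the unused tail. The snippet below is a minimal, self-contained sketch of that idea, assuming a POSIX system, an anonymous private mapping, and a power-of-two alignment that is at least the page size; aligned_map_sketch() is a hypothetical helper, not jemalloc's API, and the real pages_map_slow()/os_pages_trim() additionally handle commit state, Windows, and retry loops.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/*
 * Obtain a mapping of `size` bytes aligned to `alignment` by over-allocating
 * and trimming.  Assumes alignment is a power of two and >= the system page
 * size; returns NULL on failure.
 */
static void *
aligned_map_sketch(size_t size, size_t alignment) {
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t alloc_size = size + alignment - page;

	/* Beware size_t wrap-around, as pages_map_slow() does. */
	if (alloc_size < size) {
		return NULL;
	}
	void *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (pages == MAP_FAILED) {
		return NULL;
	}
	/* Round up to the first aligned address inside the mapping. */
	uintptr_t base = (uintptr_t)pages;
	uintptr_t mask = (uintptr_t)alignment - 1;
	uintptr_t aligned = (base + mask) & ~mask;
	size_t leadsize = (size_t)(aligned - base);
	size_t trailsize = alloc_size - leadsize - size;

	/* Trim the unaligned head and the unused tail. */
	if (leadsize != 0) {
		munmap(pages, leadsize);
	}
	if (trailsize != 0) {
		munmap((void *)(aligned + size), trailsize);
	}
	return (void *)aligned;
}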
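
Similarly, os_overcommits_proc() above decides whether the kernel overcommits by reading a single byte from /proc/sys/vm/overcommit_memory (0 = heuristic overcommit, 1 = always overcommit, 2 = never). A simplified sketch using plain open(2)/read(2) follows; jemalloc itself prefers raw syscall(2) during bootstrap so that interposed libc wrappers cannot re-enter the allocator, and os_overcommits_sketch() is an illustrative name only.

#include <fcntl.h>
#include <stdbool.h>
#include <unistd.h>

/* Return true if the kernel is configured to overcommit memory. */
static bool
os_overcommits_sketch(void) {
	int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
	if (fd == -1) {
		return false; /* Treat errors as "no overcommit". */
	}
	char c;
	ssize_t nread = read(fd, &c, 1);
	close(fd);
	if (nread < 1) {
		return false;
	}
	/* 0 = heuristic overcommit, 1 = always overcommit, 2 = never. */
	return c == '0' || c == '1';
}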
diff --git a/contrib/libs/jemalloc/src/prng.c b/contrib/libs/jemalloc/src/prng.c
index 83c04bf9b5..913a947c21 100644
--- a/contrib/libs/jemalloc/src/prng.c
+++ b/contrib/libs/jemalloc/src/prng.c
@@ -1,3 +1,3 @@
-#define JEMALLOC_PRNG_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
+#define JEMALLOC_PRNG_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/contrib/libs/jemalloc/src/prof.c b/contrib/libs/jemalloc/src/prof.c
index da834b54fb..d692c30c44 100644
--- a/contrib/libs/jemalloc/src/prof.c
+++ b/contrib/libs/jemalloc/src/prof.c
@@ -1,30 +1,30 @@
-#define JEMALLOC_PROF_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/ckh.h"
-#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/emitter.h"
-
+#define JEMALLOC_PROF_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/emitter.h"
+
/******************************************************************************/
#ifdef JEMALLOC_PROF_LIBUNWIND
-#define UNW_LOCAL_ONLY
+#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif
#ifdef JEMALLOC_PROF_LIBGCC
-/*
- * We have a circular dependency -- jemalloc_internal.h tells us if we should
- * use libgcc's unwinding functionality, but after we've included that, we've
- * already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
- */
-#undef _Unwind_Backtrace
+/*
+ * We have a circular dependency -- jemalloc_internal.h tells us if we should
+ * use libgcc's unwinding functionality, but after we've included that, we've
+ * already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
+ */
+#undef _Unwind_Backtrace
#include <unwind.h>
-#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
+#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
#endif
/******************************************************************************/
@@ -32,14 +32,14 @@
bool opt_prof = false;
bool opt_prof_active = true;
-bool opt_prof_thread_active_init = true;
+bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
-bool opt_prof_final = false;
+bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
-bool opt_prof_log = false;
+bool opt_prof_log = false;
char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
@@ -47,161 +47,161 @@ char opt_prof_prefix[
#endif
1];
-/*
- * Initialized as opt_prof_active, and accessed via
- * prof_active_[gs]et{_unlocked,}().
- */
-bool prof_active;
-static malloc_mutex_t prof_active_mtx;
-
-/*
- * Initialized as opt_prof_thread_active_init, and accessed via
- * prof_thread_active_init_[gs]et().
- */
-static bool prof_thread_active_init;
-static malloc_mutex_t prof_thread_active_init_mtx;
-
-/*
- * Initialized as opt_prof_gdump, and accessed via
- * prof_gdump_[gs]et{_unlocked,}().
- */
-bool prof_gdump_val;
-static malloc_mutex_t prof_gdump_mtx;
-
+/*
+ * Initialized as opt_prof_active, and accessed via
+ * prof_active_[gs]et{_unlocked,}().
+ */
+bool prof_active;
+static malloc_mutex_t prof_active_mtx;
+
+/*
+ * Initialized as opt_prof_thread_active_init, and accessed via
+ * prof_thread_active_init_[gs]et().
+ */
+static bool prof_thread_active_init;
+static malloc_mutex_t prof_thread_active_init_mtx;
+
+/*
+ * Initialized as opt_prof_gdump, and accessed via
+ * prof_gdump_[gs]et{_unlocked,}().
+ */
+bool prof_gdump_val;
+static malloc_mutex_t prof_gdump_mtx;
+
uint64_t prof_interval = 0;
-size_t lg_prof_sample;
-
-typedef enum prof_logging_state_e prof_logging_state_t;
-enum prof_logging_state_e {
- prof_logging_state_stopped,
- prof_logging_state_started,
- prof_logging_state_dumping
-};
-
+size_t lg_prof_sample;
+
+typedef enum prof_logging_state_e prof_logging_state_t;
+enum prof_logging_state_e {
+ prof_logging_state_stopped,
+ prof_logging_state_started,
+ prof_logging_state_dumping
+};
+
/*
- * - stopped: log_start never called, or previous log_stop has completed.
- * - started: log_start called, log_stop not called yet. Allocations are logged.
- * - dumping: log_stop called but not finished; samples are not logged anymore.
- */
-prof_logging_state_t prof_logging_state = prof_logging_state_stopped;
-
-#ifdef JEMALLOC_JET
-static bool prof_log_dummy = false;
-#endif
-
-/* Incremented for every log file that is output. */
-static uint64_t log_seq = 0;
-static char log_filename[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PATH_MAX +
-#endif
- 1];
-
-/* Timestamp for most recent call to log_start(). */
-static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER;
-
-/* Increment these when adding to the log_bt and log_thr linked lists. */
-static size_t log_bt_index = 0;
-static size_t log_thr_index = 0;
-
-/* Linked list node definitions. These are only used in prof.c. */
-typedef struct prof_bt_node_s prof_bt_node_t;
-
-struct prof_bt_node_s {
- prof_bt_node_t *next;
- size_t index;
- prof_bt_t bt;
- /* Variable size backtrace vector pointed to by bt. */
- void *vec[1];
-};
-
-typedef struct prof_thr_node_s prof_thr_node_t;
-
-struct prof_thr_node_s {
- prof_thr_node_t *next;
- size_t index;
- uint64_t thr_uid;
- /* Variable size based on thr_name_sz. */
- char name[1];
-};
-
-typedef struct prof_alloc_node_s prof_alloc_node_t;
-
-/* This is output when logging sampled allocations. */
-struct prof_alloc_node_s {
- prof_alloc_node_t *next;
- /* Indices into an array of thread data. */
- size_t alloc_thr_ind;
- size_t free_thr_ind;
-
- /* Indices into an array of backtraces. */
- size_t alloc_bt_ind;
- size_t free_bt_ind;
-
- uint64_t alloc_time_ns;
- uint64_t free_time_ns;
-
- size_t usize;
-};
-
-/*
- * Created on the first call to prof_log_start and deleted on prof_log_stop.
- * These are the backtraces and threads that have already been logged by an
- * allocation.
- */
-static bool log_tables_initialized = false;
-static ckh_t log_bt_node_set;
-static ckh_t log_thr_node_set;
-
-/* Store linked lists for logged data. */
-static prof_bt_node_t *log_bt_first = NULL;
-static prof_bt_node_t *log_bt_last = NULL;
-static prof_thr_node_t *log_thr_first = NULL;
-static prof_thr_node_t *log_thr_last = NULL;
-static prof_alloc_node_t *log_alloc_first = NULL;
-static prof_alloc_node_t *log_alloc_last = NULL;
-
-/* Protects the prof_logging_state and any log_{...} variable. */
-static malloc_mutex_t log_mtx;
-
-/*
- * Table of mutexes that are shared among gctx's. These are leaf locks, so
- * there is no problem with using them for more than one gctx at the same time.
- * The primary motivation for this sharing though is that gctx's are ephemeral,
+ * - stopped: log_start never called, or previous log_stop has completed.
+ * - started: log_start called, log_stop not called yet. Allocations are logged.
+ * - dumping: log_stop called but not finished; samples are not logged anymore.
+ */
+prof_logging_state_t prof_logging_state = prof_logging_state_stopped;
+
+#ifdef JEMALLOC_JET
+static bool prof_log_dummy = false;
+#endif
+
+/* Incremented for every log file that is output. */
+static uint64_t log_seq = 0;
+static char log_filename[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
+
+/* Timestamp for most recent call to log_start(). */
+static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER;
+
+/* Increment these when adding to the log_bt and log_thr linked lists. */
+static size_t log_bt_index = 0;
+static size_t log_thr_index = 0;
+
+/* Linked list node definitions. These are only used in prof.c. */
+typedef struct prof_bt_node_s prof_bt_node_t;
+
+struct prof_bt_node_s {
+ prof_bt_node_t *next;
+ size_t index;
+ prof_bt_t bt;
+ /* Variable size backtrace vector pointed to by bt. */
+ void *vec[1];
+};
+
+typedef struct prof_thr_node_s prof_thr_node_t;
+
+struct prof_thr_node_s {
+ prof_thr_node_t *next;
+ size_t index;
+ uint64_t thr_uid;
+ /* Variable size based on thr_name_sz. */
+ char name[1];
+};
+
+typedef struct prof_alloc_node_s prof_alloc_node_t;
+
+/* This is output when logging sampled allocations. */
+struct prof_alloc_node_s {
+ prof_alloc_node_t *next;
+ /* Indices into an array of thread data. */
+ size_t alloc_thr_ind;
+ size_t free_thr_ind;
+
+ /* Indices into an array of backtraces. */
+ size_t alloc_bt_ind;
+ size_t free_bt_ind;
+
+ uint64_t alloc_time_ns;
+ uint64_t free_time_ns;
+
+ size_t usize;
+};
+
+/*
+ * Created on the first call to prof_log_start and deleted on prof_log_stop.
+ * These are the backtraces and threads that have already been logged by an
+ * allocation.
+ */
+static bool log_tables_initialized = false;
+static ckh_t log_bt_node_set;
+static ckh_t log_thr_node_set;
+
+/* Store linked lists for logged data. */
+static prof_bt_node_t *log_bt_first = NULL;
+static prof_bt_node_t *log_bt_last = NULL;
+static prof_thr_node_t *log_thr_first = NULL;
+static prof_thr_node_t *log_thr_last = NULL;
+static prof_alloc_node_t *log_alloc_first = NULL;
+static prof_alloc_node_t *log_alloc_last = NULL;
+
+/* Protects the prof_logging_state and any log_{...} variable. */
+static malloc_mutex_t log_mtx;
+
+/*
+ * Table of mutexes that are shared among gctx's. These are leaf locks, so
+ * there is no problem with using them for more than one gctx at the same time.
+ * The primary motivation for this sharing though is that gctx's are ephemeral,
* and destroying mutexes causes complications for systems that allocate when
* creating/destroying mutexes.
*/
-static malloc_mutex_t *gctx_locks;
-static atomic_u_t cum_gctxs; /* Atomic counter. */
+static malloc_mutex_t *gctx_locks;
+static atomic_u_t cum_gctxs; /* Atomic counter. */
/*
- * Table of mutexes that are shared among tdata's. No operations require
- * holding multiple tdata locks, so there is no problem with using them for more
- * than one tdata at the same time, even though a gctx lock may be acquired
- * while holding a tdata lock.
- */
-static malloc_mutex_t *tdata_locks;
-
-/*
- * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
+ * Table of mutexes that are shared among tdata's. No operations require
+ * holding multiple tdata locks, so there is no problem with using them for more
+ * than one tdata at the same time, even though a gctx lock may be acquired
+ * while holding a tdata lock.
+ */
+static malloc_mutex_t *tdata_locks;
+
+/*
+ * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
* structure that knows about all backtraces currently captured.
*/
-static ckh_t bt2gctx;
-/* Non static to enable profiling. */
-malloc_mutex_t bt2gctx_mtx;
-
-/*
- * Tree of all extant prof_tdata_t structures, regardless of state,
- * {attached,detached,expired}.
- */
-static prof_tdata_tree_t tdatas;
-static malloc_mutex_t tdatas_mtx;
-
-static uint64_t next_thr_uid;
-static malloc_mutex_t next_thr_uid_mtx;
-
+static ckh_t bt2gctx;
+/* Non static to enable profiling. */
+malloc_mutex_t bt2gctx_mtx;
+
+/*
+ * Tree of all extant prof_tdata_t structures, regardless of state,
+ * {attached,detached,expired}.
+ */
+static prof_tdata_tree_t tdatas;
+static malloc_mutex_t tdatas_mtx;
+
+static uint64_t next_thr_uid;
+static malloc_mutex_t next_thr_uid_mtx;
+
static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
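
The prof_logging_state comment in the hunk above describes a three-state lifecycle: stopped (never started, or a previous stop has completed), started (sampled allocations are logged), and dumping (stop requested, records being flushed). The sketch below illustrates those transitions with a plain pthread mutex for clarity; jemalloc guards the real state with its internal log_mtx, and log_start_sketch()/log_stop_sketch() are hypothetical names rather than jemalloc functions.

#include <pthread.h>
#include <stdbool.h>

typedef enum {
	LOG_STOPPED, /* Never started, or the previous stop has completed. */
	LOG_STARTED, /* Started and not yet stopped; samples are recorded. */
	LOG_DUMPING  /* Stop requested; records are flushed, none are added. */
} log_state_t;

static log_state_t log_state = LOG_STOPPED;
static pthread_mutex_t log_state_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Start logging; fails if logging is already started or being dumped. */
static bool
log_start_sketch(void) {
	pthread_mutex_lock(&log_state_mtx);
	bool ok = (log_state == LOG_STOPPED);
	if (ok) {
		log_state = LOG_STARTED;
	}
	pthread_mutex_unlock(&log_state_mtx);
	return ok;
}

/* Stop logging: move to dumping, flush, then return to stopped. */
static bool
log_stop_sketch(void) {
	pthread_mutex_lock(&log_state_mtx);
	bool ok = (log_state == LOG_STARTED);
	if (ok) {
		log_state = LOG_DUMPING;
	}
	pthread_mutex_unlock(&log_state_mtx);
	if (!ok) {
		return false;
	}
	/* Flush accumulated records here, outside the lock. */
	pthread_mutex_lock(&log_state_mtx);
	log_state = LOG_STOPPED;
	pthread_mutex_unlock(&log_state_mtx);
	return true;
}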
@@ -221,371 +221,371 @@ static char prof_dump_buf[
1
#endif
];
-static size_t prof_dump_buf_end;
+static size_t prof_dump_buf_end;
static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;
/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
-
-static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
-static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
-static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
- bool even_if_attached);
-static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
- bool even_if_attached);
-static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
-
-/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
-static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
-static bool prof_thr_node_keycomp(const void *k1, const void *k2);
-static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
-static bool prof_bt_node_keycomp(const void *k1, const void *k2);
-
-/******************************************************************************/
-/* Red-black trees. */
-
-static int
-prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
- uint64_t a_thr_uid = a->thr_uid;
- uint64_t b_thr_uid = b->thr_uid;
- int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
- if (ret == 0) {
- uint64_t a_thr_discrim = a->thr_discrim;
- uint64_t b_thr_discrim = b->thr_discrim;
- ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
- b_thr_discrim);
- if (ret == 0) {
- uint64_t a_tctx_uid = a->tctx_uid;
- uint64_t b_tctx_uid = b->tctx_uid;
- ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
- b_tctx_uid);
- }
- }
- return ret;
-}
-
-rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
- tctx_link, prof_tctx_comp)
-
-static int
-prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
- unsigned a_len = a->bt.len;
- unsigned b_len = b->bt.len;
- unsigned comp_len = (a_len < b_len) ? a_len : b_len;
- int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
- if (ret == 0) {
- ret = (a_len > b_len) - (a_len < b_len);
- }
- return ret;
-}
-
-rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
- prof_gctx_comp)
-
-static int
-prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
- int ret;
- uint64_t a_uid = a->thr_uid;
- uint64_t b_uid = b->thr_uid;
-
- ret = ((a_uid > b_uid) - (a_uid < b_uid));
- if (ret == 0) {
- uint64_t a_discrim = a->thr_discrim;
- uint64_t b_discrim = b->thr_discrim;
-
- ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
- }
- return ret;
-}
-
-rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
- prof_tdata_comp)
-
-/******************************************************************************/
-
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
+static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
+static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached);
+static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached);
+static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
+
+/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
+static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
+static bool prof_thr_node_keycomp(const void *k1, const void *k2);
+static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
+static bool prof_bt_node_keycomp(const void *k1, const void *k2);
+
+/******************************************************************************/
+/* Red-black trees. */
+
+static int
+prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
+ uint64_t a_thr_uid = a->thr_uid;
+ uint64_t b_thr_uid = b->thr_uid;
+ int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+ if (ret == 0) {
+ uint64_t a_thr_discrim = a->thr_discrim;
+ uint64_t b_thr_discrim = b->thr_discrim;
+ ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+ b_thr_discrim);
+ if (ret == 0) {
+ uint64_t a_tctx_uid = a->tctx_uid;
+ uint64_t b_tctx_uid = b->tctx_uid;
+ ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+ b_tctx_uid);
+ }
+ }
+ return ret;
+}
+
+rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
+ tctx_link, prof_tctx_comp)
+
+static int
+prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
+ unsigned a_len = a->bt.len;
+ unsigned b_len = b->bt.len;
+ unsigned comp_len = (a_len < b_len) ? a_len : b_len;
+ int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
+ if (ret == 0) {
+ ret = (a_len > b_len) - (a_len < b_len);
+ }
+ return ret;
+}
+
+rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
+ prof_gctx_comp)
+
+static int
+prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
+ int ret;
+ uint64_t a_uid = a->thr_uid;
+ uint64_t b_uid = b->thr_uid;
+
+ ret = ((a_uid > b_uid) - (a_uid < b_uid));
+ if (ret == 0) {
+ uint64_t a_discrim = a->thr_discrim;
+ uint64_t b_discrim = b->thr_discrim;
+
+ ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
+ }
+ return ret;
+}
+
+rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
+ prof_tdata_comp)
+
+/******************************************************************************/
+
void
-prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
- prof_tdata_t *tdata;
+prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
+ prof_tdata_t *tdata;
cassert(config_prof);
- if (updated) {
- /*
- * Compute a new sample threshold. This isn't very important in
- * practice, because this function is rarely executed, so the
- * potential for sample bias is minimal except in contrived
- * programs.
- */
- tdata = prof_tdata_get(tsd, true);
- if (tdata != NULL) {
- prof_sample_threshold_update(tdata);
- }
- }
-
- if ((uintptr_t)tctx > (uintptr_t)1U) {
- malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
- tctx->prepared = false;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
- prof_tctx_destroy(tsd, tctx);
- } else {
- malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
- }
- }
-}
-
-void
-prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx) {
- prof_tctx_set(tsdn, ptr, usize, NULL, tctx);
-
- /* Get the current time and set this in the extent_t. We'll read this
- * when free() is called. */
- nstime_t t = NSTIME_ZERO_INITIALIZER;
- nstime_update(&t);
- prof_alloc_time_set(tsdn, ptr, NULL, t);
-
- malloc_mutex_lock(tsdn, tctx->tdata->lock);
- tctx->cnts.curobjs++;
- tctx->cnts.curbytes += usize;
- if (opt_prof_accum) {
- tctx->cnts.accumobjs++;
- tctx->cnts.accumbytes += usize;
- }
- tctx->prepared = false;
- malloc_mutex_unlock(tsdn, tctx->tdata->lock);
-}
-
-static size_t
-prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
- assert(prof_logging_state == prof_logging_state_started);
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
-
- prof_bt_node_t dummy_node;
- dummy_node.bt = *bt;
- prof_bt_node_t *node;
-
- /* See if this backtrace is already cached in the table. */
- if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
- (void **)(&node), NULL)) {
- size_t sz = offsetof(prof_bt_node_t, vec) +
- (bt->len * sizeof(void *));
- prof_bt_node_t *new_node = (prof_bt_node_t *)
- iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
- true, arena_get(TSDN_NULL, 0, true), true);
- if (log_bt_first == NULL) {
- log_bt_first = new_node;
- log_bt_last = new_node;
- } else {
- log_bt_last->next = new_node;
- log_bt_last = new_node;
- }
-
- new_node->next = NULL;
- new_node->index = log_bt_index;
- /*
- * Copy the backtrace: bt is inside a tdata or gctx, which
- * might die before prof_log_stop is called.
- */
- new_node->bt.len = bt->len;
- memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
- new_node->bt.vec = new_node->vec;
-
- log_bt_index++;
- ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
- return new_node->index;
- } else {
- return node->index;
- }
-}
-static size_t
-prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
- assert(prof_logging_state == prof_logging_state_started);
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
-
- prof_thr_node_t dummy_node;
- dummy_node.thr_uid = thr_uid;
- prof_thr_node_t *node;
-
- /* See if this thread is already cached in the table. */
- if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
- (void **)(&node), NULL)) {
- size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
- prof_thr_node_t *new_node = (prof_thr_node_t *)
- iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
- true, arena_get(TSDN_NULL, 0, true), true);
- if (log_thr_first == NULL) {
- log_thr_first = new_node;
- log_thr_last = new_node;
- } else {
- log_thr_last->next = new_node;
- log_thr_last = new_node;
- }
-
- new_node->next = NULL;
- new_node->index = log_thr_index;
- new_node->thr_uid = thr_uid;
- strcpy(new_node->name, name);
-
- log_thr_index++;
- ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
- return new_node->index;
- } else {
- return node->index;
- }
-}
-
+ if (updated) {
+ /*
+ * Compute a new sample threshold. This isn't very important in
+ * practice, because this function is rarely executed, so the
+ * potential for sample bias is minimal except in contrived
+ * programs.
+ */
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata != NULL) {
+ prof_sample_threshold_update(tdata);
+ }
+ }
+
+ if ((uintptr_t)tctx > (uintptr_t)1U) {
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ tctx->prepared = false;
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
+ prof_tctx_destroy(tsd, tctx);
+ } else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
+ }
+}
+
+void
+prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx) {
+ prof_tctx_set(tsdn, ptr, usize, NULL, tctx);
+
+ /* Get the current time and set this in the extent_t. We'll read this
+ * when free() is called. */
+ nstime_t t = NSTIME_ZERO_INITIALIZER;
+ nstime_update(&t);
+ prof_alloc_time_set(tsdn, ptr, NULL, t);
+
+ malloc_mutex_lock(tsdn, tctx->tdata->lock);
+ tctx->cnts.curobjs++;
+ tctx->cnts.curbytes += usize;
+ if (opt_prof_accum) {
+ tctx->cnts.accumobjs++;
+ tctx->cnts.accumbytes += usize;
+ }
+ tctx->prepared = false;
+ malloc_mutex_unlock(tsdn, tctx->tdata->lock);
+}
+
+static size_t
+prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
+ assert(prof_logging_state == prof_logging_state_started);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
+
+ prof_bt_node_t dummy_node;
+ dummy_node.bt = *bt;
+ prof_bt_node_t *node;
+
+ /* See if this backtrace is already cached in the table. */
+ if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
+ (void **)(&node), NULL)) {
+ size_t sz = offsetof(prof_bt_node_t, vec) +
+ (bt->len * sizeof(void *));
+ prof_bt_node_t *new_node = (prof_bt_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
+ true, arena_get(TSDN_NULL, 0, true), true);
+ if (log_bt_first == NULL) {
+ log_bt_first = new_node;
+ log_bt_last = new_node;
+ } else {
+ log_bt_last->next = new_node;
+ log_bt_last = new_node;
+ }
+
+ new_node->next = NULL;
+ new_node->index = log_bt_index;
+ /*
+ * Copy the backtrace: bt is inside a tdata or gctx, which
+ * might die before prof_log_stop is called.
+ */
+ new_node->bt.len = bt->len;
+ memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
+ new_node->bt.vec = new_node->vec;
+
+ log_bt_index++;
+ ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
+ return new_node->index;
+ } else {
+ return node->index;
+ }
+}
+static size_t
+prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
+ assert(prof_logging_state == prof_logging_state_started);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
+
+ prof_thr_node_t dummy_node;
+ dummy_node.thr_uid = thr_uid;
+ prof_thr_node_t *node;
+
+ /* See if this thread is already cached in the table. */
+ if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
+ (void **)(&node), NULL)) {
+ size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
+ prof_thr_node_t *new_node = (prof_thr_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
+ true, arena_get(TSDN_NULL, 0, true), true);
+ if (log_thr_first == NULL) {
+ log_thr_first = new_node;
+ log_thr_last = new_node;
+ } else {
+ log_thr_last->next = new_node;
+ log_thr_last = new_node;
+ }
+
+ new_node->next = NULL;
+ new_node->index = log_thr_index;
+ new_node->thr_uid = thr_uid;
+ strcpy(new_node->name, name);
+
+ log_thr_index++;
+ ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
+ return new_node->index;
+ } else {
+ return node->index;
+ }
+}
+
static void
-prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) {
- malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
- prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
- if (cons_tdata == NULL) {
- /*
- * We decide not to log these allocations. cons_tdata will be
- * NULL only when the current thread is in a weird state (e.g.
- * it's being destroyed).
- */
- return;
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);
-
- if (prof_logging_state != prof_logging_state_started) {
- goto label_done;
- }
-
- if (!log_tables_initialized) {
- bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
- prof_bt_node_hash, prof_bt_node_keycomp);
- bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
- prof_thr_node_hash, prof_thr_node_keycomp);
- if (err1 || err2) {
- goto label_done;
- }
- log_tables_initialized = true;
- }
-
- nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr,
- (alloc_ctx_t *)NULL);
- nstime_t free_time = NSTIME_ZERO_INITIALIZER;
- nstime_update(&free_time);
-
- size_t sz = sizeof(prof_alloc_node_t);
- prof_alloc_node_t *new_node = (prof_alloc_node_t *)
- iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
-
- const char *prod_thr_name = (tctx->tdata->thread_name == NULL)?
- "" : tctx->tdata->thread_name;
- const char *cons_thr_name = prof_thread_name_get(tsd);
-
- prof_bt_t bt;
- /* Initialize the backtrace, using the buffer in tdata to store it. */
- bt_init(&bt, cons_tdata->vec);
- prof_backtrace(&bt);
- prof_bt_t *cons_bt = &bt;
-
- /* We haven't destroyed tctx yet, so gctx should be good to read. */
- prof_bt_t *prod_bt = &tctx->gctx->bt;
-
- new_node->next = NULL;
- new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
- prod_thr_name);
- new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
- cons_thr_name);
- new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
- new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
- new_node->alloc_time_ns = nstime_ns(&alloc_time);
- new_node->free_time_ns = nstime_ns(&free_time);
- new_node->usize = usize;
-
- if (log_alloc_first == NULL) {
- log_alloc_first = new_node;
- log_alloc_last = new_node;
- } else {
- log_alloc_last->next = new_node;
- log_alloc_last = new_node;
- }
-
-label_done:
- malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
-}
-
-void
-prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
- prof_tctx_t *tctx) {
- malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
-
- assert(tctx->cnts.curobjs > 0);
- assert(tctx->cnts.curbytes >= usize);
- tctx->cnts.curobjs--;
- tctx->cnts.curbytes -= usize;
-
- prof_try_log(tsd, ptr, usize, tctx);
-
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
- prof_tctx_destroy(tsd, tctx);
- } else {
- malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
- }
-}
-
-void
-bt_init(prof_bt_t *bt, void **vec) {
+prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
+ if (cons_tdata == NULL) {
+ /*
+ * We decide not to log these allocations. cons_tdata will be
+ * NULL only when the current thread is in a weird state (e.g.
+ * it's being destroyed).
+ */
+ return;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);
+
+ if (prof_logging_state != prof_logging_state_started) {
+ goto label_done;
+ }
+
+ if (!log_tables_initialized) {
+ bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
+ prof_bt_node_hash, prof_bt_node_keycomp);
+ bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
+ prof_thr_node_hash, prof_thr_node_keycomp);
+ if (err1 || err2) {
+ goto label_done;
+ }
+ log_tables_initialized = true;
+ }
+
+ nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr,
+ (alloc_ctx_t *)NULL);
+ nstime_t free_time = NSTIME_ZERO_INITIALIZER;
+ nstime_update(&free_time);
+
+ size_t sz = sizeof(prof_alloc_node_t);
+ prof_alloc_node_t *new_node = (prof_alloc_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+
+ const char *prod_thr_name = (tctx->tdata->thread_name == NULL)?
+ "" : tctx->tdata->thread_name;
+ const char *cons_thr_name = prof_thread_name_get(tsd);
+
+ prof_bt_t bt;
+ /* Initialize the backtrace, using the buffer in tdata to store it. */
+ bt_init(&bt, cons_tdata->vec);
+ prof_backtrace(&bt);
+ prof_bt_t *cons_bt = &bt;
+
+ /* We haven't destroyed tctx yet, so gctx should be good to read. */
+ prof_bt_t *prod_bt = &tctx->gctx->bt;
+
+ new_node->next = NULL;
+ new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
+ prod_thr_name);
+ new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
+ cons_thr_name);
+ new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
+ new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
+ new_node->alloc_time_ns = nstime_ns(&alloc_time);
+ new_node->free_time_ns = nstime_ns(&free_time);
+ new_node->usize = usize;
+
+ if (log_alloc_first == NULL) {
+ log_alloc_first = new_node;
+ log_alloc_last = new_node;
+ } else {
+ log_alloc_last->next = new_node;
+ log_alloc_last = new_node;
+ }
+
+label_done:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
+}
+
+void
+prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
+ prof_tctx_t *tctx) {
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ assert(tctx->cnts.curobjs > 0);
+ assert(tctx->cnts.curbytes >= usize);
+ tctx->cnts.curobjs--;
+ tctx->cnts.curbytes -= usize;
+
+ prof_try_log(tsd, ptr, usize, tctx);
+
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
+ prof_tctx_destroy(tsd, tctx);
+ } else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
+}
+
+void
+bt_init(prof_bt_t *bt, void **vec) {
+ cassert(config_prof);
+
+ bt->vec = vec;
+ bt->len = 0;
+}
+
+static void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
- bt->vec = vec;
- bt->len = 0;
-}
-
-static void
-prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
- cassert(config_prof);
- assert(tdata == prof_tdata_get(tsd, false));
-
- if (tdata != NULL) {
- assert(!tdata->enq);
- tdata->enq = true;
- }
+ if (tdata != NULL) {
+ assert(!tdata->enq);
+ tdata->enq = true;
+ }
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}
-static void
-prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
+static void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
cassert(config_prof);
- assert(tdata == prof_tdata_get(tsd, false));
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ assert(tdata == prof_tdata_get(tsd, false));
- if (tdata != NULL) {
- bool idump, gdump;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
- assert(tdata->enq);
- tdata->enq = false;
- idump = tdata->enq_idump;
- tdata->enq_idump = false;
- gdump = tdata->enq_gdump;
- tdata->enq_gdump = false;
+ if (tdata != NULL) {
+ bool idump, gdump;
- if (idump) {
- prof_idump(tsd_tsdn(tsd));
- }
- if (gdump) {
- prof_gdump(tsd_tsdn(tsd));
- }
- }
+ assert(tdata->enq);
+ tdata->enq = false;
+ idump = tdata->enq_idump;
+ tdata->enq_idump = false;
+ gdump = tdata->enq_gdump;
+ tdata->enq_gdump = false;
+
+ if (idump) {
+ prof_idump(tsd_tsdn(tsd));
+ }
+ if (gdump) {
+ prof_gdump(tsd_tsdn(tsd));
+ }
+ }
}
#ifdef JEMALLOC_PROF_LIBUNWIND
void
-prof_backtrace(prof_bt_t *bt) {
+prof_backtrace(prof_bt_t *bt) {
unw_context_t uc;
unw_cursor_t cursor;
unsigned i;
@@ -612,35 +612,35 @@ prof_backtrace(prof_bt_t *bt) {
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
-prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
cassert(config_prof);
- return _URC_NO_REASON;
+ return _URC_NO_REASON;
}
static _Unwind_Reason_Code
-prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
+prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
- void *ip;
+ void *ip;
cassert(config_prof);
- ip = (void *)_Unwind_GetIP(context);
- if (ip == NULL) {
- return _URC_END_OF_STACK;
- }
- data->bt->vec[data->bt->len] = ip;
- data->bt->len++;
- if (data->bt->len == data->max) {
- return _URC_END_OF_STACK;
+ ip = (void *)_Unwind_GetIP(context);
+ if (ip == NULL) {
+ return _URC_END_OF_STACK;
}
+ data->bt->vec[data->bt->len] = ip;
+ data->bt->len++;
+ if (data->bt->len == data->max) {
+ return _URC_END_OF_STACK;
+ }
- return _URC_NO_REASON;
+ return _URC_NO_REASON;
}
void
-prof_backtrace(prof_bt_t *bt) {
- prof_unwind_data_t data = {bt, PROF_BT_MAX};
+prof_backtrace(prof_bt_t *bt) {
+ prof_unwind_data_t data = {bt, PROF_BT_MAX};
cassert(config_prof);
@@ -648,22 +648,22 @@ prof_backtrace(prof_bt_t *bt) {
}
#elif (defined(JEMALLOC_PROF_GCC))
void
-prof_backtrace(prof_bt_t *bt) {
-#define BT_FRAME(i) \
- if ((i) < PROF_BT_MAX) { \
+prof_backtrace(prof_bt_t *bt) {
+#define BT_FRAME(i) \
+ if ((i) < PROF_BT_MAX) { \
void *p; \
- if (__builtin_frame_address(i) == 0) { \
+ if (__builtin_frame_address(i) == 0) { \
return; \
- } \
+ } \
p = __builtin_return_address(i); \
- if (p == NULL) { \
+ if (p == NULL) { \
return; \
} \
- bt->vec[(i)] = p; \
- bt->len = (i) + 1; \
- } else { \
- return; \
- }
+ bt->vec[(i)] = p; \
+ bt->len = (i) + 1; \
+ } else { \
+ return; \
+ }
cassert(config_prof);
@@ -811,473 +811,473 @@ prof_backtrace(prof_bt_t *bt) {
}
#else
void
-prof_backtrace(prof_bt_t *bt) {
+prof_backtrace(prof_bt_t *bt) {
cassert(config_prof);
not_reached();
}
#endif
static malloc_mutex_t *
-prof_gctx_mutex_choose(void) {
- unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
+prof_gctx_mutex_choose(void) {
+ unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
- return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
+ return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}
-static malloc_mutex_t *
-prof_tdata_mutex_choose(uint64_t thr_uid) {
- return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
-}
+static malloc_mutex_t *
+prof_tdata_mutex_choose(uint64_t thr_uid) {
+ return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
+}
-static prof_gctx_t *
-prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
- /*
- * Create a single allocation that has space for vec of length bt->len.
- */
- size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
- prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
- sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
- true);
- if (gctx == NULL) {
- return NULL;
- }
- gctx->lock = prof_gctx_mutex_choose();
+static prof_gctx_t *
+prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
/*
+ * Create a single allocation that has space for vec of length bt->len.
+ */
+ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
+ sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
+ true);
+ if (gctx == NULL) {
+ return NULL;
+ }
+ gctx->lock = prof_gctx_mutex_choose();
+ /*
* Set nlimbo to 1, in order to avoid a race condition with
- * prof_tctx_destroy()/prof_gctx_try_destroy().
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- gctx->nlimbo = 1;
- tctx_tree_new(&gctx->tctxs);
- /* Duplicate bt. */
- memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
- gctx->bt.vec = gctx->vec;
- gctx->bt.len = bt->len;
- return gctx;
+ gctx->nlimbo = 1;
+ tctx_tree_new(&gctx->tctxs);
+ /* Duplicate bt. */
+ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
+ gctx->bt.vec = gctx->vec;
+ gctx->bt.len = bt->len;
+ return gctx;
}
static void
-prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
- prof_tdata_t *tdata) {
+prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
+ prof_tdata_t *tdata) {
cassert(config_prof);
/*
- * Check that gctx is still unused by any thread cache before destroying
- * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
- * condition with this function, as does prof_tctx_destroy() in order to
- * avoid a race between the main body of prof_tctx_destroy() and entry
+ * Check that gctx is still unused by any thread cache before destroying
+ * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_tctx_destroy() in order to
+ * avoid a race between the main body of prof_tctx_destroy() and entry
* into this function.
*/
- prof_enter(tsd, tdata_self);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- assert(gctx->nlimbo != 0);
- if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
- /* Remove gctx from bt2gctx. */
- if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
+ prof_enter(tsd, tdata_self);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ assert(gctx->nlimbo != 0);
+ if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
+ /* Remove gctx from bt2gctx. */
+ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
not_reached();
- }
- prof_leave(tsd, tdata_self);
- /* Destroy gctx. */
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
+ }
+ prof_leave(tsd, tdata_self);
+ /* Destroy gctx. */
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
} else {
/*
- * Compensate for increment in prof_tctx_destroy() or
+ * Compensate for increment in prof_tctx_destroy() or
* prof_lookup().
*/
- gctx->nlimbo--;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- prof_leave(tsd, tdata_self);
- }
-}
-
-static bool
-prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
- malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
- if (opt_prof_accum) {
- return false;
- }
- if (tctx->cnts.curobjs != 0) {
- return false;
- }
- if (tctx->prepared) {
- return false;
- }
- return true;
-}
-
-static bool
-prof_gctx_should_destroy(prof_gctx_t *gctx) {
- if (opt_prof_accum) {
- return false;
- }
- if (!tctx_tree_empty(&gctx->tctxs)) {
- return false;
- }
- if (gctx->nlimbo != 0) {
- return false;
- }
- return true;
-}
-
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_leave(tsd, tdata_self);
+ }
+}
+
+static bool
+prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
+ if (opt_prof_accum) {
+ return false;
+ }
+ if (tctx->cnts.curobjs != 0) {
+ return false;
+ }
+ if (tctx->prepared) {
+ return false;
+ }
+ return true;
+}
+
+static bool
+prof_gctx_should_destroy(prof_gctx_t *gctx) {
+ if (opt_prof_accum) {
+ return false;
+ }
+ if (!tctx_tree_empty(&gctx->tctxs)) {
+ return false;
+ }
+ if (gctx->nlimbo != 0) {
+ return false;
+ }
+ return true;
+}
+
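The nlimbo handling above follows one pattern used throughout this file: a thread bumps the counter while the relevant lock is held, drops the lock to do other work, and the destroyer frees the object only when it is empty and its own increment is the last one left; otherwise it merely compensates for the increment. A minimal standalone sketch of that idea, using pthreads and invented names (node_t, node_pin, node_try_destroy) rather than the jemalloc types (link with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
        pthread_mutex_t lock;
        unsigned nlimbo;   /* threads that still intend to touch this node */
        bool empty;        /* stands in for tctx_tree_empty(&gctx->tctxs) */
} node_t;

/* Grab a "limbo" reference under the lock before doing unlocked work. */
static void
node_pin(node_t *n) {
        pthread_mutex_lock(&n->lock);
        n->nlimbo++;
        pthread_mutex_unlock(&n->lock);
}

/* Destroy only if the node is empty and ours is the sole limbo reference. */
static void
node_try_destroy(node_t *n) {
        pthread_mutex_lock(&n->lock);
        if (n->empty && n->nlimbo == 1) {
                pthread_mutex_unlock(&n->lock);
                pthread_mutex_destroy(&n->lock);
                free(n);
                return;
        }
        n->nlimbo--;   /* compensate for the earlier increment */
        pthread_mutex_unlock(&n->lock);
}

int
main(void) {
        node_t *n = malloc(sizeof(*n));
        pthread_mutex_init(&n->lock, NULL);
        n->nlimbo = 0;
        n->empty = true;
        node_pin(n);
        node_try_destroy(n);   /* frees n: empty and nlimbo == 1 */
        puts("done");
        return 0;
}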
static void
-prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
- prof_tdata_t *tdata = tctx->tdata;
- prof_gctx_t *gctx = tctx->gctx;
- bool destroy_tdata, destroy_tctx, destroy_gctx;
-
- malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
- assert(tctx->cnts.curobjs == 0);
- assert(tctx->cnts.curbytes == 0);
- assert(!opt_prof_accum);
- assert(tctx->cnts.accumobjs == 0);
- assert(tctx->cnts.accumbytes == 0);
-
- ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
- destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
-
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- switch (tctx->state) {
- case prof_tctx_state_nominal:
- tctx_tree_remove(&gctx->tctxs, tctx);
- destroy_tctx = true;
- if (prof_gctx_should_destroy(gctx)) {
- /*
- * Increment gctx->nlimbo in order to keep another
- * thread from winning the race to destroy gctx while
- * this one has gctx->lock dropped. Without this, it
- * would be possible for another thread to:
- *
- * 1) Sample an allocation associated with gctx.
- * 2) Deallocate the sampled object.
- * 3) Successfully prof_gctx_try_destroy(gctx).
- *
- * The result would be that gctx no longer exists by the
- * time this thread accesses it in
- * prof_gctx_try_destroy().
- */
- gctx->nlimbo++;
- destroy_gctx = true;
- } else {
- destroy_gctx = false;
- }
- break;
- case prof_tctx_state_dumping:
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
+ prof_tdata_t *tdata = tctx->tdata;
+ prof_gctx_t *gctx = tctx->gctx;
+ bool destroy_tdata, destroy_tctx, destroy_gctx;
+
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ assert(tctx->cnts.curobjs == 0);
+ assert(tctx->cnts.curbytes == 0);
+ assert(!opt_prof_accum);
+ assert(tctx->cnts.accumobjs == 0);
+ assert(tctx->cnts.accumbytes == 0);
+
+ ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ tctx_tree_remove(&gctx->tctxs, tctx);
+ destroy_tctx = true;
+ if (prof_gctx_should_destroy(gctx)) {
+ /*
+ * Increment gctx->nlimbo in order to keep another
+ * thread from winning the race to destroy gctx while
+ * this one has gctx->lock dropped. Without this, it
+ * would be possible for another thread to:
+ *
+ * 1) Sample an allocation associated with gctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_gctx_try_destroy(gctx).
+ *
+ * The result would be that gctx no longer exists by the
+ * time this thread accesses it in
+ * prof_gctx_try_destroy().
+ */
+ gctx->nlimbo++;
+ destroy_gctx = true;
+ } else {
+ destroy_gctx = false;
+ }
+ break;
+ case prof_tctx_state_dumping:
/*
- * A dumping thread needs tctx to remain valid until dumping
- * has finished. Change state such that the dumping thread will
- * complete destruction during a late dump iteration phase.
+ * A dumping thread needs tctx to remain valid until dumping
+ * has finished. Change state such that the dumping thread will
+ * complete destruction during a late dump iteration phase.
*/
- tctx->state = prof_tctx_state_purgatory;
- destroy_tctx = false;
- destroy_gctx = false;
- break;
- default:
- not_reached();
- destroy_tctx = false;
- destroy_gctx = false;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- if (destroy_gctx) {
- prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
- tdata);
- }
-
- malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
- if (destroy_tdata) {
- prof_tdata_destroy(tsd, tdata, false);
- }
-
- if (destroy_tctx) {
- idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
- }
+ tctx->state = prof_tctx_state_purgatory;
+ destroy_tctx = false;
+ destroy_gctx = false;
+ break;
+ default:
+ not_reached();
+ destroy_tctx = false;
+ destroy_gctx = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ if (destroy_gctx) {
+ prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
+ tdata);
+ }
+
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ if (destroy_tdata) {
+ prof_tdata_destroy(tsd, tdata, false);
+ }
+
+ if (destroy_tctx) {
+ idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
+ }
}
static bool
-prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
- void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
+prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
+ void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
union {
- prof_gctx_t *p;
+ prof_gctx_t *p;
void *v;
- } gctx, tgctx;
+ } gctx, tgctx;
union {
prof_bt_t *p;
void *v;
} btkey;
- bool new_gctx;
+ bool new_gctx;
- prof_enter(tsd, tdata);
- if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */
- prof_leave(tsd, tdata);
- tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
- if (tgctx.v == NULL) {
- return true;
- }
- prof_enter(tsd, tdata);
- if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
- gctx.p = tgctx.p;
- btkey.p = &gctx.p->bt;
- if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
- /* OOM. */
- prof_leave(tsd, tdata);
- idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
- true, true);
- return true;
- }
- new_gctx = true;
- } else {
- new_gctx = false;
+ prof_leave(tsd, tdata);
+ tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
+ if (tgctx.v == NULL) {
+ return true;
+ }
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
+ gctx.p = tgctx.p;
+ btkey.p = &gctx.p->bt;
+ if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
+ /* OOM. */
+ prof_leave(tsd, tdata);
+ idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
+ true, true);
+ return true;
+ }
+ new_gctx = true;
+ } else {
+ new_gctx = false;
}
} else {
- tgctx.v = NULL;
- new_gctx = false;
- }
-
- if (!new_gctx) {
+ tgctx.v = NULL;
+ new_gctx = false;
+ }
+
+ if (!new_gctx) {
/*
* Increment nlimbo, in order to avoid a race condition with
- * prof_tctx_destroy()/prof_gctx_try_destroy().
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
- gctx.p->nlimbo++;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
- new_gctx = false;
-
- if (tgctx.v != NULL) {
- /* Lost race to insert. */
- idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
- true);
- }
- }
- prof_leave(tsd, tdata);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
+ gctx.p->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
+ new_gctx = false;
+
+ if (tgctx.v != NULL) {
+ /* Lost race to insert. */
+ idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
+ true);
+ }
+ }
+ prof_leave(tsd, tdata);
*p_btkey = btkey.v;
- *p_gctx = gctx.p;
- *p_new_gctx = new_gctx;
- return false;
+ *p_gctx = gctx.p;
+ *p_new_gctx = new_gctx;
+ return false;
}
-prof_tctx_t *
-prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
+prof_tctx_t *
+prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
union {
- prof_tctx_t *p;
+ prof_tctx_t *p;
void *v;
} ret;
- prof_tdata_t *tdata;
- bool not_found;
+ prof_tdata_t *tdata;
+ bool not_found;
cassert(config_prof);
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL) {
- return NULL;
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
- not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
- if (!not_found) { /* Note double negative! */
- ret.p->prepared = true;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (not_found) {
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ return NULL;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
+ if (!not_found) { /* Note double negative! */
+ ret.p->prepared = true;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (not_found) {
void *btkey;
- prof_gctx_t *gctx;
- bool new_gctx, error;
+ prof_gctx_t *gctx;
+ bool new_gctx, error;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
- if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
- &new_gctx)) {
- return NULL;
- }
-
- /* Link a prof_tctx_t into gctx for this thread. */
- ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
- sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
- arena_ichoose(tsd, NULL), true);
- if (ret.p == NULL) {
- if (new_gctx) {
- prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
+ &new_gctx)) {
+ return NULL;
+ }
+
+ /* Link a prof_tctx_t into gctx for this thread. */
+ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
+ sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
+ arena_ichoose(tsd, NULL), true);
+ if (ret.p == NULL) {
+ if (new_gctx) {
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
}
- return NULL;
+ return NULL;
}
- ret.p->tdata = tdata;
- ret.p->thr_uid = tdata->thr_uid;
- ret.p->thr_discrim = tdata->thr_discrim;
+ ret.p->tdata = tdata;
+ ret.p->thr_uid = tdata->thr_uid;
+ ret.p->thr_discrim = tdata->thr_discrim;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
- ret.p->gctx = gctx;
- ret.p->tctx_uid = tdata->tctx_uid_next++;
- ret.p->prepared = true;
- ret.p->state = prof_tctx_state_initializing;
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
- error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (error) {
- if (new_gctx) {
- prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- }
- idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
- return NULL;
- }
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- ret.p->state = prof_tctx_state_nominal;
- tctx_tree_insert(&gctx->tctxs, ret.p);
- gctx->nlimbo--;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- }
-
- return ret.p;
-}
-
-/*
- * The bodies of this function and prof_leakcheck() are compiled out unless heap
- * profiling is enabled, so that it is possible to compile jemalloc with
- * floating point support completely disabled. Avoiding floating point code is
- * important on memory-constrained systems, but it also enables a workaround for
- * versions of glibc that don't properly save/restore floating point registers
- * during dynamic lazy symbol loading (which internally calls into whatever
- * malloc implementation happens to be integrated into the application). Note
- * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
- * memory moves, so jemalloc must be compiled with such optimizations disabled
- * (e.g. -mno-sse) in order for the workaround to be complete.
- */
-void
-prof_sample_threshold_update(prof_tdata_t *tdata) {
-#ifdef JEMALLOC_PROF
- if (!config_prof) {
- return;
- }
-
- if (lg_prof_sample == 0) {
- tsd_bytes_until_sample_set(tsd_fetch(), 0);
- return;
- }
-
- /*
- * Compute sample interval as a geometrically distributed random
- * variable with mean (2^lg_prof_sample).
- *
- * __ __
- * | log(u) | 1
- * tdata->bytes_until_sample = | -------- |, where p = ---------------
- * | log(1-p) | lg_prof_sample
- * 2
- *
- * For more information on the math, see:
- *
- * Non-Uniform Random Variate Generation
- * Luc Devroye
- * Springer-Verlag, New York, 1986
- * pp 500
- * (http://luc.devroye.org/rnbookindex.html)
- */
- uint64_t r = prng_lg_range_u64(&tdata->prng_state, 53);
- double u = (double)r * (1.0/9007199254740992.0L);
- uint64_t bytes_until_sample = (uint64_t)(log(u) /
- log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
- + (uint64_t)1U;
- if (bytes_until_sample > SSIZE_MAX) {
- bytes_until_sample = SSIZE_MAX;
- }
- tsd_bytes_until_sample_set(tsd_fetch(), bytes_until_sample);
-
-#endif
-}
-
+ ret.p->gctx = gctx;
+ ret.p->tctx_uid = tdata->tctx_uid_next++;
+ ret.p->prepared = true;
+ ret.p->state = prof_tctx_state_initializing;
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (error) {
+ if (new_gctx) {
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ }
+ idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
+ return NULL;
+ }
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ ret.p->state = prof_tctx_state_nominal;
+ tctx_tree_insert(&gctx->tctxs, ret.p);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
+
+ return ret.p;
+}
+
+/*
+ * The bodies of this function and prof_leakcheck() are compiled out unless heap
+ * profiling is enabled, so that it is possible to compile jemalloc with
+ * floating point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a workaround for
+ * versions of glibc that don't properly save/restore floating point registers
+ * during dynamic lazy symbol loading (which internally calls into whatever
+ * malloc implementation happens to be integrated into the application). Note
+ * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
+ * memory moves, so jemalloc must be compiled with such optimizations disabled
+ * (e.g. -mno-sse) in order for the workaround to be complete.
+ */
+void
+prof_sample_threshold_update(prof_tdata_t *tdata) {
+#ifdef JEMALLOC_PROF
+ if (!config_prof) {
+ return;
+ }
+
+ if (lg_prof_sample == 0) {
+ tsd_bytes_until_sample_set(tsd_fetch(), 0);
+ return;
+ }
+
+ /*
+ * Compute sample interval as a geometrically distributed random
+ * variable with mean (2^lg_prof_sample).
+ *
+ * __ __
+ * | log(u) | 1
+ * tdata->bytes_until_sample = | -------- |, where p = ---------------
+ * | log(1-p) | lg_prof_sample
+ * 2
+ *
+ * For more information on the math, see:
+ *
+ * Non-Uniform Random Variate Generation
+ * Luc Devroye
+ * Springer-Verlag, New York, 1986
+ * pp 500
+ * (http://luc.devroye.org/rnbookindex.html)
+ */
+ uint64_t r = prng_lg_range_u64(&tdata->prng_state, 53);
+ double u = (double)r * (1.0/9007199254740992.0L);
+ uint64_t bytes_until_sample = (uint64_t)(log(u) /
+ log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ + (uint64_t)1U;
+ if (bytes_until_sample > SSIZE_MAX) {
+ bytes_until_sample = SSIZE_MAX;
+ }
+ tsd_bytes_until_sample_set(tsd_fetch(), bytes_until_sample);
+
+#endif
+}
+
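The computation above is an inverse-CDF draw from a geometric distribution whose mean is 2^lg_prof_sample bytes. A self-contained sketch of the same arithmetic; the fixed lg_prof_sample of 19 and the use of rand() are illustrative only, since jemalloc draws 53 bits from its own PRNG (link with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void) {
        unsigned lg_prof_sample = 19;   /* mean interval = 2^19 bytes = 512 KiB */
        /* Uniform variate in (0, 1); jemalloc uses prng_lg_range_u64() instead. */
        double u = ((double)rand() + 1.0) / ((double)RAND_MAX + 2.0);
        double p = 1.0 / (double)((uint64_t)1 << lg_prof_sample);
        /* Ceiling of log(u)/log(1-p): the geometric inverse CDF. */
        uint64_t bytes_until_sample = (uint64_t)(log(u) / log(1.0 - p)) + 1;
        printf("next sample after %llu bytes (mean %llu)\n",
            (unsigned long long)bytes_until_sample,
            (unsigned long long)((uint64_t)1 << lg_prof_sample));
        return 0;
}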
#ifdef JEMALLOC_JET
-static prof_tdata_t *
-prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *arg) {
- size_t *tdata_count = (size_t *)arg;
-
- (*tdata_count)++;
-
- return NULL;
-}
-
+static prof_tdata_t *
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
+ size_t *tdata_count = (size_t *)arg;
+
+ (*tdata_count)++;
+
+ return NULL;
+}
+
size_t
-prof_tdata_count(void) {
- size_t tdata_count = 0;
- tsdn_t *tsdn;
-
- tsdn = tsdn_fetch();
- malloc_mutex_lock(tsdn, &tdatas_mtx);
- tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
- (void *)&tdata_count);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
-
- return tdata_count;
-}
-
-size_t
-prof_bt_count(void) {
+prof_tdata_count(void) {
+ size_t tdata_count = 0;
+ tsdn_t *tsdn;
+
+ tsdn = tsdn_fetch();
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
+ (void *)&tdata_count);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+
+ return tdata_count;
+}
+
+size_t
+prof_bt_count(void) {
size_t bt_count;
- tsd_t *tsd;
- prof_tdata_t *tdata;
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
- tsd = tsd_fetch();
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL) {
- return 0;
- }
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ return 0;
+ }
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
- bt_count = ckh_count(&bt2gctx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ bt_count = ckh_count(&bt2gctx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
- return bt_count;
+ return bt_count;
}
#endif
static int
-prof_dump_open_impl(bool propagate_err, const char *filename) {
+prof_dump_open_impl(bool propagate_err, const char *filename) {
int fd;
fd = creat(filename, 0644);
- if (fd == -1 && !propagate_err) {
+ if (fd == -1 && !propagate_err) {
malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
filename);
- if (opt_abort) {
+ if (opt_abort) {
abort();
- }
+ }
}
- return fd;
+ return fd;
}
-prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl;
+prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl;
static bool
-prof_dump_flush(bool propagate_err) {
+prof_dump_flush(bool propagate_err) {
bool ret = false;
ssize_t err;
cassert(config_prof);
- err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
+ err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
if (err == -1) {
- if (!propagate_err) {
+ if (!propagate_err) {
malloc_write("<jemalloc>: write() failed during heap "
"profile flush\n");
- if (opt_abort) {
+ if (opt_abort) {
abort();
- }
+ }
}
ret = true;
}
prof_dump_buf_end = 0;
- return ret;
+ return ret;
}
static bool
-prof_dump_close(bool propagate_err) {
+prof_dump_close(bool propagate_err) {
bool ret;
assert(prof_dump_fd != -1);
@@ -1285,12 +1285,12 @@ prof_dump_close(bool propagate_err) {
close(prof_dump_fd);
prof_dump_fd = -1;
- return ret;
+ return ret;
}
static bool
-prof_dump_write(bool propagate_err, const char *s) {
- size_t i, slen, n;
+prof_dump_write(bool propagate_err, const char *s) {
+ size_t i, slen, n;
cassert(config_prof);
@@ -1298,13 +1298,13 @@ prof_dump_write(bool propagate_err, const char *s) {
slen = strlen(s);
while (i < slen) {
/* Flush the buffer if it is full. */
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
- if (prof_dump_flush(propagate_err) && propagate_err) {
- return true;
- }
- }
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
+ if (prof_dump_flush(propagate_err) && propagate_err) {
+ return true;
+ }
+ }
- if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) {
+ if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) {
/* Finish writing. */
n = slen - i;
} else {
@@ -1315,14 +1315,14 @@ prof_dump_write(bool propagate_err, const char *s) {
prof_dump_buf_end += n;
i += n;
}
- assert(i == slen);
+ assert(i == slen);
- return false;
+ return false;
}
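prof_dump_write() above is a plain fixed-buffer writer: append into the dump buffer and flush to the dump fd whenever it fills. A minimal standalone version of the same loop; the 16-byte buffer and writing to stdout are illustrative choices, not jemalloc's (PROF_DUMP_BUFSIZE is far larger):

#include <string.h>
#include <unistd.h>

#define BUFSZ 16   /* tiny on purpose, to force several flushes */

static char buf[BUFSZ];
static size_t buf_end;

static void
flush(int fd) {
        if (buf_end > 0) {
                (void)write(fd, buf, buf_end);   /* error handling elided */
                buf_end = 0;
        }
}

static void
buffered_write(int fd, const char *s) {
        size_t i = 0, slen = strlen(s);
        while (i < slen) {
                if (buf_end == BUFSZ) {
                        flush(fd);   /* buffer full: drain it first */
                }
                /* Copy as much as fits, mirroring the condition above. */
                size_t n = (buf_end + slen - i <= BUFSZ) ? slen - i
                    : BUFSZ - buf_end;
                memcpy(&buf[buf_end], &s[i], n);
                buf_end += n;
                i += n;
        }
}

int
main(void) {
        buffered_write(STDOUT_FILENO, "a fairly long line that will not fit\n");
        flush(STDOUT_FILENO);
        return 0;
}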
-JEMALLOC_FORMAT_PRINTF(2, 3)
+JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
-prof_dump_printf(bool propagate_err, const char *format, ...) {
+prof_dump_printf(bool propagate_err, const char *format, ...) {
bool ret;
va_list ap;
char buf[PROF_PRINTF_BUFSIZE];
@@ -1332,408 +1332,408 @@ prof_dump_printf(bool propagate_err, const char *format, ...) {
va_end(ap);
ret = prof_dump_write(propagate_err, buf);
- return ret;
-}
-
-static void
-prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
- malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
- malloc_mutex_lock(tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_initializing:
- malloc_mutex_unlock(tsdn, tctx->gctx->lock);
- return;
- case prof_tctx_state_nominal:
- tctx->state = prof_tctx_state_dumping;
- malloc_mutex_unlock(tsdn, tctx->gctx->lock);
-
- memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
-
- tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
- tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
- if (opt_prof_accum) {
- tdata->cnt_summed.accumobjs +=
- tctx->dump_cnts.accumobjs;
- tdata->cnt_summed.accumbytes +=
- tctx->dump_cnts.accumbytes;
- }
- break;
- case prof_tctx_state_dumping:
- case prof_tctx_state_purgatory:
- not_reached();
- }
-}
-
-static void
-prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
- malloc_mutex_assert_owner(tsdn, gctx->lock);
-
- gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
- gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
- if (opt_prof_accum) {
- gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
- gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
- }
-}
-
-static prof_tctx_t *
-prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
- tsdn_t *tsdn = (tsdn_t *)arg;
-
- malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_nominal:
- /* New since dumping started; ignore. */
- break;
- case prof_tctx_state_dumping:
- case prof_tctx_state_purgatory:
- prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
- break;
- default:
- not_reached();
- }
-
- return NULL;
-}
-
-struct prof_tctx_dump_iter_arg_s {
- tsdn_t *tsdn;
- bool propagate_err;
-};
-
-static prof_tctx_t *
-prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
- struct prof_tctx_dump_iter_arg_s *arg =
- (struct prof_tctx_dump_iter_arg_s *)opaque;
-
- malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_initializing:
- case prof_tctx_state_nominal:
- /* Not captured by this dump. */
- break;
- case prof_tctx_state_dumping:
- case prof_tctx_state_purgatory:
- if (prof_dump_printf(arg->propagate_err,
- " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
- "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
- tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
- tctx->dump_cnts.accumbytes)) {
- return tctx;
- }
- break;
- default:
- not_reached();
- }
- return NULL;
-}
-
-static prof_tctx_t *
-prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
- tsdn_t *tsdn = (tsdn_t *)arg;
- prof_tctx_t *ret;
-
- malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_nominal:
- /* New since dumping started; ignore. */
- break;
- case prof_tctx_state_dumping:
- tctx->state = prof_tctx_state_nominal;
- break;
- case prof_tctx_state_purgatory:
- ret = tctx;
- goto label_return;
- default:
- not_reached();
- }
-
- ret = NULL;
-label_return:
- return ret;
+ return ret;
}
static void
-prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
+prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
+ malloc_mutex_lock(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+ return;
+ case prof_tctx_state_nominal:
+ tctx->state = prof_tctx_state_dumping;
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+
+ memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
+
+ tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ if (opt_prof_accum) {
+ tdata->cnt_summed.accumobjs +=
+ tctx->dump_cnts.accumobjs;
+ tdata->cnt_summed.accumbytes +=
+ tctx->dump_cnts.accumbytes;
+ }
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ not_reached();
+ }
+}
+
+static void
+prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
+ gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ if (opt_prof_accum) {
+ gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
+ gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
+ }
+}
+
+static prof_tctx_t *
+prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
+ break;
+ default:
+ not_reached();
+ }
+
+ return NULL;
+}
+
+struct prof_tctx_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ bool propagate_err;
+};
+
+static prof_tctx_t *
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
+ struct prof_tctx_dump_iter_arg_s *arg =
+ (struct prof_tctx_dump_iter_arg_s *)opaque;
+
+ malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ case prof_tctx_state_nominal:
+ /* Not captured by this dump. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ if (prof_dump_printf(arg->propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
+ "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
+ tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
+ tctx->dump_cnts.accumbytes)) {
+ return tctx;
+ }
+ break;
+ default:
+ not_reached();
+ }
+ return NULL;
+}
+
+static prof_tctx_t *
+prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+ prof_tctx_t *ret;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ tctx->state = prof_tctx_state_nominal;
+ break;
+ case prof_tctx_state_purgatory:
+ ret = tctx;
+ goto label_return;
+ default:
+ not_reached();
+ }
+
+ ret = NULL;
+label_return:
+ return ret;
+}
+
+static void
+prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
cassert(config_prof);
- malloc_mutex_lock(tsdn, gctx->lock);
+ malloc_mutex_lock(tsdn, gctx->lock);
/*
- * Increment nlimbo so that gctx won't go away before dump.
- * Additionally, link gctx into the dump list so that it is included in
+ * Increment nlimbo so that gctx won't go away before dump.
+ * Additionally, link gctx into the dump list so that it is included in
* prof_dump()'s second pass.
*/
- gctx->nlimbo++;
- gctx_tree_insert(gctxs, gctx);
-
- memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
-
- malloc_mutex_unlock(tsdn, gctx->lock);
-}
-
-struct prof_gctx_merge_iter_arg_s {
- tsdn_t *tsdn;
- size_t leak_ngctx;
-};
-
-static prof_gctx_t *
-prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
- struct prof_gctx_merge_iter_arg_s *arg =
- (struct prof_gctx_merge_iter_arg_s *)opaque;
-
- malloc_mutex_lock(arg->tsdn, gctx->lock);
- tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
- (void *)arg->tsdn);
- if (gctx->cnt_summed.curobjs != 0) {
- arg->leak_ngctx++;
- }
- malloc_mutex_unlock(arg->tsdn, gctx->lock);
-
- return NULL;
-}
-
-static void
-prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
- prof_tdata_t *tdata = prof_tdata_get(tsd, false);
- prof_gctx_t *gctx;
-
- /*
- * Standard tree iteration won't work here, because as soon as we
- * decrement gctx->nlimbo and unlock gctx, another thread can
- * concurrently destroy it, which will corrupt the tree. Therefore,
- * tear down the tree one node at a time during iteration.
- */
- while ((gctx = gctx_tree_first(gctxs)) != NULL) {
- gctx_tree_remove(gctxs, gctx);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- {
- prof_tctx_t *next;
-
- next = NULL;
- do {
- prof_tctx_t *to_destroy =
- tctx_tree_iter(&gctx->tctxs, next,
- prof_tctx_finish_iter,
- (void *)tsd_tsdn(tsd));
- if (to_destroy != NULL) {
- next = tctx_tree_next(&gctx->tctxs,
- to_destroy);
- tctx_tree_remove(&gctx->tctxs,
- to_destroy);
- idalloctm(tsd_tsdn(tsd), to_destroy,
- NULL, NULL, true, true);
- } else {
- next = NULL;
- }
- } while (next != NULL);
- }
- gctx->nlimbo--;
- if (prof_gctx_should_destroy(gctx)) {
- gctx->nlimbo++;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- } else {
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- }
- }
-}
-
-struct prof_tdata_merge_iter_arg_s {
- tsdn_t *tsdn;
- prof_cnt_t cnt_all;
-};
-
-static prof_tdata_t *
-prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *opaque) {
- struct prof_tdata_merge_iter_arg_s *arg =
- (struct prof_tdata_merge_iter_arg_s *)opaque;
-
- malloc_mutex_lock(arg->tsdn, tdata->lock);
- if (!tdata->expired) {
- size_t tabind;
- union {
- prof_tctx_t *p;
- void *v;
- } tctx;
-
- tdata->dumping = true;
- memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
- for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
- &tctx.v);) {
- prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
- }
-
- arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
- arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
- if (opt_prof_accum) {
- arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
- arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
- }
+ gctx->nlimbo++;
+ gctx_tree_insert(gctxs, gctx);
+
+ memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
+
+ malloc_mutex_unlock(tsdn, gctx->lock);
+}
+
+struct prof_gctx_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ size_t leak_ngctx;
+};
+
+static prof_gctx_t *
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
+ struct prof_gctx_merge_iter_arg_s *arg =
+ (struct prof_gctx_merge_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
+ (void *)arg->tsdn);
+ if (gctx->cnt_summed.curobjs != 0) {
+ arg->leak_ngctx++;
+ }
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+
+ return NULL;
+}
+
+static void
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
+ prof_tdata_t *tdata = prof_tdata_get(tsd, false);
+ prof_gctx_t *gctx;
+
+ /*
+ * Standard tree iteration won't work here, because as soon as we
+ * decrement gctx->nlimbo and unlock gctx, another thread can
+ * concurrently destroy it, which will corrupt the tree. Therefore,
+ * tear down the tree one node at a time during iteration.
+ */
+ while ((gctx = gctx_tree_first(gctxs)) != NULL) {
+ gctx_tree_remove(gctxs, gctx);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ {
+ prof_tctx_t *next;
+
+ next = NULL;
+ do {
+ prof_tctx_t *to_destroy =
+ tctx_tree_iter(&gctx->tctxs, next,
+ prof_tctx_finish_iter,
+ (void *)tsd_tsdn(tsd));
+ if (to_destroy != NULL) {
+ next = tctx_tree_next(&gctx->tctxs,
+ to_destroy);
+ tctx_tree_remove(&gctx->tctxs,
+ to_destroy);
+ idalloctm(tsd_tsdn(tsd), to_destroy,
+ NULL, NULL, true, true);
+ } else {
+ next = NULL;
+ }
+ } while (next != NULL);
+ }
+ gctx->nlimbo--;
+ if (prof_gctx_should_destroy(gctx)) {
+ gctx->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ } else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
+ }
+}
+
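prof_gctx_finish() above cannot use an ordinary tree walk, because once nlimbo is decremented and the lock dropped, another thread may destroy the node that was just visited; it therefore detaches the first node before touching it, every iteration. The same detach-then-process discipline, sketched on a plain singly linked list with invented types:

#include <stdio.h>
#include <stdlib.h>

typedef struct node_s {
        struct node_s *next;
        int val;
} node_t;

int
main(void) {
        /* Build a small list: 3 -> 2 -> 1 -> NULL. */
        node_t *head = NULL;
        for (int i = 1; i <= 3; i++) {
                node_t *n = malloc(sizeof(*n));
                n->val = i;
                n->next = head;
                head = n;
        }

        /*
         * Tear down one node at a time: detach first, then process/free.
         * No node is ever dereferenced after it has been handed off for
         * destruction, which is the property prof_gctx_finish() needs.
         */
        while (head != NULL) {
                node_t *n = head;
                head = n->next;   /* detach before doing anything else */
                printf("finishing node %d\n", n->val);
                free(n);
        }
        return 0;
}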
+struct prof_tdata_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ prof_cnt_t cnt_all;
+};
+
+static prof_tdata_t *
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *opaque) {
+ struct prof_tdata_merge_iter_arg_s *arg =
+ (struct prof_tdata_merge_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, tdata->lock);
+ if (!tdata->expired) {
+ size_t tabind;
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } tctx;
+
+ tdata->dumping = true;
+ memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
+ for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
+ &tctx.v);) {
+ prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
+ }
+
+ arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
+ arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
+ if (opt_prof_accum) {
+ arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
+ arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
+ }
} else {
- tdata->dumping = false;
- }
- malloc_mutex_unlock(arg->tsdn, tdata->lock);
-
- return NULL;
-}
-
-static prof_tdata_t *
-prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *arg) {
- bool propagate_err = *(bool *)arg;
-
- if (!tdata->dumping) {
- return NULL;
- }
-
- if (prof_dump_printf(propagate_err,
- " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
- tdata->thr_uid, tdata->cnt_summed.curobjs,
- tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
- tdata->cnt_summed.accumbytes,
- (tdata->thread_name != NULL) ? " " : "",
- (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
- return tdata;
- }
- return NULL;
-}
-
-static bool
-prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err,
- const prof_cnt_t *cnt_all) {
- bool ret;
-
- if (prof_dump_printf(propagate_err,
- "heap_v2/%"FMTu64"\n"
- " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
- ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
- cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
- return true;
- }
-
- malloc_mutex_lock(tsdn, &tdatas_mtx);
- ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
- (void *)&propagate_err) != NULL);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
- return ret;
-}
-prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl;
+ tdata->dumping = false;
+ }
+ malloc_mutex_unlock(arg->tsdn, tdata->lock);
+
+ return NULL;
+}
+
+static prof_tdata_t *
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
+ bool propagate_err = *(bool *)arg;
+
+ if (!tdata->dumping) {
+ return NULL;
+ }
+
+ if (prof_dump_printf(propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
+ tdata->thr_uid, tdata->cnt_summed.curobjs,
+ tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
+ tdata->cnt_summed.accumbytes,
+ (tdata->thread_name != NULL) ? " " : "",
+ (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
+ return tdata;
+ }
+ return NULL;
+}
+
+static bool
+prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err,
+ const prof_cnt_t *cnt_all) {
+ bool ret;
+
+ if (prof_dump_printf(propagate_err,
+ "heap_v2/%"FMTu64"\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
+ cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
+ return true;
+ }
+
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
+ (void *)&propagate_err) != NULL);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+ return ret;
+}
+prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl;
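Together, prof_dump_header_impl() and prof_tdata_dump_iter() emit the heap_v2 header of the profile. The tiny standalone program below only reproduces the shape of that output, with made-up counts, a made-up thread name, and a 2^19 sample period:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
        /* All numbers here are invented; 524288 is 2^19. */
        uint64_t sample = (uint64_t)1 << 19;
        printf("heap_v2/%" PRIu64 "\n", sample);
        /* Merged totals first, then one line per dumping thread. */
        printf(" t*: %" PRIu64 ": %" PRIu64 " [%" PRIu64 ": %" PRIu64 "]\n",
            (uint64_t)12, (uint64_t)49152, (uint64_t)0, (uint64_t)0);
        printf(" t%" PRIu64 ": %" PRIu64 ": %" PRIu64 " [%" PRIu64 ": %" PRIu64 "]%s%s\n",
            (uint64_t)0, (uint64_t)7, (uint64_t)28672, (uint64_t)0, (uint64_t)0,
            " ", "worker");
        return 0;
}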
static bool
-prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
- const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
+prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
+ const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
bool ret;
unsigned i;
- struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
+ struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
cassert(config_prof);
- malloc_mutex_assert_owner(tsdn, gctx->lock);
-
- /* Avoid dumping such gctx's that have no useful data. */
- if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
- (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
- assert(gctx->cnt_summed.curobjs == 0);
- assert(gctx->cnt_summed.curbytes == 0);
- assert(gctx->cnt_summed.accumobjs == 0);
- assert(gctx->cnt_summed.accumbytes == 0);
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
+ /* Avoid dumping such gctx's that have no useful data. */
+ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
+ (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
+ assert(gctx->cnt_summed.curobjs == 0);
+ assert(gctx->cnt_summed.curbytes == 0);
+ assert(gctx->cnt_summed.accumobjs == 0);
+ assert(gctx->cnt_summed.accumbytes == 0);
ret = false;
goto label_return;
}
- if (prof_dump_printf(propagate_err, "@")) {
+ if (prof_dump_printf(propagate_err, "@")) {
ret = true;
goto label_return;
}
for (i = 0; i < bt->len; i++) {
- if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
+ if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
(uintptr_t)bt->vec[i])) {
ret = true;
goto label_return;
}
}
- if (prof_dump_printf(propagate_err,
- "\n"
- " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
- gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
- gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
- ret = true;
- goto label_return;
- }
-
- prof_tctx_dump_iter_arg.tsdn = tsdn;
- prof_tctx_dump_iter_arg.propagate_err = propagate_err;
- if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
- (void *)&prof_tctx_dump_iter_arg) != NULL) {
+ if (prof_dump_printf(propagate_err,
+ "\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
+ gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
ret = true;
goto label_return;
}
+ prof_tctx_dump_iter_arg.tsdn = tsdn;
+ prof_tctx_dump_iter_arg.propagate_err = propagate_err;
+ if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
+ (void *)&prof_tctx_dump_iter_arg) != NULL) {
+ ret = true;
+ goto label_return;
+ }
+
ret = false;
label_return:
- return ret;
-}
-
-#ifndef _WIN32
-JEMALLOC_FORMAT_PRINTF(1, 2)
-static int
-prof_open_maps(const char *format, ...) {
- int mfd;
- va_list ap;
- char filename[PATH_MAX + 1];
-
- va_start(ap, format);
- malloc_vsnprintf(filename, sizeof(filename), format, ap);
- va_end(ap);
-
-#if defined(O_CLOEXEC)
- mfd = open(filename, O_RDONLY | O_CLOEXEC);
-#else
- mfd = open(filename, O_RDONLY);
- if (mfd != -1) {
- fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
- }
-#endif
-
- return mfd;
-}
-#endif
-
-static int
-prof_getpid(void) {
-#ifdef _WIN32
- return GetCurrentProcessId();
-#else
- return getpid();
-#endif
-}
-
+ return ret;
+}
+
+#ifndef _WIN32
+JEMALLOC_FORMAT_PRINTF(1, 2)
+static int
+prof_open_maps(const char *format, ...) {
+ int mfd;
+ va_list ap;
+ char filename[PATH_MAX + 1];
+
+ va_start(ap, format);
+ malloc_vsnprintf(filename, sizeof(filename), format, ap);
+ va_end(ap);
+
+#if defined(O_CLOEXEC)
+ mfd = open(filename, O_RDONLY | O_CLOEXEC);
+#else
+ mfd = open(filename, O_RDONLY);
+ if (mfd != -1) {
+ fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
+ }
+#endif
+
+ return mfd;
+}
+#endif
+
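The O_CLOEXEC/fcntl() dance in prof_open_maps() is the usual portability fallback for platforms whose open(2) lacks O_CLOEXEC. The same idiom in isolation; opening /dev/null here is purely illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
open_cloexec(const char *path) {
#if defined(O_CLOEXEC)
        /* Atomic: no window in which a forked child could inherit the fd. */
        return open(path, O_RDONLY | O_CLOEXEC);
#else
        /* Fallback: set the flag after the fact; a small race remains. */
        int fd = open(path, O_RDONLY);
        if (fd != -1) {
                fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
        }
        return fd;
#endif
}

int
main(void) {
        int fd = open_cloexec("/dev/null");
        if (fd == -1) {
                perror("open");
                return 1;
        }
        printf("opened fd %d with close-on-exec set\n", fd);
        close(fd);
        return 0;
}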
+static int
+prof_getpid(void) {
+#ifdef _WIN32
+ return GetCurrentProcessId();
+#else
+ return getpid();
+#endif
+}
+
static bool
-prof_dump_maps(bool propagate_err) {
+prof_dump_maps(bool propagate_err) {
bool ret;
int mfd;
cassert(config_prof);
#ifdef __FreeBSD__
- mfd = prof_open_maps("/proc/curproc/map");
-#elif defined(_WIN32)
- mfd = -1; // Not implemented
+ mfd = prof_open_maps("/proc/curproc/map");
+#elif defined(_WIN32)
+ mfd = -1; // Not implemented
#else
- {
- int pid = prof_getpid();
-
- mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
- if (mfd == -1) {
- mfd = prof_open_maps("/proc/%d/maps", pid);
- }
- }
+ {
+ int pid = prof_getpid();
+
+ mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
+ if (mfd == -1) {
+ mfd = prof_open_maps("/proc/%d/maps", pid);
+ }
+ }
#endif
if (mfd != -1) {
ssize_t nread;
@@ -1754,9 +1754,9 @@ prof_dump_maps(bool propagate_err) {
goto label_return;
}
}
- nread = malloc_read_fd(mfd,
- &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE
- - prof_dump_buf_end);
+ nread = malloc_read_fd(mfd,
+ &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE
+ - prof_dump_buf_end);
} while (nread > 0);
} else {
ret = true;
@@ -1765,391 +1765,391 @@ prof_dump_maps(bool propagate_err) {
ret = false;
label_return:
- if (mfd != -1) {
+ if (mfd != -1) {
close(mfd);
- }
- return ret;
+ }
+ return ret;
}
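prof_dump_maps() simply streams the maps file into the dump buffer with repeated reads. A minimal reader along the same lines, Linux-specific (/proc/self/maps) and with an arbitrary 4 KiB buffer:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void) {
        int mfd = open("/proc/self/maps", O_RDONLY);
        if (mfd == -1) {
                perror("open");
                return 1;
        }
        char buf[4096];
        ssize_t nread;
        /* Keep reading until EOF (0) or error (-1), like prof_dump_maps(). */
        while ((nread = read(mfd, buf, sizeof(buf))) > 0) {
                (void)write(STDOUT_FILENO, buf, (size_t)nread);
        }
        close(mfd);
        return 0;
}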
-/*
- * See prof_sample_threshold_update() comment for why the body of this function
- * is conditionally compiled.
- */
+/*
+ * See prof_sample_threshold_update() comment for why the body of this function
+ * is conditionally compiled.
+ */
static void
-prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
- const char *filename) {
-#ifdef JEMALLOC_PROF
- /*
- * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
- * differ slightly from what jeprof reports, because here we scale the
- * summary values, whereas jeprof scales each context individually and
- * reports the sums of the scaled values.
- */
- if (cnt_all->curbytes != 0) {
- double sample_period = (double)((uint64_t)1 << lg_prof_sample);
- double ratio = (((double)cnt_all->curbytes) /
- (double)cnt_all->curobjs) / sample_period;
- double scale_factor = 1.0 / (1.0 - exp(-ratio));
- uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
- * scale_factor);
- uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
- scale_factor);
-
- malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
- " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
- curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
- 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
+ const char *filename) {
+#ifdef JEMALLOC_PROF
+ /*
+ * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
+ * differ slightly from what jeprof reports, because here we scale the
+ * summary values, whereas jeprof scales each context individually and
+ * reports the sums of the scaled values.
+ */
+ if (cnt_all->curbytes != 0) {
+ double sample_period = (double)((uint64_t)1 << lg_prof_sample);
+ double ratio = (((double)cnt_all->curbytes) /
+ (double)cnt_all->curobjs) / sample_period;
+ double scale_factor = 1.0 / (1.0 - exp(-ratio));
+ uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
+ * scale_factor);
+ uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
+ scale_factor);
+
+ malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
+ " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
+ curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
+ 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf(
- "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
+ "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename);
}
-#endif
-}
-
-struct prof_gctx_dump_iter_arg_s {
- tsdn_t *tsdn;
- bool propagate_err;
-};
-
-static prof_gctx_t *
-prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
- prof_gctx_t *ret;
- struct prof_gctx_dump_iter_arg_s *arg =
- (struct prof_gctx_dump_iter_arg_s *)opaque;
-
- malloc_mutex_lock(arg->tsdn, gctx->lock);
-
- if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
- gctxs)) {
- ret = gctx;
- goto label_return;
- }
-
- ret = NULL;
-label_return:
- malloc_mutex_unlock(arg->tsdn, gctx->lock);
- return ret;
-}
-
-static void
-prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
- struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
- struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
- prof_gctx_tree_t *gctxs) {
+#endif
+}
+
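The scale factor above corrects for sampling: with sample period s and average sampled object size b, an object is recorded with probability roughly 1 - e^(-b/s), so the summed counters are multiplied by 1/(1 - e^(-ratio)). A small worked example with invented counters (compile with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
        /* Made-up sampled totals and a 2^19-byte sample period. */
        uint64_t curbytes = 49152, curobjs = 12;
        double sample_period = (double)((uint64_t)1 << 19);

        double ratio = ((double)curbytes / (double)curobjs) / sample_period;
        double scale_factor = 1.0 / (1.0 - exp(-ratio));

        printf("scale factor      : %.1f\n", scale_factor);
        printf("estimated bytes   : %.0f\n", round((double)curbytes * scale_factor));
        printf("estimated objects : %.0f\n", round((double)curobjs * scale_factor));
        return 0;
}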
+struct prof_gctx_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ bool propagate_err;
+};
+
+static prof_gctx_t *
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
+ prof_gctx_t *ret;
+ struct prof_gctx_dump_iter_arg_s *arg =
+ (struct prof_gctx_dump_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+
+ if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
+ gctxs)) {
+ ret = gctx;
+ goto label_return;
+ }
+
+ ret = NULL;
+label_return:
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+ return ret;
+}
+
+static void
+prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
+ struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
+ struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
+ prof_gctx_tree_t *gctxs) {
size_t tabind;
union {
- prof_gctx_t *p;
+ prof_gctx_t *p;
void *v;
- } gctx;
-
- prof_enter(tsd, tdata);
-
- /*
- * Put gctx's in limbo and clear their counters in preparation for
- * summing.
- */
- gctx_tree_new(gctxs);
- for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
- prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
- }
-
- /*
- * Iterate over tdatas, and for the non-expired ones snapshot their tctx
- * stats and merge them into the associated gctx's.
- */
- prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
- memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
- (void *)prof_tdata_merge_iter_arg);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-
- /* Merge tctx stats into gctx's. */
- prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
- prof_gctx_merge_iter_arg->leak_ngctx = 0;
- gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
- (void *)prof_gctx_merge_iter_arg);
-
- prof_leave(tsd, tdata);
-}
-
-static bool
-prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
- bool leakcheck, prof_tdata_t *tdata,
- struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
- struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
- struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
- prof_gctx_tree_t *gctxs) {
+ } gctx;
+
+ prof_enter(tsd, tdata);
+
+ /*
+ * Put gctx's in limbo and clear their counters in preparation for
+ * summing.
+ */
+ gctx_tree_new(gctxs);
+ for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
+ prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
+ }
+
+ /*
+ * Iterate over tdatas, and for the non-expired ones snapshot their tctx
+ * stats and merge them into the associated gctx's.
+ */
+ prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
+ memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
+ (void *)prof_tdata_merge_iter_arg);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ /* Merge tctx stats into gctx's. */
+ prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
+ prof_gctx_merge_iter_arg->leak_ngctx = 0;
+ gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
+ (void *)prof_gctx_merge_iter_arg);
+
+ prof_leave(tsd, tdata);
+}
+
+static bool
+prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
+ bool leakcheck, prof_tdata_t *tdata,
+ struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
+ struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
+ struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
+ prof_gctx_tree_t *gctxs) {
/* Create dump file. */
- if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
- return true;
- }
+ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
+ return true;
+ }
/* Dump profile header. */
- if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
- &prof_tdata_merge_iter_arg->cnt_all)) {
+ if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
+ &prof_tdata_merge_iter_arg->cnt_all)) {
goto label_write_error;
- }
+ }
- /* Dump per gctx profile stats. */
- prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
- prof_gctx_dump_iter_arg->propagate_err = propagate_err;
- if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
- (void *)prof_gctx_dump_iter_arg) != NULL) {
- goto label_write_error;
+ /* Dump per gctx profile stats. */
+ prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
+ prof_gctx_dump_iter_arg->propagate_err = propagate_err;
+ if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
+ (void *)prof_gctx_dump_iter_arg) != NULL) {
+ goto label_write_error;
}
/* Dump /proc/<pid>/maps if possible. */
- if (prof_dump_maps(propagate_err)) {
+ if (prof_dump_maps(propagate_err)) {
goto label_write_error;
- }
+ }
- if (prof_dump_close(propagate_err)) {
- return true;
- }
+ if (prof_dump_close(propagate_err)) {
+ return true;
+ }
- return false;
+ return false;
label_write_error:
prof_dump_close(propagate_err);
- return true;
-}
-
-static bool
-prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
- bool leakcheck) {
- cassert(config_prof);
- assert(tsd_reentrancy_level_get(tsd) == 0);
-
- prof_tdata_t * tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL) {
- return true;
- }
-
- pre_reentrancy(tsd, NULL);
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
-
- prof_gctx_tree_t gctxs;
- struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
- struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
- struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
- prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
- &prof_gctx_merge_iter_arg, &gctxs);
- bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata,
- &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
- &prof_gctx_dump_iter_arg, &gctxs);
- prof_gctx_finish(tsd, &gctxs);
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
- post_reentrancy(tsd);
-
- if (err) {
- return true;
- }
-
- if (leakcheck) {
- prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
- prof_gctx_merge_iter_arg.leak_ngctx, filename);
- }
- return false;
-}
-
-#ifdef JEMALLOC_JET
-void
-prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
- uint64_t *accumbytes) {
- tsd_t *tsd;
- prof_tdata_t *tdata;
- struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
- struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
- prof_gctx_tree_t gctxs;
-
- tsd = tsd_fetch();
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL) {
- if (curobjs != NULL) {
- *curobjs = 0;
- }
- if (curbytes != NULL) {
- *curbytes = 0;
- }
- if (accumobjs != NULL) {
- *accumobjs = 0;
- }
- if (accumbytes != NULL) {
- *accumbytes = 0;
- }
- return;
- }
-
- prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
- &prof_gctx_merge_iter_arg, &gctxs);
- prof_gctx_finish(tsd, &gctxs);
-
- if (curobjs != NULL) {
- *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
- }
- if (curbytes != NULL) {
- *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
- }
- if (accumobjs != NULL) {
- *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
- }
- if (accumbytes != NULL) {
- *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
- }
-}
-#endif
-
-#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
-#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
+ return true;
+}
+
+static bool
+prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
+ bool leakcheck) {
+ cassert(config_prof);
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+ prof_tdata_t * tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return true;
+ }
+
+ pre_reentrancy(tsd, NULL);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+
+ prof_gctx_tree_t gctxs;
+ struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+ struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
+ struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
+ prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
+ &prof_gctx_merge_iter_arg, &gctxs);
+ bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata,
+ &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
+ &prof_gctx_dump_iter_arg, &gctxs);
+ prof_gctx_finish(tsd, &gctxs);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ post_reentrancy(tsd);
+
+ if (err) {
+ return true;
+ }
+
+ if (leakcheck) {
+ prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
+ prof_gctx_merge_iter_arg.leak_ngctx, filename);
+ }
+ return false;
+}
+
+#ifdef JEMALLOC_JET
+void
+prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
+ uint64_t *accumbytes) {
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+ struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+ struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
+ prof_gctx_tree_t gctxs;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ if (curobjs != NULL) {
+ *curobjs = 0;
+ }
+ if (curbytes != NULL) {
+ *curbytes = 0;
+ }
+ if (accumobjs != NULL) {
+ *accumobjs = 0;
+ }
+ if (accumbytes != NULL) {
+ *accumbytes = 0;
+ }
+ return;
+ }
+
+ prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
+ &prof_gctx_merge_iter_arg, &gctxs);
+ prof_gctx_finish(tsd, &gctxs);
+
+ if (curobjs != NULL) {
+ *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
+ }
+ if (curbytes != NULL) {
+ *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
+ }
+ if (accumobjs != NULL) {
+ *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
+ }
+ if (accumbytes != NULL) {
+ *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
+ }
+}
+#endif
+
+#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
-prof_dump_filename(char *filename, char v, uint64_t vseq) {
+prof_dump_filename(char *filename, char v, uint64_t vseq) {
cassert(config_prof);
if (vseq != VSEQ_INVALID) {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"FMTu64".%c%"FMTu64".heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
+ "%s.%d.%"FMTu64".%c%"FMTu64".heap",
+ opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"FMTu64".%c.heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
+ "%s.%d.%"FMTu64".%c.heap",
+ opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
}
prof_dump_seq++;
}
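prof_dump_filename() above produces names of the form <prefix>.<pid>.<seq>.<v><vseq>.heap, or <prefix>.<pid>.<seq>.<v>.heap when vseq is VSEQ_INVALID. A quick illustration with a made-up pid and sequence numbers; "jeprof" stands in for opt_prof_prefix:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
        char filename[256];
        const char *prefix = "jeprof";
        int pid = 12345;
        uint64_t seq = 0, vseq = 3;

        /* "<prefix>.<pid>.<seq>.<v><vseq>.heap", the vseq != VSEQ_INVALID case. */
        snprintf(filename, sizeof(filename), "%s.%d.%" PRIu64 ".%c%" PRIu64 ".heap",
            prefix, pid, seq, 'i', vseq);
        puts(filename);   /* prints: jeprof.12345.0.i3.heap */
        return 0;
}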
static void
-prof_fdump(void) {
- tsd_t *tsd;
+prof_fdump(void) {
+ tsd_t *tsd;
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
- assert(opt_prof_final);
- assert(opt_prof_prefix[0] != '\0');
+ assert(opt_prof_final);
+ assert(opt_prof_prefix[0] != '\0');
- if (!prof_booted) {
+ if (!prof_booted) {
return;
- }
- tsd = tsd_fetch();
- assert(tsd_reentrancy_level_get(tsd) == 0);
-
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, opt_prof_leak);
-}
-
-bool
-prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
- cassert(config_prof);
-
-#ifndef JEMALLOC_ATOMIC_U64
- if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
- WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
- return true;
- }
- prof_accum->accumbytes = 0;
-#else
- atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
-#endif
- return false;
+ }
+ tsd = tsd_fetch();
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'f', VSEQ_INVALID);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, opt_prof_leak);
+}
+
+bool
+prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
+ cassert(config_prof);
+
+#ifndef JEMALLOC_ATOMIC_U64
+ if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
+ WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ prof_accum->accumbytes = 0;
+#else
+ atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
+#endif
+ return false;
}
void
-prof_idump(tsdn_t *tsdn) {
- tsd_t *tsd;
- prof_tdata_t *tdata;
+prof_idump(tsdn_t *tsdn) {
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
+ if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
return;
- }
- tsd = tsdn_tsd(tsdn);
- if (tsd_reentrancy_level_get(tsd) > 0) {
+ }
+ tsd = tsdn_tsd(tsdn);
+ if (tsd_reentrancy_level_get(tsd) > 0) {
return;
- }
-
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL) {
- return;
- }
- if (tdata->enq) {
- tdata->enq_idump = true;
+ }
+
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
return;
}
+ if (tdata->enq) {
+ tdata->enq_idump = true;
+ return;
+ }
if (opt_prof_prefix[0] != '\0') {
- char filename[PATH_MAX + 1];
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ char filename[PATH_MAX + 1];
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, false);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, false);
}
}
bool
-prof_mdump(tsd_t *tsd, const char *filename) {
+prof_mdump(tsd_t *tsd, const char *filename) {
cassert(config_prof);
- assert(tsd_reentrancy_level_get(tsd) == 0);
+ assert(tsd_reentrancy_level_get(tsd) == 0);
- if (!opt_prof || !prof_booted) {
- return true;
- }
- char filename_buf[DUMP_FILENAME_BUFSIZE];
+ if (!opt_prof || !prof_booted) {
+ return true;
+ }
+ char filename_buf[DUMP_FILENAME_BUFSIZE];
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
- if (opt_prof_prefix[0] == '\0') {
- return true;
- }
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ if (opt_prof_prefix[0] == '\0') {
+ return true;
+ }
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
filename = filename_buf;
}
- return prof_dump(tsd, true, filename, false);
+ return prof_dump(tsd, true, filename, false);
}
void
-prof_gdump(tsdn_t *tsdn) {
- tsd_t *tsd;
- prof_tdata_t *tdata;
+prof_gdump(tsdn_t *tsdn) {
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
+ if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
return;
- }
- tsd = tsdn_tsd(tsdn);
- if (tsd_reentrancy_level_get(tsd) > 0) {
- return;
- }
-
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL) {
+ }
+ tsd = tsdn_tsd(tsdn);
+ if (tsd_reentrancy_level_get(tsd) > 0) {
return;
- }
- if (tdata->enq) {
- tdata->enq_gdump = true;
+ }
+
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
return;
}
+ if (tdata->enq) {
+ tdata->enq_gdump = true;
+ return;
+ }
if (opt_prof_prefix[0] != '\0') {
- char filename[DUMP_FILENAME_BUFSIZE];
- malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
+ char filename[DUMP_FILENAME_BUFSIZE];
+ malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
- malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, false);
+ malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, false);
}
}
static void
-prof_bt_hash(const void *key, size_t r_hash[2]) {
+prof_bt_hash(const void *key, size_t r_hash[2]) {
prof_bt_t *bt = (prof_bt_t *)key;
cassert(config_prof);
@@ -2158,778 +2158,778 @@ prof_bt_hash(const void *key, size_t r_hash[2]) {
}
static bool
-prof_bt_keycomp(const void *k1, const void *k2) {
+prof_bt_keycomp(const void *k1, const void *k2) {
const prof_bt_t *bt1 = (prof_bt_t *)k1;
const prof_bt_t *bt2 = (prof_bt_t *)k2;
cassert(config_prof);
- if (bt1->len != bt2->len) {
- return false;
- }
+ if (bt1->len != bt2->len) {
+ return false;
+ }
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
-static void
-prof_bt_node_hash(const void *key, size_t r_hash[2]) {
- const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
- prof_bt_hash((void *)(&bt_node->bt), r_hash);
-}
-
-static bool
-prof_bt_node_keycomp(const void *k1, const void *k2) {
- const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
- const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
- return prof_bt_keycomp((void *)(&bt_node1->bt),
- (void *)(&bt_node2->bt));
-}
-
-static void
-prof_thr_node_hash(const void *key, size_t r_hash[2]) {
- const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
- hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
-}
-
-static bool
-prof_thr_node_keycomp(const void *k1, const void *k2) {
- const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
- const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
- return thr_node1->thr_uid == thr_node2->thr_uid;
-}
-
-static uint64_t
-prof_thr_uid_alloc(tsdn_t *tsdn) {
- uint64_t thr_uid;
-
- malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
- thr_uid = next_thr_uid;
- next_thr_uid++;
- malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
-
- return thr_uid;
-}
-
-static prof_tdata_t *
-prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
- char *thread_name, bool active) {
- prof_tdata_t *tdata;
-
+static void
+prof_bt_node_hash(const void *key, size_t r_hash[2]) {
+ const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
+ prof_bt_hash((void *)(&bt_node->bt), r_hash);
+}
+
+static bool
+prof_bt_node_keycomp(const void *k1, const void *k2) {
+ const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
+ const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
+ return prof_bt_keycomp((void *)(&bt_node1->bt),
+ (void *)(&bt_node2->bt));
+}
+
+static void
+prof_thr_node_hash(const void *key, size_t r_hash[2]) {
+ const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
+ hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
+}
+
+static bool
+prof_thr_node_keycomp(const void *k1, const void *k2) {
+ const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
+ const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
+ return thr_node1->thr_uid == thr_node2->thr_uid;
+}
+
+static uint64_t
+prof_thr_uid_alloc(tsdn_t *tsdn) {
+ uint64_t thr_uid;
+
+ malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
+ thr_uid = next_thr_uid;
+ next_thr_uid++;
+ malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
+
+ return thr_uid;
+}
+
+static prof_tdata_t *
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+ char *thread_name, bool active) {
+ prof_tdata_t *tdata;
+
cassert(config_prof);
/* Initialize an empty cache for this thread. */
- tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
- sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
- if (tdata == NULL) {
- return NULL;
- }
-
- tdata->lock = prof_tdata_mutex_choose(thr_uid);
- tdata->thr_uid = thr_uid;
- tdata->thr_discrim = thr_discrim;
- tdata->thread_name = thread_name;
- tdata->attached = true;
- tdata->expired = false;
- tdata->tctx_uid_next = 0;
-
- if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp)) {
- idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
- return NULL;
- }
-
- tdata->prng_state = (uint64_t)(uintptr_t)tdata;
- prof_sample_threshold_update(tdata);
-
- tdata->enq = false;
- tdata->enq_idump = false;
- tdata->enq_gdump = false;
-
- tdata->dumping = false;
- tdata->active = active;
-
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- tdata_tree_insert(&tdatas, tdata);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-
- return tdata;
-}
-
-prof_tdata_t *
-prof_tdata_init(tsd_t *tsd) {
- return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
- NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
-}
-
-static bool
-prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
- if (tdata->attached && !even_if_attached) {
- return false;
- }
- if (ckh_count(&tdata->bt2tctx) != 0) {
- return false;
- }
- return true;
-}
-
-static bool
-prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
- bool even_if_attached) {
- malloc_mutex_assert_owner(tsdn, tdata->lock);
-
- return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
-}
-
-static void
-prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
- bool even_if_attached) {
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
-
- tdata_tree_remove(&tdatas, tdata);
-
- assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
-
- if (tdata->thread_name != NULL) {
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
- true);
- }
- ckh_delete(tsd, &tdata->bt2tctx);
- idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
-}
-
-static void
-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-}
-
-static void
-prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
- bool destroy_tdata;
-
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
- if (tdata->attached) {
- destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
- true);
+ tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
+ sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+ if (tdata == NULL) {
+ return NULL;
+ }
+
+ tdata->lock = prof_tdata_mutex_choose(thr_uid);
+ tdata->thr_uid = thr_uid;
+ tdata->thr_discrim = thr_discrim;
+ tdata->thread_name = thread_name;
+ tdata->attached = true;
+ tdata->expired = false;
+ tdata->tctx_uid_next = 0;
+
+ if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp)) {
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
+ return NULL;
+ }
+
+ tdata->prng_state = (uint64_t)(uintptr_t)tdata;
+ prof_sample_threshold_update(tdata);
+
+ tdata->enq = false;
+ tdata->enq_idump = false;
+ tdata->enq_gdump = false;
+
+ tdata->dumping = false;
+ tdata->active = active;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_insert(&tdatas, tdata);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ return tdata;
+}
+
+prof_tdata_t *
+prof_tdata_init(tsd_t *tsd) {
+ return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
+ NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
+}
+
+static bool
+prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
+ if (tdata->attached && !even_if_attached) {
+ return false;
+ }
+ if (ckh_count(&tdata->bt2tctx) != 0) {
+ return false;
+ }
+ return true;
+}
+
+static bool
+prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached) {
+ malloc_mutex_assert_owner(tsdn, tdata->lock);
+
+ return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
+}
+
+static void
+prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
+
+ tdata_tree_remove(&tdatas, tdata);
+
+ assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
+ true);
+ }
+ ckh_delete(tsd, &tdata->bt2tctx);
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
+}
+
+static void
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+}
+
+static void
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ if (tdata->attached) {
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
+ true);
/*
- * Only detach if !destroy_tdata, because detaching would allow
- * another thread to win the race to destroy tdata.
+ * Only detach if !destroy_tdata, because detaching would allow
+ * another thread to win the race to destroy tdata.
*/
- if (!destroy_tdata) {
- tdata->attached = false;
- }
- tsd_prof_tdata_set(tsd, NULL);
- } else {
- destroy_tdata = false;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (destroy_tdata) {
- prof_tdata_destroy(tsd, tdata, true);
- }
-}
-
-prof_tdata_t *
-prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
- uint64_t thr_uid = tdata->thr_uid;
- uint64_t thr_discrim = tdata->thr_discrim + 1;
- char *thread_name = (tdata->thread_name != NULL) ?
- prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
- bool active = tdata->active;
-
- prof_tdata_detach(tsd, tdata);
- return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
- active);
-}
-
-static bool
-prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
- bool destroy_tdata;
-
- malloc_mutex_lock(tsdn, tdata->lock);
- if (!tdata->expired) {
- tdata->expired = true;
- destroy_tdata = tdata->attached ? false :
- prof_tdata_should_destroy(tsdn, tdata, false);
- } else {
- destroy_tdata = false;
- }
- malloc_mutex_unlock(tsdn, tdata->lock);
-
- return destroy_tdata;
-}
-
-static prof_tdata_t *
-prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *arg) {
- tsdn_t *tsdn = (tsdn_t *)arg;
-
- return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
-}
-
-void
-prof_reset(tsd_t *tsd, size_t lg_sample) {
- prof_tdata_t *next;
-
- assert(lg_sample < (sizeof(uint64_t) << 3));
-
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
-
- lg_prof_sample = lg_sample;
-
- next = NULL;
- do {
- prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
- prof_tdata_reset_iter, (void *)tsd);
- if (to_destroy != NULL) {
- next = tdata_tree_next(&tdatas, to_destroy);
- prof_tdata_destroy_locked(tsd, to_destroy, false);
- } else {
- next = NULL;
- }
- } while (next != NULL);
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
-}
-
-void
-prof_tdata_cleanup(tsd_t *tsd) {
- prof_tdata_t *tdata;
-
- if (!config_prof) {
- return;
- }
-
- tdata = tsd_prof_tdata_get(tsd);
- if (tdata != NULL) {
- prof_tdata_detach(tsd, tdata);
- }
-}
-
-bool
-prof_active_get(tsdn_t *tsdn) {
- bool prof_active_current;
-
- malloc_mutex_lock(tsdn, &prof_active_mtx);
- prof_active_current = prof_active;
- malloc_mutex_unlock(tsdn, &prof_active_mtx);
- return prof_active_current;
-}
-
-bool
-prof_active_set(tsdn_t *tsdn, bool active) {
- bool prof_active_old;
-
- malloc_mutex_lock(tsdn, &prof_active_mtx);
- prof_active_old = prof_active;
- prof_active = active;
- malloc_mutex_unlock(tsdn, &prof_active_mtx);
- return prof_active_old;
-}
-
-#ifdef JEMALLOC_JET
-size_t
-prof_log_bt_count(void) {
- size_t cnt = 0;
- prof_bt_node_t *node = log_bt_first;
- while (node != NULL) {
- cnt++;
- node = node->next;
- }
- return cnt;
-}
-
-size_t
-prof_log_alloc_count(void) {
- size_t cnt = 0;
- prof_alloc_node_t *node = log_alloc_first;
- while (node != NULL) {
- cnt++;
- node = node->next;
- }
- return cnt;
-}
-
-size_t
-prof_log_thr_count(void) {
- size_t cnt = 0;
- prof_thr_node_t *node = log_thr_first;
- while (node != NULL) {
- cnt++;
- node = node->next;
- }
- return cnt;
-}
-
-bool
-prof_log_is_logging(void) {
- return prof_logging_state == prof_logging_state_started;
-}
-
-bool
-prof_log_rep_check(void) {
- if (prof_logging_state == prof_logging_state_stopped
- && log_tables_initialized) {
- return true;
- }
-
- if (log_bt_last != NULL && log_bt_last->next != NULL) {
- return true;
- }
- if (log_thr_last != NULL && log_thr_last->next != NULL) {
- return true;
- }
- if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
- return true;
- }
-
- size_t bt_count = prof_log_bt_count();
- size_t thr_count = prof_log_thr_count();
- size_t alloc_count = prof_log_alloc_count();
-
-
- if (prof_logging_state == prof_logging_state_stopped) {
- if (bt_count != 0 || thr_count != 0 || alloc_count || 0) {
- return true;
- }
- }
-
- prof_alloc_node_t *node = log_alloc_first;
- while (node != NULL) {
- if (node->alloc_bt_ind >= bt_count) {
- return true;
- }
- if (node->free_bt_ind >= bt_count) {
- return true;
- }
- if (node->alloc_thr_ind >= thr_count) {
- return true;
- }
- if (node->free_thr_ind >= thr_count) {
- return true;
- }
- if (node->alloc_time_ns > node->free_time_ns) {
- return true;
- }
- node = node->next;
- }
-
- return false;
-}
-
-void
-prof_log_dummy_set(bool new_value) {
- prof_log_dummy = new_value;
-}
-#endif
-
-bool
-prof_log_start(tsdn_t *tsdn, const char *filename) {
- if (!opt_prof || !prof_booted) {
- return true;
- }
-
- bool ret = false;
- size_t buf_size = PATH_MAX + 1;
-
- malloc_mutex_lock(tsdn, &log_mtx);
-
- if (prof_logging_state != prof_logging_state_stopped) {
- ret = true;
- } else if (filename == NULL) {
- /* Make default name. */
- malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json",
- opt_prof_prefix, prof_getpid(), log_seq);
- log_seq++;
- prof_logging_state = prof_logging_state_started;
- } else if (strlen(filename) >= buf_size) {
- ret = true;
- } else {
- strcpy(log_filename, filename);
- prof_logging_state = prof_logging_state_started;
- }
-
- if (!ret) {
- nstime_update(&log_start_timestamp);
- }
-
- malloc_mutex_unlock(tsdn, &log_mtx);
-
- return ret;
-}
-
-/* Used as an atexit function to stop logging on exit. */
-static void
-prof_log_stop_final(void) {
- tsd_t *tsd = tsd_fetch();
- prof_log_stop(tsd_tsdn(tsd));
-}
-
-struct prof_emitter_cb_arg_s {
- int fd;
- ssize_t ret;
-};
-
-static void
-prof_emitter_write_cb(void *opaque, const char *to_write) {
- struct prof_emitter_cb_arg_s *arg =
- (struct prof_emitter_cb_arg_s *)opaque;
- size_t bytes = strlen(to_write);
-#ifdef JEMALLOC_JET
- if (prof_log_dummy) {
- return;
- }
-#endif
- arg->ret = write(arg->fd, (void *)to_write, bytes);
-}
-
-/*
- * prof_log_emit_{...} goes through the appropriate linked list, emitting each
- * node to the json and deallocating it.
- */
-static void
-prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
- emitter_json_array_kv_begin(emitter, "threads");
- prof_thr_node_t *thr_node = log_thr_first;
- prof_thr_node_t *thr_old_node;
- while (thr_node != NULL) {
- emitter_json_object_begin(emitter);
-
- emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
- &thr_node->thr_uid);
-
- char *thr_name = thr_node->name;
-
- emitter_json_kv(emitter, "thr_name", emitter_type_string,
- &thr_name);
-
- emitter_json_object_end(emitter);
- thr_old_node = thr_node;
- thr_node = thr_node->next;
- idalloc(tsd, thr_old_node);
- }
- emitter_json_array_end(emitter);
-}
-
-static void
-prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
- emitter_json_array_kv_begin(emitter, "stack_traces");
- prof_bt_node_t *bt_node = log_bt_first;
- prof_bt_node_t *bt_old_node;
- /*
- * Calculate how many hex digits we need: twice number of bytes, two for
- * "0x", and then one more for terminating '\0'.
- */
- char buf[2 * sizeof(intptr_t) + 3];
- size_t buf_sz = sizeof(buf);
- while (bt_node != NULL) {
- emitter_json_array_begin(emitter);
- size_t i;
- for (i = 0; i < bt_node->bt.len; i++) {
- malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
- char *trace_str = buf;
- emitter_json_value(emitter, emitter_type_string,
- &trace_str);
- }
- emitter_json_array_end(emitter);
-
- bt_old_node = bt_node;
- bt_node = bt_node->next;
- idalloc(tsd, bt_old_node);
- }
- emitter_json_array_end(emitter);
-}
-
-static void
-prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
- emitter_json_array_kv_begin(emitter, "allocations");
- prof_alloc_node_t *alloc_node = log_alloc_first;
- prof_alloc_node_t *alloc_old_node;
- while (alloc_node != NULL) {
- emitter_json_object_begin(emitter);
-
- emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
- &alloc_node->alloc_thr_ind);
-
- emitter_json_kv(emitter, "free_thread", emitter_type_size,
- &alloc_node->free_thr_ind);
-
- emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
- &alloc_node->alloc_bt_ind);
-
- emitter_json_kv(emitter, "free_trace", emitter_type_size,
- &alloc_node->free_bt_ind);
-
- emitter_json_kv(emitter, "alloc_timestamp",
- emitter_type_uint64, &alloc_node->alloc_time_ns);
-
- emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
- &alloc_node->free_time_ns);
-
- emitter_json_kv(emitter, "usize", emitter_type_uint64,
- &alloc_node->usize);
-
- emitter_json_object_end(emitter);
-
- alloc_old_node = alloc_node;
- alloc_node = alloc_node->next;
- idalloc(tsd, alloc_old_node);
- }
- emitter_json_array_end(emitter);
-}
-
-static void
-prof_log_emit_metadata(emitter_t *emitter) {
- emitter_json_object_kv_begin(emitter, "info");
-
- nstime_t now = NSTIME_ZERO_INITIALIZER;
-
- nstime_update(&now);
- uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
- emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);
-
- char *vers = JEMALLOC_VERSION;
- emitter_json_kv(emitter, "version",
- emitter_type_string, &vers);
-
- emitter_json_kv(emitter, "lg_sample_rate",
- emitter_type_int, &lg_prof_sample);
-
- int pid = prof_getpid();
- emitter_json_kv(emitter, "pid", emitter_type_int, &pid);
-
- emitter_json_object_end(emitter);
-}
-
-
-bool
-prof_log_stop(tsdn_t *tsdn) {
- if (!opt_prof || !prof_booted) {
- return true;
- }
-
- tsd_t *tsd = tsdn_tsd(tsdn);
- malloc_mutex_lock(tsdn, &log_mtx);
-
- if (prof_logging_state != prof_logging_state_started) {
- malloc_mutex_unlock(tsdn, &log_mtx);
- return true;
- }
-
- /*
- * Set the state to dumping. We'll set it to stopped when we're done.
- * Since other threads won't be able to start/stop/log when the state is
- * dumping, we don't have to hold the lock during the whole method.
- */
- prof_logging_state = prof_logging_state_dumping;
- malloc_mutex_unlock(tsdn, &log_mtx);
-
-
- emitter_t emitter;
-
- /* Create a file. */
-
- int fd;
-#ifdef JEMALLOC_JET
- if (prof_log_dummy) {
- fd = 0;
- } else {
- fd = creat(log_filename, 0644);
- }
-#else
- fd = creat(log_filename, 0644);
-#endif
-
- if (fd == -1) {
- malloc_printf("<jemalloc>: creat() for log file \"%s\" "
- " failed with %d\n", log_filename, errno);
- if (opt_abort) {
- abort();
- }
- return true;
- }
-
- /* Emit to json. */
- struct prof_emitter_cb_arg_s arg;
- arg.fd = fd;
- emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb,
- (void *)(&arg));
-
- emitter_begin(&emitter);
- prof_log_emit_metadata(&emitter);
- prof_log_emit_threads(tsd, &emitter);
- prof_log_emit_traces(tsd, &emitter);
- prof_log_emit_allocs(tsd, &emitter);
- emitter_end(&emitter);
-
- /* Reset global state. */
- if (log_tables_initialized) {
- ckh_delete(tsd, &log_bt_node_set);
- ckh_delete(tsd, &log_thr_node_set);
- }
- log_tables_initialized = false;
- log_bt_index = 0;
- log_thr_index = 0;
- log_bt_first = NULL;
- log_bt_last = NULL;
- log_thr_first = NULL;
- log_thr_last = NULL;
- log_alloc_first = NULL;
- log_alloc_last = NULL;
-
- malloc_mutex_lock(tsdn, &log_mtx);
- prof_logging_state = prof_logging_state_stopped;
- malloc_mutex_unlock(tsdn, &log_mtx);
-
-#ifdef JEMALLOC_JET
- if (prof_log_dummy) {
- return false;
- }
-#endif
- return close(fd);
-}
-
-const char *
-prof_thread_name_get(tsd_t *tsd) {
- prof_tdata_t *tdata;
-
- tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL) {
- return "";
- }
- return (tdata->thread_name != NULL ? tdata->thread_name : "");
-}
-
-static char *
-prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
- char *ret;
- size_t size;
-
- if (thread_name == NULL) {
- return NULL;
- }
-
- size = strlen(thread_name) + 1;
- if (size == 1) {
- return "";
- }
-
- ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
- if (ret == NULL) {
- return NULL;
- }
- memcpy(ret, thread_name, size);
- return ret;
-}
-
-int
-prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
- prof_tdata_t *tdata;
- unsigned i;
- char *s;
-
- tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL) {
- return EAGAIN;
- }
-
- /* Validate input. */
- if (thread_name == NULL) {
- return EFAULT;
- }
- for (i = 0; thread_name[i] != '\0'; i++) {
- char c = thread_name[i];
- if (!isgraph(c) && !isblank(c)) {
- return EFAULT;
- }
- }
-
- s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
- if (s == NULL) {
- return EAGAIN;
- }
-
- if (tdata->thread_name != NULL) {
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
- true);
- tdata->thread_name = NULL;
- }
- if (strlen(s) > 0) {
- tdata->thread_name = s;
- }
- return 0;
-}
-
-bool
-prof_thread_active_get(tsd_t *tsd) {
- prof_tdata_t *tdata;
-
- tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL) {
- return false;
- }
- return tdata->active;
-}
-
-bool
-prof_thread_active_set(tsd_t *tsd, bool active) {
- prof_tdata_t *tdata;
-
- tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL) {
- return true;
- }
- tdata->active = active;
- return false;
-}
-
-bool
-prof_thread_active_init_get(tsdn_t *tsdn) {
- bool active_init;
-
- malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
- active_init = prof_thread_active_init;
- malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
- return active_init;
-}
-
-bool
-prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
- bool active_init_old;
-
- malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
- active_init_old = prof_thread_active_init;
- prof_thread_active_init = active_init;
- malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
- return active_init_old;
-}
-
-bool
-prof_gdump_get(tsdn_t *tsdn) {
- bool prof_gdump_current;
-
- malloc_mutex_lock(tsdn, &prof_gdump_mtx);
- prof_gdump_current = prof_gdump_val;
- malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
- return prof_gdump_current;
-}
-
-bool
-prof_gdump_set(tsdn_t *tsdn, bool gdump) {
- bool prof_gdump_old;
-
- malloc_mutex_lock(tsdn, &prof_gdump_mtx);
- prof_gdump_old = prof_gdump_val;
- prof_gdump_val = gdump;
- malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
- return prof_gdump_old;
-}
-
+ if (!destroy_tdata) {
+ tdata->attached = false;
+ }
+ tsd_prof_tdata_set(tsd, NULL);
+ } else {
+ destroy_tdata = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (destroy_tdata) {
+ prof_tdata_destroy(tsd, tdata, true);
+ }
+}
+
+prof_tdata_t *
+prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
+ uint64_t thr_uid = tdata->thr_uid;
+ uint64_t thr_discrim = tdata->thr_discrim + 1;
+ char *thread_name = (tdata->thread_name != NULL) ?
+ prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
+ bool active = tdata->active;
+
+ prof_tdata_detach(tsd, tdata);
+ return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
+ active);
+}
+
+static bool
+prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsdn, tdata->lock);
+ if (!tdata->expired) {
+ tdata->expired = true;
+ destroy_tdata = tdata->attached ? false :
+ prof_tdata_should_destroy(tsdn, tdata, false);
+ } else {
+ destroy_tdata = false;
+ }
+ malloc_mutex_unlock(tsdn, tdata->lock);
+
+ return destroy_tdata;
+}
+
+static prof_tdata_t *
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
+}
+
void
-prof_boot0(void) {
+prof_reset(tsd_t *tsd, size_t lg_sample) {
+ prof_tdata_t *next;
+
+ assert(lg_sample < (sizeof(uint64_t) << 3));
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ lg_prof_sample = lg_sample;
+
+ next = NULL;
+ do {
+ prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
+ prof_tdata_reset_iter, (void *)tsd);
+ if (to_destroy != NULL) {
+ next = tdata_tree_next(&tdatas, to_destroy);
+ prof_tdata_destroy_locked(tsd, to_destroy, false);
+ } else {
+ next = NULL;
+ }
+ } while (next != NULL);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+}
+
+void
+prof_tdata_cleanup(tsd_t *tsd) {
+ prof_tdata_t *tdata;
+
+ if (!config_prof) {
+ return;
+ }
+
+ tdata = tsd_prof_tdata_get(tsd);
+ if (tdata != NULL) {
+ prof_tdata_detach(tsd, tdata);
+ }
+}
+
+bool
+prof_active_get(tsdn_t *tsdn) {
+ bool prof_active_current;
+
+ malloc_mutex_lock(tsdn, &prof_active_mtx);
+ prof_active_current = prof_active;
+ malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ return prof_active_current;
+}
+
+bool
+prof_active_set(tsdn_t *tsdn, bool active) {
+ bool prof_active_old;
+
+ malloc_mutex_lock(tsdn, &prof_active_mtx);
+ prof_active_old = prof_active;
+ prof_active = active;
+ malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ return prof_active_old;
+}
+
+#ifdef JEMALLOC_JET
+size_t
+prof_log_bt_count(void) {
+ size_t cnt = 0;
+ prof_bt_node_t *node = log_bt_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+size_t
+prof_log_alloc_count(void) {
+ size_t cnt = 0;
+ prof_alloc_node_t *node = log_alloc_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+size_t
+prof_log_thr_count(void) {
+ size_t cnt = 0;
+ prof_thr_node_t *node = log_thr_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+bool
+prof_log_is_logging(void) {
+ return prof_logging_state == prof_logging_state_started;
+}
+
+bool
+prof_log_rep_check(void) {
+ if (prof_logging_state == prof_logging_state_stopped
+ && log_tables_initialized) {
+ return true;
+ }
+
+ if (log_bt_last != NULL && log_bt_last->next != NULL) {
+ return true;
+ }
+ if (log_thr_last != NULL && log_thr_last->next != NULL) {
+ return true;
+ }
+ if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
+ return true;
+ }
+
+ size_t bt_count = prof_log_bt_count();
+ size_t thr_count = prof_log_thr_count();
+ size_t alloc_count = prof_log_alloc_count();
+
+
+ if (prof_logging_state == prof_logging_state_stopped) {
+ if (bt_count != 0 || thr_count != 0 || alloc_count || 0) {
+ return true;
+ }
+ }
+
+ prof_alloc_node_t *node = log_alloc_first;
+ while (node != NULL) {
+ if (node->alloc_bt_ind >= bt_count) {
+ return true;
+ }
+ if (node->free_bt_ind >= bt_count) {
+ return true;
+ }
+ if (node->alloc_thr_ind >= thr_count) {
+ return true;
+ }
+ if (node->free_thr_ind >= thr_count) {
+ return true;
+ }
+ if (node->alloc_time_ns > node->free_time_ns) {
+ return true;
+ }
+ node = node->next;
+ }
+
+ return false;
+}
+
+void
+prof_log_dummy_set(bool new_value) {
+ prof_log_dummy = new_value;
+}
+#endif
+
+bool
+prof_log_start(tsdn_t *tsdn, const char *filename) {
+ if (!opt_prof || !prof_booted) {
+ return true;
+ }
+
+ bool ret = false;
+ size_t buf_size = PATH_MAX + 1;
+
+ malloc_mutex_lock(tsdn, &log_mtx);
+
+ if (prof_logging_state != prof_logging_state_stopped) {
+ ret = true;
+ } else if (filename == NULL) {
+ /* Make default name. */
+ malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json",
+ opt_prof_prefix, prof_getpid(), log_seq);
+ log_seq++;
+ prof_logging_state = prof_logging_state_started;
+ } else if (strlen(filename) >= buf_size) {
+ ret = true;
+ } else {
+ strcpy(log_filename, filename);
+ prof_logging_state = prof_logging_state_started;
+ }
+
+ if (!ret) {
+ nstime_update(&log_start_timestamp);
+ }
+
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+ return ret;
+}
+
+/* Used as an atexit function to stop logging on exit. */
+static void
+prof_log_stop_final(void) {
+ tsd_t *tsd = tsd_fetch();
+ prof_log_stop(tsd_tsdn(tsd));
+}
+
+struct prof_emitter_cb_arg_s {
+ int fd;
+ ssize_t ret;
+};
+
+static void
+prof_emitter_write_cb(void *opaque, const char *to_write) {
+ struct prof_emitter_cb_arg_s *arg =
+ (struct prof_emitter_cb_arg_s *)opaque;
+ size_t bytes = strlen(to_write);
+#ifdef JEMALLOC_JET
+ if (prof_log_dummy) {
+ return;
+ }
+#endif
+ arg->ret = write(arg->fd, (void *)to_write, bytes);
+}
+
+/*
+ * prof_log_emit_{...} goes through the appropriate linked list, emitting each
+ * node to the json and deallocating it.
+ */
+static void
+prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "threads");
+ prof_thr_node_t *thr_node = log_thr_first;
+ prof_thr_node_t *thr_old_node;
+ while (thr_node != NULL) {
+ emitter_json_object_begin(emitter);
+
+ emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
+ &thr_node->thr_uid);
+
+ char *thr_name = thr_node->name;
+
+ emitter_json_kv(emitter, "thr_name", emitter_type_string,
+ &thr_name);
+
+ emitter_json_object_end(emitter);
+ thr_old_node = thr_node;
+ thr_node = thr_node->next;
+ idalloc(tsd, thr_old_node);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "stack_traces");
+ prof_bt_node_t *bt_node = log_bt_first;
+ prof_bt_node_t *bt_old_node;
+ /*
+ * Calculate how many hex digits we need: twice number of bytes, two for
+ * "0x", and then one more for terminating '\0'.
+ */
+ char buf[2 * sizeof(intptr_t) + 3];
+ size_t buf_sz = sizeof(buf);
+ while (bt_node != NULL) {
+ emitter_json_array_begin(emitter);
+ size_t i;
+ for (i = 0; i < bt_node->bt.len; i++) {
+ malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
+ char *trace_str = buf;
+ emitter_json_value(emitter, emitter_type_string,
+ &trace_str);
+ }
+ emitter_json_array_end(emitter);
+
+ bt_old_node = bt_node;
+ bt_node = bt_node->next;
+ idalloc(tsd, bt_old_node);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "allocations");
+ prof_alloc_node_t *alloc_node = log_alloc_first;
+ prof_alloc_node_t *alloc_old_node;
+ while (alloc_node != NULL) {
+ emitter_json_object_begin(emitter);
+
+ emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
+ &alloc_node->alloc_thr_ind);
+
+ emitter_json_kv(emitter, "free_thread", emitter_type_size,
+ &alloc_node->free_thr_ind);
+
+ emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
+ &alloc_node->alloc_bt_ind);
+
+ emitter_json_kv(emitter, "free_trace", emitter_type_size,
+ &alloc_node->free_bt_ind);
+
+ emitter_json_kv(emitter, "alloc_timestamp",
+ emitter_type_uint64, &alloc_node->alloc_time_ns);
+
+ emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
+ &alloc_node->free_time_ns);
+
+ emitter_json_kv(emitter, "usize", emitter_type_uint64,
+ &alloc_node->usize);
+
+ emitter_json_object_end(emitter);
+
+ alloc_old_node = alloc_node;
+ alloc_node = alloc_node->next;
+ idalloc(tsd, alloc_old_node);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_metadata(emitter_t *emitter) {
+ emitter_json_object_kv_begin(emitter, "info");
+
+ nstime_t now = NSTIME_ZERO_INITIALIZER;
+
+ nstime_update(&now);
+ uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
+ emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);
+
+ char *vers = JEMALLOC_VERSION;
+ emitter_json_kv(emitter, "version",
+ emitter_type_string, &vers);
+
+ emitter_json_kv(emitter, "lg_sample_rate",
+ emitter_type_int, &lg_prof_sample);
+
+ int pid = prof_getpid();
+ emitter_json_kv(emitter, "pid", emitter_type_int, &pid);
+
+ emitter_json_object_end(emitter);
+}
+
+
+bool
+prof_log_stop(tsdn_t *tsdn) {
+ if (!opt_prof || !prof_booted) {
+ return true;
+ }
+
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ malloc_mutex_lock(tsdn, &log_mtx);
+
+ if (prof_logging_state != prof_logging_state_started) {
+ malloc_mutex_unlock(tsdn, &log_mtx);
+ return true;
+ }
+
+ /*
+ * Set the state to dumping. We'll set it to stopped when we're done.
+ * Since other threads won't be able to start/stop/log when the state is
+ * dumping, we don't have to hold the lock during the whole method.
+ */
+ prof_logging_state = prof_logging_state_dumping;
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+
+ emitter_t emitter;
+
+ /* Create a file. */
+
+ int fd;
+#ifdef JEMALLOC_JET
+ if (prof_log_dummy) {
+ fd = 0;
+ } else {
+ fd = creat(log_filename, 0644);
+ }
+#else
+ fd = creat(log_filename, 0644);
+#endif
+
+ if (fd == -1) {
+ malloc_printf("<jemalloc>: creat() for log file \"%s\" "
+ " failed with %d\n", log_filename, errno);
+ if (opt_abort) {
+ abort();
+ }
+ return true;
+ }
+
+ /* Emit to json. */
+ struct prof_emitter_cb_arg_s arg;
+ arg.fd = fd;
+ emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb,
+ (void *)(&arg));
+
+ emitter_begin(&emitter);
+ prof_log_emit_metadata(&emitter);
+ prof_log_emit_threads(tsd, &emitter);
+ prof_log_emit_traces(tsd, &emitter);
+ prof_log_emit_allocs(tsd, &emitter);
+ emitter_end(&emitter);
+
+ /* Reset global state. */
+ if (log_tables_initialized) {
+ ckh_delete(tsd, &log_bt_node_set);
+ ckh_delete(tsd, &log_thr_node_set);
+ }
+ log_tables_initialized = false;
+ log_bt_index = 0;
+ log_thr_index = 0;
+ log_bt_first = NULL;
+ log_bt_last = NULL;
+ log_thr_first = NULL;
+ log_thr_last = NULL;
+ log_alloc_first = NULL;
+ log_alloc_last = NULL;
+
+ malloc_mutex_lock(tsdn, &log_mtx);
+ prof_logging_state = prof_logging_state_stopped;
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+#ifdef JEMALLOC_JET
+ if (prof_log_dummy) {
+ return false;
+ }
+#endif
+ return close(fd);
+}
+
+const char *
+prof_thread_name_get(tsd_t *tsd) {
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return "";
+ }
+ return (tdata->thread_name != NULL ? tdata->thread_name : "");
+}
+
+static char *
+prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
+ char *ret;
+ size_t size;
+
+ if (thread_name == NULL) {
+ return NULL;
+ }
+
+ size = strlen(thread_name) + 1;
+ if (size == 1) {
+ return "";
+ }
+
+ ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+ if (ret == NULL) {
+ return NULL;
+ }
+ memcpy(ret, thread_name, size);
+ return ret;
+}
+
+int
+prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
+ prof_tdata_t *tdata;
+ unsigned i;
+ char *s;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return EAGAIN;
+ }
+
+ /* Validate input. */
+ if (thread_name == NULL) {
+ return EFAULT;
+ }
+ for (i = 0; thread_name[i] != '\0'; i++) {
+ char c = thread_name[i];
+ if (!isgraph(c) && !isblank(c)) {
+ return EFAULT;
+ }
+ }
+
+ s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
+ if (s == NULL) {
+ return EAGAIN;
+ }
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
+ true);
+ tdata->thread_name = NULL;
+ }
+ if (strlen(s) > 0) {
+ tdata->thread_name = s;
+ }
+ return 0;
+}
+
+bool
+prof_thread_active_get(tsd_t *tsd) {
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return false;
+ }
+ return tdata->active;
+}
+
+bool
+prof_thread_active_set(tsd_t *tsd, bool active) {
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return true;
+ }
+ tdata->active = active;
+ return false;
+}
+
+bool
+prof_thread_active_init_get(tsdn_t *tsdn) {
+ bool active_init;
+
+ malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ active_init = prof_thread_active_init;
+ malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ return active_init;
+}
+
+bool
+prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
+ bool active_init_old;
+
+ malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ active_init_old = prof_thread_active_init;
+ prof_thread_active_init = active_init;
+ malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ return active_init_old;
+}
+
+bool
+prof_gdump_get(tsdn_t *tsdn) {
+ bool prof_gdump_current;
+
+ malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ prof_gdump_current = prof_gdump_val;
+ malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ return prof_gdump_current;
+}
+
+bool
+prof_gdump_set(tsdn_t *tsdn, bool gdump) {
+ bool prof_gdump_old;
+
+ malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ prof_gdump_old = prof_gdump_val;
+ prof_gdump_val = gdump;
+ malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ return prof_gdump_old;
+}
+
+void
+prof_boot0(void) {
cassert(config_prof);
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
@@ -2937,15 +2937,15 @@ prof_boot0(void) {
}
void
-prof_boot1(void) {
+prof_boot1(void) {
cassert(config_prof);
/*
- * opt_prof must be in its final state before any arenas are
- * initialized, so this function must be executed early.
+ * opt_prof must be in its final state before any arenas are
+ * initialized, so this function must be executed early.
*/
- if (opt_prof_leak && !opt_prof) {
+ if (opt_prof_leak && !opt_prof) {
/*
* Enable opt_prof, but in such a way that profiles are never
* automatically dumped.
@@ -2961,211 +2961,211 @@ prof_boot1(void) {
}
bool
-prof_boot2(tsd_t *tsd) {
+prof_boot2(tsd_t *tsd) {
cassert(config_prof);
if (opt_prof) {
unsigned i;
- lg_prof_sample = opt_lg_prof_sample;
-
- prof_active = opt_prof_active;
- if (malloc_mutex_init(&prof_active_mtx, "prof_active",
- WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- prof_gdump_val = opt_prof_gdump;
- if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
- WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- prof_thread_active_init = opt_prof_thread_active_init;
- if (malloc_mutex_init(&prof_thread_active_init_mtx,
- "prof_thread_active_init",
- WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp)) {
- return true;
- }
- if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
- WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- tdata_tree_new(&tdatas);
- if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
- WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- next_thr_uid = 0;
- if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
- WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
- WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
- return true;
- }
- if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
- WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
- atexit(prof_fdump) != 0) {
+ lg_prof_sample = opt_lg_prof_sample;
+
+ prof_active = opt_prof_active;
+ if (malloc_mutex_init(&prof_active_mtx, "prof_active",
+ WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ prof_gdump_val = opt_prof_gdump;
+ if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
+ WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ prof_thread_active_init = opt_prof_thread_active_init;
+ if (malloc_mutex_init(&prof_thread_active_init_mtx,
+ "prof_thread_active_init",
+ WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp)) {
+ return true;
+ }
+ if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
+ WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ tdata_tree_new(&tdatas);
+ if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
+ WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ next_thr_uid = 0;
+ if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
+ WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
+ WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
+ WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
+ atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
- if (opt_abort) {
+ if (opt_abort) {
abort();
- }
- }
-
- if (opt_prof_log) {
- prof_log_start(tsd_tsdn(tsd), NULL);
- }
-
- if (atexit(prof_log_stop_final) != 0) {
- malloc_write("<jemalloc>: Error in atexit() "
- "for logging\n");
- if (opt_abort) {
- abort();
- }
- }
-
- if (malloc_mutex_init(&log_mtx, "prof_log",
- WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
- prof_bt_node_hash, prof_bt_node_keycomp)) {
- return true;
- }
-
- if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
- prof_thr_node_hash, prof_thr_node_keycomp)) {
- return true;
- }
-
- log_tables_initialized = true;
-
- gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
- CACHELINE);
- if (gctx_locks == NULL) {
- return true;
- }
+ }
+ }
+
+ if (opt_prof_log) {
+ prof_log_start(tsd_tsdn(tsd), NULL);
+ }
+
+ if (atexit(prof_log_stop_final) != 0) {
+ malloc_write("<jemalloc>: Error in atexit() "
+ "for logging\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+
+ if (malloc_mutex_init(&log_mtx, "prof_log",
+ WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
+ prof_bt_node_hash, prof_bt_node_keycomp)) {
+ return true;
+ }
+
+ if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
+ prof_thr_node_hash, prof_thr_node_keycomp)) {
+ return true;
+ }
+
+ log_tables_initialized = true;
+
+ gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
+ b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
+ CACHELINE);
+ if (gctx_locks == NULL) {
+ return true;
+ }
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
- WITNESS_RANK_PROF_GCTX,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
- }
-
- tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
- CACHELINE);
- if (tdata_locks == NULL) {
- return true;
- }
- for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
- if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
- WITNESS_RANK_PROF_TDATA,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
- }
+ if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
+ WITNESS_RANK_PROF_GCTX,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ }
+
+ tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
+ b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
+ CACHELINE);
+ if (tdata_locks == NULL) {
+ return true;
+ }
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
+ WITNESS_RANK_PROF_TDATA,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ }
#ifdef JEMALLOC_PROF_LIBGCC
- /*
- * Cause the backtracing machinery to allocate its internal
- * state before enabling profiling.
- */
- _Unwind_Backtrace(prof_unwind_init_callback, NULL);
+ /*
+ * Cause the backtracing machinery to allocate its internal
+ * state before enabling profiling.
+ */
+ _Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif
- }
+ }
prof_booted = true;
- return false;
+ return false;
}
void
-prof_prefork0(tsdn_t *tsdn) {
- if (config_prof && opt_prof) {
+prof_prefork0(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
unsigned i;
- malloc_mutex_prefork(tsdn, &prof_dump_mtx);
- malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
- malloc_mutex_prefork(tsdn, &tdatas_mtx);
- for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
- malloc_mutex_prefork(tsdn, &tdata_locks[i]);
- }
- for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- malloc_mutex_prefork(tsdn, &gctx_locks[i]);
- }
- }
-}
-
-void
-prof_prefork1(tsdn_t *tsdn) {
- if (config_prof && opt_prof) {
- malloc_mutex_prefork(tsdn, &prof_active_mtx);
- malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
- malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
- malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
- malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_prefork(tsdn, &prof_dump_mtx);
+ malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
+ malloc_mutex_prefork(tsdn, &tdatas_mtx);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ malloc_mutex_prefork(tsdn, &tdata_locks[i]);
+ }
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ malloc_mutex_prefork(tsdn, &gctx_locks[i]);
+ }
}
}
void
-prof_postfork_parent(tsdn_t *tsdn) {
- if (config_prof && opt_prof) {
+prof_prefork1(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
+ malloc_mutex_prefork(tsdn, &prof_active_mtx);
+ malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
+ malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
+ }
+}
+
+void
+prof_postfork_parent(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
unsigned i;
- malloc_mutex_postfork_parent(tsdn,
- &prof_thread_active_init_mtx);
- malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
- }
- for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
- malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
- }
- malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
- malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
+ malloc_mutex_postfork_parent(tsdn,
+ &prof_thread_active_init_mtx);
+ malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
+ }
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
+ }
+ malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
+ malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
}
}
void
-prof_postfork_child(tsdn_t *tsdn) {
- if (config_prof && opt_prof) {
+prof_postfork_child(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
unsigned i;
- malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
- malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
- }
- for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
- malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
- }
- malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
- malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
+ }
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
+ }
+ malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
+ malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
}
}
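The prof_prefork0/prof_prefork1 and prof_postfork_{parent,child} functions in the hunk above follow the standard fork-safety pattern: every profiler mutex is acquired in a fixed order before fork() and released again in both the parent and the child, so no lock can be left held by a thread that does not exist in the child process. Below is a minimal standalone sketch of that pattern, using plain pthreads instead of jemalloc's malloc_mutex wrappers; the demo_* handler names and the two toy locks are illustrative only and are not part of jemalloc.

#include <pthread.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

/* Two toy locks standing in for mutexes such as prof_dump_mtx and tdatas_mtx. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Prepare handler: take every lock in a fixed order so fork() cannot run
 * while another thread holds any of them. */
static void demo_prefork(void) {
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
}

/* Parent handler: release the locks in the reverse of the acquisition order. */
static void demo_postfork_parent(void) {
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

/* Child handler: the child is single-threaded after fork(), so releasing
 * (or re-initializing, as malloc_mutex_postfork_child() does) makes the
 * locks usable again. */
static void demo_postfork_child(void) {
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

int main(void) {
	/* Register the handlers once, analogous to how the allocator wires up
	 * its prefork/postfork hooks via pthread_atfork() on POSIX systems. */
	if (pthread_atfork(demo_prefork, demo_postfork_parent,
	    demo_postfork_child) != 0) {
		fprintf(stderr, "pthread_atfork failed\n");
		return 1;
	}

	pid_t pid = fork();
	if (pid == 0) {
		printf("child: locks are usable again\n");
		return 0;
	}
	wait(NULL);
	printf("parent: locks are usable again\n");
	return 0;
}

The property mirrored here is the one visible in the diff: the release order in prof_postfork_parent() and prof_postfork_child() is the exact reverse of the acquisition order in prof_prefork0() and prof_prefork1(), which keeps the lock ranking consistent across fork().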
diff --git a/contrib/libs/jemalloc/src/rtree.c b/contrib/libs/jemalloc/src/rtree.c
index 4ae41fe2fe..87f385e3b4 100644
--- a/contrib/libs/jemalloc/src/rtree.c
+++ b/contrib/libs/jemalloc/src/rtree.c
@@ -1,320 +1,320 @@
-#define JEMALLOC_RTREE_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
+#define JEMALLOC_RTREE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
-/*
- * Only the most significant bits of keys passed to rtree_{read,write}() are
- * used.
- */
-bool
-rtree_new(rtree_t *rtree, bool zeroed) {
-#ifdef JEMALLOC_JET
- if (!zeroed) {
- memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
- }
-#else
- assert(zeroed);
-#endif
+/*
+ * Only the most significant bits of keys passed to rtree_{read,write}() are
+ * used.
+ */
+bool
+rtree_new(rtree_t *rtree, bool zeroed) {
+#ifdef JEMALLOC_JET
+ if (!zeroed) {
+ memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
+ }
+#else
+ assert(zeroed);
+#endif
- if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
- malloc_mutex_rank_exclusive)) {
- return true;
+ if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
+ malloc_mutex_rank_exclusive)) {
+ return true;
}
- return false;
-}
-
-static rtree_node_elm_t *
-rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
- return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
- sizeof(rtree_node_elm_t), CACHELINE);
-}
-rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;
-
-static void
-rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
- /* Nodes are never deleted during normal operation. */
- not_reached();
-}
-rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
- rtree_node_dalloc_impl;
+ return false;
+}
-static rtree_leaf_elm_t *
-rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
- return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
- sizeof(rtree_leaf_elm_t), CACHELINE);
-}
-rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;
-
-static void
-rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
- /* Leaves are never deleted during normal operation. */
- not_reached();
-}
-rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
- rtree_leaf_dalloc_impl;
-
-#ifdef JEMALLOC_JET
-# if RTREE_HEIGHT > 1
-static void
-rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
- unsigned level) {
- size_t nchildren = ZU(1) << rtree_levels[level].bits;
- if (level + 2 < RTREE_HEIGHT) {
- for (size_t i = 0; i < nchildren; i++) {
- rtree_node_elm_t *node =
- (rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
- ATOMIC_RELAXED);
- if (node != NULL) {
- rtree_delete_subtree(tsdn, rtree, node, level +
- 1);
- }
- }
- } else {
- for (size_t i = 0; i < nchildren; i++) {
- rtree_leaf_elm_t *leaf =
- (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
- ATOMIC_RELAXED);
- if (leaf != NULL) {
- rtree_leaf_dalloc(tsdn, rtree, leaf);
- }
- }
+static rtree_node_elm_t *
+rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
+ sizeof(rtree_node_elm_t), CACHELINE);
+}
+rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;
+
+static void
+rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
+ /* Nodes are never deleted during normal operation. */
+ not_reached();
+}
+rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
+ rtree_node_dalloc_impl;
+
+static rtree_leaf_elm_t *
+rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
+ sizeof(rtree_leaf_elm_t), CACHELINE);
+}
+rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;
+
+static void
+rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
+ /* Leaves are never deleted during normal operation. */
+ not_reached();
+}
+rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
+ rtree_leaf_dalloc_impl;
+
+#ifdef JEMALLOC_JET
+# if RTREE_HEIGHT > 1
+static void
+rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
+ unsigned level) {
+ size_t nchildren = ZU(1) << rtree_levels[level].bits;
+ if (level + 2 < RTREE_HEIGHT) {
+ for (size_t i = 0; i < nchildren; i++) {
+ rtree_node_elm_t *node =
+ (rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
+ ATOMIC_RELAXED);
+ if (node != NULL) {
+ rtree_delete_subtree(tsdn, rtree, node, level +
+ 1);
+ }
+ }
+ } else {
+ for (size_t i = 0; i < nchildren; i++) {
+ rtree_leaf_elm_t *leaf =
+ (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
+ ATOMIC_RELAXED);
+ if (leaf != NULL) {
+ rtree_leaf_dalloc(tsdn, rtree, leaf);
+ }
+ }
}
- if (subtree != rtree->root) {
- rtree_node_dalloc(tsdn, rtree, subtree);
+ if (subtree != rtree->root) {
+ rtree_node_dalloc(tsdn, rtree, subtree);
}
-}
-# endif
+}
+# endif
-void
-rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
-# if RTREE_HEIGHT > 1
- rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
-# endif
+void
+rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
+# if RTREE_HEIGHT > 1
+ rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
+# endif
}
-#endif
+#endif
-static rtree_node_elm_t *
-rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
- atomic_p_t *elmp) {
- malloc_mutex_lock(tsdn, &rtree->init_lock);
- /*
- * If *elmp is non-null, then it was initialized with the init lock
- * held, so we can get by with 'relaxed' here.
- */
- rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
- if (node == NULL) {
- node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
- rtree_levels[level].bits);
- if (node == NULL) {
- malloc_mutex_unlock(tsdn, &rtree->init_lock);
- return NULL;
- }
- /*
- * Even though we hold the lock, a later reader might not; we
- * need release semantics.
- */
- atomic_store_p(elmp, node, ATOMIC_RELEASE);
- }
- malloc_mutex_unlock(tsdn, &rtree->init_lock);
+static rtree_node_elm_t *
+rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
+ atomic_p_t *elmp) {
+ malloc_mutex_lock(tsdn, &rtree->init_lock);
+ /*
+ * If *elmp is non-null, then it was initialized with the init lock
+ * held, so we can get by with 'relaxed' here.
+ */
+ rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
+ if (node == NULL) {
+ node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
+ rtree_levels[level].bits);
+ if (node == NULL) {
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+ return NULL;
+ }
+ /*
+ * Even though we hold the lock, a later reader might not; we
+ * need release semantics.
+ */
+ atomic_store_p(elmp, node, ATOMIC_RELEASE);
+ }
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
- return node;
-}
+ return node;
+}
-static rtree_leaf_elm_t *
-rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
- malloc_mutex_lock(tsdn, &rtree->init_lock);
- /*
- * If *elmp is non-null, then it was initialized with the init lock
- * held, so we can get by with 'relaxed' here.
- */
- rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
- if (leaf == NULL) {
- leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
- rtree_levels[RTREE_HEIGHT-1].bits);
- if (leaf == NULL) {
- malloc_mutex_unlock(tsdn, &rtree->init_lock);
- return NULL;
+static rtree_leaf_elm_t *
+rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
+ malloc_mutex_lock(tsdn, &rtree->init_lock);
+ /*
+ * If *elmp is non-null, then it was initialized with the init lock
+ * held, so we can get by with 'relaxed' here.
+ */
+ rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
+ if (leaf == NULL) {
+ leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
+ rtree_levels[RTREE_HEIGHT-1].bits);
+ if (leaf == NULL) {
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+ return NULL;
}
- /*
- * Even though we hold the lock, a later reader might not; we
- * need release semantics.
- */
- atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
- }
- malloc_mutex_unlock(tsdn, &rtree->init_lock);
-
- return leaf;
-}
-
-static bool
-rtree_node_valid(rtree_node_elm_t *node) {
- return ((uintptr_t)node != (uintptr_t)0);
-}
-
-static bool
-rtree_leaf_valid(rtree_leaf_elm_t *leaf) {
- return ((uintptr_t)leaf != (uintptr_t)0);
-}
-
-static rtree_node_elm_t *
-rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
- rtree_node_elm_t *node;
-
- if (dependent) {
- node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
- ATOMIC_RELAXED);
- } else {
- node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
- ATOMIC_ACQUIRE);
+ /*
+ * Even though we hold the lock, a later reader might not; we
+ * need release semantics.
+ */
+ atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
}
-
- assert(!dependent || node != NULL);
- return node;
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+
+ return leaf;
}
-static rtree_node_elm_t *
-rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
- unsigned level, bool dependent) {
- rtree_node_elm_t *node;
+static bool
+rtree_node_valid(rtree_node_elm_t *node) {
+ return ((uintptr_t)node != (uintptr_t)0);
+}
- node = rtree_child_node_tryread(elm, dependent);
- if (!dependent && unlikely(!rtree_node_valid(node))) {
- node = rtree_node_init(tsdn, rtree, level + 1, &elm->child);
- }
- assert(!dependent || node != NULL);
- return node;
+static bool
+rtree_leaf_valid(rtree_leaf_elm_t *leaf) {
+ return ((uintptr_t)leaf != (uintptr_t)0);
}
-static rtree_leaf_elm_t *
-rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
- rtree_leaf_elm_t *leaf;
+static rtree_node_elm_t *
+rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
+ rtree_node_elm_t *node;
- if (dependent) {
- leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
- ATOMIC_RELAXED);
- } else {
- leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
- ATOMIC_ACQUIRE);
- }
-
- assert(!dependent || leaf != NULL);
- return leaf;
+ if (dependent) {
+ node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_RELAXED);
+ } else {
+ node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_ACQUIRE);
+ }
+
+ assert(!dependent || node != NULL);
+ return node;
}
-static rtree_leaf_elm_t *
-rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
- unsigned level, bool dependent) {
- rtree_leaf_elm_t *leaf;
+static rtree_node_elm_t *
+rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
+ unsigned level, bool dependent) {
+ rtree_node_elm_t *node;
- leaf = rtree_child_leaf_tryread(elm, dependent);
- if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {
- leaf = rtree_leaf_init(tsdn, rtree, &elm->child);
- }
- assert(!dependent || leaf != NULL);
- return leaf;
+ node = rtree_child_node_tryread(elm, dependent);
+ if (!dependent && unlikely(!rtree_node_valid(node))) {
+ node = rtree_node_init(tsdn, rtree, level + 1, &elm->child);
+ }
+ assert(!dependent || node != NULL);
+ return node;
}
-rtree_leaf_elm_t *
-rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, bool init_missing) {
- rtree_node_elm_t *node;
- rtree_leaf_elm_t *leaf;
-#if RTREE_HEIGHT > 1
- node = rtree->root;
-#else
- leaf = rtree->root;
-#endif
-
- if (config_debug) {
- uintptr_t leafkey = rtree_leafkey(key);
- for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
- assert(rtree_ctx->cache[i].leafkey != leafkey);
- }
- for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
- assert(rtree_ctx->l2_cache[i].leafkey != leafkey);
- }
- }
-
-#define RTREE_GET_CHILD(level) { \
- assert(level < RTREE_HEIGHT-1); \
- if (level != 0 && !dependent && \
- unlikely(!rtree_node_valid(node))) { \
- return NULL; \
- } \
- uintptr_t subkey = rtree_subkey(key, level); \
- if (level + 2 < RTREE_HEIGHT) { \
- node = init_missing ? \
- rtree_child_node_read(tsdn, rtree, \
- &node[subkey], level, dependent) : \
- rtree_child_node_tryread(&node[subkey], \
- dependent); \
- } else { \
- leaf = init_missing ? \
- rtree_child_leaf_read(tsdn, rtree, \
- &node[subkey], level, dependent) : \
- rtree_child_leaf_tryread(&node[subkey], \
- dependent); \
- } \
- }
- /*
- * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss):
- * (1) evict last entry in L2 cache; (2) move the collision slot from L1
- * cache down to L2; and 3) fill L1.
- */
-#define RTREE_GET_LEAF(level) { \
- assert(level == RTREE_HEIGHT-1); \
- if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \
- return NULL; \
- } \
- if (RTREE_CTX_NCACHE_L2 > 1) { \
- memmove(&rtree_ctx->l2_cache[1], \
- &rtree_ctx->l2_cache[0], \
- sizeof(rtree_ctx_cache_elm_t) * \
- (RTREE_CTX_NCACHE_L2 - 1)); \
- } \
- size_t slot = rtree_cache_direct_map(key); \
- rtree_ctx->l2_cache[0].leafkey = \
- rtree_ctx->cache[slot].leafkey; \
- rtree_ctx->l2_cache[0].leaf = \
- rtree_ctx->cache[slot].leaf; \
- uintptr_t leafkey = rtree_leafkey(key); \
- rtree_ctx->cache[slot].leafkey = leafkey; \
- rtree_ctx->cache[slot].leaf = leaf; \
- uintptr_t subkey = rtree_subkey(key, level); \
- return &leaf[subkey]; \
- }
- if (RTREE_HEIGHT > 1) {
- RTREE_GET_CHILD(0)
- }
- if (RTREE_HEIGHT > 2) {
- RTREE_GET_CHILD(1)
- }
- if (RTREE_HEIGHT > 3) {
- for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
- RTREE_GET_CHILD(i)
- }
- }
- RTREE_GET_LEAF(RTREE_HEIGHT-1)
-#undef RTREE_GET_CHILD
-#undef RTREE_GET_LEAF
- not_reached();
-}
+static rtree_leaf_elm_t *
+rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
+ rtree_leaf_elm_t *leaf;
-void
-rtree_ctx_data_init(rtree_ctx_t *ctx) {
- for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
- rtree_ctx_cache_elm_t *cache = &ctx->cache[i];
- cache->leafkey = RTREE_LEAFKEY_INVALID;
- cache->leaf = NULL;
- }
- for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
- rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i];
- cache->leafkey = RTREE_LEAFKEY_INVALID;
- cache->leaf = NULL;
- }
+ if (dependent) {
+ leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_RELAXED);
+ } else {
+ leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_ACQUIRE);
+ }
+
+ assert(!dependent || leaf != NULL);
+ return leaf;
}
+
+static rtree_leaf_elm_t *
+rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
+ unsigned level, bool dependent) {
+ rtree_leaf_elm_t *leaf;
+
+ leaf = rtree_child_leaf_tryread(elm, dependent);
+ if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {
+ leaf = rtree_leaf_init(tsdn, rtree, &elm->child);
+ }
+ assert(!dependent || leaf != NULL);
+ return leaf;
+}
+
+rtree_leaf_elm_t *
+rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent, bool init_missing) {
+ rtree_node_elm_t *node;
+ rtree_leaf_elm_t *leaf;
+#if RTREE_HEIGHT > 1
+ node = rtree->root;
+#else
+ leaf = rtree->root;
+#endif
+
+ if (config_debug) {
+ uintptr_t leafkey = rtree_leafkey(key);
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
+ assert(rtree_ctx->cache[i].leafkey != leafkey);
+ }
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
+ assert(rtree_ctx->l2_cache[i].leafkey != leafkey);
+ }
+ }
+
+#define RTREE_GET_CHILD(level) { \
+ assert(level < RTREE_HEIGHT-1); \
+ if (level != 0 && !dependent && \
+ unlikely(!rtree_node_valid(node))) { \
+ return NULL; \
+ } \
+ uintptr_t subkey = rtree_subkey(key, level); \
+ if (level + 2 < RTREE_HEIGHT) { \
+ node = init_missing ? \
+ rtree_child_node_read(tsdn, rtree, \
+ &node[subkey], level, dependent) : \
+ rtree_child_node_tryread(&node[subkey], \
+ dependent); \
+ } else { \
+ leaf = init_missing ? \
+ rtree_child_leaf_read(tsdn, rtree, \
+ &node[subkey], level, dependent) : \
+ rtree_child_leaf_tryread(&node[subkey], \
+ dependent); \
+ } \
+ }
+ /*
+ * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss):
+ * (1) evict last entry in L2 cache; (2) move the collision slot from L1
+ * cache down to L2; and 3) fill L1.
+ */
+#define RTREE_GET_LEAF(level) { \
+ assert(level == RTREE_HEIGHT-1); \
+ if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \
+ return NULL; \
+ } \
+ if (RTREE_CTX_NCACHE_L2 > 1) { \
+ memmove(&rtree_ctx->l2_cache[1], \
+ &rtree_ctx->l2_cache[0], \
+ sizeof(rtree_ctx_cache_elm_t) * \
+ (RTREE_CTX_NCACHE_L2 - 1)); \
+ } \
+ size_t slot = rtree_cache_direct_map(key); \
+ rtree_ctx->l2_cache[0].leafkey = \
+ rtree_ctx->cache[slot].leafkey; \
+ rtree_ctx->l2_cache[0].leaf = \
+ rtree_ctx->cache[slot].leaf; \
+ uintptr_t leafkey = rtree_leafkey(key); \
+ rtree_ctx->cache[slot].leafkey = leafkey; \
+ rtree_ctx->cache[slot].leaf = leaf; \
+ uintptr_t subkey = rtree_subkey(key, level); \
+ return &leaf[subkey]; \
+ }
+ if (RTREE_HEIGHT > 1) {
+ RTREE_GET_CHILD(0)
+ }
+ if (RTREE_HEIGHT > 2) {
+ RTREE_GET_CHILD(1)
+ }
+ if (RTREE_HEIGHT > 3) {
+ for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
+ RTREE_GET_CHILD(i)
+ }
+ }
+ RTREE_GET_LEAF(RTREE_HEIGHT-1)
+#undef RTREE_GET_CHILD
+#undef RTREE_GET_LEAF
+ not_reached();
+}
+
+void
+rtree_ctx_data_init(rtree_ctx_t *ctx) {
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
+ rtree_ctx_cache_elm_t *cache = &ctx->cache[i];
+ cache->leafkey = RTREE_LEAFKEY_INVALID;
+ cache->leaf = NULL;
+ }
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
+ rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i];
+ cache->leafkey = RTREE_LEAFKEY_INVALID;
+ cache->leaf = NULL;
+ }
+}
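
The RTREE_GET_LEAF macro above implements the two-level lookup-cache replacement described in its comment. The following standalone C sketch is not part of the patch; the names, cache sizes, and the cache_elm_t/ctx_t types are illustrative stand-ins for jemalloc's rtree_ctx machinery. It only isolates the same three steps: shift L2 down (evicting its last entry), demote the colliding L1 slot into the front of L2, and fill L1 with the freshly looked-up leaf.

/* Illustrative sketch of the L1/L2 replacement done by RTREE_GET_LEAF. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define NCACHE    16   /* assumed L1 size (direct-mapped) */
#define NCACHE_L2  8   /* assumed L2 size (FIFO-ish)      */

typedef struct {
	uintptr_t leafkey;
	void *leaf;
} cache_elm_t;

typedef struct {
	cache_elm_t cache[NCACHE];
	cache_elm_t l2_cache[NCACHE_L2];
} ctx_t;

static void
cache_replace_on_miss(ctx_t *ctx, size_t slot, uintptr_t leafkey, void *leaf) {
	/* (1) Shift L2 down by one slot, dropping its last entry. */
	memmove(&ctx->l2_cache[1], &ctx->l2_cache[0],
	    sizeof(cache_elm_t) * (NCACHE_L2 - 1));
	/* (2) Demote the colliding L1 slot into the front of L2. */
	ctx->l2_cache[0] = ctx->cache[slot];
	/* (3) Fill the L1 slot with the leaf found by the hard lookup. */
	ctx->cache[slot].leafkey = leafkey;
	ctx->cache[slot].leaf = leaf;
}
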
diff --git a/contrib/libs/jemalloc/src/safety_check.c b/contrib/libs/jemalloc/src/safety_check.c
index 804155dcfc..78bd9912d4 100644
--- a/contrib/libs/jemalloc/src/safety_check.c
+++ b/contrib/libs/jemalloc/src/safety_check.c
@@ -1,24 +1,24 @@
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-static void (*safety_check_abort)(const char *message);
-
-void safety_check_set_abort(void (*abort_fn)(const char *)) {
- safety_check_abort = abort_fn;
-}
-
-void safety_check_fail(const char *format, ...) {
- char buf[MALLOC_PRINTF_BUFSIZE];
-
- va_list ap;
- va_start(ap, format);
- malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap);
- va_end(ap);
-
- if (safety_check_abort == NULL) {
- malloc_write(buf);
- abort();
- } else {
- safety_check_abort(buf);
- }
-}
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+static void (*safety_check_abort)(const char *message);
+
+void safety_check_set_abort(void (*abort_fn)(const char *)) {
+ safety_check_abort = abort_fn;
+}
+
+void safety_check_fail(const char *format, ...) {
+ char buf[MALLOC_PRINTF_BUFSIZE];
+
+ va_list ap;
+ va_start(ap, format);
+ malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap);
+ va_end(ap);
+
+ if (safety_check_abort == NULL) {
+ malloc_write(buf);
+ abort();
+ } else {
+ safety_check_abort(buf);
+ }
+}
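
safety_check.c above exposes a process-wide abort hook: safety_check_set_abort() stores a callback, and safety_check_fail() formats its message and either hands it to that callback or falls back to malloc_write() plus abort(). A minimal usage sketch follows, assuming the internal safety_check declarations are visible to the caller (they live in jemalloc's internal headers, so an out-of-tree program would not normally link against them); my_safety_abort is a hypothetical handler added only for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Prototypes as defined in the patched safety_check.c (internal API). */
void safety_check_set_abort(void (*abort_fn)(const char *));
void safety_check_fail(const char *format, ...);

/* Hypothetical handler: log the formatted message, then terminate. */
static void
my_safety_abort(const char *message) {
	fprintf(stderr, "jemalloc safety check: %s\n", message);
	abort();
}

int
main(void) {
	/* After this call, safety_check_fail(...) routes its formatted
	 * message through my_safety_abort() instead of aborting directly. */
	safety_check_set_abort(my_safety_abort);
	return 0;
}
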
diff --git a/contrib/libs/jemalloc/src/sc.c b/contrib/libs/jemalloc/src/sc.c
index 89ddb6ba6a..e6390ff7b8 100644
--- a/contrib/libs/jemalloc/src/sc.c
+++ b/contrib/libs/jemalloc/src/sc.c
@@ -1,313 +1,313 @@
-#include "jemalloc/internal/jemalloc_preamble.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/bit_util.h"
-#include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/sc.h"
-
-/*
- * This module computes the size classes used to satisfy allocations. The logic
- * here was ported more or less line-by-line from a shell script, and because of
- * that is not the most idiomatic C. Eventually we should fix this, but for now
- * at least the damage is compartmentalized to this file.
- */
-
-sc_data_t sc_data_global;
-
-static size_t
-reg_size_compute(int lg_base, int lg_delta, int ndelta) {
- return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
-}
-
-/* Returns the number of pages in the slab. */
-static int
-slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) {
- size_t page = (ZU(1) << lg_page);
- size_t reg_size = reg_size_compute(lg_base, lg_delta, ndelta);
-
- size_t try_slab_size = page;
- size_t try_nregs = try_slab_size / reg_size;
- size_t perfect_slab_size = 0;
- bool perfect = false;
- /*
- * This loop continues until we find the least common multiple of the
- * page size and size class size. Size classes are all of the form
- * base + ndelta * delta == (ndelta + base/ndelta) * delta, which is
- * (ndelta + ngroup) * delta. The way we choose slabbing strategies
- * means that delta is at most the page size and ndelta < ngroup. So
- * the loop executes for at most 2 * ngroup - 1 iterations, which is
- * also the bound on the number of pages in a slab chosen by default.
- * With the current default settings, this is at most 7.
- */
- while (!perfect) {
- perfect_slab_size = try_slab_size;
- size_t perfect_nregs = try_nregs;
- try_slab_size += page;
- try_nregs = try_slab_size / reg_size;
- if (perfect_slab_size == perfect_nregs * reg_size) {
- perfect = true;
- }
- }
- return (int)(perfect_slab_size / page);
-}
-
-static void
-size_class(
- /* Output. */
- sc_t *sc,
- /* Configuration decisions. */
- int lg_max_lookup, int lg_page, int lg_ngroup,
- /* Inputs specific to the size class. */
- int index, int lg_base, int lg_delta, int ndelta) {
- sc->index = index;
- sc->lg_base = lg_base;
- sc->lg_delta = lg_delta;
- sc->ndelta = ndelta;
- sc->psz = (reg_size_compute(lg_base, lg_delta, ndelta)
- % (ZU(1) << lg_page) == 0);
- size_t size = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
- if (index == 0) {
- assert(!sc->psz);
- }
- if (size < (ZU(1) << (lg_page + lg_ngroup))) {
- sc->bin = true;
- sc->pgs = slab_size(lg_page, lg_base, lg_delta, ndelta);
- } else {
- sc->bin = false;
- sc->pgs = 0;
- }
- if (size <= (ZU(1) << lg_max_lookup)) {
- sc->lg_delta_lookup = lg_delta;
- } else {
- sc->lg_delta_lookup = 0;
- }
-}
-
-static void
-size_classes(
- /* Output. */
- sc_data_t *sc_data,
- /* Determined by the system. */
- size_t lg_ptr_size, int lg_quantum,
- /* Configuration decisions. */
- int lg_tiny_min, int lg_max_lookup, int lg_page, int lg_ngroup) {
- int ptr_bits = (1 << lg_ptr_size) * 8;
- int ngroup = (1 << lg_ngroup);
- int ntiny = 0;
- int nlbins = 0;
- int lg_tiny_maxclass = (unsigned)-1;
- int nbins = 0;
- int npsizes = 0;
-
- int index = 0;
-
- int ndelta = 0;
- int lg_base = lg_tiny_min;
- int lg_delta = lg_base;
-
- /* Outputs that we update as we go. */
- size_t lookup_maxclass = 0;
- size_t small_maxclass = 0;
- int lg_large_minclass = 0;
- size_t large_maxclass = 0;
-
- /* Tiny size classes. */
- while (lg_base < lg_quantum) {
- sc_t *sc = &sc_data->sc[index];
- size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
- lg_base, lg_delta, ndelta);
- if (sc->lg_delta_lookup != 0) {
- nlbins = index + 1;
- }
- if (sc->psz) {
- npsizes++;
- }
- if (sc->bin) {
- nbins++;
- }
- ntiny++;
- /* Final written value is correct. */
- lg_tiny_maxclass = lg_base;
- index++;
- lg_delta = lg_base;
- lg_base++;
- }
-
- /* First non-tiny (pseudo) group. */
- if (ntiny != 0) {
- sc_t *sc = &sc_data->sc[index];
- /*
- * See the note in sc.h; the first non-tiny size class has an
- * unusual encoding.
- */
- lg_base--;
- ndelta = 1;
- size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
- lg_base, lg_delta, ndelta);
- index++;
- lg_base++;
- lg_delta++;
- if (sc->psz) {
- npsizes++;
- }
- if (sc->bin) {
- nbins++;
- }
- }
- while (ndelta < ngroup) {
- sc_t *sc = &sc_data->sc[index];
- size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
- lg_base, lg_delta, ndelta);
- index++;
- ndelta++;
- if (sc->psz) {
- npsizes++;
- }
- if (sc->bin) {
- nbins++;
- }
- }
-
- /* All remaining groups. */
- lg_base = lg_base + lg_ngroup;
- while (lg_base < ptr_bits - 1) {
- ndelta = 1;
- int ndelta_limit;
- if (lg_base == ptr_bits - 2) {
- ndelta_limit = ngroup - 1;
- } else {
- ndelta_limit = ngroup;
- }
- while (ndelta <= ndelta_limit) {
- sc_t *sc = &sc_data->sc[index];
- size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
- lg_base, lg_delta, ndelta);
- if (sc->lg_delta_lookup != 0) {
- nlbins = index + 1;
- /* Final written value is correct. */
- lookup_maxclass = (ZU(1) << lg_base)
- + (ZU(ndelta) << lg_delta);
- }
- if (sc->psz) {
- npsizes++;
- }
- if (sc->bin) {
- nbins++;
- /* Final written value is correct. */
- small_maxclass = (ZU(1) << lg_base)
- + (ZU(ndelta) << lg_delta);
- if (lg_ngroup > 0) {
- lg_large_minclass = lg_base + 1;
- } else {
- lg_large_minclass = lg_base + 2;
- }
- }
- large_maxclass = (ZU(1) << lg_base)
- + (ZU(ndelta) << lg_delta);
- index++;
- ndelta++;
- }
- lg_base++;
- lg_delta++;
- }
- /* Additional outputs. */
- int nsizes = index;
- unsigned lg_ceil_nsizes = lg_ceil(nsizes);
-
- /* Fill in the output data. */
- sc_data->ntiny = ntiny;
- sc_data->nlbins = nlbins;
- sc_data->nbins = nbins;
- sc_data->nsizes = nsizes;
- sc_data->lg_ceil_nsizes = lg_ceil_nsizes;
- sc_data->npsizes = npsizes;
- sc_data->lg_tiny_maxclass = lg_tiny_maxclass;
- sc_data->lookup_maxclass = lookup_maxclass;
- sc_data->small_maxclass = small_maxclass;
- sc_data->lg_large_minclass = lg_large_minclass;
- sc_data->large_minclass = (ZU(1) << lg_large_minclass);
- sc_data->large_maxclass = large_maxclass;
-
- /*
- * We compute these values in two ways:
- * - Incrementally, as above.
- * - In macros, in sc.h.
- * The computation is easier when done incrementally, but putting it in
- * a constant makes it available to the fast paths without having to
- * touch the extra global cacheline. We assert, however, that the two
- * computations are equivalent.
- */
- assert(sc_data->npsizes == SC_NPSIZES);
- assert(sc_data->lg_tiny_maxclass == SC_LG_TINY_MAXCLASS);
- assert(sc_data->small_maxclass == SC_SMALL_MAXCLASS);
- assert(sc_data->large_minclass == SC_LARGE_MINCLASS);
- assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS);
- assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS);
-
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/sc.h"
+
+/*
+ * This module computes the size classes used to satisfy allocations. The logic
+ * here was ported more or less line-by-line from a shell script, and because of
+ * that is not the most idiomatic C. Eventually we should fix this, but for now
+ * at least the damage is compartmentalized to this file.
+ */
+
+sc_data_t sc_data_global;
+
+static size_t
+reg_size_compute(int lg_base, int lg_delta, int ndelta) {
+ return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
+}
+
+/* Returns the number of pages in the slab. */
+static int
+slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) {
+ size_t page = (ZU(1) << lg_page);
+ size_t reg_size = reg_size_compute(lg_base, lg_delta, ndelta);
+
+ size_t try_slab_size = page;
+ size_t try_nregs = try_slab_size / reg_size;
+ size_t perfect_slab_size = 0;
+ bool perfect = false;
/*
- * In the allocation fastpath, we want to assume that we can
- * unconditionally subtract the requested allocation size from
- * a ssize_t, and detect passing through 0 correctly. This
- * results in optimal generated code. For this to work, the
- * maximum allocation size must be less than SSIZE_MAX.
- */
- assert(SC_LARGE_MAXCLASS < SSIZE_MAX);
-}
-
-void
-sc_data_init(sc_data_t *sc_data) {
- assert(!sc_data->initialized);
-
- int lg_max_lookup = 12;
-
- size_classes(sc_data, LG_SIZEOF_PTR, LG_QUANTUM, SC_LG_TINY_MIN,
- lg_max_lookup, LG_PAGE, 2);
-
- sc_data->initialized = true;
-}
-
-static void
-sc_data_update_sc_slab_size(sc_t *sc, size_t reg_size, size_t pgs_guess) {
- size_t min_pgs = reg_size / PAGE;
- if (reg_size % PAGE != 0) {
- min_pgs++;
- }
- /*
- * BITMAP_MAXBITS is actually determined by putting the smallest
- * possible size-class on one page, so this can never be 0.
- */
- size_t max_pgs = BITMAP_MAXBITS * reg_size / PAGE;
-
- assert(min_pgs <= max_pgs);
- assert(min_pgs > 0);
- assert(max_pgs >= 1);
- if (pgs_guess < min_pgs) {
- sc->pgs = (int)min_pgs;
- } else if (pgs_guess > max_pgs) {
- sc->pgs = (int)max_pgs;
- } else {
- sc->pgs = (int)pgs_guess;
- }
-}
-
-void
-sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) {
- assert(data->initialized);
- for (int i = 0; i < data->nsizes; i++) {
- sc_t *sc = &data->sc[i];
- if (!sc->bin) {
- break;
- }
- size_t reg_size = reg_size_compute(sc->lg_base, sc->lg_delta,
- sc->ndelta);
- if (begin <= reg_size && reg_size <= end) {
- sc_data_update_sc_slab_size(sc, reg_size, pgs);
- }
- }
-}
-
-void
-sc_boot(sc_data_t *data) {
- sc_data_init(data);
-}
+ * This loop continues until we find the least common multiple of the
+ * page size and size class size. Size classes are all of the form
+ * base + ndelta * delta == (ndelta + base/ndelta) * delta, which is
+ * (ndelta + ngroup) * delta. The way we choose slabbing strategies
+ * means that delta is at most the page size and ndelta < ngroup. So
+ * the loop executes for at most 2 * ngroup - 1 iterations, which is
+ * also the bound on the number of pages in a slab chosen by default.
+ * With the current default settings, this is at most 7.
+ */
+ while (!perfect) {
+ perfect_slab_size = try_slab_size;
+ size_t perfect_nregs = try_nregs;
+ try_slab_size += page;
+ try_nregs = try_slab_size / reg_size;
+ if (perfect_slab_size == perfect_nregs * reg_size) {
+ perfect = true;
+ }
+ }
+ return (int)(perfect_slab_size / page);
+}
+
+static void
+size_class(
+ /* Output. */
+ sc_t *sc,
+ /* Configuration decisions. */
+ int lg_max_lookup, int lg_page, int lg_ngroup,
+ /* Inputs specific to the size class. */
+ int index, int lg_base, int lg_delta, int ndelta) {
+ sc->index = index;
+ sc->lg_base = lg_base;
+ sc->lg_delta = lg_delta;
+ sc->ndelta = ndelta;
+ sc->psz = (reg_size_compute(lg_base, lg_delta, ndelta)
+ % (ZU(1) << lg_page) == 0);
+ size_t size = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
+ if (index == 0) {
+ assert(!sc->psz);
+ }
+ if (size < (ZU(1) << (lg_page + lg_ngroup))) {
+ sc->bin = true;
+ sc->pgs = slab_size(lg_page, lg_base, lg_delta, ndelta);
+ } else {
+ sc->bin = false;
+ sc->pgs = 0;
+ }
+ if (size <= (ZU(1) << lg_max_lookup)) {
+ sc->lg_delta_lookup = lg_delta;
+ } else {
+ sc->lg_delta_lookup = 0;
+ }
+}
+
+static void
+size_classes(
+ /* Output. */
+ sc_data_t *sc_data,
+ /* Determined by the system. */
+ size_t lg_ptr_size, int lg_quantum,
+ /* Configuration decisions. */
+ int lg_tiny_min, int lg_max_lookup, int lg_page, int lg_ngroup) {
+ int ptr_bits = (1 << lg_ptr_size) * 8;
+ int ngroup = (1 << lg_ngroup);
+ int ntiny = 0;
+ int nlbins = 0;
+ int lg_tiny_maxclass = (unsigned)-1;
+ int nbins = 0;
+ int npsizes = 0;
+
+ int index = 0;
+
+ int ndelta = 0;
+ int lg_base = lg_tiny_min;
+ int lg_delta = lg_base;
+
+ /* Outputs that we update as we go. */
+ size_t lookup_maxclass = 0;
+ size_t small_maxclass = 0;
+ int lg_large_minclass = 0;
+ size_t large_maxclass = 0;
+
+ /* Tiny size classes. */
+ while (lg_base < lg_quantum) {
+ sc_t *sc = &sc_data->sc[index];
+ size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
+ lg_base, lg_delta, ndelta);
+ if (sc->lg_delta_lookup != 0) {
+ nlbins = index + 1;
+ }
+ if (sc->psz) {
+ npsizes++;
+ }
+ if (sc->bin) {
+ nbins++;
+ }
+ ntiny++;
+ /* Final written value is correct. */
+ lg_tiny_maxclass = lg_base;
+ index++;
+ lg_delta = lg_base;
+ lg_base++;
+ }
+
+ /* First non-tiny (pseudo) group. */
+ if (ntiny != 0) {
+ sc_t *sc = &sc_data->sc[index];
+ /*
+ * See the note in sc.h; the first non-tiny size class has an
+ * unusual encoding.
+ */
+ lg_base--;
+ ndelta = 1;
+ size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
+ lg_base, lg_delta, ndelta);
+ index++;
+ lg_base++;
+ lg_delta++;
+ if (sc->psz) {
+ npsizes++;
+ }
+ if (sc->bin) {
+ nbins++;
+ }
+ }
+ while (ndelta < ngroup) {
+ sc_t *sc = &sc_data->sc[index];
+ size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
+ lg_base, lg_delta, ndelta);
+ index++;
+ ndelta++;
+ if (sc->psz) {
+ npsizes++;
+ }
+ if (sc->bin) {
+ nbins++;
+ }
+ }
+
+ /* All remaining groups. */
+ lg_base = lg_base + lg_ngroup;
+ while (lg_base < ptr_bits - 1) {
+ ndelta = 1;
+ int ndelta_limit;
+ if (lg_base == ptr_bits - 2) {
+ ndelta_limit = ngroup - 1;
+ } else {
+ ndelta_limit = ngroup;
+ }
+ while (ndelta <= ndelta_limit) {
+ sc_t *sc = &sc_data->sc[index];
+ size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
+ lg_base, lg_delta, ndelta);
+ if (sc->lg_delta_lookup != 0) {
+ nlbins = index + 1;
+ /* Final written value is correct. */
+ lookup_maxclass = (ZU(1) << lg_base)
+ + (ZU(ndelta) << lg_delta);
+ }
+ if (sc->psz) {
+ npsizes++;
+ }
+ if (sc->bin) {
+ nbins++;
+ /* Final written value is correct. */
+ small_maxclass = (ZU(1) << lg_base)
+ + (ZU(ndelta) << lg_delta);
+ if (lg_ngroup > 0) {
+ lg_large_minclass = lg_base + 1;
+ } else {
+ lg_large_minclass = lg_base + 2;
+ }
+ }
+ large_maxclass = (ZU(1) << lg_base)
+ + (ZU(ndelta) << lg_delta);
+ index++;
+ ndelta++;
+ }
+ lg_base++;
+ lg_delta++;
+ }
+ /* Additional outputs. */
+ int nsizes = index;
+ unsigned lg_ceil_nsizes = lg_ceil(nsizes);
+
+ /* Fill in the output data. */
+ sc_data->ntiny = ntiny;
+ sc_data->nlbins = nlbins;
+ sc_data->nbins = nbins;
+ sc_data->nsizes = nsizes;
+ sc_data->lg_ceil_nsizes = lg_ceil_nsizes;
+ sc_data->npsizes = npsizes;
+ sc_data->lg_tiny_maxclass = lg_tiny_maxclass;
+ sc_data->lookup_maxclass = lookup_maxclass;
+ sc_data->small_maxclass = small_maxclass;
+ sc_data->lg_large_minclass = lg_large_minclass;
+ sc_data->large_minclass = (ZU(1) << lg_large_minclass);
+ sc_data->large_maxclass = large_maxclass;
+
+ /*
+ * We compute these values in two ways:
+ * - Incrementally, as above.
+ * - In macros, in sc.h.
+ * The computation is easier when done incrementally, but putting it in
+ * a constant makes it available to the fast paths without having to
+ * touch the extra global cacheline. We assert, however, that the two
+ * computations are equivalent.
+ */
+ assert(sc_data->npsizes == SC_NPSIZES);
+ assert(sc_data->lg_tiny_maxclass == SC_LG_TINY_MAXCLASS);
+ assert(sc_data->small_maxclass == SC_SMALL_MAXCLASS);
+ assert(sc_data->large_minclass == SC_LARGE_MINCLASS);
+ assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS);
+ assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS);
+
+ /*
+ * In the allocation fastpath, we want to assume that we can
+ * unconditionally subtract the requested allocation size from
+ * a ssize_t, and detect passing through 0 correctly. This
+ * results in optimal generated code. For this to work, the
+ * maximum allocation size must be less than SSIZE_MAX.
+ */
+ assert(SC_LARGE_MAXCLASS < SSIZE_MAX);
+}
+
+void
+sc_data_init(sc_data_t *sc_data) {
+ assert(!sc_data->initialized);
+
+ int lg_max_lookup = 12;
+
+ size_classes(sc_data, LG_SIZEOF_PTR, LG_QUANTUM, SC_LG_TINY_MIN,
+ lg_max_lookup, LG_PAGE, 2);
+
+ sc_data->initialized = true;
+}
+
+static void
+sc_data_update_sc_slab_size(sc_t *sc, size_t reg_size, size_t pgs_guess) {
+ size_t min_pgs = reg_size / PAGE;
+ if (reg_size % PAGE != 0) {
+ min_pgs++;
+ }
+ /*
+ * BITMAP_MAXBITS is actually determined by putting the smallest
+ * possible size-class on one page, so this can never be 0.
+ */
+ size_t max_pgs = BITMAP_MAXBITS * reg_size / PAGE;
+
+ assert(min_pgs <= max_pgs);
+ assert(min_pgs > 0);
+ assert(max_pgs >= 1);
+ if (pgs_guess < min_pgs) {
+ sc->pgs = (int)min_pgs;
+ } else if (pgs_guess > max_pgs) {
+ sc->pgs = (int)max_pgs;
+ } else {
+ sc->pgs = (int)pgs_guess;
+ }
+}
+
+void
+sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) {
+ assert(data->initialized);
+ for (int i = 0; i < data->nsizes; i++) {
+ sc_t *sc = &data->sc[i];
+ if (!sc->bin) {
+ break;
+ }
+ size_t reg_size = reg_size_compute(sc->lg_base, sc->lg_delta,
+ sc->ndelta);
+ if (begin <= reg_size && reg_size <= end) {
+ sc_data_update_sc_slab_size(sc, reg_size, pgs);
+ }
+ }
+}
+
+void
+sc_boot(sc_data_t *data) {
+ sc_data_init(data);
+}
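
slab_size() above searches for the least common multiple of the page size and the region size, expressed in pages. The standalone sketch below is not part of the patch; it re-derives the same answer with a simplified but equivalent loop and a worked example chosen for illustration: an 80-byte class (2^6 + 1*2^4) with 4 KiB pages needs LCM(80, 4096) = 20480 bytes, i.e. a 5-page slab.

#include <stdio.h>
#include <stddef.h>

/* Same formula as reg_size_compute() in sc.c: base + ndelta * delta. */
static size_t
reg_size_compute(int lg_base, int lg_delta, int ndelta) {
	return ((size_t)1 << lg_base) + ((size_t)ndelta << lg_delta);
}

/* Equivalent to slab_size() in sc.c: the smallest multiple of the page
 * size that is an exact multiple of reg_size, returned in pages. */
static int
slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) {
	size_t page = (size_t)1 << lg_page;
	size_t reg_size = reg_size_compute(lg_base, lg_delta, ndelta);
	size_t slab = page;
	while (slab % reg_size != 0) {
		slab += page;
	}
	return (int)(slab / page);
}

int
main(void) {
	/* 2^6 + 1*2^4 = 80 bytes; LCM(80, 4096) = 20480 = 5 pages. */
	printf("reg_size=%zu pgs=%d\n", reg_size_compute(6, 4, 1),
	    slab_size(12, 6, 4, 1));
	return 0;
}
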
diff --git a/contrib/libs/jemalloc/src/stats.c b/contrib/libs/jemalloc/src/stats.c
index 118e05d291..354802c11b 100644
--- a/contrib/libs/jemalloc/src/stats.c
+++ b/contrib/libs/jemalloc/src/stats.c
@@ -1,1457 +1,1457 @@
-#define JEMALLOC_STATS_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/emitter.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_prof.h"
-
-const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
-#define OP(mtx) #mtx,
- MUTEX_PROF_GLOBAL_MUTEXES
-#undef OP
-};
-
-const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
-#define OP(mtx) #mtx,
- MUTEX_PROF_ARENA_MUTEXES
-#undef OP
-};
-
-#define CTL_GET(n, v, t) do { \
+#define JEMALLOC_STATS_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_prof.h"
+
+const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
+#define OP(mtx) #mtx,
+ MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+};
+
+const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
+#define OP(mtx) #mtx,
+ MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+};
+
+#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
- xmallctl(n, (void *)v, &sz, NULL, 0); \
+ xmallctl(n, (void *)v, &sz, NULL, 0); \
} while (0)
-#define CTL_M2_GET(n, i, v, t) do { \
- size_t mib[CTL_MAX_DEPTH]; \
+#define CTL_M2_GET(n, i, v, t) do { \
+ size_t mib[CTL_MAX_DEPTH]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
- mib[2] = (i); \
- xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+ mib[2] = (i); \
+ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
} while (0)
-#define CTL_M2_M4_GET(n, i, j, v, t) do { \
- size_t mib[CTL_MAX_DEPTH]; \
+#define CTL_M2_M4_GET(n, i, j, v, t) do { \
+ size_t mib[CTL_MAX_DEPTH]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
- mib[2] = (i); \
- mib[4] = (j); \
- xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+ mib[2] = (i); \
+ mib[4] = (j); \
+ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
} while (0)
/******************************************************************************/
/* Data. */
-bool opt_stats_print = false;
-char opt_stats_print_opts[stats_print_tot_num_options+1] = "";
+bool opt_stats_print = false;
+char opt_stats_print_opts[stats_print_tot_num_options+1] = "";
/******************************************************************************/
-static uint64_t
-rate_per_second(uint64_t value, uint64_t uptime_ns) {
- uint64_t billion = 1000000000;
- if (uptime_ns == 0 || value == 0) {
- return 0;
- }
- if (uptime_ns < billion) {
- return value;
- } else {
- uint64_t uptime_s = uptime_ns / billion;
- return value / uptime_s;
- }
-}
-
-/* Calculate x.yyy and output a string (takes a fixed sized char array). */
-static bool
-get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
- if (divisor == 0 || dividend > divisor) {
- /* The rate is not supposed to be greater than 1. */
- return true;
- }
- if (dividend > 0) {
- assert(UINT64_MAX / dividend >= 1000);
- }
-
- unsigned n = (unsigned)((dividend * 1000) / divisor);
- if (n < 10) {
- malloc_snprintf(str, 6, "0.00%u", n);
- } else if (n < 100) {
- malloc_snprintf(str, 6, "0.0%u", n);
- } else if (n < 1000) {
- malloc_snprintf(str, 6, "0.%u", n);
- } else {
- malloc_snprintf(str, 6, "1");
- }
-
- return false;
-}
-
-#define MUTEX_CTL_STR_MAX_LENGTH 128
-static void
-gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix,
- const char *mutex, const char *counter) {
- malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter);
-}
-
-static void
-mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
- emitter_col_t *name,
- emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
- emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) {
- mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0;
- mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0;
-
- emitter_col_t *col;
-
- if (name != NULL) {
- emitter_col_init(name, row);
- name->justify = emitter_justify_left;
- name->width = 21;
- name->type = emitter_type_title;
- name->str_val = table_name;
- }
-
-#define WIDTH_uint32_t 12
-#define WIDTH_uint64_t 16
-#define OP(counter, counter_type, human, derived, base_counter) \
- col = &col_##counter_type[k_##counter_type]; \
- ++k_##counter_type; \
- emitter_col_init(col, row); \
- col->justify = emitter_justify_right; \
- col->width = derived ? 8 : WIDTH_##counter_type; \
- col->type = emitter_type_title; \
- col->str_val = human;
- MUTEX_PROF_COUNTERS
-#undef OP
-#undef WIDTH_uint32_t
-#undef WIDTH_uint64_t
- col_uint64_t[mutex_counter_total_wait_time_ps].width = 10;
-}
-
-static void
-mutex_stats_read_global(const char *name, emitter_col_t *col_name,
- emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
- emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
- uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
-
- col_name->str_val = name;
-
- emitter_col_t *dst;
-#define EMITTER_TYPE_uint32_t emitter_type_uint32
-#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
- dst = &col_##counter_type[mutex_counter_##counter]; \
- dst->type = EMITTER_TYPE_##counter_type; \
- if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "mutexes", name, #counter); \
- CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
- }
- MUTEX_PROF_COUNTERS
-#undef OP
-#undef EMITTER_TYPE_uint32_t
-#undef EMITTER_TYPE_uint64_t
-}
-
-static void
-mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind,
- const char *name, emitter_col_t *col_name,
- emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
- emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
- uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
-
- col_name->str_val = name;
-
- emitter_col_t *dst;
-#define EMITTER_TYPE_uint32_t emitter_type_uint32
-#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
- dst = &col_##counter_type[mutex_counter_##counter]; \
- dst->type = EMITTER_TYPE_##counter_type; \
- if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\
- CTL_M2_GET(cmd, arena_ind, (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
- }
- MUTEX_PROF_COUNTERS
-#undef OP
-#undef EMITTER_TYPE_uint32_t
-#undef EMITTER_TYPE_uint64_t
-}
-
-static void
-mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind,
- emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
- emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
- uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
- emitter_col_t *dst;
-
-#define EMITTER_TYPE_uint32_t emitter_type_uint32
-#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
- dst = &col_##counter_type[mutex_counter_##counter]; \
- dst->type = EMITTER_TYPE_##counter_type; \
- if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "arenas.0.bins.0","mutex", #counter); \
- CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \
- (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
- }
- MUTEX_PROF_COUNTERS
-#undef OP
-#undef EMITTER_TYPE_uint32_t
-#undef EMITTER_TYPE_uint64_t
-}
-
-/* "row" can be NULL to avoid emitting in table mode. */
+static uint64_t
+rate_per_second(uint64_t value, uint64_t uptime_ns) {
+ uint64_t billion = 1000000000;
+ if (uptime_ns == 0 || value == 0) {
+ return 0;
+ }
+ if (uptime_ns < billion) {
+ return value;
+ } else {
+ uint64_t uptime_s = uptime_ns / billion;
+ return value / uptime_s;
+ }
+}
+
+/* Calculate x.yyy and output a string (takes a fixed sized char array). */
+static bool
+get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
+ if (divisor == 0 || dividend > divisor) {
+ /* The rate is not supposed to be greater than 1. */
+ return true;
+ }
+ if (dividend > 0) {
+ assert(UINT64_MAX / dividend >= 1000);
+ }
+
+ unsigned n = (unsigned)((dividend * 1000) / divisor);
+ if (n < 10) {
+ malloc_snprintf(str, 6, "0.00%u", n);
+ } else if (n < 100) {
+ malloc_snprintf(str, 6, "0.0%u", n);
+ } else if (n < 1000) {
+ malloc_snprintf(str, 6, "0.%u", n);
+ } else {
+ malloc_snprintf(str, 6, "1");
+ }
+
+ return false;
+}
+
+#define MUTEX_CTL_STR_MAX_LENGTH 128
static void
-mutex_stats_emit(emitter_t *emitter, emitter_row_t *row,
- emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
- emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) {
- if (row != NULL) {
- emitter_table_row(emitter, row);
- }
-
- mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0;
- mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0;
-
- emitter_col_t *col;
-
-#define EMITTER_TYPE_uint32_t emitter_type_uint32
-#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, type, human, derived, base_counter) \
- if (!derived) { \
- col = &col_##type[k_##type]; \
- ++k_##type; \
- emitter_json_kv(emitter, #counter, EMITTER_TYPE_##type, \
- (const void *)&col->bool_val); \
- }
- MUTEX_PROF_COUNTERS;
-#undef OP
-#undef EMITTER_TYPE_uint32_t
-#undef EMITTER_TYPE_uint64_t
-}
-
-#define COL(row_name, column_name, left_or_right, col_width, etype) \
- emitter_col_t col_##column_name; \
- emitter_col_init(&col_##column_name, &row_name); \
- col_##column_name.justify = emitter_justify_##left_or_right; \
- col_##column_name.width = col_width; \
- col_##column_name.type = emitter_type_##etype;
-
-#define COL_HDR(row_name, column_name, human, left_or_right, col_width, etype) \
- COL(row_name, column_name, left_or_right, col_width, etype) \
- emitter_col_t header_##column_name; \
- emitter_col_init(&header_##column_name, &header_##row_name); \
- header_##column_name.justify = emitter_justify_##left_or_right; \
- header_##column_name.width = col_width; \
- header_##column_name.type = emitter_type_title; \
- header_##column_name.str_val = human ? human : #column_name;
-
-
-static void
-stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t uptime) {
+gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix,
+ const char *mutex, const char *counter) {
+ malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter);
+}
+
+static void
+mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
+ emitter_col_t *name,
+ emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
+ emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) {
+ mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0;
+ mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0;
+
+ emitter_col_t *col;
+
+ if (name != NULL) {
+ emitter_col_init(name, row);
+ name->justify = emitter_justify_left;
+ name->width = 21;
+ name->type = emitter_type_title;
+ name->str_val = table_name;
+ }
+
+#define WIDTH_uint32_t 12
+#define WIDTH_uint64_t 16
+#define OP(counter, counter_type, human, derived, base_counter) \
+ col = &col_##counter_type[k_##counter_type]; \
+ ++k_##counter_type; \
+ emitter_col_init(col, row); \
+ col->justify = emitter_justify_right; \
+ col->width = derived ? 8 : WIDTH_##counter_type; \
+ col->type = emitter_type_title; \
+ col->str_val = human;
+ MUTEX_PROF_COUNTERS
+#undef OP
+#undef WIDTH_uint32_t
+#undef WIDTH_uint64_t
+ col_uint64_t[mutex_counter_total_wait_time_ps].width = 10;
+}
+
+static void
+mutex_stats_read_global(const char *name, emitter_col_t *col_name,
+ emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
+ emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
+ uint64_t uptime) {
+ char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+
+ col_name->str_val = name;
+
+ emitter_col_t *dst;
+#define EMITTER_TYPE_uint32_t emitter_type_uint32
+#define EMITTER_TYPE_uint64_t emitter_type_uint64
+#define OP(counter, counter_type, human, derived, base_counter) \
+ dst = &col_##counter_type[mutex_counter_##counter]; \
+ dst->type = EMITTER_TYPE_##counter_type; \
+ if (!derived) { \
+ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
+ "mutexes", name, #counter); \
+ CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ }
+ MUTEX_PROF_COUNTERS
+#undef OP
+#undef EMITTER_TYPE_uint32_t
+#undef EMITTER_TYPE_uint64_t
+}
+
+static void
+mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind,
+ const char *name, emitter_col_t *col_name,
+ emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
+ emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
+ uint64_t uptime) {
+ char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+
+ col_name->str_val = name;
+
+ emitter_col_t *dst;
+#define EMITTER_TYPE_uint32_t emitter_type_uint32
+#define EMITTER_TYPE_uint64_t emitter_type_uint64
+#define OP(counter, counter_type, human, derived, base_counter) \
+ dst = &col_##counter_type[mutex_counter_##counter]; \
+ dst->type = EMITTER_TYPE_##counter_type; \
+ if (!derived) { \
+ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
+ "arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\
+ CTL_M2_GET(cmd, arena_ind, (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ }
+ MUTEX_PROF_COUNTERS
+#undef OP
+#undef EMITTER_TYPE_uint32_t
+#undef EMITTER_TYPE_uint64_t
+}
+
+static void
+mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind,
+ emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
+ emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
+ uint64_t uptime) {
+ char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+ emitter_col_t *dst;
+
+#define EMITTER_TYPE_uint32_t emitter_type_uint32
+#define EMITTER_TYPE_uint64_t emitter_type_uint64
+#define OP(counter, counter_type, human, derived, base_counter) \
+ dst = &col_##counter_type[mutex_counter_##counter]; \
+ dst->type = EMITTER_TYPE_##counter_type; \
+ if (!derived) { \
+ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
+ "arenas.0.bins.0","mutex", #counter); \
+ CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \
+ (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ }
+ MUTEX_PROF_COUNTERS
+#undef OP
+#undef EMITTER_TYPE_uint32_t
+#undef EMITTER_TYPE_uint64_t
+}
+
+/* "row" can be NULL to avoid emitting in table mode. */
+static void
+mutex_stats_emit(emitter_t *emitter, emitter_row_t *row,
+ emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
+ emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) {
+ if (row != NULL) {
+ emitter_table_row(emitter, row);
+ }
+
+ mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0;
+ mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0;
+
+ emitter_col_t *col;
+
+#define EMITTER_TYPE_uint32_t emitter_type_uint32
+#define EMITTER_TYPE_uint64_t emitter_type_uint64
+#define OP(counter, type, human, derived, base_counter) \
+ if (!derived) { \
+ col = &col_##type[k_##type]; \
+ ++k_##type; \
+ emitter_json_kv(emitter, #counter, EMITTER_TYPE_##type, \
+ (const void *)&col->bool_val); \
+ }
+ MUTEX_PROF_COUNTERS;
+#undef OP
+#undef EMITTER_TYPE_uint32_t
+#undef EMITTER_TYPE_uint64_t
+}
+
+#define COL(row_name, column_name, left_or_right, col_width, etype) \
+ emitter_col_t col_##column_name; \
+ emitter_col_init(&col_##column_name, &row_name); \
+ col_##column_name.justify = emitter_justify_##left_or_right; \
+ col_##column_name.width = col_width; \
+ col_##column_name.type = emitter_type_##etype;
+
+#define COL_HDR(row_name, column_name, human, left_or_right, col_width, etype) \
+ COL(row_name, column_name, left_or_right, col_width, etype) \
+ emitter_col_t header_##column_name; \
+ emitter_col_init(&header_##column_name, &header_##row_name); \
+ header_##column_name.justify = emitter_justify_##left_or_right; \
+ header_##column_name.width = col_width; \
+ header_##column_name.type = emitter_type_title; \
+ header_##column_name.str_val = human ? human : #column_name;
+
+
+static void
+stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t uptime) {
size_t page;
- bool in_gap, in_gap_prev;
- unsigned nbins, j;
+ bool in_gap, in_gap_prev;
+ unsigned nbins, j;
CTL_GET("arenas.page", &page, size_t);
CTL_GET("arenas.nbins", &nbins, unsigned);
- emitter_row_t header_row;
- emitter_row_init(&header_row);
-
- emitter_row_t row;
- emitter_row_init(&row);
-
- COL_HDR(row, size, NULL, right, 20, size)
- COL_HDR(row, ind, NULL, right, 4, unsigned)
- COL_HDR(row, allocated, NULL, right, 13, uint64)
- COL_HDR(row, nmalloc, NULL, right, 13, uint64)
- COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64)
- COL_HDR(row, ndalloc, NULL, right, 13, uint64)
- COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
- COL_HDR(row, nrequests, NULL, right, 13, uint64)
- COL_HDR(row, nrequests_ps, "(#/sec)", right, 10, uint64)
- COL_HDR(row, nshards, NULL, right, 9, unsigned)
- COL_HDR(row, curregs, NULL, right, 13, size)
- COL_HDR(row, curslabs, NULL, right, 13, size)
- COL_HDR(row, nonfull_slabs, NULL, right, 15, size)
- COL_HDR(row, regs, NULL, right, 5, unsigned)
- COL_HDR(row, pgs, NULL, right, 4, size)
- /* To buffer a right- and left-justified column. */
- COL_HDR(row, justify_spacer, NULL, right, 1, title)
- COL_HDR(row, util, NULL, right, 6, title)
- COL_HDR(row, nfills, NULL, right, 13, uint64)
- COL_HDR(row, nfills_ps, "(#/sec)", right, 8, uint64)
- COL_HDR(row, nflushes, NULL, right, 13, uint64)
- COL_HDR(row, nflushes_ps, "(#/sec)", right, 8, uint64)
- COL_HDR(row, nslabs, NULL, right, 13, uint64)
- COL_HDR(row, nreslabs, NULL, right, 13, uint64)
- COL_HDR(row, nreslabs_ps, "(#/sec)", right, 8, uint64)
-
- /* Don't want to actually print the name. */
- header_justify_spacer.str_val = " ";
- col_justify_spacer.str_val = " ";
-
- emitter_col_t col_mutex64[mutex_prof_num_uint64_t_counters];
- emitter_col_t col_mutex32[mutex_prof_num_uint32_t_counters];
-
- emitter_col_t header_mutex64[mutex_prof_num_uint64_t_counters];
- emitter_col_t header_mutex32[mutex_prof_num_uint32_t_counters];
-
- if (mutex) {
- mutex_stats_init_cols(&row, NULL, NULL, col_mutex64,
- col_mutex32);
- mutex_stats_init_cols(&header_row, NULL, NULL, header_mutex64,
- header_mutex32);
- }
-
- /*
- * We print a "bins:" header as part of the table row; we need to adjust
- * the header size column to compensate.
- */
- header_size.width -=5;
- emitter_table_printf(emitter, "bins:");
- emitter_table_row(emitter, &header_row);
- emitter_json_array_kv_begin(emitter, "bins");
-
- for (j = 0, in_gap = false; j < nbins; j++) {
- uint64_t nslabs;
- size_t reg_size, slab_size, curregs;
- size_t curslabs;
- size_t nonfull_slabs;
- uint32_t nregs, nshards;
- uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
- uint64_t nreslabs;
-
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
- uint64_t);
- in_gap_prev = in_gap;
- in_gap = (nslabs == 0);
-
- if (in_gap_prev && !in_gap) {
- emitter_table_printf(emitter,
- " ---\n");
- }
-
- CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
- CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
- CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);
- CTL_M2_GET("arenas.bin.0.nshards", j, &nshards, uint32_t);
-
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
- size_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
- &nrequests, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
- size_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nonfull_slabs", i, j, &nonfull_slabs,
- size_t);
-
- if (mutex) {
- mutex_stats_read_arena_bin(i, j, col_mutex64,
- col_mutex32, uptime);
- }
-
- emitter_json_object_begin(emitter);
- emitter_json_kv(emitter, "nmalloc", emitter_type_uint64,
- &nmalloc);
- emitter_json_kv(emitter, "ndalloc", emitter_type_uint64,
- &ndalloc);
- emitter_json_kv(emitter, "curregs", emitter_type_size,
- &curregs);
- emitter_json_kv(emitter, "nrequests", emitter_type_uint64,
- &nrequests);
- emitter_json_kv(emitter, "nfills", emitter_type_uint64,
- &nfills);
- emitter_json_kv(emitter, "nflushes", emitter_type_uint64,
- &nflushes);
- emitter_json_kv(emitter, "nreslabs", emitter_type_uint64,
- &nreslabs);
- emitter_json_kv(emitter, "curslabs", emitter_type_size,
- &curslabs);
- emitter_json_kv(emitter, "nonfull_slabs", emitter_type_size,
- &nonfull_slabs);
- if (mutex) {
- emitter_json_object_kv_begin(emitter, "mutex");
- mutex_stats_emit(emitter, NULL, col_mutex64,
- col_mutex32);
- emitter_json_object_end(emitter);
- }
- emitter_json_object_end(emitter);
-
- size_t availregs = nregs * curslabs;
- char util[6];
- if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, util))
- {
- if (availregs == 0) {
- malloc_snprintf(util, sizeof(util), "1");
- } else if (curregs > availregs) {
- /*
- * Race detected: the counters were read in
- * separate mallctl calls and concurrent
- * operations happened in between. In this case
- * no meaningful utilization can be computed.
- */
- malloc_snprintf(util, sizeof(util), " race");
+ emitter_row_t header_row;
+ emitter_row_init(&header_row);
+
+ emitter_row_t row;
+ emitter_row_init(&row);
+
+ COL_HDR(row, size, NULL, right, 20, size)
+ COL_HDR(row, ind, NULL, right, 4, unsigned)
+ COL_HDR(row, allocated, NULL, right, 13, uint64)
+ COL_HDR(row, nmalloc, NULL, right, 13, uint64)
+ COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64)
+ COL_HDR(row, ndalloc, NULL, right, 13, uint64)
+ COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
+ COL_HDR(row, nrequests, NULL, right, 13, uint64)
+ COL_HDR(row, nrequests_ps, "(#/sec)", right, 10, uint64)
+ COL_HDR(row, nshards, NULL, right, 9, unsigned)
+ COL_HDR(row, curregs, NULL, right, 13, size)
+ COL_HDR(row, curslabs, NULL, right, 13, size)
+ COL_HDR(row, nonfull_slabs, NULL, right, 15, size)
+ COL_HDR(row, regs, NULL, right, 5, unsigned)
+ COL_HDR(row, pgs, NULL, right, 4, size)
+ /* To buffer a right- and left-justified column. */
+ COL_HDR(row, justify_spacer, NULL, right, 1, title)
+ COL_HDR(row, util, NULL, right, 6, title)
+ COL_HDR(row, nfills, NULL, right, 13, uint64)
+ COL_HDR(row, nfills_ps, "(#/sec)", right, 8, uint64)
+ COL_HDR(row, nflushes, NULL, right, 13, uint64)
+ COL_HDR(row, nflushes_ps, "(#/sec)", right, 8, uint64)
+ COL_HDR(row, nslabs, NULL, right, 13, uint64)
+ COL_HDR(row, nreslabs, NULL, right, 13, uint64)
+ COL_HDR(row, nreslabs_ps, "(#/sec)", right, 8, uint64)
+
+ /* Don't want to actually print the name. */
+ header_justify_spacer.str_val = " ";
+ col_justify_spacer.str_val = " ";
+
+ emitter_col_t col_mutex64[mutex_prof_num_uint64_t_counters];
+ emitter_col_t col_mutex32[mutex_prof_num_uint32_t_counters];
+
+ emitter_col_t header_mutex64[mutex_prof_num_uint64_t_counters];
+ emitter_col_t header_mutex32[mutex_prof_num_uint32_t_counters];
+
+ if (mutex) {
+ mutex_stats_init_cols(&row, NULL, NULL, col_mutex64,
+ col_mutex32);
+ mutex_stats_init_cols(&header_row, NULL, NULL, header_mutex64,
+ header_mutex32);
+ }
+
+ /*
+ * We print a "bins:" header as part of the table row; we need to adjust
+ * the header size column to compensate.
+ */
+ header_size.width -=5;
+ emitter_table_printf(emitter, "bins:");
+ emitter_table_row(emitter, &header_row);
+ emitter_json_array_kv_begin(emitter, "bins");
+
+ for (j = 0, in_gap = false; j < nbins; j++) {
+ uint64_t nslabs;
+ size_t reg_size, slab_size, curregs;
+ size_t curslabs;
+ size_t nonfull_slabs;
+ uint32_t nregs, nshards;
+ uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t nreslabs;
+
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
+ uint64_t);
+ in_gap_prev = in_gap;
+ in_gap = (nslabs == 0);
+
+ if (in_gap_prev && !in_gap) {
+ emitter_table_printf(emitter,
+ " ---\n");
+ }
+
+ CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
+ CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
+ CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);
+ CTL_M2_GET("arenas.bin.0.nshards", j, &nshards, uint32_t);
+
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nonfull_slabs", i, j, &nonfull_slabs,
+ size_t);
+
+ if (mutex) {
+ mutex_stats_read_arena_bin(i, j, col_mutex64,
+ col_mutex32, uptime);
+ }
+
+ emitter_json_object_begin(emitter);
+ emitter_json_kv(emitter, "nmalloc", emitter_type_uint64,
+ &nmalloc);
+ emitter_json_kv(emitter, "ndalloc", emitter_type_uint64,
+ &ndalloc);
+ emitter_json_kv(emitter, "curregs", emitter_type_size,
+ &curregs);
+ emitter_json_kv(emitter, "nrequests", emitter_type_uint64,
+ &nrequests);
+ emitter_json_kv(emitter, "nfills", emitter_type_uint64,
+ &nfills);
+ emitter_json_kv(emitter, "nflushes", emitter_type_uint64,
+ &nflushes);
+ emitter_json_kv(emitter, "nreslabs", emitter_type_uint64,
+ &nreslabs);
+ emitter_json_kv(emitter, "curslabs", emitter_type_size,
+ &curslabs);
+ emitter_json_kv(emitter, "nonfull_slabs", emitter_type_size,
+ &nonfull_slabs);
+ if (mutex) {
+ emitter_json_object_kv_begin(emitter, "mutex");
+ mutex_stats_emit(emitter, NULL, col_mutex64,
+ col_mutex32);
+ emitter_json_object_end(emitter);
+ }
+ emitter_json_object_end(emitter);
+
+ size_t availregs = nregs * curslabs;
+ char util[6];
+ if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, util))
+ {
+ if (availregs == 0) {
+ malloc_snprintf(util, sizeof(util), "1");
+ } else if (curregs > availregs) {
+ /*
+ * Race detected: the counters were read in
+ * separate mallctl calls and concurrent
+ * operations happened in between. In this case
+ * no meaningful utilization can be computed.
+ */
+ malloc_snprintf(util, sizeof(util), " race");
} else {
- not_reached();
+ not_reached();
}
}
-
- col_size.size_val = reg_size;
- col_ind.unsigned_val = j;
- col_allocated.size_val = curregs * reg_size;
- col_nmalloc.uint64_val = nmalloc;
- col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime);
- col_ndalloc.uint64_val = ndalloc;
- col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
- col_nrequests.uint64_val = nrequests;
- col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
- col_nshards.unsigned_val = nshards;
- col_curregs.size_val = curregs;
- col_curslabs.size_val = curslabs;
- col_nonfull_slabs.size_val = nonfull_slabs;
- col_regs.unsigned_val = nregs;
- col_pgs.size_val = slab_size / page;
- col_util.str_val = util;
- col_nfills.uint64_val = nfills;
- col_nfills_ps.uint64_val = rate_per_second(nfills, uptime);
- col_nflushes.uint64_val = nflushes;
- col_nflushes_ps.uint64_val = rate_per_second(nflushes, uptime);
- col_nslabs.uint64_val = nslabs;
- col_nreslabs.uint64_val = nreslabs;
- col_nreslabs_ps.uint64_val = rate_per_second(nreslabs, uptime);
-
- /*
- * Note that mutex columns were initialized above, if mutex ==
- * true.
- */
-
- emitter_table_row(emitter, &row);
+
+ col_size.size_val = reg_size;
+ col_ind.unsigned_val = j;
+ col_allocated.size_val = curregs * reg_size;
+ col_nmalloc.uint64_val = nmalloc;
+ col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime);
+ col_ndalloc.uint64_val = ndalloc;
+ col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
+ col_nrequests.uint64_val = nrequests;
+ col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
+ col_nshards.unsigned_val = nshards;
+ col_curregs.size_val = curregs;
+ col_curslabs.size_val = curslabs;
+ col_nonfull_slabs.size_val = nonfull_slabs;
+ col_regs.unsigned_val = nregs;
+ col_pgs.size_val = slab_size / page;
+ col_util.str_val = util;
+ col_nfills.uint64_val = nfills;
+ col_nfills_ps.uint64_val = rate_per_second(nfills, uptime);
+ col_nflushes.uint64_val = nflushes;
+ col_nflushes_ps.uint64_val = rate_per_second(nflushes, uptime);
+ col_nslabs.uint64_val = nslabs;
+ col_nreslabs.uint64_val = nreslabs;
+ col_nreslabs_ps.uint64_val = rate_per_second(nreslabs, uptime);
+
+ /*
+ * Note that mutex columns were initialized above, if mutex ==
+ * true.
+ */
+
+ emitter_table_row(emitter, &row);
}
- emitter_json_array_end(emitter); /* Close "bins". */
-
- if (in_gap) {
- emitter_table_printf(emitter, " ---\n");
+ emitter_json_array_end(emitter); /* Close "bins". */
+
+ if (in_gap) {
+ emitter_table_printf(emitter, " ---\n");
}
}
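The per-bin util column above is derived from two separate mallctl reads: curregs (regions currently allocated from the bin) divided by availregs = nregs * curslabs (regions the current slabs could hold). Because the two counters are read by separate calls, the loop special-cases availregs == 0 and the curregs > availregs race instead of trusting the ratio. A minimal sketch of that helper's contract, with the hypothetical name format_util standing in for the real get_rate_str (whose exact body is not shown in this hunk):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Render dividend/divisor as a short ratio string such as "0.973".
     * Returns true when no meaningful ratio exists (divisor == 0, or the
     * counters raced so dividend > divisor); the caller then prints "1"
     * or " race", exactly as the loop above does.
     */
    static bool
    format_util(uint64_t dividend, uint64_t divisor, char buf[6]) {
        if (divisor == 0 || dividend > divisor) {
            return true;
        }
        if (dividend == divisor) {
            snprintf(buf, 6, "1");
        } else {
            unsigned thousandths = (unsigned)((dividend * 1000) / divisor);
            snprintf(buf, 6, "0.%03u", thousandths);
        }
        return false;
    }

It would be called as format_util((uint64_t)curregs, (uint64_t)availregs, util), mirroring the get_rate_str call in the bins loop.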
static void
-stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
- unsigned nbins, nlextents, j;
- bool in_gap, in_gap_prev;
-
- CTL_GET("arenas.nbins", &nbins, unsigned);
- CTL_GET("arenas.nlextents", &nlextents, unsigned);
-
- emitter_row_t header_row;
- emitter_row_init(&header_row);
- emitter_row_t row;
- emitter_row_init(&row);
-
- COL_HDR(row, size, NULL, right, 20, size)
- COL_HDR(row, ind, NULL, right, 4, unsigned)
- COL_HDR(row, allocated, NULL, right, 13, size)
- COL_HDR(row, nmalloc, NULL, right, 13, uint64)
- COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64)
- COL_HDR(row, ndalloc, NULL, right, 13, uint64)
- COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
- COL_HDR(row, nrequests, NULL, right, 13, uint64)
- COL_HDR(row, nrequests_ps, "(#/sec)", right, 8, uint64)
- COL_HDR(row, curlextents, NULL, right, 13, size)
-
- /* As with bins, we label the large extents table. */
- header_size.width -= 6;
- emitter_table_printf(emitter, "large:");
- emitter_table_row(emitter, &header_row);
- emitter_json_array_kv_begin(emitter, "lextents");
-
- for (j = 0, in_gap = false; j < nlextents; j++) {
+stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
+ unsigned nbins, nlextents, j;
+ bool in_gap, in_gap_prev;
+
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ CTL_GET("arenas.nlextents", &nlextents, unsigned);
+
+ emitter_row_t header_row;
+ emitter_row_init(&header_row);
+ emitter_row_t row;
+ emitter_row_init(&row);
+
+ COL_HDR(row, size, NULL, right, 20, size)
+ COL_HDR(row, ind, NULL, right, 4, unsigned)
+ COL_HDR(row, allocated, NULL, right, 13, size)
+ COL_HDR(row, nmalloc, NULL, right, 13, uint64)
+ COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64)
+ COL_HDR(row, ndalloc, NULL, right, 13, uint64)
+ COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
+ COL_HDR(row, nrequests, NULL, right, 13, uint64)
+ COL_HDR(row, nrequests_ps, "(#/sec)", right, 8, uint64)
+ COL_HDR(row, curlextents, NULL, right, 13, size)
+
+ /* As with bins, we label the large extents table. */
+ header_size.width -= 6;
+ emitter_table_printf(emitter, "large:");
+ emitter_table_row(emitter, &header_row);
+ emitter_json_array_kv_begin(emitter, "lextents");
+
+ for (j = 0, in_gap = false; j < nlextents; j++) {
uint64_t nmalloc, ndalloc, nrequests;
- size_t lextent_size, curlextents;
-
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
- &nmalloc, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
- &ndalloc, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
- &nrequests, uint64_t);
- in_gap_prev = in_gap;
- in_gap = (nrequests == 0);
-
- if (in_gap_prev && !in_gap) {
- emitter_table_printf(emitter,
- " ---\n");
- }
-
- CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j,
- &curlextents, size_t);
-
- emitter_json_object_begin(emitter);
- emitter_json_kv(emitter, "curlextents", emitter_type_size,
- &curlextents);
- emitter_json_object_end(emitter);
-
- col_size.size_val = lextent_size;
- col_ind.unsigned_val = nbins + j;
- col_allocated.size_val = curlextents * lextent_size;
- col_nmalloc.uint64_val = nmalloc;
- col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime);
- col_ndalloc.uint64_val = ndalloc;
- col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
- col_nrequests.uint64_val = nrequests;
- col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
- col_curlextents.size_val = curlextents;
-
- if (!in_gap) {
- emitter_table_row(emitter, &row);
+ size_t lextent_size, curlextents;
+
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
+ &nmalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
+ &ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ in_gap_prev = in_gap;
+ in_gap = (nrequests == 0);
+
+ if (in_gap_prev && !in_gap) {
+ emitter_table_printf(emitter,
+ " ---\n");
}
+
+ CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j,
+ &curlextents, size_t);
+
+ emitter_json_object_begin(emitter);
+ emitter_json_kv(emitter, "curlextents", emitter_type_size,
+ &curlextents);
+ emitter_json_object_end(emitter);
+
+ col_size.size_val = lextent_size;
+ col_ind.unsigned_val = nbins + j;
+ col_allocated.size_val = curlextents * lextent_size;
+ col_nmalloc.uint64_val = nmalloc;
+ col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime);
+ col_ndalloc.uint64_val = ndalloc;
+ col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
+ col_nrequests.uint64_val = nrequests;
+ col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
+ col_curlextents.size_val = curlextents;
+
+ if (!in_gap) {
+ emitter_table_row(emitter, &row);
+ }
}
- emitter_json_array_end(emitter); /* Close "lextents". */
- if (in_gap) {
- emitter_table_printf(emitter, " ---\n");
- }
+ emitter_json_array_end(emitter); /* Close "lextents". */
+ if (in_gap) {
+ emitter_table_printf(emitter, " ---\n");
+ }
}
static void
-stats_arena_extents_print(emitter_t *emitter, unsigned i) {
- unsigned j;
- bool in_gap, in_gap_prev;
- emitter_row_t header_row;
- emitter_row_init(&header_row);
- emitter_row_t row;
- emitter_row_init(&row);
-
- COL_HDR(row, size, NULL, right, 20, size)
- COL_HDR(row, ind, NULL, right, 4, unsigned)
- COL_HDR(row, ndirty, NULL, right, 13, size)
- COL_HDR(row, dirty, NULL, right, 13, size)
- COL_HDR(row, nmuzzy, NULL, right, 13, size)
- COL_HDR(row, muzzy, NULL, right, 13, size)
- COL_HDR(row, nretained, NULL, right, 13, size)
- COL_HDR(row, retained, NULL, right, 13, size)
- COL_HDR(row, ntotal, NULL, right, 13, size)
- COL_HDR(row, total, NULL, right, 13, size)
-
- /* Label this section. */
- header_size.width -= 8;
- emitter_table_printf(emitter, "extents:");
- emitter_table_row(emitter, &header_row);
- emitter_json_array_kv_begin(emitter, "extents");
-
- in_gap = false;
- for (j = 0; j < SC_NPSIZES; j++) {
- size_t ndirty, nmuzzy, nretained, total, dirty_bytes,
- muzzy_bytes, retained_bytes, total_bytes;
- CTL_M2_M4_GET("stats.arenas.0.extents.0.ndirty", i, j,
- &ndirty, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.nmuzzy", i, j,
- &nmuzzy, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.nretained", i, j,
- &nretained, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.dirty_bytes", i, j,
- &dirty_bytes, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.muzzy_bytes", i, j,
- &muzzy_bytes, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.retained_bytes", i, j,
- &retained_bytes, size_t);
- total = ndirty + nmuzzy + nretained;
- total_bytes = dirty_bytes + muzzy_bytes + retained_bytes;
-
- in_gap_prev = in_gap;
- in_gap = (total == 0);
-
- if (in_gap_prev && !in_gap) {
- emitter_table_printf(emitter,
- " ---\n");
- }
-
- emitter_json_object_begin(emitter);
- emitter_json_kv(emitter, "ndirty", emitter_type_size, &ndirty);
- emitter_json_kv(emitter, "nmuzzy", emitter_type_size, &nmuzzy);
- emitter_json_kv(emitter, "nretained", emitter_type_size,
- &nretained);
-
- emitter_json_kv(emitter, "dirty_bytes", emitter_type_size,
- &dirty_bytes);
- emitter_json_kv(emitter, "muzzy_bytes", emitter_type_size,
- &muzzy_bytes);
- emitter_json_kv(emitter, "retained_bytes", emitter_type_size,
- &retained_bytes);
- emitter_json_object_end(emitter);
-
- col_size.size_val = sz_pind2sz(j);
- col_ind.size_val = j;
- col_ndirty.size_val = ndirty;
- col_dirty.size_val = dirty_bytes;
- col_nmuzzy.size_val = nmuzzy;
- col_muzzy.size_val = muzzy_bytes;
- col_nretained.size_val = nretained;
- col_retained.size_val = retained_bytes;
- col_ntotal.size_val = total;
- col_total.size_val = total_bytes;
-
- if (!in_gap) {
- emitter_table_row(emitter, &row);
- }
- }
- emitter_json_array_end(emitter); /* Close "extents". */
- if (in_gap) {
- emitter_table_printf(emitter, " ---\n");
- }
-}
-
-static void
-stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptime) {
- emitter_row_t row;
- emitter_col_t col_name;
- emitter_col_t col64[mutex_prof_num_uint64_t_counters];
- emitter_col_t col32[mutex_prof_num_uint32_t_counters];
-
- emitter_row_init(&row);
- mutex_stats_init_cols(&row, "", &col_name, col64, col32);
-
- emitter_json_object_kv_begin(emitter, "mutexes");
- emitter_table_row(emitter, &row);
-
- for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes;
- i++) {
- const char *name = arena_mutex_names[i];
- emitter_json_object_kv_begin(emitter, name);
- mutex_stats_read_arena(arena_ind, i, name, &col_name, col64,
- col32, uptime);
- mutex_stats_emit(emitter, &row, col64, col32);
- emitter_json_object_end(emitter); /* Close the mutex dict. */
- }
- emitter_json_object_end(emitter); /* End "mutexes". */
-}
-
-static void
-stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
- bool mutex, bool extents) {
+stats_arena_extents_print(emitter_t *emitter, unsigned i) {
+ unsigned j;
+ bool in_gap, in_gap_prev;
+ emitter_row_t header_row;
+ emitter_row_init(&header_row);
+ emitter_row_t row;
+ emitter_row_init(&row);
+
+ COL_HDR(row, size, NULL, right, 20, size)
+ COL_HDR(row, ind, NULL, right, 4, unsigned)
+ COL_HDR(row, ndirty, NULL, right, 13, size)
+ COL_HDR(row, dirty, NULL, right, 13, size)
+ COL_HDR(row, nmuzzy, NULL, right, 13, size)
+ COL_HDR(row, muzzy, NULL, right, 13, size)
+ COL_HDR(row, nretained, NULL, right, 13, size)
+ COL_HDR(row, retained, NULL, right, 13, size)
+ COL_HDR(row, ntotal, NULL, right, 13, size)
+ COL_HDR(row, total, NULL, right, 13, size)
+
+ /* Label this section. */
+ header_size.width -= 8;
+ emitter_table_printf(emitter, "extents:");
+ emitter_table_row(emitter, &header_row);
+ emitter_json_array_kv_begin(emitter, "extents");
+
+ in_gap = false;
+ for (j = 0; j < SC_NPSIZES; j++) {
+ size_t ndirty, nmuzzy, nretained, total, dirty_bytes,
+ muzzy_bytes, retained_bytes, total_bytes;
+ CTL_M2_M4_GET("stats.arenas.0.extents.0.ndirty", i, j,
+ &ndirty, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.extents.0.nmuzzy", i, j,
+ &nmuzzy, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.extents.0.nretained", i, j,
+ &nretained, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.extents.0.dirty_bytes", i, j,
+ &dirty_bytes, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.extents.0.muzzy_bytes", i, j,
+ &muzzy_bytes, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.extents.0.retained_bytes", i, j,
+ &retained_bytes, size_t);
+ total = ndirty + nmuzzy + nretained;
+ total_bytes = dirty_bytes + muzzy_bytes + retained_bytes;
+
+ in_gap_prev = in_gap;
+ in_gap = (total == 0);
+
+ if (in_gap_prev && !in_gap) {
+ emitter_table_printf(emitter,
+ " ---\n");
+ }
+
+ emitter_json_object_begin(emitter);
+ emitter_json_kv(emitter, "ndirty", emitter_type_size, &ndirty);
+ emitter_json_kv(emitter, "nmuzzy", emitter_type_size, &nmuzzy);
+ emitter_json_kv(emitter, "nretained", emitter_type_size,
+ &nretained);
+
+ emitter_json_kv(emitter, "dirty_bytes", emitter_type_size,
+ &dirty_bytes);
+ emitter_json_kv(emitter, "muzzy_bytes", emitter_type_size,
+ &muzzy_bytes);
+ emitter_json_kv(emitter, "retained_bytes", emitter_type_size,
+ &retained_bytes);
+ emitter_json_object_end(emitter);
+
+ col_size.size_val = sz_pind2sz(j);
+ col_ind.size_val = j;
+ col_ndirty.size_val = ndirty;
+ col_dirty.size_val = dirty_bytes;
+ col_nmuzzy.size_val = nmuzzy;
+ col_muzzy.size_val = muzzy_bytes;
+ col_nretained.size_val = nretained;
+ col_retained.size_val = retained_bytes;
+ col_ntotal.size_val = total;
+ col_total.size_val = total_bytes;
+
+ if (!in_gap) {
+ emitter_table_row(emitter, &row);
+ }
+ }
+ emitter_json_array_end(emitter); /* Close "extents". */
+ if (in_gap) {
+ emitter_table_printf(emitter, " ---\n");
+ }
+}
+
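All three per-size-class tables (bins, lextents, extents) share the same elision pattern: rows whose counters are all zero are skipped, and a single "---" separator stands in for each run of skipped rows, both when the run ends inside the table and when it runs off the end. A self-contained toy reproduction of the pattern (the nrequests array is invented input, not real data):

    #include <stdbool.h>
    #include <stdio.h>

    int
    main(void) {
        /* Stand-in for per-row request counters. */
        unsigned long nrequests[] = {5, 0, 0, 0, 7, 2, 0};
        size_t nrows = sizeof(nrequests) / sizeof(nrequests[0]);
        bool in_gap = false;

        for (size_t j = 0; j < nrows; j++) {
            bool in_gap_prev = in_gap;
            in_gap = (nrequests[j] == 0);
            if (in_gap_prev && !in_gap) {
                /* A run of empty rows just ended; mark it once. */
                printf(" ---\n");
            }
            if (!in_gap) {
                printf("row %zu: %lu requests\n", j, nrequests[j]);
            }
        }
        if (in_gap) {
            /* The table ended while still inside a gap. */
            printf(" ---\n");
        }
        return 0;
    }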
+static void
+stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptime) {
+ emitter_row_t row;
+ emitter_col_t col_name;
+ emitter_col_t col64[mutex_prof_num_uint64_t_counters];
+ emitter_col_t col32[mutex_prof_num_uint32_t_counters];
+
+ emitter_row_init(&row);
+ mutex_stats_init_cols(&row, "", &col_name, col64, col32);
+
+ emitter_json_object_kv_begin(emitter, "mutexes");
+ emitter_table_row(emitter, &row);
+
+ for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes;
+ i++) {
+ const char *name = arena_mutex_names[i];
+ emitter_json_object_kv_begin(emitter, name);
+ mutex_stats_read_arena(arena_ind, i, name, &col_name, col64,
+ col32, uptime);
+ mutex_stats_emit(emitter, &row, col64, col32);
+ emitter_json_object_end(emitter); /* Close the mutex dict. */
+ }
+ emitter_json_object_end(emitter); /* End "mutexes". */
+}
+
+static void
+stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
+ bool mutex, bool extents) {
unsigned nthreads;
const char *dss;
- ssize_t dirty_decay_ms, muzzy_decay_ms;
- size_t page, pactive, pdirty, pmuzzy, mapped, retained;
- size_t base, internal, resident, metadata_thp, extent_avail;
- uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
- uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
+ ssize_t dirty_decay_ms, muzzy_decay_ms;
+ size_t page, pactive, pdirty, pmuzzy, mapped, retained;
+ size_t base, internal, resident, metadata_thp, extent_avail;
+ uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
+ uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
size_t small_allocated;
- uint64_t small_nmalloc, small_ndalloc, small_nrequests, small_nfills,
- small_nflushes;
+ uint64_t small_nmalloc, small_ndalloc, small_nrequests, small_nfills,
+ small_nflushes;
size_t large_allocated;
- uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills,
- large_nflushes;
- size_t tcache_bytes, abandoned_vm;
- uint64_t uptime;
+ uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills,
+ large_nflushes;
+ size_t tcache_bytes, abandoned_vm;
+ uint64_t uptime;
CTL_GET("arenas.page", &page, size_t);
- CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
- emitter_kv(emitter, "nthreads", "assigned threads",
- emitter_type_unsigned, &nthreads);
-
- CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t);
- emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64,
- &uptime);
-
- CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
- emitter_kv(emitter, "dss", "dss allocation precedence",
- emitter_type_string, &dss);
-
- CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms,
- ssize_t);
- CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms,
- ssize_t);
- CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
- CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
- CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t);
- CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t);
- CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise,
- uint64_t);
- CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t);
- CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t);
- CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise,
- uint64_t);
- CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t);
-
- emitter_row_t decay_row;
- emitter_row_init(&decay_row);
-
- /* JSON-style emission. */
- emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize,
- &dirty_decay_ms);
- emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize,
- &muzzy_decay_ms);
-
- emitter_json_kv(emitter, "pactive", emitter_type_size, &pactive);
- emitter_json_kv(emitter, "pdirty", emitter_type_size, &pdirty);
- emitter_json_kv(emitter, "pmuzzy", emitter_type_size, &pmuzzy);
-
- emitter_json_kv(emitter, "dirty_npurge", emitter_type_uint64,
- &dirty_npurge);
- emitter_json_kv(emitter, "dirty_nmadvise", emitter_type_uint64,
- &dirty_nmadvise);
- emitter_json_kv(emitter, "dirty_purged", emitter_type_uint64,
- &dirty_purged);
-
- emitter_json_kv(emitter, "muzzy_npurge", emitter_type_uint64,
- &muzzy_npurge);
- emitter_json_kv(emitter, "muzzy_nmadvise", emitter_type_uint64,
- &muzzy_nmadvise);
- emitter_json_kv(emitter, "muzzy_purged", emitter_type_uint64,
- &muzzy_purged);
-
- /* Table-style emission. */
- COL(decay_row, decay_type, right, 9, title);
- col_decay_type.str_val = "decaying:";
-
- COL(decay_row, decay_time, right, 6, title);
- col_decay_time.str_val = "time";
-
- COL(decay_row, decay_npages, right, 13, title);
- col_decay_npages.str_val = "npages";
-
- COL(decay_row, decay_sweeps, right, 13, title);
- col_decay_sweeps.str_val = "sweeps";
-
- COL(decay_row, decay_madvises, right, 13, title);
- col_decay_madvises.str_val = "madvises";
-
- COL(decay_row, decay_purged, right, 13, title);
- col_decay_purged.str_val = "purged";
-
- /* Title row. */
- emitter_table_row(emitter, &decay_row);
-
- /* Dirty row. */
- col_decay_type.str_val = "dirty:";
-
- if (dirty_decay_ms >= 0) {
- col_decay_time.type = emitter_type_ssize;
- col_decay_time.ssize_val = dirty_decay_ms;
- } else {
- col_decay_time.type = emitter_type_title;
- col_decay_time.str_val = "N/A";
- }
-
- col_decay_npages.type = emitter_type_size;
- col_decay_npages.size_val = pdirty;
-
- col_decay_sweeps.type = emitter_type_uint64;
- col_decay_sweeps.uint64_val = dirty_npurge;
-
- col_decay_madvises.type = emitter_type_uint64;
- col_decay_madvises.uint64_val = dirty_nmadvise;
-
- col_decay_purged.type = emitter_type_uint64;
- col_decay_purged.uint64_val = dirty_purged;
-
- emitter_table_row(emitter, &decay_row);
-
- /* Muzzy row. */
- col_decay_type.str_val = "muzzy:";
-
- if (muzzy_decay_ms >= 0) {
- col_decay_time.type = emitter_type_ssize;
- col_decay_time.ssize_val = muzzy_decay_ms;
- } else {
- col_decay_time.type = emitter_type_title;
- col_decay_time.str_val = "N/A";
- }
-
- col_decay_npages.type = emitter_type_size;
- col_decay_npages.size_val = pmuzzy;
-
- col_decay_sweeps.type = emitter_type_uint64;
- col_decay_sweeps.uint64_val = muzzy_npurge;
-
- col_decay_madvises.type = emitter_type_uint64;
- col_decay_madvises.uint64_val = muzzy_nmadvise;
-
- col_decay_purged.type = emitter_type_uint64;
- col_decay_purged.uint64_val = muzzy_purged;
-
- emitter_table_row(emitter, &decay_row);
-
- /* Small / large / total allocation counts. */
- emitter_row_t alloc_count_row;
- emitter_row_init(&alloc_count_row);
-
- COL(alloc_count_row, count_title, left, 21, title);
- col_count_title.str_val = "";
-
- COL(alloc_count_row, count_allocated, right, 16, title);
- col_count_allocated.str_val = "allocated";
-
- COL(alloc_count_row, count_nmalloc, right, 16, title);
- col_count_nmalloc.str_val = "nmalloc";
- COL(alloc_count_row, count_nmalloc_ps, right, 8, title);
- col_count_nmalloc_ps.str_val = "(#/sec)";
-
- COL(alloc_count_row, count_ndalloc, right, 16, title);
- col_count_ndalloc.str_val = "ndalloc";
- COL(alloc_count_row, count_ndalloc_ps, right, 8, title);
- col_count_ndalloc_ps.str_val = "(#/sec)";
-
- COL(alloc_count_row, count_nrequests, right, 16, title);
- col_count_nrequests.str_val = "nrequests";
- COL(alloc_count_row, count_nrequests_ps, right, 10, title);
- col_count_nrequests_ps.str_val = "(#/sec)";
-
- COL(alloc_count_row, count_nfills, right, 16, title);
- col_count_nfills.str_val = "nfill";
- COL(alloc_count_row, count_nfills_ps, right, 10, title);
- col_count_nfills_ps.str_val = "(#/sec)";
-
- COL(alloc_count_row, count_nflushes, right, 16, title);
- col_count_nflushes.str_val = "nflush";
- COL(alloc_count_row, count_nflushes_ps, right, 10, title);
- col_count_nflushes_ps.str_val = "(#/sec)";
-
- emitter_table_row(emitter, &alloc_count_row);
-
- col_count_nmalloc_ps.type = emitter_type_uint64;
- col_count_ndalloc_ps.type = emitter_type_uint64;
- col_count_nrequests_ps.type = emitter_type_uint64;
- col_count_nfills_ps.type = emitter_type_uint64;
- col_count_nflushes_ps.type = emitter_type_uint64;
-
-#define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype) \
- CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i, \
- &small_or_large##_##name, valtype##_t); \
- emitter_json_kv(emitter, #name, emitter_type_##valtype, \
- &small_or_large##_##name); \
- col_count_##name.type = emitter_type_##valtype; \
- col_count_##name.valtype##_val = small_or_large##_##name;
-
- emitter_json_object_kv_begin(emitter, "small");
- col_count_title.str_val = "small:";
-
- GET_AND_EMIT_ALLOC_STAT(small, allocated, size)
- GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64)
- col_count_nmalloc_ps.uint64_val =
- rate_per_second(col_count_nmalloc.uint64_val, uptime);
- GET_AND_EMIT_ALLOC_STAT(small, ndalloc, uint64)
- col_count_ndalloc_ps.uint64_val =
- rate_per_second(col_count_ndalloc.uint64_val, uptime);
- GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64)
- col_count_nrequests_ps.uint64_val =
- rate_per_second(col_count_nrequests.uint64_val, uptime);
- GET_AND_EMIT_ALLOC_STAT(small, nfills, uint64)
- col_count_nfills_ps.uint64_val =
- rate_per_second(col_count_nfills.uint64_val, uptime);
- GET_AND_EMIT_ALLOC_STAT(small, nflushes, uint64)
- col_count_nflushes_ps.uint64_val =
- rate_per_second(col_count_nflushes.uint64_val, uptime);
-
- emitter_table_row(emitter, &alloc_count_row);
- emitter_json_object_end(emitter); /* Close "small". */
-
- emitter_json_object_kv_begin(emitter, "large");
- col_count_title.str_val = "large:";
-
- GET_AND_EMIT_ALLOC_STAT(large, allocated, size)
- GET_AND_EMIT_ALLOC_STAT(large, nmalloc, uint64)
- col_count_nmalloc_ps.uint64_val =
- rate_per_second(col_count_nmalloc.uint64_val, uptime);
- GET_AND_EMIT_ALLOC_STAT(large, ndalloc, uint64)
- col_count_ndalloc_ps.uint64_val =
- rate_per_second(col_count_ndalloc.uint64_val, uptime);
- GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64)
- col_count_nrequests_ps.uint64_val =
- rate_per_second(col_count_nrequests.uint64_val, uptime);
- GET_AND_EMIT_ALLOC_STAT(large, nfills, uint64)
- col_count_nfills_ps.uint64_val =
- rate_per_second(col_count_nfills.uint64_val, uptime);
- GET_AND_EMIT_ALLOC_STAT(large, nflushes, uint64)
- col_count_nflushes_ps.uint64_val =
- rate_per_second(col_count_nflushes.uint64_val, uptime);
-
- emitter_table_row(emitter, &alloc_count_row);
- emitter_json_object_end(emitter); /* Close "large". */
-
-#undef GET_AND_EMIT_ALLOC_STAT
-
- /* Aggregated small + large stats are emitted only in table mode. */
- col_count_title.str_val = "total:";
- col_count_allocated.size_val = small_allocated + large_allocated;
- col_count_nmalloc.uint64_val = small_nmalloc + large_nmalloc;
- col_count_ndalloc.uint64_val = small_ndalloc + large_ndalloc;
- col_count_nrequests.uint64_val = small_nrequests + large_nrequests;
- col_count_nfills.uint64_val = small_nfills + large_nfills;
- col_count_nflushes.uint64_val = small_nflushes + large_nflushes;
- col_count_nmalloc_ps.uint64_val =
- rate_per_second(col_count_nmalloc.uint64_val, uptime);
- col_count_ndalloc_ps.uint64_val =
- rate_per_second(col_count_ndalloc.uint64_val, uptime);
- col_count_nrequests_ps.uint64_val =
- rate_per_second(col_count_nrequests.uint64_val, uptime);
- col_count_nfills_ps.uint64_val =
- rate_per_second(col_count_nfills.uint64_val, uptime);
- col_count_nflushes_ps.uint64_val =
- rate_per_second(col_count_nflushes.uint64_val, uptime);
- emitter_table_row(emitter, &alloc_count_row);
-
- emitter_row_t mem_count_row;
- emitter_row_init(&mem_count_row);
-
- emitter_col_t mem_count_title;
- emitter_col_init(&mem_count_title, &mem_count_row);
- mem_count_title.justify = emitter_justify_left;
- mem_count_title.width = 21;
- mem_count_title.type = emitter_type_title;
- mem_count_title.str_val = "";
-
- emitter_col_t mem_count_val;
- emitter_col_init(&mem_count_val, &mem_count_row);
- mem_count_val.justify = emitter_justify_right;
- mem_count_val.width = 16;
- mem_count_val.type = emitter_type_title;
- mem_count_val.str_val = "";
-
- emitter_table_row(emitter, &mem_count_row);
- mem_count_val.type = emitter_type_size;
-
- /* Active count in bytes is emitted only in table mode. */
- mem_count_title.str_val = "active:";
- mem_count_val.size_val = pactive * page;
- emitter_table_row(emitter, &mem_count_row);
-
-#define GET_AND_EMIT_MEM_STAT(stat) \
- CTL_M2_GET("stats.arenas.0."#stat, i, &stat, size_t); \
- emitter_json_kv(emitter, #stat, emitter_type_size, &stat); \
- mem_count_title.str_val = #stat":"; \
- mem_count_val.size_val = stat; \
- emitter_table_row(emitter, &mem_count_row);
-
- GET_AND_EMIT_MEM_STAT(mapped)
- GET_AND_EMIT_MEM_STAT(retained)
- GET_AND_EMIT_MEM_STAT(base)
- GET_AND_EMIT_MEM_STAT(internal)
- GET_AND_EMIT_MEM_STAT(metadata_thp)
- GET_AND_EMIT_MEM_STAT(tcache_bytes)
- GET_AND_EMIT_MEM_STAT(resident)
- GET_AND_EMIT_MEM_STAT(abandoned_vm)
- GET_AND_EMIT_MEM_STAT(extent_avail)
-#undef GET_AND_EMIT_MEM_STAT
-
- if (mutex) {
- stats_arena_mutexes_print(emitter, i, uptime);
- }
- if (bins) {
- stats_arena_bins_print(emitter, mutex, i, uptime);
- }
- if (large) {
- stats_arena_lextents_print(emitter, i, uptime);
+ CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
+ emitter_kv(emitter, "nthreads", "assigned threads",
+ emitter_type_unsigned, &nthreads);
+
+ CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t);
+ emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64,
+ &uptime);
+
+ CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
+ emitter_kv(emitter, "dss", "dss allocation precedence",
+ emitter_type_string, &dss);
+
+ CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms,
+ ssize_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms,
+ ssize_t);
+ CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
+ CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
+ CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t);
+ CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t);
+ CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise,
+ uint64_t);
+ CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise,
+ uint64_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t);
+
+ emitter_row_t decay_row;
+ emitter_row_init(&decay_row);
+
+ /* JSON-style emission. */
+ emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize,
+ &dirty_decay_ms);
+ emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize,
+ &muzzy_decay_ms);
+
+ emitter_json_kv(emitter, "pactive", emitter_type_size, &pactive);
+ emitter_json_kv(emitter, "pdirty", emitter_type_size, &pdirty);
+ emitter_json_kv(emitter, "pmuzzy", emitter_type_size, &pmuzzy);
+
+ emitter_json_kv(emitter, "dirty_npurge", emitter_type_uint64,
+ &dirty_npurge);
+ emitter_json_kv(emitter, "dirty_nmadvise", emitter_type_uint64,
+ &dirty_nmadvise);
+ emitter_json_kv(emitter, "dirty_purged", emitter_type_uint64,
+ &dirty_purged);
+
+ emitter_json_kv(emitter, "muzzy_npurge", emitter_type_uint64,
+ &muzzy_npurge);
+ emitter_json_kv(emitter, "muzzy_nmadvise", emitter_type_uint64,
+ &muzzy_nmadvise);
+ emitter_json_kv(emitter, "muzzy_purged", emitter_type_uint64,
+ &muzzy_purged);
+
+ /* Table-style emission. */
+ COL(decay_row, decay_type, right, 9, title);
+ col_decay_type.str_val = "decaying:";
+
+ COL(decay_row, decay_time, right, 6, title);
+ col_decay_time.str_val = "time";
+
+ COL(decay_row, decay_npages, right, 13, title);
+ col_decay_npages.str_val = "npages";
+
+ COL(decay_row, decay_sweeps, right, 13, title);
+ col_decay_sweeps.str_val = "sweeps";
+
+ COL(decay_row, decay_madvises, right, 13, title);
+ col_decay_madvises.str_val = "madvises";
+
+ COL(decay_row, decay_purged, right, 13, title);
+ col_decay_purged.str_val = "purged";
+
+ /* Title row. */
+ emitter_table_row(emitter, &decay_row);
+
+ /* Dirty row. */
+ col_decay_type.str_val = "dirty:";
+
+ if (dirty_decay_ms >= 0) {
+ col_decay_time.type = emitter_type_ssize;
+ col_decay_time.ssize_val = dirty_decay_ms;
+ } else {
+ col_decay_time.type = emitter_type_title;
+ col_decay_time.str_val = "N/A";
}
- if (extents) {
- stats_arena_extents_print(emitter, i);
- }
-}
-
-static void
-stats_general_print(emitter_t *emitter) {
- const char *cpv;
- bool bv, bv2;
- unsigned uv;
- uint32_t u32v;
- uint64_t u64v;
- ssize_t ssv, ssv2;
- size_t sv, bsz, usz, ssz, sssz, cpsz;
-
- bsz = sizeof(bool);
- usz = sizeof(unsigned);
- ssz = sizeof(size_t);
- sssz = sizeof(ssize_t);
- cpsz = sizeof(const char *);
-
- CTL_GET("version", &cpv, const char *);
- emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv);
-
- /* config. */
- emitter_dict_begin(emitter, "config", "Build-time option settings");
-#define CONFIG_WRITE_BOOL(name) \
- do { \
- CTL_GET("config."#name, &bv, bool); \
- emitter_kv(emitter, #name, "config."#name, \
- emitter_type_bool, &bv); \
- } while (0)
-
- CONFIG_WRITE_BOOL(cache_oblivious);
- CONFIG_WRITE_BOOL(debug);
- CONFIG_WRITE_BOOL(fill);
- CONFIG_WRITE_BOOL(lazy_lock);
- emitter_kv(emitter, "malloc_conf", "config.malloc_conf",
- emitter_type_string, &config_malloc_conf);
- CONFIG_WRITE_BOOL(opt_safety_checks);
- CONFIG_WRITE_BOOL(prof);
- CONFIG_WRITE_BOOL(prof_libgcc);
- CONFIG_WRITE_BOOL(prof_libunwind);
- CONFIG_WRITE_BOOL(stats);
- CONFIG_WRITE_BOOL(utrace);
- CONFIG_WRITE_BOOL(xmalloc);
-#undef CONFIG_WRITE_BOOL
- emitter_dict_end(emitter); /* Close "config" dict. */
-
- /* opt. */
-#define OPT_WRITE(name, var, size, emitter_type) \
- if (je_mallctl("opt."name, (void *)&var, &size, NULL, 0) == \
- 0) { \
- emitter_kv(emitter, name, "opt."name, emitter_type, \
- &var); \
- }
-
-#define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type, \
- altname) \
- if (je_mallctl("opt."name, (void *)&var1, &size, NULL, 0) == \
- 0 && je_mallctl(altname, (void *)&var2, &size, NULL, 0) \
- == 0) { \
- emitter_kv_note(emitter, name, "opt."name, \
- emitter_type, &var1, altname, emitter_type, \
- &var2); \
+ col_decay_npages.type = emitter_type_size;
+ col_decay_npages.size_val = pdirty;
+
+ col_decay_sweeps.type = emitter_type_uint64;
+ col_decay_sweeps.uint64_val = dirty_npurge;
+
+ col_decay_madvises.type = emitter_type_uint64;
+ col_decay_madvises.uint64_val = dirty_nmadvise;
+
+ col_decay_purged.type = emitter_type_uint64;
+ col_decay_purged.uint64_val = dirty_purged;
+
+ emitter_table_row(emitter, &decay_row);
+
+ /* Muzzy row. */
+ col_decay_type.str_val = "muzzy:";
+
+ if (muzzy_decay_ms >= 0) {
+ col_decay_time.type = emitter_type_ssize;
+ col_decay_time.ssize_val = muzzy_decay_ms;
+ } else {
+ col_decay_time.type = emitter_type_title;
+ col_decay_time.str_val = "N/A";
}
-#define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool)
-#define OPT_WRITE_BOOL_MUTABLE(name, altname) \
- OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname)
-
-#define OPT_WRITE_UNSIGNED(name) \
- OPT_WRITE(name, uv, usz, emitter_type_unsigned)
-
-#define OPT_WRITE_SIZE_T(name) \
- OPT_WRITE(name, sv, ssz, emitter_type_size)
-#define OPT_WRITE_SSIZE_T(name) \
- OPT_WRITE(name, ssv, sssz, emitter_type_ssize)
-#define OPT_WRITE_SSIZE_T_MUTABLE(name, altname) \
- OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize, \
- altname)
-
-#define OPT_WRITE_CHAR_P(name) \
- OPT_WRITE(name, cpv, cpsz, emitter_type_string)
-
- emitter_dict_begin(emitter, "opt", "Run-time option settings");
-
- OPT_WRITE_BOOL("abort")
- OPT_WRITE_BOOL("abort_conf")
- OPT_WRITE_BOOL("confirm_conf")
- OPT_WRITE_BOOL("retain")
- OPT_WRITE_CHAR_P("dss")
- OPT_WRITE_UNSIGNED("narenas")
- OPT_WRITE_CHAR_P("percpu_arena")
- OPT_WRITE_SIZE_T("oversize_threshold")
- OPT_WRITE_CHAR_P("metadata_thp")
- OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread")
- OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms")
- OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms")
- OPT_WRITE_SIZE_T("lg_extent_max_active_fit")
- OPT_WRITE_CHAR_P("junk")
- OPT_WRITE_BOOL("zero")
- OPT_WRITE_BOOL("utrace")
- OPT_WRITE_BOOL("xmalloc")
- OPT_WRITE_BOOL("tcache")
- OPT_WRITE_SSIZE_T("lg_tcache_max")
- OPT_WRITE_CHAR_P("thp")
- OPT_WRITE_BOOL("prof")
- OPT_WRITE_CHAR_P("prof_prefix")
- OPT_WRITE_BOOL_MUTABLE("prof_active", "prof.active")
- OPT_WRITE_BOOL_MUTABLE("prof_thread_active_init",
- "prof.thread_active_init")
- OPT_WRITE_SSIZE_T_MUTABLE("lg_prof_sample", "prof.lg_sample")
- OPT_WRITE_BOOL("prof_accum")
- OPT_WRITE_SSIZE_T("lg_prof_interval")
- OPT_WRITE_BOOL("prof_gdump")
- OPT_WRITE_BOOL("prof_final")
- OPT_WRITE_BOOL("prof_leak")
- OPT_WRITE_BOOL("stats_print")
- OPT_WRITE_CHAR_P("stats_print_opts")
-
- emitter_dict_end(emitter);
-
-#undef OPT_WRITE
-#undef OPT_WRITE_MUTABLE
+ col_decay_npages.type = emitter_type_size;
+ col_decay_npages.size_val = pmuzzy;
+
+ col_decay_sweeps.type = emitter_type_uint64;
+ col_decay_sweeps.uint64_val = muzzy_npurge;
+
+ col_decay_madvises.type = emitter_type_uint64;
+ col_decay_madvises.uint64_val = muzzy_nmadvise;
+
+ col_decay_purged.type = emitter_type_uint64;
+ col_decay_purged.uint64_val = muzzy_purged;
+
+ emitter_table_row(emitter, &decay_row);
+
+ /* Small / large / total allocation counts. */
+ emitter_row_t alloc_count_row;
+ emitter_row_init(&alloc_count_row);
+
+ COL(alloc_count_row, count_title, left, 21, title);
+ col_count_title.str_val = "";
+
+ COL(alloc_count_row, count_allocated, right, 16, title);
+ col_count_allocated.str_val = "allocated";
+
+ COL(alloc_count_row, count_nmalloc, right, 16, title);
+ col_count_nmalloc.str_val = "nmalloc";
+ COL(alloc_count_row, count_nmalloc_ps, right, 8, title);
+ col_count_nmalloc_ps.str_val = "(#/sec)";
+
+ COL(alloc_count_row, count_ndalloc, right, 16, title);
+ col_count_ndalloc.str_val = "ndalloc";
+ COL(alloc_count_row, count_ndalloc_ps, right, 8, title);
+ col_count_ndalloc_ps.str_val = "(#/sec)";
+
+ COL(alloc_count_row, count_nrequests, right, 16, title);
+ col_count_nrequests.str_val = "nrequests";
+ COL(alloc_count_row, count_nrequests_ps, right, 10, title);
+ col_count_nrequests_ps.str_val = "(#/sec)";
+
+ COL(alloc_count_row, count_nfills, right, 16, title);
+ col_count_nfills.str_val = "nfill";
+ COL(alloc_count_row, count_nfills_ps, right, 10, title);
+ col_count_nfills_ps.str_val = "(#/sec)";
+
+ COL(alloc_count_row, count_nflushes, right, 16, title);
+ col_count_nflushes.str_val = "nflush";
+ COL(alloc_count_row, count_nflushes_ps, right, 10, title);
+ col_count_nflushes_ps.str_val = "(#/sec)";
+
+ emitter_table_row(emitter, &alloc_count_row);
+
+ col_count_nmalloc_ps.type = emitter_type_uint64;
+ col_count_ndalloc_ps.type = emitter_type_uint64;
+ col_count_nrequests_ps.type = emitter_type_uint64;
+ col_count_nfills_ps.type = emitter_type_uint64;
+ col_count_nflushes_ps.type = emitter_type_uint64;
+
+#define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype) \
+ CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i, \
+ &small_or_large##_##name, valtype##_t); \
+ emitter_json_kv(emitter, #name, emitter_type_##valtype, \
+ &small_or_large##_##name); \
+ col_count_##name.type = emitter_type_##valtype; \
+ col_count_##name.valtype##_val = small_or_large##_##name;
+
+ emitter_json_object_kv_begin(emitter, "small");
+ col_count_title.str_val = "small:";
+
+ GET_AND_EMIT_ALLOC_STAT(small, allocated, size)
+ GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64)
+ col_count_nmalloc_ps.uint64_val =
+ rate_per_second(col_count_nmalloc.uint64_val, uptime);
+ GET_AND_EMIT_ALLOC_STAT(small, ndalloc, uint64)
+ col_count_ndalloc_ps.uint64_val =
+ rate_per_second(col_count_ndalloc.uint64_val, uptime);
+ GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64)
+ col_count_nrequests_ps.uint64_val =
+ rate_per_second(col_count_nrequests.uint64_val, uptime);
+ GET_AND_EMIT_ALLOC_STAT(small, nfills, uint64)
+ col_count_nfills_ps.uint64_val =
+ rate_per_second(col_count_nfills.uint64_val, uptime);
+ GET_AND_EMIT_ALLOC_STAT(small, nflushes, uint64)
+ col_count_nflushes_ps.uint64_val =
+ rate_per_second(col_count_nflushes.uint64_val, uptime);
+
+ emitter_table_row(emitter, &alloc_count_row);
+ emitter_json_object_end(emitter); /* Close "small". */
+
+ emitter_json_object_kv_begin(emitter, "large");
+ col_count_title.str_val = "large:";
+
+ GET_AND_EMIT_ALLOC_STAT(large, allocated, size)
+ GET_AND_EMIT_ALLOC_STAT(large, nmalloc, uint64)
+ col_count_nmalloc_ps.uint64_val =
+ rate_per_second(col_count_nmalloc.uint64_val, uptime);
+ GET_AND_EMIT_ALLOC_STAT(large, ndalloc, uint64)
+ col_count_ndalloc_ps.uint64_val =
+ rate_per_second(col_count_ndalloc.uint64_val, uptime);
+ GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64)
+ col_count_nrequests_ps.uint64_val =
+ rate_per_second(col_count_nrequests.uint64_val, uptime);
+ GET_AND_EMIT_ALLOC_STAT(large, nfills, uint64)
+ col_count_nfills_ps.uint64_val =
+ rate_per_second(col_count_nfills.uint64_val, uptime);
+ GET_AND_EMIT_ALLOC_STAT(large, nflushes, uint64)
+ col_count_nflushes_ps.uint64_val =
+ rate_per_second(col_count_nflushes.uint64_val, uptime);
+
+ emitter_table_row(emitter, &alloc_count_row);
+ emitter_json_object_end(emitter); /* Close "large". */
+
+#undef GET_AND_EMIT_ALLOC_STAT
+
+ /* Aggregated small + large stats are emitted only in table mode. */
+ col_count_title.str_val = "total:";
+ col_count_allocated.size_val = small_allocated + large_allocated;
+ col_count_nmalloc.uint64_val = small_nmalloc + large_nmalloc;
+ col_count_ndalloc.uint64_val = small_ndalloc + large_ndalloc;
+ col_count_nrequests.uint64_val = small_nrequests + large_nrequests;
+ col_count_nfills.uint64_val = small_nfills + large_nfills;
+ col_count_nflushes.uint64_val = small_nflushes + large_nflushes;
+ col_count_nmalloc_ps.uint64_val =
+ rate_per_second(col_count_nmalloc.uint64_val, uptime);
+ col_count_ndalloc_ps.uint64_val =
+ rate_per_second(col_count_ndalloc.uint64_val, uptime);
+ col_count_nrequests_ps.uint64_val =
+ rate_per_second(col_count_nrequests.uint64_val, uptime);
+ col_count_nfills_ps.uint64_val =
+ rate_per_second(col_count_nfills.uint64_val, uptime);
+ col_count_nflushes_ps.uint64_val =
+ rate_per_second(col_count_nflushes.uint64_val, uptime);
+ emitter_table_row(emitter, &alloc_count_row);
+
+ emitter_row_t mem_count_row;
+ emitter_row_init(&mem_count_row);
+
+ emitter_col_t mem_count_title;
+ emitter_col_init(&mem_count_title, &mem_count_row);
+ mem_count_title.justify = emitter_justify_left;
+ mem_count_title.width = 21;
+ mem_count_title.type = emitter_type_title;
+ mem_count_title.str_val = "";
+
+ emitter_col_t mem_count_val;
+ emitter_col_init(&mem_count_val, &mem_count_row);
+ mem_count_val.justify = emitter_justify_right;
+ mem_count_val.width = 16;
+ mem_count_val.type = emitter_type_title;
+ mem_count_val.str_val = "";
+
+ emitter_table_row(emitter, &mem_count_row);
+ mem_count_val.type = emitter_type_size;
+
+ /* Active count in bytes is emitted only in table mode. */
+ mem_count_title.str_val = "active:";
+ mem_count_val.size_val = pactive * page;
+ emitter_table_row(emitter, &mem_count_row);
+
+#define GET_AND_EMIT_MEM_STAT(stat) \
+ CTL_M2_GET("stats.arenas.0."#stat, i, &stat, size_t); \
+ emitter_json_kv(emitter, #stat, emitter_type_size, &stat); \
+ mem_count_title.str_val = #stat":"; \
+ mem_count_val.size_val = stat; \
+ emitter_table_row(emitter, &mem_count_row);
+
+ GET_AND_EMIT_MEM_STAT(mapped)
+ GET_AND_EMIT_MEM_STAT(retained)
+ GET_AND_EMIT_MEM_STAT(base)
+ GET_AND_EMIT_MEM_STAT(internal)
+ GET_AND_EMIT_MEM_STAT(metadata_thp)
+ GET_AND_EMIT_MEM_STAT(tcache_bytes)
+ GET_AND_EMIT_MEM_STAT(resident)
+ GET_AND_EMIT_MEM_STAT(abandoned_vm)
+ GET_AND_EMIT_MEM_STAT(extent_avail)
+#undef GET_AND_EMIT_MEM_STAT
+
+ if (mutex) {
+ stats_arena_mutexes_print(emitter, i, uptime);
+ }
+ if (bins) {
+ stats_arena_bins_print(emitter, mutex, i, uptime);
+ }
+ if (large) {
+ stats_arena_lextents_print(emitter, i, uptime);
+ }
+ if (extents) {
+ stats_arena_extents_print(emitter, i);
+ }
+}
+
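The CTL_M2_GET/CTL_M2_M4_GET macros used throughout stats_arena_print read a mallctl whose name contains ".0." placeholders and substitute the arena (and size-class) index at the corresponding MIB positions. A rough equivalent using only the public API, shown here for stats.arenas.<i>.pactive; the function name and error handling are illustrative, not the macros' actual expansion:

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    /* Read stats.arenas.<arena_ind>.pactive through the MIB interface. */
    static size_t
    read_pactive(unsigned arena_ind) {
        size_t mib[4];
        size_t miblen = sizeof(mib) / sizeof(mib[0]);
        size_t pactive = 0;
        size_t sz = sizeof(pactive);

        if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) != 0) {
            return 0;
        }
        mib[2] = (size_t)arena_ind;  /* patch the ".0." placeholder */
        if (mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0) != 0) {
            return 0;
        }
        return pactive;
    }

Inside jemalloc itself the same job is done through the internal wrappers (xmallctlnametomib/xmallctlbymib) visible in stats_print_helper below.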
+static void
+stats_general_print(emitter_t *emitter) {
+ const char *cpv;
+ bool bv, bv2;
+ unsigned uv;
+ uint32_t u32v;
+ uint64_t u64v;
+ ssize_t ssv, ssv2;
+ size_t sv, bsz, usz, ssz, sssz, cpsz;
+
+ bsz = sizeof(bool);
+ usz = sizeof(unsigned);
+ ssz = sizeof(size_t);
+ sssz = sizeof(ssize_t);
+ cpsz = sizeof(const char *);
+
+ CTL_GET("version", &cpv, const char *);
+ emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv);
+
+ /* config. */
+ emitter_dict_begin(emitter, "config", "Build-time option settings");
+#define CONFIG_WRITE_BOOL(name) \
+ do { \
+ CTL_GET("config."#name, &bv, bool); \
+ emitter_kv(emitter, #name, "config."#name, \
+ emitter_type_bool, &bv); \
+ } while (0)
+
+ CONFIG_WRITE_BOOL(cache_oblivious);
+ CONFIG_WRITE_BOOL(debug);
+ CONFIG_WRITE_BOOL(fill);
+ CONFIG_WRITE_BOOL(lazy_lock);
+ emitter_kv(emitter, "malloc_conf", "config.malloc_conf",
+ emitter_type_string, &config_malloc_conf);
+
+ CONFIG_WRITE_BOOL(opt_safety_checks);
+ CONFIG_WRITE_BOOL(prof);
+ CONFIG_WRITE_BOOL(prof_libgcc);
+ CONFIG_WRITE_BOOL(prof_libunwind);
+ CONFIG_WRITE_BOOL(stats);
+ CONFIG_WRITE_BOOL(utrace);
+ CONFIG_WRITE_BOOL(xmalloc);
+#undef CONFIG_WRITE_BOOL
+ emitter_dict_end(emitter); /* Close "config" dict. */
+
+ /* opt. */
+#define OPT_WRITE(name, var, size, emitter_type) \
+ if (je_mallctl("opt."name, (void *)&var, &size, NULL, 0) == \
+ 0) { \
+ emitter_kv(emitter, name, "opt."name, emitter_type, \
+ &var); \
+ }
+
+#define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type, \
+ altname) \
+ if (je_mallctl("opt."name, (void *)&var1, &size, NULL, 0) == \
+ 0 && je_mallctl(altname, (void *)&var2, &size, NULL, 0) \
+ == 0) { \
+ emitter_kv_note(emitter, name, "opt."name, \
+ emitter_type, &var1, altname, emitter_type, \
+ &var2); \
+ }
+
+#define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool)
+#define OPT_WRITE_BOOL_MUTABLE(name, altname) \
+ OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname)
+
+#define OPT_WRITE_UNSIGNED(name) \
+ OPT_WRITE(name, uv, usz, emitter_type_unsigned)
+
+#define OPT_WRITE_SIZE_T(name) \
+ OPT_WRITE(name, sv, ssz, emitter_type_size)
+#define OPT_WRITE_SSIZE_T(name) \
+ OPT_WRITE(name, ssv, sssz, emitter_type_ssize)
+#define OPT_WRITE_SSIZE_T_MUTABLE(name, altname) \
+ OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize, \
+ altname)
+
+#define OPT_WRITE_CHAR_P(name) \
+ OPT_WRITE(name, cpv, cpsz, emitter_type_string)
+
+ emitter_dict_begin(emitter, "opt", "Run-time option settings");
+
+ OPT_WRITE_BOOL("abort")
+ OPT_WRITE_BOOL("abort_conf")
+ OPT_WRITE_BOOL("confirm_conf")
+ OPT_WRITE_BOOL("retain")
+ OPT_WRITE_CHAR_P("dss")
+ OPT_WRITE_UNSIGNED("narenas")
+ OPT_WRITE_CHAR_P("percpu_arena")
+ OPT_WRITE_SIZE_T("oversize_threshold")
+ OPT_WRITE_CHAR_P("metadata_thp")
+ OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread")
+ OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms")
+ OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms")
+ OPT_WRITE_SIZE_T("lg_extent_max_active_fit")
+ OPT_WRITE_CHAR_P("junk")
+ OPT_WRITE_BOOL("zero")
+ OPT_WRITE_BOOL("utrace")
+ OPT_WRITE_BOOL("xmalloc")
+ OPT_WRITE_BOOL("tcache")
+ OPT_WRITE_SSIZE_T("lg_tcache_max")
+ OPT_WRITE_CHAR_P("thp")
+ OPT_WRITE_BOOL("prof")
+ OPT_WRITE_CHAR_P("prof_prefix")
+ OPT_WRITE_BOOL_MUTABLE("prof_active", "prof.active")
+ OPT_WRITE_BOOL_MUTABLE("prof_thread_active_init",
+ "prof.thread_active_init")
+ OPT_WRITE_SSIZE_T_MUTABLE("lg_prof_sample", "prof.lg_sample")
+ OPT_WRITE_BOOL("prof_accum")
+ OPT_WRITE_SSIZE_T("lg_prof_interval")
+ OPT_WRITE_BOOL("prof_gdump")
+ OPT_WRITE_BOOL("prof_final")
+ OPT_WRITE_BOOL("prof_leak")
+ OPT_WRITE_BOOL("stats_print")
+ OPT_WRITE_CHAR_P("stats_print_opts")
+
+ emitter_dict_end(emitter);
+
+#undef OPT_WRITE
+#undef OPT_WRITE_MUTABLE
#undef OPT_WRITE_BOOL
-#undef OPT_WRITE_BOOL_MUTABLE
-#undef OPT_WRITE_UNSIGNED
+#undef OPT_WRITE_BOOL_MUTABLE
+#undef OPT_WRITE_UNSIGNED
#undef OPT_WRITE_SSIZE_T
-#undef OPT_WRITE_SSIZE_T_MUTABLE
+#undef OPT_WRITE_SSIZE_T_MUTABLE
#undef OPT_WRITE_CHAR_P
- /* prof. */
- if (config_prof) {
- emitter_dict_begin(emitter, "prof", "Profiling settings");
-
- CTL_GET("prof.thread_active_init", &bv, bool);
- emitter_kv(emitter, "thread_active_init",
- "prof.thread_active_init", emitter_type_bool, &bv);
-
- CTL_GET("prof.active", &bv, bool);
- emitter_kv(emitter, "active", "prof.active", emitter_type_bool,
- &bv);
-
- CTL_GET("prof.gdump", &bv, bool);
- emitter_kv(emitter, "gdump", "prof.gdump", emitter_type_bool,
- &bv);
-
- CTL_GET("prof.interval", &u64v, uint64_t);
- emitter_kv(emitter, "interval", "prof.interval",
- emitter_type_uint64, &u64v);
-
- CTL_GET("prof.lg_sample", &ssv, ssize_t);
- emitter_kv(emitter, "lg_sample", "prof.lg_sample",
- emitter_type_ssize, &ssv);
-
- emitter_dict_end(emitter); /* Close "prof". */
- }
-
- /* arenas. */
- /*
- * The json output sticks arena info into an "arenas" dict; the table
- * output puts them at the top-level.
- */
- emitter_json_object_kv_begin(emitter, "arenas");
-
- CTL_GET("arenas.narenas", &uv, unsigned);
- emitter_kv(emitter, "narenas", "Arenas", emitter_type_unsigned, &uv);
-
- /*
- * Decay settings are emitted only in json mode; in table mode, they're
- * emitted as notes with the opt output, above.
- */
- CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t);
- emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &ssv);
-
- CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t);
- emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &ssv);
-
- CTL_GET("arenas.quantum", &sv, size_t);
- emitter_kv(emitter, "quantum", "Quantum size", emitter_type_size, &sv);
-
- CTL_GET("arenas.page", &sv, size_t);
- emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv);
-
- if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
- emitter_kv(emitter, "tcache_max",
- "Maximum thread-cached size class", emitter_type_size, &sv);
- }
-
- unsigned nbins;
- CTL_GET("arenas.nbins", &nbins, unsigned);
- emitter_kv(emitter, "nbins", "Number of bin size classes",
- emitter_type_unsigned, &nbins);
-
- unsigned nhbins;
- CTL_GET("arenas.nhbins", &nhbins, unsigned);
- emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
- emitter_type_unsigned, &nhbins);
-
- /*
- * We do enough mallctls in a loop that we actually want to omit them
- * (not just omit the printing).
- */
- if (emitter->output == emitter_output_json) {
- emitter_json_array_kv_begin(emitter, "bin");
- for (unsigned i = 0; i < nbins; i++) {
- emitter_json_object_begin(emitter);
-
- CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
- emitter_json_kv(emitter, "size", emitter_type_size,
- &sv);
-
- CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
- emitter_json_kv(emitter, "nregs", emitter_type_uint32,
- &u32v);
-
- CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
- emitter_json_kv(emitter, "slab_size", emitter_type_size,
- &sv);
-
- CTL_M2_GET("arenas.bin.0.nshards", i, &u32v, uint32_t);
- emitter_json_kv(emitter, "nshards", emitter_type_uint32,
- &u32v);
-
- emitter_json_object_end(emitter);
+ /* prof. */
+ if (config_prof) {
+ emitter_dict_begin(emitter, "prof", "Profiling settings");
+
+ CTL_GET("prof.thread_active_init", &bv, bool);
+ emitter_kv(emitter, "thread_active_init",
+ "prof.thread_active_init", emitter_type_bool, &bv);
+
+ CTL_GET("prof.active", &bv, bool);
+ emitter_kv(emitter, "active", "prof.active", emitter_type_bool,
+ &bv);
+
+ CTL_GET("prof.gdump", &bv, bool);
+ emitter_kv(emitter, "gdump", "prof.gdump", emitter_type_bool,
+ &bv);
+
+ CTL_GET("prof.interval", &u64v, uint64_t);
+ emitter_kv(emitter, "interval", "prof.interval",
+ emitter_type_uint64, &u64v);
+
+ CTL_GET("prof.lg_sample", &ssv, ssize_t);
+ emitter_kv(emitter, "lg_sample", "prof.lg_sample",
+ emitter_type_ssize, &ssv);
+
+ emitter_dict_end(emitter); /* Close "prof". */
+ }
+
+ /* arenas. */
+ /*
+ * The json output sticks arena info into an "arenas" dict; the table
+ * output puts them at the top-level.
+ */
+ emitter_json_object_kv_begin(emitter, "arenas");
+
+ CTL_GET("arenas.narenas", &uv, unsigned);
+ emitter_kv(emitter, "narenas", "Arenas", emitter_type_unsigned, &uv);
+
+ /*
+ * Decay settings are emitted only in json mode; in table mode, they're
+ * emitted as notes with the opt output, above.
+ */
+ CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t);
+ emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &ssv);
+
+ CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t);
+ emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &ssv);
+
+ CTL_GET("arenas.quantum", &sv, size_t);
+ emitter_kv(emitter, "quantum", "Quantum size", emitter_type_size, &sv);
+
+ CTL_GET("arenas.page", &sv, size_t);
+ emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv);
+
+ if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
+ emitter_kv(emitter, "tcache_max",
+ "Maximum thread-cached size class", emitter_type_size, &sv);
+ }
+
+ unsigned nbins;
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ emitter_kv(emitter, "nbins", "Number of bin size classes",
+ emitter_type_unsigned, &nbins);
+
+ unsigned nhbins;
+ CTL_GET("arenas.nhbins", &nhbins, unsigned);
+ emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
+ emitter_type_unsigned, &nhbins);
+
+ /*
+ * We do enough mallctls in a loop that we actually want to omit them
+ * (not just omit the printing).
+ */
+ if (emitter->output == emitter_output_json) {
+ emitter_json_array_kv_begin(emitter, "bin");
+ for (unsigned i = 0; i < nbins; i++) {
+ emitter_json_object_begin(emitter);
+
+ CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
+ emitter_json_kv(emitter, "size", emitter_type_size,
+ &sv);
+
+ CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
+ emitter_json_kv(emitter, "nregs", emitter_type_uint32,
+ &u32v);
+
+ CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
+ emitter_json_kv(emitter, "slab_size", emitter_type_size,
+ &sv);
+
+ CTL_M2_GET("arenas.bin.0.nshards", i, &u32v, uint32_t);
+ emitter_json_kv(emitter, "nshards", emitter_type_uint32,
+ &u32v);
+
+ emitter_json_object_end(emitter);
}
- emitter_json_array_end(emitter); /* Close "bin". */
- }
-
- unsigned nlextents;
- CTL_GET("arenas.nlextents", &nlextents, unsigned);
- emitter_kv(emitter, "nlextents", "Number of large size classes",
- emitter_type_unsigned, &nlextents);
-
- if (emitter->output == emitter_output_json) {
- emitter_json_array_kv_begin(emitter, "lextent");
- for (unsigned i = 0; i < nlextents; i++) {
- emitter_json_object_begin(emitter);
-
- CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
- emitter_json_kv(emitter, "size", emitter_type_size,
- &sv);
-
- emitter_json_object_end(emitter);
+ emitter_json_array_end(emitter); /* Close "bin". */
+ }
+
+ unsigned nlextents;
+ CTL_GET("arenas.nlextents", &nlextents, unsigned);
+ emitter_kv(emitter, "nlextents", "Number of large size classes",
+ emitter_type_unsigned, &nlextents);
+
+ if (emitter->output == emitter_output_json) {
+ emitter_json_array_kv_begin(emitter, "lextent");
+ for (unsigned i = 0; i < nlextents; i++) {
+ emitter_json_object_begin(emitter);
+
+ CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
+ emitter_json_kv(emitter, "size", emitter_type_size,
+ &sv);
+
+ emitter_json_object_end(emitter);
}
- emitter_json_array_end(emitter); /* Close "lextent". */
- }
-
- emitter_json_object_end(emitter); /* Close "arenas" */
-}
-
-static void
-stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
- bool unmerged, bool bins, bool large, bool mutex, bool extents) {
- /*
- * These should be deleted. We keep them around for a while, to aid in
- * the transition to the emitter code.
- */
- size_t allocated, active, metadata, metadata_thp, resident, mapped,
- retained;
- size_t num_background_threads;
- uint64_t background_thread_num_runs, background_thread_run_interval;
-
- CTL_GET("stats.allocated", &allocated, size_t);
- CTL_GET("stats.active", &active, size_t);
- CTL_GET("stats.metadata", &metadata, size_t);
- CTL_GET("stats.metadata_thp", &metadata_thp, size_t);
- CTL_GET("stats.resident", &resident, size_t);
- CTL_GET("stats.mapped", &mapped, size_t);
- CTL_GET("stats.retained", &retained, size_t);
-
- if (have_background_thread) {
- CTL_GET("stats.background_thread.num_threads",
- &num_background_threads, size_t);
- CTL_GET("stats.background_thread.num_runs",
- &background_thread_num_runs, uint64_t);
- CTL_GET("stats.background_thread.run_interval",
- &background_thread_run_interval, uint64_t);
- } else {
- num_background_threads = 0;
- background_thread_num_runs = 0;
- background_thread_run_interval = 0;
+ emitter_json_array_end(emitter); /* Close "lextent". */
}
- /* Generic global stats. */
- emitter_json_object_kv_begin(emitter, "stats");
- emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated);
- emitter_json_kv(emitter, "active", emitter_type_size, &active);
- emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata);
- emitter_json_kv(emitter, "metadata_thp", emitter_type_size,
- &metadata_thp);
- emitter_json_kv(emitter, "resident", emitter_type_size, &resident);
- emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped);
- emitter_json_kv(emitter, "retained", emitter_type_size, &retained);
-
- emitter_table_printf(emitter, "Allocated: %zu, active: %zu, "
- "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, "
- "retained: %zu\n", allocated, active, metadata, metadata_thp,
- resident, mapped, retained);
-
- /* Background thread stats. */
- emitter_json_object_kv_begin(emitter, "background_thread");
- emitter_json_kv(emitter, "num_threads", emitter_type_size,
- &num_background_threads);
- emitter_json_kv(emitter, "num_runs", emitter_type_uint64,
- &background_thread_num_runs);
- emitter_json_kv(emitter, "run_interval", emitter_type_uint64,
- &background_thread_run_interval);
- emitter_json_object_end(emitter); /* Close "background_thread". */
-
- emitter_table_printf(emitter, "Background threads: %zu, "
- "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n",
- num_background_threads, background_thread_num_runs,
- background_thread_run_interval);
-
- if (mutex) {
- emitter_row_t row;
- emitter_col_t name;
- emitter_col_t col64[mutex_prof_num_uint64_t_counters];
- emitter_col_t col32[mutex_prof_num_uint32_t_counters];
- uint64_t uptime;
-
- emitter_row_init(&row);
- mutex_stats_init_cols(&row, "", &name, col64, col32);
-
- emitter_table_row(emitter, &row);
- emitter_json_object_kv_begin(emitter, "mutexes");
-
- CTL_M2_GET("stats.arenas.0.uptime", 0, &uptime, uint64_t);
-
- for (int i = 0; i < mutex_prof_num_global_mutexes; i++) {
- mutex_stats_read_global(global_mutex_names[i], &name,
- col64, col32, uptime);
- emitter_json_object_kv_begin(emitter, global_mutex_names[i]);
- mutex_stats_emit(emitter, &row, col64, col32);
- emitter_json_object_end(emitter);
- }
-
- emitter_json_object_end(emitter); /* Close "mutexes". */
- }
-
- emitter_json_object_end(emitter); /* Close "stats". */
-
- if (merged || destroyed || unmerged) {
- unsigned narenas;
-
- emitter_json_object_kv_begin(emitter, "stats.arenas");
-
- CTL_GET("arenas.narenas", &narenas, unsigned);
- size_t mib[3];
- size_t miblen = sizeof(mib) / sizeof(size_t);
- size_t sz;
- VARIABLE_ARRAY(bool, initialized, narenas);
- bool destroyed_initialized;
- unsigned i, j, ninitialized;
-
- xmallctlnametomib("arena.0.initialized", mib, &miblen);
- for (i = ninitialized = 0; i < narenas; i++) {
- mib[1] = i;
- sz = sizeof(bool);
- xmallctlbymib(mib, miblen, &initialized[i], &sz,
- NULL, 0);
- if (initialized[i]) {
- ninitialized++;
- }
- }
- mib[1] = MALLCTL_ARENAS_DESTROYED;
- sz = sizeof(bool);
- xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
- NULL, 0);
-
- /* Merged stats. */
- if (merged && (ninitialized > 1 || !unmerged)) {
- /* Print merged arena stats. */
- emitter_table_printf(emitter, "Merged arenas stats:\n");
- emitter_json_object_kv_begin(emitter, "merged");
- stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins,
- large, mutex, extents);
- emitter_json_object_end(emitter); /* Close "merged". */
+ emitter_json_object_end(emitter); /* Close "arenas" */
+}
+
+static void
+stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
+ bool unmerged, bool bins, bool large, bool mutex, bool extents) {
+ /*
+ * These should be deleted. We keep them around for a while, to aid in
+ * the transition to the emitter code.
+ */
+ size_t allocated, active, metadata, metadata_thp, resident, mapped,
+ retained;
+ size_t num_background_threads;
+ uint64_t background_thread_num_runs, background_thread_run_interval;
+
+ CTL_GET("stats.allocated", &allocated, size_t);
+ CTL_GET("stats.active", &active, size_t);
+ CTL_GET("stats.metadata", &metadata, size_t);
+ CTL_GET("stats.metadata_thp", &metadata_thp, size_t);
+ CTL_GET("stats.resident", &resident, size_t);
+ CTL_GET("stats.mapped", &mapped, size_t);
+ CTL_GET("stats.retained", &retained, size_t);
+
+ if (have_background_thread) {
+ CTL_GET("stats.background_thread.num_threads",
+ &num_background_threads, size_t);
+ CTL_GET("stats.background_thread.num_runs",
+ &background_thread_num_runs, uint64_t);
+ CTL_GET("stats.background_thread.run_interval",
+ &background_thread_run_interval, uint64_t);
+ } else {
+ num_background_threads = 0;
+ background_thread_num_runs = 0;
+ background_thread_run_interval = 0;
+ }
+
+ /* Generic global stats. */
+ emitter_json_object_kv_begin(emitter, "stats");
+ emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated);
+ emitter_json_kv(emitter, "active", emitter_type_size, &active);
+ emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata);
+ emitter_json_kv(emitter, "metadata_thp", emitter_type_size,
+ &metadata_thp);
+ emitter_json_kv(emitter, "resident", emitter_type_size, &resident);
+ emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped);
+ emitter_json_kv(emitter, "retained", emitter_type_size, &retained);
+
+ emitter_table_printf(emitter, "Allocated: %zu, active: %zu, "
+ "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, "
+ "retained: %zu\n", allocated, active, metadata, metadata_thp,
+ resident, mapped, retained);
+
+ /* Background thread stats. */
+ emitter_json_object_kv_begin(emitter, "background_thread");
+ emitter_json_kv(emitter, "num_threads", emitter_type_size,
+ &num_background_threads);
+ emitter_json_kv(emitter, "num_runs", emitter_type_uint64,
+ &background_thread_num_runs);
+ emitter_json_kv(emitter, "run_interval", emitter_type_uint64,
+ &background_thread_run_interval);
+ emitter_json_object_end(emitter); /* Close "background_thread". */
+
+ emitter_table_printf(emitter, "Background threads: %zu, "
+ "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n",
+ num_background_threads, background_thread_num_runs,
+ background_thread_run_interval);
+
+ if (mutex) {
+ emitter_row_t row;
+ emitter_col_t name;
+ emitter_col_t col64[mutex_prof_num_uint64_t_counters];
+ emitter_col_t col32[mutex_prof_num_uint32_t_counters];
+ uint64_t uptime;
+
+ emitter_row_init(&row);
+ mutex_stats_init_cols(&row, "", &name, col64, col32);
+
+ emitter_table_row(emitter, &row);
+ emitter_json_object_kv_begin(emitter, "mutexes");
+
+ CTL_M2_GET("stats.arenas.0.uptime", 0, &uptime, uint64_t);
+
+ for (int i = 0; i < mutex_prof_num_global_mutexes; i++) {
+ mutex_stats_read_global(global_mutex_names[i], &name,
+ col64, col32, uptime);
+ emitter_json_object_kv_begin(emitter, global_mutex_names[i]);
+ mutex_stats_emit(emitter, &row, col64, col32);
+ emitter_json_object_end(emitter);
}
- /* Destroyed stats. */
- if (destroyed_initialized && destroyed) {
- /* Print destroyed arena stats. */
- emitter_table_printf(emitter,
- "Destroyed arenas stats:\n");
- emitter_json_object_kv_begin(emitter, "destroyed");
- stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED,
- bins, large, mutex, extents);
- emitter_json_object_end(emitter); /* Close "destroyed". */
- }
-
- /* Unmerged stats. */
- if (unmerged) {
- for (i = j = 0; i < narenas; i++) {
- if (initialized[i]) {
- char arena_ind_str[20];
- malloc_snprintf(arena_ind_str,
- sizeof(arena_ind_str), "%u", i);
- emitter_json_object_kv_begin(emitter,
- arena_ind_str);
- emitter_table_printf(emitter,
- "arenas[%s]:\n", arena_ind_str);
- stats_arena_print(emitter, i, bins,
- large, mutex, extents);
- /* Close "<arena-ind>". */
- emitter_json_object_end(emitter);
+ emitter_json_object_end(emitter); /* Close "mutexes". */
+ }
+
+ emitter_json_object_end(emitter); /* Close "stats". */
+
+ if (merged || destroyed || unmerged) {
+ unsigned narenas;
+
+ emitter_json_object_kv_begin(emitter, "stats.arenas");
+
+ CTL_GET("arenas.narenas", &narenas, unsigned);
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ size_t sz;
+ VARIABLE_ARRAY(bool, initialized, narenas);
+ bool destroyed_initialized;
+ unsigned i, j, ninitialized;
+
+ xmallctlnametomib("arena.0.initialized", mib, &miblen);
+ for (i = ninitialized = 0; i < narenas; i++) {
+ mib[1] = i;
+ sz = sizeof(bool);
+ xmallctlbymib(mib, miblen, &initialized[i], &sz,
+ NULL, 0);
+ if (initialized[i]) {
+ ninitialized++;
+ }
+ }
+ mib[1] = MALLCTL_ARENAS_DESTROYED;
+ sz = sizeof(bool);
+ xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
+ NULL, 0);
+
+ /* Merged stats. */
+ if (merged && (ninitialized > 1 || !unmerged)) {
+ /* Print merged arena stats. */
+ emitter_table_printf(emitter, "Merged arenas stats:\n");
+ emitter_json_object_kv_begin(emitter, "merged");
+ stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins,
+ large, mutex, extents);
+ emitter_json_object_end(emitter); /* Close "merged". */
+ }
+
+ /* Destroyed stats. */
+ if (destroyed_initialized && destroyed) {
+ /* Print destroyed arena stats. */
+ emitter_table_printf(emitter,
+ "Destroyed arenas stats:\n");
+ emitter_json_object_kv_begin(emitter, "destroyed");
+ stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED,
+ bins, large, mutex, extents);
+ emitter_json_object_end(emitter); /* Close "destroyed". */
+ }
+
+ /* Unmerged stats. */
+ if (unmerged) {
+ for (i = j = 0; i < narenas; i++) {
+ if (initialized[i]) {
+ char arena_ind_str[20];
+ malloc_snprintf(arena_ind_str,
+ sizeof(arena_ind_str), "%u", i);
+ emitter_json_object_kv_begin(emitter,
+ arena_ind_str);
+ emitter_table_printf(emitter,
+ "arenas[%s]:\n", arena_ind_str);
+ stats_arena_print(emitter, i, bins,
+ large, mutex, extents);
+ /* Close "<arena-ind>". */
+ emitter_json_object_end(emitter);
}
}
}
- emitter_json_object_end(emitter); /* Close "stats.arenas". */
- }
-}
-
-void
-stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts) {
- int err;
- uint64_t epoch;
- size_t u64sz;
-#define OPTION(o, v, d, s) bool v = d;
- STATS_PRINT_OPTIONS
-#undef OPTION
-
- /*
- * Refresh stats, in case mallctl() was called by the application.
- *
- * Check for OOM here, since refreshing the ctl cache can trigger
- * allocation. In practice, none of the subsequent mallctl()-related
- * calls in this function will cause OOM if this one succeeds.
- * */
- epoch = 1;
- u64sz = sizeof(uint64_t);
- err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
- sizeof(uint64_t));
- if (err != 0) {
- if (err == EAGAIN) {
- malloc_write("<jemalloc>: Memory allocation failure in "
- "mallctl(\"epoch\", ...)\n");
- return;
- }
- malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
- "...)\n");
- abort();
- }
-
- if (opts != NULL) {
- for (unsigned i = 0; opts[i] != '\0'; i++) {
- switch (opts[i]) {
-#define OPTION(o, v, d, s) case o: v = s; break;
- STATS_PRINT_OPTIONS
-#undef OPTION
- default:;
- }
- }
- }
-
- emitter_t emitter;
- emitter_init(&emitter,
- json ? emitter_output_json : emitter_output_table, write_cb,
- cbopaque);
- emitter_begin(&emitter);
- emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n");
- emitter_json_object_kv_begin(&emitter, "jemalloc");
-
- if (general) {
- stats_general_print(&emitter);
+ emitter_json_object_end(emitter); /* Close "stats.arenas". */
}
- if (config_stats) {
- stats_print_helper(&emitter, merged, destroyed, unmerged,
- bins, large, mutex, extents);
- }
-
- emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */
- emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n");
- emitter_end(&emitter);
}
+
+void
+stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts) {
+ int err;
+ uint64_t epoch;
+ size_t u64sz;
+#define OPTION(o, v, d, s) bool v = d;
+ STATS_PRINT_OPTIONS
+#undef OPTION
+
+ /*
+ * Refresh stats, in case mallctl() was called by the application.
+ *
+ * Check for OOM here, since refreshing the ctl cache can trigger
+ * allocation. In practice, none of the subsequent mallctl()-related
+ * calls in this function will cause OOM if this one succeeds.
+ * */
+ epoch = 1;
+ u64sz = sizeof(uint64_t);
+ err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
+ sizeof(uint64_t));
+ if (err != 0) {
+ if (err == EAGAIN) {
+ malloc_write("<jemalloc>: Memory allocation failure in "
+ "mallctl(\"epoch\", ...)\n");
+ return;
+ }
+ malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
+ "...)\n");
+ abort();
+ }
+
+ if (opts != NULL) {
+ for (unsigned i = 0; opts[i] != '\0'; i++) {
+ switch (opts[i]) {
+#define OPTION(o, v, d, s) case o: v = s; break;
+ STATS_PRINT_OPTIONS
+#undef OPTION
+ default:;
+ }
+ }
+ }
+
+ emitter_t emitter;
+ emitter_init(&emitter,
+ json ? emitter_output_json : emitter_output_table, write_cb,
+ cbopaque);
+ emitter_begin(&emitter);
+ emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n");
+ emitter_json_object_kv_begin(&emitter, "jemalloc");
+
+ if (general) {
+ stats_general_print(&emitter);
+ }
+ if (config_stats) {
+ stats_print_helper(&emitter, merged, destroyed, unmerged,
+ bins, large, mutex, extents);
+ }
+
+ emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */
+ emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n");
+ emitter_end(&emitter);
+}
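
Note on the restored stats code: the OPTION/STATS_PRINT_OPTIONS table in stats_print() above is what decodes the opts string handed in through the public malloc_stats_print() entry point. A minimal usage sketch, assuming the stock jemalloc public API ("J" selects JSON output; symbol prefixing such as je_malloc_stats_print depends on the build configuration):

#include <jemalloc/jemalloc.h>
#include <stdio.h>

/* Forward each chunk of statistics text to a FILE * passed as cbopaque. */
static void
stats_write_cb(void *cbopaque, const char *s) {
	fputs(s, (FILE *)cbopaque);
}

int
main(void) {
	/* Emit the full JSON stats document to stderr. */
	malloc_stats_print(stats_write_cb, stderr, "J");
	/* Passing NULL/NULL falls back to the library's default writer. */
	malloc_stats_print(NULL, NULL, "gbl");
	return 0;
}

The second call illustrates the single-character filters parsed by the OPTION loop above ("g", "b", "l", ... each omit one section of the report).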
diff --git a/contrib/libs/jemalloc/src/sz.c b/contrib/libs/jemalloc/src/sz.c
index 8633fb0500..0502da37ac 100644
--- a/contrib/libs/jemalloc/src/sz.c
+++ b/contrib/libs/jemalloc/src/sz.c
@@ -1,64 +1,64 @@
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/sz.h"
-
-JEMALLOC_ALIGNED(CACHELINE)
-size_t sz_pind2sz_tab[SC_NPSIZES+1];
-
-static void
-sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
- int pind = 0;
- for (unsigned i = 0; i < SC_NSIZES; i++) {
- const sc_t *sc = &sc_data->sc[i];
- if (sc->psz) {
- sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base)
- + (ZU(sc->ndelta) << sc->lg_delta);
- pind++;
- }
- }
- for (int i = pind; i <= (int)SC_NPSIZES; i++) {
- sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE;
- }
-}
-
-JEMALLOC_ALIGNED(CACHELINE)
-size_t sz_index2size_tab[SC_NSIZES];
-
-static void
-sz_boot_index2size_tab(const sc_data_t *sc_data) {
- for (unsigned i = 0; i < SC_NSIZES; i++) {
- const sc_t *sc = &sc_data->sc[i];
- sz_index2size_tab[i] = (ZU(1) << sc->lg_base)
- + (ZU(sc->ndelta) << (sc->lg_delta));
- }
-}
-
-/*
- * To keep this table small, we divide sizes by the tiny min size, which gives
- * the smallest interval for which the result can change.
- */
-JEMALLOC_ALIGNED(CACHELINE)
-uint8_t sz_size2index_tab[(SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1];
-
-static void
-sz_boot_size2index_tab(const sc_data_t *sc_data) {
- size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1;
- size_t dst_ind = 0;
- for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max;
- sc_ind++) {
- const sc_t *sc = &sc_data->sc[sc_ind];
- size_t sz = (ZU(1) << sc->lg_base)
- + (ZU(sc->ndelta) << sc->lg_delta);
- size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1)
- >> SC_LG_TINY_MIN);
- for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) {
- sz_size2index_tab[dst_ind] = sc_ind;
- }
- }
-}
-
-void
-sz_boot(const sc_data_t *sc_data) {
- sz_boot_pind2sz_tab(sc_data);
- sz_boot_index2size_tab(sc_data);
- sz_boot_size2index_tab(sc_data);
-}
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/sz.h"
+
+JEMALLOC_ALIGNED(CACHELINE)
+size_t sz_pind2sz_tab[SC_NPSIZES+1];
+
+static void
+sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
+ int pind = 0;
+ for (unsigned i = 0; i < SC_NSIZES; i++) {
+ const sc_t *sc = &sc_data->sc[i];
+ if (sc->psz) {
+ sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base)
+ + (ZU(sc->ndelta) << sc->lg_delta);
+ pind++;
+ }
+ }
+ for (int i = pind; i <= (int)SC_NPSIZES; i++) {
+ sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE;
+ }
+}
+
+JEMALLOC_ALIGNED(CACHELINE)
+size_t sz_index2size_tab[SC_NSIZES];
+
+static void
+sz_boot_index2size_tab(const sc_data_t *sc_data) {
+ for (unsigned i = 0; i < SC_NSIZES; i++) {
+ const sc_t *sc = &sc_data->sc[i];
+ sz_index2size_tab[i] = (ZU(1) << sc->lg_base)
+ + (ZU(sc->ndelta) << (sc->lg_delta));
+ }
+}
+
+/*
+ * To keep this table small, we divide sizes by the tiny min size, which gives
+ * the smallest interval for which the result can change.
+ */
+JEMALLOC_ALIGNED(CACHELINE)
+uint8_t sz_size2index_tab[(SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1];
+
+static void
+sz_boot_size2index_tab(const sc_data_t *sc_data) {
+ size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1;
+ size_t dst_ind = 0;
+ for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max;
+ sc_ind++) {
+ const sc_t *sc = &sc_data->sc[sc_ind];
+ size_t sz = (ZU(1) << sc->lg_base)
+ + (ZU(sc->ndelta) << sc->lg_delta);
+ size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1)
+ >> SC_LG_TINY_MIN);
+ for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) {
+ sz_size2index_tab[dst_ind] = sc_ind;
+ }
+ }
+}
+
+void
+sz_boot(const sc_data_t *sc_data) {
+ sz_boot_pind2sz_tab(sc_data);
+ sz_boot_index2size_tab(sc_data);
+ sz_boot_size2index_tab(sc_data);
+}
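
The three boot routines restored above precompute lookup tables so that a size-class query at run time is a shift plus a table read. A self-contained toy sketch of the same scheme follows; the class sizes, LG_TINY_MIN value, and names below are illustrative stand-ins, not jemalloc's real ones:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define LG_TINY_MIN 3	/* toy: smallest class is 8 bytes */

/* Toy size classes standing in for the sc_data_t entries above. */
static const size_t classes[] = {8, 16, 32, 48, 64};
static uint8_t size2index_tab[(64 >> LG_TINY_MIN) + 1];

static void
boot_size2index_tab(void) {
	size_t dst = 0;
	for (unsigned ci = 0; ci < sizeof(classes) / sizeof(classes[0]); ci++) {
		/* Last table slot that must map to class ci. */
		size_t max_ind = (classes[ci] + (1U << LG_TINY_MIN) - 1)
		    >> LG_TINY_MIN;
		for (; dst <= max_ind; dst++) {
			size2index_tab[dst] = (uint8_t)ci;
		}
	}
}

/* Valid only for size <= 64 in this toy, mirroring SC_LOOKUP_MAXCLASS. */
static unsigned
size2index(size_t size) {
	return size2index_tab[(size + (1U << LG_TINY_MIN) - 1) >> LG_TINY_MIN];
}

int
main(void) {
	boot_size2index_tab();
	/* 1 -> class 0 (8B), 24 -> class 2 (32B), 33 -> class 3 (48B). */
	printf("%u %u %u\n", size2index(1), size2index(24), size2index(33));
	return 0;
}

jemalloc's own fast path (sz_size2index_lookup() in the sz.h header) indexes sz_size2index_tab the same way, and is only valid for sizes up to SC_LOOKUP_MAXCLASS.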
diff --git a/contrib/libs/jemalloc/src/tcache.c b/contrib/libs/jemalloc/src/tcache.c
index 50099a9f2c..2c574e8f48 100644
--- a/contrib/libs/jemalloc/src/tcache.c
+++ b/contrib/libs/jemalloc/src/tcache.c
@@ -1,193 +1,193 @@
-#define JEMALLOC_TCACHE_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/safety_check.h"
-#include "jemalloc/internal/sc.h"
-
+#define JEMALLOC_TCACHE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/safety_check.h"
+#include "jemalloc/internal/sc.h"
+
/******************************************************************************/
/* Data. */
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
-cache_bin_info_t *tcache_bin_info;
+cache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
-unsigned nhbins;
+unsigned nhbins;
size_t tcache_maxclass;
-tcaches_t *tcaches;
-
-/* Index of first element within tcaches that has never been used. */
-static unsigned tcaches_past;
-
-/* Head of singly linked list tracking available tcaches elements. */
-static tcaches_t *tcaches_avail;
-
-/* Protects tcaches{,_past,_avail}. */
-static malloc_mutex_t tcaches_mtx;
-
+tcaches_t *tcaches;
+
+/* Index of first element within tcaches that has never been used. */
+static unsigned tcaches_past;
+
+/* Head of singly linked list tracking available tcaches elements. */
+static tcaches_t *tcaches_avail;
+
+/* Protects tcaches{,_past,_avail}. */
+static malloc_mutex_t tcaches_mtx;
+
/******************************************************************************/
-size_t
-tcache_salloc(tsdn_t *tsdn, const void *ptr) {
- return arena_salloc(tsdn, ptr);
+size_t
+tcache_salloc(tsdn_t *tsdn, const void *ptr) {
+ return arena_salloc(tsdn, ptr);
}
void
-tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
- szind_t binind = tcache->next_gc_bin;
-
- cache_bin_t *tbin;
- if (binind < SC_NBINS) {
- tbin = tcache_small_bin_get(tcache, binind);
- } else {
- tbin = tcache_large_bin_get(tcache, binind);
- }
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
+ szind_t binind = tcache->next_gc_bin;
+
+ cache_bin_t *tbin;
+ if (binind < SC_NBINS) {
+ tbin = tcache_small_bin_get(tcache, binind);
+ } else {
+ tbin = tcache_large_bin_get(tcache, binind);
+ }
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
- if (binind < SC_NBINS) {
- tcache_bin_flush_small(tsd, tcache, tbin, binind,
- tbin->ncached - tbin->low_water + (tbin->low_water
- >> 2));
- /*
- * Reduce fill count by 2X. Limit lg_fill_div such that
- * the fill count is always at least 1.
- */
- cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
- if ((tbin_info->ncached_max >>
- (tcache->lg_fill_div[binind] + 1)) >= 1) {
- tcache->lg_fill_div[binind]++;
- }
+ if (binind < SC_NBINS) {
+ tcache_bin_flush_small(tsd, tcache, tbin, binind,
+ tbin->ncached - tbin->low_water + (tbin->low_water
+ >> 2));
+ /*
+ * Reduce fill count by 2X. Limit lg_fill_div such that
+ * the fill count is always at least 1.
+ */
+ cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+ if ((tbin_info->ncached_max >>
+ (tcache->lg_fill_div[binind] + 1)) >= 1) {
+ tcache->lg_fill_div[binind]++;
+ }
} else {
- tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
- - tbin->low_water + (tbin->low_water >> 2), tcache);
+ tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+ - tbin->low_water + (tbin->low_water >> 2), tcache);
}
} else if (tbin->low_water < 0) {
/*
- * Increase fill count by 2X for small bins. Make sure
- * lg_fill_div stays greater than 0.
+ * Increase fill count by 2X for small bins. Make sure
+ * lg_fill_div stays greater than 0.
*/
- if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) {
- tcache->lg_fill_div[binind]--;
- }
+ if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) {
+ tcache->lg_fill_div[binind]--;
+ }
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
- if (tcache->next_gc_bin == nhbins) {
+ if (tcache->next_gc_bin == nhbins) {
tcache->next_gc_bin = 0;
- }
+ }
}
void *
-tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
+tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
void *ret;
- assert(tcache->arena != NULL);
- arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
+ assert(tcache->arena != NULL);
+ arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
config_prof ? tcache->prof_accumbytes : 0);
- if (config_prof) {
+ if (config_prof) {
tcache->prof_accumbytes = 0;
- }
- ret = cache_bin_alloc_easy(tbin, tcache_success);
-
- return ret;
-}
+ }
+ ret = cache_bin_alloc_easy(tbin, tcache_success);
-/* Enabled with --enable-extra-size-check. */
-static void
-tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
- size_t nflush, extent_t **extents){
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- /*
- * Verify that the items in the tcache all have the correct size; this
- * is useful for catching sized deallocation bugs, also to fail early
- * instead of corrupting metadata. Since this can be turned on for opt
- * builds, avoid the branch in the loop.
- */
- szind_t szind;
- size_t sz_sum = binind * nflush;
- for (unsigned i = 0 ; i < nflush; i++) {
- rtree_extent_szind_read(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)*(tbin->avail - 1 - i), true,
- &extents[i], &szind);
- sz_sum -= szind;
- }
- if (sz_sum != 0) {
- safety_check_fail("<jemalloc>: size mismatch in thread cache "
- "detected, likely caused by sized deallocation bugs by "
- "application. Abort.\n");
- abort();
- }
+ return ret;
}
+/* Enabled with --enable-extra-size-check. */
+static void
+tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
+ size_t nflush, extent_t **extents){
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ /*
+ * Verify that the items in the tcache all have the correct size; this
+ * is useful for catching sized deallocation bugs, also to fail early
+ * instead of corrupting metadata. Since this can be turned on for opt
+ * builds, avoid the branch in the loop.
+ */
+ szind_t szind;
+ size_t sz_sum = binind * nflush;
+ for (unsigned i = 0 ; i < nflush; i++) {
+ rtree_extent_szind_read(tsdn, &extents_rtree,
+ rtree_ctx, (uintptr_t)*(tbin->avail - 1 - i), true,
+ &extents[i], &szind);
+ sz_sum -= szind;
+ }
+ if (sz_sum != 0) {
+ safety_check_fail("<jemalloc>: size mismatch in thread cache "
+ "detected, likely caused by sized deallocation bugs by "
+ "application. Abort.\n");
+ abort();
+ }
+}
+
void
-tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
- szind_t binind, unsigned rem) {
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
+ szind_t binind, unsigned rem) {
bool merged_stats = false;
- assert(binind < SC_NBINS);
- assert((cache_bin_sz_t)rem <= tbin->ncached);
-
- arena_t *arena = tcache->arena;
- assert(arena != NULL);
- unsigned nflush = tbin->ncached - rem;
- VARIABLE_ARRAY(extent_t *, item_extent, nflush);
-
- /* Look up extent once per item. */
- if (config_opt_safety_checks) {
- tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
- nflush, item_extent);
- } else {
- for (unsigned i = 0 ; i < nflush; i++) {
- item_extent[i] = iealloc(tsd_tsdn(tsd),
- *(tbin->avail - 1 - i));
- }
- }
- while (nflush > 0) {
+ assert(binind < SC_NBINS);
+ assert((cache_bin_sz_t)rem <= tbin->ncached);
+
+ arena_t *arena = tcache->arena;
+ assert(arena != NULL);
+ unsigned nflush = tbin->ncached - rem;
+ VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+
+ /* Look up extent once per item. */
+ if (config_opt_safety_checks) {
+ tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
+ nflush, item_extent);
+ } else {
+ for (unsigned i = 0 ; i < nflush; i++) {
+ item_extent[i] = iealloc(tsd_tsdn(tsd),
+ *(tbin->avail - 1 - i));
+ }
+ }
+ while (nflush > 0) {
/* Lock the arena bin associated with the first object. */
- extent_t *extent = item_extent[0];
- unsigned bin_arena_ind = extent_arena_ind_get(extent);
- arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
- false);
- unsigned binshard = extent_binshard_get(extent);
- assert(binshard < bin_infos[binind].n_shards);
- bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];
-
- if (config_prof && bin_arena == arena) {
- if (arena_prof_accum(tsd_tsdn(tsd), arena,
- tcache->prof_accumbytes)) {
- prof_idump(tsd_tsdn(tsd));
- }
+ extent_t *extent = item_extent[0];
+ unsigned bin_arena_ind = extent_arena_ind_get(extent);
+ arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
+ false);
+ unsigned binshard = extent_binshard_get(extent);
+ assert(binshard < bin_infos[binind].n_shards);
+ bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];
+
+ if (config_prof && bin_arena == arena) {
+ if (arena_prof_accum(tsd_tsdn(tsd), arena,
+ tcache->prof_accumbytes)) {
+ prof_idump(tsd_tsdn(tsd));
+ }
tcache->prof_accumbytes = 0;
}
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- if (config_stats && bin_arena == arena && !merged_stats) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ if (config_stats && bin_arena == arena && !merged_stats) {
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
- unsigned ndeferred = 0;
- for (unsigned i = 0; i < nflush; i++) {
- void *ptr = *(tbin->avail - 1 - i);
- extent = item_extent[i];
- assert(ptr != NULL && extent != NULL);
-
- if (extent_arena_ind_get(extent) == bin_arena_ind
- && extent_binshard_get(extent) == binshard) {
- arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
- bin_arena, bin, binind, extent, ptr);
+ unsigned ndeferred = 0;
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = *(tbin->avail - 1 - i);
+ extent = item_extent[i];
+ assert(ptr != NULL && extent != NULL);
+
+ if (extent_arena_ind_get(extent) == bin_arena_ind
+ && extent_binshard_get(extent) == binshard) {
+ arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
+ bin_arena, bin, binind, extent, ptr);
} else {
/*
* This object was allocated via a different
@@ -195,573 +195,573 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
- *(tbin->avail - 1 - ndeferred) = ptr;
- item_extent[ndeferred] = extent;
+ *(tbin->avail - 1 - ndeferred) = ptr;
+ item_extent[ndeferred] = extent;
ndeferred++;
}
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
- nflush = ndeferred;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
+ nflush = ndeferred;
}
- if (config_stats && !merged_stats) {
+ if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- unsigned binshard;
- bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind,
- &binshard);
+ unsigned binshard;
+ bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind,
+ &binshard);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
- memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
- sizeof(void *));
+ memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+ sizeof(void *));
tbin->ncached = rem;
- if (tbin->ncached < tbin->low_water) {
+ if (tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
- }
+ }
}
void
-tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
- unsigned rem, tcache_t *tcache) {
+tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache) {
bool merged_stats = false;
assert(binind < nhbins);
- assert((cache_bin_sz_t)rem <= tbin->ncached);
-
- arena_t *tcache_arena = tcache->arena;
- assert(tcache_arena != NULL);
- unsigned nflush = tbin->ncached - rem;
- VARIABLE_ARRAY(extent_t *, item_extent, nflush);
-
-#ifndef JEMALLOC_EXTRA_SIZE_CHECK
- /* Look up extent once per item. */
- for (unsigned i = 0 ; i < nflush; i++) {
- item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
- }
-#else
- tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
- item_extent);
-#endif
- while (nflush > 0) {
+ assert((cache_bin_sz_t)rem <= tbin->ncached);
+
+ arena_t *tcache_arena = tcache->arena;
+ assert(tcache_arena != NULL);
+ unsigned nflush = tbin->ncached - rem;
+ VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+
+#ifndef JEMALLOC_EXTRA_SIZE_CHECK
+ /* Look up extent once per item. */
+ for (unsigned i = 0 ; i < nflush; i++) {
+ item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
+ }
+#else
+ tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
+ item_extent);
+#endif
+ while (nflush > 0) {
/* Lock the arena associated with the first object. */
- extent_t *extent = item_extent[0];
- unsigned locked_arena_ind = extent_arena_ind_get(extent);
- arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
- locked_arena_ind, false);
- bool idump;
+ extent_t *extent = item_extent[0];
+ unsigned locked_arena_ind = extent_arena_ind_get(extent);
+ arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
+ locked_arena_ind, false);
+ bool idump;
- if (config_prof) {
+ if (config_prof) {
idump = false;
- }
-
- bool lock_large = !arena_is_auto(locked_arena);
- if (lock_large) {
- malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
- }
- for (unsigned i = 0; i < nflush; i++) {
- void *ptr = *(tbin->avail - 1 - i);
- assert(ptr != NULL);
- extent = item_extent[i];
- if (extent_arena_ind_get(extent) == locked_arena_ind) {
- large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
- extent);
- }
- }
- if ((config_prof || config_stats) &&
- (locked_arena == tcache_arena)) {
+ }
+
+ bool lock_large = !arena_is_auto(locked_arena);
+ if (lock_large) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+ }
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = *(tbin->avail - 1 - i);
+ assert(ptr != NULL);
+ extent = item_extent[i];
+ if (extent_arena_ind_get(extent) == locked_arena_ind) {
+ large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
+ extent);
+ }
+ }
+ if ((config_prof || config_stats) &&
+ (locked_arena == tcache_arena)) {
if (config_prof) {
- idump = arena_prof_accum(tsd_tsdn(tsd),
- tcache_arena, tcache->prof_accumbytes);
+ idump = arena_prof_accum(tsd_tsdn(tsd),
+ tcache_arena, tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
if (config_stats) {
merged_stats = true;
- arena_stats_large_flush_nrequests_add(
- tsd_tsdn(tsd), &tcache_arena->stats, binind,
- tbin->tstats.nrequests);
+ arena_stats_large_flush_nrequests_add(
+ tsd_tsdn(tsd), &tcache_arena->stats, binind,
+ tbin->tstats.nrequests);
tbin->tstats.nrequests = 0;
}
}
- if (lock_large) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
- }
-
- unsigned ndeferred = 0;
- for (unsigned i = 0; i < nflush; i++) {
- void *ptr = *(tbin->avail - 1 - i);
- extent = item_extent[i];
- assert(ptr != NULL && extent != NULL);
-
- if (extent_arena_ind_get(extent) == locked_arena_ind) {
- large_dalloc_finish(tsd_tsdn(tsd), extent);
- } else {
+ if (lock_large) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+ }
+
+ unsigned ndeferred = 0;
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = *(tbin->avail - 1 - i);
+ extent = item_extent[i];
+ assert(ptr != NULL && extent != NULL);
+
+ if (extent_arena_ind_get(extent) == locked_arena_ind) {
+ large_dalloc_finish(tsd_tsdn(tsd), extent);
+ } else {
/*
* This object was allocated via a different
* arena than the one that is currently locked.
* Stash the object, so that it can be handled
* in a future pass.
*/
- *(tbin->avail - 1 - ndeferred) = ptr;
- item_extent[ndeferred] = extent;
+ *(tbin->avail - 1 - ndeferred) = ptr;
+ item_extent[ndeferred] = extent;
ndeferred++;
}
}
- if (config_prof && idump) {
- prof_idump(tsd_tsdn(tsd));
- }
- arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
- ndeferred);
- nflush = ndeferred;
- }
- if (config_stats && !merged_stats) {
+ if (config_prof && idump) {
+ prof_idump(tsd_tsdn(tsd));
+ }
+ arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
+ ndeferred);
+ nflush = ndeferred;
+ }
+ if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd),
- &tcache_arena->stats, binind, tbin->tstats.nrequests);
+ arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd),
+ &tcache_arena->stats, binind, tbin->tstats.nrequests);
tbin->tstats.nrequests = 0;
}
- memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
- sizeof(void *));
+ memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+ sizeof(void *));
tbin->ncached = rem;
- if (tbin->ncached < tbin->low_water) {
+ if (tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
- }
+ }
}
void
-tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
- assert(tcache->arena == NULL);
- tcache->arena = arena;
+tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
+ assert(tcache->arena == NULL);
+ tcache->arena = arena;
if (config_stats) {
/* Link into list of extant tcaches. */
- malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
-
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
+
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
- cache_bin_array_descriptor_init(
- &tcache->cache_bin_array_descriptor, tcache->bins_small,
- tcache->bins_large);
- ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
- &tcache->cache_bin_array_descriptor, link);
-
- malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
+ cache_bin_array_descriptor_init(
+ &tcache->cache_bin_array_descriptor, tcache->bins_small,
+ tcache->bins_large);
+ ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
+ &tcache->cache_bin_array_descriptor, link);
+
+ malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
}
-static void
-tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
- arena_t *arena = tcache->arena;
- assert(arena != NULL);
+static void
+tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
+ arena_t *arena = tcache->arena;
+ assert(arena != NULL);
if (config_stats) {
/* Unlink from list of extant tcaches. */
- malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
- if (config_debug) {
- bool in_ql = false;
- tcache_t *iter;
- ql_foreach(iter, &arena->tcache_ql, link) {
- if (iter == tcache) {
- in_ql = true;
- break;
- }
- }
- assert(in_ql);
- }
- ql_remove(&arena->tcache_ql, tcache, link);
- ql_remove(&arena->cache_bin_array_descriptor_ql,
- &tcache->cache_bin_array_descriptor, link);
- tcache_stats_merge(tsdn, tcache, arena);
- malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
- }
- tcache->arena = NULL;
-}
-
-void
-tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
- tcache_arena_dissociate(tsdn, tcache);
- tcache_arena_associate(tsdn, tcache, arena);
-}
-
-bool
-tsd_tcache_enabled_data_init(tsd_t *tsd) {
- /* Called upon tsd initialization. */
- tsd_tcache_enabled_set(tsd, opt_tcache);
- tsd_slow_update(tsd);
-
- if (opt_tcache) {
- /* Trigger tcache init. */
- tsd_tcache_data_init(tsd);
- }
-
- return false;
-}
-
-/* Initialize auto tcache (embedded in TSD). */
-static void
-tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
- memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
- tcache->prof_accumbytes = 0;
- tcache->next_gc_bin = 0;
- tcache->arena = NULL;
-
- ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
-
- size_t stack_offset = 0;
- assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
- memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
- memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - SC_NBINS));
- unsigned i = 0;
- for (; i < SC_NBINS; i++) {
- tcache->lg_fill_div[i] = 1;
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- /*
- * avail points past the available space. Allocations will
- * access the slots toward higher addresses (for the benefit of
- * prefetch).
- */
- tcache_small_bin_get(tcache, i)->avail =
- (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
- }
- for (; i < nhbins; i++) {
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- tcache_large_bin_get(tcache, i)->avail =
- (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
- }
- assert(stack_offset == stack_nelms * sizeof(void *));
-}
-
-/* Initialize auto tcache (embedded in TSD). */
-bool
-tsd_tcache_data_init(tsd_t *tsd) {
- tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
- assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
- size_t size = stack_nelms * sizeof(void *);
- /* Avoid false cacheline sharing. */
- size = sz_sa2u(size, CACHELINE);
-
- void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
- NULL, true, arena_get(TSDN_NULL, 0, true));
- if (avail_array == NULL) {
- return true;
- }
-
- tcache_init(tsd, tcache, avail_array);
- /*
- * Initialization is a bit tricky here. After malloc init is done, all
- * threads can rely on arena_choose and associate tcache accordingly.
- * However, the thread that does actual malloc bootstrapping relies on
- * functional tsd, and it can only rely on a0. In that case, we
- * associate its tcache to a0 temporarily, and later on
- * arena_choose_hard() will re-associate properly.
- */
- tcache->arena = NULL;
- arena_t *arena;
- if (!malloc_initialized()) {
- /* If in initialization, assign to a0. */
- arena = arena_get(tsd_tsdn(tsd), 0, false);
- tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
- } else {
- arena = arena_choose(tsd, NULL);
- /* This may happen if thread.tcache.enabled is used. */
- if (tcache->arena == NULL) {
- tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
- }
- }
- assert(arena == tcache->arena);
-
- return false;
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
+ if (config_debug) {
+ bool in_ql = false;
+ tcache_t *iter;
+ ql_foreach(iter, &arena->tcache_ql, link) {
+ if (iter == tcache) {
+ in_ql = true;
+ break;
+ }
+ }
+ assert(in_ql);
+ }
+ ql_remove(&arena->tcache_ql, tcache, link);
+ ql_remove(&arena->cache_bin_array_descriptor_ql,
+ &tcache->cache_bin_array_descriptor, link);
+ tcache_stats_merge(tsdn, tcache, arena);
+ malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
+ }
+ tcache->arena = NULL;
}
-/* Created manual tcache for tcache.create mallctl. */
+void
+tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
+ tcache_arena_dissociate(tsdn, tcache);
+ tcache_arena_associate(tsdn, tcache, arena);
+}
+
+bool
+tsd_tcache_enabled_data_init(tsd_t *tsd) {
+ /* Called upon tsd initialization. */
+ tsd_tcache_enabled_set(tsd, opt_tcache);
+ tsd_slow_update(tsd);
+
+ if (opt_tcache) {
+ /* Trigger tcache init. */
+ tsd_tcache_data_init(tsd);
+ }
+
+ return false;
+}
+
+/* Initialize auto tcache (embedded in TSD). */
+static void
+tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
+ memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
+ tcache->prof_accumbytes = 0;
+ tcache->next_gc_bin = 0;
+ tcache->arena = NULL;
+
+ ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
+
+ size_t stack_offset = 0;
+ assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
+ memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
+ memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - SC_NBINS));
+ unsigned i = 0;
+ for (; i < SC_NBINS; i++) {
+ tcache->lg_fill_div[i] = 1;
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ /*
+ * avail points past the available space. Allocations will
+ * access the slots toward higher addresses (for the benefit of
+ * prefetch).
+ */
+ tcache_small_bin_get(tcache, i)->avail =
+ (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
+ }
+ for (; i < nhbins; i++) {
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ tcache_large_bin_get(tcache, i)->avail =
+ (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
+ }
+ assert(stack_offset == stack_nelms * sizeof(void *));
+}
+
+/* Initialize auto tcache (embedded in TSD). */
+bool
+tsd_tcache_data_init(tsd_t *tsd) {
+ tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
+ assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
+ size_t size = stack_nelms * sizeof(void *);
+ /* Avoid false cacheline sharing. */
+ size = sz_sa2u(size, CACHELINE);
+
+ void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
+ NULL, true, arena_get(TSDN_NULL, 0, true));
+ if (avail_array == NULL) {
+ return true;
+ }
+
+ tcache_init(tsd, tcache, avail_array);
+ /*
+ * Initialization is a bit tricky here. After malloc init is done, all
+ * threads can rely on arena_choose and associate tcache accordingly.
+ * However, the thread that does actual malloc bootstrapping relies on
+ * functional tsd, and it can only rely on a0. In that case, we
+ * associate its tcache to a0 temporarily, and later on
+ * arena_choose_hard() will re-associate properly.
+ */
+ tcache->arena = NULL;
+ arena_t *arena;
+ if (!malloc_initialized()) {
+ /* If in initialization, assign to a0. */
+ arena = arena_get(tsd_tsdn(tsd), 0, false);
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ } else {
+ arena = arena_choose(tsd, NULL);
+ /* This may happen if thread.tcache.enabled is used. */
+ if (tcache->arena == NULL) {
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ }
+ }
+ assert(arena == tcache->arena);
+
+ return false;
+}
+
+/* Created manual tcache for tcache.create mallctl. */
tcache_t *
-tcache_create_explicit(tsd_t *tsd) {
+tcache_create_explicit(tsd_t *tsd) {
tcache_t *tcache;
size_t size, stack_offset;
- size = sizeof(tcache_t);
+ size = sizeof(tcache_t);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
stack_offset = size;
size += stack_nelms * sizeof(void *);
- /* Avoid false cacheline sharing. */
- size = sz_sa2u(size, CACHELINE);
+ /* Avoid false cacheline sharing. */
+ size = sz_sa2u(size, CACHELINE);
- tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
- arena_get(TSDN_NULL, 0, true));
- if (tcache == NULL) {
- return NULL;
+ tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
+ arena_get(TSDN_NULL, 0, true));
+ if (tcache == NULL) {
+ return NULL;
}
- tcache_init(tsd, tcache,
- (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
- tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
+ tcache_init(tsd, tcache,
+ (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
- return tcache;
+ return tcache;
}
-static void
-tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
- assert(tcache->arena != NULL);
+static void
+tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
+ assert(tcache->arena != NULL);
- for (unsigned i = 0; i < SC_NBINS; i++) {
- cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
- tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
+ for (unsigned i = 0; i < SC_NBINS; i++) {
+ cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
+ tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
- if (config_stats) {
- assert(tbin->tstats.nrequests == 0);
+ if (config_stats) {
+ assert(tbin->tstats.nrequests == 0);
}
}
- for (unsigned i = SC_NBINS; i < nhbins; i++) {
- cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
- tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
+ for (unsigned i = SC_NBINS; i < nhbins; i++) {
+ cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
+ tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
- if (config_stats) {
- assert(tbin->tstats.nrequests == 0);
+ if (config_stats) {
+ assert(tbin->tstats.nrequests == 0);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
- tcache->prof_accumbytes)) {
- prof_idump(tsd_tsdn(tsd));
- }
+ arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
+ tcache->prof_accumbytes)) {
+ prof_idump(tsd_tsdn(tsd));
+ }
+}
+
+void
+tcache_flush(tsd_t *tsd) {
+ assert(tcache_available(tsd));
+ tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
+}
+
+static void
+tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
+ tcache_flush_cache(tsd, tcache);
+ arena_t *arena = tcache->arena;
+ tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
+
+ if (tsd_tcache) {
+ /* Release the avail array for the TSD embedded auto tcache. */
+ void *avail_array =
+ (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
+ (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
+ idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
+ } else {
+ /* Release both the tcache struct and avail array. */
+ idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
+ }
+
+ /*
+ * The deallocation and tcache flush above may not trigger decay since
+ * we are on the tcache shutdown path (potentially with non-nominal
+ * tsd). Manually trigger decay to avoid pathological cases. Also
+ * include arena 0 because the tcache array is allocated from it.
+ */
+ arena_decay(tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false),
+ false, false);
+
+ if (arena_nthreads_get(arena, false) == 0 &&
+ !background_thread_enabled()) {
+ /* Force purging when no threads assigned to the arena anymore. */
+ arena_decay(tsd_tsdn(tsd), arena, false, true);
+ } else {
+ arena_decay(tsd_tsdn(tsd), arena, false, false);
+ }
}
+/* For auto tcache (embedded in TSD) only. */
void
-tcache_flush(tsd_t *tsd) {
- assert(tcache_available(tsd));
- tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
-}
-
-static void
-tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
- tcache_flush_cache(tsd, tcache);
- arena_t *arena = tcache->arena;
- tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
-
- if (tsd_tcache) {
- /* Release the avail array for the TSD embedded auto tcache. */
- void *avail_array =
- (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
- (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
- idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
- } else {
- /* Release both the tcache struct and avail array. */
- idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
- }
-
- /*
- * The deallocation and tcache flush above may not trigger decay since
- * we are on the tcache shutdown path (potentially with non-nominal
- * tsd). Manually trigger decay to avoid pathological cases. Also
- * include arena 0 because the tcache array is allocated from it.
- */
- arena_decay(tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false),
- false, false);
-
- if (arena_nthreads_get(arena, false) == 0 &&
- !background_thread_enabled()) {
- /* Force purging when no threads assigned to the arena anymore. */
- arena_decay(tsd_tsdn(tsd), arena, false, true);
- } else {
- arena_decay(tsd_tsdn(tsd), arena, false, false);
- }
-}
-
-/* For auto tcache (embedded in TSD) only. */
-void
-tcache_cleanup(tsd_t *tsd) {
- tcache_t *tcache = tsd_tcachep_get(tsd);
- if (!tcache_available(tsd)) {
- assert(tsd_tcache_enabled_get(tsd) == false);
- if (config_debug) {
- assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
- }
- return;
- }
- assert(tsd_tcache_enabled_get(tsd));
- assert(tcache_small_bin_get(tcache, 0)->avail != NULL);
-
- tcache_destroy(tsd, tcache, true);
- if (config_debug) {
- tcache_small_bin_get(tcache, 0)->avail = NULL;
+tcache_cleanup(tsd_t *tsd) {
+ tcache_t *tcache = tsd_tcachep_get(tsd);
+ if (!tcache_available(tsd)) {
+ assert(tsd_tcache_enabled_get(tsd) == false);
+ if (config_debug) {
+ assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
+ }
+ return;
+ }
+ assert(tsd_tcache_enabled_get(tsd));
+ assert(tcache_small_bin_get(tcache, 0)->avail != NULL);
+
+ tcache_destroy(tsd, tcache, true);
+ if (config_debug) {
+ tcache_small_bin_get(tcache, 0)->avail = NULL;
}
}
void
-tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
+tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
unsigned i;
cassert(config_stats);
/* Merge and reset tcache stats. */
- for (i = 0; i < SC_NBINS; i++) {
- cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
- unsigned binshard;
- bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard);
+ for (i = 0; i < SC_NBINS; i++) {
+ cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
+ unsigned binshard;
+ bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard);
bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
tbin->tstats.nrequests = 0;
}
for (; i < nhbins; i++) {
- cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
- arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i,
- tbin->tstats.nrequests);
+ cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
+ arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i,
+ tbin->tstats.nrequests);
tbin->tstats.nrequests = 0;
}
}
-static bool
-tcaches_create_prep(tsd_t *tsd) {
- bool err;
-
- malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
-
- if (tcaches == NULL) {
- tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
- * (MALLOCX_TCACHE_MAX+1), CACHELINE);
- if (tcaches == NULL) {
- err = true;
- goto label_return;
- }
- }
-
- if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
- err = true;
- goto label_return;
- }
-
- err = false;
-label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
- return err;
-}
-
+static bool
+tcaches_create_prep(tsd_t *tsd) {
+ bool err;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+
+ if (tcaches == NULL) {
+ tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
+ * (MALLOCX_TCACHE_MAX+1), CACHELINE);
+ if (tcaches == NULL) {
+ err = true;
+ goto label_return;
+ }
+ }
+
+ if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
+ err = true;
+ goto label_return;
+ }
+
+ err = false;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
+ return err;
+}
+
bool
-tcaches_create(tsd_t *tsd, unsigned *r_ind) {
- witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
-
- bool err;
-
- if (tcaches_create_prep(tsd)) {
- err = true;
- goto label_return;
- }
-
- tcache_t *tcache = tcache_create_explicit(tsd);
- if (tcache == NULL) {
- err = true;
- goto label_return;
- }
-
- tcaches_t *elm;
- malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
- if (tcaches_avail != NULL) {
- elm = tcaches_avail;
- tcaches_avail = tcaches_avail->next;
- elm->tcache = tcache;
- *r_ind = (unsigned)(elm - tcaches);
- } else {
- elm = &tcaches[tcaches_past];
- elm->tcache = tcache;
- *r_ind = tcaches_past;
- tcaches_past++;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
-
- err = false;
-label_return:
- witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
- return err;
-}
-
-static tcache_t *
-tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm, bool allow_reinit) {
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
-
- if (elm->tcache == NULL) {
- return NULL;
- }
- tcache_t *tcache = elm->tcache;
- if (allow_reinit) {
- elm->tcache = TCACHES_ELM_NEED_REINIT;
- } else {
- elm->tcache = NULL;
- }
-
- if (tcache == TCACHES_ELM_NEED_REINIT) {
- return NULL;
- }
- return tcache;
-}
-
-void
-tcaches_flush(tsd_t *tsd, unsigned ind) {
- malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
- tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind], true);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
- if (tcache != NULL) {
- /* Destroy the tcache; recreate in tcaches_get() if needed. */
- tcache_destroy(tsd, tcache, false);
- }
-}
-
-void
-tcaches_destroy(tsd_t *tsd, unsigned ind) {
- malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
- tcaches_t *elm = &tcaches[ind];
- tcache_t *tcache = tcaches_elm_remove(tsd, elm, false);
- elm->next = tcaches_avail;
- tcaches_avail = elm;
- malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
- if (tcache != NULL) {
- tcache_destroy(tsd, tcache, false);
- }
-}
-
-bool
-tcache_boot(tsdn_t *tsdn) {
- /* If necessary, clamp opt_lg_tcache_max. */
- if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
- SC_SMALL_MAXCLASS) {
- tcache_maxclass = SC_SMALL_MAXCLASS;
- } else {
- tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
- }
-
- if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- nhbins = sz_size2index(tcache_maxclass) + 1;
-
+tcaches_create(tsd_t *tsd, unsigned *r_ind) {
+ witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
+
+ bool err;
+
+ if (tcaches_create_prep(tsd)) {
+ err = true;
+ goto label_return;
+ }
+
+ tcache_t *tcache = tcache_create_explicit(tsd);
+ if (tcache == NULL) {
+ err = true;
+ goto label_return;
+ }
+
+ tcaches_t *elm;
+ malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+ if (tcaches_avail != NULL) {
+ elm = tcaches_avail;
+ tcaches_avail = tcaches_avail->next;
+ elm->tcache = tcache;
+ *r_ind = (unsigned)(elm - tcaches);
+ } else {
+ elm = &tcaches[tcaches_past];
+ elm->tcache = tcache;
+ *r_ind = tcaches_past;
+ tcaches_past++;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
+
+ err = false;
+label_return:
+ witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
+ return err;
+}
+
+static tcache_t *
+tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm, bool allow_reinit) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
+
+ if (elm->tcache == NULL) {
+ return NULL;
+ }
+ tcache_t *tcache = elm->tcache;
+ if (allow_reinit) {
+ elm->tcache = TCACHES_ELM_NEED_REINIT;
+ } else {
+ elm->tcache = NULL;
+ }
+
+ if (tcache == TCACHES_ELM_NEED_REINIT) {
+ return NULL;
+ }
+ return tcache;
+}
+
+void
+tcaches_flush(tsd_t *tsd, unsigned ind) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+ tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind], true);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
+ if (tcache != NULL) {
+ /* Destroy the tcache; recreate in tcaches_get() if needed. */
+ tcache_destroy(tsd, tcache, false);
+ }
+}
+
+void
+tcaches_destroy(tsd_t *tsd, unsigned ind) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+ tcaches_t *elm = &tcaches[ind];
+ tcache_t *tcache = tcaches_elm_remove(tsd, elm, false);
+ elm->next = tcaches_avail;
+ tcaches_avail = elm;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
+ if (tcache != NULL) {
+ tcache_destroy(tsd, tcache, false);
+ }
+}
+
+bool
+tcache_boot(tsdn_t *tsdn) {
+ /* If necessary, clamp opt_lg_tcache_max. */
+ if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
+ SC_SMALL_MAXCLASS) {
+ tcache_maxclass = SC_SMALL_MAXCLASS;
+ } else {
+ tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
+ }
+
+ if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ nhbins = sz_size2index(tcache_maxclass) + 1;
+
/* Initialize tcache_bin_info. */
- tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
- * sizeof(cache_bin_info_t), CACHELINE);
- if (tcache_bin_info == NULL) {
- return true;
- }
+ tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
+ * sizeof(cache_bin_info_t), CACHELINE);
+ if (tcache_bin_info == NULL) {
+ return true;
+ }
stack_nelms = 0;
- unsigned i;
- for (i = 0; i < SC_NBINS; i++) {
- if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+ unsigned i;
+ for (i = 0; i < SC_NBINS; i++) {
+ if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
tcache_bin_info[i].ncached_max =
- TCACHE_NSLOTS_SMALL_MIN;
- } else if ((bin_infos[i].nregs << 1) <=
- TCACHE_NSLOTS_SMALL_MAX) {
- tcache_bin_info[i].ncached_max =
- (bin_infos[i].nregs << 1);
+ TCACHE_NSLOTS_SMALL_MIN;
+ } else if ((bin_infos[i].nregs << 1) <=
+ TCACHE_NSLOTS_SMALL_MAX) {
+ tcache_bin_info[i].ncached_max =
+ (bin_infos[i].nregs << 1);
} else {
tcache_bin_info[i].ncached_max =
TCACHE_NSLOTS_SMALL_MAX;
@@ -773,26 +773,26 @@ tcache_boot(tsdn_t *tsdn) {
stack_nelms += tcache_bin_info[i].ncached_max;
}
- return false;
+ return false;
}
-void
-tcache_prefork(tsdn_t *tsdn) {
- if (!config_prof && opt_tcache) {
- malloc_mutex_prefork(tsdn, &tcaches_mtx);
- }
-}
-
-void
-tcache_postfork_parent(tsdn_t *tsdn) {
- if (!config_prof && opt_tcache) {
- malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
- }
-}
-
-void
-tcache_postfork_child(tsdn_t *tsdn) {
- if (!config_prof && opt_tcache) {
- malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
- }
+void
+tcache_prefork(tsdn_t *tsdn) {
+ if (!config_prof && opt_tcache) {
+ malloc_mutex_prefork(tsdn, &tcaches_mtx);
+ }
+}
+
+void
+tcache_postfork_parent(tsdn_t *tsdn) {
+ if (!config_prof && opt_tcache) {
+ malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
+ }
+}
+
+void
+tcache_postfork_child(tsdn_t *tsdn) {
+ if (!config_prof && opt_tcache) {
+ malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
+ }
}
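
The tcaches_create(), tcaches_flush() and tcaches_destroy() functions restored above back the documented tcache.create, tcache.flush and tcache.destroy mallctls. A hedged sketch of how an application drives them (assuming the unprefixed public API; this vendored build may expose je_-prefixed names instead):

#include <jemalloc/jemalloc.h>
#include <stddef.h>

/* Allocate through an explicit (manual) thread cache, then tear it down. */
int
use_explicit_tcache(void) {
	unsigned tc;
	size_t sz = sizeof(tc);

	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0) {
		return -1;	/* e.g. tcaches table exhausted */
	}
	void *p = mallocx(4096, MALLOCX_TCACHE(tc));
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE(tc));
	}
	/* Return cached objects to the arenas without destroying the cache. */
	mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
	/* Release the cache slot; tcaches_destroy() recycles it via the
	 * tcaches_avail free list seen above. */
	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
	return 0;
}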
diff --git a/contrib/libs/jemalloc/src/test_hooks.c b/contrib/libs/jemalloc/src/test_hooks.c
index ace00d9c46..55d4a7417f 100644
--- a/contrib/libs/jemalloc/src/test_hooks.c
+++ b/contrib/libs/jemalloc/src/test_hooks.c
@@ -1,12 +1,12 @@
-#include "jemalloc/internal/jemalloc_preamble.h"
-
-/*
- * The hooks are a little bit screwy -- they're not genuinely exported in the
- * sense that we want them available to end-users, but we do want them visible
- * from outside the generated library, so that we can use them in test code.
- */
-JEMALLOC_EXPORT
-void (*test_hooks_arena_new_hook)() = NULL;
-
-JEMALLOC_EXPORT
-void (*test_hooks_libc_hook)() = NULL;
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+/*
+ * The hooks are a little bit screwy -- they're not genuinely exported in the
+ * sense that we want them available to end-users, but we do want them visible
+ * from outside the generated library, so that we can use them in test code.
+ */
+JEMALLOC_EXPORT
+void (*test_hooks_arena_new_hook)() = NULL;
+
+JEMALLOC_EXPORT
+void (*test_hooks_libc_hook)() = NULL;
diff --git a/contrib/libs/jemalloc/src/ticker.c b/contrib/libs/jemalloc/src/ticker.c
index d7b8cd26c0..bded4301ec 100644
--- a/contrib/libs/jemalloc/src/ticker.c
+++ b/contrib/libs/jemalloc/src/ticker.c
@@ -1,3 +1,3 @@
-#define JEMALLOC_TICKER_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
+#define JEMALLOC_TICKER_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/contrib/libs/jemalloc/src/tsd.c b/contrib/libs/jemalloc/src/tsd.c
index a31f6b9698..968fcf645a 100644
--- a/contrib/libs/jemalloc/src/tsd.c
+++ b/contrib/libs/jemalloc/src/tsd.c
@@ -1,351 +1,351 @@
-#define JEMALLOC_TSD_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/rtree.h"
-
+#define JEMALLOC_TSD_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+
/******************************************************************************/
/* Data. */
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
-/* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */
-JEMALLOC_DIAGNOSTIC_PUSH
-JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
-
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
-JEMALLOC_TSD_TYPE_ATTR(bool) JEMALLOC_TLS_MODEL tsd_initialized = false;
-bool tsd_booted = false;
-#elif (defined(JEMALLOC_TLS))
-JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
-pthread_key_t tsd_tsd;
-bool tsd_booted = false;
-#elif (defined(_WIN32))
-DWORD tsd_tsd;
-tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
-bool tsd_booted = false;
-#else
-
-/*
- * This contains a mutex, but it's pretty convenient to allow the mutex code to
- * have a dependency on tsd. So we define the struct here, and only refer to it
- * by pointer in the header.
- */
-struct tsd_init_head_s {
- ql_head(tsd_init_block_t) blocks;
- malloc_mutex_t lock;
-};
-
-pthread_key_t tsd_tsd;
-tsd_init_head_t tsd_init_head = {
- ql_head_initializer(blocks),
- MALLOC_MUTEX_INITIALIZER
-};
-
-tsd_wrapper_t tsd_boot_wrapper = {
- false,
- TSD_INITIALIZER
-};
-bool tsd_booted = false;
-#endif
-
-JEMALLOC_DIAGNOSTIC_POP
-
+/* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
+
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
+JEMALLOC_TSD_TYPE_ATTR(bool) JEMALLOC_TLS_MODEL tsd_initialized = false;
+bool tsd_booted = false;
+#elif (defined(JEMALLOC_TLS))
+JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
+pthread_key_t tsd_tsd;
+bool tsd_booted = false;
+#elif (defined(_WIN32))
+DWORD tsd_tsd;
+tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
+bool tsd_booted = false;
+#else
+
+/*
+ * This contains a mutex, but it's pretty convenient to allow the mutex code to
+ * have a dependency on tsd. So we define the struct here, and only refer to it
+ * by pointer in the header.
+ */
+struct tsd_init_head_s {
+ ql_head(tsd_init_block_t) blocks;
+ malloc_mutex_t lock;
+};
+
+pthread_key_t tsd_tsd;
+tsd_init_head_t tsd_init_head = {
+ ql_head_initializer(blocks),
+ MALLOC_MUTEX_INITIALIZER
+};
+
+tsd_wrapper_t tsd_boot_wrapper = {
+ false,
+ TSD_INITIALIZER
+};
+bool tsd_booted = false;
+#endif
+
+JEMALLOC_DIAGNOSTIC_POP
+
/******************************************************************************/
-/* A list of all the tsds in the nominal state. */
-typedef ql_head(tsd_t) tsd_list_t;
-static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds);
-static malloc_mutex_t tsd_nominal_tsds_lock;
-
-/* How many slow-path-enabling features are turned on. */
-static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0);
-
-static bool
-tsd_in_nominal_list(tsd_t *tsd) {
- tsd_t *tsd_list;
- bool found = false;
- /*
- * We don't know that tsd is nominal; it might not be safe to get data
- * out of it here.
- */
- malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock);
- ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
- if (tsd == tsd_list) {
- found = true;
- break;
- }
- }
- malloc_mutex_unlock(TSDN_NULL, &tsd_nominal_tsds_lock);
- return found;
-}
-
-static void
-tsd_add_nominal(tsd_t *tsd) {
- assert(!tsd_in_nominal_list(tsd));
- assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
- ql_elm_new(tsd, TSD_MANGLE(tcache).tsd_link);
- malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
- ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
-}
-
-static void
-tsd_remove_nominal(tsd_t *tsd) {
- assert(tsd_in_nominal_list(tsd));
- assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
- malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
- ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
-}
-
-static void
-tsd_force_recompute(tsdn_t *tsdn) {
- /*
- * The stores to tsd->state here need to synchronize with the exchange
- * in tsd_slow_update.
- */
- atomic_fence(ATOMIC_RELEASE);
- malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock);
- tsd_t *remote_tsd;
- ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
- assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED)
- <= tsd_state_nominal_max);
- tsd_atomic_store(&remote_tsd->state, tsd_state_nominal_recompute,
- ATOMIC_RELAXED);
- }
- malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock);
+/* A list of all the tsds in the nominal state. */
+typedef ql_head(tsd_t) tsd_list_t;
+static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds);
+static malloc_mutex_t tsd_nominal_tsds_lock;
+
+/* How many slow-path-enabling features are turned on. */
+static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0);
+
+static bool
+tsd_in_nominal_list(tsd_t *tsd) {
+ tsd_t *tsd_list;
+ bool found = false;
+ /*
+ * We don't know that tsd is nominal; it might not be safe to get data
+ * out of it here.
+ */
+ malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock);
+ ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
+ if (tsd == tsd_list) {
+ found = true;
+ break;
+ }
+ }
+ malloc_mutex_unlock(TSDN_NULL, &tsd_nominal_tsds_lock);
+ return found;
}
+static void
+tsd_add_nominal(tsd_t *tsd) {
+ assert(!tsd_in_nominal_list(tsd));
+ assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
+ ql_elm_new(tsd, TSD_MANGLE(tcache).tsd_link);
+ malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
+ ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
+}
+
+static void
+tsd_remove_nominal(tsd_t *tsd) {
+ assert(tsd_in_nominal_list(tsd));
+ assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
+ malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
+ ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
+}
+
+static void
+tsd_force_recompute(tsdn_t *tsdn) {
+ /*
+ * The stores to tsd->state here need to synchronize with the exchange
+ * in tsd_slow_update.
+ */
+ atomic_fence(ATOMIC_RELEASE);
+ malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock);
+ tsd_t *remote_tsd;
+ ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
+ assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED)
+ <= tsd_state_nominal_max);
+ tsd_atomic_store(&remote_tsd->state, tsd_state_nominal_recompute,
+ ATOMIC_RELAXED);
+ }
+ malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock);
+}
+
void
-tsd_global_slow_inc(tsdn_t *tsdn) {
- atomic_fetch_add_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
- /*
- * We unconditionally force a recompute, even if the global slow count
- * was already positive. If we didn't, then it would be possible for us
- * to return to the user, have the user synchronize externally with some
- * other thread, and then have that other thread not have picked up the
- * update yet (since the original incrementing thread might still be
- * making its way through the tsd list).
- */
- tsd_force_recompute(tsdn);
-}
-
-void tsd_global_slow_dec(tsdn_t *tsdn) {
- atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
- /* See the note in ..._inc(). */
- tsd_force_recompute(tsdn);
-}
-
-static bool
-tsd_local_slow(tsd_t *tsd) {
- return !tsd_tcache_enabled_get(tsd)
- || tsd_reentrancy_level_get(tsd) > 0;
-}
-
-bool
-tsd_global_slow() {
- return atomic_load_u32(&tsd_global_slow_count, ATOMIC_RELAXED) > 0;
-}
-
-/******************************************************************************/
-
-static uint8_t
-tsd_state_compute(tsd_t *tsd) {
- if (!tsd_nominal(tsd)) {
- return tsd_state_get(tsd);
- }
- /* We're in *a* nominal state; but which one? */
- if (malloc_slow || tsd_local_slow(tsd) || tsd_global_slow()) {
- return tsd_state_nominal_slow;
- } else {
- return tsd_state_nominal;
- }
-}
-
-void
-tsd_slow_update(tsd_t *tsd) {
- uint8_t old_state;
- do {
- uint8_t new_state = tsd_state_compute(tsd);
- old_state = tsd_atomic_exchange(&tsd->state, new_state,
- ATOMIC_ACQUIRE);
- } while (old_state == tsd_state_nominal_recompute);
-}
-
-void
-tsd_state_set(tsd_t *tsd, uint8_t new_state) {
- /* Only the tsd module can change the state *to* recompute. */
- assert(new_state != tsd_state_nominal_recompute);
- uint8_t old_state = tsd_atomic_load(&tsd->state, ATOMIC_RELAXED);
- if (old_state > tsd_state_nominal_max) {
- /*
- * Not currently in the nominal list, but it might need to be
- * inserted there.
- */
- assert(!tsd_in_nominal_list(tsd));
- tsd_atomic_store(&tsd->state, new_state, ATOMIC_RELAXED);
- if (new_state <= tsd_state_nominal_max) {
- tsd_add_nominal(tsd);
- }
- } else {
- /*
- * We're currently nominal. If the new state is non-nominal,
- * great; we take ourselves off the list and just enter the new
- * state.
- */
- assert(tsd_in_nominal_list(tsd));
- if (new_state > tsd_state_nominal_max) {
- tsd_remove_nominal(tsd);
- tsd_atomic_store(&tsd->state, new_state,
- ATOMIC_RELAXED);
- } else {
- /*
- * This is the tricky case. We're transitioning from
- * one nominal state to another. The caller can't know
- * about any races that are occuring at the same time,
- * so we always have to recompute no matter what.
- */
- tsd_slow_update(tsd);
- }
- }
-}
-
-static bool
-tsd_data_init(tsd_t *tsd) {
- /*
- * We initialize the rtree context first (before the tcache), since the
- * tcache initialization depends on it.
- */
- rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
-
- /*
- * A nondeterministic seed based on the address of tsd reduces
- * the likelihood of lockstep non-uniform cache index
- * utilization among identical concurrent processes, but at the
- * cost of test repeatability. For debug builds, instead use a
- * deterministic seed.
- */
- *tsd_offset_statep_get(tsd) = config_debug ? 0 :
- (uint64_t)(uintptr_t)tsd;
-
- return tsd_tcache_enabled_data_init(tsd);
-}
-
-static void
-assert_tsd_data_cleanup_done(tsd_t *tsd) {
- assert(!tsd_nominal(tsd));
- assert(!tsd_in_nominal_list(tsd));
- assert(*tsd_arenap_get_unsafe(tsd) == NULL);
- assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
- assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
- assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
- assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
- assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
-}
-
-static bool
-tsd_data_init_nocleanup(tsd_t *tsd) {
- assert(tsd_state_get(tsd) == tsd_state_reincarnated ||
- tsd_state_get(tsd) == tsd_state_minimal_initialized);
- /*
- * During reincarnation, there is no guarantee that the cleanup function
- * will be called (deallocation may happen after all tsd destructors).
- * We set up tsd in a way that no cleanup is needed.
- */
- rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
- *tsd_tcache_enabledp_get_unsafe(tsd) = false;
- *tsd_reentrancy_levelp_get(tsd) = 1;
- assert_tsd_data_cleanup_done(tsd);
-
- return false;
-}
-
-tsd_t *
-tsd_fetch_slow(tsd_t *tsd, bool minimal) {
- assert(!tsd_fast(tsd));
-
- if (tsd_state_get(tsd) == tsd_state_nominal_slow) {
- /*
- * On slow path but no work needed. Note that we can't
- * necessarily *assert* that we're slow, because we might be
- * slow because of an asynchronous modification to global state,
- * which might be asynchronously modified *back*.
- */
- } else if (tsd_state_get(tsd) == tsd_state_nominal_recompute) {
- tsd_slow_update(tsd);
- } else if (tsd_state_get(tsd) == tsd_state_uninitialized) {
- if (!minimal) {
- if (tsd_booted) {
- tsd_state_set(tsd, tsd_state_nominal);
- tsd_slow_update(tsd);
- /* Trigger cleanup handler registration. */
- tsd_set(tsd);
- tsd_data_init(tsd);
- }
- } else {
- tsd_state_set(tsd, tsd_state_minimal_initialized);
- tsd_set(tsd);
- tsd_data_init_nocleanup(tsd);
- }
- } else if (tsd_state_get(tsd) == tsd_state_minimal_initialized) {
- if (!minimal) {
- /* Switch to fully initialized. */
- tsd_state_set(tsd, tsd_state_nominal);
- assert(*tsd_reentrancy_levelp_get(tsd) >= 1);
- (*tsd_reentrancy_levelp_get(tsd))--;
- tsd_slow_update(tsd);
- tsd_data_init(tsd);
- } else {
- assert_tsd_data_cleanup_done(tsd);
- }
- } else if (tsd_state_get(tsd) == tsd_state_purgatory) {
- tsd_state_set(tsd, tsd_state_reincarnated);
- tsd_set(tsd);
- tsd_data_init_nocleanup(tsd);
- } else {
- assert(tsd_state_get(tsd) == tsd_state_reincarnated);
- }
-
- return tsd;
-}
-
-void *
-malloc_tsd_malloc(size_t size) {
- return a0malloc(CACHELINE_CEILING(size));
+tsd_global_slow_inc(tsdn_t *tsdn) {
+ atomic_fetch_add_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
+ /*
+ * We unconditionally force a recompute, even if the global slow count
+ * was already positive. If we didn't, then it would be possible for us
+ * to return to the user, have the user synchronize externally with some
+ * other thread, and then have that other thread not have picked up the
+ * update yet (since the original incrementing thread might still be
+ * making its way through the tsd list).
+ */
+ tsd_force_recompute(tsdn);
+}
+
+void tsd_global_slow_dec(tsdn_t *tsdn) {
+ atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
+ /* See the note in ..._inc(). */
+ tsd_force_recompute(tsdn);
}
+static bool
+tsd_local_slow(tsd_t *tsd) {
+ return !tsd_tcache_enabled_get(tsd)
+ || tsd_reentrancy_level_get(tsd) > 0;
+}
+
+bool
+tsd_global_slow() {
+ return atomic_load_u32(&tsd_global_slow_count, ATOMIC_RELAXED) > 0;
+}
+
+/******************************************************************************/
+
+static uint8_t
+tsd_state_compute(tsd_t *tsd) {
+ if (!tsd_nominal(tsd)) {
+ return tsd_state_get(tsd);
+ }
+ /* We're in *a* nominal state; but which one? */
+ if (malloc_slow || tsd_local_slow(tsd) || tsd_global_slow()) {
+ return tsd_state_nominal_slow;
+ } else {
+ return tsd_state_nominal;
+ }
+}
+
void
-malloc_tsd_dalloc(void *wrapper) {
- a0dalloc(wrapper);
+tsd_slow_update(tsd_t *tsd) {
+ uint8_t old_state;
+ do {
+ uint8_t new_state = tsd_state_compute(tsd);
+ old_state = tsd_atomic_exchange(&tsd->state, new_state,
+ ATOMIC_ACQUIRE);
+ } while (old_state == tsd_state_nominal_recompute);
+}
+
+void
+tsd_state_set(tsd_t *tsd, uint8_t new_state) {
+ /* Only the tsd module can change the state *to* recompute. */
+ assert(new_state != tsd_state_nominal_recompute);
+ uint8_t old_state = tsd_atomic_load(&tsd->state, ATOMIC_RELAXED);
+ if (old_state > tsd_state_nominal_max) {
+ /*
+ * Not currently in the nominal list, but it might need to be
+ * inserted there.
+ */
+ assert(!tsd_in_nominal_list(tsd));
+ tsd_atomic_store(&tsd->state, new_state, ATOMIC_RELAXED);
+ if (new_state <= tsd_state_nominal_max) {
+ tsd_add_nominal(tsd);
+ }
+ } else {
+ /*
+ * We're currently nominal. If the new state is non-nominal,
+ * great; we take ourselves off the list and just enter the new
+ * state.
+ */
+ assert(tsd_in_nominal_list(tsd));
+ if (new_state > tsd_state_nominal_max) {
+ tsd_remove_nominal(tsd);
+ tsd_atomic_store(&tsd->state, new_state,
+ ATOMIC_RELAXED);
+ } else {
+ /*
+ * This is the tricky case. We're transitioning from
+ * one nominal state to another. The caller can't know
+ * about any races that are occuring at the same time,
+ * so we always have to recompute no matter what.
+ */
+ tsd_slow_update(tsd);
+ }
+ }
}
+static bool
+tsd_data_init(tsd_t *tsd) {
+ /*
+ * We initialize the rtree context first (before the tcache), since the
+ * tcache initialization depends on it.
+ */
+ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
+
+ /*
+ * A nondeterministic seed based on the address of tsd reduces
+ * the likelihood of lockstep non-uniform cache index
+ * utilization among identical concurrent processes, but at the
+ * cost of test repeatability. For debug builds, instead use a
+ * deterministic seed.
+ */
+ *tsd_offset_statep_get(tsd) = config_debug ? 0 :
+ (uint64_t)(uintptr_t)tsd;
+
+ return tsd_tcache_enabled_data_init(tsd);
+}
+
+static void
+assert_tsd_data_cleanup_done(tsd_t *tsd) {
+ assert(!tsd_nominal(tsd));
+ assert(!tsd_in_nominal_list(tsd));
+ assert(*tsd_arenap_get_unsafe(tsd) == NULL);
+ assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
+ assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
+ assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
+ assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
+ assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
+}
+
+static bool
+tsd_data_init_nocleanup(tsd_t *tsd) {
+ assert(tsd_state_get(tsd) == tsd_state_reincarnated ||
+ tsd_state_get(tsd) == tsd_state_minimal_initialized);
+ /*
+ * During reincarnation, there is no guarantee that the cleanup function
+ * will be called (deallocation may happen after all tsd destructors).
+ * We set up tsd in a way that no cleanup is needed.
+ */
+ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+ *tsd_tcache_enabledp_get_unsafe(tsd) = false;
+ *tsd_reentrancy_levelp_get(tsd) = 1;
+ assert_tsd_data_cleanup_done(tsd);
+
+ return false;
+}
+
+tsd_t *
+tsd_fetch_slow(tsd_t *tsd, bool minimal) {
+ assert(!tsd_fast(tsd));
+
+ if (tsd_state_get(tsd) == tsd_state_nominal_slow) {
+ /*
+ * On slow path but no work needed. Note that we can't
+ * necessarily *assert* that we're slow, because we might be
+ * slow because of an asynchronous modification to global state,
+ * which might be asynchronously modified *back*.
+ */
+ } else if (tsd_state_get(tsd) == tsd_state_nominal_recompute) {
+ tsd_slow_update(tsd);
+ } else if (tsd_state_get(tsd) == tsd_state_uninitialized) {
+ if (!minimal) {
+ if (tsd_booted) {
+ tsd_state_set(tsd, tsd_state_nominal);
+ tsd_slow_update(tsd);
+ /* Trigger cleanup handler registration. */
+ tsd_set(tsd);
+ tsd_data_init(tsd);
+ }
+ } else {
+ tsd_state_set(tsd, tsd_state_minimal_initialized);
+ tsd_set(tsd);
+ tsd_data_init_nocleanup(tsd);
+ }
+ } else if (tsd_state_get(tsd) == tsd_state_minimal_initialized) {
+ if (!minimal) {
+ /* Switch to fully initialized. */
+ tsd_state_set(tsd, tsd_state_nominal);
+ assert(*tsd_reentrancy_levelp_get(tsd) >= 1);
+ (*tsd_reentrancy_levelp_get(tsd))--;
+ tsd_slow_update(tsd);
+ tsd_data_init(tsd);
+ } else {
+ assert_tsd_data_cleanup_done(tsd);
+ }
+ } else if (tsd_state_get(tsd) == tsd_state_purgatory) {
+ tsd_state_set(tsd, tsd_state_reincarnated);
+ tsd_set(tsd);
+ tsd_data_init_nocleanup(tsd);
+ } else {
+ assert(tsd_state_get(tsd) == tsd_state_reincarnated);
+ }
+
+ return tsd;
+}
+
+void *
+malloc_tsd_malloc(size_t size) {
+ return a0malloc(CACHELINE_CEILING(size));
+}
+
+void
+malloc_tsd_dalloc(void *wrapper) {
+ a0dalloc(wrapper);
+}
+
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
#ifndef _WIN32
JEMALLOC_EXPORT
#endif
void
-_malloc_thread_cleanup(void) {
+_malloc_thread_cleanup(void) {
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
unsigned i;
- for (i = 0; i < ncleanups; i++) {
+ for (i = 0; i < ncleanups; i++) {
pending[i] = true;
- }
+ }
do {
again = false;
for (i = 0; i < ncleanups; i++) {
if (pending[i]) {
pending[i] = cleanups[i]();
- if (pending[i]) {
+ if (pending[i]) {
again = true;
- }
+ }
}
}
} while (again);
@@ -353,96 +353,96 @@ _malloc_thread_cleanup(void) {
#endif
void
-malloc_tsd_cleanup_register(bool (*f)(void)) {
+malloc_tsd_cleanup_register(bool (*f)(void)) {
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
cleanups[ncleanups] = f;
ncleanups++;
}
-static void
-tsd_do_data_cleanup(tsd_t *tsd) {
- prof_tdata_cleanup(tsd);
- iarena_cleanup(tsd);
- arena_cleanup(tsd);
- arenas_tdata_cleanup(tsd);
- tcache_cleanup(tsd);
- witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
-}
-
+static void
+tsd_do_data_cleanup(tsd_t *tsd) {
+ prof_tdata_cleanup(tsd);
+ iarena_cleanup(tsd);
+ arena_cleanup(tsd);
+ arenas_tdata_cleanup(tsd);
+ tcache_cleanup(tsd);
+ witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
+}
+
void
-tsd_cleanup(void *arg) {
- tsd_t *tsd = (tsd_t *)arg;
-
- switch (tsd_state_get(tsd)) {
- case tsd_state_uninitialized:
- /* Do nothing. */
- break;
- case tsd_state_minimal_initialized:
- /* This implies the thread only did free() in its life time. */
- /* Fall through. */
- case tsd_state_reincarnated:
- /*
- * Reincarnated means another destructor deallocated memory
- * after the destructor was called. Cleanup isn't required but
- * is still called for testing and completeness.
- */
- assert_tsd_data_cleanup_done(tsd);
- /* Fall through. */
- case tsd_state_nominal:
- case tsd_state_nominal_slow:
- tsd_do_data_cleanup(tsd);
- tsd_state_set(tsd, tsd_state_purgatory);
- tsd_set(tsd);
- break;
- case tsd_state_purgatory:
- /*
- * The previous time this destructor was called, we set the
- * state to tsd_state_purgatory so that other destructors
- * wouldn't cause re-creation of the tsd. This time, do
- * nothing, and do not request another callback.
- */
- break;
- default:
- not_reached();
- }
-#ifdef JEMALLOC_JET
- test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd);
- int *data = tsd_test_datap_get_unsafe(tsd);
- if (test_callback != NULL) {
- test_callback(data);
- }
-#endif
-}
-
-tsd_t *
-malloc_tsd_boot0(void) {
- tsd_t *tsd;
-
+tsd_cleanup(void *arg) {
+ tsd_t *tsd = (tsd_t *)arg;
+
+ switch (tsd_state_get(tsd)) {
+ case tsd_state_uninitialized:
+ /* Do nothing. */
+ break;
+ case tsd_state_minimal_initialized:
+ /* This implies the thread only did free() in its life time. */
+ /* Fall through. */
+ case tsd_state_reincarnated:
+ /*
+ * Reincarnated means another destructor deallocated memory
+ * after the destructor was called. Cleanup isn't required but
+ * is still called for testing and completeness.
+ */
+ assert_tsd_data_cleanup_done(tsd);
+ /* Fall through. */
+ case tsd_state_nominal:
+ case tsd_state_nominal_slow:
+ tsd_do_data_cleanup(tsd);
+ tsd_state_set(tsd, tsd_state_purgatory);
+ tsd_set(tsd);
+ break;
+ case tsd_state_purgatory:
+ /*
+ * The previous time this destructor was called, we set the
+ * state to tsd_state_purgatory so that other destructors
+ * wouldn't cause re-creation of the tsd. This time, do
+ * nothing, and do not request another callback.
+ */
+ break;
+ default:
+ not_reached();
+ }
+#ifdef JEMALLOC_JET
+ test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd);
+ int *data = tsd_test_datap_get_unsafe(tsd);
+ if (test_callback != NULL) {
+ test_callback(data);
+ }
+#endif
+}
+
+tsd_t *
+malloc_tsd_boot0(void) {
+ tsd_t *tsd;
+
ncleanups = 0;
- if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock",
- WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) {
- return NULL;
- }
- if (tsd_boot0()) {
- return NULL;
- }
- tsd = tsd_fetch();
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
- return tsd;
-}
-
-void
-malloc_tsd_boot1(void) {
- tsd_boot1();
- tsd_t *tsd = tsd_fetch();
- /* malloc_slow has been set properly. Update tsd_slow. */
- tsd_slow_update(tsd);
- *tsd_arenas_tdata_bypassp_get(tsd) = false;
+ if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock",
+ WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) {
+ return NULL;
+ }
+ if (tsd_boot0()) {
+ return NULL;
+ }
+ tsd = tsd_fetch();
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+ return tsd;
}
+void
+malloc_tsd_boot1(void) {
+ tsd_boot1();
+ tsd_t *tsd = tsd_fetch();
+ /* malloc_slow has been set properly. Update tsd_slow. */
+ tsd_slow_update(tsd);
+ *tsd_arenas_tdata_bypassp_get(tsd) = false;
+}
+
#ifdef _WIN32
static BOOL WINAPI
-_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
+_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
switch (fdwReason) {
#ifdef JEMALLOC_LAZY_LOCK
case DLL_THREAD_ATTACH:
@@ -455,80 +455,80 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
default:
break;
}
- return true;
+ return true;
}
-/*
- * We need to be able to say "read" here (in the "pragma section"), but have
- * hooked "read". We won't read for the rest of the file, so we can get away
- * with unhooking.
- */
-#ifdef read
-# undef read
-#endif
-
+/*
+ * We need to be able to say "read" here (in the "pragma section"), but have
+ * hooked "read". We won't read for the rest of the file, so we can get away
+ * with unhooking.
+ */
+#ifdef read
+# undef read
+#endif
+
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
-# pragma comment(linker, "/INCLUDE:_tls_callback")
+# pragma comment(linker, "/INCLUDE:_tls_callback")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
-# pragma comment(linker, "/INCLUDE:" STRINGIFY(tls_callback) )
+# pragma comment(linker, "/INCLUDE:" STRINGIFY(tls_callback) )
# endif
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
-BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
+BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *
-tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
+tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
pthread_t self = pthread_self();
tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */
- malloc_mutex_lock(TSDN_NULL, &head->lock);
+ malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
- malloc_mutex_unlock(TSDN_NULL, &head->lock);
- return iter->data;
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ return iter->data;
}
}
/* Insert block into list. */
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
- malloc_mutex_unlock(TSDN_NULL, &head->lock);
- return NULL;
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ return NULL;
}
void
-tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) {
- malloc_mutex_lock(TSDN_NULL, &head->lock);
+tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) {
+ malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_remove(&head->blocks, block, link);
- malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
}
#endif
-
-void
-tsd_prefork(tsd_t *tsd) {
- malloc_mutex_prefork(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
-}
-
-void
-tsd_postfork_parent(tsd_t *tsd) {
- malloc_mutex_postfork_parent(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
-}
-
-void
-tsd_postfork_child(tsd_t *tsd) {
- malloc_mutex_postfork_child(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
- ql_new(&tsd_nominal_tsds);
-
- if (tsd_state_get(tsd) <= tsd_state_nominal_max) {
- tsd_add_nominal(tsd);
- }
-}
+
+void
+tsd_prefork(tsd_t *tsd) {
+ malloc_mutex_prefork(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
+}
+
+void
+tsd_postfork_parent(tsd_t *tsd) {
+ malloc_mutex_postfork_parent(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
+}
+
+void
+tsd_postfork_child(tsd_t *tsd) {
+ malloc_mutex_postfork_child(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
+ ql_new(&tsd_nominal_tsds);
+
+ if (tsd_state_get(tsd) <= tsd_state_nominal_max) {
+ tsd_add_nominal(tsd);
+ }
+}
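
Most of tsd.c above is the thread-specific-data state machine (nominal / slow / purgatory / reincarnated) plus the cleanup plumbing; on the pthreads backends the entry point for that cleanup is a destructor registered with pthread_key_create(), which the runtime invokes as each thread exits. A self-contained sketch of that underlying mechanism, with illustrative names only:

/* Thread-specific data with an exit-time destructor, the mechanism that
 * ultimately drives tsd_cleanup() on the pthreads backends. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t demo_key;

static void demo_destructor(void *value) {
    /* Runs automatically when a thread exits with a non-NULL value. */
    printf("destructor: freeing %p\n", value);
    free(value);
}

static void *thread_main(void *arg) {
    (void)arg;
    pthread_setspecific(demo_key, malloc(32));
    return NULL;    /* destructor fires after this returns */
}

int main(void) {
    pthread_t tid;
    pthread_key_create(&demo_key, demo_destructor);
    pthread_create(&tid, NULL, thread_main, NULL);
    pthread_join(tid, NULL);
    pthread_key_delete(demo_key);
    return 0;
}

The purgatory and reincarnated states above exist precisely because such destructors can run in any order relative to other libraries' destructors, so memory may still be freed after tsd_cleanup() has already run once.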
diff --git a/contrib/libs/jemalloc/src/witness.c b/contrib/libs/jemalloc/src/witness.c
index f42b72ad1a..80722002c0 100644
--- a/contrib/libs/jemalloc/src/witness.c
+++ b/contrib/libs/jemalloc/src/witness.c
@@ -1,100 +1,100 @@
-#define JEMALLOC_WITNESS_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/malloc_io.h"
-
-void
-witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp, void *opaque) {
- witness->name = name;
- witness->rank = rank;
- witness->comp = comp;
- witness->opaque = opaque;
-}
-
-static void
-witness_lock_error_impl(const witness_list_t *witnesses,
- const witness_t *witness) {
- witness_t *w;
-
- malloc_printf("<jemalloc>: Lock rank order reversal:");
- ql_foreach(w, witnesses, link) {
- malloc_printf(" %s(%u)", w->name, w->rank);
- }
- malloc_printf(" %s(%u)\n", witness->name, witness->rank);
- abort();
-}
-witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl;
-
-static void
-witness_owner_error_impl(const witness_t *witness) {
- malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
- witness->rank);
- abort();
-}
-witness_owner_error_t *JET_MUTABLE witness_owner_error =
- witness_owner_error_impl;
-
-static void
-witness_not_owner_error_impl(const witness_t *witness) {
- malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
- witness->rank);
- abort();
-}
-witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error =
- witness_not_owner_error_impl;
-
-static void
-witness_depth_error_impl(const witness_list_t *witnesses,
- witness_rank_t rank_inclusive, unsigned depth) {
- witness_t *w;
-
- malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
- (depth != 1) ? "s" : "", rank_inclusive);
- ql_foreach(w, witnesses, link) {
- malloc_printf(" %s(%u)", w->name, w->rank);
- }
- malloc_printf("\n");
- abort();
-}
-witness_depth_error_t *JET_MUTABLE witness_depth_error =
- witness_depth_error_impl;
-
-void
-witnesses_cleanup(witness_tsd_t *witness_tsd) {
- witness_assert_lockless(witness_tsd_tsdn(witness_tsd));
-
- /* Do nothing. */
-}
-
-void
-witness_prefork(witness_tsd_t *witness_tsd) {
- if (!config_debug) {
- return;
- }
- witness_tsd->forking = true;
-}
-
-void
-witness_postfork_parent(witness_tsd_t *witness_tsd) {
- if (!config_debug) {
- return;
- }
- witness_tsd->forking = false;
-}
-
-void
-witness_postfork_child(witness_tsd_t *witness_tsd) {
- if (!config_debug) {
- return;
- }
-#ifndef JEMALLOC_MUTEX_INIT_CB
- witness_list_t *witnesses;
-
- witnesses = &witness_tsd->witnesses;
- ql_new(witnesses);
-#endif
- witness_tsd->forking = false;
-}
+#define JEMALLOC_WITNESS_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/malloc_io.h"
+
+void
+witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+ witness_comp_t *comp, void *opaque) {
+ witness->name = name;
+ witness->rank = rank;
+ witness->comp = comp;
+ witness->opaque = opaque;
+}
+
+static void
+witness_lock_error_impl(const witness_list_t *witnesses,
+ const witness_t *witness) {
+ witness_t *w;
+
+ malloc_printf("<jemalloc>: Lock rank order reversal:");
+ ql_foreach(w, witnesses, link) {
+ malloc_printf(" %s(%u)", w->name, w->rank);
+ }
+ malloc_printf(" %s(%u)\n", witness->name, witness->rank);
+ abort();
+}
+witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl;
+
+static void
+witness_owner_error_impl(const witness_t *witness) {
+ malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
+ witness->rank);
+ abort();
+}
+witness_owner_error_t *JET_MUTABLE witness_owner_error =
+ witness_owner_error_impl;
+
+static void
+witness_not_owner_error_impl(const witness_t *witness) {
+ malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
+ witness->rank);
+ abort();
+}
+witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error =
+ witness_not_owner_error_impl;
+
+static void
+witness_depth_error_impl(const witness_list_t *witnesses,
+ witness_rank_t rank_inclusive, unsigned depth) {
+ witness_t *w;
+
+ malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
+ (depth != 1) ? "s" : "", rank_inclusive);
+ ql_foreach(w, witnesses, link) {
+ malloc_printf(" %s(%u)", w->name, w->rank);
+ }
+ malloc_printf("\n");
+ abort();
+}
+witness_depth_error_t *JET_MUTABLE witness_depth_error =
+ witness_depth_error_impl;
+
+void
+witnesses_cleanup(witness_tsd_t *witness_tsd) {
+ witness_assert_lockless(witness_tsd_tsdn(witness_tsd));
+
+ /* Do nothing. */
+}
+
+void
+witness_prefork(witness_tsd_t *witness_tsd) {
+ if (!config_debug) {
+ return;
+ }
+ witness_tsd->forking = true;
+}
+
+void
+witness_postfork_parent(witness_tsd_t *witness_tsd) {
+ if (!config_debug) {
+ return;
+ }
+ witness_tsd->forking = false;
+}
+
+void
+witness_postfork_child(witness_tsd_t *witness_tsd) {
+ if (!config_debug) {
+ return;
+ }
+#ifndef JEMALLOC_MUTEX_INIT_CB
+ witness_list_t *witnesses;
+
+ witnesses = &witness_tsd->witnesses;
+ ql_new(witnesses);
+#endif
+ witness_tsd->forking = false;
+}
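
witness.c restored above is jemalloc's debug-build lock-order checker: every mutex carries a witness with a numeric rank, and taking locks out of rank order triggers one of the *_error_impl reporters. A deliberately simplified toy of the core idea; it only tracks the highest rank held per thread, does not model release, and none of these names are the real witness API:

/* Toy lock-rank check: acquiring a lower-ranked lock while a
 * higher-ranked one is held is flagged as a potential deadlock. */
#include <assert.h>
#include <stdio.h>

typedef struct { const char *name; unsigned rank; } ranked_lock_t;

static _Thread_local unsigned held_max_rank = 0;

static void ranked_acquire(ranked_lock_t *l) {
    if (l->rank < held_max_rank) {
        fprintf(stderr, "rank order reversal: %s(%u) after rank %u\n",
            l->name, l->rank, held_max_rank);
        assert(0);
    }
    held_max_rank = l->rank;    /* real code would also take the mutex */
}

int main(void) {
    ranked_lock_t a = {"arena", 10}, b = {"bin", 20};
    ranked_acquire(&a);
    ranked_acquire(&b);    /* ascending rank order: fine */
    printf("acquired %s then %s in rank order\n", a.name, b.name);
    return 0;
}

The real implementation keeps a per-thread list of held witnesses (the witnesses list reset in witness_postfork_child above), which is how witness_lock_error_impl can print the full chain on a reversal.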
diff --git a/contrib/libs/jemalloc/src/zone.c b/contrib/libs/jemalloc/src/zone.c
index 23dfdd04a9..123a7b038f 100644
--- a/contrib/libs/jemalloc/src/zone.c
+++ b/contrib/libs/jemalloc/src/zone.c
@@ -1,83 +1,83 @@
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/assert.h"
-
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif
-/* Definitions of the following structs in malloc/malloc.h might be too old
- * for the built binary to run on newer versions of OSX. So use the newest
- * possible version of those structs.
- */
-typedef struct _malloc_zone_t {
- void *reserved1;
- void *reserved2;
- size_t (*size)(struct _malloc_zone_t *, const void *);
- void *(*malloc)(struct _malloc_zone_t *, size_t);
- void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
- void *(*valloc)(struct _malloc_zone_t *, size_t);
- void (*free)(struct _malloc_zone_t *, void *);
- void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
- void (*destroy)(struct _malloc_zone_t *);
- const char *zone_name;
- unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
- void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
- struct malloc_introspection_t *introspect;
- unsigned version;
- void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
- void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
- size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
-} malloc_zone_t;
-
-typedef struct {
- vm_address_t address;
- vm_size_t size;
-} vm_range_t;
-
-typedef struct malloc_statistics_t {
- unsigned blocks_in_use;
- size_t size_in_use;
- size_t max_size_in_use;
- size_t size_allocated;
-} malloc_statistics_t;
-
-typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
-
-typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
-
-typedef struct malloc_introspection_t {
- kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
- size_t (*good_size)(malloc_zone_t *, size_t);
- boolean_t (*check)(malloc_zone_t *);
- void (*print)(malloc_zone_t *, boolean_t);
- void (*log)(malloc_zone_t *, void *);
- void (*force_lock)(malloc_zone_t *);
- void (*force_unlock)(malloc_zone_t *);
- void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
- boolean_t (*zone_locked)(malloc_zone_t *);
- boolean_t (*enable_discharge_checking)(malloc_zone_t *);
- boolean_t (*disable_discharge_checking)(malloc_zone_t *);
- void (*discharge)(malloc_zone_t *, void *);
-#ifdef __BLOCKS__
- void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
-#else
- void *enumerate_unavailable_without_blocks;
-#endif
- void (*reinit_lock)(malloc_zone_t *);
-} malloc_introspection_t;
-
-extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
-
-extern malloc_zone_t *malloc_default_zone(void);
-
-extern void malloc_zone_register(malloc_zone_t *zone);
-
-extern void malloc_zone_unregister(malloc_zone_t *zone);
-
+/* Definitions of the following structs in malloc/malloc.h might be too old
+ * for the built binary to run on newer versions of OSX. So use the newest
+ * possible version of those structs.
+ */
+typedef struct _malloc_zone_t {
+ void *reserved1;
+ void *reserved2;
+ size_t (*size)(struct _malloc_zone_t *, const void *);
+ void *(*malloc)(struct _malloc_zone_t *, size_t);
+ void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
+ void *(*valloc)(struct _malloc_zone_t *, size_t);
+ void (*free)(struct _malloc_zone_t *, void *);
+ void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
+ void (*destroy)(struct _malloc_zone_t *);
+ const char *zone_name;
+ unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
+ void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
+ struct malloc_introspection_t *introspect;
+ unsigned version;
+ void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
+ void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
+ size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
+} malloc_zone_t;
+
+typedef struct {
+ vm_address_t address;
+ vm_size_t size;
+} vm_range_t;
+
+typedef struct malloc_statistics_t {
+ unsigned blocks_in_use;
+ size_t size_in_use;
+ size_t max_size_in_use;
+ size_t size_allocated;
+} malloc_statistics_t;
+
+typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
+
+typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
+
+typedef struct malloc_introspection_t {
+ kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
+ size_t (*good_size)(malloc_zone_t *, size_t);
+ boolean_t (*check)(malloc_zone_t *);
+ void (*print)(malloc_zone_t *, boolean_t);
+ void (*log)(malloc_zone_t *, void *);
+ void (*force_lock)(malloc_zone_t *);
+ void (*force_unlock)(malloc_zone_t *);
+ void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
+ boolean_t (*zone_locked)(malloc_zone_t *);
+ boolean_t (*enable_discharge_checking)(malloc_zone_t *);
+ boolean_t (*disable_discharge_checking)(malloc_zone_t *);
+ void (*discharge)(malloc_zone_t *, void *);
+#ifdef __BLOCKS__
+ void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
+#else
+ void *enumerate_unavailable_without_blocks;
+#endif
+ void (*reinit_lock)(malloc_zone_t *);
+} malloc_introspection_t;
+
+extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
+
+extern malloc_zone_t *malloc_default_zone(void);
+
+extern void malloc_zone_register(malloc_zone_t *zone);
+
+extern void malloc_zone_unregister(malloc_zone_t *zone);
+
/*
- * The malloc_default_purgeable_zone() function is only available on >= 10.6.
+ * The malloc_default_purgeable_zone() function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
@@ -86,15 +86,15 @@ JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
-static malloc_zone_t *default_zone, *purgeable_zone;
-static malloc_zone_t jemalloc_zone;
-static struct malloc_introspection_t jemalloc_zone_introspect;
-static pid_t zone_force_lock_pid = -1;
+static malloc_zone_t *default_zone, *purgeable_zone;
+static malloc_zone_t jemalloc_zone;
+static struct malloc_introspection_t jemalloc_zone_introspect;
+static pid_t zone_force_lock_pid = -1;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static size_t zone_size(malloc_zone_t *zone, const void *ptr);
+static size_t zone_size(malloc_zone_t *zone, const void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
@@ -104,25 +104,25 @@ static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
-static void zone_destroy(malloc_zone_t *zone);
-static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
- void **results, unsigned num_requested);
-static void zone_batch_free(struct _malloc_zone_t *zone,
- void **to_be_freed, unsigned num_to_be_freed);
-static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
+static void zone_destroy(malloc_zone_t *zone);
+static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
+ void **results, unsigned num_requested);
+static void zone_batch_free(struct _malloc_zone_t *zone,
+ void **to_be_freed, unsigned num_to_be_freed);
+static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
-static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask,
- vm_address_t zone_address, memory_reader_t reader,
- vm_range_recorder_t recorder);
-static boolean_t zone_check(malloc_zone_t *zone);
-static void zone_print(malloc_zone_t *zone, boolean_t verbose);
-static void zone_log(malloc_zone_t *zone, void *address);
+static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask,
+ vm_address_t zone_address, memory_reader_t reader,
+ vm_range_recorder_t recorder);
+static boolean_t zone_check(malloc_zone_t *zone);
+static void zone_print(malloc_zone_t *zone, boolean_t verbose);
+static void zone_log(malloc_zone_t *zone, void *address);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
-static void zone_statistics(malloc_zone_t *zone,
- malloc_statistics_t *stats);
-static boolean_t zone_locked(malloc_zone_t *zone);
-static void zone_reinit_lock(malloc_zone_t *zone);
+static void zone_statistics(malloc_zone_t *zone,
+ malloc_statistics_t *stats);
+static boolean_t zone_locked(malloc_zone_t *zone);
+static void zone_reinit_lock(malloc_zone_t *zone);
/******************************************************************************/
/*
@@ -130,7 +130,7 @@ static void zone_reinit_lock(malloc_zone_t *zone);
*/
static size_t
-zone_size(malloc_zone_t *zone, const void *ptr) {
+zone_size(malloc_zone_t *zone, const void *ptr) {
/*
* There appear to be places within Darwin (such as setenv(3)) that
* cause calls to this function with pointers that *no* zone owns. If
@@ -138,33 +138,33 @@ zone_size(malloc_zone_t *zone, const void *ptr) {
* our zone into two parts, and use one as the default allocator and
* the other as the default deallocator/reallocator. Since that will
* not work in practice, we must check all pointers to assure that they
- * reside within a mapped extent before determining size.
+ * reside within a mapped extent before determining size.
*/
- return ivsalloc(tsdn_fetch(), ptr);
+ return ivsalloc(tsdn_fetch(), ptr);
}
static void *
-zone_malloc(malloc_zone_t *zone, size_t size) {
- return je_malloc(size);
+zone_malloc(malloc_zone_t *zone, size_t size) {
+ return je_malloc(size);
}
static void *
-zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
- return je_calloc(num, size);
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
+ return je_calloc(num, size);
}
static void *
-zone_valloc(malloc_zone_t *zone, size_t size) {
+zone_valloc(malloc_zone_t *zone, size_t size) {
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, PAGE, size);
- return ret;
+ return ret;
}
static void
-zone_free(malloc_zone_t *zone, void *ptr) {
- if (ivsalloc(tsdn_fetch(), ptr) != 0) {
+zone_free(malloc_zone_t *zone, void *ptr) {
+ if (ivsalloc(tsdn_fetch(), ptr) != 0) {
je_free(ptr);
return;
}
@@ -173,30 +173,30 @@ zone_free(malloc_zone_t *zone, void *ptr) {
}
static void *
-zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
- if (ivsalloc(tsdn_fetch(), ptr) != 0) {
- return je_realloc(ptr, size);
- }
+zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
+ if (ivsalloc(tsdn_fetch(), ptr) != 0) {
+ return je_realloc(ptr, size);
+ }
- return realloc(ptr, size);
+ return realloc(ptr, size);
}
static void *
-zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
+zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, alignment, size);
- return ret;
+ return ret;
}
static void
-zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
- size_t alloc_size;
+zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
+ size_t alloc_size;
- alloc_size = ivsalloc(tsdn_fetch(), ptr);
- if (alloc_size != 0) {
- assert(alloc_size == size);
+ alloc_size = ivsalloc(tsdn_fetch(), ptr);
+ if (alloc_size != 0) {
+ assert(alloc_size == size);
je_free(ptr);
return;
}
@@ -204,170 +204,170 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
free(ptr);
}
-static void
-zone_destroy(malloc_zone_t *zone) {
+static void
+zone_destroy(malloc_zone_t *zone) {
/* This function should never be called. */
not_reached();
}
-static unsigned
-zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
- unsigned num_requested) {
- unsigned i;
-
- for (i = 0; i < num_requested; i++) {
- results[i] = je_malloc(size);
- if (!results[i])
- break;
- }
-
- return i;
-}
-
-static void
-zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
- unsigned num_to_be_freed) {
- unsigned i;
-
- for (i = 0; i < num_to_be_freed; i++) {
- zone_free(zone, to_be_freed[i]);
- to_be_freed[i] = NULL;
- }
-}
-
+static unsigned
+zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
+ unsigned num_requested) {
+ unsigned i;
+
+ for (i = 0; i < num_requested; i++) {
+ results[i] = je_malloc(size);
+ if (!results[i])
+ break;
+ }
+
+ return i;
+}
+
+static void
+zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
+ unsigned num_to_be_freed) {
+ unsigned i;
+
+ for (i = 0; i < num_to_be_freed; i++) {
+ zone_free(zone, to_be_freed[i]);
+ to_be_freed[i] = NULL;
+ }
+}
+
static size_t
-zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
- return 0;
-}
+zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
+ return 0;
+}
-static size_t
-zone_good_size(malloc_zone_t *zone, size_t size) {
- if (size == 0) {
+static size_t
+zone_good_size(malloc_zone_t *zone, size_t size) {
+ if (size == 0) {
size = 1;
- }
- return sz_s2u(size);
-}
-
-static kern_return_t
-zone_enumerator(task_t task, void *data, unsigned type_mask,
- vm_address_t zone_address, memory_reader_t reader,
- vm_range_recorder_t recorder) {
- return KERN_SUCCESS;
-}
-
-static boolean_t
-zone_check(malloc_zone_t *zone) {
- return true;
-}
-
-static void
-zone_print(malloc_zone_t *zone, boolean_t verbose) {
-}
-
-static void
-zone_log(malloc_zone_t *zone, void *address) {
+ }
+ return sz_s2u(size);
}
+static kern_return_t
+zone_enumerator(task_t task, void *data, unsigned type_mask,
+ vm_address_t zone_address, memory_reader_t reader,
+ vm_range_recorder_t recorder) {
+ return KERN_SUCCESS;
+}
+
+static boolean_t
+zone_check(malloc_zone_t *zone) {
+ return true;
+}
+
static void
-zone_force_lock(malloc_zone_t *zone) {
- if (isthreaded) {
- /*
- * See the note in zone_force_unlock, below, to see why we need
- * this.
- */
- assert(zone_force_lock_pid == -1);
- zone_force_lock_pid = getpid();
+zone_print(malloc_zone_t *zone, boolean_t verbose) {
+}
+
+static void
+zone_log(malloc_zone_t *zone, void *address) {
+}
+
+static void
+zone_force_lock(malloc_zone_t *zone) {
+ if (isthreaded) {
+ /*
+ * See the note in zone_force_unlock, below, to see why we need
+ * this.
+ */
+ assert(zone_force_lock_pid == -1);
+ zone_force_lock_pid = getpid();
jemalloc_prefork();
- }
-}
-
-static void
-zone_force_unlock(malloc_zone_t *zone) {
- /*
- * zone_force_lock and zone_force_unlock are the entry points to the
- * forking machinery on OS X. The tricky thing is, the child is not
- * allowed to unlock mutexes locked in the parent, even if owned by the
- * forking thread (and the mutex type we use in OS X will fail an assert
- * if we try). In the child, we can get away with reinitializing all
- * the mutexes, which has the effect of unlocking them. In the parent,
- * doing this would mean we wouldn't wake any waiters blocked on the
- * mutexes we unlock. So, we record the pid of the current thread in
- * zone_force_lock, and use that to detect if we're in the parent or
- * child here, to decide which unlock logic we need.
- */
- if (isthreaded) {
- assert(zone_force_lock_pid != -1);
- if (getpid() == zone_force_lock_pid) {
- jemalloc_postfork_parent();
- } else {
- jemalloc_postfork_child();
- }
- zone_force_lock_pid = -1;
- }
+ }
}
static void
-zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
- /* We make no effort to actually fill the values */
- stats->blocks_in_use = 0;
- stats->size_in_use = 0;
- stats->max_size_in_use = 0;
- stats->size_allocated = 0;
+zone_force_unlock(malloc_zone_t *zone) {
+ /*
+ * zone_force_lock and zone_force_unlock are the entry points to the
+ * forking machinery on OS X. The tricky thing is, the child is not
+ * allowed to unlock mutexes locked in the parent, even if owned by the
+ * forking thread (and the mutex type we use in OS X will fail an assert
+ * if we try). In the child, we can get away with reinitializing all
+ * the mutexes, which has the effect of unlocking them. In the parent,
+ * doing this would mean we wouldn't wake any waiters blocked on the
+ * mutexes we unlock. So, we record the pid of the current thread in
+ * zone_force_lock, and use that to detect if we're in the parent or
+ * child here, to decide which unlock logic we need.
+ */
+ if (isthreaded) {
+ assert(zone_force_lock_pid != -1);
+ if (getpid() == zone_force_lock_pid) {
+ jemalloc_postfork_parent();
+ } else {
+ jemalloc_postfork_child();
+ }
+ zone_force_lock_pid = -1;
+ }
+}
+
+static void
+zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
+ /* We make no effort to actually fill the values */
+ stats->blocks_in_use = 0;
+ stats->size_in_use = 0;
+ stats->max_size_in_use = 0;
+ stats->size_allocated = 0;
}
-static boolean_t
-zone_locked(malloc_zone_t *zone) {
- /* Pretend no lock is being held */
- return false;
-}
-
-static void
-zone_reinit_lock(malloc_zone_t *zone) {
- /* As of OSX 10.12, this function is only used when force_unlock would
- * be used if the zone version were < 9. So just use force_unlock. */
- zone_force_unlock(zone);
-}
-
-static void
-zone_init(void) {
- jemalloc_zone.size = zone_size;
- jemalloc_zone.malloc = zone_malloc;
- jemalloc_zone.calloc = zone_calloc;
- jemalloc_zone.valloc = zone_valloc;
- jemalloc_zone.free = zone_free;
- jemalloc_zone.realloc = zone_realloc;
- jemalloc_zone.destroy = zone_destroy;
- jemalloc_zone.zone_name = "jemalloc_zone";
- jemalloc_zone.batch_malloc = zone_batch_malloc;
- jemalloc_zone.batch_free = zone_batch_free;
- jemalloc_zone.introspect = &jemalloc_zone_introspect;
- jemalloc_zone.version = 9;
- jemalloc_zone.memalign = zone_memalign;
- jemalloc_zone.free_definite_size = zone_free_definite_size;
- jemalloc_zone.pressure_relief = zone_pressure_relief;
-
- jemalloc_zone_introspect.enumerator = zone_enumerator;
- jemalloc_zone_introspect.good_size = zone_good_size;
- jemalloc_zone_introspect.check = zone_check;
- jemalloc_zone_introspect.print = zone_print;
- jemalloc_zone_introspect.log = zone_log;
- jemalloc_zone_introspect.force_lock = zone_force_lock;
- jemalloc_zone_introspect.force_unlock = zone_force_unlock;
- jemalloc_zone_introspect.statistics = zone_statistics;
- jemalloc_zone_introspect.zone_locked = zone_locked;
- jemalloc_zone_introspect.enable_discharge_checking = NULL;
- jemalloc_zone_introspect.disable_discharge_checking = NULL;
- jemalloc_zone_introspect.discharge = NULL;
-#ifdef __BLOCKS__
- jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
-#else
- jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
-#endif
- jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
-}
-
-static malloc_zone_t *
-zone_default_get(void) {
+static boolean_t
+zone_locked(malloc_zone_t *zone) {
+ /* Pretend no lock is being held */
+ return false;
+}
+
+static void
+zone_reinit_lock(malloc_zone_t *zone) {
+ /* As of OSX 10.12, this function is only used when force_unlock would
+ * be used if the zone version were < 9. So just use force_unlock. */
+ zone_force_unlock(zone);
+}
+
+static void
+zone_init(void) {
+ jemalloc_zone.size = zone_size;
+ jemalloc_zone.malloc = zone_malloc;
+ jemalloc_zone.calloc = zone_calloc;
+ jemalloc_zone.valloc = zone_valloc;
+ jemalloc_zone.free = zone_free;
+ jemalloc_zone.realloc = zone_realloc;
+ jemalloc_zone.destroy = zone_destroy;
+ jemalloc_zone.zone_name = "jemalloc_zone";
+ jemalloc_zone.batch_malloc = zone_batch_malloc;
+ jemalloc_zone.batch_free = zone_batch_free;
+ jemalloc_zone.introspect = &jemalloc_zone_introspect;
+ jemalloc_zone.version = 9;
+ jemalloc_zone.memalign = zone_memalign;
+ jemalloc_zone.free_definite_size = zone_free_definite_size;
+ jemalloc_zone.pressure_relief = zone_pressure_relief;
+
+ jemalloc_zone_introspect.enumerator = zone_enumerator;
+ jemalloc_zone_introspect.good_size = zone_good_size;
+ jemalloc_zone_introspect.check = zone_check;
+ jemalloc_zone_introspect.print = zone_print;
+ jemalloc_zone_introspect.log = zone_log;
+ jemalloc_zone_introspect.force_lock = zone_force_lock;
+ jemalloc_zone_introspect.force_unlock = zone_force_unlock;
+ jemalloc_zone_introspect.statistics = zone_statistics;
+ jemalloc_zone_introspect.zone_locked = zone_locked;
+ jemalloc_zone_introspect.enable_discharge_checking = NULL;
+ jemalloc_zone_introspect.disable_discharge_checking = NULL;
+ jemalloc_zone_introspect.discharge = NULL;
+#ifdef __BLOCKS__
+ jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
+#else
+ jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
+#endif
+ jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
+}
+
+static malloc_zone_t *
+zone_default_get(void) {
malloc_zone_t **zones = NULL;
unsigned int num_zones = 0;
@@ -377,72 +377,72 @@ zone_default_get(void) {
* if one is present (apparently enabled when malloc stack logging is
* enabled), or the first registered zone otherwise. In practice this
* means unless malloc stack logging is enabled, the first registered
- * zone is the default. So get the list of zones to get the first one,
- * instead of relying on malloc_default_zone.
+ * zone is the default. So get the list of zones to get the first one,
+ * instead of relying on malloc_default_zone.
*/
- if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
- (vm_address_t**)&zones, &num_zones)) {
- /*
- * Reset the value in case the failure happened after it was
- * set.
- */
+ if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
+ (vm_address_t**)&zones, &num_zones)) {
+ /*
+ * Reset the value in case the failure happened after it was
+ * set.
+ */
num_zones = 0;
}
- if (num_zones) {
+ if (num_zones) {
return zones[0];
- }
+ }
return malloc_default_zone();
}
-/* As written, this function can only promote jemalloc_zone. */
-static void
-zone_promote(void) {
- malloc_zone_t *zone;
-
- do {
- /*
- * Unregister and reregister the default zone. On OSX >= 10.6,
- * unregistering takes the last registered zone and places it
- * at the location of the specified zone. Unregistering the
- * default zone thus makes the last registered one the default.
- * On OSX < 10.6, unregistering shifts all registered zones.
- * The first registered zone then becomes the default.
- */
- malloc_zone_unregister(default_zone);
- malloc_zone_register(default_zone);
-
- /*
- * On OSX 10.6, having the default purgeable zone appear before
- * the default zone makes some things crash because it thinks it
- * owns the default zone allocated pointers. We thus
- * unregister/re-register it in order to ensure it's always
- * after the default zone. On OSX < 10.6, there is no purgeable
- * zone, so this does nothing. On OSX >= 10.6, unregistering
- * replaces the purgeable zone with the last registered zone
- * above, i.e. the default zone. Registering it again then puts
- * it at the end, obviously after the default zone.
- */
- if (purgeable_zone != NULL) {
- malloc_zone_unregister(purgeable_zone);
- malloc_zone_register(purgeable_zone);
- }
-
- zone = zone_default_get();
- } while (zone != &jemalloc_zone);
-}
-
+/* As written, this function can only promote jemalloc_zone. */
+static void
+zone_promote(void) {
+ malloc_zone_t *zone;
+
+ do {
+ /*
+ * Unregister and reregister the default zone. On OSX >= 10.6,
+ * unregistering takes the last registered zone and places it
+ * at the location of the specified zone. Unregistering the
+ * default zone thus makes the last registered one the default.
+ * On OSX < 10.6, unregistering shifts all registered zones.
+ * The first registered zone then becomes the default.
+ */
+ malloc_zone_unregister(default_zone);
+ malloc_zone_register(default_zone);
+
+ /*
+ * On OSX 10.6, having the default purgeable zone appear before
+ * the default zone makes some things crash because it thinks it
+ * owns the default zone allocated pointers. We thus
+ * unregister/re-register it in order to ensure it's always
+ * after the default zone. On OSX < 10.6, there is no purgeable
+ * zone, so this does nothing. On OSX >= 10.6, unregistering
+ * replaces the purgeable zone with the last registered zone
+ * above, i.e. the default zone. Registering it again then puts
+ * it at the end, obviously after the default zone.
+ */
+ if (purgeable_zone != NULL) {
+ malloc_zone_unregister(purgeable_zone);
+ malloc_zone_register(purgeable_zone);
+ }
+
+ zone = zone_default_get();
+ } while (zone != &jemalloc_zone);
+}
+
JEMALLOC_ATTR(constructor)
void
-zone_register(void) {
+zone_register(void) {
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
- default_zone = zone_default_get();
- if (!default_zone->zone_name || strcmp(default_zone->zone_name,
- "DefaultMallocZone") != 0) {
+ default_zone = zone_default_get();
+ if (!default_zone->zone_name || strcmp(default_zone->zone_name,
+ "DefaultMallocZone") != 0) {
return;
}
@@ -451,19 +451,19 @@ zone_register(void) {
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
- * malloc_default_purgeable_zone() is called beforehand so that the
+ * malloc_default_purgeable_zone() is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
- purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
- malloc_default_purgeable_zone();
+ purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
+ malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
- zone_init();
- malloc_zone_register(&jemalloc_zone);
+ zone_init();
+ malloc_zone_register(&jemalloc_zone);
- /* Promote the custom zone to be default. */
- zone_promote();
+ /* Promote the custom zone to be default. */
+ zone_promote();
}
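
For reference, a minimal sketch (not part of this patch) of how an application could confirm on macOS that the registration and promotion above took effect. It assumes the SDK's <malloc/malloc.h> and the "jemalloc_zone" name assigned in zone_init():

#include <malloc/malloc.h>
#include <stdio.h>

int main(void) {
	/* After jemalloc's constructor has run, the default zone should be
	 * the zone registered and promoted above. zone_name may be NULL for
	 * anonymous zones, so guard the print. */
	malloc_zone_t *zone = malloc_default_zone();
	printf("default zone: %s\n",
	    zone->zone_name != NULL ? zone->zone_name : "(unnamed)");
	return 0;
}
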
diff --git a/contrib/libs/jemalloc/ya.make b/contrib/libs/jemalloc/ya.make
index 586de30ab0..da98716897 100644
--- a/contrib/libs/jemalloc/ya.make
+++ b/contrib/libs/jemalloc/ya.make
@@ -1,5 +1,5 @@
# Generated by devtools/yamaker from nixpkgs 21.11.
-
+
LIBRARY()
OWNER(
@@ -7,7 +7,7 @@ OWNER(
g:cpp-contrib
)
-VERSION(5.2.1)
+VERSION(5.2.1)
ORIGINAL_SOURCE(https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2)
@@ -27,65 +27,65 @@ IF (OS_WINDOWS)
ADDINCL(
contrib/libs/jemalloc/include/msvc_compat
)
-ELSE()
+ELSE()
CFLAGS(
-funroll-loops
)
- IF (OS_DARWIN OR OS_IOS)
- SRCS(
- GLOBAL reg_zone.cpp
- src/zone.c
- )
- ELSE()
- PEERDIR(
- contrib/libs/libunwind
- )
+ IF (OS_DARWIN OR OS_IOS)
+ SRCS(
+ GLOBAL reg_zone.cpp
+ src/zone.c
+ )
+ ELSE()
+ PEERDIR(
+ contrib/libs/libunwind
+ )
CFLAGS(
-fvisibility=hidden
)
- ENDIF()
-ENDIF()
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
+ ENDIF()
+ENDIF()
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
SRCS(
- hack.cpp
+ hack.cpp
src/arena.c
- src/background_thread.c
+ src/background_thread.c
src/base.c
- src/bin.c
+ src/bin.c
src/bitmap.c
src/ckh.c
src/ctl.c
- src/div.c
+ src/div.c
src/extent.c
- src/extent_dss.c
- src/extent_mmap.c
+ src/extent_dss.c
+ src/extent_mmap.c
src/hash.c
- src/hook.c
+ src/hook.c
src/jemalloc.c
- src/jemalloc_cpp.cpp
- src/large.c
- src/log.c
- src/malloc_io.c
+ src/jemalloc_cpp.cpp
+ src/large.c
+ src/log.c
+ src/malloc_io.c
src/mutex.c
- src/mutex_pool.c
- src/nstime.c
- src/pages.c
- src/prng.c
+ src/mutex_pool.c
+ src/nstime.c
+ src/pages.c
+ src/prng.c
src/prof.c
src/rtree.c
- src/safety_check.c
- src/sc.c
+ src/safety_check.c
+ src/sc.c
src/stats.c
- src/sz.c
+ src/sz.c
src/tcache.c
- src/test_hooks.c
- src/ticker.c
+ src/test_hooks.c
+ src/ticker.c
src/tsd.c
- src/witness.c
+ src/witness.c
)
END()
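
On Darwin/iOS the SRCS block above adds reg_zone.cpp as GLOBAL so the zone-registration object is always pulled into the final link, while zone_register() itself runs via JEMALLOC_ATTR(constructor) in src/zone.c. A hypothetical C sketch of that forced-linkage idea (illustrative only, not the contents of reg_zone.cpp):

/* Referencing the constructor symbol from an always-linked translation
 * unit keeps src/zone.c, and therefore its constructor, in the binary. */
extern void zone_register(void);

void (*volatile force_jemalloc_zone_link)(void) = zone_register;
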
diff --git a/contrib/libs/ya.make b/contrib/libs/ya.make
index 9c4640fdcf..a1eb040e14 100644
--- a/contrib/libs/ya.make
+++ b/contrib/libs/ya.make
@@ -137,7 +137,7 @@ RECURSE(
libbpf
libbz2
libc_compat
- libc_userver_workarounds
+ libc_userver_workarounds
libcpuid
libcroco
libde265
diff --git a/contrib/restricted/boost/boost/asio/detail/fenced_block.hpp b/contrib/restricted/boost/boost/asio/detail/fenced_block.hpp
index 789b9d8a80..4bdffb2659 100644
--- a/contrib/restricted/boost/boost/asio/detail/fenced_block.hpp
+++ b/contrib/restricted/boost/boost/asio/detail/fenced_block.hpp
@@ -25,7 +25,7 @@
#elif defined(__MACH__) && defined(__APPLE__)
# include <boost/asio/detail/macos_fenced_block.hpp>
#elif defined(__sun)
-# error include <boost/asio/detail/solaris_fenced_block.hpp>
+# error include <boost/asio/detail/solaris_fenced_block.hpp>
#elif defined(__GNUC__) && defined(__arm__) \
&& !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
# include <boost/asio/detail/gcc_arm_fenced_block.hpp>
diff --git a/contrib/restricted/boost/boost/filesystem/operations.hpp b/contrib/restricted/boost/boost/filesystem/operations.hpp
index a4f652fbf8..a67f31516b 100644
--- a/contrib/restricted/boost/boost/filesystem/operations.hpp
+++ b/contrib/restricted/boost/boost/filesystem/operations.hpp
@@ -908,9 +908,9 @@ namespace detail
~directory_iterator() {}
- directory_iterator& increment(system::error_code& ec) BOOST_NOEXCEPT
+ directory_iterator& increment(system::error_code& ec) BOOST_NOEXCEPT
{
- detail::directory_iterator_increment(*this, &ec);
+ detail::directory_iterator_increment(*this, &ec);
return *this;
}
diff --git a/contrib/restricted/boost/libs/filesystem/src/operations.cpp b/contrib/restricted/boost/libs/filesystem/src/operations.cpp
index 044019ceb9..ad92611849 100644
--- a/contrib/restricted/boost/libs/filesystem/src/operations.cpp
+++ b/contrib/restricted/boost/libs/filesystem/src/operations.cpp
@@ -2345,9 +2345,9 @@ namespace detail
&& (filename.size()== 1
|| (filename[1] == dot
&& filename.size()== 2)))
- {
- detail::directory_iterator_increment(it, ec);
- }
+ {
+ detail::directory_iterator_increment(it, ec);
+ }
}
}
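
The hunk above sits where directory iteration skips the "." and ".." pseudo-entries: when the filename is a single dot or dot-dot, the iterator is advanced past it. As an aside, a self-contained C analogue of that skip using POSIX readdir() (illustrative only, not Boost code):

#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void) {
	DIR *dir = opendir(".");
	if (dir == NULL) {
		return 1;
	}
	struct dirent *entry;
	while ((entry = readdir(dir)) != NULL) {
		/* Same idea as the dot / dot-dot test in the diff: drop the
		 * two pseudo-entries, keep everything else. */
		if (strcmp(entry->d_name, ".") == 0 ||
		    strcmp(entry->d_name, "..") == 0) {
			continue;
		}
		printf("%s\n", entry->d_name);
	}
	closedir(dir);
	return 0;
}
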
diff --git a/contrib/restricted/boost/libs/regex/ya.make b/contrib/restricted/boost/libs/regex/ya.make
index 379c0831ae..503204a923 100644
--- a/contrib/restricted/boost/libs/regex/ya.make
+++ b/contrib/restricted/boost/libs/regex/ya.make
@@ -16,10 +16,10 @@ PEERDIR(
contrib/libs/icu
)
-CFLAGS(
- -DBOOST_HAS_ICU
-)
-
+CFLAGS(
+ -DBOOST_HAS_ICU
+)
+
SRCS(
src/c_regex_traits.cpp
src/cpp_regex_traits.cpp