author    | thegeorg <thegeorg@yandex-team.ru> | 2022-05-10 11:24:08 +0300
committer | thegeorg <thegeorg@yandex-team.ru> | 2022-05-10 11:24:08 +0300
commit    | 3eee94a865e8dff399329dee911906cddb7b0d67 (patch)
tree      | 90049cea53c3a2850723b8104f922d1fc70e94b1 /contrib/libs/jemalloc/src/sz.c
parent    | 8d4afd14b8ae14ffb50992a59dc674e30e076a8e (diff)
download  | ydb-3eee94a865e8dff399329dee911906cddb7b0d67.tar.gz
Update contrib/libs/jemalloc to 5.3.0
ref:984a35af48908b64eabafda01bb2e47403689121
Diffstat (limited to 'contrib/libs/jemalloc/src/sz.c')
-rw-r--r-- | contrib/libs/jemalloc/src/sz.c | 52 |
1 file changed, 51 insertions(+), 1 deletion(-)
diff --git a/contrib/libs/jemalloc/src/sz.c b/contrib/libs/jemalloc/src/sz.c
index 8633fb0500..d3115dda7c 100644
--- a/contrib/libs/jemalloc/src/sz.c
+++ b/contrib/libs/jemalloc/src/sz.c
@@ -1,8 +1,57 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
 #include "jemalloc/internal/sz.h"
 
 JEMALLOC_ALIGNED(CACHELINE)
 size_t sz_pind2sz_tab[SC_NPSIZES+1];
+size_t sz_large_pad;
+
+size_t
+sz_psz_quantize_floor(size_t size) {
+	size_t ret;
+	pszind_t pind;
+
+	assert(size > 0);
+	assert((size & PAGE_MASK) == 0);
+
+	pind = sz_psz2ind(size - sz_large_pad + 1);
+	if (pind == 0) {
+		/*
+		 * Avoid underflow.  This short-circuit would also do the right
+		 * thing for all sizes in the range for which there are
+		 * PAGE-spaced size classes, but it's simplest to just handle
+		 * the one case that would cause erroneous results.
+		 */
+		return size;
+	}
+	ret = sz_pind2sz(pind - 1) + sz_large_pad;
+	assert(ret <= size);
+	return ret;
+}
+
+size_t
+sz_psz_quantize_ceil(size_t size) {
+	size_t ret;
+
+	assert(size > 0);
+	assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
+	assert((size & PAGE_MASK) == 0);
+
+	ret = sz_psz_quantize_floor(size);
+	if (ret < size) {
+		/*
+		 * Skip a quantization that may have an adequately large extent,
+		 * because under-sized extents may be mixed in.  This only
+		 * happens when an unusual size is requested, i.e. for aligned
+		 * allocation, and is just one of several places where linear
+		 * search would potentially find sufficiently aligned available
+		 * memory somewhere lower.
+		 */
+		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
+		    sz_large_pad;
+	}
+	return ret;
+}
 
 static void
 sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
@@ -57,7 +106,8 @@ sz_boot_size2index_tab(const sc_data_t *sc_data) {
 }
 
 void
-sz_boot(const sc_data_t *sc_data) {
+sz_boot(const sc_data_t *sc_data, bool cache_oblivious) {
+	sz_large_pad = cache_oblivious ? PAGE : 0;
 	sz_boot_pind2sz_tab(sc_data);
 	sz_boot_index2size_tab(sc_data);
 	sz_boot_size2index_tab(sc_data);
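
The two helpers added by this diff round a page-aligned request down (floor) or up (ceil) to the nearest page-size class, after discounting sz_large_pad (PAGE when cache-oblivious allocation is enabled, 0 otherwise). The standalone C sketch below models only that floor/ceil behavior; it is not jemalloc code. The names toy_classes, toy_quantize_floor, toy_quantize_ceil, and TOY_PAGE are hypothetical, the class table is made up rather than derived from jemalloc's sc_data, and sz_large_pad is assumed to be 0.

/* toy_psz_quantize.c — a minimal model of page-size-class quantization.
 * Toy values only; real jemalloc derives its classes from sc_data. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_PAGE ((size_t)4096)

/* A few PAGE-multiple size classes (hypothetical, loosely mimicking the
 * spacing jemalloc uses for the smallest large sizes). */
static const size_t toy_classes[] = {
	4096, 8192, 12288, 16384, 20480, 24576, 28672, 32768,
	40960, 49152, 57344, 65536,
};
#define TOY_NCLASSES (sizeof(toy_classes) / sizeof(toy_classes[0]))

/* Largest class <= size: the sz_psz_quantize_floor analogue
 * (with sz_large_pad assumed to be 0). */
static size_t
toy_quantize_floor(size_t size) {
	assert(size >= toy_classes[0]);
	assert(size % TOY_PAGE == 0);
	size_t ret = toy_classes[0];
	for (size_t i = 0; i < TOY_NCLASSES && toy_classes[i] <= size; i++) {
		ret = toy_classes[i];
	}
	return ret;
}

/* Smallest class >= size: the sz_psz_quantize_ceil analogue. */
static size_t
toy_quantize_ceil(size_t size) {
	for (size_t i = 0; i < TOY_NCLASSES; i++) {
		if (toy_classes[i] >= size) {
			return toy_classes[i];
		}
	}
	assert(0 && "size above largest toy class");
	return 0;
}

int
main(void) {
	/* 9 pages = 36864 bytes sits between the 32768 and 40960 classes. */
	size_t size = 9 * TOY_PAGE;
	printf("floor(%zu) = %zu\n", size, toy_quantize_floor(size)); /* 32768 */
	printf("ceil(%zu)  = %zu\n", size, toy_quantize_ceil(size));  /* 40960 */
	return 0;
}

Unlike this linear scan, the real sz_psz2ind/sz_pind2sz pair computes the class index arithmetically from the size-class geometry bootstrapped in sz_boot(). The new cache_oblivious parameter threaded into sz_boot() sets sz_large_pad to one PAGE, giving every large allocation a page of slack so its start address can be staggered within that page to reduce cache-index conflicts.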