author     prime <prime@yandex-team.ru>    2022-02-10 16:46:01 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:46:01 +0300
commit     e34f3f0e381020a427f44fbd50463d9a04089db3 (patch)
tree       1a2c5ffcf89eb53ecd79dbc9bc0a195c27404d0c  /contrib/libs/tcmalloc/patches
parent     3695a7cd42b74a4987d8d5a8f2e2443556998943 (diff)
download   ydb-e34f3f0e381020a427f44fbd50463d9a04089db3.tar.gz
Restoring authorship annotation for <prime@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/tcmalloc/patches')
-rw-r--r--  contrib/libs/tcmalloc/patches/fork.patch     620
-rw-r--r--  contrib/libs/tcmalloc/patches/yandex.patch   178
2 files changed, 399 insertions, 399 deletions
diff --git a/contrib/libs/tcmalloc/patches/fork.patch b/contrib/libs/tcmalloc/patches/fork.patch
index b29bb78261..2503394431 100644
--- a/contrib/libs/tcmalloc/patches/fork.patch
+++ b/contrib/libs/tcmalloc/patches/fork.patch
@@ -1,310 +1,310 @@
---- contrib/libs/tcmalloc/tcmalloc/central_freelist.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/central_freelist.h (working tree)
-@@ -70,6 +70,14 @@ class CentralFreeList {
-
- SpanStats GetSpanStats() const;
-
-+ void AcquireInternalLocks() {
-+ lock_.Lock();
-+ }
-+
-+ void ReleaseInternalLocks() {
-+ lock_.Unlock();
-+ }
-+
- private:
- // Release an object to spans.
- // Returns object's span if it become completely free.
---- contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (working tree)
-@@ -1031,6 +1031,20 @@ void CPUCache::PrintInPbtxt(PbtxtRegion *region) const {
- }
- }
-
-+void CPUCache::AcquireInternalLocks() {
-+ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
-+ ++cpu) {
-+ resize_[cpu].lock.Lock();
-+ }
-+}
-+
-+void CPUCache::ReleaseInternalLocks() {
-+ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
-+ ++cpu) {
-+ resize_[cpu].lock.Unlock();
-+ }
-+}
-+
- void CPUCache::PerClassResizeInfo::Init() {
- state_.store(0, std::memory_order_relaxed);
- }
---- contrib/libs/tcmalloc/tcmalloc/cpu_cache.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.h (working tree)
-@@ -164,6 +164,9 @@ class CPUCache {
- void Print(Printer* out) const;
- void PrintInPbtxt(PbtxtRegion* region) const;
-
-+ void AcquireInternalLocks();
-+ void ReleaseInternalLocks();
-+
- private:
- // Per-size-class freelist resizing info.
- class PerClassResizeInfo {
---- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (working tree)
-@@ -116,6 +116,10 @@ ABSL_ATTRIBUTE_WEAK int64_t
- MallocExtension_Internal_GetMaxTotalThreadCacheBytes();
- ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetMaxTotalThreadCacheBytes(
- int64_t value);
-+
-+ABSL_ATTRIBUTE_WEAK void
-+MallocExtension_EnableForkSupport();
-+
- }
-
- #endif
---- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (working tree)
-@@ -460,6 +460,14 @@ void MallocExtension::SetBackgroundReleaseRate(BytesPerSecond rate) {
- #endif
- }
-
-+void MallocExtension::EnableForkSupport() {
-+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
-+ if (&MallocExtension_EnableForkSupport != nullptr) {
-+ MallocExtension_EnableForkSupport();
-+ }
-+#endif
-+}
-+
- } // namespace tcmalloc
-
- // Default implementation just returns size. The expectation is that
---- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (working tree)
-@@ -468,6 +468,10 @@ class MallocExtension final {
- // Specifies the release rate from the page heap. ProcessBackgroundActions
- // must be called for this to be operative.
- static void SetBackgroundReleaseRate(BytesPerSecond rate);
-+
-+ // Enables fork support.
-+ // Allocator will continue to function correctly in the child, after calling fork().
-+ static void EnableForkSupport();
- };
-
- } // namespace tcmalloc
---- contrib/libs/tcmalloc/tcmalloc/static_vars.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/static_vars.cc (working tree)
-@@ -59,6 +59,7 @@ ABSL_CONST_INIT PageHeapAllocator<StackTraceTable::Bucket>
- Static::bucket_allocator_;
- ABSL_CONST_INIT std::atomic<bool> Static::inited_{false};
- ABSL_CONST_INIT bool Static::cpu_cache_active_ = false;
-+ABSL_CONST_INIT bool Static::fork_support_enabled_ = false;
- ABSL_CONST_INIT Static::PageAllocatorStorage Static::page_allocator_;
- ABSL_CONST_INIT PageMap Static::pagemap_;
- ABSL_CONST_INIT absl::base_internal::SpinLock guarded_page_lock(
-@@ -116,6 +117,13 @@ ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE void Static::SlowInitIfNecessary() {
- pagemap_.MapRootWithSmallPages();
- guardedpage_allocator_.Init(/*max_alloced_pages=*/64, /*total_pages=*/128);
- inited_.store(true, std::memory_order_release);
-+
-+ pageheap_lock.Unlock();
-+ pthread_atfork(
-+ TCMallocPreFork,
-+ TCMallocPostFork,
-+ TCMallocPostFork);
-+ pageheap_lock.Lock();
- }
- }
-
---- contrib/libs/tcmalloc/tcmalloc/static_vars.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/static_vars.h (working tree)
-@@ -50,6 +50,9 @@ class CPUCache;
- class PageMap;
- class ThreadCache;
-
-+void TCMallocPreFork();
-+void TCMallocPostFork();
-+
- class Static {
- public:
- // True if InitIfNecessary() has run to completion.
-@@ -124,6 +127,9 @@ class Static {
- static void ActivateCPUCache() { cpu_cache_active_ = true; }
- static void DeactivateCPUCache() { cpu_cache_active_ = false; }
-
-+ static bool ForkSupportEnabled() { return fork_support_enabled_; }
-+ static void EnableForkSupport() { fork_support_enabled_ = true; }
-+
- static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() {
- return
- #ifndef TCMALLOC_DEPRECATED_PERTHREAD
-@@ -169,6 +175,7 @@ class Static {
- static PageHeapAllocator<StackTraceTable::Bucket> bucket_allocator_;
- ABSL_CONST_INIT static std::atomic<bool> inited_;
- static bool cpu_cache_active_;
-+ static bool fork_support_enabled_;
- ABSL_CONST_INIT static PeakHeapTracker peak_heap_tracker_;
- ABSL_CONST_INIT static NumaTopology<kNumaPartitions, kNumBaseClasses>
- numa_topology_;
---- contrib/libs/tcmalloc/tcmalloc/system-alloc.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/system-alloc.cc (working tree)
-@@ -354,6 +354,14 @@ ABSL_CONST_INIT std::atomic<int> system_release_errors = ATOMIC_VAR_INIT(0);
-
- } // namespace
-
-+void AcquireSystemAllocLock() {
-+ spinlock.Lock();
-+}
-+
-+void ReleaseSystemAllocLock() {
-+ spinlock.Unlock();
-+}
-+
- void* SystemAlloc(size_t bytes, size_t* actual_bytes, size_t alignment,
- const MemoryTag tag) {
- // If default alignment is set request the minimum alignment provided by
---- contrib/libs/tcmalloc/tcmalloc/system-alloc.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/system-alloc.h (working tree)
-@@ -50,6 +50,9 @@ void *SystemAlloc(size_t bytes, size_t *actual_bytes, size_t alignment,
- // call to SystemRelease.
- int SystemReleaseErrors();
-
-+void AcquireSystemAllocLock();
-+void ReleaseSystemAllocLock();
-+
- // This call is a hint to the operating system that the pages
- // contained in the specified range of memory will not be used for a
- // while, and can be released for use by other processes or the OS.
---- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (working tree)
-@@ -1117,6 +1117,40 @@ extern "C" void MallocExtension_Internal_ReleaseMemoryToSystem(
- }
- }
-
-+extern "C" void MallocExtension_EnableForkSupport() {
-+ Static::EnableForkSupport();
-+}
-+
-+void TCMallocPreFork() {
-+ if (!Static::ForkSupportEnabled()) {
-+ return;
-+ }
-+
-+ if (Static::CPUCacheActive()) {
-+ Static::cpu_cache().AcquireInternalLocks();
-+ }
-+ Static::transfer_cache().AcquireInternalLocks();
-+ guarded_page_lock.Lock();
-+ release_lock.Lock();
-+ pageheap_lock.Lock();
-+ AcquireSystemAllocLock();
-+}
-+
-+void TCMallocPostFork() {
-+ if (!Static::ForkSupportEnabled()) {
-+ return;
-+ }
-+
-+ ReleaseSystemAllocLock();
-+ pageheap_lock.Unlock();
-+ guarded_page_lock.Unlock();
-+ release_lock.Unlock();
-+ Static::transfer_cache().ReleaseInternalLocks();
-+ if (Static::CPUCacheActive()) {
-+ Static::cpu_cache().ReleaseInternalLocks();
-+ }
-+}
-+
- // nallocx slow path.
- // Moved to a separate function because size_class_with_alignment is not inlined
- // which would cause nallocx to become non-leaf function with stack frame and
---- contrib/libs/tcmalloc/tcmalloc/tcmalloc.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.h (working tree)
-@@ -120,4 +120,7 @@ void TCMallocInternalDeleteArrayNothrow(void* p, const std::nothrow_t&) __THROW
- }
- #endif
-
-+void TCMallocInternalAcquireLocks();
-+void TCMallocInternalReleaseLocks();
-+
- #endif // TCMALLOC_TCMALLOC_H_
---- contrib/libs/tcmalloc/tcmalloc/transfer_cache.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/transfer_cache.h (working tree)
-@@ -176,6 +176,26 @@ class TransferCacheManager : public StaticForwarder {
- }
- }
-
-+ void AcquireInternalLocks() {
-+ for (int i = 0; i < kNumClasses; ++i) {
-+ if (implementation_ == TransferCacheImplementation::Ring) {
-+ cache_[i].rbtc.AcquireInternalLocks();
-+ } else {
-+ cache_[i].tc.AcquireInternalLocks();
-+ }
-+ }
-+ }
-+
-+ void ReleaseInternalLocks() {
-+ for (int i = 0; i < kNumClasses; ++i) {
-+ if (implementation_ == TransferCacheImplementation::Ring) {
-+ cache_[i].rbtc.ReleaseInternalLocks();
-+ } else {
-+ cache_[i].tc.ReleaseInternalLocks();
-+ }
-+ }
-+ }
-+
- void InsertRange(int size_class, absl::Span<void *> batch) {
- if (implementation_ == TransferCacheImplementation::Ring) {
- cache_[size_class].rbtc.InsertRange(size_class, batch);
-@@ -295,6 +315,9 @@ class TransferCacheManager {
- return TransferCacheImplementation::None;
- }
-
-+ void AcquireInternalLocks() {}
-+ void ReleaseInternalLocks() {}
-+
- private:
- CentralFreeList freelist_[kNumClasses];
- } ABSL_CACHELINE_ALIGNED;
---- contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h (working tree)
-@@ -366,6 +366,18 @@ class TransferCache {
- return freelist_do_not_access_directly_;
- }
-
-+ void AcquireInternalLocks()
-+ {
-+ freelist().AcquireInternalLocks();
-+ lock_.Lock();
-+ }
-+
-+ void ReleaseInternalLocks()
-+ {
-+ lock_.Unlock();
-+ freelist().ReleaseInternalLocks();
-+ }
-+
- private:
- // Returns first object of the i-th slot.
- void **GetSlot(size_t i) ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
-@@ -468,6 +480,18 @@ class RingBufferTransferCache {
-
- // These methods all do internal locking.
-
-+ void AcquireInternalLocks()
-+ {
-+ freelist().AcquireInternalLocks();
-+ lock_.Lock();
-+ }
-+
-+ void ReleaseInternalLocks()
-+ {
-+ lock_.Unlock();
-+ freelist().ReleaseInternalLocks();
-+ }
-+
- // Insert the specified batch into the transfer cache. N is the number of
- // elements in the range. RemoveRange() is the opposite operation.
- void InsertRange(int size_class, absl::Span<void *> batch)
+--- contrib/libs/tcmalloc/tcmalloc/central_freelist.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/central_freelist.h (working tree)
+@@ -70,6 +70,14 @@ class CentralFreeList {
+
+ SpanStats GetSpanStats() const;
+
++ void AcquireInternalLocks() {
++ lock_.Lock();
++ }
++
++ void ReleaseInternalLocks() {
++ lock_.Unlock();
++ }
++
+ private:
+ // Release an object to spans.
+ // Returns object's span if it become completely free.
+--- contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (working tree)
+@@ -1031,6 +1031,20 @@ void CPUCache::PrintInPbtxt(PbtxtRegion *region) const {
+ }
+ }
+
++void CPUCache::AcquireInternalLocks() {
++ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
++ ++cpu) {
++ resize_[cpu].lock.Lock();
++ }
++}
++
++void CPUCache::ReleaseInternalLocks() {
++ for (int cpu = 0, num_cpus = absl::base_internal::NumCPUs(); cpu < num_cpus;
++ ++cpu) {
++ resize_[cpu].lock.Unlock();
++ }
++}
++
+ void CPUCache::PerClassResizeInfo::Init() {
+ state_.store(0, std::memory_order_relaxed);
+ }
+--- contrib/libs/tcmalloc/tcmalloc/cpu_cache.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.h (working tree)
+@@ -164,6 +164,9 @@ class CPUCache {
+ void Print(Printer* out) const;
+ void PrintInPbtxt(PbtxtRegion* region) const;
+
++ void AcquireInternalLocks();
++ void ReleaseInternalLocks();
++
+ private:
+ // Per-size-class freelist resizing info.
+ class PerClassResizeInfo {
+--- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (working tree)
+@@ -116,6 +116,10 @@ ABSL_ATTRIBUTE_WEAK int64_t
+ MallocExtension_Internal_GetMaxTotalThreadCacheBytes();
+ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetMaxTotalThreadCacheBytes(
+ int64_t value);
++
++ABSL_ATTRIBUTE_WEAK void
++MallocExtension_EnableForkSupport();
++
+ }
+
+ #endif
+--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (working tree)
+@@ -460,6 +460,14 @@ void MallocExtension::SetBackgroundReleaseRate(BytesPerSecond rate) {
+ #endif
+ }
+
++void MallocExtension::EnableForkSupport() {
++#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
++ if (&MallocExtension_EnableForkSupport != nullptr) {
++ MallocExtension_EnableForkSupport();
++ }
++#endif
++}
++
+ } // namespace tcmalloc
+
+ // Default implementation just returns size. The expectation is that
+--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (working tree)
+@@ -468,6 +468,10 @@ class MallocExtension final {
+ // Specifies the release rate from the page heap. ProcessBackgroundActions
+ // must be called for this to be operative.
+ static void SetBackgroundReleaseRate(BytesPerSecond rate);
++
++ // Enables fork support.
++ // Allocator will continue to function correctly in the child, after calling fork().
++ static void EnableForkSupport();
+ };
+
+ } // namespace tcmalloc
+--- contrib/libs/tcmalloc/tcmalloc/static_vars.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/static_vars.cc (working tree)
+@@ -59,6 +59,7 @@ ABSL_CONST_INIT PageHeapAllocator<StackTraceTable::Bucket>
+ Static::bucket_allocator_;
+ ABSL_CONST_INIT std::atomic<bool> Static::inited_{false};
+ ABSL_CONST_INIT bool Static::cpu_cache_active_ = false;
++ABSL_CONST_INIT bool Static::fork_support_enabled_ = false;
+ ABSL_CONST_INIT Static::PageAllocatorStorage Static::page_allocator_;
+ ABSL_CONST_INIT PageMap Static::pagemap_;
+ ABSL_CONST_INIT absl::base_internal::SpinLock guarded_page_lock(
+@@ -116,6 +117,13 @@ ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE void Static::SlowInitIfNecessary() {
+ pagemap_.MapRootWithSmallPages();
+ guardedpage_allocator_.Init(/*max_alloced_pages=*/64, /*total_pages=*/128);
+ inited_.store(true, std::memory_order_release);
++
++ pageheap_lock.Unlock();
++ pthread_atfork(
++ TCMallocPreFork,
++ TCMallocPostFork,
++ TCMallocPostFork);
++ pageheap_lock.Lock();
+ }
+ }
+
+--- contrib/libs/tcmalloc/tcmalloc/static_vars.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/static_vars.h (working tree)
+@@ -50,6 +50,9 @@ class CPUCache;
+ class PageMap;
+ class ThreadCache;
+
++void TCMallocPreFork();
++void TCMallocPostFork();
++
+ class Static {
+ public:
+ // True if InitIfNecessary() has run to completion.
+@@ -124,6 +127,9 @@ class Static {
+ static void ActivateCPUCache() { cpu_cache_active_ = true; }
+ static void DeactivateCPUCache() { cpu_cache_active_ = false; }
+
++ static bool ForkSupportEnabled() { return fork_support_enabled_; }
++ static void EnableForkSupport() { fork_support_enabled_ = true; }
++
+ static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() {
+ return
+ #ifndef TCMALLOC_DEPRECATED_PERTHREAD
+@@ -169,6 +175,7 @@ class Static {
+ static PageHeapAllocator<StackTraceTable::Bucket> bucket_allocator_;
+ ABSL_CONST_INIT static std::atomic<bool> inited_;
+ static bool cpu_cache_active_;
++ static bool fork_support_enabled_;
+ ABSL_CONST_INIT static PeakHeapTracker peak_heap_tracker_;
+ ABSL_CONST_INIT static NumaTopology<kNumaPartitions, kNumBaseClasses>
+ numa_topology_;
+--- contrib/libs/tcmalloc/tcmalloc/system-alloc.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/system-alloc.cc (working tree)
+@@ -354,6 +354,14 @@ ABSL_CONST_INIT std::atomic<int> system_release_errors = ATOMIC_VAR_INIT(0);
+
+ } // namespace
+
++void AcquireSystemAllocLock() {
++ spinlock.Lock();
++}
++
++void ReleaseSystemAllocLock() {
++ spinlock.Unlock();
++}
++
+ void* SystemAlloc(size_t bytes, size_t* actual_bytes, size_t alignment,
+ const MemoryTag tag) {
+ // If default alignment is set request the minimum alignment provided by
+--- contrib/libs/tcmalloc/tcmalloc/system-alloc.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/system-alloc.h (working tree)
+@@ -50,6 +50,9 @@ void *SystemAlloc(size_t bytes, size_t *actual_bytes, size_t alignment,
+ // call to SystemRelease.
+ int SystemReleaseErrors();
+
++void AcquireSystemAllocLock();
++void ReleaseSystemAllocLock();
++
+ // This call is a hint to the operating system that the pages
+ // contained in the specified range of memory will not be used for a
+ // while, and can be released for use by other processes or the OS.
+--- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (working tree)
+@@ -1117,6 +1117,40 @@ extern "C" void MallocExtension_Internal_ReleaseMemoryToSystem(
+ }
+ }
+
++extern "C" void MallocExtension_EnableForkSupport() {
++ Static::EnableForkSupport();
++}
++
++void TCMallocPreFork() {
++ if (!Static::ForkSupportEnabled()) {
++ return;
++ }
++
++ if (Static::CPUCacheActive()) {
++ Static::cpu_cache().AcquireInternalLocks();
++ }
++ Static::transfer_cache().AcquireInternalLocks();
++ guarded_page_lock.Lock();
++ release_lock.Lock();
++ pageheap_lock.Lock();
++ AcquireSystemAllocLock();
++}
++
++void TCMallocPostFork() {
++ if (!Static::ForkSupportEnabled()) {
++ return;
++ }
++
++ ReleaseSystemAllocLock();
++ pageheap_lock.Unlock();
++ guarded_page_lock.Unlock();
++ release_lock.Unlock();
++ Static::transfer_cache().ReleaseInternalLocks();
++ if (Static::CPUCacheActive()) {
++ Static::cpu_cache().ReleaseInternalLocks();
++ }
++}
++
+ // nallocx slow path.
+ // Moved to a separate function because size_class_with_alignment is not inlined
+ // which would cause nallocx to become non-leaf function with stack frame and
+--- contrib/libs/tcmalloc/tcmalloc/tcmalloc.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.h (working tree)
+@@ -120,4 +120,7 @@ void TCMallocInternalDeleteArrayNothrow(void* p, const std::nothrow_t&) __THROW
+ }
+ #endif
+
++void TCMallocInternalAcquireLocks();
++void TCMallocInternalReleaseLocks();
++
+ #endif // TCMALLOC_TCMALLOC_H_
+--- contrib/libs/tcmalloc/tcmalloc/transfer_cache.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/transfer_cache.h (working tree)
+@@ -176,6 +176,26 @@ class TransferCacheManager : public StaticForwarder {
+ }
+ }
+
++ void AcquireInternalLocks() {
++ for (int i = 0; i < kNumClasses; ++i) {
++ if (implementation_ == TransferCacheImplementation::Ring) {
++ cache_[i].rbtc.AcquireInternalLocks();
++ } else {
++ cache_[i].tc.AcquireInternalLocks();
++ }
++ }
++ }
++
++ void ReleaseInternalLocks() {
++ for (int i = 0; i < kNumClasses; ++i) {
++ if (implementation_ == TransferCacheImplementation::Ring) {
++ cache_[i].rbtc.ReleaseInternalLocks();
++ } else {
++ cache_[i].tc.ReleaseInternalLocks();
++ }
++ }
++ }
++
+ void InsertRange(int size_class, absl::Span<void *> batch) {
+ if (implementation_ == TransferCacheImplementation::Ring) {
+ cache_[size_class].rbtc.InsertRange(size_class, batch);
+@@ -295,6 +315,9 @@ class TransferCacheManager {
+ return TransferCacheImplementation::None;
+ }
+
++ void AcquireInternalLocks() {}
++ void ReleaseInternalLocks() {}
++
+ private:
+ CentralFreeList freelist_[kNumClasses];
+ } ABSL_CACHELINE_ALIGNED;
+--- contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/transfer_cache_internals.h (working tree)
+@@ -366,6 +366,18 @@ class TransferCache {
+ return freelist_do_not_access_directly_;
+ }
+
++ void AcquireInternalLocks()
++ {
++ freelist().AcquireInternalLocks();
++ lock_.Lock();
++ }
++
++ void ReleaseInternalLocks()
++ {
++ lock_.Unlock();
++ freelist().ReleaseInternalLocks();
++ }
++
+ private:
+ // Returns first object of the i-th slot.
+ void **GetSlot(size_t i) ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
+@@ -468,6 +480,18 @@ class RingBufferTransferCache {
+
+ // These methods all do internal locking.
+
++ void AcquireInternalLocks()
++ {
++ freelist().AcquireInternalLocks();
++ lock_.Lock();
++ }
++
++ void ReleaseInternalLocks()
++ {
++ lock_.Unlock();
++ freelist().ReleaseInternalLocks();
++ }
++
+ // Insert the specified batch into the transfer cache. N is the number of
+ // elements in the range. RemoveRange() is the opposite operation.
+ void InsertRange(int size_class, absl::Span<void *> batch)
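Taken together, the fork.patch hunks above implement fork safety: MallocExtension::EnableForkSupport() sets an opt-in flag, Static::SlowInitIfNecessary() registers TCMallocPreFork/TCMallocPostFork via pthread_atfork(), and the pre-fork handler acquires every internal allocator lock (CPU-cache, transfer-cache, guarded-page, release, page-heap, and system-alloc locks) so a forked child never inherits a lock frozen mid-operation. A minimal usage sketch follows; it assumes the patched allocator is linked in and that the header is reachable as tcmalloc/malloc_extension.h — the sketch is illustrative and not part of the patch itself.

// Illustrative only: exercises MallocExtension::EnableForkSupport() from the
// patch above. Include path and build wiring are assumptions.
#include <sys/wait.h>
#include <unistd.h>

#include <cstdlib>

#include "tcmalloc/malloc_extension.h"

int main() {
  // Opt in once, before any fork(). Without this call the registered
  // TCMallocPreFork/TCMallocPostFork handlers return immediately.
  tcmalloc::MallocExtension::EnableForkSupport();

  const pid_t pid = fork();  // Pre-fork handler: all internal locks acquired.
  if (pid == 0) {
    // Post-fork handler (child): locks were re-released, so allocating here
    // is safe even if other threads held allocator locks at fork() time.
    void* p = std::malloc(1024);
    std::free(p);
    _exit(0);
  }
  waitpid(pid, nullptr, 0);  // Post-fork handler (parent) also released them.
  return 0;
}
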
diff --git a/contrib/libs/tcmalloc/patches/yandex.patch b/contrib/libs/tcmalloc/patches/yandex.patch
index 98eaf2f4d8..12d11f2dad 100644
--- a/contrib/libs/tcmalloc/patches/yandex.patch
+++ b/contrib/libs/tcmalloc/patches/yandex.patch
@@ -1,91 +1,91 @@
-commit ab4069ebdd376db4d32c29e1a2414565ec849249
-author: prime
-date: 2021-10-07T14:52:42+03:00
+commit ab4069ebdd376db4d32c29e1a2414565ec849249
+author: prime
+date: 2021-10-07T14:52:42+03:00
+
+ Apply yandex patches
+
+--- contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -1112,6 +1112,11 @@ extern "C" bool MallocExtension_Internal_GetPerCpuCachesActive() {
+ return tcmalloc::tcmalloc_internal::Static::CPUCacheActive();
+ }
- Apply yandex patches
++extern "C" void MallocExtension_Internal_DeactivatePerCpuCaches() {
++ tcmalloc::tcmalloc_internal::Parameters::set_per_cpu_caches(false);
++ tcmalloc::tcmalloc_internal::Static::DeactivateCPUCache();
++}
++
+ extern "C" int32_t MallocExtension_Internal_GetMaxPerCpuCacheSize() {
+ return tcmalloc::tcmalloc_internal::Parameters::max_per_cpu_cache_size();
+ }
+--- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -75,6 +75,7 @@ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetMemoryLimit(
+ ABSL_ATTRIBUTE_WEAK bool MallocExtension_Internal_GetNumericProperty(
+ const char* name_data, size_t name_size, size_t* value);
+ ABSL_ATTRIBUTE_WEAK bool MallocExtension_Internal_GetPerCpuCachesActive();
++ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_DeactivatePerCpuCaches();
+ ABSL_ATTRIBUTE_WEAK int32_t MallocExtension_Internal_GetMaxPerCpuCacheSize();
+ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetSkipSubreleaseInterval(
+ absl::Duration* ret);
+--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -287,6 +287,16 @@ bool MallocExtension::PerCpuCachesActive() {
+ #endif
+ }
+
++void MallocExtension::DeactivatePerCpuCaches() {
++#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
++ if (MallocExtension_Internal_DeactivatePerCpuCaches == nullptr) {
++ return;
++ }
++
++ MallocExtension_Internal_DeactivatePerCpuCaches();
++#endif
++}
++
+ int32_t MallocExtension::GetMaxPerCpuCacheSize() {
+ #if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
+ if (MallocExtension_Internal_GetMaxPerCpuCacheSize == nullptr) {
+--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -329,6 +329,11 @@ class MallocExtension final {
+ // Gets whether TCMalloc is using per-CPU caches.
+ static bool PerCpuCachesActive();
+
++ // Extension for unified agent.
++ //
++ // Should be removed in the future https://st.yandex-team.ru/UNIFIEDAGENT-321
++ static void DeactivatePerCpuCaches();
++
+ // Gets the current maximum cache size per CPU cache.
+ static int32_t GetMaxPerCpuCacheSize();
+ // Sets the maximum cache size per CPU cache. This is a per-core limit.
+--- contrib/libs/tcmalloc/tcmalloc/static_vars.h (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/static_vars.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -122,6 +122,7 @@ class Static {
+ return cpu_cache_active_;
+ }
+ static void ActivateCPUCache() { cpu_cache_active_ = true; }
++ static void DeactivateCPUCache() { cpu_cache_active_ = false; }
+
+ static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() {
+ return
+--- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (5096009d22199137186c9a972bc88409d8ebd513)
++++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
+@@ -2210,14 +2210,7 @@ extern "C" void* TCMallocInternalNewArray(size_t size)
+ TCMALLOC_ALIAS(TCMallocInternalNew);
+ #else
+ {
+- void* p = fast_alloc(CppPolicy().WithoutHooks(), size);
+- // We keep this next instruction out of fast_alloc for a reason: when
+- // it's in, and new just calls fast_alloc, the optimizer may fold the
+- // new call into fast_alloc, which messes up our whole section-based
+- // stacktracing (see ABSL_ATTRIBUTE_SECTION, above). This ensures fast_alloc
+- // isn't the last thing this fn calls, and prevents the folding.
+- MallocHook::InvokeNewHook(p, size);
+- return p;
++ return fast_alloc(CppPolicy().WithoutHooks(), size);
+ }
+ #endif // TCMALLOC_ALIAS
---- contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/cpu_cache.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -1112,6 +1112,11 @@ extern "C" bool MallocExtension_Internal_GetPerCpuCachesActive() {
- return tcmalloc::tcmalloc_internal::Static::CPUCacheActive();
- }
-
-+extern "C" void MallocExtension_Internal_DeactivatePerCpuCaches() {
-+ tcmalloc::tcmalloc_internal::Parameters::set_per_cpu_caches(false);
-+ tcmalloc::tcmalloc_internal::Static::DeactivateCPUCache();
-+}
-+
- extern "C" int32_t MallocExtension_Internal_GetMaxPerCpuCacheSize() {
- return tcmalloc::tcmalloc_internal::Parameters::max_per_cpu_cache_size();
- }
---- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -75,6 +75,7 @@ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetMemoryLimit(
- ABSL_ATTRIBUTE_WEAK bool MallocExtension_Internal_GetNumericProperty(
- const char* name_data, size_t name_size, size_t* value);
- ABSL_ATTRIBUTE_WEAK bool MallocExtension_Internal_GetPerCpuCachesActive();
-+ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_DeactivatePerCpuCaches();
- ABSL_ATTRIBUTE_WEAK int32_t MallocExtension_Internal_GetMaxPerCpuCacheSize();
- ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_GetSkipSubreleaseInterval(
- absl::Duration* ret);
---- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -287,6 +287,16 @@ bool MallocExtension::PerCpuCachesActive() {
- #endif
- }
-
-+void MallocExtension::DeactivatePerCpuCaches() {
-+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
-+ if (MallocExtension_Internal_DeactivatePerCpuCaches == nullptr) {
-+ return;
-+ }
-+
-+ MallocExtension_Internal_DeactivatePerCpuCaches();
-+#endif
-+}
-+
- int32_t MallocExtension::GetMaxPerCpuCacheSize() {
- #if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
- if (MallocExtension_Internal_GetMaxPerCpuCacheSize == nullptr) {
---- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -329,6 +329,11 @@ class MallocExtension final {
- // Gets whether TCMalloc is using per-CPU caches.
- static bool PerCpuCachesActive();
-
-+ // Extension for unified agent.
-+ //
-+ // Should be removed in the future https://st.yandex-team.ru/UNIFIEDAGENT-321
-+ static void DeactivatePerCpuCaches();
-+
- // Gets the current maximum cache size per CPU cache.
- static int32_t GetMaxPerCpuCacheSize();
- // Sets the maximum cache size per CPU cache. This is a per-core limit.
---- contrib/libs/tcmalloc/tcmalloc/static_vars.h (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/static_vars.h (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -122,6 +122,7 @@ class Static {
- return cpu_cache_active_;
- }
- static void ActivateCPUCache() { cpu_cache_active_ = true; }
-+ static void DeactivateCPUCache() { cpu_cache_active_ = false; }
-
- static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() {
- return
---- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (5096009d22199137186c9a972bc88409d8ebd513)
-+++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (ab4069ebdd376db4d32c29e1a2414565ec849249)
-@@ -2210,14 +2210,7 @@ extern "C" void* TCMallocInternalNewArray(size_t size)
- TCMALLOC_ALIAS(TCMallocInternalNew);
- #else
- {
-- void* p = fast_alloc(CppPolicy().WithoutHooks(), size);
-- // We keep this next instruction out of fast_alloc for a reason: when
-- // it's in, and new just calls fast_alloc, the optimizer may fold the
-- // new call into fast_alloc, which messes up our whole section-based
-- // stacktracing (see ABSL_ATTRIBUTE_SECTION, above). This ensures fast_alloc
-- // isn't the last thing this fn calls, and prevents the folding.
-- MallocHook::InvokeNewHook(p, size);
-- return p;
-+ return fast_alloc(CppPolicy().WithoutHooks(), size);
- }
- #endif // TCMALLOC_ALIAS
-
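yandex.patch, for its part, adds the MallocExtension::DeactivatePerCpuCaches() entry point (flagged above as a temporary extension for the unified agent) and simplifies TCMallocInternalNewArray by dropping the MallocHook::InvokeNewHook call. A minimal sketch of the new entry point in use, assuming the patched headers (include path assumed; the printf is illustrative):

// Illustrative only: runtime opt-out of per-CPU caches via the
// DeactivatePerCpuCaches() extension added by yandex.patch.
#include <cstdio>

#include "tcmalloc/malloc_extension.h"

int main() {
  if (tcmalloc::MallocExtension::PerCpuCachesActive()) {
    // Internally calls Parameters::set_per_cpu_caches(false) and
    // Static::DeactivateCPUCache(), per the patched cpu_cache.cc above.
    tcmalloc::MallocExtension::DeactivatePerCpuCaches();
  }
  std::printf("per-cpu caches active: %d\n",
              static_cast<int>(tcmalloc::MallocExtension::PerCpuCachesActive()));
  return 0;
}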