author     ilezhankin <ilezhankin@yandex-team.com>  2025-01-30 18:27:11 +0300
committer  ilezhankin <ilezhankin@yandex-team.com>  2025-01-30 19:14:18 +0300
commit     00f8410dfd96fac35968e94d442a24c8958f2560 (patch)
tree       d31fc3ef62afcaf774ee5d08bad93b5c58eca1b3 /yql
parent     6e502f2086936c78bd6af6f9435bd2d2ea314247 (diff)
Allow choosing the default allocator for MKQL at runtime
This feature is useful when a production query fails with memory issues on the user side and we want to profile the memory allocations without recompiling the binary.

commit_hash:12045b8a01693e53c27fec35d03e9ef016c363eb
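For context, a minimal usage sketch (not part of this commit): in a binary built with ALLOW_DEFAULT_ALLOCATOR, the new entry point is expected to be called once at process startup, before the first MKQL allocation. The profiler mentioned in the comment is illustrative.

```cpp
// Sketch only: assumes the binary was built with -DALLOW_DEFAULT_ALLOCATOR.
#include <yql/essentials/minikql/aligned_page_pool.h>

int main() {
    // Must run once, before any MKQL allocation in the process; afterwards
    // MKQL pages and blocks go through malloc/free, so an external heap
    // profiler (e.g. heaptrack) can attribute them without a rebuild.
    NKikimr::UseDefaultAllocator();

    // ... start the query engine as usual ...
    return 0;
}
```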
Diffstat (limited to 'yql')
-rw-r--r--  yql/essentials/minikql/aligned_page_pool.cpp   147
-rw-r--r--  yql/essentials/minikql/aligned_page_pool.h      21
-rw-r--r--  yql/essentials/minikql/mkql_alloc.cpp           48
-rw-r--r--  yql/essentials/minikql/mkql_alloc.h             62
-rw-r--r--  yql/essentials/utils/memory_profiling/ya.make    5
5 files changed, 188 insertions, 95 deletions
diff --git a/yql/essentials/minikql/aligned_page_pool.cpp b/yql/essentials/minikql/aligned_page_pool.cpp
index 3de855c8fd..fa40200fb1 100644
--- a/yql/essentials/minikql/aligned_page_pool.cpp
+++ b/yql/essentials/minikql/aligned_page_pool.cpp
@@ -6,19 +6,31 @@
#include <util/string/strip.h>
#include <util/system/align.h>
#include <util/system/compiler.h>
-#include <util/system/info.h>
#include <util/system/error.h>
+#include <util/system/info.h>
#include <util/thread/lfstack.h>
#if defined(_win_)
-#include <util/system/winint.h>
+# include <util/system/winint.h>
#elif defined(_unix_)
-#include <sys/types.h>
-#include <sys/mman.h>
+# include <sys/types.h>
+# include <sys/mman.h>
#endif
namespace NKikimr {
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+# if defined(PROFILE_MEMORY_ALLOCATIONS)
+static bool IsDefaultAllocator = true;
+# else
+static bool IsDefaultAllocator = false;
+# endif
+void UseDefaultAllocator() {
+ // TODO: check that the MKQL allocator hasn't already been used
+ IsDefaultAllocator = true;
+}
+#endif
+
static ui64 SYS_PAGE_SIZE = NSystemInfo::GetPageSize();
constexpr ui32 MidLevels = 10;
@@ -85,14 +97,16 @@ public:
private:
size_t PushPage(void* addr) {
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- FreePage(addr);
- return GetPageSize();
-#else
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(IsDefaultAllocator)) {
+ FreePage(addr);
+ return GetPageSize();
+ }
+#endif
+
++Count;
Pages.Enqueue(addr);
return 0;
-#endif
}
void FreePage(void* addr) {
@@ -127,6 +141,11 @@ public:
}
void* DoMmap(size_t size) {
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ // No memory maps allowed while using default allocator
+ Y_DEBUG_ABORT_UNLESS(!IsDefaultAllocator);
+#endif
+
void* res = T::Mmap(size);
TotalMmappedBytes += size;
return res;
@@ -297,11 +316,15 @@ TAlignedPagePoolImpl<T>::~TAlignedPagePoolImpl() {
size_t activeBlocksSize = 0;
for (auto it = ActiveBlocks.cbegin(); ActiveBlocks.cend() != it; ActiveBlocks.erase(it++)) {
activeBlocksSize += it->second;
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- ReturnBlock(it->first, it->second);
-#else
- Free(it->first, it->second);
+
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(IsDefaultAllocator)) {
+ ReturnBlock(it->first, it->second);
+ continue;
+ }
#endif
+
+ Free(it->first, it->second);
}
if (activeBlocksSize > 0 || FreePages.size() != AllPages.size() || OffloadedActiveBytes) {
@@ -394,58 +417,73 @@ void* TAlignedPagePoolImpl<T>::GetPage() {
throw TMemoryLimitExceededException();
}
-#ifndef PROFILE_MEMORY_ALLOCATIONS
- if (const auto ptr = TGlobalPools<T, false>::Instance().Get(0).GetPage()) {
- TotalAllocated += POOL_PAGE_SIZE;
- if (AllocNotifyCallback) {
- AllocNotifyCurrentBytes += POOL_PAGE_SIZE;
- }
- if (Counters.TotalBytesAllocatedCntr) {
- (*Counters.TotalBytesAllocatedCntr) += POOL_PAGE_SIZE;
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_LIKELY(!IsDefaultAllocator)) {
+#endif
+ if (const auto ptr = TGlobalPools<T, false>::Instance().Get(0).GetPage()) {
+ TotalAllocated += POOL_PAGE_SIZE;
+ if (AllocNotifyCallback) {
+ AllocNotifyCurrentBytes += POOL_PAGE_SIZE;
+ }
+ if (Counters.TotalBytesAllocatedCntr) {
+ (*Counters.TotalBytesAllocatedCntr) += POOL_PAGE_SIZE;
+ }
+ ++PageGlobalHitCount;
+ AllPages.emplace(ptr);
+
+ UpdatePeaks();
+ return ptr;
}
- ++PageGlobalHitCount;
- AllPages.emplace(ptr);
- UpdatePeaks();
- return ptr;
+ ++PageMissCount;
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
}
-
- ++PageMissCount;
#endif
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- const auto res = GetBlock(POOL_PAGE_SIZE);
-#else
- const auto res = Alloc(POOL_PAGE_SIZE);
- AllPages.emplace(res);
+ void* res;
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(IsDefaultAllocator)) {
+ res = GetBlock(POOL_PAGE_SIZE);
+ } else {
#endif
+ res = Alloc(POOL_PAGE_SIZE);
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ }
+#endif
+ AllPages.emplace(res);
return res;
}
template<typename T>
void TAlignedPagePoolImpl<T>::ReturnPage(void* addr) noexcept {
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- ReturnBlock(addr, POOL_PAGE_SIZE);
-#else
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(IsDefaultAllocator)) {
+ ReturnBlock(addr, POOL_PAGE_SIZE);
+ return;
+ }
+#endif
+
Y_DEBUG_ABORT_UNLESS(AllPages.find(addr) != AllPages.end());
FreePages.emplace(addr);
-#endif
}
template<typename T>
void* TAlignedPagePoolImpl<T>::GetBlock(size_t size) {
Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- OffloadAlloc(size);
- auto ret = malloc(size);
- if (!ret) {
- throw TMemoryLimitExceededException();
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(IsDefaultAllocator)) {
+ OffloadAlloc(size);
+ auto ret = malloc(size);
+ if (!ret) {
+ throw TMemoryLimitExceededException();
+ }
+
+ return ret;
}
+#endif
- return ret;
-#else
if (size == POOL_PAGE_SIZE) {
return GetPage();
} else {
@@ -453,24 +491,27 @@ void* TAlignedPagePoolImpl<T>::GetBlock(size_t size) {
Y_DEBUG_ABORT_UNLESS(ActiveBlocks.emplace(ptr, size).second);
return ptr;
}
-#endif
}
template<typename T>
void TAlignedPagePoolImpl<T>::ReturnBlock(void* ptr, size_t size) noexcept {
Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- OffloadFree(size);
- free(ptr);
-#else
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(IsDefaultAllocator)) {
+ OffloadFree(size);
+ free(ptr);
+ UpdateMemoryYellowZone();
+ return;
+ }
+#endif
+
if (size == POOL_PAGE_SIZE) {
ReturnPage(ptr);
} else {
Free(ptr, size);
Y_DEBUG_ABORT_UNLESS(ActiveBlocks.erase(ptr));
}
-#endif
UpdateMemoryYellowZone();
}
@@ -644,6 +685,14 @@ void TAlignedPagePoolImpl<T>::ResetGlobalsUT()
TGlobalPools<T, false>::Instance().Reset();
}
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+// static
+template<typename T>
+bool TAlignedPagePoolImpl<T>::IsDefaultAllocatorUsed() {
+ return IsDefaultAllocator;
+}
+#endif
+
template class TAlignedPagePoolImpl<>;
template class TAlignedPagePoolImpl<TFakeAlignedMmap>;
template class TAlignedPagePoolImpl<TFakeUnalignedMmap>;
diff --git a/yql/essentials/minikql/aligned_page_pool.h b/yql/essentials/minikql/aligned_page_pool.h
index 5ab8fcec1c..4a5b1d2e55 100644
--- a/yql/essentials/minikql/aligned_page_pool.h
+++ b/yql/essentials/minikql/aligned_page_pool.h
@@ -1,21 +1,26 @@
#pragma once
+#include <library/cpp/monlib/dynamic_counters/counters.h>
+
#include <util/generic/yexception.h>
#include <util/stream/output.h>
#include <util/string/builder.h>
-#include <util/system/yassert.h>
#include <util/system/defaults.h>
+#include <util/system/yassert.h>
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-
-#include <type_traits>
#include <stack>
-#include <vector>
-#include <unordered_set>
#include <unordered_map>
+#include <unordered_set>
+#include <vector>
namespace NKikimr {
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+// By default the default allocator is not used unless PROFILE_MEMORY_ALLOCATIONS is defined.
+// Call this function once at process start to enable the default allocator.
+void UseDefaultAllocator();
+#endif
+
struct TAlignedPagePoolCounters {
explicit TAlignedPagePoolCounters(::NMonitoring::TDynamicCounterPtr countersRoot = nullptr, const TString& name = TString());
@@ -233,6 +238,10 @@ public:
IsMemoryYellowZoneForcefullyChanged = true;
}
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ static bool IsDefaultAllocatorUsed();
+#endif
+
protected:
void* Alloc(size_t size);
void Free(void* ptr, size_t size) noexcept;
diff --git a/yql/essentials/minikql/mkql_alloc.cpp b/yql/essentials/minikql/mkql_alloc.cpp
index 839b011679..539a3349ed 100644
--- a/yql/essentials/minikql/mkql_alloc.cpp
+++ b/yql/essentials/minikql/mkql_alloc.cpp
@@ -49,13 +49,18 @@ void TAllocState::CleanupPAllocList(TListEntry* root) {
void TAllocState::CleanupArrowList(TListEntry* root) {
for (auto curr = root->Right; curr != root; ) {
auto next = curr->Right;
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- free(curr);
-#else
- auto size = ((TMkqlArrowHeader*)curr)->Size;
- auto fullSize = size + sizeof(TMkqlArrowHeader);
- ReleaseAlignedPage(curr, fullSize);
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
+ free(curr);
+ } else {
+#endif
+ auto size = ((TMkqlArrowHeader*)curr)->Size;
+ auto fullSize = size + sizeof(TMkqlArrowHeader);
+ ReleaseAlignedPage(curr, fullSize);
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ }
#endif
+
curr = next;
}
@@ -256,15 +261,21 @@ void* MKQLArrowAllocate(ui64 size) {
state->OffloadAlloc(fullSize);
}
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- auto ptr = malloc(fullSize);
- if (!ptr) {
- throw TMemoryLimitExceededException();
+ void* ptr;
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
+ ptr = malloc(fullSize);
+ if (!ptr) {
+ throw TMemoryLimitExceededException();
+ }
+ } else {
+#endif
+ ptr = GetAlignedPage(fullSize);
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
}
-#else
- auto ptr = GetAlignedPage(fullSize);
#endif
- auto header = (TMkqlArrowHeader*)ptr;
+
+ auto* header = (TMkqlArrowHeader*)ptr;
if (state->EnableArrowTracking) {
header->Entry.Link(&state->ArrowBlocksRoot);
Y_ENSURE(state->ArrowBuffers.insert(header + 1).second);
@@ -297,11 +308,14 @@ void MKQLArrowFree(const void* mem, ui64 size) {
}
Y_ENSURE(size == header->Size);
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- free(header);
-#else
- ReleaseAlignedPage(header, fullSize);
+
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
+ free(header);
+ return;
+ }
#endif
+ ReleaseAlignedPage(header, fullSize);
}
void MKQLArrowUntrack(const void* mem) {
diff --git a/yql/essentials/minikql/mkql_alloc.h b/yql/essentials/minikql/mkql_alloc.h
index 24c18f41a8..abcb6cc73d 100644
--- a/yql/essentials/minikql/mkql_alloc.h
+++ b/yql/essentials/minikql/mkql_alloc.h
@@ -60,10 +60,23 @@ struct TAllocState : public TAlignedPagePool
bool SupportsSizedAllocators = false;
void* LargeAlloc(size_t size) {
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
+ return malloc(size);
+ }
+#endif
+
return Alloc(size);
}
void LargeFree(void* ptr, size_t size) noexcept {
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
+ free(ptr);
+ return;
+ }
+#endif
+
Free(ptr, size);
}
@@ -288,17 +301,20 @@ private:
};
void* MKQLAllocSlow(size_t sz, TAllocState* state, const EMemorySubPool mPool);
+
inline void* MKQLAllocFastDeprecated(size_t sz, TAllocState* state, const EMemorySubPool mPool) {
Y_DEBUG_ABORT_UNLESS(state);
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- auto ret = (TAllocState::TListEntry*)malloc(sizeof(TAllocState::TListEntry) + sz);
- if (!ret) {
- throw TMemoryLimitExceededException();
- }
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
+ auto ret = (TAllocState::TListEntry*)malloc(sizeof(TAllocState::TListEntry) + sz);
+ if (!ret) {
+ throw TMemoryLimitExceededException();
+ }
- ret->Link(&state->OffloadedBlocksRoot);
- return ret + 1;
+ ret->Link(&state->OffloadedBlocksRoot);
+ return ret + 1;
+ }
#endif
auto currPage = state->CurrentPages[(TMemorySubPoolIdx)mPool];
@@ -315,13 +331,12 @@ inline void* MKQLAllocFastDeprecated(size_t sz, TAllocState* state, const EMemor
inline void* MKQLAllocFastWithSize(size_t sz, TAllocState* state, const EMemorySubPool mPool) {
Y_DEBUG_ABORT_UNLESS(state);
- bool useMemalloc = state->SupportsSizedAllocators && sz > MaxPageUserData;
-
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- useMemalloc = true;
+ bool useMalloc = state->SupportsSizedAllocators && sz > MaxPageUserData;
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ useMalloc = useMalloc || TAllocState::IsDefaultAllocatorUsed();
#endif
- if (useMemalloc) {
+ if (Y_UNLIKELY(useMalloc)) {
state->OffloadAlloc(sizeof(TAllocState::TListEntry) + sz);
auto ret = (TAllocState::TListEntry*)malloc(sizeof(TAllocState::TListEntry) + sz);
if (!ret) {
@@ -350,14 +365,16 @@ inline void MKQLFreeDeprecated(const void* mem, const EMemorySubPool mPool) noex
return;
}
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- TAllocState *state = TlsAllocState;
- Y_DEBUG_ABORT_UNLESS(state);
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
+ TAllocState *state = TlsAllocState;
+ Y_DEBUG_ABORT_UNLESS(state);
- auto entry = (TAllocState::TListEntry*)(mem) - 1;
- entry->Unlink();
- free(entry);
- return;
+ auto entry = (TAllocState::TListEntry*)(mem) - 1;
+ entry->Unlink();
+ free(entry);
+ return;
+ }
#endif
TAllocPageHeader* header = (TAllocPageHeader*)TAllocState::GetPageStart(mem);
@@ -378,12 +395,11 @@ inline void MKQLFreeFastWithSize(const void* mem, size_t sz, TAllocState* state,
Y_DEBUG_ABORT_UNLESS(state);
bool useFree = state->SupportsSizedAllocators && sz > MaxPageUserData;
-
-#ifdef PROFILE_MEMORY_ALLOCATIONS
- useFree = true;
+#if defined(ALLOW_DEFAULT_ALLOCATOR)
+ useFree = useFree || TAllocState::IsDefaultAllocatorUsed();
#endif
- if (useFree) {
+ if (Y_UNLIKELY(useFree)) {
auto entry = (TAllocState::TListEntry*)(mem) - 1;
entry->Unlink();
free(entry);
diff --git a/yql/essentials/utils/memory_profiling/ya.make b/yql/essentials/utils/memory_profiling/ya.make
index 8c26d79785..3e728ecbb8 100644
--- a/yql/essentials/utils/memory_profiling/ya.make
+++ b/yql/essentials/utils/memory_profiling/ya.make
@@ -2,6 +2,11 @@ LIBRARY()
IF (PROFILE_MEMORY_ALLOCATIONS)
CFLAGS(GLOBAL -DPROFILE_MEMORY_ALLOCATIONS)
+ CFLAGS(GLOBAL -DALLOW_DEFAULT_ALLOCATOR)
+ENDIF()
+
+IF (ALLOW_DEFAULT_ALLOCATOR)
+ CFLAGS(GLOBAL -DALLOW_DEFAULT_ALLOCATOR)
ENDIF()
END()
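Build wiring note: with this ya.make change, PROFILE_MEMORY_ALLOCATIONS now implies ALLOW_DEFAULT_ALLOCATOR (profiling builds keep their old behavior, since under that flag the runtime default flips to true), while a plain ALLOW_DEFAULT_ALLOCATOR build compiles the switch in but leaves the MKQL allocator active until UseDefaultAllocator() is called. Assuming the usual ya build variable syntax, enabling it looks like `ya make -DALLOW_DEFAULT_ALLOCATOR`.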