| author | DMokhnatkin <dmitriy.mokhnatkin@gmail.com> | 2022-04-20 14:35:40 +0300 |
|---|---|---|
| committer | DMokhnatkin <dmitriy.mokhnatkin@gmail.com> | 2022-04-20 14:35:40 +0300 |
| commit | 296eff11b1bb65653a0a388b78f373ff7df5d447 (patch) | |
| tree | b40e0bfa5456f29ff7ff56534749715904a92f4a | |
| parent | 748f6ea01e485802c076897a02bd1647013e4c6d (diff) | |
| download | ydb-296eff11b1bb65653a0a388b78f373ff7df5d447.tar.gz | |
YQ-804: replace mkql allocator for extra stats
ref:9cfcc03f22a9dad03b0473bbba5c6db52d702eb8
-rw-r--r-- | ydb/library/yql/minikql/aligned_page_pool.cpp | 45 |
-rw-r--r-- | ydb/library/yql/minikql/mkql_alloc.h | 35 |
2 files changed, 73 insertions, 7 deletions
diff --git a/ydb/library/yql/minikql/aligned_page_pool.cpp b/ydb/library/yql/minikql/aligned_page_pool.cpp
index e015c255c8..563c0b5803 100644
--- a/ydb/library/yql/minikql/aligned_page_pool.cpp
+++ b/ydb/library/yql/minikql/aligned_page_pool.cpp
@@ -35,11 +35,7 @@ public:
     ~TGlobalPagePool() {
         void* addr = nullptr;
         while (Pages.Dequeue(&addr)) {
-#ifdef _win_
-            Y_VERIFY_DEBUG(::VirtualFree(addr, 0, MEM_RELEASE), "VirtualFree failed: %s", LastSystemErrorText());
-#else
-            Y_VERIFY_DEBUG(0 == ::munmap(addr, PageSize), "munmap failed: %s", LastSystemErrorText());
-#endif
+            FreePage(addr);
         }
     }
 
@@ -54,8 +50,12 @@ public:
     }
 
     void PushPage(void* addr) {
+#ifdef PROFILE_MEMORY_ALLOCATIONS
+        FreePage(addr);
+#else
         AtomicIncrement(Count);
         Pages.Enqueue(addr);
+#endif
     }
 
     ui64 GetPageCount() const {
@@ -71,6 +71,15 @@ public:
     }
 
 private:
+    void FreePage(void* addr) {
+#ifdef _win_
+        Y_VERIFY_DEBUG(::VirtualFree(addr, 0, MEM_RELEASE), "VirtualFree failed: %s", LastSystemErrorText());
+#else
+        Y_VERIFY_DEBUG(0 == ::munmap(addr, PageSize), "munmap failed: %s", LastSystemErrorText());
+#endif
+    }
+
+private:
     const size_t PageSize;
     TAtomic Count = 0;
     TLockFreeStack<void*> Pages;
@@ -121,7 +130,11 @@ TAlignedPagePool::~TAlignedPagePool() {
     size_t activeBlocksSize = 0;
     for (auto it = ActiveBlocks.cbegin(); ActiveBlocks.cend() != it; ActiveBlocks.erase(it++)) {
         activeBlocksSize += it->second;
+#ifdef PROFILE_MEMORY_ALLOCATIONS
+        ReturnBlock(it->first, it->second);
+#else
         Free(it->first, it->second);
+#endif
     }
 
     if (activeBlocksSize > 0 || FreePages.size() != AllPages.size() || OffloadedActiveBytes) {
@@ -226,18 +239,33 @@ void* TAlignedPagePool::GetPage() {
     }
 
     ++PageMissCount;
+
+#ifdef PROFILE_MEMORY_ALLOCATIONS
+    const auto res = GetBlock(POOL_PAGE_SIZE);
+#else
     const auto res = Alloc(POOL_PAGE_SIZE);
+#endif
+
     AllPages.emplace(res);
     return res;
 }
 
 void TAlignedPagePool::ReturnPage(void* addr) noexcept {
     Y_VERIFY_DEBUG(AllPages.find(addr) != AllPages.end());
+#ifdef PROFILE_MEMORY_ALLOCATIONS
+    ReturnBlock(addr, POOL_PAGE_SIZE);
+#else
     FreePages.emplace(addr);
+#endif
 }
 
 void* TAlignedPagePool::GetBlock(size_t size) {
     Y_VERIFY_DEBUG(size >= POOL_PAGE_SIZE);
+
+#ifdef PROFILE_MEMORY_ALLOCATIONS
+    OffloadAlloc(size);
+    return malloc(size);
+#else
     if (size == POOL_PAGE_SIZE) {
         return GetPage();
     } else {
@@ -245,16 +273,23 @@ void* TAlignedPagePool::GetBlock(size_t size) {
         Y_VERIFY_DEBUG(ActiveBlocks.emplace(ptr, size).second);
         return ptr;
     }
+#endif
 }
 
 void TAlignedPagePool::ReturnBlock(void* ptr, size_t size) noexcept {
     Y_VERIFY_DEBUG(size >= POOL_PAGE_SIZE);
+
+#ifdef PROFILE_MEMORY_ALLOCATIONS
+    OffloadFree(size);
+    free(ptr);
+#else
     if (size == POOL_PAGE_SIZE) {
         ReturnPage(ptr);
     } else {
         Free(ptr, size);
         Y_VERIFY_DEBUG(ActiveBlocks.erase(ptr));
     }
+#endif
 }
 
 void* TAlignedPagePool::Alloc(size_t size) {
diff --git a/ydb/library/yql/minikql/mkql_alloc.h b/ydb/library/yql/minikql/mkql_alloc.h
index bf17e527bc..33d02f3a77 100644
--- a/ydb/library/yql/minikql/mkql_alloc.h
+++ b/ydb/library/yql/minikql/mkql_alloc.h
@@ -293,6 +293,13 @@ private:
 void* MKQLAllocSlow(size_t sz, TAllocState* state);
 inline void* MKQLAllocFastDeprecated(size_t sz, TAllocState* state) {
     Y_VERIFY_DEBUG(state);
+
+#ifdef PROFILE_MEMORY_ALLOCATIONS
+    auto ret = (TAllocState::TListEntry*)malloc(sizeof(TAllocState::TListEntry) + sz);
+    ret->Link(&state->OffloadedBlocksRoot);
+    return ret + 1;
+#endif
+
     auto currPage = state->CurrentPage;
     if (Y_LIKELY(currPage->Offset + sz <= currPage->Capacity)) {
         void* ret = (char*)currPage + currPage->Offset;
@@ -306,7 +313,14 @@ inline void* MKQLAllocFastDeprecated(size_t sz, TAllocState* state) {
 
 inline void* MKQLAllocFastWithSize(size_t sz, TAllocState* state) {
     Y_VERIFY_DEBUG(state);
-    if (state->SupportsSizedAllocators && sz > MaxPageUserData) {
+
+    bool useMemalloc = state->SupportsSizedAllocators && sz > MaxPageUserData;
+
+#ifdef PROFILE_MEMORY_ALLOCATIONS
+    useMemalloc = true;
+#endif
+
+    if (useMemalloc) {
         state->OffloadAlloc(sizeof(TAllocState::TListEntry) + sz);
         auto ret = (TAllocState::TListEntry*)malloc(sizeof(TAllocState::TListEntry) + sz);
         ret->Link(&state->OffloadedBlocksRoot);
@@ -331,6 +345,16 @@ inline void MKQLFreeDeprecated(const void* p) noexcept {
         return;
     }
 
+#ifdef PROFILE_MEMORY_ALLOCATIONS
+    TAllocState *state = TlsAllocState;
+    Y_VERIFY_DEBUG(state);
+
+    auto entry = (TAllocState::TListEntry*)(mem) - 1;
+    entry->Unlink();
+    free(entry);
+    return;
+#endif
+
     TAllocPageHeader* header = (TAllocPageHeader*)TAllocState::GetPageStart(p);
     if (Y_LIKELY(--header->UseCount != 0)) {
         return;
@@ -345,7 +369,14 @@ inline void MKQLFreeFastWithSize(const void* mem, size_t sz, TAllocState* state)
     }
 
     Y_VERIFY_DEBUG(state);
-    if (state->SupportsSizedAllocators && sz > MaxPageUserData) {
+
+    bool useFree = state->SupportsSizedAllocators && sz > MaxPageUserData;
+
+#ifdef PROFILE_MEMORY_ALLOCATIONS
+    useFree = true;
+#endif
+
+    if (useFree) {
         auto entry = (TAllocState::TListEntry*)(mem) - 1;
         entry->Unlink();
         free(entry);
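The pattern this commit introduces: when the build defines PROFILE_MEMORY_ALLOCATIONS, the pool stops caching and reusing pages and instead routes every block through plain malloc/free, keeping only the "offloaded" byte counters. That way each allocation stays visible to external memory profilers (and to the extra stats mentioned in the commit message) instead of being hidden inside the pool's freelist. The sketch below is a minimal, self-contained illustration of that idea under stated assumptions; it is not the ydb code. TDemoPool, FreeBlocks, and OffloadedBytes are hypothetical names used only for this example, while PROFILE_MEMORY_ALLOCATIONS and the malloc/free routing mirror the diff above.

```cpp
#include <cstdio>
#include <cstdlib>
#include <vector>

// Hypothetical demo pool. With PROFILE_MEMORY_ALLOCATIONS defined, every
// block goes straight to malloc/free (visible to allocation profilers) and
// only a byte counter is kept; without it, freed blocks are cached and
// reused, which hides them from per-allocation profiling.
class TDemoPool {
public:
    static constexpr size_t PageSize = 4096;

    void* GetBlock() {
#ifdef PROFILE_MEMORY_ALLOCATIONS
        OffloadedBytes += PageSize;      // account the block as "offloaded"
        return std::malloc(PageSize);    // real allocation, profiler sees it
#else
        if (!FreeBlocks.empty()) {       // reuse a cached block if possible
            void* p = FreeBlocks.back();
            FreeBlocks.pop_back();
            return p;
        }
        return std::malloc(PageSize);
#endif
    }

    void ReturnBlock(void* p) {
#ifdef PROFILE_MEMORY_ALLOCATIONS
        OffloadedBytes -= PageSize;
        std::free(p);                    // release immediately, no caching
#else
        FreeBlocks.push_back(p);         // cache for later reuse
#endif
    }

    size_t GetOffloadedBytes() const { return OffloadedBytes; }

    ~TDemoPool() {
        for (void* p : FreeBlocks) {
            std::free(p);                // drain whatever the cache still holds
        }
    }

private:
    std::vector<void*> FreeBlocks;
    size_t OffloadedBytes = 0;
};

int main() {
    TDemoPool pool;
    void* a = pool.GetBlock();
    void* b = pool.GetBlock();
    pool.ReturnBlock(a);
    pool.ReturnBlock(b);
    std::printf("offloaded bytes now: %zu\n", pool.GetOffloadedBytes());
    return 0;
}
```

Building the sketch with -DPROFILE_MEMORY_ALLOCATIONS switches it to the pass-through behavior; without the define it behaves like a simple caching pool, which is the same trade-off the patch makes at compile time.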