author | aozeritsky <aozeritsky@ydb.tech> | 2023-07-06 12:13:22 +0300 |
---|---|---|
committer | aozeritsky <aozeritsky@ydb.tech> | 2023-07-06 12:13:22 +0300 |
commit | b7e0a08cab9583cb83546494333d0c0f87260be2 (patch) | |
tree | 9dfb7e6e600e3d08c63684988556b877851fabc2 | |
parent | 06cc85228c999258c5349c0e5b283b3d53892247 (diff) | |
download | ydb-b7e0a08cab9583cb83546494333d0c0f87260be2.tar.gz | |
Alloc 32 pages at once
-rw-r--r-- | ydb/library/yql/minikql/aligned_page_pool.cpp | 273 |
-rw-r--r-- | ydb/library/yql/minikql/aligned_page_pool.h | 46 |
-rw-r--r-- | ydb/library/yql/minikql/aligned_page_pool_ut.cpp | 140 |
-rw-r--r-- | ydb/library/yql/minikql/ut/CMakeLists.darwin-x86_64.txt | 1 |
-rw-r--r-- | ydb/library/yql/minikql/ut/CMakeLists.linux-aarch64.txt | 1 |
-rw-r--r-- | ydb/library/yql/minikql/ut/CMakeLists.linux-x86_64.txt | 1 |
-rw-r--r-- | ydb/library/yql/minikql/ut/CMakeLists.windows-x86_64.txt | 1 |
-rw-r--r-- | ydb/library/yql/minikql/ut/ya.make | 1 |
8 files changed, 357 insertions, 107 deletions
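The core change: instead of mapping each 64 KiB pool page (or `size + POOL_PAGE_SIZE`) per request and trimming the alignment slack, `Alloc()` now maps the request plus `ALLOC_AHEAD_PAGES` (31) extra pool pages in a single call, aligns the result, and parks every surviving 64 KiB-aligned surplus page in the pool's free list — so one syscall can feed up to 32 page requests. A minimal POSIX-only sketch of that idea, assuming illustrative names (`kPoolPageSize`, `kAheadPages`, `TDemoPagePool`) that are not the YDB classes:

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kPoolPageSize = 64 * 1024; // mirrors POOL_PAGE_SIZE
constexpr size_t kAheadPages = 31;          // mirrors ALLOC_AHEAD_PAGES

class TDemoPagePool {
public:
    // Returns one 64 KiB-aligned page; maps 32 pages at once on a miss.
    void* GetPage() {
        if (!FreePages.empty()) {
            void* page = FreePages.back();
            FreePages.pop_back();
            return page; // fast path: no syscall at all
        }
        const size_t mapSize = (1 + kAheadPages) * kPoolPageSize;
        void* mem = ::mmap(nullptr, mapSize, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem == MAP_FAILED) {
            return nullptr;
        }
        const auto base = reinterpret_cast<uintptr_t>(mem);
        const uintptr_t aligned = (base + kPoolPageSize - 1) & ~(uintptr_t(kPoolPageSize) - 1);
        if (aligned != base) {
            ::munmap(mem, aligned - base); // trim the unaligned prefix
        }
        const uintptr_t end = base + mapSize;
        uintptr_t page = aligned + kPoolPageSize;
        for (; page + kPoolPageSize <= end; page += kPoolPageSize) {
            FreePages.push_back(reinterpret_cast<void*>(page)); // stockpile the surplus
        }
        if (page < end) {
            ::munmap(reinterpret_cast<void*>(page), end - page); // trim the tail
        }
        return reinterpret_cast<void*>(aligned);
    }

    void ReturnPage(void* page) {
        FreePages.push_back(page);
    }

private:
    std::vector<void*> FreePages;
};
```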
diff --git a/ydb/library/yql/minikql/aligned_page_pool.cpp b/ydb/library/yql/minikql/aligned_page_pool.cpp
index d5d83b368c..f72cf62d25 100644
--- a/ydb/library/yql/minikql/aligned_page_pool.cpp
+++ b/ydb/library/yql/minikql/aligned_page_pool.cpp
@@ -27,6 +27,7 @@ static_assert(MaxMidSize == 64 * 1024 * 1024, "Upper memory block 64 Mb");
 
 namespace {
 
+template<typename T>
 class TGlobalPagePool {
 public:
     TGlobalPagePool(size_t pageSize)
@@ -73,11 +74,7 @@ public:
 
 private:
     void FreePage(void* addr) {
-#ifdef _win_
-        Y_VERIFY_DEBUG(::VirtualFree(addr, 0, MEM_RELEASE), "VirtualFree failed: %s", LastSystemErrorText());
-#else
-        Y_VERIFY_DEBUG(0 == ::munmap(addr, PageSize), "munmap failed: %s", LastSystemErrorText());
-#endif
+        Y_VERIFY_DEBUG(0 == T::Munmap(addr, PageSize), "Munmap failed: %s", LastSystemErrorText());
     }
 
 private:
@@ -86,30 +83,104 @@ private:
     TLockFreeStack<void*> Pages;
 };
 
+template<typename T>
 class TGlobalPools {
 public:
-    static TGlobalPools& Instance() {
-        return *Singleton<TGlobalPools>();
+    static TGlobalPools<T>& Instance() {
+        return *Singleton<TGlobalPools<T>>();
     }
 
-    TGlobalPagePool& Get(ui32 index) {
+    TGlobalPagePool<T>& Get(ui32 index) {
         return *Pools[index];
     }
 
     TGlobalPools() {
+        Reset();
+    }
+
+    void Reset()
+    {
+        Pools.clear();
         Pools.reserve(MidLevels + 1);
         for (ui32 i = 0; i <= MidLevels; ++i) {
-            Pools.emplace_back(MakeHolder<TGlobalPagePool>(TAlignedPagePool::POOL_PAGE_SIZE << i));
+            Pools.emplace_back(MakeHolder<TGlobalPagePool<T>>(TAlignedPagePool::POOL_PAGE_SIZE << i));
        }
     }
 
 private:
-    TVector<THolder<TGlobalPagePool>> Pools;
+    TVector<THolder<TGlobalPagePool<T>>> Pools;
 };
 
 } // unnamed
 
+#ifdef _win_
+#define MAP_FAILED (void*)(-1)
+inline void* TSystemMmap::Mmap(size_t size)
+{
+    if (auto res = ::VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE)) {
+        return res;
+    } else {
+        return MAP_FAILED;
+    }
+}
+
+inline int TSystemMmap::Munmap(void* addr, size_t size)
+{
+    Y_VERIFY(AlignUp(addr, SYS_PAGE_SIZE) == addr, "Got unaligned address");
+    Y_VERIFY(AlignUp(size, SYS_PAGE_SIZE) == size, "Got unaligned size");
+    return !::VirtualFree(addr, size, MEM_DECOMMIT);
+}
+#else
+inline void* TSystemMmap::Mmap(size_t size)
+{
+    return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
+}
+
+inline int TSystemMmap::Munmap(void* addr, size_t size)
+{
+    return ::munmap(addr, size);
+}
+#endif
+
+std::function<void(size_t size)> TFakeAlignedMmap::OnMmap = {};
+std::function<void(void* addr, size_t size)> TFakeAlignedMmap::OnMunmap = {};
+
+void* TFakeAlignedMmap::Mmap(size_t size)
+{
+    if (OnMmap) {
+        OnMmap(size);
+    }
+    return reinterpret_cast<void*>(TAlignedPagePool::POOL_PAGE_SIZE);
+}
+
+int TFakeAlignedMmap::Munmap(void* addr, size_t size)
+{
+    if (OnMunmap) {
+        OnMunmap(addr, size);
+    }
+    return 0;
+}
+
+std::function<void(size_t size)> TFakeUnalignedMmap::OnMmap = {};
+std::function<void(void* addr, size_t size)> TFakeUnalignedMmap::OnMunmap = {};
+
+void* TFakeUnalignedMmap::Mmap(size_t size)
+{
+    if (OnMmap) {
+        OnMmap(size);
+    }
+    return reinterpret_cast<void*>(TAlignedPagePool::POOL_PAGE_SIZE+1);
+}
+
+int TFakeUnalignedMmap::Munmap(void* addr, size_t size)
+{
+    if (OnMunmap) {
+        OnMunmap(addr, size);
+    }
+    return 0;
+}
+
 TAlignedPagePoolCounters::TAlignedPagePoolCounters(::NMonitoring::TDynamicCounterPtr countersRoot, const TString& name) {
     if (!countersRoot || name.empty())
         return;
@@ -120,7 +191,8 @@ TAlignedPagePoolCounters::TAlignedPagePoolCounters(::NMonitoring::TDynamicCounte
     LostPagesBytesFreeCntr = subGroup->GetCounter(name + "/LostPagesBytesFreed", true);
 }
 
-TAlignedPagePool::~TAlignedPagePool() {
+template<typename T>
+TAlignedPagePoolImpl<T>::~TAlignedPagePoolImpl() {
     if (CheckLostMem && !UncaughtException()) {
         Y_VERIFY_DEBUG(TotalAllocated == FreePages.size() * POOL_PAGE_SIZE,
             "memory leak; Expected %ld, actual %ld (%ld page(s), %ld offloaded); allocator created at: %s",
@@ -150,7 +222,7 @@ TAlignedPagePool::~TAlignedPagePool() {
             AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes, AllPages.size());
 
     for (auto &ptr : AllPages) {
-        TGlobalPools::Instance().Get(0).PushPage(ptr);
+        TGlobalPools<T>::Instance().Get(0).PushPage(ptr);
     }
 
     if (Counters.TotalBytesAllocatedCntr) {
@@ -162,7 +234,8 @@ TAlignedPagePool::~TAlignedPagePool() {
     TotalAllocated = 0;
 }
 
-void TAlignedPagePool::ReleaseFreePages() {
+template<typename T>
+void TAlignedPagePoolImpl<T>::ReleaseFreePages() {
     TotalAllocated -= FreePages.size() * POOL_PAGE_SIZE;
     if (Counters.TotalBytesAllocatedCntr) {
         (*Counters.TotalBytesAllocatedCntr) -= FreePages.size() * POOL_PAGE_SIZE;
@@ -170,11 +243,12 @@ void TAlignedPagePool::ReleaseFreePages() {
 
     for (; !FreePages.empty(); FreePages.pop()) {
         AllPages.erase(FreePages.top());
-        TGlobalPools::Instance().Get(0).PushPage(FreePages.top());
+        TGlobalPools<T>::Instance().Get(0).PushPage(FreePages.top());
     }
 }
 
-void TAlignedPagePool::OffloadAlloc(ui64 size) {
+template<typename T>
+void TAlignedPagePoolImpl<T>::OffloadAlloc(ui64 size) {
     if (Limit && TotalAllocated + size > Limit && !TryIncreaseLimit(TotalAllocated + size)) {
         throw TMemoryLimitExceededException();
     }
@@ -204,7 +278,8 @@ void TAlignedPagePool::OffloadAlloc(ui64 size) {
     UpdatePeaks();
 }
 
-void TAlignedPagePool::OffloadFree(ui64 size) noexcept {
+template<typename T>
+void TAlignedPagePoolImpl<T>::OffloadFree(ui64 size) noexcept {
     TotalAllocated -= size;
     OffloadedActiveBytes -= size;
     if (Counters.TotalBytesAllocatedCntr) {
@@ -212,7 +287,8 @@ void TAlignedPagePool::OffloadFree(ui64 size) noexcept {
     }
 }
 
-void* TAlignedPagePool::GetPage() {
+template<typename T>
+void* TAlignedPagePoolImpl<T>::GetPage() {
     ++PageAllocCount;
     if (!FreePages.empty()) {
         ++PageHitCount;
@@ -225,7 +301,7 @@ void* TAlignedPagePool::GetPage() {
         throw TMemoryLimitExceededException();
     }
 
-    if (const auto ptr = TGlobalPools::Instance().Get(0).GetPage()) {
+    if (const auto ptr = TGlobalPools<T>::Instance().Get(0).GetPage()) {
         TotalAllocated += POOL_PAGE_SIZE;
         if (AllocNotifyCallback) {
             AllocNotifyCurrentBytes += POOL_PAGE_SIZE;
@@ -252,7 +328,8 @@ void* TAlignedPagePool::GetPage() {
     return res;
 }
 
-void TAlignedPagePool::ReturnPage(void* addr) noexcept {
+template<typename T>
+void TAlignedPagePoolImpl<T>::ReturnPage(void* addr) noexcept {
     Y_VERIFY_DEBUG(AllPages.find(addr) != AllPages.end());
 #ifdef PROFILE_MEMORY_ALLOCATIONS
     ReturnBlock(addr, POOL_PAGE_SIZE);
@@ -261,7 +338,8 @@
 #endif
 }
 
-void* TAlignedPagePool::GetBlock(size_t size) {
+template<typename T>
+void* TAlignedPagePoolImpl<T>::GetBlock(size_t size) {
     Y_VERIFY_DEBUG(size >= POOL_PAGE_SIZE);
 
 #ifdef PROFILE_MEMORY_ALLOCATIONS
@@ -283,7 +361,8 @@
 #endif
 }
 
-void TAlignedPagePool::ReturnBlock(void* ptr, size_t size) noexcept {
+template<typename T>
+void TAlignedPagePoolImpl<T>::ReturnBlock(void* ptr, size_t size) noexcept {
     Y_VERIFY_DEBUG(size >= POOL_PAGE_SIZE);
 
 #ifdef PROFILE_MEMORY_ALLOCATIONS
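The hunks above turn the allocator into a template over its mmap provider: every direct `::mmap`/`::munmap`/`VirtualAlloc` call becomes `T::Mmap`/`T::Munmap`, and `TFakeAlignedMmap`/`TFakeUnalignedMmap` expose `std::function` hooks so unit tests can observe (or stub out) the system calls. A minimal sketch of that pattern, assuming hypothetical stand-ins `TPosixMmap`, `TRecordingMmap`, and `TPageArena` rather than the classes in this commit:

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <functional>

struct TPosixMmap {
    static void* Mmap(size_t size) {
        return ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }
    static int Munmap(void* addr, size_t size) {
        return ::munmap(addr, size);
    }
};

struct TRecordingMmap {
    // Test hooks, same shape as the OnMmap/OnMunmap members introduced above.
    static inline std::function<void(size_t)> OnMmap;
    static inline std::function<void(void*, size_t)> OnMunmap;

    static void* Mmap(size_t size) {
        if (OnMmap) {
            OnMmap(size);
        }
        return TPosixMmap::Mmap(size); // still maps for real, but observable
    }
    static int Munmap(void* addr, size_t size) {
        if (OnMunmap) {
            OnMunmap(addr, size);
        }
        return TPosixMmap::Munmap(addr, size);
    }
};

// The pool only ever says TMmap::Mmap / TMmap::Munmap, so swapping the
// provider is a template argument, not a runtime branch.
template <typename TMmap = TPosixMmap>
class TPageArena {
public:
    void* Reserve(size_t size) { return TMmap::Mmap(size); }
    int Release(void* addr, size_t size) { return TMmap::Munmap(addr, size); }
};

// Usage in a test: count munmap calls without touching the allocator code.
inline int CountReleases(size_t size) {
    int munmaps = 0;
    TRecordingMmap::OnMunmap = [&](void*, size_t) { ++munmaps; };
    TPageArena<TRecordingMmap> arena;
    void* p = arena.Reserve(size);
    arena.Release(p, size);
    TRecordingMmap::OnMunmap = {};
    return munmaps;
}
```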
@@ -299,7 +378,8 @@
 #endif
 }
 
-void* TAlignedPagePool::Alloc(size_t size) {
+template<typename T>
+void* TAlignedPagePoolImpl<T>::Alloc(size_t size) {
     void* res = nullptr;
     size = AlignUp(size, SYS_PAGE_SIZE);
 
@@ -314,11 +394,11 @@ void* TAlignedPagePool::Alloc(size_t size) {
         }
     }
 
-    if (size > TAlignedPagePool::POOL_PAGE_SIZE && size <= MaxMidSize) {
+    if (size > POOL_PAGE_SIZE && size <= MaxMidSize) {
         size = FastClp2(size);
-        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
+        auto level = LeastSignificantBit(size) - LeastSignificantBit(POOL_PAGE_SIZE);
         Y_VERIFY_DEBUG(level >= 1 && level <= MidLevels);
-        if (res = TGlobalPools::Instance().Get(level).GetPage()) {
+        if (res = TGlobalPools<T>::Instance().Get(level).GetPage()) {
             TotalAllocated += size;
             if (AllocNotifyCallback) {
                 AllocNotifyCurrentBytes += size;
@@ -333,75 +413,60 @@
     }
 
     if (!res) {
-#ifdef _win_
-        res = ::VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
-        if (Y_UNLIKELY(0 == res)) {
-            ythrow yexception() << "VirtualAlloc failed to allocate " << size << " bytes: " << LastSystemErrorText();
-        }
-        Y_VERIFY((reinterpret_cast<uintptr_t>(res) & PAGE_ADDR_MASK) == reinterpret_cast<uintptr_t>(res), "Got unaligned address");
-        TotalAllocated += size;
-        if (AllocNotifyCallback) {
-            AllocNotifyCurrentBytes += size;
-        }
-        if (Counters.TotalBytesAllocatedCntr) {
-            (*Counters.TotalBytesAllocatedCntr) += size;
-        }
-#else
-        void* mem = ::mmap(nullptr, size + POOL_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
+        auto allocSize = size + ALLOC_AHEAD_PAGES * POOL_PAGE_SIZE;
+        void* mem = T::Mmap(allocSize);
         if (Y_UNLIKELY(MAP_FAILED == mem)) {
-            ythrow yexception() << "mmap failed to allocate " << (size + POOL_PAGE_SIZE) << " bytes: " << LastSystemErrorText();
+            ythrow yexception() << "Mmap failed to allocate " << (size + POOL_PAGE_SIZE) << " bytes: " << LastSystemErrorText();
        }
-        if ((reinterpret_cast<uintptr_t>(mem) & PAGE_ADDR_MASK) == reinterpret_cast<uintptr_t>(mem)) {
-            // We got already aligned address
-            res = mem;
-            if (AlignUp(size, POOL_PAGE_SIZE) == size) {
-                // Extra space is also page-aligned. Put it to the free page list
-                const auto extraPage = reinterpret_cast<ui8*>(mem) + size;
-                AllPages.emplace(extraPage);
-                FreePages.emplace(extraPage);
-                TotalAllocated += size + POOL_PAGE_SIZE;
-                if (AllocNotifyCallback) {
-                    AllocNotifyCurrentBytes += size + POOL_PAGE_SIZE;
-                }
-                if (Counters.TotalBytesAllocatedCntr) {
-                    (*Counters.TotalBytesAllocatedCntr) += size + POOL_PAGE_SIZE;
-                }
-            } else {
-                // Return extra space to the system
-                if (Y_UNLIKELY(0 != ::munmap(reinterpret_cast<ui8*>(mem) + size, POOL_PAGE_SIZE))) {
-                    ythrow yexception() << "munmap(0x" << IntToString<16>(reinterpret_cast<uintptr_t>(mem) + size)
-                        << ", " << (0 + POOL_PAGE_SIZE) << ") failed: " << LastSystemErrorText();
-                }
-                TotalAllocated += size;
-                if (AllocNotifyCallback) {
-                    AllocNotifyCurrentBytes += size;
-                }
-                if (Counters.TotalBytesAllocatedCntr) {
-                    (*Counters.TotalBytesAllocatedCntr) += size;
-                }
+        res = AlignUp(mem, POOL_PAGE_SIZE);
+        const size_t off = reinterpret_cast<intptr_t>(res) - reinterpret_cast<intptr_t>(mem);
+        if (Y_UNLIKELY(off)) {
+            // unmap prefix
+            if (Y_UNLIKELY(0 != T::Munmap(mem, off))) {
+                ythrow yexception() << "Munmap(0x"
+                    << IntToString<16>(reinterpret_cast<uintptr_t>(mem))
+                    << ", " << off << ") failed: " << LastSystemErrorText();
             }
-        } else {
-            res = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(mem) & PAGE_ADDR_MASK) + POOL_PAGE_SIZE);
-            const size_t off = reinterpret_cast<intptr_t>(res) - reinterpret_cast<intptr_t>(mem);
-            // Return extra space before/after aligned region to the system
-            if (Y_UNLIKELY(0 != ::munmap(mem, off))) {
-                ythrow yexception() << "munmap(0x" << IntToString<16>(reinterpret_cast<uintptr_t>(mem)) << ", " << off
+        }
+        // Extra space is also page-aligned. Put it to the free page list
+        auto alignedSize = AlignUp(size, POOL_PAGE_SIZE);
+        ui64 extraPages = (allocSize - off - alignedSize) / POOL_PAGE_SIZE;
+        ui64 tail = (allocSize - off - alignedSize) % POOL_PAGE_SIZE;
+        auto extraPage = reinterpret_cast<ui8*>(res) + alignedSize;
+        for (ui64 i = 0; i < extraPages; ++i) {
+            AllPages.emplace(extraPage);
+            FreePages.emplace(extraPage);
+            extraPage += POOL_PAGE_SIZE;
+        }
+        if (size != alignedSize) {
+            // unmap unaligned hole
+            if (Y_UNLIKELY(0 != T::Munmap(reinterpret_cast<ui8*>(res) + size, alignedSize - size))) {
+                ythrow yexception() << "Munmap(0x"
+                    << IntToString<16>(reinterpret_cast<uintptr_t>(reinterpret_cast<ui8*>(res)+size))
+                    << ", " << alignedSize - size << ") failed: " << LastSystemErrorText();
             }
-            if (Y_UNLIKELY(0 != ::munmap(reinterpret_cast<ui8*>(res) + size, POOL_PAGE_SIZE - off))) {
-                ythrow yexception() << "munmap(0x" << IntToString<16>(reinterpret_cast<uintptr_t>(res) + size)
-                    << ", " << (POOL_PAGE_SIZE - off) << ") failed: " << LastSystemErrorText();
-            }
-            TotalAllocated += size;
-            if (AllocNotifyCallback) {
-                AllocNotifyCurrentBytes += size;
-            }
-            if (Counters.TotalBytesAllocatedCntr) {
-                (*Counters.TotalBytesAllocatedCntr) += size;
+        }
+        if (tail) {
+            // unmap suffix
+            Y_VERIFY_DEBUG(extraPage+tail <= reinterpret_cast<ui8*>(mem) + size + ALLOC_AHEAD_PAGES * POOL_PAGE_SIZE);
+            if (Y_UNLIKELY(0 != T::Munmap(extraPage, tail))) {
+                ythrow yexception() << "Munmap(0x"
+                    << IntToString<16>(reinterpret_cast<uintptr_t>(extraPage))
+                    << ", " << tail
+                    << ") failed: " << LastSystemErrorText();
             }
         }
-#endif
+        auto extraSize = extraPages * POOL_PAGE_SIZE;
+        auto totalSize = size + extraSize;
+        TotalAllocated += totalSize;
+        if (AllocNotifyCallback) {
+            AllocNotifyCurrentBytes += totalSize;
+        }
+        if (Counters.TotalBytesAllocatedCntr) {
+            (*Counters.TotalBytesAllocatedCntr) += totalSize;
+        }
     }
 
     if (Counters.AllocationsCntr) {
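The rewritten `Alloc()` above splits each mapping into an unaligned prefix (`off`), whole surplus pages (`extraPages`), and a sub-page suffix (`tail`). The arithmetic is easy to check in isolation; below is a standalone sketch of just that split, with illustrative names (`kPage`, `kAheadPages`, `SplitMapping`) rather than the real member code. For an aligned mapping and a page-sized request it yields 31 surplus pages and nothing trimmed; for a mapping that starts one byte past a page boundary it yields a 65535-byte prefix, 30 surplus pages, and a 1-byte tail — the same numbers the new unit tests assert.

```cpp
#include <cstdint>
#include <cstdio>

constexpr uint64_t kPage = 64 * 1024;  // 64 KiB pool page
constexpr uint64_t kAheadPages = 31;   // extra pages mapped per request

struct TLayout {
    uint64_t off;        // unaligned prefix to unmap
    uint64_t extraPages; // whole aligned pages that go to the free list
    uint64_t tail;       // unaligned suffix to unmap
};

constexpr uint64_t AlignUp(uint64_t v, uint64_t a) { return (v + a - 1) & ~(a - 1); }

constexpr TLayout SplitMapping(uint64_t mem, uint64_t size) {
    const uint64_t allocSize = size + kAheadPages * kPage; // what was mapped
    const uint64_t res = AlignUp(mem, kPage);              // aligned result address
    const uint64_t off = res - mem;                        // prefix returned to the OS
    const uint64_t alignedSize = AlignUp(size, kPage);     // request rounded to pages
    return TLayout{
        off,
        (allocSize - off - alignedSize) / kPage,           // surplus kept in FreePages
        (allocSize - off - alignedSize) % kPage,           // leftover smaller than a page
    };
}

int main() {
    // Aligned mapping, page-sized request: nothing trimmed, 31 pages stockpiled.
    TLayout a = SplitMapping(kPage, kPage);
    std::printf("off=%llu extra=%llu tail=%llu\n", (unsigned long long)a.off,
                (unsigned long long)a.extraPages, (unsigned long long)a.tail);
    // Mapping one byte off a page boundary: 65535-byte prefix, 30 pages, 1-byte tail.
    TLayout b = SplitMapping(kPage + 1, kPage);
    std::printf("off=%llu extra=%llu tail=%llu\n", (unsigned long long)b.off,
                (unsigned long long)b.extraPages, (unsigned long long)b.tail);
}
```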
@@ -412,20 +477,17 @@ void* TAlignedPagePool::Alloc(size_t size) {
     return res;
 }
 
-void TAlignedPagePool::Free(void* ptr, size_t size) noexcept {
+template<typename T>
+void TAlignedPagePoolImpl<T>::Free(void* ptr, size_t size) noexcept {
     size = AlignUp(size, SYS_PAGE_SIZE);
     if (size <= MaxMidSize)
         size = FastClp2(size);
     if (size <= MaxMidSize) {
-        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
+        auto level = LeastSignificantBit(size) - LeastSignificantBit(POOL_PAGE_SIZE);
         Y_VERIFY_DEBUG(level >= 1 && level <= MidLevels);
-        TGlobalPools::Instance().Get(level).PushPage(ptr);
+        TGlobalPools<T>::Instance().Get(level).PushPage(ptr);
     } else {
-#ifdef _win_
-        Y_VERIFY(::VirtualFree(ptr, 0, MEM_RELEASE));
-#else
-        Y_VERIFY(!::munmap(ptr, size));
-#endif
+        Y_VERIFY(!T::Munmap(ptr, size));
     }
 
     Y_VERIFY_DEBUG(TotalAllocated >= size);
@@ -435,7 +497,8 @@ void TAlignedPagePool::Free(void* ptr, size_t size) noexcept {
     }
 }
 
-bool TAlignedPagePool::TryIncreaseLimit(ui64 required) {
+template<typename T>
+bool TAlignedPagePoolImpl<T>::TryIncreaseLimit(ui64 required) {
     if (!IncreaseMemoryLimitCallback) {
         return false;
     }
@@ -443,19 +506,31 @@ bool TAlignedPagePool::TryIncreaseLimit(ui64 required) {
     return Limit >= required;
 }
 
-ui64 TAlignedPagePool::GetGlobalPagePoolSize() {
+template<typename T>
+ui64 TAlignedPagePoolImpl<T>::GetGlobalPagePoolSize() {
     ui64 size = 0;
     for (size_t level = 0; level <= MidLevels; ++level) {
-        size += TGlobalPools::Instance().Get(level).GetSize();
+        size += TGlobalPools<T>::Instance().Get(level).GetSize();
     }
     return size;
 }
 
-void TAlignedPagePool::PrintStat(size_t usedPages, IOutputStream& out) const {
+template<typename T>
+void TAlignedPagePoolImpl<T>::PrintStat(size_t usedPages, IOutputStream& out) const {
     usedPages += GetFreePageCount();
     out << "Count of free pages: " << GetFreePageCount() << Endl;
-    out << "Allocated for blocks: " << (GetAllocated() - usedPages * TAlignedPagePool::POOL_PAGE_SIZE) << Endl;
+    out << "Allocated for blocks: " << (GetAllocated() - usedPages * POOL_PAGE_SIZE) << Endl;
     out << "Total allocated by lists: " << GetAllocated() << Endl;
 }
 
+template<typename T>
+void TAlignedPagePoolImpl<T>::ResetGlobalsUT()
+{
+    TGlobalPools<T>::Instance().Reset();
+}
+
+template class TAlignedPagePoolImpl<>;
+template class TAlignedPagePoolImpl<TFakeAlignedMmap>;
+template class TAlignedPagePoolImpl<TFakeUnalignedMmap>;
+
 } // NKikimr
diff --git a/ydb/library/yql/minikql/aligned_page_pool.h b/ydb/library/yql/minikql/aligned_page_pool.h
index eec114e129..9115500466 100644
--- a/ydb/library/yql/minikql/aligned_page_pool.h
+++ b/ydb/library/yql/minikql/aligned_page_pool.h
@@ -39,12 +39,38 @@ public:
     virtual ~TMemoryLimitExceededException() = default;
 };
 
-class TAlignedPagePool {
+class TSystemMmap {
+public:
+    static void* Mmap(size_t size);
+    static int Munmap(void* addr, size_t size);
+};
+
+class TFakeAlignedMmap {
+public:
+    static std::function<void(size_t size)> OnMmap;
+    static std::function<void(void* addr, size_t size)> OnMunmap;
+
+    static void* Mmap(size_t size);
+    static int Munmap(void* addr, size_t size);
+};
+
+class TFakeUnalignedMmap {
+public:
+    static std::function<void(size_t size)> OnMmap;
+    static std::function<void(void* addr, size_t size)> OnMunmap;
+
+    static void* Mmap(size_t size);
+    static int Munmap(void* addr, size_t size);
+};
+
+template<typename TMmap = TSystemMmap>
+class TAlignedPagePoolImpl {
 public:
     static constexpr ui64 POOL_PAGE_SIZE = 1ULL << 16; // 64k
     static constexpr ui64 PAGE_ADDR_MASK = ~(POOL_PAGE_SIZE - 1);
+    static constexpr ui64 ALLOC_AHEAD_PAGES = 31;
 
-    explicit TAlignedPagePool(const TSourceLocation& location,
+    explicit TAlignedPagePoolImpl(const TSourceLocation& location,
             const TAlignedPagePoolCounters& counters = TAlignedPagePoolCounters())
         : Counters(counters)
         , DebugInfo(TStringBuilder() << location)
@@ -54,13 +80,13 @@ public:
         }
     }
 
-    TAlignedPagePool(const TAlignedPagePool&) = delete;
-    TAlignedPagePool(TAlignedPagePool&& other) = delete;
+    TAlignedPagePoolImpl(const TAlignedPagePoolImpl&) = delete;
+    TAlignedPagePoolImpl(TAlignedPagePoolImpl&& other) = delete;
 
-    TAlignedPagePool& operator = (const TAlignedPagePool&) = delete;
-    TAlignedPagePool& operator = (TAlignedPagePool&& other) = delete;
+    TAlignedPagePoolImpl& operator = (const TAlignedPagePoolImpl&) = delete;
+    TAlignedPagePoolImpl& operator = (TAlignedPagePoolImpl&& other) = delete;
 
-    ~TAlignedPagePool();
+    ~TAlignedPagePoolImpl();
 
     inline size_t GetAllocated() const noexcept {
         return TotalAllocated;
@@ -86,7 +112,7 @@ public:
 
     void ReturnPage(void* addr) noexcept;
 
-    void Swap(TAlignedPagePool& other) {
+    void Swap(TAlignedPagePoolImpl& other) {
        DoSwap(FreePages, other.FreePages);
        DoSwap(AllPages, other.AllPages);
        DoSwap(ActiveBlocks, other.ActiveBlocks);
@@ -186,6 +212,8 @@ public:
        IncreaseMemoryLimitCallback = std::move(callback);
     }
 
+    static void ResetGlobalsUT();
+
 protected:
     void* Alloc(size_t size);
     void Free(void* ptr, size_t size) noexcept;
@@ -227,4 +255,6 @@ protected:
     TString DebugInfo;
 };
 
+using TAlignedPagePool = TAlignedPagePoolImpl<>;
+
 } // NKikimr
diff --git a/ydb/library/yql/minikql/aligned_page_pool_ut.cpp b/ydb/library/yql/minikql/aligned_page_pool_ut.cpp
new file mode 100644
index 0000000000..3afc09eb65
--- /dev/null
+++ b/ydb/library/yql/minikql/aligned_page_pool_ut.cpp
@@ -0,0 +1,140 @@
+#include "aligned_page_pool.h"
+
+#include <library/cpp/testing/unittest/registar.h>
+
+#include <util/system/info.h>
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+Y_UNIT_TEST_SUITE(TAlignedPagePoolTest) {
+
+Y_UNIT_TEST(AlignedMmapPageSize) {
+    TAlignedPagePool::ResetGlobalsUT();
+    TAlignedPagePoolImpl<TFakeAlignedMmap> alloc(__LOCATION__);
+
+    int munmaps = 0;
+    TFakeAlignedMmap::OnMunmap = [&](void* addr, size_t s) {
+        Y_UNUSED(addr);
+        Y_UNUSED(s);
+        munmaps ++;
+    };
+
+    auto size = TAlignedPagePool::POOL_PAGE_SIZE;
+    auto block = std::shared_ptr<void>(alloc.GetBlock(size), [&](void* addr) { alloc.ReturnBlock(addr, size); });
+    TFakeAlignedMmap::OnMunmap = {};
+    UNIT_ASSERT_EQUAL(0, munmaps);
+
+    UNIT_ASSERT_VALUES_EQUAL(reinterpret_cast<uintptr_t>(block.get()), TAlignedPagePool::POOL_PAGE_SIZE);
+
+    UNIT_ASSERT_VALUES_EQUAL(alloc.GetFreePageCount()
+        , TAlignedPagePool::ALLOC_AHEAD_PAGES);
+
+    UNIT_ASSERT_VALUES_EQUAL(alloc.GetAllocated()
+        , TAlignedPagePool::POOL_PAGE_SIZE + TAlignedPagePool::ALLOC_AHEAD_PAGES*TAlignedPagePool::POOL_PAGE_SIZE
+    );
+}
+
+#ifndef _win_
+Y_UNIT_TEST(UnalignedMmapPageSize) {
+    TAlignedPagePool::ResetGlobalsUT();
+    TAlignedPagePoolImpl<TFakeUnalignedMmap> alloc(__LOCATION__);
+
+    int munmaps = 0;
+    TFakeUnalignedMmap::OnMunmap = [&](void* addr, size_t s) {
+        Y_UNUSED(addr);
+        if (munmaps == 0) {
+            UNIT_ASSERT_VALUES_EQUAL(s, TAlignedPagePool::POOL_PAGE_SIZE - 1);
+        } else {
+            UNIT_ASSERT_VALUES_EQUAL(s, 1);
+        }
+        munmaps ++;
+    };
+
+    auto size = TAlignedPagePool::POOL_PAGE_SIZE;
+    auto block = std::shared_ptr<void>(alloc.GetBlock(size), [&](void* addr) { alloc.ReturnBlock(addr, size); });
+    TFakeUnalignedMmap::OnMunmap = {};
+    UNIT_ASSERT_EQUAL(2, munmaps);
+
+    UNIT_ASSERT_VALUES_EQUAL(reinterpret_cast<uintptr_t>(block.get()), 2 * TAlignedPagePool::POOL_PAGE_SIZE);
+
+    UNIT_ASSERT_VALUES_EQUAL(alloc.GetFreePageCount()
+        , TAlignedPagePool::ALLOC_AHEAD_PAGES - 1);
+
+    UNIT_ASSERT_VALUES_EQUAL(alloc.GetAllocated()
+        , TAlignedPagePool::POOL_PAGE_SIZE + (TAlignedPagePool::ALLOC_AHEAD_PAGES - 1) * TAlignedPagePool::POOL_PAGE_SIZE
+    );
+}
+
+Y_UNIT_TEST(AlignedMmapUnalignedSize) {
+    TAlignedPagePool::ResetGlobalsUT();
+    TAlignedPagePoolImpl<TFakeAlignedMmap> alloc(__LOCATION__);
+    auto smallSize = NSystemInfo::GetPageSize();
+    auto size = smallSize + 1024 * TAlignedPagePool::POOL_PAGE_SIZE;
+
+    int munmaps = 0;
+    TFakeAlignedMmap::OnMunmap = [&](void* addr, size_t s) {
+        if (munmaps == 0) {
+            UNIT_ASSERT_VALUES_EQUAL(s, TAlignedPagePool::POOL_PAGE_SIZE - smallSize);
+            UNIT_ASSERT_VALUES_EQUAL(reinterpret_cast<uintptr_t>(addr), TAlignedPagePool::POOL_PAGE_SIZE + size);
+        } else {
+            UNIT_ASSERT_VALUES_EQUAL(s, smallSize);
+            UNIT_ASSERT_VALUES_EQUAL(reinterpret_cast<uintptr_t>(addr), TAlignedPagePool::POOL_PAGE_SIZE + TAlignedPagePool::ALLOC_AHEAD_PAGES * TAlignedPagePool::POOL_PAGE_SIZE + size - smallSize);
+        }
+
+        munmaps ++;
+    };
+
+    auto block = std::shared_ptr<void>(alloc.GetBlock(size), [&](void* addr) { alloc.ReturnBlock(addr, size); });
+    TFakeAlignedMmap::OnMunmap = {};
+
+    UNIT_ASSERT_EQUAL(2, munmaps);
+
+    UNIT_ASSERT_VALUES_EQUAL(reinterpret_cast<uintptr_t>(block.get()), TAlignedPagePool::POOL_PAGE_SIZE);
+
+    UNIT_ASSERT_VALUES_EQUAL(alloc.GetFreePageCount()
+        , TAlignedPagePool::ALLOC_AHEAD_PAGES - 1);
+
+    UNIT_ASSERT_VALUES_EQUAL(alloc.GetAllocated()
+        , size + (TAlignedPagePool::ALLOC_AHEAD_PAGES - 1) * TAlignedPagePool::POOL_PAGE_SIZE
+    );
+}
+
+Y_UNIT_TEST(UnalignedMmapUnalignedSize) {
+    TAlignedPagePool::ResetGlobalsUT();
+    TAlignedPagePoolImpl<TFakeUnalignedMmap> alloc(__LOCATION__);
+    auto smallSize = NSystemInfo::GetPageSize();
+    auto size = smallSize + 1024 * TAlignedPagePool::POOL_PAGE_SIZE;
+    int munmaps = 0;
+    TFakeUnalignedMmap::OnMunmap = [&](void* addr, size_t s) {
+        Y_UNUSED(addr);
+        if (munmaps == 0) {
+            UNIT_ASSERT_VALUES_EQUAL(s, TAlignedPagePool::POOL_PAGE_SIZE - 1);
+        } else if (munmaps == 1) {
+            UNIT_ASSERT_VALUES_EQUAL(s, TAlignedPagePool::POOL_PAGE_SIZE - smallSize);
+        } else {
+            UNIT_ASSERT_VALUES_EQUAL(s, smallSize + 1);
+        }
+        munmaps ++;
+    };
+
+    auto block = std::shared_ptr<void>(alloc.GetBlock(size), [&](void* addr) { alloc.ReturnBlock(addr, size); });
+    TFakeUnalignedMmap::OnMunmap = {};
+    UNIT_ASSERT_EQUAL(3, munmaps);
+
+    UNIT_ASSERT_VALUES_EQUAL(reinterpret_cast<uintptr_t>(block.get()), 2 * TAlignedPagePool::POOL_PAGE_SIZE);
+
+    UNIT_ASSERT_VALUES_EQUAL(alloc.GetFreePageCount()
+        , TAlignedPagePool::ALLOC_AHEAD_PAGES - 2);
+
+    UNIT_ASSERT_VALUES_EQUAL(alloc.GetAllocated()
+        , size + (TAlignedPagePool::ALLOC_AHEAD_PAGES - 2) * TAlignedPagePool::POOL_PAGE_SIZE
+    );
+}
+
+#endif // _win_
+
+} // Y_UNIT_TEST_SUITE(TAlignedPagePoolTest)
+
+} // namespace NMiniKQL
+} // namespace NKikimr
diff --git a/ydb/library/yql/minikql/ut/CMakeLists.darwin-x86_64.txt b/ydb/library/yql/minikql/ut/CMakeLists.darwin-x86_64.txt
index d296bb1cd1..04c6363899 100644
--- a/ydb/library/yql/minikql/ut/CMakeLists.darwin-x86_64.txt
+++ b/ydb/library/yql/minikql/ut/CMakeLists.darwin-x86_64.txt
@@ -33,6 +33,7 @@ target_link_options(ydb-library-yql-minikql-ut PRIVATE
   CoreFoundation
 )
 target_sources(ydb-library-yql-minikql-ut PRIVATE
+  ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/aligned_page_pool_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/compact_hash_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/mkql_alloc_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/mkql_node_builder_ut.cpp
diff --git a/ydb/library/yql/minikql/ut/CMakeLists.linux-aarch64.txt b/ydb/library/yql/minikql/ut/CMakeLists.linux-aarch64.txt
index 550b9e00ed..debeb23a6c 100644
--- a/ydb/library/yql/minikql/ut/CMakeLists.linux-aarch64.txt
+++ b/ydb/library/yql/minikql/ut/CMakeLists.linux-aarch64.txt
@@ -36,6 +36,7 @@ target_link_options(ydb-library-yql-minikql-ut PRIVATE
   -ldl
 )
 target_sources(ydb-library-yql-minikql-ut PRIVATE
+  ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/aligned_page_pool_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/compact_hash_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/mkql_alloc_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/mkql_node_builder_ut.cpp
diff --git a/ydb/library/yql/minikql/ut/CMakeLists.linux-x86_64.txt b/ydb/library/yql/minikql/ut/CMakeLists.linux-x86_64.txt
index 72b628a1ec..3e006b3a97 100644
--- a/ydb/library/yql/minikql/ut/CMakeLists.linux-x86_64.txt
+++ b/ydb/library/yql/minikql/ut/CMakeLists.linux-x86_64.txt
@@ -37,6 +37,7 @@ target_link_options(ydb-library-yql-minikql-ut PRIVATE
   -ldl
 )
 target_sources(ydb-library-yql-minikql-ut PRIVATE
+  ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/aligned_page_pool_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/compact_hash_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/mkql_alloc_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/mkql_node_builder_ut.cpp
diff --git a/ydb/library/yql/minikql/ut/CMakeLists.windows-x86_64.txt b/ydb/library/yql/minikql/ut/CMakeLists.windows-x86_64.txt
index 2d572a10ec..9580b98938 100644
--- a/ydb/library/yql/minikql/ut/CMakeLists.windows-x86_64.txt
+++ b/ydb/library/yql/minikql/ut/CMakeLists.windows-x86_64.txt
@@ -26,6 +26,7 @@ target_link_libraries(ydb-library-yql-minikql-ut PUBLIC
   udf-service-exception_policy
 )
 target_sources(ydb-library-yql-minikql-ut PRIVATE
+  ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/aligned_page_pool_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/compact_hash_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/mkql_alloc_ut.cpp
   ${CMAKE_SOURCE_DIR}/ydb/library/yql/minikql/mkql_node_builder_ut.cpp
diff --git a/ydb/library/yql/minikql/ut/ya.make b/ydb/library/yql/minikql/ut/ya.make
index 92f2639be6..7b77bbeeec 100644
--- a/ydb/library/yql/minikql/ut/ya.make
+++ b/ydb/library/yql/minikql/ut/ya.make
@@ -12,6 +12,7 @@ ELSE()
 ENDIF()
 
 SRCS(
+    aligned_page_pool_ut.cpp
     compact_hash_ut.cpp
     mkql_alloc_ut.cpp
     mkql_node_builder_ut.cpp
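One detail worth calling out from the `Alloc()`/`Free()` hunks above: mid-sized blocks (between `POOL_PAGE_SIZE` and `MaxMidSize`) are rounded up to a power of two with `FastClp2`, and the global-pool bucket index is the difference of the least-significant-bit positions. A small standalone sketch of that bucket computation, assuming C++20's `<bit>` helpers as stand-ins for the util `FastClp2`/`LeastSignificantBit` functions (the constants mirror the 64 KiB page and 64 MiB upper bound from the sources):

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

constexpr uint64_t kPoolPageSize = 64 * 1024;      // 1ULL << 16
constexpr uint64_t kMaxMidSize = 64 * 1024 * 1024; // upper bound for pooled blocks

// Bucket index = log2(next power of two of size) - log2(pool page size).
constexpr int MidLevel(uint64_t size) {
    const uint64_t clp2 = std::bit_ceil(size);               // like FastClp2
    return std::countr_zero(clp2) - std::countr_zero(kPoolPageSize); // LSB difference
}

int main() {
    std::printf("%d\n", MidLevel(65537));       // rounds to 128 KiB -> level 1
    std::printf("%d\n", MidLevel(kMaxMidSize)); // 64 MiB -> level 10
}
```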