author     Devtools Arcadia <arcadia-devtools@yandex-team.ru>            2022-02-07 18:08:42 +0300
committer  Devtools Arcadia <arcadia-devtools@mous.vla.yp-c.yandex.net>  2022-02-07 18:08:42 +0300
commit     1110808a9d39d4b808aef724c861a2e1a38d2a69 (patch)
tree       e26c9fed0de5d9873cce7e00bc214573dc2195b7 /library/cpp/coroutine/engine/stack
intermediate changes
ref:cde9a383711a11544ce7e107a78147fb96cc4029
Diffstat (limited to 'library/cpp/coroutine/engine/stack')
22 files changed, 1760 insertions, 0 deletions
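Taken together, the new files add a coroutine stack allocation layer: stack_allocator.h exposes GetAllocator() and the IAllocator interface, stack.h wraps an allocation in the RAII TStackHolder, and stack_guards.h supplies the canary/page guards that the benchmarks and tests below check. A minimal usage sketch assembled from those headers and from alloc_bm.cpp (the function name Example, the 8-page size and the coroutine name are illustrative only, not part of the commit):

    #include <library/cpp/coroutine/engine/stack/stack_allocator.h>
    #include <util/system/yassert.h>

    void Example() {
        using namespace NCoro::NStack;

        // Pool-backed allocator with page guards; pass Nothing() instead of
        // TPoolAllocatorSettings{} to get the simple malloc/free allocator.
        THolder<IAllocator> allocator = GetAllocator(TPoolAllocatorSettings{}, EGuard::Page);

        // RAII: allocates in the constructor, returns the stack to the allocator
        // in the destructor.
        TStackHolder stack(*allocator, 8 * PageSize, "my_coroutine");

        // Writable workspace between the guard regions.
        TArrayRef<char> workspace = stack.Get();
        workspace[0] = 42;

        // With EGuard::Canary these compare the guard bytes against the canary
        // pattern; with EGuard::Page they always return true, because a guard
        // violation traps immediately through the protected page.
        Y_VERIFY(stack.LowerCanaryOk());
        Y_VERIFY(stack.UpperCanaryOk());
    }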
diff --git a/library/cpp/coroutine/engine/stack/benchmark/alloc_bm.cpp b/library/cpp/coroutine/engine/stack/benchmark/alloc_bm.cpp new file mode 100644 index 0000000000..38d713d274 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/benchmark/alloc_bm.cpp @@ -0,0 +1,316 @@ +#include <benchmark/benchmark.h> + +#include <util/generic/vector.h> +#include <util/system/yassert.h> + +#include <library/cpp/coroutine/engine/stack/stack_allocator.h> +#include <library/cpp/coroutine/engine/stack/stack_guards.h> +#include <library/cpp/coroutine/engine/stack/stack_pool.h> +#include <library/cpp/coroutine/engine/stack/stack_utils.h> + + +namespace NCoro::NStack::NBenchmark { + + const char* TestCoroName = "any_name"; + constexpr uint64_t BigCoroSize = PageSize * 25; + constexpr uint64_t SmallCoroSize = PageSize * 4; + constexpr uint64_t ManyStacks = 4096; + + void BasicOperations(TStackHolder& stack) { + Y_VERIFY(!stack.Get().empty()); + stack.LowerCanaryOk(); + stack.UpperCanaryOk(); + } + + void WriteStack(TStackHolder& stack) { + auto memory = stack.Get(); + Y_VERIFY(!memory.empty()); + stack.LowerCanaryOk(); + stack.UpperCanaryOk(); + for (uint64_t i = PageSize / 2; i < memory.size(); i += PageSize * 2) { + memory[i] = 42; + } + } + + static void BM_GetAlignedMemory(benchmark::State& state) { + char* raw = nullptr; + char* aligned = nullptr; + for (auto _ : state) { + if (NCoro::NStack::GetAlignedMemory(state.range(0), raw, aligned)) { + free(raw); + } + } + } + BENCHMARK(BM_GetAlignedMemory)->RangeMultiplier(16)->Range(1, 1024 * 1024); + + static void BM_GetAlignedMemoryReleaseRss(benchmark::State& state) { + char* raw = nullptr; + char* aligned = nullptr; + for (auto _ : state) { + if (NCoro::NStack::GetAlignedMemory(state.range(0), raw, aligned)) { + const auto toFree = state.range(0) > 2 ? 
state.range(0) - 2 : 1; + ReleaseRss(aligned, toFree); + free(raw); + } + } + } + BENCHMARK(BM_GetAlignedMemoryReleaseRss)->RangeMultiplier(16)->Range(1, 1024 * 1024); + + static void BM_PoolAllocator(benchmark::State& state) { + auto allocator = GetAllocator(TPoolAllocatorSettings{}, (EGuard)state.range(0)); + for (auto _ : state) { + TStackHolder stack(*allocator, state.range(1), TestCoroName); + BasicOperations(stack); + } + } + BENCHMARK(BM_PoolAllocator) + ->Args({(int64_t)EGuard::Canary, BigCoroSize}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize}) + ->Args({(int64_t)EGuard::Page, BigCoroSize}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize}); + + static void BM_DefaultAllocator(benchmark::State& state) { + auto allocator = GetAllocator(Nothing(), (EGuard)state.range(0)); + for (auto _ : state) { + TStackHolder stack(*allocator, state.range(1), TestCoroName); + BasicOperations(stack); + } + } + BENCHMARK(BM_DefaultAllocator) + ->Args({(int64_t)EGuard::Canary, BigCoroSize}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize}) + ->Args({(int64_t)EGuard::Page, BigCoroSize}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize}); + + static void BM_PoolAllocatorManyStacksOneAtTime(benchmark::State& state) { + TPoolAllocatorSettings settings; + settings.StacksPerChunk = state.range(2); + auto allocator = GetAllocator(settings, (EGuard)state.range(0)); + for (auto _ : state) { + for (uint64_t i = 0; i < ManyStacks; ++i) { + TStackHolder stack(*allocator, state.range(1), TestCoroName); + BasicOperations(stack); + } + } + } + BENCHMARK(BM_PoolAllocatorManyStacksOneAtTime) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 1}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 1}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 1}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 1}) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 1024}) + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 1024}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 1024}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 1024}); + + static void BM_DefaultAllocatorManyStacksOneAtTime(benchmark::State& state) { + auto allocator = GetAllocator(Nothing(), (EGuard)state.range(0)); + for (auto _ : state) { + for (uint64_t i = 0; i < ManyStacks; ++i) { + TStackHolder stack(*allocator, state.range(1), TestCoroName); + BasicOperations(stack); + } + } + } + BENCHMARK(BM_DefaultAllocatorManyStacksOneAtTime) + ->Args({(int64_t)EGuard::Canary, BigCoroSize}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize}) + ->Args({(int64_t)EGuard::Page, BigCoroSize}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize}); + + static void BM_PoolAllocatorManyStacks(benchmark::State& state) { + TPoolAllocatorSettings settings; + settings.StacksPerChunk = state.range(2); + auto allocator = GetAllocator(settings, (EGuard)state.range(0)); + TVector<TStackHolder> stacks; // store stacks during benchmark + stacks.reserve(ManyStacks); + for (auto _ : state) { + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.emplace_back(*allocator, state.range(1), TestCoroName); + BasicOperations(stacks.back()); + } + } + } + BENCHMARK(BM_PoolAllocatorManyStacks) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 1}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 1}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 1}) + ->Args({(int64_t)EGuard::Page, 
SmallCoroSize, 1}) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 1024}) + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 1024}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 1024}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 1024}); + + static void BM_DefaultAllocatorManyStacks(benchmark::State& state) { + auto allocator = GetAllocator(Nothing(), (EGuard)state.range(0)); + TVector<TStackHolder> stacks; // store stacks during benchmark + stacks.reserve(ManyStacks); + for (auto _ : state) { + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.push_back(TStackHolder(*allocator, state.range(1), TestCoroName)); + BasicOperations(stacks.back()); + } + } + } + BENCHMARK(BM_DefaultAllocatorManyStacks) + ->Args({(int64_t)EGuard::Canary, BigCoroSize}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize}) + ->Args({(int64_t)EGuard::Page, BigCoroSize}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize}); + + // ------------------------------------------------------------------------ + static void BM_PoolAllocatorManyStacksReleased(benchmark::State& state) { + TPoolAllocatorSettings settings; + settings.StacksPerChunk = state.range(2); + auto allocator = GetAllocator(settings, (EGuard)state.range(0)); + TVector<TStackHolder> stacks; // store stacks during benchmark + stacks.reserve(ManyStacks); + for (auto _ : state) { + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.emplace_back(*allocator, state.range(1), TestCoroName); + BasicOperations(stacks.back()); + } + stacks.clear(); + } + } + BENCHMARK(BM_PoolAllocatorManyStacksReleased) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 1}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 1}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 1}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 1}) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 1024}) + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 1024}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 1024}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 1024}); + + static void BM_DefaultAllocatorManyStacksReleased(benchmark::State& state) { + auto allocator = GetAllocator(Nothing(), (EGuard)state.range(0)); + TVector<TStackHolder> stacks; // store stacks during benchmark + stacks.reserve(ManyStacks); + for (auto _ : state) { + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.push_back(TStackHolder(*allocator, state.range(1), TestCoroName)); + BasicOperations(stacks.back()); + } + stacks.clear(); + } + } + BENCHMARK(BM_DefaultAllocatorManyStacksReleased) + ->Args({(int64_t)EGuard::Canary, BigCoroSize}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize}) + ->Args({(int64_t)EGuard::Page, BigCoroSize}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize}); + + // ------------------------------------------------------------------------ + static void BM_PoolAllocatorManyStacksReleasedAndRealloc(benchmark::State& state) { + TPoolAllocatorSettings settings; + settings.StacksPerChunk = state.range(2); + auto allocator = GetAllocator(settings, (EGuard)state.range(0)); + TVector<TStackHolder> stacks; // store stacks during benchmark + stacks.reserve(ManyStacks); + for (auto _ : state) { + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.emplace_back(*allocator, state.range(1), TestCoroName); + BasicOperations(stacks.back()); + } + stacks.clear(); + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.emplace_back(*allocator, state.range(1), TestCoroName); + 
BasicOperations(stacks.back()); + } + } + } + BENCHMARK(BM_PoolAllocatorManyStacksReleasedAndRealloc) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 1}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 1}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 1}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 1}) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 1024}) + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 1024}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 1024}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 1024}) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 8192}) + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 8192}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 8192}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 8192}); + + static void BM_DefaultAllocatorManyStacksReleasedAndRealloc(benchmark::State& state) { + auto allocator = GetAllocator(Nothing(), (EGuard)state.range(0)); + TVector<TStackHolder> stacks; // store stacks during benchmark + stacks.reserve(ManyStacks); + for (auto _ : state) { + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.push_back(TStackHolder(*allocator, state.range(1), TestCoroName)); + BasicOperations(stacks.back()); + } + stacks.clear(); + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.push_back(TStackHolder(*allocator, state.range(1), TestCoroName)); + BasicOperations(stacks.back()); + } + } + } + BENCHMARK(BM_DefaultAllocatorManyStacksReleasedAndRealloc) + ->Args({(int64_t)EGuard::Canary, BigCoroSize}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize}) + ->Args({(int64_t)EGuard::Page, BigCoroSize}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize}); + + // ------------------------------------------------------------------------ + static void BM_PoolAllocatorManyStacksMemoryWriteReleasedAndRealloc(benchmark::State& state) { + TPoolAllocatorSettings settings; + settings.StacksPerChunk = state.range(2); + auto allocator = GetAllocator(settings, (EGuard)state.range(0)); + TVector<TStackHolder> stacks; // store stacks during benchmark + stacks.reserve(ManyStacks); + for (auto _ : state) { + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.emplace_back(*allocator, state.range(1), TestCoroName); + WriteStack(stacks.back()); + } + stacks.clear(); + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.emplace_back(*allocator, state.range(1), TestCoroName); + WriteStack(stacks.back()); + } + } + } + BENCHMARK(BM_PoolAllocatorManyStacksMemoryWriteReleasedAndRealloc) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 1}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 1}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 1}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 1}) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 1024}) + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 1024}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 1024}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 1024}) + ->Args({(int64_t)EGuard::Canary, BigCoroSize, 8192}) + ->Args({(int64_t)EGuard::Canary, SmallCoroSize, 8192}) + ->Args({(int64_t)EGuard::Page, BigCoroSize, 8192}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize, 8192}); + + static void BM_DefaultAllocatorManyStacksMemoryWriteReleasedAndRealloc(benchmark::State& state) { + auto allocator = GetAllocator(Nothing(), (EGuard)state.range(0)); + TVector<TStackHolder> stacks; // store stacks during benchmark + stacks.reserve(ManyStacks); + for (auto _ : state) { + for 
(uint64_t i = 0; i < ManyStacks; ++i) { + stacks.push_back(TStackHolder(*allocator, state.range(1), TestCoroName)); + WriteStack(stacks.back()); + } + stacks.clear(); + for (uint64_t i = 0; i < ManyStacks; ++i) { + stacks.push_back(TStackHolder(*allocator, state.range(1), TestCoroName)); + WriteStack(stacks.back()); + } + } + } + BENCHMARK(BM_DefaultAllocatorManyStacksMemoryWriteReleasedAndRealloc) + ->Args({(int64_t)EGuard::Canary, BigCoroSize}) // old version - ArgsProduct() is not supported + ->Args({(int64_t)EGuard::Canary, SmallCoroSize}) + ->Args({(int64_t)EGuard::Page, BigCoroSize}) + ->Args({(int64_t)EGuard::Page, SmallCoroSize}); + +} + +BENCHMARK_MAIN(); diff --git a/library/cpp/coroutine/engine/stack/benchmark/ya.make b/library/cpp/coroutine/engine/stack/benchmark/ya.make new file mode 100644 index 0000000000..b2942fe8ca --- /dev/null +++ b/library/cpp/coroutine/engine/stack/benchmark/ya.make @@ -0,0 +1,13 @@ +G_BENCHMARK() + +OWNER(g:balancer) + +SRCS( + alloc_bm.cpp +) + +PEERDIR( + library/cpp/coroutine/engine +) + +END()
\ No newline at end of file diff --git a/library/cpp/coroutine/engine/stack/stack.cpp b/library/cpp/coroutine/engine/stack/stack.cpp new file mode 100644 index 0000000000..e29450261d --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack.cpp @@ -0,0 +1,67 @@ +#include "stack.h" + +#include "stack_allocator.h" +#include "stack_guards.h" + + +namespace NCoro::NStack { + +namespace NDetails { + + TStack::TStack(void* rawMemory, void* alignedMemory, uint64_t alignedSize, const char* /*name*/) + : RawMemory_((char*)rawMemory) + , AlignedMemory_((char*)alignedMemory) + , Size_(alignedSize) + { + Y_ASSERT(AlignedMemory_ && RawMemory_ && Size_); + Y_ASSERT(!(Size_ & PageSizeMask)); + Y_ASSERT(!((uint64_t)AlignedMemory_ & PageSizeMask)); + } + + TStack::TStack(TStack&& rhs) noexcept + : RawMemory_(rhs.RawMemory_) + , AlignedMemory_(rhs.AlignedMemory_) + , Size_(rhs.Size_) + { + rhs.Reset(); + } + + TStack& TStack::operator=(TStack&& rhs) noexcept { + std::swap(*this, rhs); + rhs.Reset(); + return *this; + } + + void TStack::Reset() noexcept { + Y_ASSERT(AlignedMemory_ && RawMemory_ && Size_); + + RawMemory_ = nullptr; + AlignedMemory_ = nullptr; + Size_ = 0; + } + +} // namespace NDetails + + + TStackHolder::TStackHolder(NStack::IAllocator& allocator, uint32_t size, const char* name) noexcept + : Allocator_(allocator) + , Stack_(Allocator_.AllocStack(size, name)) + {} + + TStackHolder::~TStackHolder() { + Allocator_.FreeStack(Stack_); + } + + TArrayRef<char> TStackHolder::Get() noexcept { + return Allocator_.GetStackWorkspace(Stack_.GetAlignedMemory(), Stack_.GetSize()); + } + + bool TStackHolder::LowerCanaryOk() const noexcept { + return Allocator_.CheckStackOverflow(Stack_.GetAlignedMemory()); + } + + bool TStackHolder::UpperCanaryOk() const noexcept { + return Allocator_.CheckStackOverride(Stack_.GetAlignedMemory(), Stack_.GetSize()); + } + +} diff --git a/library/cpp/coroutine/engine/stack/stack.h b/library/cpp/coroutine/engine/stack/stack.h new file mode 100644 index 0000000000..7d98ba4c68 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack.h @@ -0,0 +1,77 @@ +#pragma once + +#include <util/generic/array_ref.h> +#include <util/generic/fwd.h> +#include <util/generic/noncopyable.h> + +#include <cstdint> + + +namespace NCoro::NStack { + + class IAllocator; + +namespace NDetails { + + //! Do not use directly, use TStackHolder instead + class TStack final : private TMoveOnly { + public: + /*! rawMemory: can be used by unaligned allocator to free stack memory after use + * alignedMemory: pointer to aligned memory on which stack workspace and guard are actually placed + * alignedSize: size of workspace memory + memory for guard + * guard: guard to protect this stack + * name: name of coroutine for which this stack is allocated + */ + TStack(void* rawMemory, void* alignedMemory, uint64_t alignedSize, const char* name); + TStack(TStack&& rhs) noexcept; + TStack& operator=(TStack&& rhs) noexcept; + + char* GetRawMemory() const noexcept { + return RawMemory_; + } + + char* GetAlignedMemory() const noexcept { + return AlignedMemory_; + } + + //! Stack size (includes memory for guard) + uint64_t GetSize() const noexcept { + return Size_; + } + + //! 
Resets parameters, should be called after stack memory is freed + void Reset() noexcept; + + private: + char* RawMemory_ = nullptr; // not owned + char* AlignedMemory_ = nullptr; // not owned + uint64_t Size_ = 0; + }; + +} // namespace NDetails + + class TStackHolder final : private TMoveOnly { + public: + explicit TStackHolder(IAllocator& allocator, uint32_t size, const char* name) noexcept; + TStackHolder(TStackHolder&&) = default; + TStackHolder& operator=(TStackHolder&&) = default; + + ~TStackHolder(); + + char* GetAlignedMemory() const noexcept { + return Stack_.GetAlignedMemory(); + } + uint64_t GetSize() const noexcept { + return Stack_.GetSize(); + } + + TArrayRef<char> Get() noexcept; + bool LowerCanaryOk() const noexcept; + bool UpperCanaryOk() const noexcept; + + private: + IAllocator& Allocator_; + NDetails::TStack Stack_; + }; + +} diff --git a/library/cpp/coroutine/engine/stack/stack_allocator.cpp b/library/cpp/coroutine/engine/stack/stack_allocator.cpp new file mode 100644 index 0000000000..bf12134e6b --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_allocator.cpp @@ -0,0 +1,26 @@ +#include "stack_allocator.h" + + +namespace NCoro::NStack { + + THolder<IAllocator> GetAllocator(TMaybe<TPoolAllocatorSettings> poolSettings, EGuard guardType) { + THolder<IAllocator> allocator; + if (poolSettings) { + if (guardType == EGuard::Canary) { + allocator = MakeHolder<TPoolAllocator<TCanaryGuard>>(*poolSettings); + } else { + Y_ASSERT(guardType == EGuard::Page); + allocator = MakeHolder<TPoolAllocator<TPageGuard>>(*poolSettings); + } + } else { + if (guardType == EGuard::Canary) { + allocator = MakeHolder<TSimpleAllocator<TCanaryGuard>>(); + } else { + Y_ASSERT(guardType == EGuard::Page); + allocator = MakeHolder<TSimpleAllocator<TPageGuard>>(); + } + } + return allocator; + } + +}
\ No newline at end of file diff --git a/library/cpp/coroutine/engine/stack/stack_allocator.h b/library/cpp/coroutine/engine/stack/stack_allocator.h new file mode 100644 index 0000000000..da3c3a93a1 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_allocator.h @@ -0,0 +1,52 @@ +#pragma once + +#include "stack.h" +#include "stack_common.h" + +#include <util/generic/maybe.h> +#include <util/generic/noncopyable.h> +#include <util/generic/ptr.h> + +#include <cstdint> + + +namespace NCoro::NStack { + + class IAllocator : private TNonCopyable { + public: + virtual ~IAllocator() = default; + + //! Size should be page-aligned. Stack would be protected by guard, thus, actual + //! workspace for stack = size - size of guard. + NDetails::TStack AllocStack(uint64_t size, const char* name) { + uint64_t alignedSize = (size + PageSize - 1) & ~PageSizeMask; + Y_ASSERT(alignedSize < 10 * 1024 * PageSize); // more than 10K pages for stack - do you really need it? +#if defined(_san_enabled_) || !defined(NDEBUG) + alignedSize *= DebugOrSanStackMultiplier; +#endif + return DoAllocStack(alignedSize, name); + } + + void FreeStack(NDetails::TStack& stack) noexcept { + if (stack.GetAlignedMemory()) { + DoFreeStack(stack); + } + }; + + virtual TAllocatorStats GetStackStats() const noexcept = 0; + + // Stack helpers + virtual TArrayRef<char> GetStackWorkspace(void* stack, uint64_t size) noexcept = 0; + virtual bool CheckStackOverflow(void* stack) const noexcept = 0; + virtual bool CheckStackOverride(void* stack, uint64_t size) const noexcept = 0; + + private: + virtual NDetails::TStack DoAllocStack(uint64_t size, const char* name) = 0; + virtual void DoFreeStack(NDetails::TStack& stack) noexcept = 0; + }; + + THolder<IAllocator> GetAllocator(TMaybe<TPoolAllocatorSettings> poolSettings, EGuard guardType); + +} + +#include "stack_allocator.inl" diff --git a/library/cpp/coroutine/engine/stack/stack_allocator.inl b/library/cpp/coroutine/engine/stack/stack_allocator.inl new file mode 100644 index 0000000000..0f25a4167b --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_allocator.inl @@ -0,0 +1,138 @@ +#include "stack_guards.h" +#include "stack_pool.h" +#include "stack_utils.h" + +#include <util/generic/hash.h> + +#ifdef _linux_ +#include <unistd.h> +#endif + + +namespace NCoro::NStack { + + template<typename TGuard> + class TPoolAllocator final : public IAllocator { + public: + explicit TPoolAllocator(const TPoolAllocatorSettings& settings); + + TArrayRef<char> GetStackWorkspace(void* stack, uint64_t size) noexcept override { + return Guard_.GetWorkspace(stack, size); + } + bool CheckStackOverflow(void* stack) const noexcept override { + return Guard_.CheckOverflow(stack); + } + bool CheckStackOverride(void* stack, uint64_t size) const noexcept override { + return Guard_.CheckOverride(stack, size); + } + + TAllocatorStats GetStackStats() const noexcept override { + TAllocatorStats stats; + for (const auto& i : Pools_) { + stats.ReleasedSize += i.second.GetReleasedSize(); + stats.NotReleasedSize += i.second.GetFullSize(); + stats.NumOfAllocated += i.second.GetNumOfAllocated(); + } + return stats; + } + + private: // methods + NDetails::TStack DoAllocStack(uint64_t size, const char* name) override; + void DoFreeStack(NDetails::TStack& stack) noexcept override; + + private: // data + const TPoolAllocatorSettings PoolSettings_; + const TGuard& Guard_; + THashMap<uint64_t, TPool<TGuard>> Pools_; // key - stack size + }; + + template<typename TGuard> + TPoolAllocator<TGuard>::TPoolAllocator(const 
TPoolAllocatorSettings& settings) + : PoolSettings_(settings) + , Guard_(GetGuard<TGuard>()) + { +#ifdef _linux_ + Y_VERIFY(sysconf(_SC_PAGESIZE) == PageSize); +#endif + } + + template<typename TGuard> + NDetails::TStack TPoolAllocator<TGuard>::DoAllocStack(uint64_t alignedSize, const char* name) { + Y_ASSERT(alignedSize > Guard_.GetSize()); + + auto pool = Pools_.find(alignedSize); + if (pool == Pools_.end()) { + Y_ASSERT(Pools_.size() < 1000); // too many different sizes for coroutine stacks + auto [newPool, success] = Pools_.emplace(alignedSize, TPool<TGuard>{alignedSize, PoolSettings_, Guard_}); + Y_VERIFY(success, "Failed to add new coroutine pool"); + pool = newPool; + } + return pool->second.AllocStack(name); + } + + template<typename TGuard> + void TPoolAllocator<TGuard>::DoFreeStack(NDetails::TStack& stack) noexcept { + auto pool = Pools_.find(stack.GetSize()); + Y_VERIFY(pool != Pools_.end(), "Attempt to free stack from another allocator"); + pool->second.FreeStack(stack); + } + + // ------------------------------------------------------------------------ + // + template<typename TGuard> + class TSimpleAllocator : public IAllocator { + public: + explicit TSimpleAllocator(); + + TArrayRef<char> GetStackWorkspace(void* stack, uint64_t size) noexcept override { + return Guard_.GetWorkspace(stack, size); + } + bool CheckStackOverflow(void* stack) const noexcept override { + return Guard_.CheckOverflow(stack); + } + bool CheckStackOverride(void* stack, uint64_t size) const noexcept override { + return Guard_.CheckOverride(stack, size); + } + + TAllocatorStats GetStackStats() const noexcept override { return {}; } // not used for simple allocator + + private: // methods + NDetails::TStack DoAllocStack(uint64_t size, const char* name) override; + void DoFreeStack(NDetails::TStack& stack) noexcept override; + + private: // data + const TGuard& Guard_; + }; + + + template<typename TGuard> + TSimpleAllocator<TGuard>::TSimpleAllocator() + : Guard_(GetGuard<TGuard>()) + {} + + template<typename TGuard> + NDetails::TStack TSimpleAllocator<TGuard>::DoAllocStack(uint64_t alignedSize, const char* name) { + Y_ASSERT(alignedSize > Guard_.GetSize()); + + char* rawPtr = nullptr; + char* alignedPtr = nullptr; // with extra space for previous guard in this type of allocator + + Y_VERIFY(GetAlignedMemory((alignedSize + Guard_.GetPageAlignedSize()) / PageSize, rawPtr, alignedPtr)); // + memory for previous guard + char* alignedStackMemory = alignedPtr + Guard_.GetPageAlignedSize(); // after previous guard + + // Default allocator sets both guards, because it doesn't have memory chunk with previous stack and guard on it + Guard_.Protect((void*)alignedPtr, Guard_.GetPageAlignedSize(), false); // first guard should be before stack memory + Guard_.Protect(alignedStackMemory, alignedSize, true); // second guard is placed on stack memory + + return NDetails::TStack{rawPtr, alignedStackMemory, alignedSize, name}; + } + + template<typename TGuard> + void TSimpleAllocator<TGuard>::DoFreeStack(NDetails::TStack& stack) noexcept { + Guard_.RemoveProtection(stack.GetAlignedMemory() - Guard_.GetPageAlignedSize(), Guard_.GetPageAlignedSize()); + Guard_.RemoveProtection(stack.GetAlignedMemory(), stack.GetSize()); + + free(stack.GetRawMemory()); + stack.Reset(); + } +} diff --git a/library/cpp/coroutine/engine/stack/stack_common.h b/library/cpp/coroutine/engine/stack/stack_common.h new file mode 100644 index 0000000000..ed2d74d296 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_common.h @@ -0,0 +1,35 @@ 
+#pragma once + +#include <cstdint> + +class TContExecutor; + +namespace NCoro::NStack { + + static constexpr uint64_t PageSize = 4096; + static constexpr uint64_t PageSizeMask = PageSize - 1; // for checks + static constexpr uint64_t DebugOrSanStackMultiplier = 4; // for debug or sanitizer builds + static constexpr uint64_t SmallStackMaxSizeInPages = 6; + + enum class EGuard { + Canary, //!< writes some data to check it for corruption + Page, //!< prohibits access to page memory + }; + + struct TPoolAllocatorSettings { + uint64_t RssPagesToKeep = 1; + uint64_t SmallStackRssPagesToKeep = 1; // for stack less than SmallStackMaxSizeInPages + uint64_t ReleaseRate = 2; +#if !defined(_san_enabled_) && defined(NDEBUG) + uint64_t StacksPerChunk = 256; +#else + uint64_t StacksPerChunk = 2; +#endif + }; + + struct TAllocatorStats { + uint64_t ReleasedSize = 0; + uint64_t NotReleasedSize = 0; + uint64_t NumOfAllocated = 0; + }; +} diff --git a/library/cpp/coroutine/engine/stack/stack_guards.cpp b/library/cpp/coroutine/engine/stack/stack_guards.cpp new file mode 100644 index 0000000000..b8bcff039e --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_guards.cpp @@ -0,0 +1,17 @@ +#include "stack_guards.h" + + +namespace NCoro::NStack { + + template<> + const TCanaryGuard& GetGuard<TCanaryGuard>() noexcept { + static const TCanaryGuard guard; + return guard; + } + + template<> + const TPageGuard& GetGuard<TPageGuard>() noexcept { + static const TPageGuard guard; + return guard; + } +}
\ No newline at end of file diff --git a/library/cpp/coroutine/engine/stack/stack_guards.h b/library/cpp/coroutine/engine/stack/stack_guards.h new file mode 100644 index 0000000000..3a7ef26481 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_guards.h @@ -0,0 +1,123 @@ +#pragma once + +#include "stack_common.h" + +#include <util/generic/array_ref.h> +#include <util/generic/strbuf.h> +#include <util/system/protect.h> + + +namespace NCoro::NStack { + + /*! Guard detect stack overflow/override, by setting memory before and after stack with predefined values/properties. + * Actually, it sets memory only after the end of stack workspace memory - previous guard section should be set + * already (for previous stack in case of pool allocator) and can be checked on demand. + * Stack pointer should be page-aligned. + */ + + + //! Checks integrity by writing a predefined sequence and comparing it with original + class TCanaryGuard final { + public: + //! Size of guard section in bytes + static constexpr uint64_t GetSize() { return Canary.size(); } + //! Size of page-aligned guard section in bytes + static constexpr uint64_t GetPageAlignedSize() { return AlignedSize_; } + + //! Get stack memory between guard sections + static TArrayRef<char> GetWorkspace(void* stack, uint64_t size) noexcept { + Y_ASSERT( !((uint64_t)stack & PageSizeMask) ); + Y_ASSERT( !(size & PageSizeMask) ); + Y_ASSERT(size > Canary.size()); + + return {(char*) stack, size - Canary.size()}; + } + + /*! Set guard section before the end of stack memory (at stack + size - guard size position) + * checkPrevious: check guard before stack memory for integrity + */ + static void Protect(void* stack, uint64_t size, bool checkPrevious) noexcept { + Y_ASSERT( !((uint64_t)stack & PageSizeMask) ); // stack pointer should be page aligned + Y_ASSERT( !(size & PageSizeMask) ); // stack size should be page aligned + Y_ASSERT(size >= Canary.size()); // stack should have enough space to place guard + + if (checkPrevious) { + Y_VERIFY(CheckOverflow(stack), "Previous stack was corrupted"); + } + auto guardPos = (char*) stack + size - Canary.size(); + memcpy(guardPos, Canary.data(), Canary.size()); + } + + //! This guard doesn't change memory flags + static constexpr void RemoveProtection(void*, uint64_t) {} + //! Should remove protection before returning memory to system + static constexpr bool ShouldRemoveProtectionBeforeFree() { return false; } + + static bool CheckOverflow(void* stack) noexcept { + Y_ASSERT(stack); + + char* guardPos = (char*) ((uint64_t)stack - Canary.size()); + return TStringBuf(guardPos, Canary.size()) == Canary; + } + + static bool CheckOverride(void* stack, uint64_t size) noexcept { + Y_ASSERT(stack); + Y_ASSERT(size > Canary.size()); + + char* guardPos = (char*) ((uint64_t)stack + size - Canary.size()); + return TStringBuf(guardPos, Canary.size()) == Canary; + } + + private: + static constexpr TStringBuf Canary = "[ThisIsACanaryCoroutineStackGuardIfYouReadThisTheStackIsStillOK]"; + static_assert(Canary.size() == 64); + static constexpr uint64_t AlignedSize_ = (Canary.size() + PageSize - 1) & ~PageSizeMask; + }; + + + // ------------------------------------------------------------------------ + // + //! Ensures integrity by removing access rights for border pages + class TPageGuard final { + public: + //! Size of guard section in bytes + static constexpr uint64_t GetSize() { return PageSize; } + //! 
Size of page-aligned guard section in bytes + static constexpr uint64_t GetPageAlignedSize() { return PageSize; } + + static TArrayRef<char> GetWorkspace(void* stack, uint64_t size) noexcept { + Y_ASSERT( !((uint64_t)stack & PageSizeMask) ); + Y_ASSERT( !(size & PageSizeMask) ); + Y_ASSERT(size > PageSize); + + return {(char*)stack, size - PageSize}; + } + + static void Protect(void* stack, uint64_t size, bool /*checkPrevious*/) noexcept { + Y_ASSERT( !((uint64_t)stack & PageSizeMask) ); // stack pointer should be page aligned + Y_ASSERT( !(size & PageSizeMask) ); // stack size should be page aligned + Y_ASSERT(size >= PageSize); // stack should have enough space to place guard + + ProtectMemory((char*)stack + size - PageSize, PageSize, PM_NONE); + } + + //! Remove protection, to allow stack memory be freed + static void RemoveProtection(void* stack, uint64_t size) noexcept { + Y_ASSERT( !((uint64_t)stack & PageSizeMask) ); + Y_ASSERT( !(size & PageSizeMask) ); + Y_ASSERT(size >= PageSize); + + ProtectMemory((char*)stack + size - PageSize, PageSize, PM_WRITE | PM_READ); + } + //! Should remove protection before returning memory to system + static constexpr bool ShouldRemoveProtectionBeforeFree() { return true; } + + //! For page guard is not used - it crashes process at once in this case. + static constexpr bool CheckOverflow(void*) { return true; } + static constexpr bool CheckOverride(void*, uint64_t) { return true; } + }; + + + template<typename TGuard> + const TGuard& GetGuard() noexcept; +} diff --git a/library/cpp/coroutine/engine/stack/stack_pool.h b/library/cpp/coroutine/engine/stack/stack_pool.h new file mode 100644 index 0000000000..27a8e9394b --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_pool.h @@ -0,0 +1,54 @@ +#pragma once + +#include "stack.h" +#include "stack_common.h" + +#include <util/generic/noncopyable.h> +#include <util/generic/ptr.h> +#include <util/generic/vector.h> + + +namespace NCoro::NStack { + + class IGuard; + class TStorage; + struct TPoolAllocatorSettings; + + template<typename TGuard> + class TPool final : private TMoveOnly { + struct TMemory { + char* Raw = nullptr; + char* Aligned = nullptr; // points to aligned memory, which includes space for first page guard + }; + public: + TPool(uint64_t stackSize, const TPoolAllocatorSettings& settings, const TGuard& guard); + TPool(TPool&& other) noexcept; + ~TPool(); + + NDetails::TStack AllocStack(const char* name); + void FreeStack(NDetails::TStack& stack); + + uint64_t GetReleasedSize() const noexcept; + uint64_t GetFullSize() const noexcept; + uint64_t GetNumOfAllocated() const noexcept { return NumOfAllocated_; } + + private: + void AllocNewMemoryChunk(); + bool IsSmallStack() const noexcept; + bool IsStackFromThisPool(const NDetails::TStack& stack) const noexcept; + NDetails::TStack AllocNewStack(const char* name); + + private: + const uint64_t StackSize_ = 0; + uint64_t RssPagesToKeep_ = 0; + const TGuard& Guard_; + TVector<TMemory> Memory_; // memory chunks + THolder<TStorage> Storage_; + char* NextToAlloc_ = nullptr; // points to next available stack in the last memory chunk + const uint64_t ChunkSize_ = 0; + uint64_t NumOfAllocated_ = 0; + }; + +} + +#include "stack_pool.inl" diff --git a/library/cpp/coroutine/engine/stack/stack_pool.inl b/library/cpp/coroutine/engine/stack/stack_pool.inl new file mode 100644 index 0000000000..6e08e05a48 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_pool.inl @@ -0,0 +1,132 @@ +#include "stack_storage.h" +#include "stack_utils.h" + + +namespace 
NCoro::NStack { + + template<typename TGuard> + TPool<TGuard>::TPool(uint64_t stackSize, const TPoolAllocatorSettings& settings, const TGuard& guard) + : StackSize_(stackSize) + , RssPagesToKeep_(IsSmallStack() ? settings.SmallStackRssPagesToKeep : settings.RssPagesToKeep) + , Guard_(guard) + , ChunkSize_(Guard_.GetPageAlignedSize() + StackSize_ * settings.StacksPerChunk) + { + Y_ASSERT(RssPagesToKeep_); + if (!RssPagesToKeep_) { + RssPagesToKeep_ = 1; // at least guard should be kept + } + + const uint64_t stackSizeInPages = stackSize / PageSize; + Y_ASSERT(stackSizeInPages >= RssPagesToKeep_); + if (stackSizeInPages < RssPagesToKeep_) { + RssPagesToKeep_ = stackSizeInPages; // keep all stack pages + } + + Y_ASSERT(StackSize_ && !(StackSize_ & PageSizeMask)); // stack size is not zero and page aligned + Y_ASSERT(Guard_.GetSize() < StackSize_); // stack has enough space to place guard + Y_ASSERT(stackSizeInPages >= RssPagesToKeep_); + + Storage_ = MakeHolder<TStorage>(StackSize_, RssPagesToKeep_, settings.ReleaseRate); + + AllocNewMemoryChunk(); + } + + template<typename TGuard> + TPool<TGuard>::TPool(TPool&& other) noexcept = default; + + template<typename TGuard> + TPool<TGuard>::~TPool() { + if (!Memory_.empty()) { + Y_ASSERT(NextToAlloc_ && StackSize_); + + for (const auto& chunk : Memory_) { + Y_ASSERT(chunk.Raw && chunk.Aligned); + + if (Guard_.ShouldRemoveProtectionBeforeFree()) { + Guard_.RemoveProtection(chunk.Aligned, Guard_.GetPageAlignedSize()); // first page in chunk + + const char* endOfStacksMemory = chunk.Aligned + ChunkSize_; + for (char* i = chunk.Aligned + Guard_.GetPageAlignedSize(); i < endOfStacksMemory; i += StackSize_) { + Guard_.RemoveProtection(i, StackSize_); + } + } + + free(chunk.Raw); + } + } + } + + template<typename TGuard> + NDetails::TStack TPool<TGuard>::AllocStack(const char* name) { + Y_ASSERT(!Memory_.empty()); + + if (!Storage_->IsEmpty()) { + return Storage_->GetStack(Guard_, name); + } else { + ++NumOfAllocated_; + return AllocNewStack(name); + } + } + + template<typename TGuard> + void TPool<TGuard>::FreeStack(NDetails::TStack& stack) { + Y_ASSERT(Storage_->Size() < ((ChunkSize_ - Guard_.GetPageAlignedSize()) / StackSize_) * Memory_.size()); + Y_ASSERT(IsStackFromThisPool(stack)); + + Storage_->ReturnStack(stack); + } + + template<typename TGuard> + uint64_t TPool<TGuard>::GetReleasedSize() const noexcept { + return Storage_->GetReleasedSize(); + } + template<typename TGuard> + uint64_t TPool<TGuard>::GetFullSize() const noexcept { + return Storage_->GetFullSize(); + } + + template<typename TGuard> + void TPool<TGuard>::AllocNewMemoryChunk() { + const uint64_t totalSizeInPages = ChunkSize_ / PageSize; + + TMemory memory; + const auto res = GetAlignedMemory(totalSizeInPages, memory.Raw, memory.Aligned); + Y_VERIFY(res, "Failed to allocate memory for coro stack pool"); + + NextToAlloc_ = memory.Aligned + Guard_.GetPageAlignedSize(); // skip first guard page + Guard_.Protect(memory.Aligned, Guard_.GetPageAlignedSize(), false); // protect first guard page + + Memory_.push_back(std::move(memory)); + } + + template<typename TGuard> + bool TPool<TGuard>::IsSmallStack() const noexcept { + return StackSize_ / PageSize <= SmallStackMaxSizeInPages; + } + + template<typename TGuard> + bool TPool<TGuard>::IsStackFromThisPool(const NDetails::TStack& stack) const noexcept { + for (const auto& chunk : Memory_) { + const char* endOfStacksMemory = chunk.Aligned + ChunkSize_; + if (chunk.Raw <= stack.GetRawMemory() && stack.GetRawMemory() < endOfStacksMemory) { + 
return true; + } + } + return false; + } + + template<typename TGuard> + NDetails::TStack TPool<TGuard>::AllocNewStack(const char* name) { + if (NextToAlloc_ + StackSize_ > Memory_.rbegin()->Aligned + ChunkSize_) { + AllocNewMemoryChunk(); // also sets NextToAlloc_ to first stack position in new allocated chunk of memory + } + Y_ASSERT(NextToAlloc_ + StackSize_ <= Memory_.rbegin()->Aligned + ChunkSize_); + + char* newStack = NextToAlloc_; + NextToAlloc_ += StackSize_; + + Guard_.Protect(newStack, StackSize_, true); + return NDetails::TStack{newStack, newStack, StackSize_, name}; + } + +} diff --git a/library/cpp/coroutine/engine/stack/stack_storage.cpp b/library/cpp/coroutine/engine/stack/stack_storage.cpp new file mode 100644 index 0000000000..6dc2b2d44b --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_storage.cpp @@ -0,0 +1,46 @@ +#include "stack_storage.h" + +#include "stack.h" +#include "stack_utils.h" + +#include <library/cpp/coroutine/engine/impl.h> + + +namespace NCoro::NStack { + + TStorage::TStorage(uint64_t stackSize, uint64_t rssPagesToKeep, uint64_t releaseRate) + : StackSize_(stackSize) + , RssPagesToKeep_(rssPagesToKeep) + , ReleaseRate_(releaseRate ? releaseRate : 1) + { + Y_ASSERT(StackSize_ && RssPagesToKeep_); + } + + bool TStorage::IsEmpty() const noexcept { + return Released_.empty() && Full_.empty(); + } + + uint64_t TStorage::Size() const noexcept { + return Released_.size() + Full_.size(); + } + + void TStorage::ReturnStack(NDetails::TStack& stack) { + thread_local uint64_t i = 0; + if (++i % ReleaseRate_ != 0) { + Full_.push_back(stack.GetAlignedMemory()); + } else { + ReleaseMemory(stack.GetAlignedMemory(), RssPagesToKeep_); + Released_.push_back(stack.GetAlignedMemory()); + } + stack.Reset(); + } + + void TStorage::ReleaseMemory([[maybe_unused]] char* alignedStackMemory, [[maybe_unused]] uint64_t pagesToKeep) noexcept { +#if !defined(_san_enabled_) && defined(NDEBUG) + uint64_t numOfPagesToFree = StackSize_ / PageSize; + numOfPagesToFree -= pagesToKeep; + ReleaseRss(alignedStackMemory, numOfPagesToFree); +#endif + } + +} diff --git a/library/cpp/coroutine/engine/stack/stack_storage.h b/library/cpp/coroutine/engine/stack/stack_storage.h new file mode 100644 index 0000000000..25fe2cfb17 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_storage.h @@ -0,0 +1,60 @@ +#pragma once + +#include "stack.h" + +#include <util/datetime/base.h> +#include <util/generic/deque.h> + + +class TCont; + +namespace NCoro::NStack { + + class IGuard; + + class TStorage final : private TMoveOnly { + public: + TStorage(uint64_t stackSize, uint64_t rssPagesToKeep, uint64_t releaseRate); + + bool IsEmpty() const noexcept; + uint64_t Size() const noexcept; + + uint64_t GetReleasedSize() const noexcept { return Released_.size(); } + uint64_t GetFullSize() const noexcept { return Full_.size(); } + + template<typename TGuard> + NDetails::TStack GetStack(const TGuard& guard, const char* name); + void ReturnStack(NDetails::TStack& stack); + + private: + void ReleaseMemory(char* alignedStackMemory, uint64_t pagesToKeep) noexcept; + + private: + TDeque<void*> Released_; //!< stacks memory with released RSS memory + TDeque<void*> Full_; //!< stacks memory with RSS memory + uint64_t StackSize_ = 0; + uint64_t RssPagesToKeep_ = 0; + const uint64_t ReleaseRate_ = 1; + }; + + + template<typename TGuard> + NDetails::TStack TStorage::GetStack(const TGuard& guard, const char* name) { + Y_VERIFY(!IsEmpty()); // check before call + + void* newStack = nullptr; + if (!Full_.empty()) { + 
newStack = Full_.back(); + Full_.pop_back(); + } else { + Y_ASSERT(!Released_.empty()); + newStack = Released_.back(); + Released_.pop_back(); + } + + Y_VERIFY(guard.CheckOverflow(newStack), "corrupted stack in pool"); + Y_VERIFY(guard.CheckOverride(newStack, StackSize_), "corrupted stack in pool"); + + return NDetails::TStack{newStack, newStack, StackSize_, name}; + } +} diff --git a/library/cpp/coroutine/engine/stack/stack_utils.cpp b/library/cpp/coroutine/engine/stack/stack_utils.cpp new file mode 100644 index 0000000000..1548529b66 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_utils.cpp @@ -0,0 +1,84 @@ +#include "stack_utils.h" + +#include <contrib/libs/linux-headers/asm-generic/errno-base.h> +#include <util/generic/scope.h> +#include <util/system/yassert.h> + +#ifdef _linux_ +#include <sys/mman.h> +#endif + +#include <cerrno> +#include <cstdlib> +#include <cstring> + + +namespace NCoro::NStack { + +#ifdef _linux_ + bool GetAlignedMemory(uint64_t sizeInPages, char*& rawPtr, char*& alignedPtr) noexcept { + Y_ASSERT(sizeInPages); + + void* ptr = nullptr; + int error = posix_memalign(&ptr, PageSize, sizeInPages * PageSize); + alignedPtr = rawPtr = (char*)ptr; + return rawPtr && alignedPtr && !error; + } +#else + bool GetAlignedMemory(uint64_t sizeInPages, char*& rawPtr, char*& alignedPtr) noexcept { + Y_ASSERT(sizeInPages); + + rawPtr = (char*) malloc((sizeInPages + 1) * PageSize); // +1 in case result would be unaligned + alignedPtr = (char*)( ((uint64_t)rawPtr + PageSize - 1) & ~PageSizeMask); + return rawPtr && alignedPtr; + } +#endif + +#ifdef _linux_ + void ReleaseRss(char* alignedPtr, uint64_t numOfPages) noexcept { + Y_VERIFY( !((uint64_t)alignedPtr & PageSizeMask), "Not aligned pointer to release RSS memory"); + if (!numOfPages) { + return; + } + if (auto res = madvise((void*) alignedPtr, numOfPages * PageSize, MADV_DONTNEED); res) { + Y_VERIFY(errno == EAGAIN || errno == ENOMEM, "Failed to release memory"); + } + } +#else + void ReleaseRss(char*, uint64_t) noexcept { + } +#endif + +#ifdef _linux_ + uint64_t CountMapped(char* alignedPtr, uint64_t numOfPages) noexcept { + Y_VERIFY( !((uint64_t)alignedPtr & PageSizeMask) ); + Y_ASSERT(numOfPages); + + uint64_t result = 0; + unsigned char* mappedPages = (unsigned char*) calloc(numOfPages, numOfPages); + Y_VERIFY(mappedPages); + Y_DEFER { + free(mappedPages); + }; + + if (!mincore((void*)alignedPtr, numOfPages * PageSize, mappedPages)) { + for (uint64_t i = 0; i < numOfPages; ++i) { + if (mappedPages[i] & 1) { + ++result; + } + } + } else { + Y_ASSERT(false); + return 0; + } + + return result; + } + +#else + uint64_t CountMapped(char*, uint64_t) noexcept { + return 0; // stub for Windows tests + } +#endif + +} diff --git a/library/cpp/coroutine/engine/stack/stack_utils.h b/library/cpp/coroutine/engine/stack/stack_utils.h new file mode 100644 index 0000000000..46c65240b5 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/stack_utils.h @@ -0,0 +1,27 @@ +#pragma once + +#include "stack_common.h" + + +namespace NCoro::NStack { + /*! Actual size of allocated memory can exceed size in pages, due to unaligned allocation. + * @param sizeInPages : number of pages to allocate + * @param rawPtr : pointer to unaligned memory. Should be passed to free() when is not used any more. + * @param alignedPtr : pointer to beginning of first fully allocated page + * @return : true on success + */ + bool GetAlignedMemory(uint64_t sizeInPages, char*& rawPtr, char*& alignedPtr) noexcept; + + /*! Release mapped RSS memory. 
+ * @param alignedPt : page-size aligned memory on which RSS memory should be freed + * @param numOfPages : number of pages to free from RSS memory + */ + void ReleaseRss(char* alignedPtr, uint64_t numOfPages) noexcept; + + /*! Count pages with RSS memory + * @param alignedPtr : pointer to page-aligned memory for which calculations would be done + * @param numOfPages : number of pages to check + * @return : number of pages with RSS memory + */ + uint64_t CountMapped(char* alignedPtr, uint64_t numOfPages) noexcept; +} diff --git a/library/cpp/coroutine/engine/stack/ut/stack_allocator_ut.cpp b/library/cpp/coroutine/engine/stack/ut/stack_allocator_ut.cpp new file mode 100644 index 0000000000..a7283d44a3 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/ut/stack_allocator_ut.cpp @@ -0,0 +1,115 @@ +#include <library/cpp/coroutine/engine/stack/stack_allocator.h> +#include <library/cpp/coroutine/engine/stack/stack_common.h> +#include <library/cpp/testing/gtest/gtest.h> + + +using namespace testing; + +namespace NCoro::NStack::Tests { + + enum class EAllocator { + Pool, // allocates page-size aligned stacks from pools + Simple // uses malloc/free for each stack + }; + + class TAllocatorParamFixture : public TestWithParam< std::tuple<EGuard, EAllocator> > { + protected: // methods + void SetUp() override { + EGuard guardType; + EAllocator allocType; + std::tie(guardType, allocType) = GetParam(); + + TMaybe<TPoolAllocatorSettings> poolSettings; + if (allocType == EAllocator::Pool) { + poolSettings = TPoolAllocatorSettings{}; + } + + Allocator_ = GetAllocator(poolSettings, guardType); + } + + protected: // data + THolder<IAllocator> Allocator_; + }; + + + TEST_P(TAllocatorParamFixture, StackAllocationAndRelease) { + uint64_t stackSize = PageSize * 12; + auto stack = Allocator_->AllocStack(stackSize, "test_stack"); +#if defined(_san_enabled_) || !defined(NDEBUG) + stackSize *= DebugOrSanStackMultiplier; +#endif + + // Correct stack should have + EXPECT_EQ(stack.GetSize(), stackSize); // predefined size + EXPECT_FALSE((uint64_t)stack.GetAlignedMemory() & PageSizeMask); // aligned pointer + // Writable workspace + auto workspace = Allocator_->GetStackWorkspace(stack.GetAlignedMemory(), stack.GetSize()); + for (uint64_t i = 0; i < workspace.size(); i += 512) { + workspace[i] = 42; + } + EXPECT_TRUE(Allocator_->CheckStackOverflow(stack.GetAlignedMemory())); + EXPECT_TRUE(Allocator_->CheckStackOverride(stack.GetAlignedMemory(), stack.GetSize())); + + Allocator_->FreeStack(stack); + EXPECT_FALSE(stack.GetRawMemory()); + } + + INSTANTIATE_TEST_SUITE_P(AllocatorTestParams, TAllocatorParamFixture, + Combine(Values(EGuard::Canary, EGuard::Page), Values(EAllocator::Pool, EAllocator::Simple))); + + + // ------------------------------------------------------------------------ + // Test that allocated stack has guards + // + template<class AllocatorType> + THolder<IAllocator> GetAllocator(EGuard guardType); + + struct TPoolTag {}; + struct TSimpleTag {}; + + template<> + THolder<IAllocator> GetAllocator<TPoolTag>(EGuard guardType) { + TMaybe<TPoolAllocatorSettings> poolSettings = TPoolAllocatorSettings{}; + return GetAllocator(poolSettings, guardType); + } + + template<> + THolder<IAllocator> GetAllocator<TSimpleTag>(EGuard guardType) { + TMaybe<TPoolAllocatorSettings> poolSettings; + return GetAllocator(poolSettings, guardType); + } + + + template <class AllocatorType> + class TAllocatorFixture : public Test { + protected: + TAllocatorFixture() + : Allocator_(GetAllocator<AllocatorType>(EGuard::Page)) + {} + 
+ const uint64_t StackSize_ = PageSize * 2; + THolder<IAllocator> Allocator_; + }; + + typedef Types<TPoolTag, TSimpleTag> Implementations; + TYPED_TEST_SUITE(TAllocatorFixture, Implementations); + + TYPED_TEST(TAllocatorFixture, StackOverflow) { + ASSERT_DEATH({ + auto stack = this->Allocator_->AllocStack(this->StackSize_, "test_stack"); + + // Overwrite previous guard, crash is here + *(stack.GetAlignedMemory() - 1) = 42; + }, ""); + } + + TYPED_TEST(TAllocatorFixture, StackOverride) { + ASSERT_DEATH({ + auto stack = this->Allocator_->AllocStack(this->StackSize_, "test_stack"); + + // Overwrite guard, crash is here + *(stack.GetAlignedMemory() + stack.GetSize() - 1) = 42; + }, ""); + } + +} diff --git a/library/cpp/coroutine/engine/stack/ut/stack_guards_ut.cpp b/library/cpp/coroutine/engine/stack/ut/stack_guards_ut.cpp new file mode 100644 index 0000000000..9da9a9b3d5 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/ut/stack_guards_ut.cpp @@ -0,0 +1,158 @@ +#include <library/cpp/coroutine/engine/stack/stack_common.h> +#include <library/cpp/coroutine/engine/stack/stack_guards.h> +#include <library/cpp/coroutine/engine/stack/stack_utils.h> +#include <library/cpp/testing/gtest/gtest.h> + + +using namespace testing; + +namespace NCoro::NStack::Tests { + + template <class TGuard> + class TGuardFixture : public Test { + protected: + TGuardFixture() : Guard_(GetGuard<TGuard>()) {} + + const TGuard& Guard_; + }; + + typedef Types<TCanaryGuard, TPageGuard> Implementations; + TYPED_TEST_SUITE(TGuardFixture, Implementations); + + TYPED_TEST(TGuardFixture, GuardSize) { + const auto size = this->Guard_.GetSize(); + EXPECT_GE(size, 64ul); + EXPECT_FALSE(size & 63ul); // check 64-byte alignment + } + + TYPED_TEST(TGuardFixture, GuardAlignedSize) { + const auto size = this->Guard_.GetPageAlignedSize(); + EXPECT_GE(size, PageSize); + EXPECT_FALSE(size & PageSizeMask); // check page-alignment + } + + TYPED_TEST(TGuardFixture, StackWorkspace) { + for (uint64_t sizeInPages : {2, 5, 12}) { + char *rawPtr, *alignedPtr = nullptr; + ASSERT_TRUE(GetAlignedMemory(sizeInPages, rawPtr, alignedPtr)); + auto workspace = this->Guard_.GetWorkspace(alignedPtr, sizeInPages * PageSize); + EXPECT_EQ(workspace.size(), sizeInPages * PageSize - this->Guard_.GetSize()) << " size in pages " << sizeInPages; + + this->Guard_.Protect(alignedPtr, sizeInPages * PageSize, false); + workspace = this->Guard_.GetWorkspace(alignedPtr, sizeInPages * PageSize); + EXPECT_EQ(workspace.size(), sizeInPages * PageSize - this->Guard_.GetSize()) << " size in pages " << sizeInPages; + + this->Guard_.RemoveProtection(alignedPtr, sizeInPages * PageSize); + workspace = this->Guard_.GetWorkspace(alignedPtr, sizeInPages * PageSize); + EXPECT_EQ(workspace.size(), sizeInPages * PageSize - this->Guard_.GetSize()) << " size in pages " << sizeInPages; + + free(rawPtr); + } + } + + TYPED_TEST(TGuardFixture, SetRemoveProtectionWorks) { + char *rawPtr, *alignedPtr = nullptr; + constexpr uint64_t sizeInPages = 4; + ASSERT_TRUE(GetAlignedMemory(sizeInPages + 1, rawPtr, alignedPtr)); + + this->Guard_.Protect(alignedPtr, PageSize, false); // set previous guard + alignedPtr += PageSize; // leave first page for previous guard + this->Guard_.Protect(alignedPtr, sizeInPages * PageSize, true); + + EXPECT_TRUE(this->Guard_.CheckOverflow(alignedPtr)); + EXPECT_TRUE(this->Guard_.CheckOverride(alignedPtr, sizeInPages * PageSize)); + + this->Guard_.RemoveProtection(alignedPtr, sizeInPages * PageSize); + this->Guard_.RemoveProtection(alignedPtr - PageSize, PageSize); 
// remove previous guard + + free(rawPtr); + } + + TEST(StackGuardTest, CanaryGuardTestOverflow) { + const auto& guard = GetGuard<TCanaryGuard>(); + + char *rawPtr, *alignedPtr = nullptr; + constexpr uint64_t sizeInPages = 4; + ASSERT_TRUE(GetAlignedMemory(sizeInPages + 1, rawPtr, alignedPtr)); + guard.Protect(alignedPtr, PageSize, false); // set previous guard + alignedPtr += PageSize; // leave first page for previous guard + guard.Protect(alignedPtr, sizeInPages * PageSize, true); + + EXPECT_TRUE(guard.CheckOverflow(alignedPtr)); + EXPECT_TRUE(guard.CheckOverride(alignedPtr, sizeInPages * PageSize)); + + // Overwrite previous guard + *(alignedPtr - 1) = 42; + + EXPECT_FALSE(guard.CheckOverflow(alignedPtr)); + + free(rawPtr); + } + + TEST(StackGuardTest, CanaryGuardTestOverride) { + const auto& guard = GetGuard<TCanaryGuard>(); + + char *rawPtr, *alignedPtr = nullptr; + constexpr uint64_t sizeInPages = 4; + ASSERT_TRUE(GetAlignedMemory(sizeInPages + 1, rawPtr, alignedPtr)); + guard.Protect(alignedPtr, PageSize, false); // set previous guard + alignedPtr += PageSize; // leave first page for previous guard + guard.Protect(alignedPtr, sizeInPages * PageSize, true); + + EXPECT_TRUE(guard.CheckOverflow(alignedPtr)); + EXPECT_TRUE(guard.CheckOverride(alignedPtr, sizeInPages * PageSize)); + + // Overwrite guard + *(alignedPtr + sizeInPages * PageSize - 1) = 42; + + EXPECT_FALSE(guard.CheckOverride(alignedPtr, sizeInPages * PageSize)); + + free(rawPtr); + } + + TEST(StackGuardDeathTest, PageGuardTestOverflow) { + ASSERT_DEATH({ + const auto &guard = GetGuard<TPageGuard>(); + + char* rawPtr = nullptr; + char* alignedPtr = nullptr; + constexpr uint64_t sizeInPages = 4; + ASSERT_TRUE(GetAlignedMemory(sizeInPages + 1, rawPtr, alignedPtr)); + + guard.Protect(alignedPtr, PageSize, false); // set previous guard + alignedPtr += PageSize; // leave first page for previous guard + guard.Protect(alignedPtr, sizeInPages * PageSize, true); + + // Overwrite previous guard, crash is here + *(alignedPtr - 1) = 42; + + guard.RemoveProtection(alignedPtr, sizeInPages * PageSize); + guard.RemoveProtection(alignedPtr - PageSize, PageSize); // remove previous guard + + free(rawPtr); + }, ""); + } + + TEST(StackGuardDeathTest, PageGuardTestOverride) { + ASSERT_DEATH({ + const auto &guard = GetGuard<TPageGuard>(); + + char* rawPtr = nullptr; + char* alignedPtr = nullptr; + constexpr uint64_t sizeInPages = 4; + ASSERT_TRUE(GetAlignedMemory(sizeInPages + 1, rawPtr, alignedPtr)); + guard.Protect(alignedPtr, PageSize, false); // set previous guard + alignedPtr += PageSize; // leave first page for previous guard + guard.Protect(alignedPtr, sizeInPages * PageSize, true); + + // Overwrite guard, crash is here + *(alignedPtr + sizeInPages * PageSize - 1) = 42; + + guard.RemoveProtection(alignedPtr, sizeInPages * PageSize); + guard.RemoveProtection(alignedPtr - PageSize, PageSize); // remove previous guard + + free(rawPtr); + }, ""); + } + +} diff --git a/library/cpp/coroutine/engine/stack/ut/stack_pool_ut.cpp b/library/cpp/coroutine/engine/stack/ut/stack_pool_ut.cpp new file mode 100644 index 0000000000..9e3e5e7117 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/ut/stack_pool_ut.cpp @@ -0,0 +1,70 @@ +#include <library/cpp/coroutine/engine/stack/stack_common.h> +#include <library/cpp/coroutine/engine/stack/stack_guards.h> +#include <library/cpp/coroutine/engine/stack/stack_pool.h> +#include <library/cpp/testing/gtest/gtest.h> + + +using namespace testing; + +namespace NCoro::NStack::Tests { + + template <class TGuard> + 
class TPoolFixture : public Test { + protected: + TPoolFixture() : Guard_(GetGuard<TGuard>()), Pool_(StackSize_, TPoolAllocatorSettings{1, 1, 8, 32}, Guard_) {} + + const uint64_t StackSize_ = PageSize * 4; + const TGuard& Guard_; + TPool<TGuard> Pool_; + }; + + typedef Types<TCanaryGuard, TPageGuard> Implementations; + TYPED_TEST_SUITE(TPoolFixture, Implementations); + + TYPED_TEST(TPoolFixture, AllocAndFreeStack) { + auto stack = this->Pool_.AllocStack("test_stack"); + this->Pool_.FreeStack(stack); + EXPECT_FALSE(stack.GetRawMemory()); + } + + TYPED_TEST(TPoolFixture, FreedStackReused) { + auto stack = this->Pool_.AllocStack("test_stack"); + auto rawPtr = stack.GetRawMemory(); + auto alignedPtr = stack.GetAlignedMemory(); + + this->Pool_.FreeStack(stack); + EXPECT_FALSE(stack.GetRawMemory()); + + auto stack2 = this->Pool_.AllocStack("test_stack"); + EXPECT_EQ(rawPtr, stack2.GetRawMemory()); + EXPECT_EQ(alignedPtr, stack2.GetAlignedMemory()); + + this->Pool_.FreeStack(stack2); + EXPECT_FALSE(stack2.GetRawMemory()); + } + + TYPED_TEST(TPoolFixture, MruFreedStackReused) { + auto stack = this->Pool_.AllocStack("test_stack"); + auto rawPtr = stack.GetRawMemory(); + auto alignedPtr = stack.GetAlignedMemory(); + auto stack2 = this->Pool_.AllocStack("test_stack"); + auto stack3 = this->Pool_.AllocStack("test_stack"); + + this->Pool_.FreeStack(stack2); + EXPECT_FALSE(stack2.GetRawMemory()); + + this->Pool_.FreeStack(stack); + EXPECT_FALSE(stack.GetRawMemory()); + + auto stack4 = this->Pool_.AllocStack("test_stack"); + EXPECT_EQ(rawPtr, stack4.GetRawMemory()); + EXPECT_EQ(alignedPtr, stack4.GetAlignedMemory()); + + this->Pool_.FreeStack(stack3); + EXPECT_FALSE(stack.GetRawMemory()); + + this->Pool_.FreeStack(stack4); + EXPECT_FALSE(stack4.GetRawMemory()); + } + +} diff --git a/library/cpp/coroutine/engine/stack/ut/stack_ut.cpp b/library/cpp/coroutine/engine/stack/ut/stack_ut.cpp new file mode 100644 index 0000000000..31f8ad6b61 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/ut/stack_ut.cpp @@ -0,0 +1,60 @@ +#include <library/cpp/coroutine/engine/stack/stack.h> +#include <library/cpp/coroutine/engine/stack/stack_common.h> +#include <library/cpp/coroutine/engine/stack/stack_guards.h> +#include <library/cpp/coroutine/engine/stack/stack_utils.h> +#include <library/cpp/testing/gtest/gtest.h> + + +using namespace testing; + +namespace NCoro::NStack::Tests { + + constexpr uint64_t StackSizeInPages = 4; + + template <class TGuard> + class TStackFixture : public Test { + protected: // methods + TStackFixture() + : Guard_(GetGuard<TGuard>()) + , StackSize_(StackSizeInPages * PageSize) + {} + + void SetUp() override { + ASSERT_TRUE(GetAlignedMemory(StackSizeInPages, RawMemory_, AlignedMemory_)); + Stack_ = MakeHolder<NDetails::TStack>(RawMemory_, AlignedMemory_, StackSize_, "test_stack"); + Guard_.Protect(AlignedMemory_, StackSize_, false); + } + + void TearDown() override { + Guard_.RemoveProtection(AlignedMemory_, StackSize_); + free(Stack_->GetRawMemory()); + Stack_->Reset(); + EXPECT_EQ(Stack_->GetRawMemory(), nullptr); + } + + protected: // data + const TGuard& Guard_; + const uint64_t StackSize_ = 0; + char* RawMemory_ = nullptr; + char* AlignedMemory_ = nullptr; + THolder<NDetails::TStack> Stack_; + }; + + typedef Types<TCanaryGuard, TPageGuard> Implementations; + TYPED_TEST_SUITE(TStackFixture, Implementations); + + TYPED_TEST(TStackFixture, PointersAndSize) { + EXPECT_EQ(this->Stack_->GetRawMemory(), this->RawMemory_); + EXPECT_EQ(this->Stack_->GetAlignedMemory(), this->AlignedMemory_); + 
EXPECT_EQ(this->Stack_->GetSize(), this->StackSize_); + } + + TYPED_TEST(TStackFixture, WriteStack) { + auto workspace = this->Guard_.GetWorkspace(this->Stack_->GetAlignedMemory(), this->Stack_->GetSize()); + for (uint64_t i = 0; i < workspace.size(); i += 512) { + workspace[i] = 42; + } + EXPECT_TRUE(this->Guard_.CheckOverride(this->Stack_->GetAlignedMemory(), this->Stack_->GetSize())); + } + +} diff --git a/library/cpp/coroutine/engine/stack/ut/stack_utils_ut.cpp b/library/cpp/coroutine/engine/stack/ut/stack_utils_ut.cpp new file mode 100644 index 0000000000..dc0593dcf2 --- /dev/null +++ b/library/cpp/coroutine/engine/stack/ut/stack_utils_ut.cpp @@ -0,0 +1,73 @@ +#include <library/cpp/coroutine/engine/stack/stack_common.h> +#include <library/cpp/coroutine/engine/stack/stack_utils.h> +#include <library/cpp/testing/gtest/gtest.h> + + +using namespace testing; + +namespace NCoro::NStack::Tests { + + TEST(StackUtilsTest, Allocation) { + char *rawPtr, *alignedPtr = nullptr; + for (uint64_t i : {1, 2, 3, 4, 11}) { + EXPECT_TRUE(GetAlignedMemory(i, rawPtr, alignedPtr)); + EXPECT_TRUE(rawPtr); + EXPECT_TRUE(alignedPtr); + EXPECT_FALSE((uint64_t)alignedPtr & PageSizeMask); + free(rawPtr); + } + } + +#if !defined(_san_enabled_) && defined(_linux_) + + TEST(StackUtilsTest, RssReleaseOnePage) { + char *rawPtr, *alignedPtr = nullptr; + for (uint64_t i : {1, 2, 8}) { + EXPECT_TRUE(GetAlignedMemory(i, rawPtr, alignedPtr)); + EXPECT_TRUE(rawPtr); + EXPECT_TRUE(alignedPtr); + EXPECT_FALSE((uint64_t)alignedPtr & PageSizeMask); + + ReleaseRss(alignedPtr, i); // allocator can provide reused memory with RSS memory on it + EXPECT_EQ(CountMapped(alignedPtr, i), 0ul); // no RSS memory allocated + + *(alignedPtr + (i - 1) * PageSize) = 42; // map RSS memory + EXPECT_EQ(CountMapped(alignedPtr, i), 1ul); + + ReleaseRss(alignedPtr, i); + EXPECT_EQ(CountMapped(alignedPtr, i), 0ul) << "number of pages " << i; // no RSS memory allocated + + free(rawPtr); + } + } + + TEST(StackUtilsTest, RssReleaseSeveralPages) { + char *rawPtr, *alignedPtr = nullptr; + + for (uint64_t i : {1, 2, 5, 8}) { + EXPECT_TRUE(GetAlignedMemory(i, rawPtr, alignedPtr)); + EXPECT_TRUE(rawPtr); + EXPECT_TRUE(alignedPtr); + EXPECT_FALSE((uint64_t)alignedPtr & PageSizeMask); + + ReleaseRss(alignedPtr, i); // allocator can provide reused memory with RSS memory on it + EXPECT_EQ(CountMapped(alignedPtr, i), 0ul); // no RSS memory allocated + + for (uint64_t page = 0; page < i; ++page) { + *(alignedPtr + page * PageSize) = 42; // map RSS memory + EXPECT_EQ(CountMapped(alignedPtr, page + 1), page + 1); + } + + const uint64_t pagesToKeep = (i > 2) ? 2 : i; + + ReleaseRss(alignedPtr, i - pagesToKeep); + EXPECT_EQ(CountMapped(alignedPtr, i), pagesToKeep) << "number of pages " << i; // no RSS memory allocated + + free(rawPtr); + } + } + +#endif + +} + diff --git a/library/cpp/coroutine/engine/stack/ut/ya.make b/library/cpp/coroutine/engine/stack/ut/ya.make new file mode 100644 index 0000000000..65c5af9b7f --- /dev/null +++ b/library/cpp/coroutine/engine/stack/ut/ya.make @@ -0,0 +1,17 @@ +GTEST() + +OWNER(g:balancer) + +SRCS( + stack_allocator_ut.cpp + stack_guards_ut.cpp + stack_pool_ut.cpp + stack_ut.cpp + stack_utils_ut.cpp +) + +PEERDIR( + library/cpp/coroutine/engine +) + +END()
\ No newline at end of file
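The low-level contract that stack_utils_ut.cpp relies on, condensed into a single flow (UtilsExample and the 4-page size are illustrative; ReleaseRss and CountMapped only do real work on Linux and are stubs elsewhere, as the #ifdef blocks in stack_utils.cpp show):

    #include <library/cpp/coroutine/engine/stack/stack_utils.h>
    #include <util/system/yassert.h>

    #include <cstdlib>

    void UtilsExample() {
        using namespace NCoro::NStack;

        char* raw = nullptr;      // pass to free() when the memory is no longer needed
        char* aligned = nullptr;  // first page-aligned byte inside the allocation
        Y_VERIFY(GetAlignedMemory(/* sizeInPages = */ 4, raw, aligned));

        aligned[0] = 42;                         // touching a page maps it into RSS
        ReleaseRss(aligned, 4);                  // madvise(MADV_DONTNEED) on Linux
        Y_VERIFY(CountMapped(aligned, 4) == 0);  // the touched page was dropped from RSS

        free(raw);
    }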