commit    68504504c5056b2c93ba61abb1f3c1737b56930f
tree      156507e0fba00bfef537652cbc30ba0ae4880104
parent    323ccf76cc8d558a958ceb49d1d2645a6a00d654
author    robot-piglet <robot-piglet@yandex-team.com>  2024-03-29 06:01:22 +0300
committer robot-piglet <robot-piglet@yandex-team.com>  2024-03-29 06:08:31 +0300
Intermediate changes
 yt/yt/core/concurrency/unittests/fair_share_invoker_pool_ut.cpp          | 18
 yt/yt/core/concurrency/unittests/profiled_fair_share_invoker_pool_ut.cpp | 18
2 files changed, 18 insertions, 18 deletions
diff --git a/yt/yt/core/concurrency/unittests/fair_share_invoker_pool_ut.cpp b/yt/yt/core/concurrency/unittests/fair_share_invoker_pool_ut.cpp
index 65cb477b6f..e6f0b6feac 100644
--- a/yt/yt/core/concurrency/unittests/fair_share_invoker_pool_ut.cpp
+++ b/yt/yt/core/concurrency/unittests/fair_share_invoker_pool_ut.cpp
@@ -23,7 +23,7 @@ namespace {

 ////////////////////////////////////////////////////////////////////////////////

-constexpr auto Margin = TDuration::MilliSeconds(1);
+constexpr auto Margin = TDuration::MilliSeconds(20);
 constexpr auto Quantum = TDuration::MilliSeconds(100);

 ////////////////////////////////////////////////////////////////////////////////
@@ -427,7 +427,7 @@ TEST_F(TFairShareInvokerPoolTest, CpuTimeAccountingBetweenContextSwitchesIsNotSu
         EXPECT_TRUE(!invocationOrder.empty());
     }).AsyncVia(invokerPool->GetInvoker(0)).Run();

-    YT_VERIFY(started.Wait(Quantum * 100));
+    started.Wait();

     // After 10 quantums of time (see notification of the #started variable) we start Fairness test in the second thread.
     // In case of better implementation we expect to have non-fair CPU time distribution between first and second invokers,
@@ -457,7 +457,7 @@ TEST_F(TFairShareInvokerPoolTest, GetTotalWaitTimeEstimateStuckAction)
     NThreading::TEvent event;

     auto action = BIND([&event] {
-        event.Wait(TDuration::Seconds(100));
+        event.Wait();
     })
         .AsyncVia(invokerPool->GetInvoker(0))
         .Run();
@@ -481,7 +481,7 @@ TEST_F(TFairShareInvokerPoolTest, GetTotalWaitTimeEstimateRelevancyDecay)
     NThreading::TEvent event;

     auto action = BIND([&event] {
-        event.Wait(100 * Quantum);
+        event.Wait();
     })
         .AsyncVia(invokerPool->GetInvoker(0))
         .Run();
@@ -507,14 +507,14 @@ TEST_F(TFairShareInvokerPoolTest, GetTotalWaitTimeEstimateSeveralActions)
     auto invokerPool = CreateInvokerPool(Queues_[0]->GetInvoker(), 1);

     // Make aggregator never forget a sample.
-    invokerPool->UpdateActionTimeRelevancyHalflife(TDuration::Days(100000000000000000));
+    invokerPool->UpdateActionTimeRelevancyHalflife(TDuration::Max());

     std::vector<NThreading::TEvent> leashes(ActionCount);
     std::vector<TFuture<void>> actions;

     for (int idx = 0; idx < ActionCount; ++idx) {
         actions.push_back(BIND([&leashes, idx] {
-            leashes[idx].Wait(100 * Quantum);
+            leashes[idx].Wait();
         })
             .AsyncVia(invokerPool->GetInvoker(0))
             .Run());
@@ -556,7 +556,7 @@ TEST_F(TFairShareInvokerPoolTest, GetTotalWaitEstimateUncorrelatedWithOtherInvok
     };

     auto invokerPool = CreateInvokerPool(Queues_[0]->GetInvoker(), 2);
     // Make aggregator never forget a sample.
-    invokerPool->UpdateActionTimeRelevancyHalflife(TDuration::Days(100000000000000000));
+    invokerPool->UpdateActionTimeRelevancyHalflife(TDuration::Max());

     std::vector<NThreading::TEvent> leashes(2);
     std::vector<TFuture<void>> actions;
@@ -568,7 +568,7 @@ TEST_F(TFairShareInvokerPoolTest, GetTotalWaitEstimateUncorrelatedWithOtherInvok
             } else {
                 executionOrderEnforcer(2);
             }
-            leashes[idx].Wait(100 * Quantum);
+            leashes[idx].Wait();
         })
             .AsyncVia(invokerPool->GetInvoker(0))
             .Run());
@@ -577,7 +577,7 @@ TEST_F(TFairShareInvokerPoolTest, GetTotalWaitEstimateUncorrelatedWithOtherInvok
     NThreading::TEvent secondaryLeash;
     auto secondaryAction = BIND([&executionOrderEnforcer, &secondaryLeash] {
         executionOrderEnforcer(1);
-        secondaryLeash.Wait(100 * Quantum);
+        secondaryLeash.Wait();
     })
         .AsyncVia(invokerPool->GetInvoker(1))
         .Run();
diff --git a/yt/yt/core/concurrency/unittests/profiled_fair_share_invoker_pool_ut.cpp b/yt/yt/core/concurrency/unittests/profiled_fair_share_invoker_pool_ut.cpp
index 89b84ad4ea..86055fe9aa 100644
--- a/yt/yt/core/concurrency/unittests/profiled_fair_share_invoker_pool_ut.cpp
+++ b/yt/yt/core/concurrency/unittests/profiled_fair_share_invoker_pool_ut.cpp
@@ -28,7 +28,7 @@ using namespace NProfiling;

 ////////////////////////////////////////////////////////////////////////////////

-constexpr auto Margin = TDuration::MilliSeconds(1);
+constexpr auto Margin = TDuration::MilliSeconds(20);
 constexpr auto Quantum = TDuration::MilliSeconds(100);

 ////////////////////////////////////////////////////////////////////////////////
@@ -440,7 +440,7 @@ TEST_F(TProfiledFairShareInvokerPoolTest, CpuTimeAccountingBetweenContextSwitche
         EXPECT_TRUE(!invocationOrder.empty());
     }).AsyncVia(invokerPool->GetInvoker(0)).Run();

-    YT_VERIFY(started.Wait(Quantum * 100));
+    started.Wait();

     // After 10 quantums of time (see notification of the #started variable) we start Fairness test in the second thread.
     // In case of better implementation we expect to have non-fair CPU time distribution between first and second invokers,
@@ -470,7 +470,7 @@ TEST_F(TProfiledFairShareInvokerPoolTest, GetTotalWaitTimeEstimateStuckAction)
     NThreading::TEvent event;

     auto action = BIND([&event]{
-        event.Wait(TDuration::Seconds(100));
+        event.Wait();
     })
         .AsyncVia(invokerPool->GetInvoker(0))
         .Run();
@@ -494,7 +494,7 @@ TEST_F(TProfiledFairShareInvokerPoolTest, GetTotalWaitTimeEstimateRelevancyDecay
     NThreading::TEvent event;

     auto action = BIND([&event]{
-        event.Wait(100 * Quantum);
+        event.Wait();
     })
         .AsyncVia(invokerPool->GetInvoker(0))
         .Run();
@@ -520,14 +520,14 @@ TEST_F(TProfiledFairShareInvokerPoolTest, GetTotalWaitTimeEstimateSeveralActions
     auto invokerPool = CreateInvokerPool(Queues_[0]->GetInvoker(), 1);

     // Make aggregator never forget a sample.
-    invokerPool->UpdateActionTimeRelevancyHalflife(TDuration::Days(100000000000000000));
+    invokerPool->UpdateActionTimeRelevancyHalflife(TDuration::Max());

     std::vector<NThreading::TEvent> leashes(ActionCount);
     std::vector<TFuture<void>> actions;

     for (int idx = 0; idx < ActionCount; ++idx) {
         actions.emplace_back(BIND([&leashes, idx] {
-            leashes[idx].Wait(100 * Quantum);
+            leashes[idx].Wait();
         })
             .AsyncVia(invokerPool->GetInvoker(0))
             .Run());
@@ -569,7 +569,7 @@ TEST_F(TProfiledFairShareInvokerPoolTest, GetTotalWaitEstimateUncorrelatedWithOt
     };

     auto invokerPool = CreateInvokerPool(Queues_[0]->GetInvoker(), 2);
     // Make aggregator never forget a sample.
-    invokerPool->UpdateActionTimeRelevancyHalflife(TDuration::Days(100000000000000000));
+    invokerPool->UpdateActionTimeRelevancyHalflife(TDuration::Max());

     std::vector<NThreading::TEvent> leashes(2);
     std::vector<TFuture<void>> actions;
@@ -581,7 +581,7 @@ TEST_F(TProfiledFairShareInvokerPoolTest, GetTotalWaitEstimateUncorrelatedWithOt
             } else {
                 executionOrderEnforcer(2);
             }
-            leashes[idx].Wait(100 * Quantum);
+            leashes[idx].Wait();
         })
             .AsyncVia(invokerPool->GetInvoker(0))
             .Run());
@@ -590,7 +590,7 @@ TEST_F(TProfiledFairShareInvokerPoolTest, GetTotalWaitEstimateUncorrelatedWithOt
     NThreading::TEvent secondaryLeash;
     auto secondaryAction = BIND([&executionOrderEnforcer, &secondaryLeash] {
         executionOrderEnforcer(1);
-        secondaryLeash.Wait(100 * Quantum);
+        secondaryLeash.Wait();
     }).AsyncVia(invokerPool->GetInvoker(1)).Run();

     auto start = GetInstant();
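Both test files receive the same three adjustments: the timing tolerance Margin is widened from 1 ms to 20 ms, bounded event waits such as event.Wait(100 * Quantum) become unbounded event.Wait() calls, and the "never forget a sample" halflife switches from a huge TDuration::Days(...) value to TDuration::Max(). For reference, below is a minimal sketch of the bounded-versus-unbounded waiting pattern. It is an illustration only, not code from this commit: the header path and the NotifyAll() call are assumptions, since the diff shows only TEvent construction, Wait(), and Wait(TDuration).

// Illustration only; the include path and NotifyAll() are assumed, not taken from the diff.
#include <library/cpp/yt/threading/event_count.h>

#include <util/datetime/base.h>

#include <thread>

int main()
{
    NYT::NThreading::TEvent started;

    std::thread worker([&started] {
        // ... the work the test wants to observe ...
        started.NotifyAll(); // assumed wake-up call, mirroring the "#started" notification mentioned in the diff comments
    });

    // Before: a bounded wait that can expire on a slow or heavily loaded CI host.
    // bool notified = started.Wait(TDuration::MilliSeconds(100) * 100);

    // After: wait without a deadline; the test-framework timeout is the only bound.
    started.Wait();

    worker.join();
}

The TDuration::Max() change follows the same idea: an "effectively infinite" halflife is expressed through the type's own maximum rather than an astronomically large Days(...) argument, which risks overflowing the underlying microsecond counter.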