| author | Vasily Gerasimov <UgnineSirdis@gmail.com> | 2022-02-10 16:49:09 +0300 |
|---|---|---|
| committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:49:09 +0300 |
| commit | 6cdc8f140213c595e4ad38bc3d97fcef1146b8c3 (patch) | |
| tree | f69637041e6fed76ebae0c74ae1fa0c4be6ab5b4 | |
| parent | e5d4696304c6689379ac7ce334512404d4b7836c (diff) | |
| download | ydb-6cdc8f140213c595e4ad38bc3d97fcef1146b8c3.tar.gz | |
Restoring authorship annotation for Vasily Gerasimov <UgnineSirdis@gmail.com>. Commit 1 of 2.
851 files changed, 56564 insertions, 56564 deletions
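Most of the re-annotated code in the diff below lives under library/cpp/containers; the largest functional surface is TCompactTrieBuilder, whose FindLongestPrefix() behaviour is exercised by the TestBuilderFindLongestPrefixWithEmptyValue unit test further down. Purely as context (it is not part of the patch), here is a minimal usage sketch; it assumes an arcadia-style build that provides library/cpp/containers/comptrie, with wchar16 and ui32 coming from util, and the expected values in the comments mirror that unit test.

```cpp
// Context sketch (assumption: built inside an arcadia/ya.make environment that
// provides library/cpp/containers/comptrie; wchar16 and ui32 come from util).
#include <library/cpp/containers/comptrie/comptrie_builder.h>

int main() {
    TCompactTrieBuilder<wchar16, ui32> builder;

    builder.Add(u"", 42);        // empty key: a prefix of every other key
    builder.Add(u"ya", 31415);
    builder.Add(u"yandex", 271828);

    size_t prefixLen = 0;
    ui32 value = 0;

    // "yahoo" shares the prefix "ya": prefixLen becomes 2, value 31415.
    const bool foundYa = builder.FindLongestPrefix(u"yahoo", &prefixLen, &value);

    // "google" matches only the empty key: prefixLen becomes 0, value 42.
    const bool foundEmpty = builder.FindLongestPrefix(u"google", &prefixLen, &value);

    return (foundYa && foundEmpty) ? 0 : 1;
}
```

FindLongestPrefix() on the builder returns the value of the longest previously added key that prefixes the query; an empty key, if present, matches any query with prefixLen == 0.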
diff --git a/build/scripts/coverage-info.py b/build/scripts/coverage-info.py index 94491d92564..9f9a7e66057 100644 --- a/build/scripts/coverage-info.py +++ b/build/scripts/coverage-info.py @@ -4,7 +4,7 @@ import sys import tarfile import collections import subprocess -import re +import re GCDA_EXT = '.gcda' @@ -150,13 +150,13 @@ def gen_info_global(cmd, cov_info, probe_path, update_stat, lcov_args): def init_all_coverage_files(gcno_archive, fname2gcno, fname2info, geninfo_executable, gcov_tool, gen_info, prefix_filter, exclude_files): - with tarfile.open(gcno_archive) as gcno_tf: - for gcno_item in gcno_tf: - if gcno_item.isfile() and gcno_item.name.endswith(GCNO_EXT): - gcno_tf.extract(gcno_item) - - gcno_name = gcno_item.name - source_fname = gcno_name[:-len(GCNO_EXT)] + with tarfile.open(gcno_archive) as gcno_tf: + for gcno_item in gcno_tf: + if gcno_item.isfile() and gcno_item.name.endswith(GCNO_EXT): + gcno_tf.extract(gcno_item) + + gcno_name = gcno_item.name + source_fname = gcno_name[:-len(GCNO_EXT)] if prefix_filter and not source_fname.startswith(prefix_filter): sys.stderr.write("Skipping {} (doesn't match prefix '{}')\n".format(source_fname, prefix_filter)) continue @@ -164,19 +164,19 @@ def init_all_coverage_files(gcno_archive, fname2gcno, fname2info, geninfo_execut sys.stderr.write("Skipping {} (matched exclude pattern '{}')\n".format(source_fname, exclude_files.pattern)) continue - fname2gcno[source_fname] = gcno_name - - if os.path.getsize(gcno_name) > 0: - coverage_info = source_fname + '.' + str(len(fname2info[source_fname])) + '.info' - fname2info[source_fname].append(coverage_info) - geninfo_cmd = [ - geninfo_executable, - '--gcov-tool', gcov_tool, - '-i', gcno_name, - '-o', coverage_info + '.tmp' - ] - gen_info(geninfo_cmd, coverage_info) - + fname2gcno[source_fname] = gcno_name + + if os.path.getsize(gcno_name) > 0: + coverage_info = source_fname + '.' + str(len(fname2info[source_fname])) + '.info' + fname2info[source_fname].append(coverage_info) + geninfo_cmd = [ + geninfo_executable, + '--gcov-tool', gcov_tool, + '-i', gcno_name, + '-o', coverage_info + '.tmp' + ] + gen_info(geninfo_cmd, coverage_info) + def process_all_coverage_files(gcda_archive, fname2gcno, fname2info, geninfo_executable, gcov_tool, gen_info): with tarfile.open(gcda_archive) as gcda_tf: @@ -190,10 +190,10 @@ def process_all_coverage_files(gcda_archive, fname2gcno, fname2info, geninfo_exe gcda_item.name = gcda_new_name gcda_tf.extract(gcda_item) if os.path.getsize(gcda_new_name) > 0: - coverage_info = suff + '.' + str(len(fname2info[suff])) + '.info' - fname2info[suff].append(coverage_info) + coverage_info = suff + '.' 
+ str(len(fname2info[suff])) + '.info' + fname2info[suff].append(coverage_info) geninfo_cmd = [ - geninfo_executable, + geninfo_executable, '--gcov-tool', gcov_tool, gcda_new_name, '-o', coverage_info + '.tmp' @@ -272,7 +272,7 @@ if __name__ == '__main__': parser.add_argument('--gcda-archive', action='store') parser.add_argument('--gcov-tool', action='store') parser.add_argument('--prefix-filter', action='store') - parser.add_argument('--exclude-regexp', action='store') + parser.add_argument('--exclude-regexp', action='store') parser.add_argument('--teamcity-stat-output', action='store_const', const=True) parser.add_argument('--coverage-report-path', action='store') parser.add_argument('--gcov-report', action='store') diff --git a/library/cpp/actors/core/actor.h b/library/cpp/actors/core/actor.h index ed29bd14b9e..7c08f2a49d4 100644 --- a/library/cpp/actors/core/actor.h +++ b/library/cpp/actors/core/actor.h @@ -401,7 +401,7 @@ namespace NActors { template <typename T> struct HasActorName<T, decltype((void)T::ActorName, (const char*)nullptr)>: std::true_type { }; - static ui32 GetActivityTypeIndex() { + static ui32 GetActivityTypeIndex() { if constexpr(HasActorName<TDerived>::value) { return TLocalProcessKey<TActorActivityTag, TDerived::ActorName>::GetIndex(); } else { @@ -425,8 +425,8 @@ namespace NActors { // static constexpr char ActorName[] = "UNNAMED"; - TActor(void (TDerived::*func)(TAutoPtr<IEventHandle>& ev, const TActorContext& ctx), ui32 activityType = GetActivityTypeIndex()) - : IActor(static_cast<TReceiveFunc>(func), activityType) + TActor(void (TDerived::*func)(TAutoPtr<IEventHandle>& ev, const TActorContext& ctx), ui32 activityType = GetActivityTypeIndex()) + : IActor(static_cast<TReceiveFunc>(func), activityType) { } public: diff --git a/library/cpp/actors/core/executor_thread.cpp b/library/cpp/actors/core/executor_thread.cpp index 446b651efd2..5d292462bbc 100644 --- a/library/cpp/actors/core/executor_thread.cpp +++ b/library/cpp/actors/core/executor_thread.cpp @@ -72,7 +72,7 @@ namespace NActors { } void TExecutorThread::Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie) { - ++CurrentActorScheduledEventsCounter; + ++CurrentActorScheduledEventsCounter; Ctx.Executor->Schedule(deadline, ev, cookie, Ctx.WorkerId); } @@ -153,7 +153,7 @@ namespace NActors { TCallstack::GetTlsCallstack().SetLinesToSkip(); #endif CurrentRecipient = recipient; - CurrentActorScheduledEventsCounter = 0; + CurrentActorScheduledEventsCounter = 0; if (executed == 0) { double usec = Ctx.AddActivationStats(AtomicLoad(&mailbox->ScheduleMoment), hpprev); diff --git a/library/cpp/actors/core/mon_stats.h b/library/cpp/actors/core/mon_stats.h index d55552af0cb..96ba808854b 100644 --- a/library/cpp/actors/core/mon_stats.h +++ b/library/cpp/actors/core/mon_stats.h @@ -79,7 +79,7 @@ namespace NActors { TVector<NHPTimer::STime> ElapsedTicksByActivity; TVector<ui64> ReceivedEventsByActivity; TVector<i64> ActorsAliveByActivity; // the sum should be positive, but per-thread might be negative - TVector<ui64> ScheduledEventsByActivity; + TVector<ui64> ScheduledEventsByActivity; ui64 PoolActorRegistrations = 0; ui64 PoolDestroyedActors = 0; ui64 PoolAllocatedMailboxes = 0; @@ -91,7 +91,7 @@ namespace NActors { : ElapsedTicksByActivity(activityVecSize) , ReceivedEventsByActivity(activityVecSize) , ActorsAliveByActivity(activityVecSize) - , ScheduledEventsByActivity(activityVecSize) + , ScheduledEventsByActivity(activityVecSize) {} template <typename T> @@ -126,7 +126,7 @@ namespace NActors { 
AggregateOne(ElapsedTicksByActivity, other.ElapsedTicksByActivity); AggregateOne(ReceivedEventsByActivity, other.ReceivedEventsByActivity); AggregateOne(ActorsAliveByActivity, other.ActorsAliveByActivity); - AggregateOne(ScheduledEventsByActivity, other.ScheduledEventsByActivity); + AggregateOne(ScheduledEventsByActivity, other.ScheduledEventsByActivity); RelaxedStore( &PoolActorRegistrations, diff --git a/library/cpp/actors/helpers/selfping_actor.cpp b/library/cpp/actors/helpers/selfping_actor.cpp index f9bfaf8dc09..9fe2e7875f5 100644 --- a/library/cpp/actors/helpers/selfping_actor.cpp +++ b/library/cpp/actors/helpers/selfping_actor.cpp @@ -64,7 +64,7 @@ private: const NMonitoring::TDynamicCounters::TCounterPtr Counter; const NMonitoring::TDynamicCounters::TCounterPtr CalculationTimeCounter; - NSlidingWindow::TSlidingWindow<NSlidingWindow::TMaxOperation<ui64>> SlidingWindow; + NSlidingWindow::TSlidingWindow<NSlidingWindow::TMaxOperation<ui64>> SlidingWindow; NSlidingWindow::TSlidingWindow<TAvgOperation<ui64>> CalculationSlidingWindow; THPTimer Timer; diff --git a/library/cpp/actors/ya.make b/library/cpp/actors/ya.make index 737c7fbc186..3c308c08998 100644 --- a/library/cpp/actors/ya.make +++ b/library/cpp/actors/ya.make @@ -1,7 +1,7 @@ RECURSE_FOR_TESTS(ut) RECURSE( - log_backend + log_backend core dnsresolver examples diff --git a/library/cpp/colorizer/colors.cpp b/library/cpp/colorizer/colors.cpp index decc5c9847d..47408ac08f9 100644 --- a/library/cpp/colorizer/colors.cpp +++ b/library/cpp/colorizer/colors.cpp @@ -1,6 +1,6 @@ #include "colors.h" -#include <util/stream/output.h> +#include <util/stream/output.h> #include <util/generic/singleton.h> #include <util/system/env.h> @@ -166,10 +166,10 @@ bool TColors::CalcIsTTY(FILE* file) { #endif } -TColors::TColors(FILE* f) +TColors::TColors(FILE* f) : IsTTY_(true) { - SetIsTTY(CalcIsTTY(f)); + SetIsTTY(CalcIsTTY(f)); } TColors::TColors(bool ontty) @@ -418,7 +418,7 @@ TStringBuf TColors::WhiteColor() const noexcept { } -namespace { +namespace { class TStdErrColors: public TColors { public: TStdErrColors() @@ -426,24 +426,24 @@ namespace { { } }; - - class TStdOutColors: public TColors { - public: - TStdOutColors() - : TColors(stdout) - { - } - }; - - class TDisabledColors: public TColors { - public: - TDisabledColors() - : TColors(false) - { - } - }; -} // anonymous namespace - + + class TStdOutColors: public TColors { + public: + TStdOutColors() + : TColors(stdout) + { + } + }; + + class TDisabledColors: public TColors { + public: + TDisabledColors() + : TColors(false) + { + } + }; +} // anonymous namespace + TColors& NColorizer::StdErr() { return *Singleton<TStdErrColors>(); } @@ -453,13 +453,13 @@ TColors& NColorizer::StdOut() { } TColors& NColorizer::AutoColors(IOutputStream& os) { - if (&os == &Cerr) { - return StdErr(); - } - if (&os == &Cout) { - return StdOut(); - } - return *Singleton<TDisabledColors>(); + if (&os == &Cerr) { + return StdErr(); + } + if (&os == &Cout) { + return StdOut(); + } + return *Singleton<TDisabledColors>(); } size_t NColorizer::TotalAnsiEscapeCodeLen(TStringBuf text) { diff --git a/library/cpp/colorizer/colors.h b/library/cpp/colorizer/colors.h index 474a918994e..c40570c9bc3 100644 --- a/library/cpp/colorizer/colors.h +++ b/library/cpp/colorizer/colors.h @@ -1,6 +1,6 @@ #pragma once -#include "fwd.h" +#include "fwd.h" #include <util/generic/string.h> #include <util/generic/strbuf.h> @@ -104,8 +104,8 @@ namespace NColorizer { static bool CalcIsTTY(FILE* file); public: - explicit TColors(FILE* = stderr); - 
explicit TColors(bool ontty); + explicit TColors(FILE* = stderr); + explicit TColors(bool ontty); TStringBuf Reset() const noexcept; @@ -213,9 +213,9 @@ namespace NColorizer { }; /// Singletone `TColors` instances for stderr/stdout. - TColors& StdErr(); - TColors& StdOut(); - + TColors& StdErr(); + TColors& StdOut(); + /// Choose `TColors` depending on output stream. If passed stream is stderr/stdout, return a corresponding /// singletone. Otherwise, return a disabled singletone (which you can, but should *not* enable). TColors& AutoColors(IOutputStream& os); diff --git a/library/cpp/colorizer/fwd.h b/library/cpp/colorizer/fwd.h index d71efdc0534..d94f7282849 100644 --- a/library/cpp/colorizer/fwd.h +++ b/library/cpp/colorizer/fwd.h @@ -1,11 +1,11 @@ -#pragma once - +#pragma once + class IOutputStream; - -namespace NColorizer { - class TColors; - - TColors& StdErr(); - TColors& StdOut(); + +namespace NColorizer { + class TColors; + + TColors& StdErr(); + TColors& StdOut(); TColors& AutoColors(IOutputStream&); } diff --git a/library/cpp/containers/comptrie/comptrie_builder.h b/library/cpp/containers/comptrie/comptrie_builder.h index cf7d2e39a34..f8a4926ef03 100644 --- a/library/cpp/containers/comptrie/comptrie_builder.h +++ b/library/cpp/containers/comptrie/comptrie_builder.h @@ -46,7 +46,7 @@ public: typedef typename TCompactTrieKeySelector<TSymbol>::TKey TKey; typedef typename TCompactTrieKeySelector<TSymbol>::TKeyBuf TKeyBuf; - explicit TCompactTrieBuilder(TCompactTrieBuilderFlags flags = CTBF_NONE, TPacker packer = TPacker(), IAllocator* alloc = TDefaultAllocator::Instance()); + explicit TCompactTrieBuilder(TCompactTrieBuilderFlags flags = CTBF_NONE, TPacker packer = TPacker(), IAllocator* alloc = TDefaultAllocator::Instance()); // All Add.. methods return true if it was a new key, false if the key already existed. 
@@ -72,14 +72,14 @@ public: } bool Find(const TSymbol* key, size_t keylen, TData* value) const; - bool Find(const TKeyBuf& key, TData* value = nullptr) const { + bool Find(const TKeyBuf& key, TData* value = nullptr) const { return Find(key.data(), key.size(), value); } - bool FindLongestPrefix(const TSymbol* key, size_t keylen, size_t* prefixLen, TData* value = nullptr) const; - bool FindLongestPrefix(const TKeyBuf& key, size_t* prefixLen, TData* value = nullptr) const { + bool FindLongestPrefix(const TSymbol* key, size_t keylen, size_t* prefixLen, TData* value = nullptr) const; + bool FindLongestPrefix(const TKeyBuf& key, size_t* prefixLen, TData* value = nullptr) const { return FindLongestPrefix(key.data(), key.size(), prefixLen, value); - } + } size_t Save(IOutputStream& os) const; size_t SaveAndDestroy(IOutputStream& os); diff --git a/library/cpp/containers/comptrie/comptrie_builder.inl b/library/cpp/containers/comptrie/comptrie_builder.inl index f273fa65710..e1a99da902b 100644 --- a/library/cpp/containers/comptrie/comptrie_builder.inl +++ b/library/cpp/containers/comptrie/comptrie_builder.inl @@ -1,20 +1,20 @@ #pragma once #include "comptrie_impl.h" -#include "comptrie_trie.h" +#include "comptrie_trie.h" #include "make_fast_layout.h" #include "array_with_size.h" #include <library/cpp/containers/compact_vector/compact_vector.h> -#include <util/memory/alloc.h> +#include <util/memory/alloc.h> #include <util/memory/blob.h> #include <util/memory/pool.h> #include <util/memory/tempbuf.h> #include <util/memory/smallobj.h> #include <util/generic/algorithm.h> #include <util/generic/buffer.h> -#include <util/generic/strbuf.h> +#include <util/generic/strbuf.h> #include <util/system/align.h> #include <util/stream/buffer.h> @@ -49,8 +49,8 @@ protected: void ConvertSymbolArrayToChar(const TSymbol* key, size_t keylen, TTempBuf& buf, size_t ckeylen) const; void NodeLinkTo(TNode* thiz, const TBlob& label, TNode* node); TNode* NodeForwardAdd(TNode* thiz, const char* label, size_t len, size_t& passed, size_t* nodeCount); - bool FindEntryImpl(const char* key, size_t keylen, TData* value) const; - bool FindLongestPrefixImpl(const char* keyptr, size_t keylen, size_t* prefixLen, TData* value) const; + bool FindEntryImpl(const char* key, size_t keylen, TData* value) const; + bool FindLongestPrefixImpl(const char* keyptr, size_t keylen, size_t* prefixLen, TData* value) const; size_t NodeMeasureSubtree(TNode* thiz) const; ui64 NodeSaveSubtree(TNode* thiz, IOutputStream& os) const; @@ -67,7 +67,7 @@ protected: ui64 ArcSaveAndDestroy(const TArc* thiz, IOutputStream& os); public: - TCompactTrieBuilderImpl(TCompactTrieBuilderFlags flags, TPacker packer, IAllocator* alloc); + TCompactTrieBuilderImpl(TCompactTrieBuilderFlags flags, TPacker packer, IAllocator* alloc); virtual ~TCompactTrieBuilderImpl(); void DestroyNode(TNode* node); @@ -81,14 +81,14 @@ public: bool AddSubtreeInFile(const TSymbol* key, size_t keylen, const TString& fileName); bool AddSubtreeInBuffer(const TSymbol* key, size_t keylen, TArrayWithSizeHolder<char>&& buffer); bool FindEntry(const TSymbol* key, size_t keylen, TData* value) const; - bool FindLongestPrefix(const TSymbol* key, size_t keylen, size_t* prefixlen, TData* value) const; + bool FindLongestPrefix(const TSymbol* key, size_t keylen, size_t* prefixlen, TData* value) const; size_t Save(IOutputStream& os) const; size_t SaveAndDestroy(IOutputStream& os); void Clear(); - // lies if some key was added at least twice + // lies if some key was added at least twice size_t GetEntryCount() const; 
size_t GetNodeCount() const; @@ -121,25 +121,25 @@ public: virtual ui64 Save(const TBuilderImpl* builder, IOutputStream& os) const = 0; virtual ui64 SaveAndDestroy(TBuilderImpl* builder, IOutputStream& os) = 0; virtual void Destroy(TBuilderImpl*) { } - - // Tries to find key in subtree. - // Returns next node to find the key in (to avoid recursive calls) - // If it has end result, writes it to @value and @result arguments and returns nullptr - virtual const TNode* Find(TStringBuf& key, TData* value, bool& result, const TPacker& packer) const = 0; - virtual const TNode* FindLongestPrefix(TStringBuf& key, TData* value, bool& result, const TPacker& packer) const = 0; + + // Tries to find key in subtree. + // Returns next node to find the key in (to avoid recursive calls) + // If it has end result, writes it to @value and @result arguments and returns nullptr + virtual const TNode* Find(TStringBuf& key, TData* value, bool& result, const TPacker& packer) const = 0; + virtual const TNode* FindLongestPrefix(TStringBuf& key, TData* value, bool& result, const TPacker& packer) const = 0; }; class TArcSet: public ISubtree, public TCompactVector<TArc> { public: typedef typename TCompactVector<TArc>::iterator iterator; - typedef typename TCompactVector<TArc>::const_iterator const_iterator; + typedef typename TCompactVector<TArc>::const_iterator const_iterator; - TArcSet() { + TArcSet() { Y_ASSERT(reinterpret_cast<ISubtree*>(this) == static_cast<void*>(this)); // This assumption is used in TNode::Subtree() - } - + } + iterator Find(char ch); - const_iterator Find(char ch) const; + const_iterator Find(char ch) const; void Add(const TBlob& s, TNode* node); bool IsLast() const override { @@ -148,9 +148,9 @@ public: const TNode* Find(TStringBuf& key, TData* value, bool& result, const TPacker& packer) const override; const TNode* FindLongestPrefix(TStringBuf& key, TData* value, bool& result, const TPacker& packer) const override { - return Find(key, value, result, packer); - } - + return Find(key, value, result, packer); + } + ui64 Measure(const TBuilderImpl* builder) const override { return MeasureRange(builder, 0, this->size()); } @@ -217,40 +217,40 @@ public: struct TBufferedSubtree: public ISubtree { TArrayWithSizeHolder<char> Buffer; - TBufferedSubtree() { + TBufferedSubtree() { Y_ASSERT(reinterpret_cast<ISubtree*>(this) == static_cast<void*>(this)); // This assumption is used in TNode::Subtree() - } - + } + bool IsLast() const override { return Buffer.Empty(); } const TNode* Find(TStringBuf& key, TData* value, bool& result, const TPacker& packer) const override { - if (Buffer.Empty()) { - result = false; - return nullptr; - } - - TCompactTrie<char, D, S> trie(Buffer.Get(), Buffer.Size(), packer); + if (Buffer.Empty()) { + result = false; + return nullptr; + } + + TCompactTrie<char, D, S> trie(Buffer.Get(), Buffer.Size(), packer); result = trie.Find(key.data(), key.size(), value); - - return nullptr; - } - + + return nullptr; + } + const TNode* FindLongestPrefix(TStringBuf& key, TData* value, bool& result, const TPacker& packer) const override { - if (Buffer.Empty()) { - result = false; - return nullptr; - } - - TCompactTrie<char, D, S> trie(Buffer.Get(), Buffer.Size(), packer); - size_t prefixLen = 0; + if (Buffer.Empty()) { + result = false; + return nullptr; + } + + TCompactTrie<char, D, S> trie(Buffer.Get(), Buffer.Size(), packer); + size_t prefixLen = 0; result = trie.FindLongestPrefix(key.data(), key.size(), &prefixLen, value); - key = key.SubStr(prefixLen); - - return nullptr; - } - + key = 
key.SubStr(prefixLen); + + return nullptr; + } + ui64 Measure(const TBuilderImpl*) const override { return Buffer.Size(); } @@ -283,7 +283,7 @@ public: Data.Reset(new TData); Data->FileName = fileName; Data->Size = size; - + Y_ASSERT(reinterpret_cast<ISubtree*>(this) == static_cast<void*>(this)); // This assumption is used in TNode::Subtree() } @@ -292,30 +292,30 @@ public: } const TNode* Find(TStringBuf& key, typename TCompactTrieBuilder::TData* value, bool& result, const TPacker& packer) const override { - if (!Data) { - result = false; - return nullptr; - } - - TCompactTrie<char, D, S> trie(TBlob::FromFile(Data->FileName), packer); + if (!Data) { + result = false; + return nullptr; + } + + TCompactTrie<char, D, S> trie(TBlob::FromFile(Data->FileName), packer); result = trie.Find(key.data(), key.size(), value); - return nullptr; - } - + return nullptr; + } + const TNode* FindLongestPrefix(TStringBuf& key, typename TCompactTrieBuilder::TData* value, bool& result, const TPacker& packer) const override { - if (!Data) { - result = false; - return nullptr; - } - - TCompactTrie<char, D, S> trie(TBlob::FromFile(Data->FileName), packer); - size_t prefixLen = 0; + if (!Data) { + result = false; + return nullptr; + } + + TCompactTrie<char, D, S> trie(TBlob::FromFile(Data->FileName), packer); + size_t prefixLen = 0; result = trie.FindLongestPrefix(key.data(), key.size(), &prefixLen, value); - key = key.SubStr(prefixLen); - - return nullptr; - } - + key = key.SubStr(prefixLen); + + return nullptr; + } + ui64 Measure(const TBuilderImpl*) const override { return Data->Size; } @@ -351,26 +351,26 @@ public: EPayload PayloadType; - inline const char* PayloadPtr() const { - return ((const char*) this) + sizeof(TNode); - } - + inline const char* PayloadPtr() const { + return ((const char*) this) + sizeof(TNode); + } + inline char* PayloadPtr() { return ((char*) this) + sizeof(TNode); } // *Payload() - inline const char*& PayloadAsPtr() const { - const char** payload = (const char**) PayloadPtr(); - return *payload; - } - + inline const char*& PayloadAsPtr() const { + const char** payload = (const char**) PayloadPtr(); + return *payload; + } + inline char*& PayloadAsPtr() { char** payload = (char**) PayloadPtr(); return *payload; } - inline const char* GetPayload() const { + inline const char* GetPayload() const { switch (PayloadType) { case DATA_INSIDE: return PayloadPtr(); @@ -383,11 +383,11 @@ public: } } - inline char* GetPayload() { - const TNode* thiz = this; - return const_cast<char*>(thiz->GetPayload()); // const_cast is to avoid copy-paste style - } - + inline char* GetPayload() { + const TNode* thiz = this; + return const_cast<char*>(thiz->GetPayload()); // const_cast is to avoid copy-paste style + } + bool IsFinal() const { return PayloadType != DATA_ABSENT; } @@ -420,8 +420,8 @@ public: // TCompactTrieBuilder template <class T, class D, class S> -TCompactTrieBuilder<T, D, S>::TCompactTrieBuilder(TCompactTrieBuilderFlags flags, TPacker packer, IAllocator* alloc) - : Impl(new TCompactTrieBuilderImpl(flags, packer, alloc)) +TCompactTrieBuilder<T, D, S>::TCompactTrieBuilder(TCompactTrieBuilderFlags flags, TPacker packer, IAllocator* alloc) + : Impl(new TCompactTrieBuilderImpl(flags, packer, alloc)) { } @@ -452,7 +452,7 @@ bool TCompactTrieBuilder<T, D, S>::Find(const TSymbol* key, size_t keylen, TData template <class T, class D, class S> bool TCompactTrieBuilder<T, D, S>::FindLongestPrefix( - const TSymbol* key, size_t keylen, size_t* prefixlen, TData* value) const { + const TSymbol* key, size_t 
keylen, size_t* prefixlen, TData* value) const { return Impl->FindLongestPrefix(key, keylen, prefixlen, value); } @@ -484,10 +484,10 @@ size_t TCompactTrieBuilder<T, D, S>::GetNodeCount() const { // TCompactTrieBuilder::TCompactTrieBuilderImpl template <class T, class D, class S> -TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TCompactTrieBuilderImpl(TCompactTrieBuilderFlags flags, TPacker packer, IAllocator* alloc) - : Pool(1000000, TMemoryPool::TLinearGrow::Instance(), alloc) +TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TCompactTrieBuilderImpl(TCompactTrieBuilderFlags flags, TPacker packer, IAllocator* alloc) + : Pool(1000000, TMemoryPool::TLinearGrow::Instance(), alloc) , PayloadSize(sizeof(void*)) // XXX: find better value - , NodeAllocator(new TFixedSizeAllocator(sizeof(TNode) + PayloadSize, alloc)) + , NodeAllocator(new TFixedSizeAllocator(sizeof(TNode) + PayloadSize, alloc)) , Flags(flags) , EntryCount(0) , NodeCount(1) @@ -662,81 +662,81 @@ template <class T, class D, class S> bool TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::FindEntry(const TSymbol* key, size_t keylen, TData* value) const { using namespace NCompactTrie; - if (!keylen) { - const char zero = '\0'; - return FindEntryImpl(&zero, 1, value); - } else { - size_t ckeylen = keylen * sizeof(TSymbol); - TTempBuf ckeybuf(ckeylen); - ConvertSymbolArrayToChar(key, keylen, ckeybuf, ckeylen); - return FindEntryImpl(ckeybuf.Data(), ckeylen, value); + if (!keylen) { + const char zero = '\0'; + return FindEntryImpl(&zero, 1, value); + } else { + size_t ckeylen = keylen * sizeof(TSymbol); + TTempBuf ckeybuf(ckeylen); + ConvertSymbolArrayToChar(key, keylen, ckeybuf, ckeylen); + return FindEntryImpl(ckeybuf.Data(), ckeylen, value); } -} - -template <class T, class D, class S> -bool TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::FindEntryImpl(const char* keyptr, size_t keylen, TData* value) const { - const TNode* node = Root; - bool result = false; - TStringBuf key(keyptr, keylen); - while (key && (node = node->Subtree()->Find(key, value, result, Packer))) { +} + +template <class T, class D, class S> +bool TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::FindEntryImpl(const char* keyptr, size_t keylen, TData* value) const { + const TNode* node = Root; + bool result = false; + TStringBuf key(keyptr, keylen); + while (key && (node = node->Subtree()->Find(key, value, result, Packer))) { } - return result; + return result; } template <class T, class D, class S> bool TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::FindLongestPrefix( - const TSymbol* key, size_t keylen, size_t* prefixlen, TData* value) const { + const TSymbol* key, size_t keylen, size_t* prefixlen, TData* value) const { using namespace NCompactTrie; - if (!keylen) { - const char zero = '\0'; - const bool ret = FindLongestPrefixImpl(&zero, 1, prefixlen, value); - if (ret && prefixlen) - *prefixlen = 0; // empty key found - return ret; - } else { - size_t ckeylen = keylen * sizeof(TSymbol); - TTempBuf ckeybuf(ckeylen); - ConvertSymbolArrayToChar(key, keylen, ckeybuf, ckeylen); - bool ret = FindLongestPrefixImpl(ckeybuf.Data(), ckeylen, prefixlen, value); - if (ret && prefixlen && *prefixlen == 1 && ckeybuf.Data()[0] == '\0') - *prefixlen = 0; // if we have found empty key, set prefixlen to zero - else if (!ret) // try to find value with empty key, because empty key is prefix of a every key - ret = FindLongestPrefix(nullptr, 0, prefixlen, value); - - if (ret && prefixlen) - *prefixlen /= sizeof(TSymbol); - - return ret; + if 
(!keylen) { + const char zero = '\0'; + const bool ret = FindLongestPrefixImpl(&zero, 1, prefixlen, value); + if (ret && prefixlen) + *prefixlen = 0; // empty key found + return ret; + } else { + size_t ckeylen = keylen * sizeof(TSymbol); + TTempBuf ckeybuf(ckeylen); + ConvertSymbolArrayToChar(key, keylen, ckeybuf, ckeylen); + bool ret = FindLongestPrefixImpl(ckeybuf.Data(), ckeylen, prefixlen, value); + if (ret && prefixlen && *prefixlen == 1 && ckeybuf.Data()[0] == '\0') + *prefixlen = 0; // if we have found empty key, set prefixlen to zero + else if (!ret) // try to find value with empty key, because empty key is prefix of a every key + ret = FindLongestPrefix(nullptr, 0, prefixlen, value); + + if (ret && prefixlen) + *prefixlen /= sizeof(TSymbol); + + return ret; } -} - -template <class T, class D, class S> -bool TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::FindLongestPrefixImpl(const char* keyptr, size_t keylen, size_t* prefixLen, TData* value) const { - const TNode* node = Root; - const TNode* lastFinalNode = nullptr; - bool endResult = false; - TStringBuf key(keyptr, keylen); - TStringBuf keyTail = key; - TStringBuf lastFinalKeyTail; - while (keyTail && (node = node->Subtree()->FindLongestPrefix(keyTail, value, endResult, Packer))) { - if (endResult) // no more ways to find prefix and prefix has been found - break; - - if (node->IsFinal()) { - lastFinalNode = node; - lastFinalKeyTail = keyTail; +} + +template <class T, class D, class S> +bool TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::FindLongestPrefixImpl(const char* keyptr, size_t keylen, size_t* prefixLen, TData* value) const { + const TNode* node = Root; + const TNode* lastFinalNode = nullptr; + bool endResult = false; + TStringBuf key(keyptr, keylen); + TStringBuf keyTail = key; + TStringBuf lastFinalKeyTail; + while (keyTail && (node = node->Subtree()->FindLongestPrefix(keyTail, value, endResult, Packer))) { + if (endResult) // no more ways to find prefix and prefix has been found + break; + + if (node->IsFinal()) { + lastFinalNode = node; + lastFinalKeyTail = keyTail; } } - if (!endResult && lastFinalNode) { + if (!endResult && lastFinalNode) { if (value) - Packer.UnpackLeaf(lastFinalNode->GetPayload(), *value); - keyTail = lastFinalKeyTail; - endResult = true; + Packer.UnpackLeaf(lastFinalNode->GetPayload(), *value); + keyTail = lastFinalKeyTail; + endResult = true; } - if (endResult && prefixLen) + if (endResult && prefixLen) *prefixLen = keyTail ? 
key.size() - keyTail.size() : key.size(); - return endResult; + return endResult; } template <class T, class D, class S> @@ -991,60 +991,60 @@ typename TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TNode::TArcSet:: } template <class T, class D, class S> -typename TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TNode::TArcSet::const_iterator - TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TNode::TArcSet::Find(char ch) const { - using namespace NCompTriePrivate; - const_iterator it = LowerBound(this->begin(), this->end(), ch, TCmp()); - - if (it != this->end() && it->Label[0] == (unsigned char)ch) { - return it; - } - - return this->end(); -} - -template <class T, class D, class S> +typename TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TNode::TArcSet::const_iterator + TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TNode::TArcSet::Find(char ch) const { + using namespace NCompTriePrivate; + const_iterator it = LowerBound(this->begin(), this->end(), ch, TCmp()); + + if (it != this->end() && it->Label[0] == (unsigned char)ch) { + return it; + } + + return this->end(); +} + +template <class T, class D, class S> void TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TNode::TArcSet::Add(const TBlob& s, TNode* node) { using namespace NCompTriePrivate; this->insert(LowerBound(this->begin(), this->end(), s[0], TCmp()), TArc(s, node)); } -template <class T, class D, class S> -const typename TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TNode* - TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TNode::TArcSet::Find( - TStringBuf& key, TData* value, bool& result, const TPacker& packer) const { - result = false; - if (!key) - return nullptr; - - const const_iterator it = Find(key[0]); - if (it != this->end()) { - const char* const arcLabel = it->Label.AsCharPtr(); - const size_t arcLabelLen = it->Label.Length(); +template <class T, class D, class S> +const typename TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TNode* + TCompactTrieBuilder<T, D, S>::TCompactTrieBuilderImpl::TNode::TArcSet::Find( + TStringBuf& key, TData* value, bool& result, const TPacker& packer) const { + result = false; + if (!key) + return nullptr; + + const const_iterator it = Find(key[0]); + if (it != this->end()) { + const char* const arcLabel = it->Label.AsCharPtr(); + const size_t arcLabelLen = it->Label.Length(); if (key.size() >= arcLabelLen && memcmp(key.data(), arcLabel, arcLabelLen) == 0) { - const TStringBuf srcKey = key; - key = key.SubStr(arcLabelLen); - const TNode* const node = it->Node; + const TStringBuf srcKey = key; + key = key.SubStr(arcLabelLen); + const TNode* const node = it->Node; if (srcKey.size() == arcLabelLen) { - // unpack value of it->Node, if it has value - if (!node->IsFinal()) - return nullptr; - - if (value) - packer.UnpackLeaf(node->GetPayload(), *value); - - result = true; - return nullptr; - } - - // find in subtree - return node; - } - } - - return nullptr; -} - + // unpack value of it->Node, if it has value + if (!node->IsFinal()) + return nullptr; + + if (value) + packer.UnpackLeaf(node->GetPayload(), *value); + + result = true; + return nullptr; + } + + // find in subtree + return node; + } + } + + return nullptr; +} + // Different //---------------------------------------------------------------------------------------------------------------------- diff --git a/library/cpp/containers/comptrie/comptrie_impl.h b/library/cpp/containers/comptrie/comptrie_impl.h index f41c38311a4..d0ef94a518b 100644 --- 
a/library/cpp/containers/comptrie/comptrie_impl.h +++ b/library/cpp/containers/comptrie/comptrie_impl.h @@ -180,7 +180,7 @@ namespace NCompactTrie { // Advances the data pointer to the root of the subtrie beginning after the symbol, // zeroes it if this subtrie is empty. // If there is a value associated with the symbol, makes the value pointer point to it, - // otherwise sets it to nullptr. + // otherwise sets it to nullptr. // Returns true if the symbol was succesfully found in the trie, false otherwise. template <typename TSymbol, class TPacker> Y_FORCE_INLINE bool Advance(const char*& datapos, const char* const dataend, const char*& value, @@ -193,7 +193,7 @@ namespace NCompactTrie { return false; // no such arc } - value = nullptr; + value = nullptr; Y_ASSERT(datapos <= dataend); if ((flags & MT_FINAL)) { @@ -203,7 +203,7 @@ namespace NCompactTrie { if (!(flags & MT_NEXT)) { if (i == 0) { - datapos = nullptr; + datapos = nullptr; return true; } return false; // no further way diff --git a/library/cpp/containers/comptrie/comptrie_trie.h b/library/cpp/containers/comptrie/comptrie_trie.h index 40ec1e52b32..f006f3cf791 100644 --- a/library/cpp/containers/comptrie/comptrie_trie.h +++ b/library/cpp/containers/comptrie/comptrie_trie.h @@ -80,8 +80,8 @@ public: bool IsInitialized() const; bool IsEmpty() const; - bool Find(const TSymbol* key, size_t keylen, TData* value = nullptr) const; - bool Find(const TKeyBuf& key, TData* value = nullptr) const { + bool Find(const TSymbol* key, size_t keylen, TData* value = nullptr) const; + bool Find(const TKeyBuf& key, TData* value = nullptr) const { return Find(key.data(), key.size(), value); } @@ -122,8 +122,8 @@ public: void FindPhrases(const TKeyBuf& key, TPhraseMatchVector& matches, TSymbol separator = TSymbol(' ')) const { return FindPhrases(key.data(), key.size(), matches, separator); } - bool FindLongestPrefix(const TSymbol* key, size_t keylen, size_t* prefixLen, TData* value = nullptr, bool* hasNext = nullptr) const; - bool FindLongestPrefix(const TKeyBuf& key, size_t* prefixLen, TData* value = nullptr, bool* hasNext = nullptr) const { + bool FindLongestPrefix(const TSymbol* key, size_t keylen, size_t* prefixLen, TData* value = nullptr, bool* hasNext = nullptr) const; + bool FindLongestPrefix(const TKeyBuf& key, size_t* prefixLen, TData* value = nullptr, bool* hasNext = nullptr) const { return FindLongestPrefix(key.data(), key.size(), prefixLen, value, hasNext); } @@ -315,18 +315,18 @@ void TCompactTrie<T, D, S>::Init(const TBlob& data, TPacker packer) { template <class T, class D, class S> bool TCompactTrie<T, D, S>::IsInitialized() const { - return DataHolder.Data() != nullptr; + return DataHolder.Data() != nullptr; } template <class T, class D, class S> bool TCompactTrie<T, D, S>::IsEmpty() const { - return DataHolder.Size() == 0 && EmptyValue == nullptr; + return DataHolder.Size() == 0 && EmptyValue == nullptr; } template <class T, class D, class S> -bool TCompactTrie<T, D, S>::Find(const TSymbol* key, size_t keylen, TData* value) const { +bool TCompactTrie<T, D, S>::Find(const TSymbol* key, size_t keylen, TData* value) const { size_t prefixLen = 0; - const char* valuepos = nullptr; + const char* valuepos = nullptr; bool hasNext; if (!LookupLongestPrefix(key, keylen, prefixLen, valuepos, hasNext) || prefixLen != keylen) return false; @@ -366,7 +366,7 @@ bool TCompactTrie<T, D, S>::FindTails(const TSymbol* key, size_t keylen, TCompac const char* const dataend = datapos + len; const TSymbol* keyend = key + keylen; - const char* value = 
nullptr; + const char* value = nullptr; while (key != keyend) { T label = *(key++); @@ -400,7 +400,7 @@ inline bool TCompactTrie<T, D, S>::FindTails(TSymbol label, TCompactTrie<T, D, S const char* datastart = DataHolder.AsCharPtr(); const char* dataend = datastart + len; const char* datapos = datastart; - const char* value = nullptr; + const char* value = nullptr; if (!NCompactTrie::Advance(datapos, dataend, value, label, Packer)) return false; @@ -460,8 +460,8 @@ void TCompactTrie<T, D, S>::Print(IOutputStream& os) { } template <class T, class D, class S> -bool TCompactTrie<T, D, S>::FindLongestPrefix(const TSymbol* key, size_t keylen, size_t* prefixLen, TData* value, bool* hasNext) const { - const char* valuepos = nullptr; +bool TCompactTrie<T, D, S>::FindLongestPrefix(const TSymbol* key, size_t keylen, size_t* prefixLen, TData* value, bool* hasNext) const { + const char* valuepos = nullptr; size_t tempPrefixLen = 0; bool tempHasNext; bool found = LookupLongestPrefix(key, keylen, tempPrefixLen, valuepos, tempHasNext); @@ -475,7 +475,7 @@ bool TCompactTrie<T, D, S>::FindLongestPrefix(const TSymbol* key, size_t keylen, } template <class T, class D, class S> -bool TCompactTrie<T, D, S>::LookupLongestPrefix(const TSymbol* key, size_t keylen, size_t& prefixLen, const char*& valuepos, bool& hasNext) const { +bool TCompactTrie<T, D, S>::LookupLongestPrefix(const TSymbol* key, size_t keylen, size_t& prefixLen, const char*& valuepos, bool& hasNext) const { using namespace NCompactTrie; const char* datapos = DataHolder.AsCharPtr(); diff --git a/library/cpp/containers/comptrie/comptrie_ut.cpp b/library/cpp/containers/comptrie/comptrie_ut.cpp index 74bee09b5d6..707468d90ec 100644 --- a/library/cpp/containers/comptrie/comptrie_ut.cpp +++ b/library/cpp/containers/comptrie/comptrie_ut.cpp @@ -5,7 +5,7 @@ #include <utility> #include <util/charset/wide.h> -#include <util/generic/algorithm.h> +#include <util/generic/algorithm.h> #include <util/generic/buffer.h> #include <util/generic/map.h> #include <util/generic/vector.h> @@ -17,9 +17,9 @@ #include <util/random/random.h> #include <util/random/fast.h> -#include <util/string/hex.h> +#include <util/string/hex.h> #include <util/string/cast.h> - + #include "comptrie.h" #include "set.h" #include "first_symbol_iterator.h" @@ -27,8 +27,8 @@ #include "pattern_searcher.h" #include <array> -#include <iterator> - +#include <iterator> + class TCompactTrieTest: public TTestBase { private: @@ -108,7 +108,7 @@ private: UNIT_TEST(TestBuilderFindLongestPrefix); UNIT_TEST(TestBuilderFindLongestPrefixWithEmptyValue); - + UNIT_TEST(TestPatternSearcherEmpty); UNIT_TEST(TestPatternSearcherSimple); UNIT_TEST(TestPatternSearcherRandom); @@ -242,10 +242,10 @@ public: void TestFirstSymbolIteratorChar32(); void TestArrayPacker(); - - void TestBuilderFindLongestPrefix(); - void TestBuilderFindLongestPrefix(size_t keysCount, double branchProbability, bool isPrefixGrouped, bool hasEmptyKey); - void TestBuilderFindLongestPrefixWithEmptyValue(); + + void TestBuilderFindLongestPrefix(); + void TestBuilderFindLongestPrefix(size_t keysCount, double branchProbability, bool isPrefixGrouped, bool hasEmptyKey); + void TestBuilderFindLongestPrefixWithEmptyValue(); void TestPatternSearcherOnDataset( const TVector<TString>& patterns, @@ -396,7 +396,7 @@ void TCompactTrieTest::CheckData(const char* data, size_t datalen) { UNIT_ASSERT(trie.FindLongestPrefix(key, &prefixLen, &value)); UNIT_ASSERT_EQUAL(len, prefixLen); UNIT_ASSERT_EQUAL(len * 2, value); - UNIT_ASSERT(trie.FindLongestPrefix(key, 
&prefixLen, nullptr)); + UNIT_ASSERT(trie.FindLongestPrefix(key, &prefixLen, nullptr)); UNIT_ASSERT_EQUAL(len, prefixLen); } @@ -646,21 +646,21 @@ void TCompactTrieTest::TestUninitializedNonEmpty() { UNIT_ASSERT(it == tails.End()); } -static char RandChar() { - return char(RandomNumber<size_t>() % 256); -} - +static char RandChar() { + return char(RandomNumber<size_t>() % 256); +} + static TString RandStr(const size_t max) { size_t len = RandomNumber<size_t>() % max; TString key; for (size_t j = 0; j < len; ++j) - key += RandChar(); + key += RandChar(); return key; } template <class T, bool minimize> void TCompactTrieTest::TestRandom(const size_t n, const size_t maxKeySize) { - const TStringBuf EMPTY_KEY = TStringBuf("", 1); + const TStringBuf EMPTY_KEY = TStringBuf("", 1); TCompactTrieBuilder<char, typename T::TData, T> builder; typedef TMap<TString, typename T::TData> TKeys; TKeys keys; @@ -668,7 +668,7 @@ void TCompactTrieTest::TestRandom(const size_t n, const size_t maxKeySize) { typename T::TData dummy; for (size_t i = 0; i < n; ++i) { const TString key = RandStr(maxKeySize); - if (key != EMPTY_KEY && keys.find(key) == keys.end()) { + if (key != EMPTY_KEY && keys.find(key) == keys.end()) { const typename T::TData val = T::Data(key); keys[key] = val; UNIT_ASSERT_C(!builder.Find(key.data(), key.size(), &dummy), "key = " << HexEncode(TString(key))); @@ -691,7 +691,7 @@ void TCompactTrieTest::TestRandom(const size_t n, const size_t maxKeySize) { TCompactTrieBuilder<char, typename T::TData, T> prefixGroupedBuilder(CTBF_PREFIX_GROUPED); for (typename TKeys::const_iterator i = keys.begin(), mi = keys.end(); i != mi; ++i) { - UNIT_ASSERT(!prefixGroupedBuilder.Find(i->first.c_str(), i->first.size(), &dummy)); + UNIT_ASSERT(!prefixGroupedBuilder.Find(i->first.c_str(), i->first.size(), &dummy)); UNIT_ASSERT(trie.Find(i->first.c_str(), i->first.size(), &dummy)); UNIT_ASSERT(dummy == i->second); if (minimize) { @@ -700,17 +700,17 @@ void TCompactTrieTest::TestRandom(const size_t n, const size_t maxKeySize) { } prefixGroupedBuilder.Add(i->first.c_str(), i->first.size(), dummy); - UNIT_ASSERT(prefixGroupedBuilder.Find(i->first.c_str(), i->first.size(), &dummy)); - - for (typename TKeys::const_iterator j = keys.begin(), end = keys.end(); j != end; ++j) { - typename T::TData valFound; - if (j->first <= i->first) { - UNIT_ASSERT(prefixGroupedBuilder.Find(j->first.c_str(), j->first.size(), &valFound)); - UNIT_ASSERT_VALUES_EQUAL(j->second, valFound); - } else { - UNIT_ASSERT(!prefixGroupedBuilder.Find(j->first.c_str(), j->first.size(), &valFound)); - } - } + UNIT_ASSERT(prefixGroupedBuilder.Find(i->first.c_str(), i->first.size(), &dummy)); + + for (typename TKeys::const_iterator j = keys.begin(), end = keys.end(); j != end; ++j) { + typename T::TData valFound; + if (j->first <= i->first) { + UNIT_ASSERT(prefixGroupedBuilder.Find(j->first.c_str(), j->first.size(), &valFound)); + UNIT_ASSERT_VALUES_EQUAL(j->second, valFound); + } else { + UNIT_ASSERT(!prefixGroupedBuilder.Find(j->first.c_str(), j->first.size(), &valFound)); + } + } } TBufferStream prefixGroupedBuffer; @@ -790,18 +790,18 @@ void TCompactTrieTest::TestPrefixGrouped() { }; for (size_t i = 0; i < Y_ARRAY_SIZE(data); ++i) { - ui32 val = strlen(data[i]) + 1; - b1.Add(data[i], strlen(data[i]), val); + ui32 val = strlen(data[i]) + 1; + b1.Add(data[i], strlen(data[i]), val); for (size_t j = 0; j < Y_ARRAY_SIZE(data); ++j) { - ui32 mustHave = strlen(data[j]) + 1; - ui32 found = 0; - if (j <= i) { - UNIT_ASSERT(b1.Find(data[j], strlen(data[j]), 
&found)); - UNIT_ASSERT_VALUES_EQUAL(mustHave, found); - } else { - UNIT_ASSERT(!b1.Find(data[j], strlen(data[j]), &found)); - } - } + ui32 mustHave = strlen(data[j]) + 1; + ui32 found = 0; + if (j <= i) { + UNIT_ASSERT(b1.Find(data[j], strlen(data[j]), &found)); + UNIT_ASSERT_VALUES_EQUAL(mustHave, found); + } else { + UNIT_ASSERT(!b1.Find(data[j], strlen(data[j]), &found)); + } + } } { @@ -1017,7 +1017,7 @@ class TCompactTrieTest::TDummyPacker: public TNullPacker<T> { public: static T Data(const TString&) { T data; - TNullPacker<T>().UnpackLeaf(nullptr, data); + TNullPacker<T>().UnpackLeaf(nullptr, data); return data; } @@ -1280,7 +1280,7 @@ void TCompactTrieTest::TestFindLongestPrefixWithEmptyValue() { } { TCompactTrie<wchar16, ui32> trie(buffer.Buffer().Data(), buffer.Buffer().Size()); - size_t prefixLen = 123; + size_t prefixLen = 123; ui32 value = 0; UNIT_ASSERT(trie.FindLongestPrefix(u"google", &prefixLen, &value)); @@ -1465,121 +1465,121 @@ void TCompactTrieTest::TestArrayPacker() { UNIT_ASSERT_VALUES_EQUAL(dataZzz.second, trieTwo.Get(dataZzz.first)); UNIT_ASSERT_VALUES_EQUAL(dataWww.second, trieTwo.Get(dataWww.first)); } - -void TCompactTrieTest::TestBuilderFindLongestPrefix() { - const size_t sizes[] = {10, 100}; + +void TCompactTrieTest::TestBuilderFindLongestPrefix() { + const size_t sizes[] = {10, 100}; const double branchProbabilities[] = {0.01, 0.1, 0.5, 0.9, 0.99}; - for (size_t size : sizes) { - for (double branchProbability : branchProbabilities) { - TestBuilderFindLongestPrefix(size, branchProbability, false, false); - TestBuilderFindLongestPrefix(size, branchProbability, false, true); - TestBuilderFindLongestPrefix(size, branchProbability, true, false); - TestBuilderFindLongestPrefix(size, branchProbability, true, true); - } - } -} - -void TCompactTrieTest::TestBuilderFindLongestPrefix(size_t keysCount, double branchProbability, bool isPrefixGrouped, bool hasEmptyKey) { + for (size_t size : sizes) { + for (double branchProbability : branchProbabilities) { + TestBuilderFindLongestPrefix(size, branchProbability, false, false); + TestBuilderFindLongestPrefix(size, branchProbability, false, true); + TestBuilderFindLongestPrefix(size, branchProbability, true, false); + TestBuilderFindLongestPrefix(size, branchProbability, true, true); + } + } +} + +void TCompactTrieTest::TestBuilderFindLongestPrefix(size_t keysCount, double branchProbability, bool isPrefixGrouped, bool hasEmptyKey) { TVector<TString> keys; TString keyToAdd; - for (size_t i = 0; i < keysCount; ++i) { - const size_t prevKeyLen = keyToAdd.Size(); - // add two random chars to prev key - keyToAdd += RandChar(); - keyToAdd += RandChar(); - const bool changeBranch = prevKeyLen && RandomNumber<double>() < branchProbability; - if (changeBranch) { - const size_t branchPlace = RandomNumber<size_t>(prevKeyLen + 1); // random place in [0, prevKeyLen] - *(keyToAdd.begin() + branchPlace) = RandChar(); - } - keys.push_back(keyToAdd); - } - - if (isPrefixGrouped) - Sort(keys.begin(), keys.end()); - else + for (size_t i = 0; i < keysCount; ++i) { + const size_t prevKeyLen = keyToAdd.Size(); + // add two random chars to prev key + keyToAdd += RandChar(); + keyToAdd += RandChar(); + const bool changeBranch = prevKeyLen && RandomNumber<double>() < branchProbability; + if (changeBranch) { + const size_t branchPlace = RandomNumber<size_t>(prevKeyLen + 1); // random place in [0, prevKeyLen] + *(keyToAdd.begin() + branchPlace) = RandChar(); + } + keys.push_back(keyToAdd); + } + + if (isPrefixGrouped) + Sort(keys.begin(), 
keys.end()); + else Shuffle(keys.begin(), keys.end()); - + TCompactTrieBuilder<char, TString> builder(isPrefixGrouped ? CTBF_PREFIX_GROUPED : CTBF_NONE); const TString EMPTY_VALUE = "empty"; - if (hasEmptyKey) - builder.Add(nullptr, 0, EMPTY_VALUE); - - for (size_t i = 0; i < keysCount; ++i) { + if (hasEmptyKey) + builder.Add(nullptr, 0, EMPTY_VALUE); + + for (size_t i = 0; i < keysCount; ++i) { const TString& key = keys[i]; - - for (size_t j = 0; j < keysCount; ++j) { + + for (size_t j = 0; j < keysCount; ++j) { const TString& otherKey = keys[j]; - const bool exists = j < i; - size_t expectedSize = 0; - if (exists) { - expectedSize = otherKey.size(); - } else { - size_t max = 0; - for (size_t k = 0; k < i; ++k) + const bool exists = j < i; + size_t expectedSize = 0; + if (exists) { + expectedSize = otherKey.size(); + } else { + size_t max = 0; + for (size_t k = 0; k < i; ++k) if (keys[k].Size() < otherKey.Size() && keys[k].Size() > max && otherKey.StartsWith(keys[k])) - max = keys[k].Size(); - expectedSize = max; - } - - size_t prefixSize = 0xfcfcfc; + max = keys[k].Size(); + expectedSize = max; + } + + size_t prefixSize = 0xfcfcfc; TString value = "abcd"; - const bool expectedResult = hasEmptyKey || expectedSize != 0; + const bool expectedResult = hasEmptyKey || expectedSize != 0; UNIT_ASSERT_VALUES_EQUAL_C(expectedResult, builder.FindLongestPrefix(otherKey.data(), otherKey.size(), &prefixSize, &value), "otherKey = " << HexEncode(otherKey)); - if (expectedResult) { - UNIT_ASSERT_VALUES_EQUAL(expectedSize, prefixSize); - if (expectedSize) { - UNIT_ASSERT_VALUES_EQUAL(TStringBuf(otherKey).SubStr(0, prefixSize), value); - } else { - UNIT_ASSERT_VALUES_EQUAL(EMPTY_VALUE, value); - } - } else { - UNIT_ASSERT_VALUES_EQUAL("abcd", value); - UNIT_ASSERT_VALUES_EQUAL(0xfcfcfc, prefixSize); - } - - for (int c = 0; c < 10; ++c) { + if (expectedResult) { + UNIT_ASSERT_VALUES_EQUAL(expectedSize, prefixSize); + if (expectedSize) { + UNIT_ASSERT_VALUES_EQUAL(TStringBuf(otherKey).SubStr(0, prefixSize), value); + } else { + UNIT_ASSERT_VALUES_EQUAL(EMPTY_VALUE, value); + } + } else { + UNIT_ASSERT_VALUES_EQUAL("abcd", value); + UNIT_ASSERT_VALUES_EQUAL(0xfcfcfc, prefixSize); + } + + for (int c = 0; c < 10; ++c) { TString extendedKey = otherKey; - extendedKey += RandChar(); - size_t extendedPrefixSize = 0xdddddd; + extendedKey += RandChar(); + size_t extendedPrefixSize = 0xdddddd; TString extendedValue = "dcba"; UNIT_ASSERT_VALUES_EQUAL(expectedResult, builder.FindLongestPrefix(extendedKey.data(), extendedKey.size(), &extendedPrefixSize, &extendedValue)); - if (expectedResult) { - UNIT_ASSERT_VALUES_EQUAL(value, extendedValue); - UNIT_ASSERT_VALUES_EQUAL(prefixSize, extendedPrefixSize); - } else { - UNIT_ASSERT_VALUES_EQUAL("dcba", extendedValue); - UNIT_ASSERT_VALUES_EQUAL(0xdddddd, extendedPrefixSize); - } - } - } + if (expectedResult) { + UNIT_ASSERT_VALUES_EQUAL(value, extendedValue); + UNIT_ASSERT_VALUES_EQUAL(prefixSize, extendedPrefixSize); + } else { + UNIT_ASSERT_VALUES_EQUAL("dcba", extendedValue); + UNIT_ASSERT_VALUES_EQUAL(0xdddddd, extendedPrefixSize); + } + } + } builder.Add(key.data(), key.size(), key); - } - - TBufferOutput buffer; - builder.Save(buffer); -} - -void TCompactTrieTest::TestBuilderFindLongestPrefixWithEmptyValue() { - TCompactTrieBuilder<wchar16, ui32> builder; + } + + TBufferOutput buffer; + builder.Save(buffer); +} + +void TCompactTrieTest::TestBuilderFindLongestPrefixWithEmptyValue() { + TCompactTrieBuilder<wchar16, ui32> builder; builder.Add(u"", 42); 
builder.Add(u"yandex", 271828); builder.Add(u"ya", 31415); - - size_t prefixLen = 123; - ui32 value = 0; - + + size_t prefixLen = 123; + ui32 value = 0; + UNIT_ASSERT(builder.FindLongestPrefix(u"google", &prefixLen, &value)); - UNIT_ASSERT_VALUES_EQUAL(prefixLen, 0); - UNIT_ASSERT_VALUES_EQUAL(value, 42); - + UNIT_ASSERT_VALUES_EQUAL(prefixLen, 0); + UNIT_ASSERT_VALUES_EQUAL(value, 42); + UNIT_ASSERT(builder.FindLongestPrefix(u"yahoo", &prefixLen, &value)); - UNIT_ASSERT_VALUES_EQUAL(prefixLen, 2); - UNIT_ASSERT_VALUES_EQUAL(value, 31415); - - TBufferOutput buffer; - builder.Save(buffer); -} + UNIT_ASSERT_VALUES_EQUAL(prefixLen, 2); + UNIT_ASSERT_VALUES_EQUAL(value, 31415); + + TBufferOutput buffer; + builder.Save(buffer); +} void TCompactTrieTest::TestPatternSearcherEmpty() { TCompactPatternSearcherBuilder<char, ui32> builder; diff --git a/library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.cpp b/library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.cpp index 7334a43c362..ff3b6c2ff64 100644 --- a/library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.cpp +++ b/library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.cpp @@ -1 +1 @@ -#include "disjoint_interval_tree.h" +#include "disjoint_interval_tree.h" diff --git a/library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.h b/library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.h index 1f899c99913..d3bd6f77483 100644 --- a/library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.h +++ b/library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.h @@ -1,227 +1,227 @@ -#pragma once - -#include <util/generic/map.h> -#include <util/system/yassert.h> - -#include <type_traits> - -template <class T> -class TDisjointIntervalTree { -private: - static_assert(std::is_integral<T>::value, "expect std::is_integral<T>::value"); - - using TTree = TMap<T, T>; // [key, value) - using TIterator = typename TTree::iterator; - using TConstIterator = typename TTree::const_iterator; - using TReverseIterator = typename TTree::reverse_iterator; - using TThis = TDisjointIntervalTree<T>; - - TTree Tree; - size_t NumElements; - -public: - TDisjointIntervalTree() - : NumElements() - { - } - - void Insert(const T t) { - InsertInterval(t, t + 1); - } - - // we assume that none of elements from [begin, end) belong to tree. - void InsertInterval(const T begin, const T end) { - InsertIntervalImpl(begin, end); - NumElements += (size_t)(end - begin); - } - - bool Has(const T t) const { - return const_cast<TThis*>(this)->FindContaining(t) != Tree.end(); - } - - bool Intersects(const T begin, const T end) { - if (Empty()) { - return false; - } - - TIterator l = Tree.lower_bound(begin); - if (l != Tree.end()) { - if (l->first < end) { - return true; - } else if (l != Tree.begin()) { - --l; - return l->second > begin; - } else { - return false; - } - } else { - auto last = Tree.rbegin(); - return begin < last->second; - } - } - - TConstIterator FindContaining(const T t) const { - return const_cast<TThis*>(this)->FindContaining(t); - } - - // Erase element. Returns true when element has been deleted, otherwise false. - bool Erase(const T t) { - TIterator n = FindContaining(t); - if (n == Tree.end()) { - return false; - } - - --NumElements; - - T& begin = const_cast<T&>(n->first); - T& end = const_cast<T&>(n->second); - - // Optimization hack. - if (t == begin) { - if (++begin == end) { // OK to change key since intervals do not intersect. 
- Tree.erase(n); - return true; - } - - } else if (t == end - 1) { - --end; - - } else { - const T e = end; - end = t; - InsertIntervalImpl(t + 1, e); - } - - Y_ASSERT(begin < end); - return true; - } - - // Erase interval. Returns number of elements removed from set. - size_t EraseInterval(const T begin, const T end) { - Y_ASSERT(begin < end); - - if (Empty()) { - return 0; - } - - size_t elementsRemoved = 0; - - TIterator completelyRemoveBegin = Tree.lower_bound(begin); - if ((completelyRemoveBegin != Tree.end() && completelyRemoveBegin->first > begin && completelyRemoveBegin != Tree.begin()) - || completelyRemoveBegin == Tree.end()) { - // Look at the interval. It could contain [begin, end). - TIterator containingBegin = completelyRemoveBegin; - --containingBegin; - if (containingBegin->first < begin && begin < containingBegin->second) { // Contains begin. - if (containingBegin->second > end) { // Contains end. - const T prevEnd = containingBegin->second; - Y_ASSERT(containingBegin->second - begin <= NumElements); - - Y_ASSERT(containingBegin->second - containingBegin->first > end - begin); - containingBegin->second = begin; - InsertIntervalImpl(end, prevEnd); - - elementsRemoved = end - begin; - NumElements -= elementsRemoved; - return elementsRemoved; - } else { - elementsRemoved += containingBegin->second - begin; - containingBegin->second = begin; - } - } - } - - TIterator completelyRemoveEnd = completelyRemoveBegin != Tree.end() ? Tree.lower_bound(end) : Tree.end(); - if (completelyRemoveEnd != Tree.end() && completelyRemoveEnd != Tree.begin() && completelyRemoveEnd->first != end) { - TIterator containingEnd = completelyRemoveEnd; - --containingEnd; - if (containingEnd->second > end) { - T& leftBorder = const_cast<T&>(containingEnd->first); - - Y_ASSERT(leftBorder < end); - - --completelyRemoveEnd; // Don't remove the whole interval. - - // Optimization hack. - elementsRemoved += end - leftBorder; - leftBorder = end; // OK to change key since intervals do not intersect. 
- } - } - - for (TIterator i = completelyRemoveBegin; i != completelyRemoveEnd; ++i) { - elementsRemoved += i->second - i->first; - } - - Tree.erase(completelyRemoveBegin, completelyRemoveEnd); - - Y_ASSERT(elementsRemoved <= NumElements); - NumElements -= elementsRemoved; - - return elementsRemoved; - } - - void Swap(TDisjointIntervalTree& rhv) { - Tree.swap(rhv.Tree); - std::swap(NumElements, rhv.NumElements); - } - - void Clear() { - Tree.clear(); - NumElements = 0; - } - - bool Empty() const { - return Tree.empty(); - } - - size_t GetNumElements() const { - return NumElements; - } - - size_t GetNumIntervals() const { - return Tree.size(); - } - - T Min() const { - Y_ASSERT(!Empty()); - return Tree.begin()->first; - } - - T Max() const { - Y_ASSERT(!Empty()); - return Tree.rbegin()->second; - } - - TConstIterator begin() const { - return Tree.begin(); - } - - TConstIterator end() const { - return Tree.end(); - } - -private: - void InsertIntervalImpl(const T begin, const T end) { - Y_ASSERT(begin < end); - Y_ASSERT(!Intersects(begin, end)); - - TIterator l = Tree.lower_bound(begin); - TIterator p = Tree.end(); - if (l != Tree.begin()) { - p = l; - --p; - } - -#ifndef NDEBUG - TIterator u = Tree.upper_bound(begin); - Y_VERIFY_DEBUG(u == Tree.end() || u->first >= end, "Trying to add [%" PRIu64 ", %" PRIu64 ") which intersects with existing [%" PRIu64 ", %" PRIu64 ")", begin, end, u->first, u->second); - Y_VERIFY_DEBUG(l == Tree.end() || l == u, "Trying to add [%" PRIu64 ", %" PRIu64 ") which intersects with existing [%" PRIu64 ", %" PRIu64 ")", begin, end, l->first, l->second); - Y_VERIFY_DEBUG(p == Tree.end() || p->second <= begin, "Trying to add [%" PRIu64 ", %" PRIu64 ") which intersects with existing [%" PRIu64 ", %" PRIu64 ")", begin, end, p->first, p->second); -#endif - - // try to extend interval - if (p != Tree.end() && p->second == begin) { - p->second = end; +#pragma once + +#include <util/generic/map.h> +#include <util/system/yassert.h> + +#include <type_traits> + +template <class T> +class TDisjointIntervalTree { +private: + static_assert(std::is_integral<T>::value, "expect std::is_integral<T>::value"); + + using TTree = TMap<T, T>; // [key, value) + using TIterator = typename TTree::iterator; + using TConstIterator = typename TTree::const_iterator; + using TReverseIterator = typename TTree::reverse_iterator; + using TThis = TDisjointIntervalTree<T>; + + TTree Tree; + size_t NumElements; + +public: + TDisjointIntervalTree() + : NumElements() + { + } + + void Insert(const T t) { + InsertInterval(t, t + 1); + } + + // we assume that none of elements from [begin, end) belong to tree. + void InsertInterval(const T begin, const T end) { + InsertIntervalImpl(begin, end); + NumElements += (size_t)(end - begin); + } + + bool Has(const T t) const { + return const_cast<TThis*>(this)->FindContaining(t) != Tree.end(); + } + + bool Intersects(const T begin, const T end) { + if (Empty()) { + return false; + } + + TIterator l = Tree.lower_bound(begin); + if (l != Tree.end()) { + if (l->first < end) { + return true; + } else if (l != Tree.begin()) { + --l; + return l->second > begin; + } else { + return false; + } + } else { + auto last = Tree.rbegin(); + return begin < last->second; + } + } + + TConstIterator FindContaining(const T t) const { + return const_cast<TThis*>(this)->FindContaining(t); + } + + // Erase element. Returns true when element has been deleted, otherwise false. 
+ bool Erase(const T t) { + TIterator n = FindContaining(t); + if (n == Tree.end()) { + return false; + } + + --NumElements; + + T& begin = const_cast<T&>(n->first); + T& end = const_cast<T&>(n->second); + + // Optimization hack. + if (t == begin) { + if (++begin == end) { // OK to change key since intervals do not intersect. + Tree.erase(n); + return true; + } + + } else if (t == end - 1) { + --end; + + } else { + const T e = end; + end = t; + InsertIntervalImpl(t + 1, e); + } + + Y_ASSERT(begin < end); + return true; + } + + // Erase interval. Returns number of elements removed from set. + size_t EraseInterval(const T begin, const T end) { + Y_ASSERT(begin < end); + + if (Empty()) { + return 0; + } + + size_t elementsRemoved = 0; + + TIterator completelyRemoveBegin = Tree.lower_bound(begin); + if ((completelyRemoveBegin != Tree.end() && completelyRemoveBegin->first > begin && completelyRemoveBegin != Tree.begin()) + || completelyRemoveBegin == Tree.end()) { + // Look at the interval. It could contain [begin, end). + TIterator containingBegin = completelyRemoveBegin; + --containingBegin; + if (containingBegin->first < begin && begin < containingBegin->second) { // Contains begin. + if (containingBegin->second > end) { // Contains end. + const T prevEnd = containingBegin->second; + Y_ASSERT(containingBegin->second - begin <= NumElements); + + Y_ASSERT(containingBegin->second - containingBegin->first > end - begin); + containingBegin->second = begin; + InsertIntervalImpl(end, prevEnd); + + elementsRemoved = end - begin; + NumElements -= elementsRemoved; + return elementsRemoved; + } else { + elementsRemoved += containingBegin->second - begin; + containingBegin->second = begin; + } + } + } + + TIterator completelyRemoveEnd = completelyRemoveBegin != Tree.end() ? Tree.lower_bound(end) : Tree.end(); + if (completelyRemoveEnd != Tree.end() && completelyRemoveEnd != Tree.begin() && completelyRemoveEnd->first != end) { + TIterator containingEnd = completelyRemoveEnd; + --containingEnd; + if (containingEnd->second > end) { + T& leftBorder = const_cast<T&>(containingEnd->first); + + Y_ASSERT(leftBorder < end); + + --completelyRemoveEnd; // Don't remove the whole interval. + + // Optimization hack. + elementsRemoved += end - leftBorder; + leftBorder = end; // OK to change key since intervals do not intersect. 
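                // Illustrative trace, following case 11 of EraseIntervalTest later in this diff:
                // with [5, 10), [15, 20), [25, 30) stored, EraseInterval(2, 17) reaches this point
                // with containingEnd == [15, 20); rebinding its key to 17 keeps [17, 20) in place
                // and counts 2 elements here, after which the loop below erases [5, 10) wholesale,
                // for 7 elements removed in total.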
+ } + } + + for (TIterator i = completelyRemoveBegin; i != completelyRemoveEnd; ++i) { + elementsRemoved += i->second - i->first; + } + + Tree.erase(completelyRemoveBegin, completelyRemoveEnd); + + Y_ASSERT(elementsRemoved <= NumElements); + NumElements -= elementsRemoved; + + return elementsRemoved; + } + + void Swap(TDisjointIntervalTree& rhv) { + Tree.swap(rhv.Tree); + std::swap(NumElements, rhv.NumElements); + } + + void Clear() { + Tree.clear(); + NumElements = 0; + } + + bool Empty() const { + return Tree.empty(); + } + + size_t GetNumElements() const { + return NumElements; + } + + size_t GetNumIntervals() const { + return Tree.size(); + } + + T Min() const { + Y_ASSERT(!Empty()); + return Tree.begin()->first; + } + + T Max() const { + Y_ASSERT(!Empty()); + return Tree.rbegin()->second; + } + + TConstIterator begin() const { + return Tree.begin(); + } + + TConstIterator end() const { + return Tree.end(); + } + +private: + void InsertIntervalImpl(const T begin, const T end) { + Y_ASSERT(begin < end); + Y_ASSERT(!Intersects(begin, end)); + + TIterator l = Tree.lower_bound(begin); + TIterator p = Tree.end(); + if (l != Tree.begin()) { + p = l; + --p; + } + +#ifndef NDEBUG + TIterator u = Tree.upper_bound(begin); + Y_VERIFY_DEBUG(u == Tree.end() || u->first >= end, "Trying to add [%" PRIu64 ", %" PRIu64 ") which intersects with existing [%" PRIu64 ", %" PRIu64 ")", begin, end, u->first, u->second); + Y_VERIFY_DEBUG(l == Tree.end() || l == u, "Trying to add [%" PRIu64 ", %" PRIu64 ") which intersects with existing [%" PRIu64 ", %" PRIu64 ")", begin, end, l->first, l->second); + Y_VERIFY_DEBUG(p == Tree.end() || p->second <= begin, "Trying to add [%" PRIu64 ", %" PRIu64 ") which intersects with existing [%" PRIu64 ", %" PRIu64 ")", begin, end, p->first, p->second); +#endif + + // try to extend interval + if (p != Tree.end() && p->second == begin) { + p->second = end; //Try to merge 2 intervals - p and next one if possible auto next = p; // Next is not Tree.end() here. @@ -231,42 +231,42 @@ private: Tree.erase(next); } // Maybe new interval extends right interval - } else if (l != Tree.end() && end == l->first) { - T& leftBorder = const_cast<T&>(l->first); - // Optimization hack. - leftBorder = begin; // OK to change key since intervals do not intersect. - } else { - Tree.insert(std::make_pair(begin, end)); - } - } - - TIterator FindContaining(const T t) { - TIterator l = Tree.lower_bound(t); - if (l != Tree.end()) { - if (l->first == t) { - return l; - } - Y_ASSERT(l->first > t); - - if (l == Tree.begin()) { - return Tree.end(); - } - - --l; - Y_ASSERT(l->first != t); - - if (l->first < t && t < l->second) { - return l; - } - - } else if (!Tree.empty()) { // l is larger than Begin of any interval, but maybe it belongs to last interval? - TReverseIterator last = Tree.rbegin(); - Y_ASSERT(last->first != t); - - if (last->first < t && t < last->second) { - return (++last).base(); - } - } - return Tree.end(); - } -}; + } else if (l != Tree.end() && end == l->first) { + T& leftBorder = const_cast<T&>(l->first); + // Optimization hack. + leftBorder = begin; // OK to change key since intervals do not intersect. 
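            // Illustrative example, matching MergeIntervalsTest later in this diff: with [5, 7)
            // already stored, InsertInterval(2, 5) takes this branch and rebinds the node's key
            // from 5 to 2, yielding a single [2, 7) interval without allocating a new node.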
+ } else { + Tree.insert(std::make_pair(begin, end)); + } + } + + TIterator FindContaining(const T t) { + TIterator l = Tree.lower_bound(t); + if (l != Tree.end()) { + if (l->first == t) { + return l; + } + Y_ASSERT(l->first > t); + + if (l == Tree.begin()) { + return Tree.end(); + } + + --l; + Y_ASSERT(l->first != t); + + if (l->first < t && t < l->second) { + return l; + } + + } else if (!Tree.empty()) { // l is larger than Begin of any interval, but maybe it belongs to last interval? + TReverseIterator last = Tree.rbegin(); + Y_ASSERT(last->first != t); + + if (last->first < t && t < last->second) { + return (++last).base(); + } + } + return Tree.end(); + } +}; diff --git a/library/cpp/containers/disjoint_interval_tree/ut/disjoint_interval_tree_ut.cpp b/library/cpp/containers/disjoint_interval_tree/ut/disjoint_interval_tree_ut.cpp index 8474ae89b04..0292c72282f 100644 --- a/library/cpp/containers/disjoint_interval_tree/ut/disjoint_interval_tree_ut.cpp +++ b/library/cpp/containers/disjoint_interval_tree/ut/disjoint_interval_tree_ut.cpp @@ -1,60 +1,60 @@ -#include <library/cpp/testing/unittest/registar.h> - -#include <library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.h> - -Y_UNIT_TEST_SUITE(DisjointIntervalTreeTest) { - Y_UNIT_TEST(GenericTest) { - TDisjointIntervalTree<ui64> tree; - tree.Insert(1); - tree.Insert(50); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 2); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 2); - - tree.InsertInterval(10, 30); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 22); - - UNIT_ASSERT_VALUES_EQUAL(tree.Min(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.Max(), 51); - - tree.Erase(20); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 4); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 21); - - tree.Clear(); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 0); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 0); - } - - Y_UNIT_TEST(MergeIntervalsTest) { - TDisjointIntervalTree<ui64> tree; - tree.Insert(5); - - // Insert interval from right side. - tree.Insert(6); - - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 2); - - { - auto begin = tree.begin(); - UNIT_ASSERT_VALUES_EQUAL(begin->first, 5); - UNIT_ASSERT_VALUES_EQUAL(begin->second, 7); - - ++begin; - UNIT_ASSERT_EQUAL(begin, tree.end()); - } - - // Insert interval from left side. 
- tree.InsertInterval(2, 5); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); - - { - auto begin = tree.begin(); - UNIT_ASSERT_VALUES_EQUAL(begin->first, 2); - UNIT_ASSERT_VALUES_EQUAL(begin->second, 7); - } +#include <library/cpp/testing/unittest/registar.h> + +#include <library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.h> + +Y_UNIT_TEST_SUITE(DisjointIntervalTreeTest) { + Y_UNIT_TEST(GenericTest) { + TDisjointIntervalTree<ui64> tree; + tree.Insert(1); + tree.Insert(50); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 2); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 2); + + tree.InsertInterval(10, 30); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 22); + + UNIT_ASSERT_VALUES_EQUAL(tree.Min(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.Max(), 51); + + tree.Erase(20); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 4); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 21); + + tree.Clear(); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 0); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 0); + } + + Y_UNIT_TEST(MergeIntervalsTest) { + TDisjointIntervalTree<ui64> tree; + tree.Insert(5); + + // Insert interval from right side. + tree.Insert(6); + + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 2); + + { + auto begin = tree.begin(); + UNIT_ASSERT_VALUES_EQUAL(begin->first, 5); + UNIT_ASSERT_VALUES_EQUAL(begin->second, 7); + + ++begin; + UNIT_ASSERT_EQUAL(begin, tree.end()); + } + + // Insert interval from left side. + tree.InsertInterval(2, 5); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); + + { + auto begin = tree.begin(); + UNIT_ASSERT_VALUES_EQUAL(begin->first, 2); + UNIT_ASSERT_VALUES_EQUAL(begin->second, 7); + } // Merge all intervals. { @@ -71,209 +71,209 @@ Y_UNIT_TEST_SUITE(DisjointIntervalTreeTest) { UNIT_ASSERT_VALUES_EQUAL(begin->second, 10); } - } - - Y_UNIT_TEST(EraseIntervalTest) { - // 1. Remove from empty tree. - { - TDisjointIntervalTree<ui64> tree; - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(1, 3), 0); - } - - // 2. No such interval in set. - { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(1, 3), 0); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(20, 30), 0); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); - } - - // 3. Remove the whole tree. - { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(0, 100), 5); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 0); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 0); - UNIT_ASSERT(tree.Empty()); - } - - // 4. Remove the whole tree with borders specified exactly as in tree. 
- { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(5, 10), 5); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 0); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 0); - UNIT_ASSERT(tree.Empty()); - } - - // 5. Specify left border exactly as in existing interval. - { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - tree.InsertInterval(15, 20); - tree.InsertInterval(25, 30); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(15, 100500), 10); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); - } - - // 6. Specify left border somewhere in existing interval. - { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - tree.InsertInterval(15, 20); - tree.InsertInterval(25, 30); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(16, 100500), 9); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 2); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 6); - } - - // 7. Remove from the center of existing interval. - { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - tree.InsertInterval(15, 20); - tree.InsertInterval(25, 30); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(17, 19), 2); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 4); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 13); - - UNIT_ASSERT(tree.Has(16)); - UNIT_ASSERT(tree.Has(19)); - } - - // 8. Remove from the center of the only existing interval. - { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(15, 20); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(17, 19), 2); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 2); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 3); - - UNIT_ASSERT(tree.Has(16)); - UNIT_ASSERT(tree.Has(19)); - } - - // 9. Specify borders between existing intervals. - { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - tree.InsertInterval(15, 20); - tree.InsertInterval(25, 30); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(10, 15), 0); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(13, 15), 0); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(10, 13), 0); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); - } - - // 10. Specify right border exactly as in existing interval. 
- { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - tree.InsertInterval(15, 20); - tree.InsertInterval(25, 30); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(0, 20), 10); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); - } - - // 11. Specify right border somewhere in existing interval. - { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - tree.InsertInterval(15, 20); - tree.InsertInterval(25, 30); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); - - UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(2, 17), 7); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 2); - UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 8); - } - } - - Y_UNIT_TEST(IntersectsTest) { - { - TDisjointIntervalTree<ui64> tree; - UNIT_ASSERT(!tree.Intersects(1, 2)); - } - - { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - - UNIT_ASSERT(tree.Intersects(5, 10)); - UNIT_ASSERT(tree.Intersects(5, 6)); - UNIT_ASSERT(tree.Intersects(9, 10)); - UNIT_ASSERT(tree.Intersects(6, 8)); - UNIT_ASSERT(tree.Intersects(1, 8)); - UNIT_ASSERT(tree.Intersects(8, 15)); - UNIT_ASSERT(tree.Intersects(3, 14)); - - UNIT_ASSERT(!tree.Intersects(3, 5)); - UNIT_ASSERT(!tree.Intersects(10, 13)); - } - - { - TDisjointIntervalTree<ui64> tree; - tree.InsertInterval(5, 10); - tree.InsertInterval(20, 30); - - UNIT_ASSERT(tree.Intersects(5, 10)); - UNIT_ASSERT(tree.Intersects(5, 6)); - UNIT_ASSERT(tree.Intersects(9, 10)); - UNIT_ASSERT(tree.Intersects(6, 8)); - UNIT_ASSERT(tree.Intersects(1, 8)); - UNIT_ASSERT(tree.Intersects(8, 15)); - UNIT_ASSERT(tree.Intersects(3, 14)); - UNIT_ASSERT(tree.Intersects(18, 21)); - UNIT_ASSERT(tree.Intersects(3, 50)); - - UNIT_ASSERT(!tree.Intersects(3, 5)); - UNIT_ASSERT(!tree.Intersects(10, 13)); - UNIT_ASSERT(!tree.Intersects(15, 18)); - } - } -} + } + + Y_UNIT_TEST(EraseIntervalTest) { + // 1. Remove from empty tree. + { + TDisjointIntervalTree<ui64> tree; + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(1, 3), 0); + } + + // 2. No such interval in set. + { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(1, 3), 0); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(20, 30), 0); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); + } + + // 3. Remove the whole tree. + { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(0, 100), 5); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 0); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 0); + UNIT_ASSERT(tree.Empty()); + } + + // 4. Remove the whole tree with borders specified exactly as in tree. 
+ { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(5, 10), 5); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 0); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 0); + UNIT_ASSERT(tree.Empty()); + } + + // 5. Specify left border exactly as in existing interval. + { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + tree.InsertInterval(15, 20); + tree.InsertInterval(25, 30); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(15, 100500), 10); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); + } + + // 6. Specify left border somewhere in existing interval. + { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + tree.InsertInterval(15, 20); + tree.InsertInterval(25, 30); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(16, 100500), 9); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 2); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 6); + } + + // 7. Remove from the center of existing interval. + { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + tree.InsertInterval(15, 20); + tree.InsertInterval(25, 30); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(17, 19), 2); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 4); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 13); + + UNIT_ASSERT(tree.Has(16)); + UNIT_ASSERT(tree.Has(19)); + } + + // 8. Remove from the center of the only existing interval. + { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(15, 20); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(17, 19), 2); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 2); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 3); + + UNIT_ASSERT(tree.Has(16)); + UNIT_ASSERT(tree.Has(19)); + } + + // 9. Specify borders between existing intervals. + { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + tree.InsertInterval(15, 20); + tree.InsertInterval(25, 30); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(10, 15), 0); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(13, 15), 0); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(10, 13), 0); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); + } + + // 10. Specify right border exactly as in existing interval. 
+ { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + tree.InsertInterval(15, 20); + tree.InsertInterval(25, 30); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(0, 20), 10); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 1); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 5); + } + + // 11. Specify right border somewhere in existing interval. + { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + tree.InsertInterval(15, 20); + tree.InsertInterval(25, 30); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 3); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 15); + + UNIT_ASSERT_VALUES_EQUAL(tree.EraseInterval(2, 17), 7); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumIntervals(), 2); + UNIT_ASSERT_VALUES_EQUAL(tree.GetNumElements(), 8); + } + } + + Y_UNIT_TEST(IntersectsTest) { + { + TDisjointIntervalTree<ui64> tree; + UNIT_ASSERT(!tree.Intersects(1, 2)); + } + + { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + + UNIT_ASSERT(tree.Intersects(5, 10)); + UNIT_ASSERT(tree.Intersects(5, 6)); + UNIT_ASSERT(tree.Intersects(9, 10)); + UNIT_ASSERT(tree.Intersects(6, 8)); + UNIT_ASSERT(tree.Intersects(1, 8)); + UNIT_ASSERT(tree.Intersects(8, 15)); + UNIT_ASSERT(tree.Intersects(3, 14)); + + UNIT_ASSERT(!tree.Intersects(3, 5)); + UNIT_ASSERT(!tree.Intersects(10, 13)); + } + + { + TDisjointIntervalTree<ui64> tree; + tree.InsertInterval(5, 10); + tree.InsertInterval(20, 30); + + UNIT_ASSERT(tree.Intersects(5, 10)); + UNIT_ASSERT(tree.Intersects(5, 6)); + UNIT_ASSERT(tree.Intersects(9, 10)); + UNIT_ASSERT(tree.Intersects(6, 8)); + UNIT_ASSERT(tree.Intersects(1, 8)); + UNIT_ASSERT(tree.Intersects(8, 15)); + UNIT_ASSERT(tree.Intersects(3, 14)); + UNIT_ASSERT(tree.Intersects(18, 21)); + UNIT_ASSERT(tree.Intersects(3, 50)); + + UNIT_ASSERT(!tree.Intersects(3, 5)); + UNIT_ASSERT(!tree.Intersects(10, 13)); + UNIT_ASSERT(!tree.Intersects(15, 18)); + } + } +} diff --git a/library/cpp/containers/disjoint_interval_tree/ut/ya.make b/library/cpp/containers/disjoint_interval_tree/ut/ya.make index 6736ce0c2bd..0885923e711 100644 --- a/library/cpp/containers/disjoint_interval_tree/ut/ya.make +++ b/library/cpp/containers/disjoint_interval_tree/ut/ya.make @@ -1,12 +1,12 @@ -UNITTEST_FOR(library/cpp/containers/disjoint_interval_tree) - -OWNER( - dcherednik - galaxycrab -) - -SRCS( - disjoint_interval_tree_ut.cpp -) - -END() +UNITTEST_FOR(library/cpp/containers/disjoint_interval_tree) + +OWNER( + dcherednik + galaxycrab +) + +SRCS( + disjoint_interval_tree_ut.cpp +) + +END() diff --git a/library/cpp/containers/disjoint_interval_tree/ya.make b/library/cpp/containers/disjoint_interval_tree/ya.make index b4f5a52a67f..cafad0281ea 100644 --- a/library/cpp/containers/disjoint_interval_tree/ya.make +++ b/library/cpp/containers/disjoint_interval_tree/ya.make @@ -1,10 +1,10 @@ -OWNER( - dcherednik - galaxycrab -) - -LIBRARY() - -SRCS(disjoint_interval_tree.cpp) - -END() +OWNER( + dcherednik + galaxycrab +) + +LIBRARY() + +SRCS(disjoint_interval_tree.cpp) + +END() diff --git a/library/cpp/containers/ring_buffer/ring_buffer.h b/library/cpp/containers/ring_buffer/ring_buffer.h index 41220dcf6bf..c9f0acf7c26 100644 --- a/library/cpp/containers/ring_buffer/ring_buffer.h +++ b/library/cpp/containers/ring_buffer/ring_buffer.h @@ -12,12 +12,12 @@ public: Items.reserve(MaxSize); } - TSimpleRingBuffer(const TSimpleRingBuffer&) = default; - TSimpleRingBuffer(TSimpleRingBuffer&&) = 
default; - - TSimpleRingBuffer& operator=(const TSimpleRingBuffer&) = default; - TSimpleRingBuffer& operator=(TSimpleRingBuffer&&) = default; - + TSimpleRingBuffer(const TSimpleRingBuffer&) = default; + TSimpleRingBuffer(TSimpleRingBuffer&&) = default; + + TSimpleRingBuffer& operator=(const TSimpleRingBuffer&) = default; + TSimpleRingBuffer& operator=(TSimpleRingBuffer&&) = default; + // First available item size_t FirstIndex() const { return Begin; @@ -55,11 +55,11 @@ public: } } - void Clear() { - Items.clear(); - Begin = 0; - } - + void Clear() { + Items.clear(); + Begin = 0; + } + private: size_t RealIndex(size_t index) const { return index % MaxSize; diff --git a/library/cpp/containers/ya.make b/library/cpp/containers/ya.make index 4b1b315e6a5..6ca17c9c2f9 100644 --- a/library/cpp/containers/ya.make +++ b/library/cpp/containers/ya.make @@ -20,8 +20,8 @@ RECURSE( dense_hash/ut dictionary dictionary/ut - disjoint_interval_tree - disjoint_interval_tree/ut + disjoint_interval_tree + disjoint_interval_tree/ut ext_priority_queue ext_priority_queue/ut fast_trie diff --git a/library/cpp/digest/md5/md5_ut.cpp b/library/cpp/digest/md5/md5_ut.cpp index 1c3e4ad0a9f..0833ad52500 100644 --- a/library/cpp/digest/md5/md5_ut.cpp +++ b/library/cpp/digest/md5/md5_ut.cpp @@ -44,7 +44,7 @@ Y_UNIT_TEST_SUITE(TMD5Test) { fileHash = MD5::File(tmpFile); UNIT_ASSERT_NO_DIFF(fileHash, memoryHash); - NFs::Remove(tmpFile); + NFs::Remove(tmpFile); fileHash = MD5::File(tmpFile); UNIT_ASSERT_EQUAL(fileHash.size(), 0); } diff --git a/library/cpp/getopt/small/last_getopt_opts.cpp b/library/cpp/getopt/small/last_getopt_opts.cpp index 03c432849f1..dfa528b996c 100644 --- a/library/cpp/getopt/small/last_getopt_opts.cpp +++ b/library/cpp/getopt/small/last_getopt_opts.cpp @@ -4,7 +4,7 @@ #include "last_getopt_parser.h" #include <library/cpp/colorizer/colors.h> - + #include <util/stream/format.h> #include <util/charset/utf8.h> @@ -62,7 +62,7 @@ namespace NLastGetopt { return opt->ToShortString(); } } - + TOpts::TOpts(const TStringBuf& optstring) : ArgPermutation_(DEFAULT_ARG_PERMUTATION) , AllowSingleDashForLong_(false) @@ -392,14 +392,14 @@ namespace NLastGetopt { if (requiredOptionsCount == 0) continue; os << Endl << colors.BoldColor() << "Required parameters" << colors.OldColor() << ":" << Endl; - } else { + } else { if (requiredOptionsCount == Opts_.size()) continue; if (requiredOptionsCount == 0) os << Endl << colors.BoldColor() << "Options" << colors.OldColor() << ":" << Endl; else os << Endl << colors.BoldColor() << "Optional parameters" << colors.OldColor() << ":" << Endl; // optional options would be a tautology - } + } for (size_t i = 0; i < Opts_.size(); i++) { const TOpt* opt = Opts_[i].Get(); @@ -469,7 +469,7 @@ namespace NLastGetopt { void TOpts::PrintUsage(const TStringBuf& program, IOutputStream& os) const { PrintUsage(program, os, NColorizer::AutoColors(os)); } - + void TOpts::PrintFreeArgsDesc(IOutputStream& os, const NColorizer::TColors& colors) const { if (0 == FreeArgsMax_) return; diff --git a/library/cpp/getopt/small/last_getopt_opts.h b/library/cpp/getopt/small/last_getopt_opts.h index 825b99c8712..74a8e672373 100644 --- a/library/cpp/getopt/small/last_getopt_opts.h +++ b/library/cpp/getopt/small/last_getopt_opts.h @@ -3,7 +3,7 @@ #include "last_getopt_opt.h" #include <library/cpp/colorizer/fwd.h> - + #include <util/generic/map.h> namespace NLastGetopt { @@ -594,7 +594,7 @@ namespace NLastGetopt { * @param colors colorizer */ void PrintUsage(const TStringBuf& program, IOutputStream& os, const 
NColorizer::TColors& colors) const; - + /** * Print usage string * diff --git a/library/cpp/getopt/small/last_getopt_parser.cpp b/library/cpp/getopt/small/last_getopt_parser.cpp index 7668b12a035..e6c90aaacd1 100644 --- a/library/cpp/getopt/small/last_getopt_parser.cpp +++ b/library/cpp/getopt/small/last_getopt_parser.cpp @@ -1,7 +1,7 @@ #include "last_getopt_parser.h" #include <library/cpp/colorizer/colors.h> - + #include <util/string/escape.h> namespace NLastGetopt { @@ -385,5 +385,5 @@ namespace NLastGetopt { void TOptsParser::PrintUsage(IOutputStream& os) const { PrintUsage(os, NColorizer::AutoColors(os)); } - -} + +} diff --git a/library/cpp/getopt/small/last_getopt_parser.h b/library/cpp/getopt/small/last_getopt_parser.h index 2cf8a6c308d..8a38b3ae638 100644 --- a/library/cpp/getopt/small/last_getopt_parser.h +++ b/library/cpp/getopt/small/last_getopt_parser.h @@ -3,7 +3,7 @@ #include "last_getopt_opts.h" #include <library/cpp/colorizer/fwd.h> - + #include <util/generic/hash_set.h> #include <util/generic/list.h> @@ -146,9 +146,9 @@ namespace NLastGetopt { const TString& ProgramName() const { return ProgramName_; } - + void PrintUsage(IOutputStream& os = Cout) const; - + void PrintUsage(IOutputStream& os, const NColorizer::TColors& colors) const; }; } //namespace NLastGetopt diff --git a/library/cpp/getopt/small/modchooser.cpp b/library/cpp/getopt/small/modchooser.cpp index 2fa5cfd0703..b84fd37c9a0 100644 --- a/library/cpp/getopt/small/modchooser.cpp +++ b/library/cpp/getopt/small/modchooser.cpp @@ -5,7 +5,7 @@ #include "modchooser.h" #include <library/cpp/colorizer/colors.h> - + #include <util/stream/output.h> #include <util/stream/format.h> #include <util/generic/yexception.h> @@ -279,9 +279,9 @@ TString TModChooser::TMode::FormatFullName(size_t pad) const { void TModChooser::PrintHelp(const TString& progName) const { Cerr << Description << Endl << Endl; - Cerr << NColorizer::StdErr().BoldColor() << "Usage" << NColorizer::StdErr().OldColor() << ": " << progName << " MODE [MODE_OPTIONS]" << Endl; + Cerr << NColorizer::StdErr().BoldColor() << "Usage" << NColorizer::StdErr().OldColor() << ": " << progName << " MODE [MODE_OPTIONS]" << Endl; Cerr << Endl; - Cerr << NColorizer::StdErr().BoldColor() << "Modes" << NColorizer::StdErr().OldColor() << ":" << Endl; + Cerr << NColorizer::StdErr().BoldColor() << "Modes" << NColorizer::StdErr().OldColor() << ":" << Endl; size_t maxModeLen = 0; for (const auto& [name, mode] : Modes) { if (name != mode->Name) diff --git a/library/cpp/getopt/small/ya.make b/library/cpp/getopt/small/ya.make index 96de0f04b1f..e3b8126a58d 100644 --- a/library/cpp/getopt/small/ya.make +++ b/library/cpp/getopt/small/ya.make @@ -2,9 +2,9 @@ LIBRARY() OWNER(pg) -PEERDIR( +PEERDIR( library/cpp/colorizer -) +) SRCS( completer.cpp diff --git a/library/cpp/getopt/ut/last_getopt_ut.cpp b/library/cpp/getopt/ut/last_getopt_ut.cpp index c99a1d053d1..6eb0bd5fca1 100644 --- a/library/cpp/getopt/ut/last_getopt_ut.cpp +++ b/library/cpp/getopt/ut/last_getopt_ut.cpp @@ -4,10 +4,10 @@ #include <library/cpp/testing/unittest/registar.h> #include <util/generic/array_size.h> -#include <util/string/subst.h> -#include <util/string/vector.h> +#include <util/string/subst.h> +#include <util/string/vector.h> #include <util/string/split.h> - + using namespace NLastGetopt; namespace { @@ -605,101 +605,101 @@ Y_UNIT_TEST_SUITE(TLastGetoptTests) { } Y_UNIT_TEST(TestColorPrint) { - TOpts opts; - const char* prog = "my_program"; - opts.AddLongOption("long_option").Required(); - opts.AddLongOption('o', 
"other"); - opts.AddCharOption('d').DefaultValue("42"); - opts.AddCharOption('s').DefaultValue("str_default"); - opts.SetFreeArgsNum(123, 456); + TOpts opts; + const char* prog = "my_program"; + opts.AddLongOption("long_option").Required(); + opts.AddLongOption('o', "other"); + opts.AddCharOption('d').DefaultValue("42"); + opts.AddCharOption('s').DefaultValue("str_default"); + opts.SetFreeArgsNum(123, 456); opts.SetFreeArgTitle(0, "first_free_arg", "help"); opts.SetFreeArgTitle(2, "second_free_arg"); opts.AddSection("Section", "Section\n text"); const char* cmd[] = {prog}; - TOptsParser parser(&opts, Y_ARRAY_SIZE(cmd), cmd); - TStringStream out; - NColorizer::TColors colors(true); - parser.PrintUsage(out, colors); - - // find options and green color + TOptsParser parser(&opts, Y_ARRAY_SIZE(cmd), cmd); + TStringStream out; + NColorizer::TColors colors(true); + parser.PrintUsage(out, colors); + + // find options and green color UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.GreenColor() << "--long_option" << colors.OldColor()) != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.GreenColor() << "--other" << colors.OldColor()) != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.GreenColor() << "-o" << colors.OldColor()) != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.GreenColor() << "-d" << colors.OldColor()) != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.GreenColor() << "-s" << colors.OldColor()) != TString::npos); - - // find default values + + // find default values UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.CyanColor() << "42" << colors.OldColor()) != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.CyanColor() << "\"str_default\"" << colors.OldColor()) != TString::npos); - - // find free args + + // find free args UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.GreenColor() << "123" << colors.OldColor()) != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.GreenColor() << "456" << colors.OldColor()) != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.GreenColor() << "first_free_arg" << colors.OldColor()) != TString::npos); // free args without help not rendered even if they have custom title UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.GreenColor() << "second_free_arg" << colors.OldColor()) == TString::npos); - - // find signatures + + // find signatures UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.BoldColor() << "Usage" << colors.OldColor()) != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.BoldColor() << "Required parameters" << colors.OldColor()) != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.BoldColor() << "Optional parameters" << colors.OldColor()) != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.BoldColor() << "Free args" << colors.OldColor()) != TString::npos); - + // find sections UNIT_ASSERT(out.Str().find(TStringBuilder() << colors.BoldColor() << "Section" << colors.OldColor() << ":") != TString::npos); UNIT_ASSERT(out.Str().find(TStringBuilder() << " Section\n text") != TString::npos); - // print without colors - TStringStream out2; - opts.PrintUsage(prog, out2); + // print without colors + TStringStream out2; + opts.PrintUsage(prog, out2); UNIT_ASSERT(out2.Str().find(colors.GreenColor()) == TString::npos); UNIT_ASSERT(out2.Str().find(colors.CyanColor()) == TString::npos); 
UNIT_ASSERT(out2.Str().find(colors.BoldColor()) == TString::npos); UNIT_ASSERT(out2.Str().find(colors.OldColor()) == TString::npos); - } - + } + Y_UNIT_TEST(TestPadding) { - const bool withColorsOpt[] = {false, true}; - for (bool withColors : withColorsOpt) { - TOpts opts; - const char* prog = "my_program"; + const bool withColorsOpt[] = {false, true}; + for (bool withColors : withColorsOpt) { + TOpts opts; + const char* prog = "my_program"; opts.AddLongOption("option", "description 1").Required(); // long option opts.AddLongOption('o', "other", "description 2"); // char and long option opts.AddCharOption('d', "description 3").RequiredArgument("DD"); // char option - opts.AddCharOption('s', "description 4\ndescription 5\ndescription 6"); // multiline desc - opts.AddLongOption('l', "very_very_very_loooong_ooooption", "description 7").RequiredArgument("LONG_ARGUMENT"); + opts.AddCharOption('s', "description 4\ndescription 5\ndescription 6"); // multiline desc + opts.AddLongOption('l', "very_very_very_loooong_ooooption", "description 7").RequiredArgument("LONG_ARGUMENT"); const char* cmd[] = {prog}; - TOptsParser parser(&opts, Y_ARRAY_SIZE(cmd), cmd); - - TStringStream out; - NColorizer::TColors colors(withColors); - parser.PrintUsage(out, colors); - + TOptsParser parser(&opts, Y_ARRAY_SIZE(cmd), cmd); + + TStringStream out; + NColorizer::TColors colors(withColors); + parser.PrintUsage(out, colors); + TString printed = out.Str(); - if (withColors) { - // remove not printable characters + if (withColors) { + // remove not printable characters SubstGlobal(printed, TString(colors.BoldColor()), ""); SubstGlobal(printed, TString(colors.GreenColor()), ""); SubstGlobal(printed, TString(colors.CyanColor()), ""); SubstGlobal(printed, TString(colors.OldColor()), ""); - } + } TVector<TString> lines; StringSplitter(printed).Split('\n').SkipEmpty().Collect(&lines); - UNIT_ASSERT(!lines.empty()); + UNIT_ASSERT(!lines.empty()); TVector<size_t> indents; for (const TString& line : lines) { - const size_t indent = line.find("description "); + const size_t indent = line.find("description "); if (indent != TString::npos) - indents.push_back(indent); - } - UNIT_ASSERT_VALUES_EQUAL(indents.size(), 7); - const size_t theOnlyIndent = indents[0]; - for (size_t indent : indents) { - UNIT_ASSERT_VALUES_EQUAL_C(indent, theOnlyIndent, printed); - } - } - } - + indents.push_back(indent); + } + UNIT_ASSERT_VALUES_EQUAL(indents.size(), 7); + const size_t theOnlyIndent = indents[0]; + for (size_t indent : indents) { + UNIT_ASSERT_VALUES_EQUAL_C(indent, theOnlyIndent, printed); + } + } + } + Y_UNIT_TEST(TestAppendTo) { TVector<int> ints; diff --git a/library/cpp/grpc/client/grpc_client_low.h b/library/cpp/grpc/client/grpc_client_low.h index ab0a0627be0..8fbd976414f 100644 --- a/library/cpp/grpc/client/grpc_client_low.h +++ b/library/cpp/grpc/client/grpc_client_low.h @@ -45,7 +45,7 @@ class IQueueClientEvent { public: virtual ~IQueueClientEvent() = default; - //! Execute an action defined by implementation + //! Execute an action defined by implementation virtual bool Execute(bool ok) = 0; //! Finish and destroy event diff --git a/library/cpp/grpc/server/grpc_server.h b/library/cpp/grpc/server/grpc_server.h index d6814a90a0d..93187f70be1 100644 --- a/library/cpp/grpc/server/grpc_server.h +++ b/library/cpp/grpc/server/grpc_server.h @@ -105,7 +105,7 @@ class IQueueEvent { public: virtual ~IQueueEvent() = default; - //! Execute an action defined by implementation. + //! Execute an action defined by implementation. 
virtual bool Execute(bool ok) = 0; //! It is time to perform action requested by AcquireToken server method. It will be called under lock which is also diff --git a/library/cpp/json/json_writer.h b/library/cpp/json/json_writer.h index c7f5c9499a4..5da559ac359 100644 --- a/library/cpp/json/json_writer.h +++ b/library/cpp/json/json_writer.h @@ -5,7 +5,7 @@ #include "json_value.h" #include <library/cpp/json/writer/json.h> - + #include <util/stream/output.h> #include <util/generic/hash.h> #include <util/generic/maybe.h> @@ -133,7 +133,7 @@ namespace NJson { Buf.WriteKey(key); Write(value); } - + // write raw json without checks void UnsafeWrite(const TStringBuf& key, const TStringBuf& value) { Buf.WriteKey(key); @@ -144,7 +144,7 @@ namespace NJson { Buf.WriteKey(key); WriteNull(); } - + template <typename T> void WriteOptional(const TStringBuf& key, const TMaybe<T>& value) { if (value) { diff --git a/library/cpp/json/writer/json.cpp b/library/cpp/json/writer/json.cpp index 02370c2d79e..d4e5eaf6b35 100644 --- a/library/cpp/json/writer/json.cpp +++ b/library/cpp/json/writer/json.cpp @@ -258,16 +258,16 @@ namespace NJsonWriter { } else { ythrow TError() << "JSON writer: invalid float value: " << FloatToString(f); } - } + } size_t len = FloatToString(f, buf, Y_ARRAY_SIZE(buf), mode, ndigits); UnsafeWriteValue(buf, len); return TValueContext(*this); - } + } TValueContext TBuf::WriteFloat(float f, EFloatToStringMode mode, int ndigits) { return WriteFloatImpl(f, mode, ndigits); } - + TValueContext TBuf::WriteDouble(double f, EFloatToStringMode mode, int ndigits) { return WriteFloatImpl(f, mode, ndigits); } diff --git a/library/cpp/json/writer/json.h b/library/cpp/json/writer/json.h index 0aae2531b94..57770fce8f8 100644 --- a/library/cpp/json/writer/json.h +++ b/library/cpp/json/writer/json.h @@ -77,14 +77,14 @@ namespace NJsonWriter { return *this; } - /*** NaN and Inf are not valid json values, - * so if WriteNanAsString is set, writer would write string - * intead of throwing exception (default case) */ + /*** NaN and Inf are not valid json values, + * so if WriteNanAsString is set, writer would write string + * intead of throwing exception (default case) */ TBuf& SetWriteNanAsString(bool writeNanAsString = true) { - WriteNanAsString = writeNanAsString; + WriteNanAsString = writeNanAsString; return *this; - } - + } + /*** Return the string formed in the internal TStringStream. * You may only call it if the `stream' parameter was NULL * at construction time. */ @@ -141,10 +141,10 @@ namespace NJsonWriter { void CheckAndPop(EJsonEntity e); EJsonEntity StackTop() const; - template <class TFloat> - TValueContext WriteFloatImpl(TFloat f, EFloatToStringMode mode, int ndigits); + template <class TFloat> + TValueContext WriteFloatImpl(TFloat f, EFloatToStringMode mode, int ndigits); - private: + private: IOutputStream* Stream; THolder<TStringStream> StringStream; typedef TVector<const TString*> TKeys; @@ -155,7 +155,7 @@ namespace NJsonWriter { bool NeedNewline; const EHtmlEscapeMode EscapeMode; int IndentSpaces; - bool WriteNanAsString; + bool WriteNanAsString; }; // Please don't try to instantiate the classes declared below this point. 
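For context on the SetWriteNanAsString flag documented in the json.h hunk above: by default WriteFloat/WriteDouble throw yexception for NaN and infinity (neither is representable in JSON), and with the flag enabled the writer emits the strings "nan", "inf" and "-inf" instead, as the NanFormatting test in the next hunk verifies. A minimal illustrative sketch of the opt-in path, assuming only the NJsonWriter API that appears in this diff (the function name is purely for illustration):

    #include <library/cpp/json/writer/json.h>

    #include <limits>

    void WriteNanAsStringExample() {
        NJsonWriter::TBuf buf;
        buf.SetWriteNanAsString(); // without this call, WriteFloat below would throw
        buf.BeginObject()
            .WriteKey("nanvalue")
            .WriteFloat(std::numeric_limits<double>::quiet_NaN())
            .WriteKey("l")
            .BeginList()
            .WriteFloat(std::numeric_limits<float>::quiet_NaN())
            .EndList()
        .EndObject();
        // buf.Str() is expected to yield {"nanvalue":"nan","l":["nan"]}
    }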
diff --git a/library/cpp/json/writer/json_ut.cpp b/library/cpp/json/writer/json_ut.cpp index 9980555683f..de6df4e941f 100644 --- a/library/cpp/json/writer/json_ut.cpp +++ b/library/cpp/json/writer/json_ut.cpp @@ -4,8 +4,8 @@ #include "json.h" #include <library/cpp/json/json_value.h> -#include <limits> - +#include <limits> + Y_UNIT_TEST_SUITE(JsonWriter) { Y_UNIT_TEST(Struct) { NJsonWriter::TBuf w; @@ -205,53 +205,53 @@ Y_UNIT_TEST_SUITE(JsonWriter) { const char exp[] = "[0.123457,0.1234567899,0.316,244.1,10385.83,{\"1\":1112,\"2\":1e+03}]"; UNIT_ASSERT_STRINGS_EQUAL(exp, buf.Str()); } - + Y_UNIT_TEST(NanFormatting) { - { - NJsonWriter::TBuf buf; - buf.BeginObject(); - buf.WriteKey("nanvalue"); - UNIT_ASSERT_EXCEPTION(buf.WriteFloat(std::numeric_limits<double>::quiet_NaN()), yexception); - } - - { - NJsonWriter::TBuf buf; - buf.BeginObject(); - buf.WriteKey("infvalue"); - UNIT_ASSERT_EXCEPTION(buf.WriteFloat(std::numeric_limits<double>::infinity()), yexception); - } - - { - NJsonWriter::TBuf buf; - buf.BeginList(); - UNIT_ASSERT_EXCEPTION(buf.WriteFloat(std::numeric_limits<double>::quiet_NaN()), yexception); - } - - { - NJsonWriter::TBuf buf; - buf.BeginList(); - UNIT_ASSERT_EXCEPTION(buf.WriteFloat(std::numeric_limits<double>::infinity()), yexception); - } - - { - NJsonWriter::TBuf buf; - buf.SetWriteNanAsString(); - - buf.BeginObject() + { + NJsonWriter::TBuf buf; + buf.BeginObject(); + buf.WriteKey("nanvalue"); + UNIT_ASSERT_EXCEPTION(buf.WriteFloat(std::numeric_limits<double>::quiet_NaN()), yexception); + } + + { + NJsonWriter::TBuf buf; + buf.BeginObject(); + buf.WriteKey("infvalue"); + UNIT_ASSERT_EXCEPTION(buf.WriteFloat(std::numeric_limits<double>::infinity()), yexception); + } + + { + NJsonWriter::TBuf buf; + buf.BeginList(); + UNIT_ASSERT_EXCEPTION(buf.WriteFloat(std::numeric_limits<double>::quiet_NaN()), yexception); + } + + { + NJsonWriter::TBuf buf; + buf.BeginList(); + UNIT_ASSERT_EXCEPTION(buf.WriteFloat(std::numeric_limits<double>::infinity()), yexception); + } + + { + NJsonWriter::TBuf buf; + buf.SetWriteNanAsString(); + + buf.BeginObject() .WriteKey("nanvalue") .WriteFloat(std::numeric_limits<double>::quiet_NaN()) .WriteKey("infvalue") .WriteFloat(std::numeric_limits<double>::infinity()) .WriteKey("minus_infvalue") .WriteFloat(-std::numeric_limits<float>::infinity()) - .WriteKey("l") + .WriteKey("l") .BeginList() .WriteFloat(std::numeric_limits<float>::quiet_NaN()) .EndList() .EndObject(); - - UNIT_ASSERT_STRINGS_EQUAL(buf.Str(), R"raw_json({"nanvalue":"nan","infvalue":"inf","minus_infvalue":"-inf","l":["nan"]})raw_json"); - } + + UNIT_ASSERT_STRINGS_EQUAL(buf.Str(), R"raw_json({"nanvalue":"nan","infvalue":"inf","minus_infvalue":"-inf","l":["nan"]})raw_json"); + } { NJsonWriter::TBuf buf; @@ -264,7 +264,7 @@ Y_UNIT_TEST_SUITE(JsonWriter) { UNIT_ASSERT_STRINGS_EQUAL(buf.Str(), R"({"\u003C\u003E&":"Ololo","<>&":"Ololo2"})"); } - } + } Y_UNIT_TEST(WriteUninitializedBoolDoesntCrashProgram) { // makes sense only in release build w/ address sanitizer diff --git a/library/cpp/lfalloc/lf_allocX64.h b/library/cpp/lfalloc/lf_allocX64.h index fd2a906d6ff..af3cc1730a9 100644 --- a/library/cpp/lfalloc/lf_allocX64.h +++ b/library/cpp/lfalloc/lf_allocX64.h @@ -115,7 +115,7 @@ static inline long AtomicSub(TAtomic& a, long b) { #ifndef _darwin_ -#ifndef Y_ARRAY_SIZE +#ifndef Y_ARRAY_SIZE #define Y_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) #endif diff --git a/library/cpp/lwtrace/event.h b/library/cpp/lwtrace/event.h index e53a620c456..48fdc6b85ac 100644 --- a/library/cpp/lwtrace/event.h 
+++ b/library/cpp/lwtrace/event.h @@ -2,7 +2,7 @@ #include "preprocessor.h" #include "signature.h" -#include "param_traits.h" +#include "param_traits.h" #include <library/cpp/lwtrace/protos/lwtrace.pb.h> diff --git a/library/cpp/lwtrace/param_traits.h b/library/cpp/lwtrace/param_traits.h index a0faeb50dbb..8ff87efd0b0 100644 --- a/library/cpp/lwtrace/param_traits.h +++ b/library/cpp/lwtrace/param_traits.h @@ -1,66 +1,66 @@ -#pragma once -#include "signature.h" - -#include <util/datetime/base.h> -#include <util/string/builder.h> - -#include <limits> - -#ifndef LWTRACE_DISABLE - -namespace NLWTrace { - - template <> - struct TParamTraits<TInstant> { - using TStoreType = double; - using TFuncParam = TInstant; - - inline static void ToString(TStoreType value, TString* out) { - *out = TParamConv<TStoreType>::ToString(value); - } - - inline static TStoreType ToStoreType(TInstant value) { - if (value == TInstant::Max()) { - return std::numeric_limits<TStoreType>::infinity(); - } else { - return static_cast<TStoreType>(value.MicroSeconds()) / 1000000.0; // seconds count - } - } - }; - - template <> - struct TParamTraits<TDuration> { - using TStoreType = double; - using TFuncParam = TDuration; - - inline static void ToString(TStoreType value, TString* out) { - *out = TParamConv<TStoreType>::ToString(value); - } - - inline static TStoreType ToStoreType(TDuration value) { - if (value == TDuration::Max()) { - return std::numeric_limits<TStoreType>::infinity(); - } else { - return static_cast<TStoreType>(value.MicroSeconds()) / 1000.0; // milliseconds count - } - } - }; - - // Param for enum with GENERATE_ENUM_SERIALIZATION enabled or operator<< implemented - template <class TEnum> - struct TEnumParamWithSerialization { - using TStoreType = typename TParamTraits<std::underlying_type_t<TEnum>>::TStoreType; - using TFuncParam = TEnum; - - inline static void ToString(TStoreType stored, TString* out) { - *out = TStringBuilder() << static_cast<TEnum>(stored) << " (" << stored << ")"; - } - - inline static TStoreType ToStoreType(TFuncParam v) { - return static_cast<TStoreType>(v); - } - }; - -} // namespace NLWTrace - -#endif // LWTRACE_DISABLE +#pragma once +#include "signature.h" + +#include <util/datetime/base.h> +#include <util/string/builder.h> + +#include <limits> + +#ifndef LWTRACE_DISABLE + +namespace NLWTrace { + + template <> + struct TParamTraits<TInstant> { + using TStoreType = double; + using TFuncParam = TInstant; + + inline static void ToString(TStoreType value, TString* out) { + *out = TParamConv<TStoreType>::ToString(value); + } + + inline static TStoreType ToStoreType(TInstant value) { + if (value == TInstant::Max()) { + return std::numeric_limits<TStoreType>::infinity(); + } else { + return static_cast<TStoreType>(value.MicroSeconds()) / 1000000.0; // seconds count + } + } + }; + + template <> + struct TParamTraits<TDuration> { + using TStoreType = double; + using TFuncParam = TDuration; + + inline static void ToString(TStoreType value, TString* out) { + *out = TParamConv<TStoreType>::ToString(value); + } + + inline static TStoreType ToStoreType(TDuration value) { + if (value == TDuration::Max()) { + return std::numeric_limits<TStoreType>::infinity(); + } else { + return static_cast<TStoreType>(value.MicroSeconds()) / 1000.0; // milliseconds count + } + } + }; + + // Param for enum with GENERATE_ENUM_SERIALIZATION enabled or operator<< implemented + template <class TEnum> + struct TEnumParamWithSerialization { + using TStoreType = typename 
TParamTraits<std::underlying_type_t<TEnum>>::TStoreType; + using TFuncParam = TEnum; + + inline static void ToString(TStoreType stored, TString* out) { + *out = TStringBuilder() << static_cast<TEnum>(stored) << " (" << stored << ")"; + } + + inline static TStoreType ToStoreType(TFuncParam v) { + return static_cast<TStoreType>(v); + } + }; + +} // namespace NLWTrace + +#endif // LWTRACE_DISABLE diff --git a/library/cpp/lwtrace/preprocessor.h b/library/cpp/lwtrace/preprocessor.h index 40865467b29..205891fd5e2 100644 --- a/library/cpp/lwtrace/preprocessor.h +++ b/library/cpp/lwtrace/preprocessor.h @@ -57,7 +57,7 @@ /**/ // Use for code generation to handle parameter lists -// NOTE: this is the only place to change if more parameters needed +// NOTE: this is the only place to change if more parameters needed #define FOREACH_PARAMNUM(MACRO, ...) \ MACRO(0, ##__VA_ARGS__) \ MACRO(1, ##__VA_ARGS__) \ @@ -114,9 +114,9 @@ #define LWTRACE_TEMPLATE_PARAMS_NODEF LWTRACE_EXPAND(LWTRACE_EAT FOREACH_PARAMNUM(LWTRACE_TEMPLATE_PARAMS_NODEF_I)(0)) #define LWTRACE_TEMPLATE_ARGS_I(i) (1) TP##i LWTRACE_COMMA #define LWTRACE_TEMPLATE_ARGS LWTRACE_EXPAND(LWTRACE_EAT FOREACH_PARAMNUM(LWTRACE_TEMPLATE_ARGS_I)(0)) -#define LWTRACE_FUNCTION_PARAMS_I(i) (1) typename ::NLWTrace::TParamTraits<TP##i>::TFuncParam p##i = ERROR_not_enough_parameters() LWTRACE_COMMA +#define LWTRACE_FUNCTION_PARAMS_I(i) (1) typename ::NLWTrace::TParamTraits<TP##i>::TFuncParam p##i = ERROR_not_enough_parameters() LWTRACE_COMMA #define LWTRACE_FUNCTION_PARAMS LWTRACE_EXPAND(LWTRACE_EAT FOREACH_PARAMNUM(LWTRACE_FUNCTION_PARAMS_I)(0)) -#define LWTRACE_PREPARE_PARAMS_I(i, params) params.Param[i].template CopyConstruct<typename ::NLWTrace::TParamTraits<TP##i>::TStoreType>(::NLWTrace::TParamTraits<TP##i>::ToStoreType(p##i)); +#define LWTRACE_PREPARE_PARAMS_I(i, params) params.Param[i].template CopyConstruct<typename ::NLWTrace::TParamTraits<TP##i>::TStoreType>(::NLWTrace::TParamTraits<TP##i>::ToStoreType(p##i)); #define LWTRACE_PREPARE_PARAMS(params) \ do { \ FOREACH_PARAMNUM(LWTRACE_PREPARE_PARAMS_I, params) \ diff --git a/library/cpp/lwtrace/probe.h b/library/cpp/lwtrace/probe.h index 31fa282da3f..d4be4c4f4d4 100644 --- a/library/cpp/lwtrace/probe.h +++ b/library/cpp/lwtrace/probe.h @@ -176,7 +176,7 @@ namespace NLWTrace { inline void PreparePtr<TNil>(const TNil&, const TNil*&) { } -#define LWTRACE_SCOPED_FUNCTION_PARAMS_I(i) (1) typename ::NLWTrace::TParamTraits<TP##i>::TFuncParam p##i = ERROR_not_enough_parameters() LWTRACE_COMMA +#define LWTRACE_SCOPED_FUNCTION_PARAMS_I(i) (1) typename ::NLWTrace::TParamTraits<TP##i>::TFuncParam p##i = ERROR_not_enough_parameters() LWTRACE_COMMA #define LWTRACE_SCOPED_FUNCTION_PARAMS LWTRACE_EXPAND(LWTRACE_EAT FOREACH_PARAMNUM(LWTRACE_SCOPED_FUNCTION_PARAMS_I)(0)) #define LWTRACE_SCOPED_FUNCTION_PARAMS_BY_REF_I(i) (1) typename ::NLWTrace::TParamTraits<TP##i>::TStoreType& p##i = *(ERROR_not_enough_parameters*)(HidePointerOrigin(nullptr))LWTRACE_COMMA #define LWTRACE_SCOPED_FUNCTION_PARAMS_BY_REF LWTRACE_EXPAND(LWTRACE_EAT FOREACH_PARAMNUM(LWTRACE_SCOPED_FUNCTION_PARAMS_BY_REF_I)(0)) diff --git a/library/cpp/lwtrace/signature.h b/library/cpp/lwtrace/signature.h index 868bd9bcf26..aaf2e95bc9a 100644 --- a/library/cpp/lwtrace/signature.h +++ b/library/cpp/lwtrace/signature.h @@ -7,17 +7,17 @@ #include <util/generic/cast.h> #include <util/generic/string.h> #include <util/generic/typetraits.h> -#include <util/string/builder.h> +#include <util/string/builder.h> #include <util/string/cast.h> #include 
<util/string/printf.h> #include <google/protobuf/descriptor.h> #include <google/protobuf/generated_enum_reflection.h> - + #include <library/cpp/lwtrace/protos/lwtrace.pb.h> -#include <type_traits> - +#include <type_traits> + namespace NLWTrace { // Class to hold parameter values parsed from trace query predicate operators template <class T> @@ -465,76 +465,76 @@ namespace NLWTrace { #undef FOREACH_PARAMTYPE_MACRO template <class T> - struct TParamTraits; - - // Enum types traits impl. - template <class TEnum, class = std::enable_if_t<std::is_enum_v<TEnum>>> - struct TEnumParamTraitsImpl { - using TStoreType = typename TParamTraits<std::underlying_type_t<TEnum>>::TStoreType; - using TFuncParam = TEnum; - + struct TParamTraits; + + // Enum types traits impl. + template <class TEnum, class = std::enable_if_t<std::is_enum_v<TEnum>>> + struct TEnumParamTraitsImpl { + using TStoreType = typename TParamTraits<std::underlying_type_t<TEnum>>::TStoreType; + using TFuncParam = TEnum; + inline static void ToString(typename TTypeTraits<TStoreType>::TFuncParam stored, TString* out) { - if constexpr (google::protobuf::is_proto_enum<TEnum>::value) { - const google::protobuf::EnumValueDescriptor* valueDescriptor = google::protobuf::GetEnumDescriptor<TEnum>()->FindValueByNumber(stored); - if (valueDescriptor) { - *out = TStringBuilder() << valueDescriptor->name() << " (" << stored << ")"; - } else { - *out = TParamConv<TStoreType>::ToString(stored); - } - } else { - *out = TParamConv<TStoreType>::ToString(stored); - } - } - - inline static TStoreType ToStoreType(TFuncParam v) { - return static_cast<TStoreType>(v); - } + if constexpr (google::protobuf::is_proto_enum<TEnum>::value) { + const google::protobuf::EnumValueDescriptor* valueDescriptor = google::protobuf::GetEnumDescriptor<TEnum>()->FindValueByNumber(stored); + if (valueDescriptor) { + *out = TStringBuilder() << valueDescriptor->name() << " (" << stored << ")"; + } else { + *out = TParamConv<TStoreType>::ToString(stored); + } + } else { + *out = TParamConv<TStoreType>::ToString(stored); + } + } + + inline static TStoreType ToStoreType(TFuncParam v) { + return static_cast<TStoreType>(v); + } }; - template <class TCustomType> - struct TCustomTraitsImpl { - using TStoreType = typename TParamTraits<typename TCustomType::TStoreType>::TStoreType; //see STORE_TYPE_AS - using TFuncParam = typename TCustomType::TFuncParam; - - inline static void ToString(typename TTypeTraits<TStoreType>::TFuncParam stored, TString* out) { - TCustomType::ToString(stored, out); - } - - inline static TStoreType ToStoreType(TFuncParam v) { - return TCustomType::ToStoreType(v); - } - }; - - template <class T, bool isEnum> - struct TParamTraitsImpl; - - template <class TEnum> - struct TParamTraitsImpl<TEnum, true> : TEnumParamTraitsImpl<TEnum> { - }; - - template <class TCustomType> - struct TParamTraitsImpl<TCustomType, false> : TCustomTraitsImpl<TCustomType> { - }; - - template <class T> - struct TParamTraits : TParamTraitsImpl<T, std::is_enum_v<T>> { - }; - - // Standard stored types traits. 
- + template <class TCustomType> + struct TCustomTraitsImpl { + using TStoreType = typename TParamTraits<typename TCustomType::TStoreType>::TStoreType; //see STORE_TYPE_AS + using TFuncParam = typename TCustomType::TFuncParam; + + inline static void ToString(typename TTypeTraits<TStoreType>::TFuncParam stored, TString* out) { + TCustomType::ToString(stored, out); + } + + inline static TStoreType ToStoreType(TFuncParam v) { + return TCustomType::ToStoreType(v); + } + }; + + template <class T, bool isEnum> + struct TParamTraitsImpl; + + template <class TEnum> + struct TParamTraitsImpl<TEnum, true> : TEnumParamTraitsImpl<TEnum> { + }; + + template <class TCustomType> + struct TParamTraitsImpl<TCustomType, false> : TCustomTraitsImpl<TCustomType> { + }; + + template <class T> + struct TParamTraits : TParamTraitsImpl<T, std::is_enum_v<T>> { + }; + + // Standard stored types traits. + #define STORE_TYPE_AS(input_t, store_as_t) \ template <> \ struct TParamTraits<input_t> { \ - using TStoreType = store_as_t; \ - using TFuncParam = typename TTypeTraits<input_t>::TFuncParam; \ - \ - inline static void ToString(typename TTypeTraits<TStoreType>::TFuncParam stored, TString* out) { \ + using TStoreType = store_as_t; \ + using TFuncParam = typename TTypeTraits<input_t>::TFuncParam; \ + \ + inline static void ToString(typename TTypeTraits<TStoreType>::TFuncParam stored, TString* out) { \ *out = TParamConv<TStoreType>::ToString(stored); \ } \ - \ - inline static TStoreType ToStoreType(TFuncParam v) { \ - return v; \ - } \ + \ + inline static TStoreType ToStoreType(TFuncParam v) { \ + return v; \ + } \ }; \ /**/ STORE_TYPE_AS(ui8, ui64); @@ -550,18 +550,18 @@ namespace NLWTrace { #undef STORE_TYPE_AS #undef FOREACH_PARAMTYPE_MACRO - // Nil type staits. + // Nil type staits. 
template <> struct TParamTraits<TNil> { - using TStoreType = TNil; - using TFuncParam = TTypeTraits<TNil>::TFuncParam; - - inline static void ToString(typename TTypeTraits<TNil>::TFuncParam, TString*) { - } - - inline static TNil ToStoreType(TFuncParam v) { - return v; - } + using TStoreType = TNil; + using TFuncParam = TTypeTraits<TNil>::TFuncParam; + + inline static void ToString(typename TTypeTraits<TNil>::TFuncParam, TString*) { + } + + inline static TNil ToStoreType(TFuncParam v) { + return v; + } }; inline EParamTypePb ParamTypeToProtobuf(const char* paramType) { diff --git a/library/cpp/lwtrace/trace_ut.cpp b/library/cpp/lwtrace/trace_ut.cpp index cb03e4fbde9..fac68698920 100644 --- a/library/cpp/lwtrace/trace_ut.cpp +++ b/library/cpp/lwtrace/trace_ut.cpp @@ -1,28 +1,28 @@ #include "all.h" #include <library/cpp/lwtrace/protos/lwtrace.pb.h> - + #include <library/cpp/testing/unittest/registar.h> #include <google/protobuf/text_format.h> -enum ESimpleEnum { - ValueA, - ValueB, -}; - -enum class EEnumClass { - ValueC, - ValueD, -}; - +enum ESimpleEnum { + ValueA, + ValueB, +}; + +enum class EEnumClass { + ValueC, + ValueD, +}; + #define LWTRACE_UT_PROVIDER(PROBE, EVENT, GROUPS, TYPES, NAMES) \ PROBE(NoParam, GROUPS("Group"), TYPES(), NAMES()) \ PROBE(IntParam, GROUPS("Group"), TYPES(ui32), NAMES("value")) \ PROBE(StringParam, GROUPS("Group"), TYPES(TString), NAMES("value")) \ PROBE(SymbolParam, GROUPS("Group"), TYPES(NLWTrace::TSymbol), NAMES("symbol")) \ PROBE(CheckParam, GROUPS("Group"), TYPES(NLWTrace::TCheck), NAMES("value")) \ - PROBE(EnumParams, GROUPS("Group"), TYPES(ESimpleEnum, EEnumClass), NAMES("simpleEnum", "enumClass")) \ + PROBE(EnumParams, GROUPS("Group"), TYPES(ESimpleEnum, EEnumClass), NAMES("simpleEnum", "enumClass")) \ PROBE(InstantParam, GROUPS("Group"), TYPES(TInstant), NAMES("value")) \ PROBE(DurationParam, GROUPS("Group"), TYPES(TDuration), NAMES("value")) \ PROBE(ProtoEnum, GROUPS("Group"), TYPES(NLWTrace::EOperatorType), NAMES("value")) \ @@ -61,13 +61,13 @@ Y_UNIT_TEST_SUITE(LWTraceTrace) { } } reader; mngr.ReadLog("Query1", reader); - - LWPROBE(EnumParams, ValueA, EEnumClass::ValueC); - LWPROBE(InstantParam, TInstant::Seconds(42)); - LWPROBE(DurationParam, TDuration::MilliSeconds(146)); + + LWPROBE(EnumParams, ValueA, EEnumClass::ValueC); + LWPROBE(InstantParam, TInstant::Seconds(42)); + LWPROBE(DurationParam, TDuration::MilliSeconds(146)); LWPROBE(ProtoEnum, OT_EQ); } - + Y_UNIT_TEST(Predicate) { TManager mngr(*Singleton<TProbeRegistry>(), true); TQuery q; @@ -542,13 +542,13 @@ Y_UNIT_TEST_SUITE(LWTraceTrace) { UNIT_ASSERT(t1.NanoSeconds() - t0.NanoSeconds() >= sleepTimeNs); } - Y_UNIT_TEST(ProtoEnumTraits) { + Y_UNIT_TEST(ProtoEnumTraits) { using TPbEnumTraits = TParamTraits<EOperatorType>; - TString str; + TString str; TPbEnumTraits::ToString(TPbEnumTraits::ToStoreType(OT_EQ), &str); UNIT_ASSERT_STRINGS_EQUAL(str, "OT_EQ (0)"); - } - + } + Y_UNIT_TEST(Track) { TManager mngr(*Singleton<TProbeRegistry>(), true); TQuery q; diff --git a/library/cpp/messagebus/message_status.h b/library/cpp/messagebus/message_status.h index e1878960b34..be274d42522 100644 --- a/library/cpp/messagebus/message_status.h +++ b/library/cpp/messagebus/message_status.h @@ -43,15 +43,15 @@ namespace NBus { // For lwtrace struct TMessageStatusField { typedef int TStoreType; - typedef int TFuncParam; - + typedef int TFuncParam; + static void ToString(int value, TString* out) { *out = GetMessageStatus((NBus::EMessageStatus)value); } - - static int ToStoreType(int value) { - return value; - 
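The TEnumParamTraitsImpl/TParamTraits dispatch added above stores an enum probe parameter as its underlying integer and, for protobuf enums, renders it as "NAME (number)". A minimal sketch of calling the trait directly, mirroring the ProtoEnumTraits test in this patch; the only assumption is that library/cpp/lwtrace/all.h (which trace_ut.cpp includes as "all.h") pulls in the trait definitions:

#include <library/cpp/lwtrace/all.h>
#include <library/cpp/lwtrace/protos/lwtrace.pb.h>

#include <util/generic/string.h>
#include <util/stream/output.h>

int main() {
    using namespace NLWTrace;
    // EOperatorType is a protobuf enum, so TEnumParamTraitsImpl is selected and
    // ToString() appends the value descriptor's name to the numeric value.
    using TTraits = TParamTraits<EOperatorType>;
    TString str;
    TTraits::ToString(TTraits::ToStoreType(OT_EQ), &str);
    Cout << str << Endl; // "OT_EQ (0)", as asserted by the ProtoEnumTraits unit test
    return 0;
}
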
} + + static int ToStoreType(int value) { + return value; + } }; } // ns diff --git a/library/cpp/messagebus/remote_connection.cpp b/library/cpp/messagebus/remote_connection.cpp index 22932569dbd..283b277d7bd 100644 --- a/library/cpp/messagebus/remote_connection.cpp +++ b/library/cpp/messagebus/remote_connection.cpp @@ -67,7 +67,7 @@ namespace NBus { } TRemoteConnection::TWriterData::~TWriterData() { - Y_VERIFY(AtomicGet(Down)); + Y_VERIFY(AtomicGet(Down)); Y_VERIFY(SendQueue.Empty()); } @@ -119,7 +119,7 @@ namespace NBus { } TRemoteConnection::TReaderData::~TReaderData() { - Y_VERIFY(AtomicGet(Down)); + Y_VERIFY(AtomicGet(Down)); } void TRemoteConnection::Send(TNonDestroyingAutoPtr<TBusMessage> msg) { diff --git a/library/cpp/monlib/dynamic_counters/counters.cpp b/library/cpp/monlib/dynamic_counters/counters.cpp index 3635d87d0d2..b6371eb5c73 100644 --- a/library/cpp/monlib/dynamic_counters/counters.cpp +++ b/library/cpp/monlib/dynamic_counters/counters.cpp @@ -19,10 +19,10 @@ namespace { return dynamic_cast<TExpiringCounter*>(ptr.Get()); } - TExpiringHistogramCounter* AsExpiringHistogramCounter(const TIntrusivePtr<TCountableBase>& ptr) { - return dynamic_cast<TExpiringHistogramCounter*>(ptr.Get()); - } - + TExpiringHistogramCounter* AsExpiringHistogramCounter(const TIntrusivePtr<TCountableBase>& ptr) { + return dynamic_cast<TExpiringHistogramCounter*>(ptr.Get()); + } + THistogramCounter* AsHistogram(const TIntrusivePtr<TCountableBase>& ptr) { return dynamic_cast<THistogramCounter*>(ptr.Get()); } @@ -38,10 +38,10 @@ namespace { THistogramPtr AsHistogramRef(const TIntrusivePtr<TCountableBase>& ptr) { return VerifyDynamicCast<THistogramCounter*>(ptr.Get()); } - - bool IsExpiringCounter(const TIntrusivePtr<TCountableBase>& ptr) { - return AsExpiringCounter(ptr) != nullptr || AsExpiringHistogramCounter(ptr) != nullptr; - } + + bool IsExpiringCounter(const TIntrusivePtr<TCountableBase>& ptr) { + return AsExpiringCounter(ptr) != nullptr || AsExpiringHistogramCounter(ptr) != nullptr; + } } static constexpr TStringBuf INDENT = " "; @@ -76,11 +76,11 @@ THistogramPtr TDynamicCounters::GetHistogram(const TString& value, IHistogramCol THistogramPtr TDynamicCounters::GetNamedHistogram(const TString& name, const TString& value, IHistogramCollectorPtr collector, bool derivative, EVisibility vis) { return AsHistogramRef(GetNamedCounterImpl<false, THistogramCounter>(name, value, std::move(collector), derivative, vis)); -} +} THistogramPtr TDynamicCounters::GetExpiringHistogram(const TString& value, IHistogramCollectorPtr collector, bool derivative, EVisibility vis) { return GetExpiringNamedHistogram("sensor", value, std::move(collector), derivative, vis); -} +} THistogramPtr TDynamicCounters::GetExpiringNamedHistogram(const TString& name, const TString& value, IHistogramCollectorPtr collector, bool derivative, EVisibility vis) { return AsHistogramRef(GetNamedCounterImpl<true, TExpiringHistogramCounter>(name, value, std::move(collector), derivative, vis)); @@ -265,7 +265,7 @@ void TDynamicCounters::RemoveExpired() const { TAtomicBase count = 0; for (auto it = Counters.begin(); it != Counters.end();) { - if (IsExpiringCounter(it->second) && it->second->RefCount() == 1) { + if (IsExpiringCounter(it->second) && it->second->RefCount() == 1) { it = Counters.erase(it); ++count; } else { @@ -275,29 +275,29 @@ void TDynamicCounters::RemoveExpired() const { AtomicSub(ExpiringCount, count); } - -template <bool expiring, class TCounterType, class... 
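TMessageStatusField above is one concrete use of the TCustomTraitsImpl hook introduced in lwtrace: any struct exposing TStoreType, TFuncParam, ToString and ToStoreType can describe how a probe parameter is stored and printed. A hedged sketch of the same shape for a made-up status enum; EMyState, GetMyStateName and TMyStateField are illustrative names, not part of the patch:

#include <util/generic/string.h>

// Hypothetical application enum; stands in for NBus::EMessageStatus.
enum EMyState {
    MS_IDLE = 0,
    MS_BUSY = 1,
};

inline const char* GetMyStateName(EMyState s) {
    return s == MS_IDLE ? "IDLE" : "BUSY";
}

// Same shape as TMessageStatusField: the value is stored as int and
// ToString() is called when the trace log is rendered.
struct TMyStateField {
    typedef int TStoreType;
    typedef int TFuncParam;

    static void ToString(int value, TString* out) {
        *out = GetMyStateName(static_cast<EMyState>(value));
    }

    static int ToStoreType(int value) {
        return value;
    }
};
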
TArgs> -TDynamicCounters::TCountablePtr TDynamicCounters::GetNamedCounterImpl(const TString& name, const TString& value, TArgs&&... args) { + +template <bool expiring, class TCounterType, class... TArgs> +TDynamicCounters::TCountablePtr TDynamicCounters::GetNamedCounterImpl(const TString& name, const TString& value, TArgs&&... args) { { TReadGuard g(Lock); auto it = Counters.find({name, value}); if (it != Counters.end()) { return it->second; } - } - + } + auto g = LockForUpdate("GetNamedCounterImpl", name, value); const TChildId key(name, value); auto it = Counters.lower_bound(key); if (it == Counters.end() || it->first != key) { auto value = MakeIntrusive<TCounterType>(std::forward<TArgs>(args)...); it = Counters.emplace_hint(it, key, value); - if constexpr (expiring) { + if constexpr (expiring) { AtomicIncrement(ExpiringCount); - } - } - return it->second; -} + } + } + return it->second; +} template <class TCounterType> TDynamicCounters::TCountablePtr TDynamicCounters::FindNamedCounterImpl(const TString& name, const TString& value) const { diff --git a/library/cpp/monlib/dynamic_counters/counters.h b/library/cpp/monlib/dynamic_counters/counters.h index dc178cfbe01..c25ac18605b 100644 --- a/library/cpp/monlib/dynamic_counters/counters.h +++ b/library/cpp/monlib/dynamic_counters/counters.h @@ -175,10 +175,10 @@ namespace NMonitoring { bool Derivative_; }; - struct TExpiringHistogramCounter: public THistogramCounter { - using THistogramCounter::THistogramCounter; - }; - + struct TExpiringHistogramCounter: public THistogramCounter { + using THistogramCounter::THistogramCounter; + }; + using THistogramPtr = TIntrusivePtr<THistogramCounter>; #ifdef _MSC_VER @@ -280,7 +280,7 @@ namespace NMonitoring { const TString& value, bool derivative = false, TCountableBase::EVisibility visibility = TCountableBase::EVisibility::Public); - + THistogramPtr GetHistogram( const TString& value, IHistogramCollectorPtr collector, @@ -364,8 +364,8 @@ namespace NMonitoring { void RegisterCountable(const TString& name, const TString& value, TCountablePtr countable); void RemoveExpired() const; - template <bool expiring, class TCounterType, class... TArgs> - TCountablePtr GetNamedCounterImpl(const TString& name, const TString& value, TArgs&&... args); + template <bool expiring, class TCounterType, class... TArgs> + TCountablePtr GetNamedCounterImpl(const TString& name, const TString& value, TArgs&&... 
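GetNamedCounterImpl above takes the classic two-phase route: an optimistic lookup under the read lock, then a re-check and insert under the exclusive lock (bumping ExpiringCount when the counter is expiring). The sketch below illustrates only that locking pattern with standard-library stand-ins; std::shared_mutex and std::map replace the library's TReadGuard/LockForUpdate and Counters map, and every name here is illustrative:

#include <map>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <string>

struct TCounter { long Value = 0; };

class TCounterRegistry {
public:
    std::shared_ptr<TCounter> GetOrCreate(const std::string& name) {
        {
            // Fast path: most calls find an existing counter under the shared lock.
            std::shared_lock<std::shared_mutex> read(Lock);
            auto it = Counters.find(name);
            if (it != Counters.end()) {
                return it->second;
            }
        }
        // Slow path: re-check under the exclusive lock, since another thread may
        // have inserted the same counter between the two lock acquisitions.
        std::unique_lock<std::shared_mutex> write(Lock);
        auto it = Counters.lower_bound(name);
        if (it == Counters.end() || it->first != name) {
            it = Counters.emplace_hint(it, name, std::make_shared<TCounter>());
        }
        return it->second;
    }

private:
    std::shared_mutex Lock;
    std::map<std::string, std::shared_ptr<TCounter>> Counters;
};
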
args); template <class TCounterType> TCountablePtr FindNamedCounterImpl(const TString& name, const TString& value) const; diff --git a/library/cpp/monlib/dynamic_counters/counters_ut.cpp b/library/cpp/monlib/dynamic_counters/counters_ut.cpp index 3591037e0a7..7d44e682a8e 100644 --- a/library/cpp/monlib/dynamic_counters/counters_ut.cpp +++ b/library/cpp/monlib/dynamic_counters/counters_ut.cpp @@ -230,15 +230,15 @@ Y_UNIT_TEST_SUITE(TDynamicCountersTest) { { auto c = rootGroup->GetExpiringCounter("foo"); - auto h = rootGroup->GetExpiringHistogram("bar", ExplicitHistogram({1, 42})); - h->Collect(15); + auto h = rootGroup->GetExpiringHistogram("bar", ExplicitHistogram({1, 42})); + h->Collect(15); TStringStream ss; TCountersPrinter printer(&ss); rootGroup->Accept("root", "counters", printer); UNIT_ASSERT_STRINGS_EQUAL(ss.Str(), "root:counters {\n" - " sensor:bar = {1: 0, 42: 1, inf: 0}\n" + " sensor:bar = {1: 0, 42: 1, inf: 0}\n" " sensor:foo = 0\n" "}\n"); } diff --git a/library/cpp/packers/ut/packers_ut.cpp b/library/cpp/packers/ut/packers_ut.cpp index 18ce2150d1e..53304471c78 100644 --- a/library/cpp/packers/ut/packers_ut.cpp +++ b/library/cpp/packers/ut/packers_ut.cpp @@ -4,7 +4,7 @@ #include <utility> #include <util/charset/wide.h> -#include <util/generic/algorithm.h> +#include <util/generic/algorithm.h> #include <util/generic/buffer.h> #include <util/generic/map.h> #include <util/generic/vector.h> @@ -15,13 +15,13 @@ #include <util/random/random.h> -#include <util/string/hex.h> - +#include <util/string/hex.h> + #include "packers.h" #include <array> -#include <iterator> - +#include <iterator> + class TPackersTest: public TTestBase { private: UNIT_TEST_SUITE(TPackersTest); diff --git a/library/cpp/protobuf/json/config.h b/library/cpp/protobuf/json/config.h index dc84fb4d5db..3985b917bc5 100644 --- a/library/cpp/protobuf/json/config.h +++ b/library/cpp/protobuf/json/config.h @@ -1,19 +1,19 @@ -#pragma once - -#include "string_transform.h" +#pragma once + +#include "string_transform.h" #include "name_generator.h" - -#include <util/generic/vector.h> + +#include <util/generic/vector.h> #include <util/generic/yexception.h> - + #include <functional> -namespace NProtobufJson { +namespace NProtobufJson { struct TProto2JsonConfig { using TSelf = TProto2JsonConfig; - + bool FormatOutput = false; - + enum MissingKeyMode { // Skip missing keys MissingKeySkip = 0, @@ -32,10 +32,10 @@ namespace NProtobufJson { }; MissingKeyMode MissingSingleKeyMode = MissingKeySkip; MissingKeyMode MissingRepeatedKeyMode = MissingKeySkip; - + /// Add null value for missing fields (false by default). bool AddMissingFields = false; - + enum EnumValueMode { EnumNumber = 0, // default EnumName, @@ -44,7 +44,7 @@ namespace NProtobufJson { EnumFullNameLowerCase, }; EnumValueMode EnumMode = EnumNumber; - + enum FldNameMode { FieldNameOriginalCase = 0, // default FieldNameLowerCase, @@ -54,7 +54,7 @@ namespace NProtobufJson { FieldNameSnakeCaseDense // ABC -> abc, UserID -> user_id }; FldNameMode FieldNameMode = FieldNameOriginalCase; - + enum ExtFldNameMode { ExtFldNameFull = 0, // default, field.full_name() ExtFldNameShort // field.name() @@ -64,14 +64,14 @@ namespace NProtobufJson { /// Use 'json_name' protobuf option for field name, mutually exclusive /// with FieldNameMode. bool UseJsonName = false; - + /// Transforms will be applied only to string values (== protobuf fields of string / bytes type). /// yajl_encode_string will be used if no transforms are specified. 
TVector<TStringTransformPtr> StringTransforms; /// Print map as object, otherwise print it as array of key/value objects bool MapAsObject = false; - + /// Stringify long integers which are not exactly representable by float or double values enum EStringifyLongNumbersMode { StringifyLongNumbersNever = 0, // default @@ -97,34 +97,34 @@ namespace NProtobufJson { MissingSingleKeyMode = mode; return *this; } - + TSelf& SetMissingRepeatedKeyMode(MissingKeyMode mode) { MissingRepeatedKeyMode = mode; return *this; } - + TSelf& SetAddMissingFields(bool add) { AddMissingFields = add; return *this; } - + TSelf& SetEnumMode(EnumValueMode mode) { EnumMode = mode; return *this; } - + TSelf& SetFieldNameMode(FldNameMode mode) { Y_ENSURE(mode == FieldNameOriginalCase || !UseJsonName, "FieldNameMode and UseJsonName are mutually exclusive"); FieldNameMode = mode; return *this; } - + TSelf& SetUseJsonName(bool jsonName) { Y_ENSURE(!jsonName || FieldNameMode == FieldNameOriginalCase, "FieldNameMode and UseJsonName are mutually exclusive"); UseJsonName = jsonName; return *this; } - + TSelf& SetExtensionFieldNameMode(ExtFldNameMode mode) { ExtensionFieldNameMode = mode; return *this; diff --git a/library/cpp/protobuf/json/field_option.h b/library/cpp/protobuf/json/field_option.h index c8a8bfbff5e..3a2db53a922 100644 --- a/library/cpp/protobuf/json/field_option.h +++ b/library/cpp/protobuf/json/field_option.h @@ -1,10 +1,10 @@ -#pragma once - +#pragma once + #include <google/protobuf/descriptor.h> #include <google/protobuf/descriptor.pb.h> #include <google/protobuf/message.h> - -namespace NProtobufJson { + +namespace NProtobufJson { // Functor that defines whether given field has some option set to true // // Example: @@ -20,13 +20,13 @@ namespace NProtobufJson { , Positive(positive) { } - + bool operator()(const NProtoBuf::Message&, const NProtoBuf::FieldDescriptor* field) const { const NProtoBuf::FieldOptions& opt = field->options(); const bool val = opt.GetExtension(Option); return Positive ? val : !val; } - + private: const TFieldOptionExtensionId& Option; bool Positive; @@ -35,6 +35,6 @@ namespace NProtobufJson { template <typename TFieldOptionExtensionId> TFieldOptionFunctor<TFieldOptionExtensionId> MakeFieldOptionFunctor(const TFieldOptionExtensionId& option, bool positive = true) { return TFieldOptionFunctor<TFieldOptionExtensionId>(option, positive); - } - -} + } + +} diff --git a/library/cpp/protobuf/json/filter.h b/library/cpp/protobuf/json/filter.h index 9a3ddb54fe3..7033998e146 100644 --- a/library/cpp/protobuf/json/filter.h +++ b/library/cpp/protobuf/json/filter.h @@ -1,27 +1,27 @@ -#pragma once - -#include "config.h" -#include "proto2json_printer.h" -#include "json_output_create.h" - -#include <util/generic/yexception.h> -#include <util/generic/utility.h> - -#include <functional> - -namespace NProtobufJson { +#pragma once + +#include "config.h" +#include "proto2json_printer.h" +#include "json_output_create.h" + +#include <util/generic/yexception.h> +#include <util/generic/utility.h> + +#include <functional> + +namespace NProtobufJson { template <typename TBasePrinter = TProto2JsonPrinter> // TBasePrinter is assumed to be a TProto2JsonPrinter descendant class TFilteringPrinter: public TBasePrinter { public: using TFieldPredicate = std::function<bool(const NProtoBuf::Message&, const NProtoBuf::FieldDescriptor*)>; - + template <typename... TArgs> TFilteringPrinter(TFieldPredicate isPrinted, TArgs&&... args) : TBasePrinter(std::forward<TArgs>(args)...) 
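The setters above make TProto2JsonConfig a small fluent builder, with Y_ENSURE guarding the FieldNameMode/UseJsonName conflict. A short configuration sketch focused on the missing-key options; RenderForApi is an illustrative wrapper, the config calls follow the header above:

#include <library/cpp/protobuf/json/proto2json.h>

using namespace NProtobufJson;

TString RenderForApi(const NProtoBuf::Message& msg) {
    TProto2JsonConfig cfg;
    cfg.SetMissingSingleKeyMode(TProto2JsonConfig::MissingKeyNull)      // unset singular fields -> null
       .SetMissingRepeatedKeyMode(TProto2JsonConfig::MissingKeyDefault) // unset repeated fields -> [] (or {} for maps)
       .SetAddMissingFields(false);
    // Note: SetUseJsonName(true) is only legal while FieldNameMode is still
    // FieldNameOriginalCase, per the Y_ENSURE checks above.
    return Proto2Json(msg, cfg);
}
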
, IsPrinted(std::move(isPrinted)) { } - + virtual void PrintField(const NProtoBuf::Message& proto, const NProtoBuf::FieldDescriptor& field, IJsonOutput& json, @@ -37,12 +37,12 @@ namespace NProtobufJson { inline void PrintWithFilter(const NProtoBuf::Message& msg, TFilteringPrinter<>::TFieldPredicate filter, IJsonOutput& output, const TProto2JsonConfig& config = TProto2JsonConfig()) { TFilteringPrinter<> printer(std::move(filter), config); printer.Print(msg, output); - } - + } + inline TString PrintWithFilter(const NProtoBuf::Message& msg, TFilteringPrinter<>::TFieldPredicate filter, const TProto2JsonConfig& config = TProto2JsonConfig()) { TString ret; PrintWithFilter(msg, std::move(filter), *CreateJsonMapOutput(ret, config), config); return ret; - } - -} + } + +} diff --git a/library/cpp/protobuf/json/inline.h b/library/cpp/protobuf/json/inline.h index e2d7bb6ef04..03a4db81017 100644 --- a/library/cpp/protobuf/json/inline.h +++ b/library/cpp/protobuf/json/inline.h @@ -23,10 +23,10 @@ // // 4) And then serialize it to json string with inlining, e.g.: // -// Cout << NProtobufJson::PrintInlined(o, MakeFieldOptionFunctor(this_is_json)) << Endl; -// -// 5) Alternatively you can specify a some more abstract functor for defining raw json fields +// Cout << NProtobufJson::PrintInlined(o, MakeFieldOptionFunctor(this_is_json)) << Endl; // +// 5) Alternatively you can specify a some more abstract functor for defining raw json fields +// // which will print following json to stdout: // {"A":{"inner":"value"}} // instead of @@ -35,17 +35,17 @@ // // See ut/inline_ut.cpp for additional examples of usage. -#include "config.h" -#include "proto2json_printer.h" -#include "json_output_create.h" +#include "config.h" +#include "proto2json_printer.h" +#include "json_output_create.h" #include <library/cpp/protobuf/util/simple_reflection.h> -#include <util/generic/maybe.h> -#include <util/generic/yexception.h> -#include <util/generic/utility.h> +#include <util/generic/maybe.h> +#include <util/generic/yexception.h> +#include <util/generic/utility.h> -#include <functional> +#include <functional> namespace NProtobufJson { template <typename TBasePrinter = TProto2JsonPrinter> // TBasePrinter is assumed to be a TProto2JsonPrinter descendant @@ -100,7 +100,7 @@ namespace NProtobufJson { private: TFieldPredicate IsInlined; }; - + inline void PrintInlined(const NProtoBuf::Message& msg, TInliningPrinter<>::TFieldPredicate isInlined, IJsonOutput& output, const TProto2JsonConfig& config = TProto2JsonConfig()) { TInliningPrinter<> printer(std::move(isInlined), config); printer.Print(msg, output); diff --git a/library/cpp/protobuf/json/json_output.h b/library/cpp/protobuf/json/json_output.h index df143af57a7..2fc79341711 100644 --- a/library/cpp/protobuf/json/json_output.h +++ b/library/cpp/protobuf/json/json_output.h @@ -1,9 +1,9 @@ -#pragma once - -#include <util/generic/ptr.h> -#include <util/generic/strbuf.h> - -namespace NProtobufJson { +#pragma once + +#include <util/generic/ptr.h> +#include <util/generic/strbuf.h> + +namespace NProtobufJson { class IJsonOutput { public: template <typename T> @@ -15,7 +15,7 @@ namespace NProtobufJson { DoWriteNull(); return *this; } - + IJsonOutput& BeginList() { DoBeginList(); return *this; @@ -24,7 +24,7 @@ namespace NProtobufJson { DoEndList(); return *this; } - + IJsonOutput& BeginObject() { DoBeginObject(); return *this; @@ -37,15 +37,15 @@ namespace NProtobufJson { DoEndObject(); return *this; } - + IJsonOutput& WriteRawJson(const TStringBuf& str) { DoWriteRawJson(str); return 
*this; } - + virtual ~IJsonOutput() { } - + protected: virtual void DoWrite(const TStringBuf& s) = 0; virtual void DoWrite(const TString& s) = 0; @@ -63,17 +63,17 @@ namespace NProtobufJson { virtual void DoWrite(double f) = 0; virtual void DoWrite(bool b) = 0; virtual void DoWriteNull() = 0; - + virtual void DoBeginList() = 0; virtual void DoEndList() = 0; - + virtual void DoBeginObject() = 0; virtual void DoWriteKey(const TStringBuf& key) = 0; virtual void DoEndObject() = 0; - + virtual void DoWriteRawJson(const TStringBuf& str) = 0; }; - + using TJsonMapOutputPtr = THolder<IJsonOutput>; - + } diff --git a/library/cpp/protobuf/json/json_output_create.cpp b/library/cpp/protobuf/json/json_output_create.cpp index 378e4ea65a0..0c1575d0ca0 100644 --- a/library/cpp/protobuf/json/json_output_create.cpp +++ b/library/cpp/protobuf/json/json_output_create.cpp @@ -1,18 +1,18 @@ -#include "json_output_create.h" - -#include "config.h" -#include "json_writer_output.h" -#include "json_value_output.h" - -namespace NProtobufJson { +#include "json_output_create.h" + +#include "config.h" +#include "json_writer_output.h" +#include "json_value_output.h" + +namespace NProtobufJson { TJsonMapOutputPtr CreateJsonMapOutput(IOutputStream& out, const NJson::TJsonWriterConfig& config) { return MakeHolder<TJsonWriterOutput>(&out, config); } - + TJsonMapOutputPtr CreateJsonMapOutput(NJson::TJsonWriter& writer) { return MakeHolder<TBaseJsonWriterOutput>(writer); } - + TJsonMapOutputPtr CreateJsonMapOutput(TString& str, const TProto2JsonConfig& config) { return MakeHolder<TJsonStringWriterOutput>(&str, config); } @@ -20,13 +20,13 @@ namespace NProtobufJson { TJsonMapOutputPtr CreateJsonMapOutput(TStringStream& out, const TProto2JsonConfig& config) { return MakeHolder<TJsonWriterOutput>(&out, config); } - + TJsonMapOutputPtr CreateJsonMapOutput(IOutputStream& out, const TProto2JsonConfig& config) { return MakeHolder<TJsonWriterOutput>(&out, config); } - + TJsonMapOutputPtr CreateJsonMapOutput(NJson::TJsonValue& json) { return MakeHolder<TJsonValueOutput>(json); } - -} + +} diff --git a/library/cpp/protobuf/json/json_output_create.h b/library/cpp/protobuf/json/json_output_create.h index ad3889f5e99..9f7b195cc75 100644 --- a/library/cpp/protobuf/json/json_output_create.h +++ b/library/cpp/protobuf/json/json_output_create.h @@ -1,22 +1,22 @@ -#pragma once - -#include "config.h" -#include "json_output.h" - -namespace NJson { +#pragma once + +#include "config.h" +#include "json_output.h" + +namespace NJson { class TJsonValue; class TJsonWriter; struct TJsonWriterConfig; } - + class IOutputStream; -class TStringStream; - -namespace NProtobufJson { +class TStringStream; + +namespace NProtobufJson { TJsonMapOutputPtr CreateJsonMapOutput(IOutputStream& out, const NJson::TJsonWriterConfig& config); TJsonMapOutputPtr CreateJsonMapOutput(NJson::TJsonWriter& writer); TJsonMapOutputPtr CreateJsonMapOutput(IOutputStream& out, const TProto2JsonConfig& config = TProto2JsonConfig()); TJsonMapOutputPtr CreateJsonMapOutput(TString& str, const TProto2JsonConfig& config = TProto2JsonConfig()); TJsonMapOutputPtr CreateJsonMapOutput(NJson::TJsonValue& json); - + } diff --git a/library/cpp/protobuf/json/json_value_output.cpp b/library/cpp/protobuf/json/json_value_output.cpp index d845cc1c74a..33a0f3d339a 100644 --- a/library/cpp/protobuf/json/json_value_output.cpp +++ b/library/cpp/protobuf/json/json_value_output.cpp @@ -1,12 +1,12 @@ #include "json_value_output.h" #include <library/cpp/json/json_reader.h> - -namespace NProtobufJson { + 
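CreateJsonMapOutput above is the factory that selects the concrete IJsonOutput backend, so the same printing code can target a TString, an arbitrary IOutputStream, an NJson::TJsonWriter, or an in-memory NJson::TJsonValue. A sketch of the TJsonValue route; ToJsonValue is an illustrative wrapper, and the explicit closeMap argument assumes the Proto2Json overload defined in proto2json.cpp is declared in proto2json.h:

#include <library/cpp/protobuf/json/json_output_create.h>
#include <library/cpp/protobuf/json/proto2json.h>

#include <library/cpp/json/writer/json_value.h>

using namespace NProtobufJson;

// Fill an NJson::TJsonValue tree instead of producing a string.
NJson::TJsonValue ToJsonValue(const NProtoBuf::Message& msg, const TProto2JsonConfig& cfg) {
    NJson::TJsonValue result;
    TJsonMapOutputPtr out = CreateJsonMapOutput(result);   // TJsonValueOutput under the hood
    Proto2Json(msg, *out, cfg, /*closeMap=*/true);
    return result;
}
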
+namespace NProtobufJson { template <typename T> void TJsonValueOutput::WriteImpl(const T& t) { Y_ASSERT(Context.top().Type == TContext::JSON_ARRAY || Context.top().Type == TContext::JSON_AFTER_KEY); - + if (Context.top().Type == TContext::JSON_AFTER_KEY) { Context.top().Value = t; Context.pop(); @@ -14,23 +14,23 @@ namespace NProtobufJson { Context.top().Value.AppendValue(t); } } - + void TJsonValueOutput::DoWrite(const TStringBuf& s) { WriteImpl(s); } - + void TJsonValueOutput::DoWrite(const TString& s) { WriteImpl(s); } - + void TJsonValueOutput::DoWrite(int i) { WriteImpl(i); } - + void TJsonValueOutput::DoWrite(unsigned int i) { WriteImpl(i); } - + void TJsonValueOutput::DoWrite(long long i) { WriteImpl(i); } @@ -38,26 +38,26 @@ namespace NProtobufJson { void TJsonValueOutput::DoWrite(unsigned long long i) { WriteImpl(i); } - + void TJsonValueOutput::DoWrite(float f) { WriteImpl(f); } - + void TJsonValueOutput::DoWrite(double f) { WriteImpl(f); } - + void TJsonValueOutput::DoWrite(bool b) { WriteImpl(b); } - + void TJsonValueOutput::DoWriteNull() { WriteImpl(NJson::JSON_NULL); } - + void TJsonValueOutput::DoBeginList() { Y_ASSERT(Context.top().Type == TContext::JSON_ARRAY || Context.top().Type == TContext::JSON_AFTER_KEY); - + if (Context.top().Type == TContext::JSON_AFTER_KEY) { Context.top().Type = TContext::JSON_ARRAY; Context.top().Value.SetType(NJson::JSON_ARRAY); @@ -65,15 +65,15 @@ namespace NProtobufJson { Context.emplace(TContext::JSON_ARRAY, Context.top().Value.AppendValue(NJson::JSON_ARRAY)); } } - + void TJsonValueOutput::DoEndList() { Y_ASSERT(Context.top().Type == TContext::JSON_ARRAY); Context.pop(); } - + void TJsonValueOutput::DoBeginObject() { Y_ASSERT(Context.top().Type == TContext::JSON_ARRAY || Context.top().Type == TContext::JSON_AFTER_KEY); - + if (Context.top().Type == TContext::JSON_AFTER_KEY) { Context.top().Type = TContext::JSON_MAP; Context.top().Value.SetType(NJson::JSON_MAP); @@ -81,20 +81,20 @@ namespace NProtobufJson { Context.emplace(TContext::JSON_MAP, Context.top().Value.AppendValue(NJson::JSON_MAP)); } } - + void TJsonValueOutput::DoWriteKey(const TStringBuf& key) { Y_ASSERT(Context.top().Type == TContext::JSON_MAP); Context.emplace(TContext::JSON_AFTER_KEY, Context.top().Value[key]); } - + void TJsonValueOutput::DoEndObject() { Y_ASSERT(Context.top().Type == TContext::JSON_MAP); Context.pop(); } - + void TJsonValueOutput::DoWriteRawJson(const TStringBuf& str) { Y_ASSERT(Context.top().Type == TContext::JSON_ARRAY || Context.top().Type == TContext::JSON_AFTER_KEY); - + if (Context.top().Type == TContext::JSON_AFTER_KEY) { NJson::ReadJsonTree(str, &Context.top().Value); Context.pop(); @@ -102,5 +102,5 @@ namespace NProtobufJson { NJson::ReadJsonTree(str, &Context.top().Value.AppendValue(NJson::JSON_UNDEFINED)); } } - -} + +} diff --git a/library/cpp/protobuf/json/json_value_output.h b/library/cpp/protobuf/json/json_value_output.h index 3fc6ff2ab0f..8d71d36af47 100644 --- a/library/cpp/protobuf/json/json_value_output.h +++ b/library/cpp/protobuf/json/json_value_output.h @@ -1,12 +1,12 @@ -#pragma once - +#pragma once + #include "json_output.h" #include <library/cpp/json/writer/json_value.h> - + #include <util/generic/stack.h> -namespace NProtobufJson { +namespace NProtobufJson { class TJsonValueOutput: public IJsonOutput { public: TJsonValueOutput(NJson::TJsonValue& value) @@ -14,7 +14,7 @@ namespace NProtobufJson { { Context.emplace(TContext::JSON_AFTER_KEY, Root); } - + void DoWrite(const TStringBuf& s) override; void DoWrite(const TString& s) 
override; void DoWrite(int i) override; @@ -25,27 +25,27 @@ namespace NProtobufJson { void DoWrite(double f) override; void DoWrite(bool b) override; void DoWriteNull() override; - + void DoBeginList() override; void DoEndList() override; - + void DoBeginObject() override; void DoWriteKey(const TStringBuf& key) override; void DoEndObject() override; - + void DoWriteRawJson(const TStringBuf& str) override; - + private: template <typename T> void WriteImpl(const T& t); - + struct TContext { enum EType { JSON_MAP, JSON_ARRAY, JSON_AFTER_KEY, }; - + TContext(EType type, NJson::TJsonValue& value) : Type(type) , Value(value) @@ -55,9 +55,9 @@ namespace NProtobufJson { EType Type; NJson::TJsonValue& Value; }; - + NJson::TJsonValue& Root; TStack<TContext, TVector<TContext>> Context; }; - + } diff --git a/library/cpp/protobuf/json/json_writer_output.cpp b/library/cpp/protobuf/json/json_writer_output.cpp index 288f645bab0..d6258eae1b9 100644 --- a/library/cpp/protobuf/json/json_writer_output.cpp +++ b/library/cpp/protobuf/json/json_writer_output.cpp @@ -1,6 +1,6 @@ -#include "json_writer_output.h" - -namespace NProtobufJson { +#include "json_writer_output.h" + +namespace NProtobufJson { NJson::TJsonWriterConfig TJsonWriterOutput::CreateJsonWriterConfig(const TProto2JsonConfig& config) { NJson::TJsonWriterConfig jsonConfig; jsonConfig.FormatOutput = config.FormatOutput; @@ -8,15 +8,15 @@ namespace NProtobufJson { jsonConfig.ValidateUtf8 = false; jsonConfig.DontEscapeStrings = false; jsonConfig.WriteNanAsString = config.WriteNanAsString; - + for (size_t i = 0; i < config.StringTransforms.size(); ++i) { Y_ASSERT(config.StringTransforms[i]); if (config.StringTransforms[i]->GetType() == IStringTransform::EscapeTransform) { jsonConfig.DontEscapeStrings = true; break; } - } + } return jsonConfig; - } + } -} +} diff --git a/library/cpp/protobuf/json/json_writer_output.h b/library/cpp/protobuf/json/json_writer_output.h index 3d8a2daa56c..a634d9d5651 100644 --- a/library/cpp/protobuf/json/json_writer_output.h +++ b/library/cpp/protobuf/json/json_writer_output.h @@ -1,21 +1,21 @@ -#pragma once - -#include "json_output.h" -#include "config.h" - +#pragma once + +#include "json_output.h" +#include "config.h" + #include <library/cpp/json/json_writer.h> - + #include <util/string/builder.h> -#include <util/generic/store_policy.h> - -namespace NProtobufJson { +#include <util/generic/store_policy.h> + +namespace NProtobufJson { class TBaseJsonWriterOutput: public IJsonOutput { public: TBaseJsonWriterOutput(NJson::TJsonWriter& writer) : Writer(writer) { } - + private: void DoWrite(int i) override { Writer.Write(i); @@ -47,14 +47,14 @@ namespace NProtobufJson { void DoWrite(const TString& s) override { Writer.Write(s); } - + void DoBeginList() override { Writer.OpenArray(); } void DoEndList() override { Writer.CloseArray(); } - + void DoBeginObject() override { Writer.OpenMap(); } @@ -64,14 +64,14 @@ namespace NProtobufJson { void DoEndObject() override { Writer.CloseMap(); } - + void DoWriteRawJson(const TStringBuf& str) override { Writer.UnsafeWrite(str); } - + NJson::TJsonWriter& Writer; }; - + class TJsonWriterOutput: public TEmbedPolicy<NJson::TJsonWriter>, public TBaseJsonWriterOutput { public: TJsonWriterOutput(IOutputStream* outputStream, const NJson::TJsonWriterConfig& cfg) @@ -79,17 +79,17 @@ namespace NProtobufJson { , TBaseJsonWriterOutput(*Ptr()) { } - + TJsonWriterOutput(IOutputStream* outputStream, const TProto2JsonConfig& cfg) : TEmbedPolicy<NJson::TJsonWriter>(outputStream, CreateJsonWriterConfig(cfg)) 
, TBaseJsonWriterOutput(*Ptr()) { } - + private: static NJson::TJsonWriterConfig CreateJsonWriterConfig(const TProto2JsonConfig& cfg); }; - + class TJsonStringWriterOutput: public TEmbedPolicy<TStringOutput>, public TJsonWriterOutput { public: template <typename TConfig> @@ -99,5 +99,5 @@ namespace NProtobufJson { { } }; - + } diff --git a/library/cpp/protobuf/json/proto2json.cpp b/library/cpp/protobuf/json/proto2json.cpp index 3d76a916867..728b98d1f9b 100644 --- a/library/cpp/protobuf/json/proto2json.cpp +++ b/library/cpp/protobuf/json/proto2json.cpp @@ -1,8 +1,8 @@ #include "proto2json.h" -#include "json_output_create.h" -#include "proto2json_printer.h" - +#include "json_output_create.h" +#include "proto2json_printer.h" + #include <library/cpp/json/json_reader.h> #include <library/cpp/json/json_value.h> #include <library/cpp/json/json_writer.h> @@ -13,7 +13,7 @@ #include <util/stream/str.h> #include <util/system/yassert.h> -namespace NProtobufJson { +namespace NProtobufJson { void Proto2Json(const NProtoBuf::Message& proto, IJsonOutput& jsonOutput, const TProto2JsonConfig& config, bool closeMap) { TProto2JsonPrinter printer(config); diff --git a/library/cpp/protobuf/json/proto2json.h b/library/cpp/protobuf/json/proto2json.h index 89a1781a40d..191c406e5ec 100644 --- a/library/cpp/protobuf/json/proto2json.h +++ b/library/cpp/protobuf/json/proto2json.h @@ -1,18 +1,18 @@ #pragma once -#include "config.h" -#include "json_output.h" +#include "config.h" +#include "json_output.h" #include <google/protobuf/descriptor.h> #include <google/protobuf/descriptor.pb.h> #include <google/protobuf/message.h> - + #include <util/generic/fwd.h> #include <util/generic/vector.h> #include <util/generic/yexception.h> #include <util/stream/str.h> -#include <functional> +#include <functional> namespace NJson { class TJsonValue; diff --git a/library/cpp/protobuf/json/proto2json_printer.cpp b/library/cpp/protobuf/json/proto2json_printer.cpp index 6123eab0f25..833a0d294b3 100644 --- a/library/cpp/protobuf/json/proto2json_printer.cpp +++ b/library/cpp/protobuf/json/proto2json_printer.cpp @@ -1,14 +1,14 @@ -#include "proto2json_printer.h" -#include "config.h" +#include "proto2json_printer.h" +#include "config.h" #include "util.h" - -#include <util/generic/yexception.h> + +#include <util/generic/yexception.h> #include <util/string/ascii.h> #include <util/string/cast.h> - -namespace NProtobufJson { + +namespace NProtobufJson { using namespace NProtoBuf; - + class TJsonKeyBuilder { public: TJsonKeyBuilder(const FieldDescriptor& field, const TProto2JsonConfig& config, TString& tmpBuf) @@ -30,7 +30,7 @@ namespace NProtobufJson { NewKeyBuf = NewKeyStr; return; } - + switch (config.FieldNameMode) { case TProto2JsonConfig::FieldNameOriginalCase: { NewKeyBuf = field.name(); @@ -43,14 +43,14 @@ namespace NProtobufJson { NewKeyBuf = NewKeyStr; break; } - + case TProto2JsonConfig::FieldNameUpperCase: { NewKeyStr = field.name(); NewKeyStr.to_upper(); NewKeyBuf = NewKeyStr; break; } - + case TProto2JsonConfig::FieldNameCamelCase: { NewKeyStr = field.name(); if (!NewKeyStr.empty()) { @@ -59,7 +59,7 @@ namespace NProtobufJson { NewKeyBuf = NewKeyStr; break; } - + case TProto2JsonConfig::FieldNameSnakeCase: { NewKeyStr = field.name(); ToSnakeCase(&NewKeyStr); @@ -81,7 +81,7 @@ namespace NProtobufJson { const TStringBuf& GetKey() const { return NewKeyBuf; - } + } private: TStringBuf NewKeyBuf; @@ -91,26 +91,26 @@ namespace NProtobufJson { TProto2JsonPrinter::TProto2JsonPrinter(const TProto2JsonConfig& cfg) : Config(cfg) { - } - + } + 
TProto2JsonPrinter::~TProto2JsonPrinter() { - } - + } + TStringBuf TProto2JsonPrinter::MakeKey(const FieldDescriptor& field) { return TJsonKeyBuilder(field, GetConfig(), TmpBuf).GetKey(); } - + template <bool InMapContext, typename T> std::enable_if_t<InMapContext, void> WriteWithMaybeEmptyKey(IJsonOutput& json, const TStringBuf& key, const T& value) { json.WriteKey(key).Write(value); } - + template <bool InMapContext, typename T> std::enable_if_t<!InMapContext, void> WriteWithMaybeEmptyKey(IJsonOutput& array, const TStringBuf& key, const T& value) { Y_ASSERT(!key); array.Write(value); } - + template <bool InMapContext> void TProto2JsonPrinter::PrintStringValue(const FieldDescriptor& field, const TStringBuf& key, const TString& value, @@ -131,7 +131,7 @@ namespace NProtobufJson { WriteWithMaybeEmptyKey<InMapContext>(json, key, value); } } - + template <bool InMapContext> void TProto2JsonPrinter::PrintEnumValue(const TStringBuf& key, const EnumValueDescriptor* value, @@ -146,52 +146,52 @@ namespace NProtobufJson { WriteWithMaybeEmptyKey<InMapContext>(json, key, value->number()); break; } - + case TProto2JsonConfig::EnumName: { WriteWithMaybeEmptyKey<InMapContext>(json, key, value->name()); break; } - + case TProto2JsonConfig::EnumFullName: { WriteWithMaybeEmptyKey<InMapContext>(json, key, value->full_name()); break; - } - + } + case TProto2JsonConfig::EnumNameLowerCase: { TString newName = value->name(); newName.to_lower(); WriteWithMaybeEmptyKey<InMapContext>(json, key, newName); break; } - + case TProto2JsonConfig::EnumFullNameLowerCase: { TString newName = value->full_name(); newName.to_lower(); WriteWithMaybeEmptyKey<InMapContext>(json, key, newName); break; } - + default: Y_VERIFY_DEBUG(false, "Unknown EnumMode."); } - } - + } + void TProto2JsonPrinter::PrintSingleField(const Message& proto, const FieldDescriptor& field, IJsonOutput& json, TStringBuf key) { Y_VERIFY(!field.is_repeated(), "field is repeated."); - + if (!key) { key = MakeKey(field); } - + #define FIELD_TO_JSON(EProtoCppType, ProtoGet) \ case FieldDescriptor::EProtoCppType: { \ json.WriteKey(key).Write(reflection->ProtoGet(proto, &field)); \ break; \ - } - + } + #define INT_FIELD_TO_JSON(EProtoCppType, ProtoGet) \ case FieldDescriptor::EProtoCppType: { \ const auto value = reflection->ProtoGet(proto, &field); \ @@ -204,7 +204,7 @@ namespace NProtobufJson { } const Reflection* reflection = proto.GetReflection(); - + bool shouldPrintField = reflection->HasField(proto, &field); if (!shouldPrintField && GetConfig().MissingSingleKeyMode == TProto2JsonConfig::MissingKeyExplicitDefaultThrowRequired) { if (field.has_default_value()) { @@ -225,25 +225,25 @@ namespace NProtobufJson { FIELD_TO_JSON(CPPTYPE_DOUBLE, GetDouble); FIELD_TO_JSON(CPPTYPE_FLOAT, GetFloat); FIELD_TO_JSON(CPPTYPE_BOOL, GetBool); - + case FieldDescriptor::CPPTYPE_MESSAGE: { json.WriteKey(key); Print(reflection->GetMessage(proto, &field), json); break; } - + case FieldDescriptor::CPPTYPE_ENUM: { PrintEnumValue<true>(key, reflection->GetEnum(proto, &field), json); break; } - + case FieldDescriptor::CPPTYPE_STRING: { TString scratch; const TString& value = reflection->GetStringReference(proto, &field, &scratch); PrintStringValue<true>(field, key, value, json); break; } - + default: ythrow yexception() << "Unknown protobuf field type: " << static_cast<int>(field.cpp_type()) << "."; @@ -254,36 +254,36 @@ namespace NProtobufJson { json.WriteKey(key).WriteNull(); break; } - + case TProto2JsonConfig::MissingKeySkip: case 
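TJsonKeyBuilder and PrintEnumValue above are where FieldNameMode and EnumMode take effect. A hedged illustration of the mappings; the field name "UserID" and enum value "OT_EQ" are examples, and the snake-case line is summarized from the config.h comment rather than shown exhaustively:

#include <library/cpp/protobuf/json/config.h>

using namespace NProtobufJson;

TProto2JsonConfig MakeCamelCaseConfig() {
    // For a field named "UserID" and an enum value OT_EQ with number 0:
    //   FieldNameOriginalCase -> "UserID"     FieldNameLowerCase -> "userid"
    //   FieldNameUpperCase    -> "USERID"     FieldNameCamelCase -> "userID" (first char lowered)
    //   FieldNameSnakeCaseDense -> "user_id" (per the config.h comment)
    //   EnumNumber -> 0    EnumName -> "OT_EQ"    EnumNameLowerCase -> "ot_eq"
    //   EnumFullName / EnumFullNameLowerCase use EnumValueDescriptor::full_name(),
    //   optionally lower-cased.
    TProto2JsonConfig cfg;
    cfg.SetFieldNameMode(TProto2JsonConfig::FieldNameCamelCase)
       .SetEnumMode(TProto2JsonConfig::EnumName);
    return cfg;
}
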
TProto2JsonConfig::MissingKeyExplicitDefaultThrowRequired: default: break; } - } + } #undef FIELD_TO_JSON } - + void TProto2JsonPrinter::PrintRepeatedField(const Message& proto, const FieldDescriptor& field, IJsonOutput& json, TStringBuf key) { Y_VERIFY(field.is_repeated(), "field isn't repeated."); - + const bool isMap = field.is_map() && GetConfig().MapAsObject; if (!key) { key = MakeKey(field); - } - + } + #define REPEATED_FIELD_TO_JSON(EProtoCppType, ProtoGet) \ case FieldDescriptor::EProtoCppType: { \ - for (size_t i = 0, endI = reflection->FieldSize(proto, &field); i < endI; ++i) \ + for (size_t i = 0, endI = reflection->FieldSize(proto, &field); i < endI; ++i) \ json.Write(reflection->ProtoGet(proto, &field, i)); \ break; \ - } - + } + const Reflection* reflection = proto.GetReflection(); - + if (reflection->FieldSize(proto, &field) > 0) { json.WriteKey(key); if (isMap) { @@ -291,7 +291,7 @@ namespace NProtobufJson { } else { json.BeginList(); } - + switch (field.cpp_type()) { REPEATED_FIELD_TO_JSON(CPPTYPE_INT32, GetRepeatedInt32); REPEATED_FIELD_TO_JSON(CPPTYPE_INT64, GetRepeatedInt64); @@ -300,7 +300,7 @@ namespace NProtobufJson { REPEATED_FIELD_TO_JSON(CPPTYPE_DOUBLE, GetRepeatedDouble); REPEATED_FIELD_TO_JSON(CPPTYPE_FLOAT, GetRepeatedFloat); REPEATED_FIELD_TO_JSON(CPPTYPE_BOOL, GetRepeatedBool); - + case FieldDescriptor::CPPTYPE_MESSAGE: { if (isMap) { for (size_t i = 0, endI = reflection->FieldSize(proto, &field); i < endI; ++i) { @@ -334,7 +334,7 @@ namespace NProtobufJson { ythrow yexception() << "Unknown protobuf field type: " << static_cast<int>(field.cpp_type()) << "."; } - + if (isMap) { json.EndObject(); } else { @@ -346,7 +346,7 @@ namespace NProtobufJson { json.WriteKey(key).WriteNull(); break; } - + case TProto2JsonConfig::MissingKeyDefault: { json.WriteKey(key); if (isMap) { @@ -361,12 +361,12 @@ namespace NProtobufJson { case TProto2JsonConfig::MissingKeyExplicitDefaultThrowRequired: default: break; - } - } - + } + } + #undef REPEATED_FIELD_TO_JSON } - + void TProto2JsonPrinter::PrintKeyValue(const NProtoBuf::Message& proto, IJsonOutput& json) { const FieldDescriptor* keyField = proto.GetDescriptor()->FindFieldByName("key"); @@ -376,7 +376,7 @@ namespace NProtobufJson { Y_VERIFY(valueField, "Map entry value field not found."); PrintField(proto, *valueField, json, key); } - + TString TProto2JsonPrinter::MakeKey(const NProtoBuf::Message& proto, const NProtoBuf::FieldDescriptor& field) { const Reflection* reflection = proto.GetReflection(); @@ -433,11 +433,11 @@ namespace NProtobufJson { break; default: ythrow yexception() << "Unsupported key type."; - } - + } + return result; - } - + } + void TProto2JsonPrinter::PrintField(const Message& proto, const FieldDescriptor& field, IJsonOutput& json, @@ -449,13 +449,13 @@ namespace NProtobufJson { else PrintSingleField(proto, field, json, key); } - + void TProto2JsonPrinter::Print(const Message& proto, IJsonOutput& json, bool closeMap) { const Descriptor* descriptor = proto.GetDescriptor(); Y_ASSERT(descriptor); - + json.BeginObject(); - + // Iterate over all non-extension fields for (int f = 0, endF = descriptor->field_count(); f < endF; ++f) { const FieldDescriptor* field = descriptor->field(f); @@ -485,8 +485,8 @@ namespace NProtobufJson { if (closeMap) { json.EndObject(); } - } - + } + template <class T, class U> std::enable_if_t<!std::is_unsigned<T>::value, bool> ValueInRange(T value, U range) { return value >= -range && value <= range; @@ -514,4 +514,4 @@ namespace NProtobufJson { return false; } -} +} diff --git 
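PrintRepeatedField and PrintKeyValue above implement the MapAsObject switch: with it off, a protobuf map field is printed as a list of entry objects; with it on, the entries collapse into a single JSON object keyed by the map key. A sketch with a hypothetical map<string, uint32> field; the rendered shapes in the comments are approximate and entry order is not guaranteed:

#include <library/cpp/protobuf/json/proto2json.h>

using namespace NProtobufJson;

// msg is assumed to have a field: map<string, uint32> Counters = 1;
// Default config (roughly):   {"Counters":[{"key":"a","value":1},{"key":"b","value":2}]}
// With MapAsObject (roughly): {"Counters":{"a":1,"b":2}}
TString RenderMapAsObject(const NProtoBuf::Message& msg) {
    TProto2JsonConfig cfg;
    cfg.MapAsObject = true;   // public member of the config struct shown above
    return Proto2Json(msg, cfg);
}
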
a/library/cpp/protobuf/json/proto2json_printer.h b/library/cpp/protobuf/json/proto2json_printer.h index 9dc5aa86c62..866fd8b5456 100644 --- a/library/cpp/protobuf/json/proto2json_printer.h +++ b/library/cpp/protobuf/json/proto2json_printer.h @@ -1,62 +1,62 @@ -#pragma once - -#include "json_output.h" - +#pragma once + +#include "json_output.h" + #include <google/protobuf/descriptor.h> #include <google/protobuf/descriptor.pb.h> #include <google/protobuf/message.h> - -#include <util/generic/strbuf.h> + +#include <util/generic/strbuf.h> #include <util/generic/string.h> - -namespace NProtobufJson { + +namespace NProtobufJson { struct TProto2JsonConfig; - + class TProto2JsonPrinter { public: TProto2JsonPrinter(const TProto2JsonConfig& config); virtual ~TProto2JsonPrinter(); - + virtual void Print(const NProtoBuf::Message& proto, IJsonOutput& json, bool closeMap = true); - + virtual const TProto2JsonConfig& GetConfig() const { return Config; } - + protected: virtual TStringBuf MakeKey(const NProtoBuf::FieldDescriptor& field); - + virtual void PrintField(const NProtoBuf::Message& proto, const NProtoBuf::FieldDescriptor& field, IJsonOutput& json, TStringBuf key = {}); - + void PrintRepeatedField(const NProtoBuf::Message& proto, const NProtoBuf::FieldDescriptor& field, IJsonOutput& json, TStringBuf key = {}); - + void PrintSingleField(const NProtoBuf::Message& proto, const NProtoBuf::FieldDescriptor& field, IJsonOutput& json, TStringBuf key = {}); - + void PrintKeyValue(const NProtoBuf::Message& proto, IJsonOutput& json); - + TString MakeKey(const NProtoBuf::Message& proto, const NProtoBuf::FieldDescriptor& field); - + template <bool InMapContext> void PrintEnumValue(const TStringBuf& key, const NProtoBuf::EnumValueDescriptor* value, IJsonOutput& json); - + template <bool InMapContext> void PrintStringValue(const NProtoBuf::FieldDescriptor& field, const TStringBuf& key, const TString& value, IJsonOutput& json); - + template <class T> bool NeedStringifyNumber(T value) const; @@ -64,5 +64,5 @@ namespace NProtobufJson { const TProto2JsonConfig& Config; TString TmpBuf; }; - + } diff --git a/library/cpp/protobuf/json/ut/filter_ut.cpp b/library/cpp/protobuf/json/ut/filter_ut.cpp index 95c227666fb..a5f17f02300 100644 --- a/library/cpp/protobuf/json/ut/filter_ut.cpp +++ b/library/cpp/protobuf/json/ut/filter_ut.cpp @@ -1,47 +1,47 @@ #include <library/cpp/protobuf/json/ut/filter_ut.pb.h> - + #include <library/cpp/protobuf/json/filter.h> #include <library/cpp/protobuf/json/field_option.h> #include <library/cpp/protobuf/json/proto2json.h> #include <library/cpp/testing/unittest/registar.h> - -using namespace NProtobufJson; - -static NProtobufJsonUt::TFilterTest GetTestMsg() { - NProtobufJsonUt::TFilterTest msg; - msg.SetOptFiltered("1"); - msg.SetNotFiltered("23"); - msg.AddRepFiltered(45); - msg.AddRepFiltered(67); - msg.MutableInner()->AddNumber(100); - msg.MutableInner()->AddNumber(200); - msg.MutableInner()->SetInnerFiltered(235); - return msg; -} - + +using namespace NProtobufJson; + +static NProtobufJsonUt::TFilterTest GetTestMsg() { + NProtobufJsonUt::TFilterTest msg; + msg.SetOptFiltered("1"); + msg.SetNotFiltered("23"); + msg.AddRepFiltered(45); + msg.AddRepFiltered(67); + msg.MutableInner()->AddNumber(100); + msg.MutableInner()->AddNumber(200); + msg.MutableInner()->SetInnerFiltered(235); + return msg; +} + Y_UNIT_TEST_SUITE(TProto2JsonFilterTest){ Y_UNIT_TEST(TestFilterPrinter){ - NProtobufJsonUt::TFilterTest msg = GetTestMsg(); + NProtobufJsonUt::TFilterTest msg = GetTestMsg(); { TString expected = 
R"({"OptFiltered":"1","NotFiltered":"23","RepFiltered":[45,67],)" R"("Inner":{"Number":[100,200],"InnerFiltered":235}})"; TString my = Proto2Json(msg); UNIT_ASSERT_STRINGS_EQUAL(my, expected); } - + { TString expected = R"({"NotFiltered":"23",)" R"("Inner":{"Number":[100,200]}})"; TString my = PrintWithFilter(msg, MakeFieldOptionFunctor(NProtobufJsonUt::filter_test, false)); UNIT_ASSERT_STRINGS_EQUAL(my, expected); } - + { TString expected = R"({"OptFiltered":"1","RepFiltered":[45,67]})"; TString my = PrintWithFilter(msg, MakeFieldOptionFunctor(NProtobufJsonUt::filter_test)); UNIT_ASSERT_STRINGS_EQUAL(my, expected); } - + { TString expected = R"({"OptFiltered":"1","NotFiltered":"23",)" R"("Inner":{"Number":[100,200]}})"; @@ -66,25 +66,25 @@ Y_UNIT_TEST(NoUnnecessaryCopyFunctor) { struct TFunctorMock { TFunctorMock(size_t* copyCount) : CopyCount(copyCount) - { + { UNIT_ASSERT(*CopyCount <= 1); - } - + } + TFunctorMock(const TFunctorMock& f) : CopyCount(f.CopyCount) - { + { ++*CopyCount; - } - + } + TFunctorMock(TFunctorMock&& f) = default; - + bool operator()(const NProtoBuf::Message&, const NProtoBuf::FieldDescriptor*) const { return false; } - + size_t* CopyCount; }; - + TProto2JsonConfig cfg; TFilteringPrinter<> printer(TFunctorMock(&CopyCount), cfg); UNIT_ASSERT(CopyCount <= 1); diff --git a/library/cpp/protobuf/json/ut/filter_ut.proto b/library/cpp/protobuf/json/ut/filter_ut.proto index 29d630ade45..c4866ed8134 100644 --- a/library/cpp/protobuf/json/ut/filter_ut.proto +++ b/library/cpp/protobuf/json/ut/filter_ut.proto @@ -1,20 +1,20 @@ import "google/protobuf/descriptor.proto"; - -package NProtobufJsonUt; - -extend google.protobuf.FieldOptions { - optional bool filter_test = 58255; - optional bool export_test = 58256; -} - -message TFilterTest { - optional string OptFiltered = 1 [(filter_test) = true, (export_test) = true]; - optional string NotFiltered = 2 [(export_test) = true]; - repeated uint64 RepFiltered = 3 [(filter_test) = true]; - - message TInner { - repeated uint32 Number = 1 [(export_test) = true]; - optional int32 InnerFiltered = 2 [(filter_test) = true]; - } - optional TInner Inner = 4 [(export_test) = true]; -} + +package NProtobufJsonUt; + +extend google.protobuf.FieldOptions { + optional bool filter_test = 58255; + optional bool export_test = 58256; +} + +message TFilterTest { + optional string OptFiltered = 1 [(filter_test) = true, (export_test) = true]; + optional string NotFiltered = 2 [(export_test) = true]; + repeated uint64 RepFiltered = 3 [(filter_test) = true]; + + message TInner { + repeated uint32 Number = 1 [(export_test) = true]; + optional int32 InnerFiltered = 2 [(filter_test) = true]; + } + optional TInner Inner = 4 [(export_test) = true]; +} diff --git a/library/cpp/protobuf/json/ut/inline_ut.cpp b/library/cpp/protobuf/json/ut/inline_ut.cpp index c29ad32e7d6..048e3fa2754 100644 --- a/library/cpp/protobuf/json/ut/inline_ut.cpp +++ b/library/cpp/protobuf/json/ut/inline_ut.cpp @@ -9,42 +9,42 @@ using namespace NProtobufJson; -static NProtobufJsonUt::TInlineTest GetTestMsg() { - NProtobufJsonUt::TInlineTest msg; - msg.SetOptJson(R"({"a":1,"b":"000"})"); - msg.SetNotJson("12{}34"); - msg.AddRepJson("{}"); - msg.AddRepJson("[1,2]"); - msg.MutableInner()->AddNumber(100); - msg.MutableInner()->AddNumber(200); - msg.MutableInner()->SetInnerJson(R"({"xxx":[]})"); - return msg; -} - +static NProtobufJsonUt::TInlineTest GetTestMsg() { + NProtobufJsonUt::TInlineTest msg; + msg.SetOptJson(R"({"a":1,"b":"000"})"); + msg.SetNotJson("12{}34"); + msg.AddRepJson("{}"); + 
msg.AddRepJson("[1,2]"); + msg.MutableInner()->AddNumber(100); + msg.MutableInner()->AddNumber(200); + msg.MutableInner()->SetInnerJson(R"({"xxx":[]})"); + return msg; +} + Y_UNIT_TEST_SUITE(TProto2JsonInlineTest){ Y_UNIT_TEST(TestNormalPrint){ - NProtobufJsonUt::TInlineTest msg = GetTestMsg(); + NProtobufJsonUt::TInlineTest msg = GetTestMsg(); // normal print should output these fields as just string values TString expRaw = R"({"OptJson":"{\"a\":1,\"b\":\"000\"}","NotJson":"12{}34","RepJson":["{}","[1,2]"],)" R"("Inner":{"Number":[100,200],"InnerJson":"{\"xxx\":[]}"}})"; TString myRaw; Proto2Json(msg, myRaw); UNIT_ASSERT_STRINGS_EQUAL(myRaw, expRaw); - + myRaw = PrintInlined(msg, [](const NProtoBuf::Message&, const NProtoBuf::FieldDescriptor*) { return false; }); UNIT_ASSERT_STRINGS_EQUAL(myRaw, expRaw); // result is the same } - + Y_UNIT_TEST(TestInliningPrinter) { NProtobufJsonUt::TInlineTest msg = GetTestMsg(); // inlined print should output these fields as inlined json sub-objects TString expInlined = R"({"OptJson":{"a":1,"b":"000"},"NotJson":"12{}34","RepJson":[{},[1,2]],)" R"("Inner":{"Number":[100,200],"InnerJson":{"xxx":[]}}})"; - + { TString myInlined = PrintInlined(msg, MakeFieldOptionFunctor(NProtobufJsonUt::inline_test)); UNIT_ASSERT_STRINGS_EQUAL(myInlined, expInlined); - } + } { auto functor = [](const NProtoBuf::Message&, const NProtoBuf::FieldDescriptor* field) { return field->name() == "OptJson" || field->name() == "RepJson" || field->name() == "InnerJson"; @@ -53,7 +53,7 @@ Y_UNIT_TEST(TestInliningPrinter) { UNIT_ASSERT_STRINGS_EQUAL(myInlined, expInlined); } } - + Y_UNIT_TEST(TestNoValues) { // no values - no printing NProtobufJsonUt::TInlineTest msg; @@ -61,35 +61,35 @@ Y_UNIT_TEST(TestNoValues) { msg.MutableInner()->AddNumber(200); TString expInlined = R"({"Inner":{"Number":[100,200]}})"; - + TString myInlined = PrintInlined(msg, MakeFieldOptionFunctor(NProtobufJsonUt::inline_test)); UNIT_ASSERT_STRINGS_EQUAL(myInlined, expInlined); } - + Y_UNIT_TEST(TestMissingKeyModeNull) { NProtobufJsonUt::TInlineTest msg; msg.MutableInner()->AddNumber(100); msg.MutableInner()->AddNumber(200); - + TString expInlined = R"({"OptJson":null,"NotJson":null,"RepJson":null,"Inner":{"Number":[100,200],"InnerJson":null}})"; - + TProto2JsonConfig cfg; cfg.SetMissingSingleKeyMode(TProto2JsonConfig::MissingKeyNull).SetMissingRepeatedKeyMode(TProto2JsonConfig::MissingKeyNull); TString myInlined = PrintInlined(msg, MakeFieldOptionFunctor(NProtobufJsonUt::inline_test), cfg); UNIT_ASSERT_STRINGS_EQUAL(myInlined, expInlined); } - + Y_UNIT_TEST(TestMissingKeyModeDefault) { NProtobufJsonUt::TInlineTestDefaultValues msg; - + TString expInlined = R"({"OptJson":{"default":1},"Number":0,"RepJson":[],"Inner":{"OptJson":{"default":2}}})"; - + TProto2JsonConfig cfg; cfg.SetMissingSingleKeyMode(TProto2JsonConfig::MissingKeyDefault).SetMissingRepeatedKeyMode(TProto2JsonConfig::MissingKeyDefault); TString myInlined = PrintInlined(msg, MakeFieldOptionFunctor(NProtobufJsonUt::inline_test), cfg); UNIT_ASSERT_STRINGS_EQUAL(myInlined, expInlined); } - + Y_UNIT_TEST(NoUnnecessaryCopyFunctor) { size_t CopyCount = 0; struct TFunctorMock { @@ -98,22 +98,22 @@ Y_UNIT_TEST(NoUnnecessaryCopyFunctor) { { UNIT_ASSERT(*CopyCount <= 1); } - + TFunctorMock(const TFunctorMock& f) : CopyCount(f.CopyCount) { ++*CopyCount; } - + TFunctorMock(TFunctorMock&& f) = default; - + bool operator()(const NProtoBuf::Message&, const NProtoBuf::FieldDescriptor*) const { return false; } - + size_t* CopyCount; }; - + TProto2JsonConfig cfg; 
TInliningPrinter<> printer(TFunctorMock(&CopyCount), cfg); UNIT_ASSERT(CopyCount <= 1); diff --git a/library/cpp/protobuf/json/ut/inline_ut.proto b/library/cpp/protobuf/json/ut/inline_ut.proto index 76bd10232dc..147c3369dd1 100644 --- a/library/cpp/protobuf/json/ut/inline_ut.proto +++ b/library/cpp/protobuf/json/ut/inline_ut.proto @@ -17,13 +17,13 @@ message TInlineTest { } optional TInner Inner = 4; } - -message TInlineTestDefaultValues { - optional string OptJson = 1 [(inline_test) = true, default = "{\"default\":1}"]; - optional uint32 Number = 2; - repeated string RepJson = 3 [(inline_test) = true]; - message TInner { - optional string OptJson = 1 [(inline_test) = true, default = "{\"default\":2}"]; - } - optional TInner Inner = 4; -} + +message TInlineTestDefaultValues { + optional string OptJson = 1 [(inline_test) = true, default = "{\"default\":1}"]; + optional uint32 Number = 2; + repeated string RepJson = 3 [(inline_test) = true]; + message TInner { + optional string OptJson = 1 [(inline_test) = true, default = "{\"default\":2}"]; + } + optional TInner Inner = 4; +} diff --git a/library/cpp/protobuf/json/ut/proto2json_ut.cpp b/library/cpp/protobuf/json/ut/proto2json_ut.cpp index 07e52d7f2f5..9dbec774e2f 100644 --- a/library/cpp/protobuf/json/ut/proto2json_ut.cpp +++ b/library/cpp/protobuf/json/ut/proto2json_ut.cpp @@ -20,8 +20,8 @@ #include <util/system/defaults.h> #include <util/system/yassert.h> -#include <limits> - +#include <limits> + using namespace NProtobufJson; using namespace NProtobufJsonTest; @@ -887,18 +887,18 @@ Y_UNIT_TEST(TestFieldNameMode) { /// TODO: test missing keys } // TestFieldNameMode - + Y_UNIT_TEST(TestNan) { TFlatOptional proto; proto.SetDouble(std::numeric_limits<double>::quiet_NaN()); - + UNIT_ASSERT_EXCEPTION(Proto2Json(proto, TProto2JsonConfig()), yexception); } // TestNan - + Y_UNIT_TEST(TestInf) { TFlatOptional proto; proto.SetFloat(std::numeric_limits<float>::infinity()); - + UNIT_ASSERT_EXCEPTION(Proto2Json(proto, TProto2JsonConfig()), yexception); } // TestInf diff --git a/library/cpp/protobuf/json/ut/ya.make b/library/cpp/protobuf/json/ut/ya.make index b60a6d3c17f..ac6aebeab81 100644 --- a/library/cpp/protobuf/json/ut/ya.make +++ b/library/cpp/protobuf/json/ut/ya.make @@ -3,14 +3,14 @@ UNITTEST_FOR(library/cpp/protobuf/json) OWNER(avitella) SRCS( - filter_ut.cpp + filter_ut.cpp json2proto_ut.cpp proto2json_ut.cpp inline_ut.proto inline_ut.cpp string_transform_ut.cpp - filter_ut.proto - test.proto + filter_ut.proto + test.proto util_ut.cpp ) diff --git a/library/cpp/protobuf/json/ya.make b/library/cpp/protobuf/json/ya.make index 2f2c75cfdb2..b8e3f1a33ce 100644 --- a/library/cpp/protobuf/json/ya.make +++ b/library/cpp/protobuf/json/ya.make @@ -4,9 +4,9 @@ OWNER(avitella) SRCS( json2proto.cpp - json_output_create.cpp - json_value_output.cpp - json_writer_output.cpp + json_output_create.cpp + json_value_output.cpp + json_writer_output.cpp name_generator.cpp proto2json.cpp proto2json_printer.cpp diff --git a/library/cpp/protobuf/util/cast.h b/library/cpp/protobuf/util/cast.h index 83749dfcee5..d368d13766d 100644 --- a/library/cpp/protobuf/util/cast.h +++ b/library/cpp/protobuf/util/cast.h @@ -1,11 +1,11 @@ #pragma once -#include "traits.h" +#include "traits.h" #include <google/protobuf/descriptor.h> #include <google/protobuf/message.h> -#include <util/generic/cast.h> +#include <util/generic/cast.h> namespace NProtoBuf { // C++ compatible conversions of FieldDescriptor::CppType's diff --git a/library/cpp/protobuf/util/path.cpp 
b/library/cpp/protobuf/util/path.cpp index efa2a42c8a3..8a9c2ba7d73 100644 --- a/library/cpp/protobuf/util/path.cpp +++ b/library/cpp/protobuf/util/path.cpp @@ -1,20 +1,20 @@ -#include "path.h" - -#include <util/generic/yexception.h> - -namespace NProtoBuf { +#include "path.h" + +#include <util/generic/yexception.h> + +namespace NProtoBuf { TFieldPath::TFieldPath() { } - + TFieldPath::TFieldPath(const Descriptor* msgType, const TStringBuf& path) { Init(msgType, path); } - + TFieldPath::TFieldPath(const TVector<const FieldDescriptor*>& path) : Path(path) { } - + bool TFieldPath::InitUnsafe(const Descriptor* msgType, TStringBuf path) { Path.clear(); while (path) { @@ -23,10 +23,10 @@ namespace NProtoBuf { next = path.NextTok('/'); if (!next) return true; - + if (!msgType) // need field but no message type return false; - + TString nextStr(next); const FieldDescriptor* field = msgType->FindFieldByName(nextStr); if (!field) { @@ -41,21 +41,21 @@ namespace NProtoBuf { return false; // ambiguity field = ext; } - } - } - + } + } + if (!field) return false; - + Path.push_back(field); msgType = field->type() == FieldDescriptor::TYPE_MESSAGE ? field->message_type() : nullptr; } return true; - } - + } + void TFieldPath::Init(const Descriptor* msgType, const TStringBuf& path) { if (!InitUnsafe(msgType, path)) ythrow yexception() << "Failed to resolve path \"" << path << "\" relative to " << msgType->full_name(); } -} +} diff --git a/library/cpp/protobuf/util/path.h b/library/cpp/protobuf/util/path.h index 487f643a2d6..4fbee86f263 100644 --- a/library/cpp/protobuf/util/path.h +++ b/library/cpp/protobuf/util/path.h @@ -1,11 +1,11 @@ -#pragma once - +#pragma once + #include <google/protobuf/descriptor.h> #include <google/protobuf/message.h> - -#include <util/generic/vector.h> - -namespace NProtoBuf { + +#include <util/generic/vector.h> + +namespace NProtoBuf { class TFieldPath { public: TFieldPath(); @@ -13,40 +13,40 @@ namespace NProtoBuf { TFieldPath(const TVector<const FieldDescriptor*>& path); TFieldPath(const TFieldPath&) = default; TFieldPath& operator=(const TFieldPath&) = default; - + bool InitUnsafe(const Descriptor* msgType, const TStringBuf path); // noexcept void Init(const Descriptor* msgType, const TStringBuf& path); // throws - + const TVector<const FieldDescriptor*>& Fields() const { return Path; } - + void AddField(const FieldDescriptor* field) { Path.push_back(field); } - + const Descriptor* ParentType() const { return Empty() ? nullptr : Path.front()->containing_type(); } - + const FieldDescriptor* FieldDescr() const { return Empty() ? 
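TFieldPath above resolves a '/'-separated list of field names (including extension names) into a chain of FieldDescriptors, either throwing (Init, used by the constructor) or returning false (InitUnsafe). A sketch; TOuter/TInner and InspectPath are hypothetical, while the TFieldPath calls follow path.h above:

#include <library/cpp/protobuf/util/path.h>

#include <util/generic/yexception.h>

using namespace NProtoBuf;

// descriptor is e.g. TOuter::descriptor() for a hypothetical pair of messages:
//   message TOuter { optional TInner Inner = 1; }
//   message TInner { repeated uint32 Number = 1; }
void InspectPath(const Descriptor* descriptor) {
    TFieldPath path(descriptor, "Inner/Number");      // throws if the path does not resolve
    const FieldDescriptor* leaf = path.FieldDescr();  // descriptor of Number
    const Descriptor* root = path.ParentType();       // containing type of Inner, i.e. descriptor
    Y_ENSURE(leaf && root, "path resolved");

    // Non-throwing variant:
    TFieldPath lax;
    if (!lax.InitUnsafe(descriptor, "No/Such/Field")) {
        // resolution failed; handle the error without exceptions
    }
}
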
nullptr : Path.back(); } - + bool Empty() const { return Path.empty(); } - + explicit operator bool() const { return !Empty(); } - + bool operator!() const { return Empty(); } - + private: TVector<const FieldDescriptor*> Path; }; - + } diff --git a/library/cpp/protobuf/util/proto/ya.make b/library/cpp/protobuf/util/proto/ya.make index 4d68047d8b8..3a3d58e4862 100644 --- a/library/cpp/protobuf/util/proto/ya.make +++ b/library/cpp/protobuf/util/proto/ya.make @@ -1,11 +1,11 @@ -PROTO_LIBRARY() - -OWNER(mowgli) - -SRCS( - merge.proto -) - +PROTO_LIBRARY() + +OWNER(mowgli) + +SRCS( + merge.proto +) + EXCLUDE_TAGS(GO_PROTO) -END() +END() diff --git a/library/cpp/protobuf/util/repeated_field_utils.h b/library/cpp/protobuf/util/repeated_field_utils.h index c07bd846475..8f7428b5dc7 100644 --- a/library/cpp/protobuf/util/repeated_field_utils.h +++ b/library/cpp/protobuf/util/repeated_field_utils.h @@ -38,8 +38,8 @@ namespace NProtoBuf { T* ret = field->Add(); MoveRepeatedFieldItem(field, field->size() - 1, index); return ret; - } - + } + template <typename TRepeated> // suitable both for RepeatedField and RepeatedPtrField static void RemoveRepeatedFieldItem(TRepeated* field, size_t index) { if ((int)index >= field->size()) @@ -70,8 +70,8 @@ namespace NProtoBuf { for (int i = begIndex; i < endIndex; ++i, ++shiftIndex) field->SwapElements(shiftIndex, i); } - } - + } + // Remove several items at once, could be more efficient compared to calling RemoveRepeatedFieldItem several times template <typename TRepeated> static void RemoveRepeatedFieldItems(TRepeated* field, const TVector<size_t>& sortedIndices) { diff --git a/library/cpp/protobuf/util/repeated_field_utils_ut.cpp b/library/cpp/protobuf/util/repeated_field_utils_ut.cpp index 58aaaa9e12f..94a494e1a3f 100644 --- a/library/cpp/protobuf/util/repeated_field_utils_ut.cpp +++ b/library/cpp/protobuf/util/repeated_field_utils_ut.cpp @@ -1,46 +1,46 @@ -#include "repeated_field_utils.h" +#include "repeated_field_utils.h" #include <library/cpp/protobuf/util/ut/common_ut.pb.h> - + #include <library/cpp/testing/unittest/registar.h> - -using namespace NProtoBuf; - + +using namespace NProtoBuf; + Y_UNIT_TEST_SUITE(RepeatedFieldUtils) { Y_UNIT_TEST(RemoveIf) { - { - NProtobufUtilUt::TWalkTest msg; - msg.AddRepInt(0); - msg.AddRepInt(1); - msg.AddRepInt(2); - msg.AddRepInt(3); - msg.AddRepInt(4); - msg.AddRepInt(5); - auto cond = [](ui32 val) { - return val % 2 == 0; - }; - RemoveRepeatedFieldItemIf(msg.MutableRepInt(), cond); - UNIT_ASSERT_VALUES_EQUAL(3, msg.RepIntSize()); - UNIT_ASSERT_VALUES_EQUAL(1, msg.GetRepInt(0)); - UNIT_ASSERT_VALUES_EQUAL(3, msg.GetRepInt(1)); - UNIT_ASSERT_VALUES_EQUAL(5, msg.GetRepInt(2)); - } - - { - NProtobufUtilUt::TWalkTest msg; - msg.AddRepSub()->SetOptInt(0); - msg.AddRepSub()->SetOptInt(1); - msg.AddRepSub()->SetOptInt(2); - msg.AddRepSub()->SetOptInt(3); - msg.AddRepSub()->SetOptInt(4); - msg.AddRepSub()->SetOptInt(5); - auto cond = [](const NProtobufUtilUt::TWalkTest& val) { - return val.GetOptInt() % 2 == 0; - }; - RemoveRepeatedFieldItemIf(msg.MutableRepSub(), cond); - UNIT_ASSERT_VALUES_EQUAL(3, msg.RepSubSize()); - UNIT_ASSERT_VALUES_EQUAL(1, msg.GetRepSub(0).GetOptInt()); - UNIT_ASSERT_VALUES_EQUAL(3, msg.GetRepSub(1).GetOptInt()); - UNIT_ASSERT_VALUES_EQUAL(5, msg.GetRepSub(2).GetOptInt()); - } - } -} + { + NProtobufUtilUt::TWalkTest msg; + msg.AddRepInt(0); + msg.AddRepInt(1); + msg.AddRepInt(2); + msg.AddRepInt(3); + msg.AddRepInt(4); + msg.AddRepInt(5); + auto cond = [](ui32 val) { + return val % 2 == 0; + }; + 
RemoveRepeatedFieldItemIf(msg.MutableRepInt(), cond); + UNIT_ASSERT_VALUES_EQUAL(3, msg.RepIntSize()); + UNIT_ASSERT_VALUES_EQUAL(1, msg.GetRepInt(0)); + UNIT_ASSERT_VALUES_EQUAL(3, msg.GetRepInt(1)); + UNIT_ASSERT_VALUES_EQUAL(5, msg.GetRepInt(2)); + } + + { + NProtobufUtilUt::TWalkTest msg; + msg.AddRepSub()->SetOptInt(0); + msg.AddRepSub()->SetOptInt(1); + msg.AddRepSub()->SetOptInt(2); + msg.AddRepSub()->SetOptInt(3); + msg.AddRepSub()->SetOptInt(4); + msg.AddRepSub()->SetOptInt(5); + auto cond = [](const NProtobufUtilUt::TWalkTest& val) { + return val.GetOptInt() % 2 == 0; + }; + RemoveRepeatedFieldItemIf(msg.MutableRepSub(), cond); + UNIT_ASSERT_VALUES_EQUAL(3, msg.RepSubSize()); + UNIT_ASSERT_VALUES_EQUAL(1, msg.GetRepSub(0).GetOptInt()); + UNIT_ASSERT_VALUES_EQUAL(3, msg.GetRepSub(1).GetOptInt()); + UNIT_ASSERT_VALUES_EQUAL(5, msg.GetRepSub(2).GetOptInt()); + } + } +} diff --git a/library/cpp/protobuf/util/simple_reflection.cpp b/library/cpp/protobuf/util/simple_reflection.cpp index d842e9ee44d..83afa0ce0cc 100644 --- a/library/cpp/protobuf/util/simple_reflection.cpp +++ b/library/cpp/protobuf/util/simple_reflection.cpp @@ -1,10 +1,10 @@ -#include "simple_reflection.h" - -namespace NProtoBuf { +#include "simple_reflection.h" + +namespace NProtoBuf { const Message* GetMessageHelper(const TConstField& curField, bool) { return curField.HasValue() && curField.IsMessage() ? curField.Get<Message>() : nullptr; } - + Message* GetMessageHelper(TMutableField& curField, bool createPath) { if (curField.IsMessage()) { if (!curField.HasValue()) { @@ -13,14 +13,14 @@ namespace NProtoBuf { } else { return curField.MutableMessage(); } - } + } return nullptr; - } - + } + template <class TField, class TMsg> TMaybe<TField> ByPathImpl(TMsg& msg, const TVector<const FieldDescriptor*>& fieldsPath, bool createPath) { if (fieldsPath.empty()) - return TMaybe<TField>(); + return TMaybe<TField>(); TMsg* curParent = &msg; for (size_t i = 0, size = fieldsPath.size(); i < size; ++i) { const FieldDescriptor* field = fieldsPath[i]; @@ -35,36 +35,36 @@ namespace NProtoBuf { return TField(*curParent, fieldsPath.back()); else return TMaybe<TField>(); - } - + } + TMaybe<TConstField> TConstField::ByPath(const Message& msg, const TVector<const FieldDescriptor*>& fieldsPath) { return ByPathImpl<TConstField, const Message>(msg, fieldsPath, false); } - + TMaybe<TConstField> TConstField::ByPath(const Message& msg, const TStringBuf& path) { TFieldPath fieldPath; if (!fieldPath.InitUnsafe(msg.GetDescriptor(), path)) return TMaybe<TConstField>(); return ByPathImpl<TConstField, const Message>(msg, fieldPath.Fields(), false); } - + TMaybe<TConstField> TConstField::ByPath(const Message& msg, const TFieldPath& path) { return ByPathImpl<TConstField, const Message>(msg, path.Fields(), false); } - + TMaybe<TMutableField> TMutableField::ByPath(Message& msg, const TVector<const FieldDescriptor*>& fieldsPath, bool createPath) { return ByPathImpl<TMutableField, Message>(msg, fieldsPath, createPath); } - + TMaybe<TMutableField> TMutableField::ByPath(Message& msg, const TStringBuf& path, bool createPath) { TFieldPath fieldPath; if (!fieldPath.InitUnsafe(msg.GetDescriptor(), path)) return TMaybe<TMutableField>(); return ByPathImpl<TMutableField, Message>(msg, fieldPath.Fields(), createPath); } - + TMaybe<TMutableField> TMutableField::ByPath(Message& msg, const TFieldPath& path, bool createPath) { return ByPathImpl<TMutableField, Message>(msg, path.Fields(), createPath); } -} +} diff --git a/library/cpp/protobuf/util/simple_reflection.h 
b/library/cpp/protobuf/util/simple_reflection.h index 61e877a7874..a5dd46ac792 100644 --- a/library/cpp/protobuf/util/simple_reflection.h +++ b/library/cpp/protobuf/util/simple_reflection.h @@ -1,17 +1,17 @@ #pragma once -#include "cast.h" -#include "path.h" +#include "cast.h" +#include "path.h" #include "traits.h" #include <google/protobuf/descriptor.h> #include <google/protobuf/message.h> -#include <util/generic/maybe.h> -#include <util/generic/typetraits.h> -#include <util/generic/vector.h> -#include <util/system/defaults.h> - +#include <util/generic/maybe.h> +#include <util/generic/typetraits.h> +#include <util/generic/vector.h> +#include <util/system/defaults.h> + namespace NProtoBuf { class TConstField { public: @@ -29,7 +29,7 @@ namespace NProtoBuf { const Message& Parent() const { return Msg; } - + const FieldDescriptor* Field() const { return Fd; } @@ -80,7 +80,7 @@ namespace NProtoBuf { bool IsMessage() const { return CppType() == FieldDescriptor::CPPTYPE_MESSAGE; } - + bool HasSameType(const TConstField& other) const { if (CppType() != other.CppType()) return false; @@ -90,7 +90,7 @@ namespace NProtoBuf { return false; return true; } - + protected: bool IsRepeated() const { return Fd->is_repeated(); @@ -137,7 +137,7 @@ namespace NProtoBuf { template <typename T> inline void Add(T value); - + inline void MergeFrom(const TConstField& src); inline void Clear() { @@ -167,17 +167,17 @@ namespace NProtoBuf { return; Refl().SwapElements(Mut(), Fd, index1, index2); } - + inline void Remove(size_t index) { if (index >= Size()) return; - + // Move to the end for (size_t i = index, size = Size(); i < size - 1; ++i) SwapElements(i, i + 1); RemoveLast(); } - + Message* MutableMessage(size_t index = 0) { Y_ASSERT(IsMessage()); if (IsRepeated()) { @@ -193,12 +193,12 @@ namespace NProtoBuf { inline TMsg* AddMessage() { return CheckedCast<TMsg*>(AddMessage()); } - + inline Message* AddMessage() { Y_ASSERT(IsMessage() && IsRepeated()); return Refl().AddMessage(Mut(), Fd); } - + private: Message* Mut() { return const_cast<Message*>(&Msg); diff --git a/library/cpp/protobuf/util/simple_reflection_ut.cpp b/library/cpp/protobuf/util/simple_reflection_ut.cpp index 169d4703c9c..e380991c027 100644 --- a/library/cpp/protobuf/util/simple_reflection_ut.cpp +++ b/library/cpp/protobuf/util/simple_reflection_ut.cpp @@ -7,8 +7,8 @@ using namespace NProtoBuf; Y_UNIT_TEST_SUITE(ProtobufSimpleReflection) { - static TSample GenSampleForMergeFrom() { - TSample smf; + static TSample GenSampleForMergeFrom() { + TSample smf; smf.SetOneStr("one str"); smf.MutableOneMsg()->AddRepInt(1); smf.AddRepMsg()->AddRepInt(2); @@ -20,8 +20,8 @@ Y_UNIT_TEST_SUITE(ProtobufSimpleReflection) { } Y_UNIT_TEST(MergeFromGeneric) { - const TSample src(GenSampleForMergeFrom()); - TSample dst; + const TSample src(GenSampleForMergeFrom()); + TSample dst; const Descriptor* descr = dst.GetDescriptor(); { @@ -52,8 +52,8 @@ Y_UNIT_TEST_SUITE(ProtobufSimpleReflection) { } Y_UNIT_TEST(MergeFromSelf) { - const TSample sample(GenSampleForMergeFrom()); - TSample msg(sample); + const TSample sample(GenSampleForMergeFrom()); + TSample msg(sample); const Descriptor* descr = msg.GetDescriptor(); TMutableField oneStr(msg, descr->FindFieldByName("OneStr")); @@ -66,8 +66,8 @@ Y_UNIT_TEST_SUITE(ProtobufSimpleReflection) { } Y_UNIT_TEST(MergeFromAnotherFD) { - const TSample sample(GenSampleForMergeFrom()); - TSample msg(GenSampleForMergeFrom()); + const TSample sample(GenSampleForMergeFrom()); + TSample msg(GenSampleForMergeFrom()); const Descriptor* descr = 
msg.GetDescriptor(); { // string @@ -95,205 +95,205 @@ Y_UNIT_TEST_SUITE(ProtobufSimpleReflection) { UNIT_ASSERT_VALUES_EQUAL(msg.RepMsgSize(), sample.RepMsgSize() + 1); } } - + Y_UNIT_TEST(RemoveByIndex) { - TSample msg; - - const Descriptor* descr = msg.GetDescriptor(); - { - TMutableField fld(msg, descr->FindFieldByName("RepMsg")); - msg.AddRepMsg()->AddRepInt(1); - msg.AddRepMsg()->AddRepInt(2); - msg.AddRepMsg()->AddRepInt(3); - - UNIT_ASSERT_VALUES_EQUAL(3, msg.RepMsgSize()); // 1, 2, 3 + TSample msg; + + const Descriptor* descr = msg.GetDescriptor(); + { + TMutableField fld(msg, descr->FindFieldByName("RepMsg")); + msg.AddRepMsg()->AddRepInt(1); + msg.AddRepMsg()->AddRepInt(2); + msg.AddRepMsg()->AddRepInt(3); + + UNIT_ASSERT_VALUES_EQUAL(3, msg.RepMsgSize()); // 1, 2, 3 fld.Remove(1); // from middle - UNIT_ASSERT_VALUES_EQUAL(2, msg.RepMsgSize()); - UNIT_ASSERT_VALUES_EQUAL(1, msg.GetRepMsg(0).GetRepInt(0)); - UNIT_ASSERT_VALUES_EQUAL(3, msg.GetRepMsg(1).GetRepInt(0)); - - msg.AddRepMsg()->AddRepInt(5); - UNIT_ASSERT_VALUES_EQUAL(3, msg.RepMsgSize()); // 1, 3, 5 + UNIT_ASSERT_VALUES_EQUAL(2, msg.RepMsgSize()); + UNIT_ASSERT_VALUES_EQUAL(1, msg.GetRepMsg(0).GetRepInt(0)); + UNIT_ASSERT_VALUES_EQUAL(3, msg.GetRepMsg(1).GetRepInt(0)); + + msg.AddRepMsg()->AddRepInt(5); + UNIT_ASSERT_VALUES_EQUAL(3, msg.RepMsgSize()); // 1, 3, 5 fld.Remove(2); // from end - UNIT_ASSERT_VALUES_EQUAL(2, msg.RepMsgSize()); - UNIT_ASSERT_VALUES_EQUAL(1, msg.GetRepMsg(0).GetRepInt(0)); - UNIT_ASSERT_VALUES_EQUAL(3, msg.GetRepMsg(1).GetRepInt(0)); - msg.ClearRepMsg(); - } - - { - TMutableField fld(msg, descr->FindFieldByName("RepStr")); - msg.AddRepStr("1"); - msg.AddRepStr("2"); - msg.AddRepStr("3"); - UNIT_ASSERT_VALUES_EQUAL(3, msg.RepStrSize()); // "1", "2", "3" + UNIT_ASSERT_VALUES_EQUAL(2, msg.RepMsgSize()); + UNIT_ASSERT_VALUES_EQUAL(1, msg.GetRepMsg(0).GetRepInt(0)); + UNIT_ASSERT_VALUES_EQUAL(3, msg.GetRepMsg(1).GetRepInt(0)); + msg.ClearRepMsg(); + } + + { + TMutableField fld(msg, descr->FindFieldByName("RepStr")); + msg.AddRepStr("1"); + msg.AddRepStr("2"); + msg.AddRepStr("3"); + UNIT_ASSERT_VALUES_EQUAL(3, msg.RepStrSize()); // "1", "2", "3" fld.Remove(0); // from begin - UNIT_ASSERT_VALUES_EQUAL(2, msg.RepStrSize()); - UNIT_ASSERT_VALUES_EQUAL("2", msg.GetRepStr(0)); - UNIT_ASSERT_VALUES_EQUAL("3", msg.GetRepStr(1)); - } - - { - TMutableField fld(msg, descr->FindFieldByName("OneStr")); - msg.SetOneStr("1"); - UNIT_ASSERT(msg.HasOneStr()); - fld.Remove(0); // not repeated - UNIT_ASSERT(!msg.HasOneStr()); - } - } - + UNIT_ASSERT_VALUES_EQUAL(2, msg.RepStrSize()); + UNIT_ASSERT_VALUES_EQUAL("2", msg.GetRepStr(0)); + UNIT_ASSERT_VALUES_EQUAL("3", msg.GetRepStr(1)); + } + + { + TMutableField fld(msg, descr->FindFieldByName("OneStr")); + msg.SetOneStr("1"); + UNIT_ASSERT(msg.HasOneStr()); + fld.Remove(0); // not repeated + UNIT_ASSERT(!msg.HasOneStr()); + } + } + Y_UNIT_TEST(GetFieldByPath) { - // Simple get by path - { - TSample msg; - msg.SetOneStr("1"); - msg.MutableOneMsg()->AddRepInt(2); - msg.MutableOneMsg()->AddRepInt(3); - msg.AddRepMsg()->AddRepInt(4); - msg.MutableRepMsg(0)->AddRepInt(5); - msg.AddRepMsg()->AddRepInt(6); - - { - TMaybe<TConstField> field = TConstField::ByPath(msg, "OneStr"); - UNIT_ASSERT(field); + // Simple get by path + { + TSample msg; + msg.SetOneStr("1"); + msg.MutableOneMsg()->AddRepInt(2); + msg.MutableOneMsg()->AddRepInt(3); + msg.AddRepMsg()->AddRepInt(4); + msg.MutableRepMsg(0)->AddRepInt(5); + msg.AddRepMsg()->AddRepInt(6); + + { + TMaybe<TConstField> field = 
TConstField::ByPath(msg, "OneStr"); + UNIT_ASSERT(field); UNIT_ASSERT(field->HasValue()); UNIT_ASSERT_VALUES_EQUAL("1", (field->Get<TString>())); - } - - { - TMaybe<TConstField> field = TConstField::ByPath(msg, "OneMsg"); - UNIT_ASSERT(field); + } + + { + TMaybe<TConstField> field = TConstField::ByPath(msg, "OneMsg"); + UNIT_ASSERT(field); UNIT_ASSERT(field->HasValue()); - UNIT_ASSERT(field->IsMessageInstance<TInnerSample>()); - } - - { - TMaybe<TConstField> field = TConstField::ByPath(msg, "/OneMsg/RepInt"); - UNIT_ASSERT(field); + UNIT_ASSERT(field->IsMessageInstance<TInnerSample>()); + } + + { + TMaybe<TConstField> field = TConstField::ByPath(msg, "/OneMsg/RepInt"); + UNIT_ASSERT(field); UNIT_ASSERT(field->HasValue()); - UNIT_ASSERT_VALUES_EQUAL(2, field->Size()); - UNIT_ASSERT_VALUES_EQUAL(2, field->Get<int>(0)); - UNIT_ASSERT_VALUES_EQUAL(3, field->Get<int>(1)); - } - - { - TMaybe<TConstField> field = TConstField::ByPath(msg, "RepMsg/RepInt"); - UNIT_ASSERT(field); + UNIT_ASSERT_VALUES_EQUAL(2, field->Size()); + UNIT_ASSERT_VALUES_EQUAL(2, field->Get<int>(0)); + UNIT_ASSERT_VALUES_EQUAL(3, field->Get<int>(1)); + } + + { + TMaybe<TConstField> field = TConstField::ByPath(msg, "RepMsg/RepInt"); + UNIT_ASSERT(field); UNIT_ASSERT(field->HasValue()); - UNIT_ASSERT_VALUES_EQUAL(2, field->Size()); - UNIT_ASSERT_VALUES_EQUAL(4, field->Get<int>(0)); - UNIT_ASSERT_VALUES_EQUAL(5, field->Get<int>(1)); - } - } - - // get of unset fields - { - TSample msg; - msg.MutableOneMsg(); - - { - TMaybe<TConstField> field = TConstField::ByPath(msg, "OneStr"); - UNIT_ASSERT(field); + UNIT_ASSERT_VALUES_EQUAL(2, field->Size()); + UNIT_ASSERT_VALUES_EQUAL(4, field->Get<int>(0)); + UNIT_ASSERT_VALUES_EQUAL(5, field->Get<int>(1)); + } + } + + // get of unset fields + { + TSample msg; + msg.MutableOneMsg(); + + { + TMaybe<TConstField> field = TConstField::ByPath(msg, "OneStr"); + UNIT_ASSERT(field); UNIT_ASSERT(!field->HasValue()); - } - - { - TMaybe<TConstField> field = TConstField::ByPath(msg, "OneMsg/RepInt"); - UNIT_ASSERT(field); + } + + { + TMaybe<TConstField> field = TConstField::ByPath(msg, "OneMsg/RepInt"); + UNIT_ASSERT(field); UNIT_ASSERT(!field->HasValue()); - } - - { - TMaybe<TConstField> field = TConstField::ByPath(msg, "RepMsg/RepInt"); - UNIT_ASSERT(!field); - } - } - - // mutable - { - TSample msg; - msg.MutableOneMsg(); - - { - TMaybe<TMutableField> field = TMutableField::ByPath(msg, "OneStr"); - UNIT_ASSERT(field); + } + + { + TMaybe<TConstField> field = TConstField::ByPath(msg, "RepMsg/RepInt"); + UNIT_ASSERT(!field); + } + } + + // mutable + { + TSample msg; + msg.MutableOneMsg(); + + { + TMaybe<TMutableField> field = TMutableField::ByPath(msg, "OneStr"); + UNIT_ASSERT(field); UNIT_ASSERT(!field->HasValue()); field->Set(TString("zz")); UNIT_ASSERT(field->HasValue()); - UNIT_ASSERT_VALUES_EQUAL("zz", msg.GetOneStr()); - } - - { - TMaybe<TMutableField> field = TMutableField::ByPath(msg, "OneStr"); - UNIT_ASSERT(field); + UNIT_ASSERT_VALUES_EQUAL("zz", msg.GetOneStr()); + } + + { + TMaybe<TMutableField> field = TMutableField::ByPath(msg, "OneStr"); + UNIT_ASSERT(field); UNIT_ASSERT(field->HasValue()); field->Set(TString("dd")); UNIT_ASSERT(field->HasValue()); - UNIT_ASSERT_VALUES_EQUAL("dd", msg.GetOneStr()); - } - - { - TMaybe<TMutableField> field = TMutableField::ByPath(msg, "OneMsg/RepInt"); - UNIT_ASSERT(field); + UNIT_ASSERT_VALUES_EQUAL("dd", msg.GetOneStr()); + } + + { + TMaybe<TMutableField> field = TMutableField::ByPath(msg, "OneMsg/RepInt"); + UNIT_ASSERT(field); 
UNIT_ASSERT(!field->HasValue()); - field->Add(10); - UNIT_ASSERT_VALUES_EQUAL(10, msg.GetOneMsg().GetRepInt(0)); - } - - { - TMaybe<TMutableField> field = TMutableField::ByPath(msg, "RepMsg/RepInt"); - UNIT_ASSERT(!field); - } - } - - // mutable with path creation - { - TSample msg; - - { - TMaybe<TMutableField> field = TMutableField::ByPath(msg, "OneStr", true); - UNIT_ASSERT(field); + field->Add(10); + UNIT_ASSERT_VALUES_EQUAL(10, msg.GetOneMsg().GetRepInt(0)); + } + + { + TMaybe<TMutableField> field = TMutableField::ByPath(msg, "RepMsg/RepInt"); + UNIT_ASSERT(!field); + } + } + + // mutable with path creation + { + TSample msg; + + { + TMaybe<TMutableField> field = TMutableField::ByPath(msg, "OneStr", true); + UNIT_ASSERT(field); UNIT_ASSERT(!field->HasValue()); - } - - { - TMaybe<TMutableField> field = TMutableField::ByPath(msg, "OneMsg/RepInt", true); - UNIT_ASSERT(field); + } + + { + TMaybe<TMutableField> field = TMutableField::ByPath(msg, "OneMsg/RepInt", true); + UNIT_ASSERT(field); UNIT_ASSERT(!field->HasValue()); - UNIT_ASSERT(msg.HasOneMsg()); - field->Add(10); - UNIT_ASSERT_VALUES_EQUAL(10, msg.GetOneMsg().GetRepInt(0)); - } - - { - TMaybe<TMutableField> field = TMutableField::ByPath(msg, "RepMsg/RepInt", true); - TMaybe<TMutableField> fieldCopy = TMutableField::ByPath(msg, "RepMsg/RepInt", true); + UNIT_ASSERT(msg.HasOneMsg()); + field->Add(10); + UNIT_ASSERT_VALUES_EQUAL(10, msg.GetOneMsg().GetRepInt(0)); + } + + { + TMaybe<TMutableField> field = TMutableField::ByPath(msg, "RepMsg/RepInt", true); + TMaybe<TMutableField> fieldCopy = TMutableField::ByPath(msg, "RepMsg/RepInt", true); Y_UNUSED(fieldCopy); - UNIT_ASSERT(field); + UNIT_ASSERT(field); UNIT_ASSERT(!field->HasValue()); - UNIT_ASSERT_VALUES_EQUAL(1, msg.RepMsgSize()); - field->Add(12); - UNIT_ASSERT_VALUES_EQUAL(12, field->Get<int>()); - } - } - - // error - { + UNIT_ASSERT_VALUES_EQUAL(1, msg.RepMsgSize()); + field->Add(12); + UNIT_ASSERT_VALUES_EQUAL(12, field->Get<int>()); + } + } + + // error + { {TSample msg; UNIT_ASSERT(!TConstField::ByPath(msg, "SomeField")); } - + { TSample msg; UNIT_ASSERT(!TMutableField::ByPath(msg, "SomeField/FieldSome")); } - + { TSample msg; UNIT_ASSERT(!TMutableField::ByPath(msg, "SomeField/FieldSome", true)); } } - + // extension { TSample msg; @@ -303,13 +303,13 @@ Y_UNIT_TEST_SUITE(ProtobufSimpleReflection) { TInnerSample* subMsg = msg.MutableExtension(NExt::SubMsgExt); subMsg->AddRepInt(20); subMsg->SetExtension(NExt::Ext3Field, 54); - + { TMaybe<TConstField> field = TConstField::ByPath(msg, "NExt.TTestExt.ExtField"); UNIT_ASSERT(field); UNIT_ASSERT(field->HasValue()); UNIT_ASSERT_VALUES_EQUAL("ext", field->Get<TString>()); - } + } { TMaybe<TConstField> field = TConstField::ByPath(msg, "NExt.ExtField"); UNIT_ASSERT(field); diff --git a/library/cpp/protobuf/util/traits.h b/library/cpp/protobuf/util/traits.h index 50f036d0ea4..01b49e4184c 100644 --- a/library/cpp/protobuf/util/traits.h +++ b/library/cpp/protobuf/util/traits.h @@ -177,7 +177,7 @@ namespace NProtoBuf { static inline T GetDefault(const FieldDescriptor* field) { return TBaseTraits::GetDefault(field); } - + static inline bool Has(const Message& msg, const FieldDescriptor* field) { return TBaseTraits::Has(msg, field); } diff --git a/library/cpp/protobuf/util/ut/extensions.proto b/library/cpp/protobuf/util/ut/extensions.proto index 4944f0f5ca7..0ef6a6fec2f 100644 --- a/library/cpp/protobuf/util/ut/extensions.proto +++ b/library/cpp/protobuf/util/ut/extensions.proto @@ -1,22 +1,22 @@ -package NExt; - +package NExt; + import 
"library/cpp/protobuf/util/ut/sample_for_simple_reflection.proto"; - -message TTestExt { - extend TSample { - optional string ExtField = 100; - } -} - -extend TSample { - optional uint64 ExtField = 150; // the same name, but another full name -} - -extend TSample { - repeated uint64 Ext2Field = 105; - optional TInnerSample SubMsgExt = 111; -} - -extend TInnerSample { - optional uint64 Ext3Field = 100; -} + +message TTestExt { + extend TSample { + optional string ExtField = 100; + } +} + +extend TSample { + optional uint64 ExtField = 150; // the same name, but another full name +} + +extend TSample { + repeated uint64 Ext2Field = 105; + optional TInnerSample SubMsgExt = 111; +} + +extend TInnerSample { + optional uint64 Ext3Field = 100; +} diff --git a/library/cpp/protobuf/util/ut/sample_for_simple_reflection.proto b/library/cpp/protobuf/util/ut/sample_for_simple_reflection.proto index cca1dd869ad..88e4f0f8776 100644 --- a/library/cpp/protobuf/util/ut/sample_for_simple_reflection.proto +++ b/library/cpp/protobuf/util/ut/sample_for_simple_reflection.proto @@ -1,16 +1,16 @@ -message TInnerSample { +message TInnerSample { repeated int32 RepInt = 1; - - extensions 100 to 199; + + extensions 100 to 199; } -message TSample { +message TSample { optional string OneStr = 1; - optional TInnerSample OneMsg = 2; - repeated TInnerSample RepMsg = 3; + optional TInnerSample OneMsg = 2; + repeated TInnerSample RepMsg = 3; repeated string RepStr = 4; optional string AnotherOneStr = 5; - + optional int32 OneInt = 6; repeated int32 RepInt = 7; @@ -21,5 +21,5 @@ message TSample { optional EEnum OneEnum = 8; repeated EEnum RepEnum = 9; - extensions 100 to 199; + extensions 100 to 199; } diff --git a/library/cpp/protobuf/util/ut/ya.make b/library/cpp/protobuf/util/ut/ya.make index 701ba9a8c85..dd850af6cbe 100644 --- a/library/cpp/protobuf/util/ut/ya.make +++ b/library/cpp/protobuf/util/ut/ya.make @@ -3,16 +3,16 @@ OWNER(nga) UNITTEST_FOR(library/cpp/protobuf/util) SRCS( - extensions.proto + extensions.proto sample_for_is_equal.proto sample_for_simple_reflection.proto common_ut.proto pb_io_ut.cpp - is_equal_ut.cpp + is_equal_ut.cpp iterators_ut.cpp - simple_reflection_ut.cpp - repeated_field_utils_ut.cpp - walk_ut.cpp + simple_reflection_ut.cpp + repeated_field_utils_ut.cpp + walk_ut.cpp merge_ut.cpp ) diff --git a/library/cpp/protobuf/util/walk.h b/library/cpp/protobuf/util/walk.h index d15d76562d3..f5559fd907d 100644 --- a/library/cpp/protobuf/util/walk.h +++ b/library/cpp/protobuf/util/walk.h @@ -30,4 +30,4 @@ namespace NProtoBuf { // Returned bool defines if we should walk down deeper to current node children (true), or not (false) void WalkSchema(const Descriptor* descriptor, std::function<bool(const FieldDescriptor*)> onField); -} +} diff --git a/library/cpp/protobuf/util/ya.make b/library/cpp/protobuf/util/ya.make index b62028af58b..6f0299b76bd 100644 --- a/library/cpp/protobuf/util/ya.make +++ b/library/cpp/protobuf/util/ya.make @@ -10,14 +10,14 @@ PEERDIR( ) SRCS( - is_equal.cpp + is_equal.cpp iterators.h merge.cpp - path.cpp - pb_io.cpp + path.cpp + pb_io.cpp pb_utils.h repeated_field_utils.h - simple_reflection.cpp + simple_reflection.cpp walk.cpp ) diff --git a/library/cpp/scheme/scheme.h b/library/cpp/scheme/scheme.h index 3d7c59f3c97..b9319c531e2 100644 --- a/library/cpp/scheme/scheme.h +++ b/library/cpp/scheme/scheme.h @@ -294,7 +294,7 @@ namespace NSc { static const EJsonOpts JO_SAFE = TJsonOpts::JO_SAFE; // JO_SORT_KEYS | JO_SKIP_UNSAFE static const EJsonOpts JO_PARSER_STRICT_WITH_COMMENTS = 
TJsonOpts::JO_PARSER_STRICT_WITH_COMMENTS; // strict json + strict utf8 static const EJsonOpts JO_PARSER_STRICT = TJsonOpts::JO_PARSER_STRICT; // strict json + strict utf8 + comments are disallowed - static const EJsonOpts JO_PARSER_DISALLOW_DUPLICATE_KEYS = TJsonOpts::JO_PARSER_DISALLOW_DUPLICATE_KEYS; + static const EJsonOpts JO_PARSER_DISALLOW_DUPLICATE_KEYS = TJsonOpts::JO_PARSER_DISALLOW_DUPLICATE_KEYS; static TValue FromJson(TStringBuf, const TJsonOpts& = TJsonOpts()); static TValue FromJsonThrow(TStringBuf, const TJsonOpts& = TJsonOpts()); diff --git a/library/cpp/scheme/scimpl.h b/library/cpp/scheme/scimpl.h index 4f68f16290f..7f0fc758ef1 100644 --- a/library/cpp/scheme/scimpl.h +++ b/library/cpp/scheme/scimpl.h @@ -280,13 +280,13 @@ namespace NSc { TDict::const_iterator it = Dict.find(key); return it != Dict.end() ? it->second : TValue::DefaultValue(); - } - + } + TValue* GetNoAdd(TStringBuf key) { if (!IsDict()) { return nullptr; } - + return Dict.FindPtr(key); } @@ -582,7 +582,7 @@ namespace NSc { TValue& TValue::Back() { return CoreMutable().Back(); } - + const TValue& TValue::Back() const { const TArray& arr = GetArray(); return arr.empty() ? DefaultValue() : arr.back(); diff --git a/library/cpp/scheme/scimpl_defs.h b/library/cpp/scheme/scimpl_defs.h index f3dd66b4379..2c02806fb92 100644 --- a/library/cpp/scheme/scimpl_defs.h +++ b/library/cpp/scheme/scimpl_defs.h @@ -74,7 +74,7 @@ namespace NSc { JO_PARSER_STRICT_JSON = 16, // strict standard json JO_PARSER_STRICT_UTF8 = 32, // strict utf8 JO_PARSER_DISALLOW_COMMENTS = 64, - JO_PARSER_DISALLOW_DUPLICATE_KEYS = 128, + JO_PARSER_DISALLOW_DUPLICATE_KEYS = 128, JO_PRETTY = JO_FORMAT | JO_SORT_KEYS, // pretty print json JO_SAFE = JO_SKIP_UNSAFE | JO_SORT_KEYS, // ensure standard parser-safe json @@ -85,8 +85,8 @@ namespace NSc { public: TJsonOpts(int opts = JO_SORT_KEYS) - : Opts(opts) - , SortKeys(opts & JO_SORT_KEYS) + : Opts(opts) + , SortKeys(opts & JO_SORT_KEYS) , FormatJson(opts & JO_FORMAT) , StringPolicy((opts & JO_SKIP_UNSAFE) ? 
StringPolicySafe : StringPolicyUnsafe) { @@ -97,7 +97,7 @@ namespace NSc { public: bool RelaxedJson = false; - int Opts = 0; + int Opts = 0; bool SortKeys = true; bool FormatJson = false; diff --git a/library/cpp/scheme/scimpl_json_read.cpp b/library/cpp/scheme/scimpl_json_read.cpp index 8a29cc77391..a0edc15d012 100644 --- a/library/cpp/scheme/scimpl_json_read.cpp +++ b/library/cpp/scheme/scimpl_json_read.cpp @@ -22,13 +22,13 @@ namespace NSc { { } - bool Add(TStringBuf v, bool allowDuplicated) { - if (!ExpectKey || Y_UNLIKELY(!Container->IsDict())) - return false; - - if (!allowDuplicated && Y_UNLIKELY(Container->Has(v))) + bool Add(TStringBuf v, bool allowDuplicated) { + if (!ExpectKey || Y_UNLIKELY(!Container->IsDict())) return false; + if (!allowDuplicated && Y_UNLIKELY(Container->Has(v))) + return false; + LastValue = &Container->GetOrAdd(v); ExpectKey = false; return true; @@ -62,16 +62,16 @@ namespace NSc { public: TValue& Root; TJsonError& Error; - const TJsonOpts& Cfg; + const TJsonOpts& Cfg; TStackType Stack; bool Virgin = true; public: - TJsonDeserializer(TValue& root, TJsonError& err, const TJsonOpts& cfg) + TJsonDeserializer(TValue& root, TJsonError& err, const TJsonOpts& cfg) : Root(root) , Error(err) - , Cfg(cfg) + , Cfg(cfg) { Root.SetNull(); Stack.reserve(10); @@ -143,7 +143,7 @@ namespace NSc { bool OnMapKey(const TStringBuf& k) override { if (Y_UNLIKELY(Stack.empty())) return false; - return Stack.back().Add(k, !(Cfg.Opts & TJsonOpts::JO_PARSER_DISALLOW_DUPLICATE_KEYS)); + return Stack.back().Add(k, !(Cfg.Opts & TJsonOpts::JO_PARSER_DISALLOW_DUPLICATE_KEYS)); } bool OnOpenMap() override { @@ -180,8 +180,8 @@ namespace NSc { } }; - static bool DoParseFromJson(TValue& res, TJsonError& err, TStringBuf json, const TJsonOpts& cfg) { - TJsonDeserializer d(res, err, cfg); + static bool DoParseFromJson(TValue& res, TJsonError& err, TStringBuf json, const TJsonOpts& cfg) { + TJsonDeserializer d(res, err, cfg); if (cfg.RelaxedJson) { return NJson::ReadJsonFast(json, &d); @@ -191,7 +191,7 @@ namespace NSc { } } - static bool DoParseFromJson(TValue& res, TStringBuf json, const TJsonOpts& cfg) { + static bool DoParseFromJson(TValue& res, TStringBuf json, const TJsonOpts& cfg) { TJsonError err; return DoParseFromJson(res, err, json, cfg); } diff --git a/library/cpp/scheme/tests/ut/scheme_json_ut.cpp b/library/cpp/scheme/tests/ut/scheme_json_ut.cpp index daeb2654f9a..37d635238e1 100644 --- a/library/cpp/scheme/tests/ut/scheme_json_ut.cpp +++ b/library/cpp/scheme/tests/ut/scheme_json_ut.cpp @@ -137,7 +137,7 @@ Y_UNIT_TEST_SUITE(TSchemeJsonTest) { NSc::TValue b = NSc::TValue::FromJsonValue(a.ToJsonValue()); UNIT_ASSERT_JSON_EQ_JSON(a, b); } - + Y_UNIT_TEST(TestJsonEmptyContainers) { { NSc::TValue a = NSc::NUt::AssertFromJson("{a:[]}"); @@ -153,9 +153,9 @@ Y_UNIT_TEST_SUITE(TSchemeJsonTest) { Y_UNIT_TEST(TestDuplicateKeys) { const TStringBuf duplicatedKeys = "{\"a\":[{\"b\":1, \"b\":42}]}"; - UNIT_ASSERT_NO_EXCEPTION(NSc::TValue::FromJsonThrow(duplicatedKeys)); - UNIT_ASSERT_EXCEPTION(NSc::TValue::FromJsonThrow(duplicatedKeys, NSc::TValue::JO_PARSER_DISALLOW_DUPLICATE_KEYS), yexception); - UNIT_ASSERT(NSc::TValue::FromJson(duplicatedKeys).IsDict()); - UNIT_ASSERT(NSc::TValue::FromJson(duplicatedKeys, NSc::TValue::JO_PARSER_DISALLOW_DUPLICATE_KEYS).IsNull()); - } + UNIT_ASSERT_NO_EXCEPTION(NSc::TValue::FromJsonThrow(duplicatedKeys)); + UNIT_ASSERT_EXCEPTION(NSc::TValue::FromJsonThrow(duplicatedKeys, NSc::TValue::JO_PARSER_DISALLOW_DUPLICATE_KEYS), yexception); + 
UNIT_ASSERT(NSc::TValue::FromJson(duplicatedKeys).IsDict()); + UNIT_ASSERT(NSc::TValue::FromJson(duplicatedKeys, NSc::TValue::JO_PARSER_DISALLOW_DUPLICATE_KEYS).IsNull()); + } }; diff --git a/library/cpp/scheme/tests/ut/scheme_ut.cpp b/library/cpp/scheme/tests/ut/scheme_ut.cpp index 1a5d07c31bc..20fe460948d 100644 --- a/library/cpp/scheme/tests/ut/scheme_ut.cpp +++ b/library/cpp/scheme/tests/ut/scheme_ut.cpp @@ -863,17 +863,17 @@ Y_UNIT_TEST_SUITE(TSchemeTest) { } Y_UNIT_TEST(TestGetNoAdd) { - NSc::TValue v = NSc::NUt::AssertFromJson("{a:[null,-1,2,3.4],b:3,c:{d:5}}"); + NSc::TValue v = NSc::NUt::AssertFromJson("{a:[null,-1,2,3.4],b:3,c:{d:5}}"); UNIT_ASSERT(v.GetNoAdd("a") != nullptr); UNIT_ASSERT(v.GetNoAdd("b") != nullptr); UNIT_ASSERT(v.GetNoAdd("c") != nullptr); UNIT_ASSERT(v.GetNoAdd("d") == nullptr); UNIT_ASSERT(v.GetNoAdd("value") == nullptr); - + NSc::TValue* child = v.GetNoAdd("c"); UNIT_ASSERT(child != nullptr); (*child)["e"]["f"] = 42; - const NSc::TValue expectedResult = NSc::NUt::AssertFromJson("{a:[null,-1,2,3.4],b:3,c:{d:5,e:{f:42}}}"); - UNIT_ASSERT_VALUES_EQUAL(v, expectedResult); - } + const NSc::TValue expectedResult = NSc::NUt::AssertFromJson("{a:[null,-1,2,3.4],b:3,c:{d:5,e:{f:42}}}"); + UNIT_ASSERT_VALUES_EQUAL(v, expectedResult); + } }; diff --git a/library/cpp/sliding_window/README.md b/library/cpp/sliding_window/README.md index 47692da7d57..b9c952ba8ec 100644 --- a/library/cpp/sliding_window/README.md +++ b/library/cpp/sliding_window/README.md @@ -1,30 +1,30 @@ -# TSlidingWindow - sliding window - + [TSlidingWindow](/arc/trunk/arcadia/library/cpp/sliding_window/sliding_window.h) - a sliding-window class that maintains and updates a given value (maximum, sum) over a time interval of fixed length. It splits the whole time span into small buckets (their number is set in the constructor) and rotates them, maintaining the value over the window. A mutex or a spinlock can also be specified for synchronization (TFakeMutex by default). Usage: - -``` -// Create a window that computes the maximum over the last five minutes, keeping 50 buckets of values. -TSlidingWindow<TMaxOperation<int>> window(TDuration::Minutes(5), 50); - -// Load values at different points in time -window.Update(42, TInstant::Now()); - -... // do some work -int currentMaximum = window.Update(50, TInstant::Now()); - -... // do something else -int currentMaximum = window.Update(25, TInstant::Now()); - -... -// Just get the maximum value for the last 5 minutes -int currentMaximum = window.Update(TInstant::Now()); - -... -int currentMaximum = window.GetValue(); // get the value without updating the time -``` - -# Supported operations - -* `TMaxOperation` - maximum -* `TMinOperation` - minimum -* `TSumOperation` - sum + +``` +// Create a window that computes the maximum over the last five minutes, keeping 50 buckets of values. +TSlidingWindow<TMaxOperation<int>> window(TDuration::Minutes(5), 50); + +// Load values at different points in time +window.Update(42, TInstant::Now()); + +... // do some work +int currentMaximum = window.Update(50, TInstant::Now()); + +... // do something else +int currentMaximum = window.Update(25, TInstant::Now()); + +... +// Just get the maximum value for the last 5 minutes +int currentMaximum = window.Update(TInstant::Now()); + +...
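// [Editorial sketch, not part of the diff above.] The README example only shows TMaxOperation;
// this hedged illustration applies the same TSlidingWindow API to the TSumOperation policy that
// the README lists below. The names requestsPerMinute and lastMinuteTotal are illustrative only.
// Count events over the last minute, spread across 60 one-second buckets.
TSlidingWindow<TSumOperation<unsigned>> requestsPerMinute(TDuration::Minutes(1), 60);
requestsPerMinute.Update(1, TInstant::Now());                         // record one event now
unsigned lastMinuteTotal = requestsPerMinute.Update(TInstant::Now()); // advance time and read the sum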
+int currentMaximum = window.GetValue(); // get the value without updating the time +``` + +# Supported operations + +* `TMaxOperation` - maximum +* `TMinOperation` - minimum +* `TSumOperation` - sum diff --git a/library/cpp/sliding_window/sliding_window.h b/library/cpp/sliding_window/sliding_window.h index 180bdf93d0d..d140ec7f9c3 100644 --- a/library/cpp/sliding_window/sliding_window.h +++ b/library/cpp/sliding_window/sliding_window.h @@ -1,16 +1,16 @@ #pragma once -#include <util/datetime/base.h> -#include <util/generic/vector.h> -#include <util/system/guard.h> -#include <util/system/mutex.h> -#include <util/system/types.h> -#include <util/system/yassert.h> +#include <util/datetime/base.h> +#include <util/generic/vector.h> +#include <util/system/guard.h> +#include <util/system/mutex.h> +#include <util/system/types.h> +#include <util/system/yassert.h> -#include <functional> -#include <limits> +#include <functional> +#include <limits> -namespace NSlidingWindow { +namespace NSlidingWindow { namespace NPrivate { template <class TValueType_, class TCmp, TValueType_ initialValue> // std::less for max, std::greater for min struct TMinMaxOperationImpl { @@ -33,14 +33,14 @@ namespace NSlidingWindow { } } return windowValue; - } + } static TValueType ClearBuckets(TValueType windowValue, TValueVector& buckets, const size_t firstElemIndex, const size_t bucketsToClear) { Y_ASSERT(!buckets.empty()); Y_ASSERT(firstElemIndex < buckets.size()); Y_ASSERT(bucketsToClear <= buckets.size()); TCmp cmp; - + bool needRecalc = false; size_t current = firstElemIndex; const size_t arraySize = buckets.size(); @@ -51,7 +51,7 @@ namespace NSlidingWindow { } curVal = InitialValue(); current = (current + 1) % arraySize; - } + } if (needRecalc) { windowValue = InitialValue(); for (size_t i = 0; i < firstElemIndex; ++i) { @@ -66,28 +66,28 @@ namespace NSlidingWindow { windowValue = val; } } - } + } return windowValue; - } + } }; - } + } template <class TValueType> using TMaxOperation = NPrivate::TMinMaxOperationImpl<TValueType, std::less<TValueType>, std::numeric_limits<TValueType>::min()>; - + template <class TValueType> using TMinOperation = NPrivate::TMinMaxOperationImpl<TValueType, std::greater<TValueType>, std::numeric_limits<TValueType>::max()>; - + template <class TValueType_> struct TSumOperation { using TValueType = TValueType_; using TValueVector = TVector<TValueType>; - + static constexpr TValueType InitialValue() { return TValueType(); // zero } - + // Updates value in current bucket and returns window value static TValueType UpdateBucket(TValueType windowValue, TValueVector& buckets, size_t index, TValueType newVal) { Y_ASSERT(index < buckets.size()); @@ -95,12 +95,12 @@ namespace NSlidingWindow { windowValue += newVal; return windowValue; } - + static TValueType ClearBuckets(TValueType windowValue, TValueVector& buckets, size_t firstElemIndex, size_t bucketsToClear) { Y_ASSERT(!buckets.empty()); Y_ASSERT(firstElemIndex < buckets.size()); Y_ASSERT(bucketsToClear <= buckets.size()); - + const size_t arraySize = buckets.size(); for (size_t i = 0; i < bucketsToClear; ++i) { TValueType& curVal = buckets[firstElemIndex]; @@ -145,17 +145,17 @@ namespace NSlidingWindow { Length = w.Length; MicroSecondsPerBucket = w.MicroSecondsPerBucket; } - + TSlidingWindow(TSlidingWindow&&) = default; - + TSlidingWindow& operator=(TSlidingWindow&&) = default; TSlidingWindow& operator=(const TSlidingWindow&) = delete; - + // Period of time const TDuration& GetDuration() const { return Length; } - + // Update window with
new value and time TValueType Update(TValueType val, TInstant t) { TGuard<TMutexImpl> guard(&Mutex); @@ -163,14 +163,14 @@ namespace NSlidingWindow { UpdateCurrentBucket(val); return WindowValue; } - + // Update just time, without new values TValueType Update(TInstant t) { TGuard<TMutexImpl> guard(&Mutex); AdvanceTime(t); return WindowValue; } - + // Get current window value (without updating current time) TValueType GetValue() const { TGuard<TMutexImpl> guard(&Mutex); @@ -182,7 +182,7 @@ namespace NSlidingWindow { const TSizeType arraySize = Buckets.size(); const TSizeType pos = (FirstElem + arraySize - 1) % arraySize; WindowValue = TOperation::UpdateBucket(WindowValue, Buckets, pos, val); - } + } void AdvanceTime(const TInstant& time) { if (time < PeriodStart + Length) { @@ -193,24 +193,24 @@ namespace NSlidingWindow { PeriodStart = time - Length; return; } - + const TInstant& newPeriodStart = time - Length; const ui64 tmDiff = (newPeriodStart - PeriodStart).MicroSeconds(); const TSizeType bucketsDiff = tmDiff / MicroSecondsPerBucket; const TSizeType arraySize = Buckets.size(); const TSizeType buckets = Min(bucketsDiff, arraySize); - + WindowValue = TOperation::ClearBuckets(WindowValue, Buckets, FirstElem, buckets); FirstElem = (FirstElem + buckets) % arraySize; PeriodStart += TDuration::MicroSeconds(bucketsDiff * MicroSecondsPerBucket); - + // Check that PeriodStart lags behind newPeriodStart // (which is actual, uptodate, precise and equal to time - Length) not more // then MicroSecondsPerBucket Y_ASSERT(newPeriodStart >= PeriodStart); Y_ASSERT((newPeriodStart - PeriodStart).MicroSeconds() <= MicroSecondsPerBucket); } - + mutable TMutexImpl Mutex; TValueVector Buckets; diff --git a/library/cpp/sliding_window/sliding_window_ut.cpp b/library/cpp/sliding_window/sliding_window_ut.cpp index 1e7343a8d3d..22814fadeba 100644 --- a/library/cpp/sliding_window/sliding_window_ut.cpp +++ b/library/cpp/sliding_window/sliding_window_ut.cpp @@ -2,131 +2,131 @@ #include <library/cpp/testing/unittest/registar.h> -using namespace NSlidingWindow; +using namespace NSlidingWindow; Y_UNIT_TEST_SUITE(TSlidingWindowTest) { Y_UNIT_TEST(TestSlidingWindowMax) { - TSlidingWindow<TMaxOperation<unsigned>> w(TDuration::Minutes(5), 5); + TSlidingWindow<TMaxOperation<unsigned>> w(TDuration::Minutes(5), 5); TInstant start = TInstant::MicroSeconds(TDuration::Hours(1).MicroSeconds()); - TInstant now = start; + TInstant now = start; w.Update(5, start); // ~ ~ ~ ~ 5 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - now += TDuration::Minutes(1) + TDuration::Seconds(1); + now += TDuration::Minutes(1) + TDuration::Seconds(1); w.Update(5, now); // 5 ~ ~ ~ 5 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - now += TDuration::Minutes(1); + now += TDuration::Minutes(1); w.Update(3, now); // 5 3 ~ ~ 5 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - now += TDuration::Minutes(3); + now += TDuration::Minutes(3); w.Update(2, now); // 5 3 ~ ~ 2 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - now += TDuration::Minutes(1); + now += TDuration::Minutes(1); w.Update(2, now); // 2 3 ~ ~ 2 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 3); // ^ - now += TDuration::Minutes(1); + now += TDuration::Minutes(1); w.Update(2, now); // 2 2 ~ ~ 2 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 2); // ^ - now += TDuration::Minutes(5); + now += TDuration::Minutes(5); w.Update(1, now); // ~ 1 ~ ~ ~ UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 1); // ^ - // update current bucket + // update current bucket w.Update(2, now); // ~ 2 ~ ~ ~ UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 
2); // ^ - + w.Update(1, now + TDuration::Seconds(30)); // ~ 2 ~ ~ ~ UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 2); // ^ - - // test idle - now += TDuration::Minutes(1); + + // test idle + now += TDuration::Minutes(1); w.Update(now); // ~ 2 ~ ~ ~ UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 2); // ^ - + now += TDuration::Minutes(5); // ~ ~ ~ ~ ~ - UNIT_ASSERT_VALUES_EQUAL(w.Update(now), 0); + UNIT_ASSERT_VALUES_EQUAL(w.Update(now), 0); } Y_UNIT_TEST(TestSlidingWindowMin) { - TSlidingWindow<TMinOperation<unsigned>> w(TDuration::Minutes(5), 5); + TSlidingWindow<TMinOperation<unsigned>> w(TDuration::Minutes(5), 5); TInstant start = TInstant::MicroSeconds(TDuration::Hours(1).MicroSeconds()); - TInstant now = start; + TInstant now = start; w.Update(5, start); // ~ ~ ~ ~ 5 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - now += TDuration::Minutes(1) + TDuration::Seconds(1); + now += TDuration::Minutes(1) + TDuration::Seconds(1); w.Update(5, now); // 5 ~ ~ ~ 5 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - now += TDuration::Minutes(1); + now += TDuration::Minutes(1); w.Update(7, now); // 5 7 ~ ~ 5 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - now += TDuration::Minutes(3); + now += TDuration::Minutes(3); w.Update(8, now); // 5 7 ~ ~ 8 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - now += TDuration::Minutes(1); + now += TDuration::Minutes(1); w.Update(8, now); // 8 7 ~ ~ 8 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 7); // ^ - now += TDuration::Minutes(1); + now += TDuration::Minutes(1); w.Update(8, now); // 8 8 ~ ~ 8 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 8); // ^ - now += TDuration::Minutes(5); + now += TDuration::Minutes(5); w.Update(6, now); // ~ 6 ~ ~ ~ UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 6); // ^ - // update current bucket + // update current bucket w.Update(5, now); // ~ 5 ~ ~ ~ UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - + w.Update(6, now + TDuration::Seconds(30)); // ~ 5 ~ ~ ~ UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - - // test idle - now += TDuration::Minutes(1); + + // test idle + now += TDuration::Minutes(1); w.Update(now); // ~ 5 ~ ~ ~ UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - + now += TDuration::Minutes(5); // ~ ~ ~ ~ ~ - UNIT_ASSERT_VALUES_EQUAL(w.Update(now), std::numeric_limits<unsigned>::max()); - } - + UNIT_ASSERT_VALUES_EQUAL(w.Update(now), std::numeric_limits<unsigned>::max()); + } + Y_UNIT_TEST(TestSlidingWindowSum) { - TSlidingWindow<TSumOperation<unsigned>> w(TDuration::Minutes(5), 5); - UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 0); // current sum - + TSlidingWindow<TSumOperation<unsigned>> w(TDuration::Minutes(5), 5); + UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 0); // current sum + TInstant start = TInstant::MicroSeconds(TDuration::Hours(1).MicroSeconds()); - TInstant now = start; + TInstant now = start; w.Update(5, start); // 0 0 0 0 5 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 5); // ^ - now += TDuration::Minutes(1) + TDuration::Seconds(1); + now += TDuration::Minutes(1) + TDuration::Seconds(1); w.Update(5, now); // 5 0 0 0 5 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 10); // ^ - now += TDuration::Minutes(1); + now += TDuration::Minutes(1); w.Update(3, now); // 5 3 0 0 5 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 13); // ^ - now += TDuration::Minutes(3); + now += TDuration::Minutes(3); w.Update(2, now); // 5 3 0 0 2 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 10); // ^ - now += TDuration::Minutes(1); + now += TDuration::Minutes(1); w.Update(2, now); // 2 3 0 0 2 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 7); // ^ - now += TDuration::Minutes(1); + now += TDuration::Minutes(1); 
w.Update(2, now); // 2 2 0 0 2 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 6); // ^ - now += TDuration::Minutes(5); + now += TDuration::Minutes(5); w.Update(1, now); // 0 1 0 0 0 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 1); // ^ - - // update current bucket + + // update current bucket w.Update(2, now); // 0 3 0 0 0 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 3); // ^ - + w.Update(1, now + TDuration::Seconds(30)); // 0 4 0 0 0 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 4); // ^ - - // test idle - now += TDuration::Minutes(1); + + // test idle + now += TDuration::Minutes(1); w.Update(now); // 0 4 0 0 0 UNIT_ASSERT_VALUES_EQUAL(w.GetValue(), 4); // ^ - + now += TDuration::Minutes(5); // 0 0 0 0 0 - UNIT_ASSERT_VALUES_EQUAL(w.Update(now), 0); - } -} + UNIT_ASSERT_VALUES_EQUAL(w.Update(now), 0); + } +} diff --git a/library/cpp/sliding_window/ut/ya.make b/library/cpp/sliding_window/ut/ya.make index 3839a8dadcd..a82a24e5b72 100644 --- a/library/cpp/sliding_window/ut/ya.make +++ b/library/cpp/sliding_window/ut/ya.make @@ -1,9 +1,9 @@ UNITTEST_FOR(library/cpp/sliding_window) - -OWNER(g:kikimr) - -SRCS( - sliding_window_ut.cpp -) - -END() + +OWNER(g:kikimr) + +SRCS( + sliding_window_ut.cpp +) + +END() diff --git a/library/cpp/sliding_window/ya.make b/library/cpp/sliding_window/ya.make index 79aeaa06bb2..948ffdcb264 100644 --- a/library/cpp/sliding_window/ya.make +++ b/library/cpp/sliding_window/ya.make @@ -1,10 +1,10 @@ -LIBRARY() - -OWNER(g:kikimr) - -SRCS( - sliding_window.cpp - sliding_window.h -) - -END() +LIBRARY() + +OWNER(g:kikimr) + +SRCS( + sliding_window.cpp + sliding_window.h +) + +END() diff --git a/library/cpp/testing/common/env.h b/library/cpp/testing/common/env.h index 7b89aa1bed8..c1247ec96a4 100644 --- a/library/cpp/testing/common/env.h +++ b/library/cpp/testing/common/env.h @@ -23,16 +23,16 @@ TString BinaryPath(TStringBuf path); // @brief return true if environment is testenv otherwise false bool FromYaTest(); - + // @brief returns TestsData dir (from env:ARCADIA_TESTS_DATA_DIR or path to existing folder `arcadia_tests_data` within parent folders) TString GetArcadiaTestsData(); // @brief return current working dir (from env:TEST_WORK_PATH or cwd) TString GetWorkPath(); - + // @brief return tests output path (workdir + testing_out_stuff) TFsPath GetOutputPath(); - + // @brief return path from env:YA_TEST_RAM_DRIVE_PATH const TString& GetRamDrivePath(); diff --git a/library/cpp/testing/gmock_in_unittest/events.cpp b/library/cpp/testing/gmock_in_unittest/events.cpp index dbd65b727d7..2155fb0d81e 100644 --- a/library/cpp/testing/gmock_in_unittest/events.cpp +++ b/library/cpp/testing/gmock_in_unittest/events.cpp @@ -1,32 +1,32 @@ -#include "events.h" - +#include "events.h" + #include <library/cpp/testing/unittest/registar.h> - -#include <util/generic/strbuf.h> + +#include <util/generic/strbuf.h> #include <util/generic/string.h> -#include <util/string/builder.h> - -void TGMockTestEventListener::OnTestPartResult(const testing::TestPartResult& result) { - if (result.failed()) { +#include <util/string/builder.h> + +void TGMockTestEventListener::OnTestPartResult(const testing::TestPartResult& result) { + if (result.failed()) { const TString message = result.message(); const TString summary = result.summary(); - TStringBuilder msg; - if (result.file_name()) + TStringBuilder msg; + if (result.file_name()) msg << result.file_name() << TStringBuf(":"); - if (result.line_number() != -1) + if (result.line_number() != -1) msg << result.line_number() << TStringBuf(":"); - if (summary) { - if (msg) { + if 
(summary) { + if (msg) { msg << TStringBuf("\n"); - } - msg << summary; - } - if (message && summary != message) { - if (msg) { + } + msg << summary; + } + if (message && summary != message) { + if (msg) { msg << TStringBuf("\n"); - } - msg << message; - } - NUnitTest::NPrivate::RaiseError(result.summary(), msg, result.fatally_failed()); - } -} + } + msg << message; + } + NUnitTest::NPrivate::RaiseError(result.summary(), msg, result.fatally_failed()); + } +} diff --git a/library/cpp/testing/gmock_in_unittest/events.h b/library/cpp/testing/gmock_in_unittest/events.h index 84c10a93de7..11af7dd64cd 100644 --- a/library/cpp/testing/gmock_in_unittest/events.h +++ b/library/cpp/testing/gmock_in_unittest/events.h @@ -1,8 +1,8 @@ -#pragma once - -#include <gtest/gtest.h> - -class TGMockTestEventListener: public testing::EmptyTestEventListener { -public: - void OnTestPartResult(const testing::TestPartResult& result) override; -}; +#pragma once + +#include <gtest/gtest.h> + +class TGMockTestEventListener: public testing::EmptyTestEventListener { +public: + void OnTestPartResult(const testing::TestPartResult& result) override; +}; diff --git a/library/cpp/testing/gmock_in_unittest/example_ut/example_ut.cpp b/library/cpp/testing/gmock_in_unittest/example_ut/example_ut.cpp index 97f19050e42..a6c12b3e5aa 100644 --- a/library/cpp/testing/gmock_in_unittest/example_ut/example_ut.cpp +++ b/library/cpp/testing/gmock_in_unittest/example_ut/example_ut.cpp @@ -1,105 +1,105 @@ #include <library/cpp/testing/gmock_in_unittest/gmock.h> - + #include <library/cpp/testing/unittest/registar.h> - + #include <util/generic/string.h> - -// Set this variable to true if you want to see failures -///////////////////////////////////////////////////////// -static const bool fail = false; -///////////////////////////////////////////////////////// - -class ITestIface { -public: - virtual ~ITestIface() { - } - - virtual void Func1() = 0; - + +// Set this variable to true if you want to see failures +///////////////////////////////////////////////////////// +static const bool fail = false; +///////////////////////////////////////////////////////// + +class ITestIface { +public: + virtual ~ITestIface() { + } + + virtual void Func1() = 0; + virtual int Func2(const TString&) const = 0; -}; - -class TTestMock: public ITestIface { -public: +}; + +class TTestMock: public ITestIface { +public: MOCK_METHOD(void, Func1, (), (override)); MOCK_METHOD(int, Func2, (const TString&), (const, override)); -}; - -using namespace testing; - +}; + +using namespace testing; + Y_UNIT_TEST_SUITE(TExampleGMockTest) { Y_UNIT_TEST(TSimpleTest) { - TTestMock mock; - EXPECT_CALL(mock, Func1()) - .Times(AtLeast(1)); - - if (!fail) { - mock.Func1(); - } - } - + TTestMock mock; + EXPECT_CALL(mock, Func1()) + .Times(AtLeast(1)); + + if (!fail) { + mock.Func1(); + } + } + Y_UNIT_TEST(TNonExpectedCallTest) { - TTestMock mock; - EXPECT_CALL(mock, Func1()) - .Times(AtMost(1)); - mock.Func1(); - if (fail) { - mock.Func1(); - } - } - + TTestMock mock; + EXPECT_CALL(mock, Func1()) + .Times(AtMost(1)); + mock.Func1(); + if (fail) { + mock.Func1(); + } + } + Y_UNIT_TEST(TReturnValuesTest) { - TTestMock mock; + TTestMock mock; EXPECT_CALL(mock, Func2(TString("1"))) - .WillOnce(Return(1)) - .WillRepeatedly(Return(42)); - + .WillOnce(Return(1)) + .WillRepeatedly(Return(42)); + EXPECT_CALL(mock, Func2(TString("hello"))) - .WillOnce(Return(-1)); - - UNIT_ASSERT_VALUES_EQUAL(mock.Func2("hello"), -1); - - UNIT_ASSERT_VALUES_EQUAL(mock.Func2("1"), 1); - 
UNIT_ASSERT_VALUES_EQUAL(mock.Func2("1"), 42); - UNIT_ASSERT_VALUES_EQUAL(mock.Func2("1"), 42); - UNIT_ASSERT_VALUES_EQUAL(mock.Func2("1"), 42); - UNIT_ASSERT_VALUES_EQUAL(mock.Func2("1"), 42); - - if (fail) { - UNIT_ASSERT_VALUES_EQUAL(mock.Func2("hello"), -1); // expected to return -1 only once - } - } - + .WillOnce(Return(-1)); + + UNIT_ASSERT_VALUES_EQUAL(mock.Func2("hello"), -1); + + UNIT_ASSERT_VALUES_EQUAL(mock.Func2("1"), 1); + UNIT_ASSERT_VALUES_EQUAL(mock.Func2("1"), 42); + UNIT_ASSERT_VALUES_EQUAL(mock.Func2("1"), 42); + UNIT_ASSERT_VALUES_EQUAL(mock.Func2("1"), 42); + UNIT_ASSERT_VALUES_EQUAL(mock.Func2("1"), 42); + + if (fail) { + UNIT_ASSERT_VALUES_EQUAL(mock.Func2("hello"), -1); // expected to return -1 only once + } + } + Y_UNIT_TEST(TStrictCallSequenceTest) { - TTestMock mock; - { - InSequence seq; - EXPECT_CALL(mock, Func1()) - .Times(1); - EXPECT_CALL(mock, Func2(_)) - .Times(2) - .WillOnce(Return(1)) - .WillOnce(Return(2)); - EXPECT_CALL(mock, Func1()); - } - mock.Func1(); - UNIT_ASSERT_VALUES_EQUAL(mock.Func2("sample"), 1); - if (fail) { - mock.Func1(); - } - UNIT_ASSERT_VALUES_EQUAL(mock.Func2(""), 2); - if (!fail) { - mock.Func1(); - } - } - + TTestMock mock; + { + InSequence seq; + EXPECT_CALL(mock, Func1()) + .Times(1); + EXPECT_CALL(mock, Func2(_)) + .Times(2) + .WillOnce(Return(1)) + .WillOnce(Return(2)); + EXPECT_CALL(mock, Func1()); + } + mock.Func1(); + UNIT_ASSERT_VALUES_EQUAL(mock.Func2("sample"), 1); + if (fail) { + mock.Func1(); + } + UNIT_ASSERT_VALUES_EQUAL(mock.Func2(""), 2); + if (!fail) { + mock.Func1(); + } + } + Y_UNIT_TEST(TUninterestingMethodIsFailureTest) { - StrictMock<TTestMock> mock; - EXPECT_CALL(mock, Func1()) - .Times(1); - mock.Func1(); - if (fail) { - mock.Func1(); - } - } -} + StrictMock<TTestMock> mock; + EXPECT_CALL(mock, Func1()) + .Times(1); + mock.Func1(); + if (fail) { + mock.Func1(); + } + } +} diff --git a/library/cpp/testing/gmock_in_unittest/example_ut/ya.make b/library/cpp/testing/gmock_in_unittest/example_ut/ya.make index d2e5ee5d2a6..81c9ee3fcaa 100644 --- a/library/cpp/testing/gmock_in_unittest/example_ut/ya.make +++ b/library/cpp/testing/gmock_in_unittest/example_ut/ya.make @@ -1,13 +1,13 @@ -UNITTEST() - -OWNER(galaxycrab) - -PEERDIR( +UNITTEST() + +OWNER(galaxycrab) + +PEERDIR( library/cpp/testing/gmock_in_unittest -) - -SRCS( - example_ut.cpp -) - -END() +) + +SRCS( + example_ut.cpp +) + +END() diff --git a/library/cpp/testing/gmock_in_unittest/gmock.h b/library/cpp/testing/gmock_in_unittest/gmock.h index 31f6aee1c3d..65bfc5a5ed8 100644 --- a/library/cpp/testing/gmock_in_unittest/gmock.h +++ b/library/cpp/testing/gmock_in_unittest/gmock.h @@ -1,5 +1,5 @@ -#pragma once +#pragma once #include <library/cpp/testing/gtest_extensions/gtest_extensions.h> - -#include <gmock/gmock.h> + +#include <gmock/gmock.h> diff --git a/library/cpp/testing/gmock_in_unittest/registration.cpp b/library/cpp/testing/gmock_in_unittest/registration.cpp index c2872a4c27e..f6b9e516d1f 100644 --- a/library/cpp/testing/gmock_in_unittest/registration.cpp +++ b/library/cpp/testing/gmock_in_unittest/registration.cpp @@ -1,20 +1,20 @@ -#include "events.h" - -#include <gmock/gmock.h> - +#include "events.h" + +#include <gmock/gmock.h> + #include <library/cpp/testing/unittest/plugin.h> - -namespace { - class TGMockUnittestPlugin: public NUnitTest::NPlugin::IPlugin { - public: - void OnStartMain(int argc, char* argv[]) override { - testing::InitGoogleMock(&argc, argv); - testing::TestEventListeners& listeners = testing::UnitTest::GetInstance()->listeners(); - 
delete listeners.Release(listeners.default_result_printer()); - listeners.Append(new TGMockTestEventListener()); - } - }; - - NUnitTest::NPlugin::TPluginRegistrator registerGMock(new TGMockUnittestPlugin()); - + +namespace { + class TGMockUnittestPlugin: public NUnitTest::NPlugin::IPlugin { + public: + void OnStartMain(int argc, char* argv[]) override { + testing::InitGoogleMock(&argc, argv); + testing::TestEventListeners& listeners = testing::UnitTest::GetInstance()->listeners(); + delete listeners.Release(listeners.default_result_printer()); + listeners.Append(new TGMockTestEventListener()); + } + }; + + NUnitTest::NPlugin::TPluginRegistrator registerGMock(new TGMockUnittestPlugin()); + } diff --git a/library/cpp/testing/gmock_in_unittest/ya.make b/library/cpp/testing/gmock_in_unittest/ya.make index 5de68ad98dd..c8a6e33396b 100644 --- a/library/cpp/testing/gmock_in_unittest/ya.make +++ b/library/cpp/testing/gmock_in_unittest/ya.make @@ -1,17 +1,17 @@ -LIBRARY() - -OWNER(galaxycrab) - -PEERDIR( +LIBRARY() + +OWNER(galaxycrab) + +PEERDIR( contrib/restricted/googletest/googlemock contrib/restricted/googletest/googletest library/cpp/testing/gtest_extensions library/cpp/testing/unittest -) - -SRCS( - events.cpp - GLOBAL registration.cpp -) - -END() +) + +SRCS( + events.cpp + GLOBAL registration.cpp +) + +END() diff --git a/library/cpp/testing/unittest/plugin.cpp b/library/cpp/testing/unittest/plugin.cpp index 543112f7ac6..fd80eb48ac3 100644 --- a/library/cpp/testing/unittest/plugin.cpp +++ b/library/cpp/testing/unittest/plugin.cpp @@ -1,50 +1,50 @@ -#include "plugin.h" - -#include <util/generic/singleton.h> -#include <util/generic/vector.h> -#include <util/generic/utility.h> - -namespace NUnitTest { - namespace NPlugin { - namespace { - class TPlugins { - public: - void OnStartMain(int argc, char* argv[]) const { - for (const auto& plugin : Plugins) { - plugin->OnStartMain(argc, argv); - } - } - - void OnStopMain(int argc, char* argv[]) const { - for (const auto& plugin : Plugins) { - plugin->OnStopMain(argc, argv); - } - } - - void Register(TSimpleSharedPtr<IPlugin> plugin) { - Plugins.emplace_back(std::move(plugin)); - } - - static TPlugins& Instance() { - return *Singleton<TPlugins>(); - } - - private: +#include "plugin.h" + +#include <util/generic/singleton.h> +#include <util/generic/vector.h> +#include <util/generic/utility.h> + +namespace NUnitTest { + namespace NPlugin { + namespace { + class TPlugins { + public: + void OnStartMain(int argc, char* argv[]) const { + for (const auto& plugin : Plugins) { + plugin->OnStartMain(argc, argv); + } + } + + void OnStopMain(int argc, char* argv[]) const { + for (const auto& plugin : Plugins) { + plugin->OnStopMain(argc, argv); + } + } + + void Register(TSimpleSharedPtr<IPlugin> plugin) { + Plugins.emplace_back(std::move(plugin)); + } + + static TPlugins& Instance() { + return *Singleton<TPlugins>(); + } + + private: TVector<TSimpleSharedPtr<IPlugin>> Plugins; - }; - } // anonymous namespace - - TPluginRegistrator::TPluginRegistrator(TSimpleSharedPtr<IPlugin> plugin) { - TPlugins::Instance().Register(std::move(plugin)); - } - - void OnStartMain(int argc, char* argv[]) { - TPlugins::Instance().OnStartMain(argc, argv); - } - - void OnStopMain(int argc, char* argv[]) { - TPlugins::Instance().OnStopMain(argc, argv); - } - + }; + } // anonymous namespace + + TPluginRegistrator::TPluginRegistrator(TSimpleSharedPtr<IPlugin> plugin) { + TPlugins::Instance().Register(std::move(plugin)); + } + + void OnStartMain(int argc, char* argv[]) { + 
TPlugins::Instance().OnStartMain(argc, argv); + } + + void OnStopMain(int argc, char* argv[]) { + TPlugins::Instance().OnStopMain(argc, argv); + } + } } diff --git a/library/cpp/testing/unittest/plugin.h b/library/cpp/testing/unittest/plugin.h index 102f2c1469e..7cce973c787 100644 --- a/library/cpp/testing/unittest/plugin.h +++ b/library/cpp/testing/unittest/plugin.h @@ -1,29 +1,29 @@ -#pragma once - -#include <util/generic/ptr.h> - -namespace NUnitTest { +#pragma once + +#include <util/generic/ptr.h> + +namespace NUnitTest { // Plugins are deprecated, please use Y_TEST_HOOK_* from library/cpp/hook/hook.h - namespace NPlugin { - class IPlugin { - public: - virtual ~IPlugin() { - } - - virtual void OnStartMain(int /*argc*/, char* /*argv*/ []) { - } - - virtual void OnStopMain(int /*argc*/, char* /*argv*/ []) { - } - }; - - void OnStartMain(int argc, char* argv[]); - void OnStopMain(int argc, char* argv[]); - - class TPluginRegistrator { - public: - TPluginRegistrator(TSimpleSharedPtr<IPlugin> plugin); - }; - + namespace NPlugin { + class IPlugin { + public: + virtual ~IPlugin() { + } + + virtual void OnStartMain(int /*argc*/, char* /*argv*/ []) { + } + + virtual void OnStopMain(int /*argc*/, char* /*argv*/ []) { + } + }; + + void OnStartMain(int argc, char* argv[]); + void OnStopMain(int argc, char* argv[]); + + class TPluginRegistrator { + public: + TPluginRegistrator(TSimpleSharedPtr<IPlugin> plugin); + }; + } } diff --git a/library/cpp/testing/unittest/registar.cpp b/library/cpp/testing/unittest/registar.cpp index 3679b768ed4..fea174a65e6 100644 --- a/library/cpp/testing/unittest/registar.cpp +++ b/library/cpp/testing/unittest/registar.cpp @@ -1,13 +1,13 @@ -#include "registar.h" - +#include "registar.h" + #include <library/cpp/diff/diff.h> #include <library/cpp/colorizer/colors.h> -#include <util/generic/bt_exception.h> -#include <util/random/fast.h> +#include <util/generic/bt_exception.h> +#include <util/random/fast.h> #include <util/string/printf.h> -#include <util/system/backtrace.h> -#include <util/system/guard.h> +#include <util/system/backtrace.h> +#include <util/system/guard.h> #include <util/system/tls.h> #include <util/system/error.h> #include <util/string/cast.h> @@ -32,46 +32,46 @@ Y_POD_STATIC_THREAD(bool) UnittestThread; Y_POD_STATIC_THREAD(NUnitTest::TTestBase*) currentTest; -::NUnitTest::TRaiseErrorHandler RaiseErrorHandler; +::NUnitTest::TRaiseErrorHandler RaiseErrorHandler; void ::NUnitTest::NPrivate::RaiseError(const char* what, const TString& msg, bool fatalFailure) { Y_VERIFY(UnittestThread, "%s in non-unittest thread with message:\n%s", what, msg.data()); - Y_VERIFY(GetCurrentTest()); - - if (RaiseErrorHandler) { - RaiseErrorHandler(what, msg, fatalFailure); - return; - } - - // Default handler - TBackTrace bt; - bt.Capture(); + Y_VERIFY(GetCurrentTest()); + + if (RaiseErrorHandler) { + RaiseErrorHandler(what, msg, fatalFailure); + return; + } + + // Default handler + TBackTrace bt; + bt.Capture(); GetCurrentTest()->AddError(msg.data(), bt.PrintToString()); - if (::NUnitTest::ContinueOnFail || !fatalFailure) { - return; - } + if (::NUnitTest::ContinueOnFail || !fatalFailure) { + return; + } throw TAssertException(); } -void ::NUnitTest::SetRaiseErrorHandler(::NUnitTest::TRaiseErrorHandler handler) { - Y_VERIFY(UnittestThread); - RaiseErrorHandler = std::move(handler); -} - +void ::NUnitTest::SetRaiseErrorHandler(::NUnitTest::TRaiseErrorHandler handler) { + Y_VERIFY(UnittestThread); + RaiseErrorHandler = std::move(handler); +} + void 
::NUnitTest::NPrivate::SetUnittestThread(bool unittestThread) { Y_VERIFY(UnittestThread != unittestThread, "state check"); UnittestThread = unittestThread; } -void ::NUnitTest::NPrivate::SetCurrentTest(TTestBase* test) { - Y_VERIFY(!test || !currentTest, "state check"); - currentTest = test; -} - -NUnitTest::TTestBase* ::NUnitTest::NPrivate::GetCurrentTest() { - return currentTest; -} - +void ::NUnitTest::NPrivate::SetCurrentTest(TTestBase* test) { + Y_VERIFY(!test || !currentTest, "state check"); + currentTest = test; +} + +NUnitTest::TTestBase* ::NUnitTest::NPrivate::GetCurrentTest() { + return currentTest; +} + struct TDiffColorizer { NColorizer::TColors Colors; bool Reverse = false; @@ -321,12 +321,12 @@ void NUnitTest::TTestBase::AddError(const char* msg, TTestContext* context) { AddError(msg, TString(), context); } -void NUnitTest::TTestBase::RunAfterTest(std::function<void()> f) { - with_lock (AfterTestFunctionsLock_) { - AfterTestFunctions_.emplace_back(std::move(f)); - } -} - +void NUnitTest::TTestBase::RunAfterTest(std::function<void()> f) { + with_lock (AfterTestFunctionsLock_) { + AfterTestFunctions_.emplace_back(std::move(f)); + } +} + bool NUnitTest::TTestBase::CheckAccessTest(const char* test) { return Processor()->CheckAccessTest(Name(), test); } @@ -376,18 +376,18 @@ void NUnitTest::TTestBase::BeforeTest() { void NUnitTest::TTestBase::AfterTest() { TearDown(); - - TVector<std::function<void()>> afterTestFunctions; - with_lock (AfterTestFunctionsLock_) { - afterTestFunctions.swap(AfterTestFunctions_); - } - - for (auto i = afterTestFunctions.rbegin(); i != afterTestFunctions.rend(); ++i) { - std::function<void()>& f = *i; - if (f) { - f(); - } - } + + TVector<std::function<void()>> afterTestFunctions; + with_lock (AfterTestFunctionsLock_) { + afterTestFunctions.swap(AfterTestFunctions_); + } + + for (auto i = afterTestFunctions.rbegin(); i != afterTestFunctions.rend(); ++i) { + std::function<void()>& f = *i; + if (f) { + f(); + } + } } bool NUnitTest::TTestBase::GetIsForked() const { diff --git a/library/cpp/testing/unittest/registar.h b/library/cpp/testing/unittest/registar.h index 44517a00924..be38300e1ef 100644 --- a/library/cpp/testing/unittest/registar.h +++ b/library/cpp/testing/unittest/registar.h @@ -2,41 +2,41 @@ #include <library/cpp/dbg_output/dump.h> -#include <util/generic/bt_exception.h> +#include <util/generic/bt_exception.h> #include <util/generic/hash.h> -#include <util/generic/intrlist.h> +#include <util/generic/intrlist.h> #include <util/generic/map.h> #include <util/generic/ptr.h> #include <util/generic/set.h> #include <util/generic/typetraits.h> #include <util/generic/vector.h> -#include <util/generic/yexception.h> +#include <util/generic/yexception.h> #include <util/string/builder.h> #include <util/string/cast.h> #include <util/string/printf.h> -#include <util/system/defaults.h> +#include <util/system/defaults.h> #include <util/system/type_name.h> -#include <util/system/spinlock.h> -#include <util/system/src_location.h> - +#include <util/system/spinlock.h> +#include <util/system/src_location.h> + #include <util/system/rusage.h> #include <cmath> #include <cstdio> -#include <functional> - +#include <functional> + extern bool CheckExceptionMessage(const char*, TString&); namespace NUnitTest { class TTestBase; - + namespace NPrivate { void RaiseError(const char* what, const TString& msg, bool fatalFailure); void SetUnittestThread(bool); - void SetCurrentTest(TTestBase*); - TTestBase* GetCurrentTest(); + void SetCurrentTest(TTestBase*); + TTestBase* 
GetCurrentTest(); } extern bool ShouldColorizeDiff; @@ -45,21 +45,21 @@ namespace NUnitTest { TString GetFormatTag(const char* name); TString GetResetTag(); - // Raise error handler + // Raise error handler // Used for testing library/cpp/testing/unittest macroses - // and unittest helpers. - // For all other unittests standard handler is used - using TRaiseErrorHandler = std::function<void(const char*, const TString&, bool)>; - - void SetRaiseErrorHandler(TRaiseErrorHandler handler); - - inline void ClearRaiseErrorHandler() { - SetRaiseErrorHandler(TRaiseErrorHandler()); - } - - class TAssertException: public yexception { - }; - + // and unittest helpers. + // For all other unittests standard handler is used + using TRaiseErrorHandler = std::function<void(const char*, const TString&, bool)>; + + void SetRaiseErrorHandler(TRaiseErrorHandler handler); + + inline void ClearRaiseErrorHandler() { + SetRaiseErrorHandler(TRaiseErrorHandler()); + } + + class TAssertException: public yexception { + }; + class ITestSuiteProcessor; struct TTestContext { @@ -97,10 +97,10 @@ namespace NUnitTest { TTestContext* Context; }; - struct TFinish { + struct TFinish { const TTest* test; TTestContext* Context; - bool Success; + bool Success; }; ITestSuiteProcessor(); @@ -159,9 +159,9 @@ namespace NUnitTest { virtual void OnBeforeTest(const TTest* /*test*/); void AddTestError(const TTest& test); - + void AddTestFinish(const TTest& test); - + private: TMap<TString, size_t> TestErrors_; TMap<TString, size_t> CurTestErrors_; @@ -203,11 +203,11 @@ namespace NUnitTest { virtual void TearDown(); void AddError(const char* msg, const TString& backtrace = TString(), TTestContext* context = nullptr); - + void AddError(const char* msg, TTestContext* context); - - void RunAfterTest(std::function<void()> f); // function like atexit to run after current unit test - + + void RunAfterTest(std::function<void()> f); // function like atexit to run after current unit test + protected: bool CheckAccessTest(const char* test); @@ -243,10 +243,10 @@ namespace NUnitTest { private: TTestFactory* Parent_; - size_t TestErrors_; - const char* CurrentSubtest_; - TAdaptiveLock AfterTestFunctionsLock_; - TVector<std::function<void()>> AfterTestFunctions_; + size_t TestErrors_; + const char* CurrentSubtest_; + TAdaptiveLock AfterTestFunctionsLock_; + TVector<std::function<void()>> AfterTestFunctions_; }; #define UNIT_TEST_SUITE(N) \ @@ -304,8 +304,8 @@ private: \ thiz->F(); \ } \ }; \ - this->TTestBase::Run(std::bind(&T##F##Caller::X, this, context), StaticName(), (#F), FF); \ - } + this->TTestBase::Run(std::bind(&T##F##Caller::X, this, context), StaticName(), (#F), FF); \ + } #define UNIT_TEST_IMPL(F, FF) \ UNIT_TEST_CHECK_TEST_IS_DECLARED_ONLY_ONCE(F) { \ @@ -336,20 +336,20 @@ private: \ /* forked process (or main without "--fork-tests") treats some exceptions as success - it's exception test! 
*/ \ } else { \ NUnitTest::TTestContext context(this->TTestBase::Processor()); \ - if (this->CheckAccessTest((#F))) { \ - try { \ - UNIT_TEST_RUN(F, false, context) \ - this->AddError("exception expected", &context); \ - } catch (const ::NUnitTest::TAssertException&) { \ - } catch (const E& e) { \ + if (this->CheckAccessTest((#F))) { \ + try { \ + UNIT_TEST_RUN(F, false, context) \ + this->AddError("exception expected", &context); \ + } catch (const ::NUnitTest::TAssertException&) { \ + } catch (const E& e) { \ TString err; \ - if (!CheckExceptionMessage(e.what(), err)) \ - this->AddError(err.c_str(), &context); \ - } catch (const std::exception& e) { \ - this->AddError(e.what(), &context); \ - } catch (...) { \ - this->AddError("non-std exception!", &context); \ - } \ + if (!CheckExceptionMessage(e.what(), err)) \ + this->AddError(err.c_str(), &context); \ + } catch (const std::exception& e) { \ + this->AddError(e.what(), &context); \ + } catch (...) { \ + this->AddError("non-std exception!", &context); \ + } \ this->Finish((#F), &context); \ } \ } @@ -366,13 +366,13 @@ public: \ ::NUnitTest::NPrivate::RaiseError(R, ::TStringBuilder() << R << " at " << __LOCATION__ << ", " << __PRETTY_FUNCTION__ << ": " << M, true); \ } while (false) -#define UNIT_FAIL_NONFATAL_IMPL(R, M) \ - do { \ +#define UNIT_FAIL_NONFATAL_IMPL(R, M) \ + do { \ ::NUnitTest::NPrivate::RaiseError(R, ::TStringBuilder() << R << " at " << __LOCATION__ << ", " << __PRETTY_FUNCTION__ << ": " << M, false); \ - } while (false) - + } while (false) + #define UNIT_FAIL(M) UNIT_FAIL_IMPL("forced failure", M) -#define UNIT_FAIL_NONFATAL(M) UNIT_FAIL_NONFATAL_IMPL("forced failure", M) +#define UNIT_FAIL_NONFATAL(M) UNIT_FAIL_NONFATAL_IMPL("forced failure", M) //types #define UNIT_ASSERT_TYPES_EQUAL(A, B) \ @@ -536,20 +536,20 @@ public: \ #define UNIT_ASSERT_GE(A, B) UNIT_ASSERT_GE_C(A, B, "") -#define UNIT_CHECK_GENERATED_EXCEPTION_C(A, E, C) \ +#define UNIT_CHECK_GENERATED_EXCEPTION_C(A, E, C) \ do { \ - try { \ - (void)(A); \ - } catch (const ::NUnitTest::TAssertException&) { \ - throw; \ - } catch (const E&) { \ - break; \ - } \ - UNIT_ASSERT_C(0, "Exception hasn't been thrown, but it should have happened " << C); \ + try { \ + (void)(A); \ + } catch (const ::NUnitTest::TAssertException&) { \ + throw; \ + } catch (const E&) { \ + break; \ + } \ + UNIT_ASSERT_C(0, "Exception hasn't been thrown, but it should have happened " << C); \ } while (false) -#define UNIT_CHECK_GENERATED_EXCEPTION(A, E) UNIT_CHECK_GENERATED_EXCEPTION_C(A, E, "") - +#define UNIT_CHECK_GENERATED_EXCEPTION(A, E) UNIT_CHECK_GENERATED_EXCEPTION_C(A, E, "") + #define UNIT_CHECK_GENERATED_NO_EXCEPTION_C(A, E, C) \ do { \ try { \ @@ -562,7 +562,7 @@ public: \ } while (false) #define UNIT_CHECK_GENERATED_NO_EXCEPTION(A, E) UNIT_CHECK_GENERATED_NO_EXCEPTION_C(A, E, "and exception message is:\n" << CurrentExceptionMessage()) - + // Same as UNIT_ASSERT_EXCEPTION_SATISFIES but prints additional string C when nothing was thrown #define UNIT_ASSERT_EXCEPTION_SATISFIES_C(A, E, pred, C) \ do { \ @@ -588,7 +588,7 @@ public: \ #A << " did not throw any exception" \ << " (expected " << #E << ") " << C); \ } \ - } while (false) + } while (false) // Assert that a specific exception is thrown and satisfies predicate pred(e), where e is the exception instance. 
// Example: @@ -618,15 +618,15 @@ public: \ } while (false) // Assert that a specific exception is thrown and CurrentExceptionMessage() contains substr -#define UNIT_ASSERT_EXCEPTION_CONTAINS(A, E, substr) \ - UNIT_ASSERT_EXCEPTION_CONTAINS_C(A, E, substr, "") - +#define UNIT_ASSERT_EXCEPTION_CONTAINS(A, E, substr) \ + UNIT_ASSERT_EXCEPTION_CONTAINS_C(A, E, substr, "") + // Same as UNIT_ASSERT_EXCEPTION but prints additional string C when nothing was thrown #define UNIT_ASSERT_EXCEPTION_C(A, E, C) UNIT_ASSERT_EXCEPTION_SATISFIES_C(A, E, [](const E&){ return true; }, C) - + // Assert that a specific exception is thrown -#define UNIT_ASSERT_EXCEPTION(A, E) UNIT_ASSERT_EXCEPTION_C(A, E, "") - +#define UNIT_ASSERT_EXCEPTION(A, E) UNIT_ASSERT_EXCEPTION_C(A, E, "") + #define UNIT_ASSERT_NO_EXCEPTION_RESULT_C(A, C) \ [&] () mutable -> decltype(A) { \ static_assert(!std::is_void_v<decltype(A)>); \ @@ -653,10 +653,10 @@ public: \ } catch (...) { \ UNIT_FAIL_IMPL("exception-free assertion failed", Sprintf("%s throws %s\nException message: %s", #A, (::TStringBuilder() << C).data(), CurrentExceptionMessage().data())); \ } \ - } while (false) - -#define UNIT_ASSERT_NO_EXCEPTION(A) UNIT_ASSERT_NO_EXCEPTION_C(A, "") + } while (false) +#define UNIT_ASSERT_NO_EXCEPTION(A) UNIT_ASSERT_NO_EXCEPTION_C(A, "") + namespace NPrivate { template <class T, class U, bool Integers> struct TCompareValuesImpl { @@ -719,7 +719,7 @@ public: \ } \ UNIT_FAIL_IMPL("assertion failed", failMsg); \ } \ - } while (false) + } while (false) #define UNIT_ASSERT_VALUES_EQUAL_C(A, B, C) \ UNIT_ASSERT_VALUES_EQUAL_IMPL(A, B, C, true, "==", "!=") @@ -730,28 +730,28 @@ public: \ #define UNIT_ASSERT_VALUES_EQUAL(A, B) UNIT_ASSERT_VALUES_EQUAL_C(A, B, "") #define UNIT_ASSERT_VALUES_UNEQUAL(A, B) UNIT_ASSERT_VALUES_UNEQUAL_C(A, B, "") -// Checks that test will fail while executing given expression -// Macro for using in unitests for ut helpers -#define UNIT_ASSERT_TEST_FAILS_C(A, C) \ - do { \ - ::NUnitTest::TUnitTestFailChecker checker; \ - try { \ - auto guard = checker.InvokeGuard(); \ - (void)(A); \ - } catch (...) { \ - UNIT_FAIL_IMPL("fail test assertion failure", \ - "code is expected to generate test failure, " \ - "but it throws exception with message: " \ +// Checks that test will fail while executing given expression +// Macro for using in unitests for ut helpers +#define UNIT_ASSERT_TEST_FAILS_C(A, C) \ + do { \ + ::NUnitTest::TUnitTestFailChecker checker; \ + try { \ + auto guard = checker.InvokeGuard(); \ + (void)(A); \ + } catch (...) 
{ \ + UNIT_FAIL_IMPL("fail test assertion failure", \ + "code is expected to generate test failure, " \ + "but it throws exception with message: " \ << CurrentExceptionMessage()); \ - } \ - if (!checker.Failed()) { \ - UNIT_FAIL_IMPL("fail test assertion failure", \ - "code is expected to generate test failure"); \ - } \ - } while (false) - -#define UNIT_ASSERT_TEST_FAILS(A) UNIT_ASSERT_TEST_FAILS_C(A, "") - + } \ + if (!checker.Failed()) { \ + UNIT_FAIL_IMPL("fail test assertion failure", \ + "code is expected to generate test failure"); \ + } \ + } while (false) + +#define UNIT_ASSERT_TEST_FAILS(A) UNIT_ASSERT_TEST_FAILS_C(A, "") + #define UNIT_ADD_METRIC(name, value) ut_context.Metrics[name] = value class TTestFactory { @@ -840,77 +840,77 @@ public: \ using TBaseFixture = TBaseTestCase; - // Class for checking that code raises unittest failure - class TUnitTestFailChecker { - public: - struct TInvokeGuard { - explicit TInvokeGuard(TUnitTestFailChecker& parent) - : Parent(&parent) - { - Parent->SetHandler(); - } - + // Class for checking that code raises unittest failure + class TUnitTestFailChecker { + public: + struct TInvokeGuard { + explicit TInvokeGuard(TUnitTestFailChecker& parent) + : Parent(&parent) + { + Parent->SetHandler(); + } + TInvokeGuard(TInvokeGuard&& guard) noexcept - : Parent(guard.Parent) - { - guard.Parent = nullptr; - } - - ~TInvokeGuard() { - if (Parent) { - ClearRaiseErrorHandler(); - } - } - - TUnitTestFailChecker* Parent; - }; - - TUnitTestFailChecker() = default; - TUnitTestFailChecker(const TUnitTestFailChecker&) = delete; - TUnitTestFailChecker(TUnitTestFailChecker&&) = delete; - - TInvokeGuard InvokeGuard() { - return TInvokeGuard(*this); - } - - const TString& What() const { - return What_; - } - - const TString& Msg() const { - return Msg_; - } - - bool FatalFailure() const { - return FatalFailure_; - } - - bool Failed() const { - return Failed_; - } - - private: - void Handler(const char* what, const TString& msg, bool fatalFailure) { - What_ = what; - Msg_ = msg; - FatalFailure_ = fatalFailure; - Failed_ = true; - } - - void SetHandler() { - TRaiseErrorHandler handler = [this](const char* what, const TString& msg, bool fatalFailure) { - Handler(what, msg, fatalFailure); - }; - SetRaiseErrorHandler(std::move(handler)); - } - - private: - TString What_; - TString Msg_; - bool FatalFailure_ = false; - bool Failed_ = false; - }; - + : Parent(guard.Parent) + { + guard.Parent = nullptr; + } + + ~TInvokeGuard() { + if (Parent) { + ClearRaiseErrorHandler(); + } + } + + TUnitTestFailChecker* Parent; + }; + + TUnitTestFailChecker() = default; + TUnitTestFailChecker(const TUnitTestFailChecker&) = delete; + TUnitTestFailChecker(TUnitTestFailChecker&&) = delete; + + TInvokeGuard InvokeGuard() { + return TInvokeGuard(*this); + } + + const TString& What() const { + return What_; + } + + const TString& Msg() const { + return Msg_; + } + + bool FatalFailure() const { + return FatalFailure_; + } + + bool Failed() const { + return Failed_; + } + + private: + void Handler(const char* what, const TString& msg, bool fatalFailure) { + What_ = what; + Msg_ = msg; + FatalFailure_ = fatalFailure; + Failed_ = true; + } + + void SetHandler() { + TRaiseErrorHandler handler = [this](const char* what, const TString& msg, bool fatalFailure) { + Handler(what, msg, fatalFailure); + }; + SetRaiseErrorHandler(std::move(handler)); + } + + private: + TString What_; + TString Msg_; + bool FatalFailure_ = false; + bool Failed_ = false; + }; + #define UNIT_TEST_SUITE_REGISTRATION(T) \ static 
const ::NUnitTest::TTestBaseFactory<T> Y_GENERATE_UNIQUE_ID(UTREG_); diff --git a/library/cpp/testing/unittest/registar_ut.cpp b/library/cpp/testing/unittest/registar_ut.cpp index 1f36d53abbe..9442eb3d0ad 100644 --- a/library/cpp/testing/unittest/registar_ut.cpp +++ b/library/cpp/testing/unittest/registar_ut.cpp @@ -1,132 +1,132 @@ #include <library/cpp/testing/unittest/registar.h> - + Y_UNIT_TEST_SUITE(TUnitTestMacroTest) { Y_UNIT_TEST(Assert) { - auto unitAssert = [] { - UNIT_ASSERT(false); - }; - UNIT_ASSERT_TEST_FAILS(unitAssert()); - - UNIT_ASSERT(true); - } - + auto unitAssert = [] { + UNIT_ASSERT(false); + }; + UNIT_ASSERT_TEST_FAILS(unitAssert()); + + UNIT_ASSERT(true); + } + Y_UNIT_TEST(TypesEqual) { - auto typesEqual = [] { - UNIT_ASSERT_TYPES_EQUAL(int, long); - }; - UNIT_ASSERT_TEST_FAILS(typesEqual()); - - UNIT_ASSERT_TYPES_EQUAL(TString, TString); - } - + auto typesEqual = [] { + UNIT_ASSERT_TYPES_EQUAL(int, long); + }; + UNIT_ASSERT_TEST_FAILS(typesEqual()); + + UNIT_ASSERT_TYPES_EQUAL(TString, TString); + } + Y_UNIT_TEST(DoublesEqual) { - auto doublesEqual = [](double d1, double d2, double precision) { - UNIT_ASSERT_DOUBLES_EQUAL(d1, d2, precision); - }; - UNIT_ASSERT_TEST_FAILS(doublesEqual(0.0, 0.5, 0.1)); - UNIT_ASSERT_TEST_FAILS(doublesEqual(0.1, -0.1, 0.1)); - - UNIT_ASSERT_DOUBLES_EQUAL(0.0, 0.01, 0.1); - UNIT_ASSERT_DOUBLES_EQUAL(0.01, 0.0, 0.1); + auto doublesEqual = [](double d1, double d2, double precision) { + UNIT_ASSERT_DOUBLES_EQUAL(d1, d2, precision); + }; + UNIT_ASSERT_TEST_FAILS(doublesEqual(0.0, 0.5, 0.1)); + UNIT_ASSERT_TEST_FAILS(doublesEqual(0.1, -0.1, 0.1)); + + UNIT_ASSERT_DOUBLES_EQUAL(0.0, 0.01, 0.1); + UNIT_ASSERT_DOUBLES_EQUAL(0.01, 0.0, 0.1); constexpr auto nan = std::numeric_limits<double>::quiet_NaN(); UNIT_ASSERT_TEST_FAILS(doublesEqual(nan, 0.5, 0.1)); UNIT_ASSERT_TEST_FAILS(doublesEqual(0.5, nan, 0.1)); UNIT_ASSERT_DOUBLES_EQUAL(nan, nan, 0.1); - } - + } + Y_UNIT_TEST(StringsEqual) { - auto stringsEqual = [](auto s1, auto s2) { - UNIT_ASSERT_STRINGS_EQUAL(s1, s2); - }; - UNIT_ASSERT_TEST_FAILS(stringsEqual("q", "w")); - UNIT_ASSERT_TEST_FAILS(stringsEqual("q", TString("w"))); - UNIT_ASSERT_TEST_FAILS(stringsEqual(TString("q"), "w")); - UNIT_ASSERT_TEST_FAILS(stringsEqual(TString("a"), TString("b"))); + auto stringsEqual = [](auto s1, auto s2) { + UNIT_ASSERT_STRINGS_EQUAL(s1, s2); + }; + UNIT_ASSERT_TEST_FAILS(stringsEqual("q", "w")); + UNIT_ASSERT_TEST_FAILS(stringsEqual("q", TString("w"))); + UNIT_ASSERT_TEST_FAILS(stringsEqual(TString("q"), "w")); + UNIT_ASSERT_TEST_FAILS(stringsEqual(TString("a"), TString("b"))); UNIT_ASSERT_TEST_FAILS(stringsEqual(TString("a"), TStringBuf("b"))); UNIT_ASSERT_TEST_FAILS(stringsEqual("a", TStringBuf("b"))); UNIT_ASSERT_TEST_FAILS(stringsEqual(TStringBuf("a"), "b")); - - TString empty; - TStringBuf emptyBuf; - UNIT_ASSERT_STRINGS_EQUAL("", empty); - UNIT_ASSERT_STRINGS_EQUAL(empty, emptyBuf); - UNIT_ASSERT_STRINGS_EQUAL("", static_cast<const char*>(nullptr)); - } - + + TString empty; + TStringBuf emptyBuf; + UNIT_ASSERT_STRINGS_EQUAL("", empty); + UNIT_ASSERT_STRINGS_EQUAL(empty, emptyBuf); + UNIT_ASSERT_STRINGS_EQUAL("", static_cast<const char*>(nullptr)); + } + Y_UNIT_TEST(StringContains) { - auto stringContains = [](auto s, auto substr) { - UNIT_ASSERT_STRING_CONTAINS(s, substr); - }; - UNIT_ASSERT_TEST_FAILS(stringContains("", "a")); - UNIT_ASSERT_TEST_FAILS(stringContains("lurkmore", "moar")); - - UNIT_ASSERT_STRING_CONTAINS("", ""); - UNIT_ASSERT_STRING_CONTAINS("a", ""); - 
UNIT_ASSERT_STRING_CONTAINS("failure", "fail"); - UNIT_ASSERT_STRING_CONTAINS("lurkmore", "more"); - } - + auto stringContains = [](auto s, auto substr) { + UNIT_ASSERT_STRING_CONTAINS(s, substr); + }; + UNIT_ASSERT_TEST_FAILS(stringContains("", "a")); + UNIT_ASSERT_TEST_FAILS(stringContains("lurkmore", "moar")); + + UNIT_ASSERT_STRING_CONTAINS("", ""); + UNIT_ASSERT_STRING_CONTAINS("a", ""); + UNIT_ASSERT_STRING_CONTAINS("failure", "fail"); + UNIT_ASSERT_STRING_CONTAINS("lurkmore", "more"); + } + Y_UNIT_TEST(NoDiff) { - auto noDiff = [](auto s1, auto s2) { - UNIT_ASSERT_NO_DIFF(s1, s2); - }; - UNIT_ASSERT_TEST_FAILS(noDiff("q", "w")); - UNIT_ASSERT_TEST_FAILS(noDiff("q", "")); - - UNIT_ASSERT_NO_DIFF("", ""); - UNIT_ASSERT_NO_DIFF("a", "a"); - } - + auto noDiff = [](auto s1, auto s2) { + UNIT_ASSERT_NO_DIFF(s1, s2); + }; + UNIT_ASSERT_TEST_FAILS(noDiff("q", "w")); + UNIT_ASSERT_TEST_FAILS(noDiff("q", "")); + + UNIT_ASSERT_NO_DIFF("", ""); + UNIT_ASSERT_NO_DIFF("a", "a"); + } + Y_UNIT_TEST(StringsUnequal) { - auto stringsUnequal = [](auto s1, auto s2) { - UNIT_ASSERT_STRINGS_UNEQUAL(s1, s2); - }; - UNIT_ASSERT_TEST_FAILS(stringsUnequal("1", "1")); - UNIT_ASSERT_TEST_FAILS(stringsUnequal("", "")); - UNIT_ASSERT_TEST_FAILS(stringsUnequal("42", TString("42"))); - UNIT_ASSERT_TEST_FAILS(stringsUnequal(TString("4"), "4")); + auto stringsUnequal = [](auto s1, auto s2) { + UNIT_ASSERT_STRINGS_UNEQUAL(s1, s2); + }; + UNIT_ASSERT_TEST_FAILS(stringsUnequal("1", "1")); + UNIT_ASSERT_TEST_FAILS(stringsUnequal("", "")); + UNIT_ASSERT_TEST_FAILS(stringsUnequal("42", TString("42"))); + UNIT_ASSERT_TEST_FAILS(stringsUnequal(TString("4"), "4")); UNIT_ASSERT_TEST_FAILS(stringsUnequal("d", TStringBuf("d"))); UNIT_ASSERT_TEST_FAILS(stringsUnequal(TStringBuf("yandex"), "yandex")); UNIT_ASSERT_TEST_FAILS(stringsUnequal(TStringBuf("index"), TString("index"))); UNIT_ASSERT_TEST_FAILS(stringsUnequal(TString("diff"), TStringBuf("diff"))); - - UNIT_ASSERT_STRINGS_UNEQUAL("1", "2"); - UNIT_ASSERT_STRINGS_UNEQUAL("", "3"); + + UNIT_ASSERT_STRINGS_UNEQUAL("1", "2"); + UNIT_ASSERT_STRINGS_UNEQUAL("", "3"); UNIT_ASSERT_STRINGS_UNEQUAL("green", TStringBuf("red")); UNIT_ASSERT_STRINGS_UNEQUAL(TStringBuf("solomon"), "golovan"); - UNIT_ASSERT_STRINGS_UNEQUAL("d", TString("f")); - UNIT_ASSERT_STRINGS_UNEQUAL(TString("yandex"), "index"); + UNIT_ASSERT_STRINGS_UNEQUAL("d", TString("f")); + UNIT_ASSERT_STRINGS_UNEQUAL(TString("yandex"), "index"); UNIT_ASSERT_STRINGS_UNEQUAL(TString("mail"), TStringBuf("yandex")); UNIT_ASSERT_STRINGS_UNEQUAL(TStringBuf("C++"), TString("python")); - } - + } + Y_UNIT_TEST(Equal) { - auto equal = [](auto v1, auto v2) { - UNIT_ASSERT_EQUAL(v1, v2); - }; - UNIT_ASSERT_TEST_FAILS(equal("1", TString("2"))); - UNIT_ASSERT_TEST_FAILS(equal(1, 2)); - UNIT_ASSERT_TEST_FAILS(equal(42ul, static_cast<unsigned short>(24))); - - UNIT_ASSERT_EQUAL("abc", TString("abc")); - UNIT_ASSERT_EQUAL(12l, 12); - UNIT_ASSERT_EQUAL(55, 55); - } - + auto equal = [](auto v1, auto v2) { + UNIT_ASSERT_EQUAL(v1, v2); + }; + UNIT_ASSERT_TEST_FAILS(equal("1", TString("2"))); + UNIT_ASSERT_TEST_FAILS(equal(1, 2)); + UNIT_ASSERT_TEST_FAILS(equal(42ul, static_cast<unsigned short>(24))); + + UNIT_ASSERT_EQUAL("abc", TString("abc")); + UNIT_ASSERT_EQUAL(12l, 12); + UNIT_ASSERT_EQUAL(55, 55); + } + Y_UNIT_TEST(Unequal) { - auto unequal = [](auto v1, auto v2) { - UNIT_ASSERT_UNEQUAL(v1, v2); - }; - UNIT_ASSERT_TEST_FAILS(unequal("x", TString("x"))); - UNIT_ASSERT_TEST_FAILS(unequal(1, 1)); - 
UNIT_ASSERT_TEST_FAILS(unequal(static_cast<unsigned short>(42), 42ul)); - - UNIT_ASSERT_UNEQUAL("abc", TString("cba")); - UNIT_ASSERT_UNEQUAL(12l, 10); - UNIT_ASSERT_UNEQUAL(33, 50); - } - + auto unequal = [](auto v1, auto v2) { + UNIT_ASSERT_UNEQUAL(v1, v2); + }; + UNIT_ASSERT_TEST_FAILS(unequal("x", TString("x"))); + UNIT_ASSERT_TEST_FAILS(unequal(1, 1)); + UNIT_ASSERT_TEST_FAILS(unequal(static_cast<unsigned short>(42), 42ul)); + + UNIT_ASSERT_UNEQUAL("abc", TString("cba")); + UNIT_ASSERT_UNEQUAL(12l, 10); + UNIT_ASSERT_UNEQUAL(33, 50); + } + Y_UNIT_TEST(LessThan) { auto lt = [](auto v1, auto v2) { UNIT_ASSERT_LT(v1, v2); @@ -236,45 +236,45 @@ Y_UNIT_TEST_SUITE(TUnitTestMacroTest) { } Y_UNIT_TEST(ValuesEqual) { - auto valuesEqual = [](auto v1, auto v2) { - UNIT_ASSERT_VALUES_EQUAL(v1, v2); - }; - UNIT_ASSERT_TEST_FAILS(valuesEqual(1, 2)); - UNIT_ASSERT_TEST_FAILS(valuesEqual(1l, static_cast<short>(2))); - - UNIT_ASSERT_VALUES_EQUAL("yandex", TString("yandex")); - UNIT_ASSERT_VALUES_EQUAL(1.0, 1.0); - } - + auto valuesEqual = [](auto v1, auto v2) { + UNIT_ASSERT_VALUES_EQUAL(v1, v2); + }; + UNIT_ASSERT_TEST_FAILS(valuesEqual(1, 2)); + UNIT_ASSERT_TEST_FAILS(valuesEqual(1l, static_cast<short>(2))); + + UNIT_ASSERT_VALUES_EQUAL("yandex", TString("yandex")); + UNIT_ASSERT_VALUES_EQUAL(1.0, 1.0); + } + Y_UNIT_TEST(ValuesUnequal) { - auto valuesUnequal = [](auto v1, auto v2) { - UNIT_ASSERT_VALUES_UNEQUAL(v1, v2); - }; - UNIT_ASSERT_TEST_FAILS(valuesUnequal(5, 5)); - UNIT_ASSERT_TEST_FAILS(valuesUnequal(static_cast<char>(5), 5l)); - TString test("test"); + auto valuesUnequal = [](auto v1, auto v2) { + UNIT_ASSERT_VALUES_UNEQUAL(v1, v2); + }; + UNIT_ASSERT_TEST_FAILS(valuesUnequal(5, 5)); + UNIT_ASSERT_TEST_FAILS(valuesUnequal(static_cast<char>(5), 5l)); + TString test("test"); UNIT_ASSERT_TEST_FAILS(valuesUnequal("test", test.data())); - - UNIT_ASSERT_VALUES_UNEQUAL("UNIT_ASSERT_VALUES_UNEQUAL", "UNIT_ASSERT_VALUES_EQUAL"); - UNIT_ASSERT_VALUES_UNEQUAL(1.0, 1.1); - } - - class TTestException: public yexception { - public: - TTestException(const TString& text = "test exception", bool throwMe = true) - : ThrowMe(throwMe) - { - *this << text; - } - - virtual ~TTestException() = default; - - virtual void Throw() { - if (ThrowMe) { - throw *this; - } - } - + + UNIT_ASSERT_VALUES_UNEQUAL("UNIT_ASSERT_VALUES_UNEQUAL", "UNIT_ASSERT_VALUES_EQUAL"); + UNIT_ASSERT_VALUES_UNEQUAL(1.0, 1.1); + } + + class TTestException: public yexception { + public: + TTestException(const TString& text = "test exception", bool throwMe = true) + : ThrowMe(throwMe) + { + *this << text; + } + + virtual ~TTestException() = default; + + virtual void Throw() { + if (ThrowMe) { + throw *this; + } + } + std::string ThrowStr() { if (ThrowMe) { throw *this; @@ -283,24 +283,24 @@ Y_UNIT_TEST_SUITE(TUnitTestMacroTest) { return {}; } - void AssertNoException() { - UNIT_ASSERT_NO_EXCEPTION(Throw()); - } - + void AssertNoException() { + UNIT_ASSERT_NO_EXCEPTION(Throw()); + } + void AssertNoExceptionRet() { const TString res = UNIT_ASSERT_NO_EXCEPTION_RESULT(ThrowStr()); } - template <class TExpectedException> - void AssertException() { - UNIT_ASSERT_EXCEPTION(Throw(), TExpectedException); - } - - template <class TExpectedException, class T> - void AssertExceptionContains(const T& substr) { - UNIT_ASSERT_EXCEPTION_CONTAINS(Throw(), TExpectedException, substr); - } - + template <class TExpectedException> + void AssertException() { + UNIT_ASSERT_EXCEPTION(Throw(), TExpectedException); + } + + template <class TExpectedException, class T> + 
void AssertExceptionContains(const T& substr) { + UNIT_ASSERT_EXCEPTION_CONTAINS(Throw(), TExpectedException, substr); + } + template <class TExpectedException, class P> void AssertExceptionSatisfies(const P& predicate) { UNIT_ASSERT_EXCEPTION_SATISFIES(Throw(), TExpectedException, predicate); @@ -310,58 +310,58 @@ Y_UNIT_TEST_SUITE(TUnitTestMacroTest) { return 5; // just some value for predicate testing } - bool ThrowMe; - }; - - class TOtherTestException: public TTestException { - public: - using TTestException::TTestException; - - // Throws other type of exception - void Throw() override { - if (ThrowMe) { - throw *this; - } - } - }; - + bool ThrowMe; + }; + + class TOtherTestException: public TTestException { + public: + using TTestException::TTestException; + + // Throws other type of exception + void Throw() override { + if (ThrowMe) { + throw *this; + } + } + }; + Y_UNIT_TEST(Exception) { - UNIT_ASSERT_TEST_FAILS(TTestException("", false).AssertException<TTestException>()); - UNIT_ASSERT_TEST_FAILS(TTestException().AssertException<TOtherTestException>()); - - UNIT_ASSERT_EXCEPTION(TOtherTestException().Throw(), TTestException); - UNIT_ASSERT_EXCEPTION(TTestException().Throw(), TTestException); - } - + UNIT_ASSERT_TEST_FAILS(TTestException("", false).AssertException<TTestException>()); + UNIT_ASSERT_TEST_FAILS(TTestException().AssertException<TOtherTestException>()); + + UNIT_ASSERT_EXCEPTION(TOtherTestException().Throw(), TTestException); + UNIT_ASSERT_EXCEPTION(TTestException().Throw(), TTestException); + } + Y_UNIT_TEST(ExceptionAssertionContainsOtherExceptionMessage) { - NUnitTest::TUnitTestFailChecker checker; - { - auto guard = checker.InvokeGuard(); - TTestException("custom exception message").AssertException<TOtherTestException>(); - } - UNIT_ASSERT(checker.Failed()); - UNIT_ASSERT_STRING_CONTAINS(checker.Msg(), "custom exception message"); - } - + NUnitTest::TUnitTestFailChecker checker; + { + auto guard = checker.InvokeGuard(); + TTestException("custom exception message").AssertException<TOtherTestException>(); + } + UNIT_ASSERT(checker.Failed()); + UNIT_ASSERT_STRING_CONTAINS(checker.Msg(), "custom exception message"); + } + Y_UNIT_TEST(NoException) { - UNIT_ASSERT_TEST_FAILS(TTestException().AssertNoException()); + UNIT_ASSERT_TEST_FAILS(TTestException().AssertNoException()); UNIT_ASSERT_TEST_FAILS(TTestException().AssertNoExceptionRet()); - - UNIT_ASSERT_NO_EXCEPTION(TTestException("", false).Throw()); - } - + + UNIT_ASSERT_NO_EXCEPTION(TTestException("", false).Throw()); + } + Y_UNIT_TEST(ExceptionContains) { - UNIT_ASSERT_TEST_FAILS(TTestException("abc").AssertExceptionContains<TTestException>("cba")); + UNIT_ASSERT_TEST_FAILS(TTestException("abc").AssertExceptionContains<TTestException>("cba")); UNIT_ASSERT_TEST_FAILS(TTestException("abc").AssertExceptionContains<TTestException>(TStringBuf("cba"))); - UNIT_ASSERT_TEST_FAILS(TTestException("abc").AssertExceptionContains<TTestException>(TString("cba"))); - UNIT_ASSERT_TEST_FAILS(TTestException("abc").AssertExceptionContains<TTestException>(TStringBuilder() << "cba")); - - UNIT_ASSERT_TEST_FAILS(TTestException("abc", false).AssertExceptionContains<TTestException>("bc")); - - UNIT_ASSERT_TEST_FAILS(TTestException("abc").AssertExceptionContains<TOtherTestException>("b")); - - UNIT_ASSERT_EXCEPTION_CONTAINS(TTestException("abc").Throw(), TTestException, "a"); - } + UNIT_ASSERT_TEST_FAILS(TTestException("abc").AssertExceptionContains<TTestException>(TString("cba"))); + 
UNIT_ASSERT_TEST_FAILS(TTestException("abc").AssertExceptionContains<TTestException>(TStringBuilder() << "cba")); + + UNIT_ASSERT_TEST_FAILS(TTestException("abc", false).AssertExceptionContains<TTestException>("bc")); + + UNIT_ASSERT_TEST_FAILS(TTestException("abc").AssertExceptionContains<TOtherTestException>("b")); + + UNIT_ASSERT_EXCEPTION_CONTAINS(TTestException("abc").Throw(), TTestException, "a"); + } Y_UNIT_TEST(ExceptionSatisfies) { const auto goodPredicate = [](const TTestException& e) { return e.GetValue() == 5; }; @@ -370,4 +370,4 @@ Y_UNIT_TEST_SUITE(TUnitTestMacroTest) { UNIT_ASSERT_TEST_FAILS(TTestException().AssertExceptionSatisfies<TTestException>(badPredicate)); UNIT_ASSERT_TEST_FAILS(TTestException().AssertExceptionSatisfies<TOtherTestException>(goodPredicate)); } -} +} diff --git a/library/cpp/testing/unittest/tests_data.cpp b/library/cpp/testing/unittest/tests_data.cpp index b51cbc4b87c..a131a54ddf1 100644 --- a/library/cpp/testing/unittest/tests_data.cpp +++ b/library/cpp/testing/unittest/tests_data.cpp @@ -1,5 +1,5 @@ #include "tests_data.h" -#include "registar.h" +#include "registar.h" #include <library/cpp/testing/common/network.h> @@ -35,10 +35,10 @@ public: return GetPort(port); } - ui16 GetTcpAndUdpPort(ui16 port) { + ui16 GetTcpAndUdpPort(ui16 port) { return GetPort(port); - } - + } + ui16 GetPortsRange(const ui16 startPort, const ui16 range) { Y_UNUSED(startPort); auto ports = NTesting::NLegacy::GetFreePortsRange(range); @@ -54,16 +54,16 @@ public: private: void ReservePortForCurrentTest(const TAtomicSharedPtr<NTesting::IPort>& portGuard) { if (EnableReservePortsForCurrentTest) { - TTestBase* currentTest = NUnitTest::NPrivate::GetCurrentTest(); - if (currentTest != nullptr) { - currentTest->RunAfterTest([guard = portGuard]() mutable { - guard = nullptr; // remove reference for allocated port - }); - } - } - } - -private: + TTestBase* currentTest = NUnitTest::NPrivate::GetCurrentTest(); + if (currentTest != nullptr) { + currentTest->RunAfterTest([guard = portGuard]() mutable { + guard = nullptr; // remove reference for allocated port + }); + } + } + } + +private: TMutex Lock; TVector<TAtomicSharedPtr<NTesting::IPort>> ReservedPorts; const bool EnableReservePortsForCurrentTest; @@ -81,18 +81,18 @@ TPortManager::~TPortManager() { ui16 TPortManager::GetPort(ui16 port) { return Impl_->GetTcpPort(port); } - -ui16 TPortManager::GetTcpPort(ui16 port) { + +ui16 TPortManager::GetTcpPort(ui16 port) { return Impl_->GetTcpPort(port); -} - -ui16 TPortManager::GetUdpPort(ui16 port) { +} + +ui16 TPortManager::GetUdpPort(ui16 port) { return Impl_->GetUdpPort(port); -} - -ui16 TPortManager::GetTcpAndUdpPort(ui16 port) { +} + +ui16 TPortManager::GetTcpAndUdpPort(ui16 port) { return Impl_->GetTcpAndUdpPort(port); -} +} ui16 TPortManager::GetPortsRange(const ui16 startPort, const ui16 range) { return Impl_->GetPortsRange(startPort, range); diff --git a/library/cpp/testing/unittest/tests_data.h b/library/cpp/testing/unittest/tests_data.h index 6536bc1ae69..fcc56520bd0 100644 --- a/library/cpp/testing/unittest/tests_data.h +++ b/library/cpp/testing/unittest/tests_data.h @@ -31,19 +31,19 @@ class TPortManager: public TNonCopyable { public: TPortManager(bool reservePortsForCurrentTest = true); ~TPortManager(); - - // Gets free TCP port + + // Gets free TCP port ui16 GetPort(ui16 port = 0); - // Gets free TCP port - ui16 GetTcpPort(ui16 port = 0); - - // Gets free UDP port - ui16 GetUdpPort(ui16 port = 0); - - // Gets one free port for use in both TCP and UDP protocols - ui16 
GetTcpAndUdpPort(ui16 port = 0); - + // Gets free TCP port + ui16 GetTcpPort(ui16 port = 0); + + // Gets free UDP port + ui16 GetUdpPort(ui16 port = 0); + + // Gets one free port for use in both TCP and UDP protocols + ui16 GetTcpAndUdpPort(ui16 port = 0); + ui16 GetPortsRange(const ui16 startPort, const ui16 range); private: diff --git a/library/cpp/testing/unittest/ut/ya.make b/library/cpp/testing/unittest/ut/ya.make index 6d4c0959cc0..b5676b187cf 100644 --- a/library/cpp/testing/unittest/ut/ya.make +++ b/library/cpp/testing/unittest/ut/ya.make @@ -4,7 +4,7 @@ OWNER(snowball) SRCS( main.cpp - registar_ut.cpp + registar_ut.cpp ) END() diff --git a/library/cpp/testing/unittest/utmain.cpp b/library/cpp/testing/unittest/utmain.cpp index 305bc6b40fc..b7e1259cab2 100644 --- a/library/cpp/testing/unittest/utmain.cpp +++ b/library/cpp/testing/unittest/utmain.cpp @@ -1,4 +1,4 @@ -#include "plugin.h" +#include "plugin.h" #include "registar.h" #include "utmain.h" @@ -8,27 +8,27 @@ #include <library/cpp/json/writer/json_value.h> #include <library/cpp/testing/common/env.h> #include <library/cpp/testing/hook/hook.h> - -#include <util/datetime/base.h> - + +#include <util/datetime/base.h> + #include <util/generic/hash.h> -#include <util/generic/hash_set.h> +#include <util/generic/hash_set.h> #include <util/generic/scope.h> #include <util/generic/string.h> #include <util/generic/yexception.h> - + #include <util/network/init.h> - + #include <util/stream/file.h> #include <util/stream/output.h> #include <util/string/join.h> -#include <util/string/util.h> - -#include <util/system/defaults.h> -#include <util/system/execpath.h> -#include <util/system/valgrind.h> -#include <util/system/shellcommand.h> +#include <util/string/util.h> +#include <util/system/defaults.h> +#include <util/system/execpath.h> +#include <util/system/valgrind.h> +#include <util/system/shellcommand.h> + #if defined(_win_) #include <fcntl.h> #include <io.h> @@ -141,26 +141,26 @@ private: void OnError(const TError* descr) override { const TString comment = BuildComment(descr->msg, descr->BackTrace.data()); - ErrorMessages.push_back(comment); - } - - void OnFinish(const TFinish* descr) override { - if (descr->Success) { + ErrorMessages.push_back(comment); + } + + void OnFinish(const TFinish* descr) override { + if (descr->Success) { TraceSubtestFinished(descr->test->unit->name.data(), descr->test->name, "good", "", descr->Context); - } else { - TStringBuilder msgs; + } else { + TStringBuilder msgs; for (const TString& m : ErrorMessages) { - if (msgs) { + if (msgs) { msgs << TStringBuf("\n"); - } - msgs << m; - } - if (msgs) { + } + msgs << m; + } + if (msgs) { msgs << TStringBuf("\n"); - } + } TraceSubtestFinished(descr->test->unit->name.data(), descr->test->name, "fail", msgs, descr->Context); - ErrorMessages.clear(); - } + ErrorMessages.clear(); + } } }; @@ -364,8 +364,8 @@ private: } } - void OnFinish(const TFinish* descr) override { - TraceProcessor->Finish(*descr); + void OnFinish(const TFinish* descr) override { + TraceProcessor->Finish(*descr); if (!IsForked && ForkExitedCorrectly) { return; } @@ -373,15 +373,15 @@ private: return; } - if (descr->Success) { + if (descr->Success) { fprintf(stderr, "[%sgood%s] %s::%s\n", LightGreenColor().data(), OldColor().data(), descr->test->unit->name.data(), - descr->test->name); - NOTE_IN_VALGRIND(descr->test); + descr->test->name); + NOTE_IN_VALGRIND(descr->test); PrintTimes(SaveTestDuration()); - if (IsForked) { - fprintf(stderr, "%s", ForkCorrectExitMsg); - } + if (IsForked) { + fprintf(stderr, 
"%s", ForkCorrectExitMsg); + } } } @@ -613,7 +613,7 @@ static int DoUsage(const char* progname) { << " -h, --help print this help message\n" << " -l, --list print a list of available tests\n" << " -A --list-verbose print a list of available subtests\n" - << " --print-before-test print each test name before running it\n" + << " --print-before-test print each test name before running it\n" << " --print-before-suite print each test suite name before running it\n" << " --show-fails print a list of all failed tests at the end\n" << " --dont-show-fails do not print a list of all failed tests at the end\n" @@ -661,9 +661,9 @@ int NUnitTest::RunMain(int argc, char** argv) { NTesting::THook::CallBeforeRun(); Y_DEFER { NTesting::THook::CallAfterRun(); }; - NPlugin::OnStartMain(argc, argv); + NPlugin::OnStartMain(argc, argv); Y_DEFER { NPlugin::OnStopMain(argc, argv); }; - + TColoredProcessor processor(GetExecPath()); IOutputStream* listStream = &Cout; THolder<IOutputStream> listFile; diff --git a/library/cpp/testing/unittest/ya.make b/library/cpp/testing/unittest/ya.make index aaa4f2ba851..93d23f396f8 100644 --- a/library/cpp/testing/unittest/ya.make +++ b/library/cpp/testing/unittest/ya.make @@ -2,10 +2,10 @@ LIBRARY() PROVIDES(test_framework) -OWNER( - pg - galaxycrab -) +OWNER( + pg + galaxycrab +) PEERDIR( library/cpp/colorizer @@ -19,7 +19,7 @@ PEERDIR( SRCS( gtest.cpp checks.cpp - plugin.cpp + plugin.cpp registar.cpp tests_data.cpp utmain.cpp diff --git a/library/python/testing/yatest_common/yatest/common/network.py b/library/python/testing/yatest_common/yatest/common/network.py index 37bcb1b8e08..a0c462af8cf 100644 --- a/library/python/testing/yatest_common/yatest/common/network.py +++ b/library/python/testing/yatest_common/yatest/common/network.py @@ -42,57 +42,57 @@ class PortManager(object): self.release() def get_port(self, port=0): - ''' - Gets free TCP port - ''' - return self.get_tcp_port(port) - - def get_tcp_port(self, port=0): - ''' - Gets free TCP port - ''' - return self._get_port(port, socket.SOCK_STREAM) - - def get_udp_port(self, port=0): - ''' - Gets free UDP port - ''' - return self._get_port(port, socket.SOCK_DGRAM) - - def get_tcp_and_udp_port(self, port=0): - ''' - Gets one free port for use in both TCP and UDP protocols - ''' - if port and self._no_random_ports(): + ''' + Gets free TCP port + ''' + return self.get_tcp_port(port) + + def get_tcp_port(self, port=0): + ''' + Gets free TCP port + ''' + return self._get_port(port, socket.SOCK_STREAM) + + def get_udp_port(self, port=0): + ''' + Gets free UDP port + ''' + return self._get_port(port, socket.SOCK_DGRAM) + + def get_tcp_and_udp_port(self, port=0): + ''' + Gets one free port for use in both TCP and UDP protocols + ''' + if port and self._no_random_ports(): return port - retries = 20 - while retries > 0: - retries -= 1 - - result_port = self.get_tcp_port() + retries = 20 + while retries > 0: + retries -= 1 + + result_port = self.get_tcp_port() if not self.is_port_free(result_port, socket.SOCK_DGRAM): - self.release_port(result_port) + self.release_port(result_port) # Don't try to _capture_port(), it's already captured in the get_tcp_port() - return result_port - raise Exception('Failed to find port') - - def release_port(self, port): + return result_port + raise Exception('Failed to find port') + + def release_port(self, port): with self._lock: self._release_port_no_lock(port) - + def _release_port_no_lock(self, port): filelock = self._filelocks.pop(port, None) if filelock: filelock.release() - def release(self): + 
def release(self): with self._lock: while self._filelocks: _, filelock = self._filelocks.popitem() if filelock: filelock.release() - + def get_port_range(self, start_port, count, random_start=True): assert count > 0 if start_port and self._no_random_ports(): @@ -136,17 +136,17 @@ class PortManager(object): assert res, ('There are no available valid ports', self._valid_range) return res - def _get_port(self, port, sock_type): - if port and self._no_random_ports(): - return port - + def _get_port(self, port, sock_type): + if port and self._no_random_ports(): + return port + if len(self._filelocks) >= self._valid_port_count: raise PortManagerException("All valid ports are taken ({}): {}".format(self._valid_range, self._filelocks)) salt = random.randint(0, UI16MAXVAL) for attempt in six.moves.range(self._valid_port_count): probe_port = (salt + attempt) % self._valid_port_count - + for left, right in self._valid_range: if probe_port >= (right - left): probe_port -= right - left @@ -203,8 +203,8 @@ class PortManager(object): filelock.release() return False - def _no_random_ports(self): - return os.environ.get("NO_RANDOM_PORTS") + def _no_random_ports(self): + return os.environ.get("NO_RANDOM_PORTS") def get_valid_port_range(): diff --git a/tools/enum_parser/enum_parser/main.cpp b/tools/enum_parser/enum_parser/main.cpp index 0943c69c1da..2b50cf7740b 100644 --- a/tools/enum_parser/enum_parser/main.cpp +++ b/tools/enum_parser/enum_parser/main.cpp @@ -420,7 +420,7 @@ int main(int argc, char** argv) { if (outputFileName) { - NFs::Remove(outputFileName); + NFs::Remove(outputFileName); hOut.Reset(new TFileOutput(outputFileName)); out = hOut.Get(); diff --git a/util/charset/wide.h b/util/charset/wide.h index 04e6928aab3..86535f3a7f9 100644 --- a/util/charset/wide.h +++ b/util/charset/wide.h @@ -81,7 +81,7 @@ inline const wchar32* SkipSymbol(const wchar32* begin, const wchar32* end) noexc inline wchar32 ReadSymbol(const wchar16* begin, const wchar16* end) noexcept { Y_ASSERT(begin < end); if (IsW16SurrogateLead(*begin)) { - if (begin + 1 < end && IsW16SurrogateTail(*(begin + 1))) + if (begin + 1 < end && IsW16SurrogateTail(*(begin + 1))) return ::NDetail::ReadSurrogatePair(begin); return BROKEN_RUNE; diff --git a/util/datetime/base.h b/util/datetime/base.h index 5e902b8f633..387c6ae2306 100644 --- a/util/datetime/base.h +++ b/util/datetime/base.h @@ -172,7 +172,7 @@ public: } protected: - TValue Value_; // microseconds count + TValue Value_; // microseconds count }; namespace NDateTimeHelpers { diff --git a/util/datetime/parser_ut.cpp b/util/datetime/parser_ut.cpp index 61364af997f..729cefef445 100644 --- a/util/datetime/parser_ut.cpp +++ b/util/datetime/parser_ut.cpp @@ -362,16 +362,16 @@ Y_UNIT_TEST_SUITE(TDateTimeParseTest) { } Y_UNIT_TEST(TestIso8601TimeZone) { - time_t t1, t2, t3, t4; + time_t t1, t2, t3, t4; UNIT_ASSERT(ParseISO8601DateTime("2010-03-28T04:27:00.000+07:00", t1)); UNIT_ASSERT(ParseISO8601DateTime("2010-03-27T21:27:00.000Z", t2)); UNIT_ASSERT(ParseISO8601DateTime("2010-03-27T22:27:00.000+0100", t3)); UNIT_ASSERT(ParseISO8601DateTime("2010-03-27T20:27:00.000-01:00", t4)); - UNIT_ASSERT_VALUES_EQUAL(t1, t2); - UNIT_ASSERT_VALUES_EQUAL(t2, t3); - UNIT_ASSERT_VALUES_EQUAL(t3, t4); - } - + UNIT_ASSERT_VALUES_EQUAL(t1, t2); + UNIT_ASSERT_VALUES_EQUAL(t2, t3); + UNIT_ASSERT_VALUES_EQUAL(t3, t4); + } + Y_UNIT_TEST(TestIso8601Incorrect) { bool ret; time_t t; diff --git a/util/folder/fts.cpp b/util/folder/fts.cpp index 0e6a6f86eb9..bcdad0c6521 100644 --- a/util/folder/fts.cpp +++ 
b/util/folder/fts.cpp @@ -249,12 +249,12 @@ FTS* yfts_open(char* const* argv, int options, int (*compar)(const FTSENT**, con errno = 0; - Y_ASSERT(argv); - if (!*argv) { - errno = ENOENT; - return nullptr; - } - + Y_ASSERT(argv); + if (!*argv) { + errno = ENOENT; + return nullptr; + } + /* Options check. */ if (options & ~FTS_OPTIONMASK) { errno = EINVAL; @@ -991,7 +991,7 @@ fts_build(FTS* sp, int type) } #endif - // coverity[dead_error_line]: false positive + // coverity[dead_error_line]: false positive if (cderrno) { if (nlinks) { p->fts_info = FTS_NS; @@ -1075,7 +1075,7 @@ fts_build(FTS* sp, int type) (cur->fts_level == FTS_ROOTLEVEL ? FCHDIR(sp, sp->fts_rfd) : fts_safe_changedir(sp, cur->fts_parent, -1, ".."))) { cur->fts_info = FTS_ERR; SET(FTS_STOP); - fts_lfree(head); + fts_lfree(head); return nullptr; } @@ -1084,7 +1084,7 @@ fts_build(FTS* sp, int type) if (type == BREAD) { cur->fts_info = FTS_DP; } - fts_lfree(head); + fts_lfree(head); return nullptr; } diff --git a/util/folder/fts_ut.cpp b/util/folder/fts_ut.cpp index c5d59e35f42..a8145bdd48b 100644 --- a/util/folder/fts_ut.cpp +++ b/util/folder/fts_ut.cpp @@ -3,7 +3,7 @@ #include "tempdir.h" #include <library/cpp/testing/unittest/registar.h> -#include <library/cpp/threading/future/async.h> +#include <library/cpp/threading/future/async.h> #include <util/system/file.h> #include <util/system/tempfile.h> @@ -12,12 +12,12 @@ class TFtsTest: public TTestBase { UNIT_TEST_SUITE(TFtsTest); UNIT_TEST(TestSimple); - UNIT_TEST(TestNoLeakChangingAccessToFolder); + UNIT_TEST(TestNoLeakChangingAccessToFolder); UNIT_TEST_SUITE_END(); public: void TestSimple(); - void TestNoLeakChangingAccessToFolder(); + void TestNoLeakChangingAccessToFolder(); }; void MakeFile(const char* path) { @@ -82,42 +82,42 @@ void TFtsTest::TestSimple() { UNIT_ASSERT_EQUAL(yfts_read(fileTree()), nullptr); } -class TTempDirWithLostAccess: public TTempDir { -public: - ~TTempDirWithLostAccess() { - chmod(Name().data(), 0777); - } -}; - -// https://st.yandex-team.ru/YQ-318 -// Test that detects memory leak in case of error in chdir in fts_build function. -void TFtsTest::TestNoLeakChangingAccessToFolder() { - TTempDirWithLostAccess tempDir; - TString tmpPath = tempDir(); - if (tmpPath.EndsWith(LOCSLASH_S)) { - tmpPath.resize(tmpPath.size() - 1); - } - MakeDirIfNotExist((tmpPath + LOCSLASH_S + "subdir").data()); - - const char* path[2] = {tmpPath.data(), nullptr}; - TFileTree fileTree((char* const*)path, FTS_SEEDOT, FtsCmp); - UNIT_ASSERT(fileTree()); - - CheckEnt(yfts_read(fileTree()), tmpPath.data(), FTS_D); -#ifndef _win32_ - CheckEnt(yfts_read(fileTree()), (tmpPath + LOCSLASH_S ".").data(), FTS_DOT); -#endif // _win32_ - CheckEnt(yfts_read(fileTree()), (tmpPath + LOCSLASH_S "..").data(), FTS_DOT); - CheckEnt(yfts_read(fileTree()), (tmpPath + LOCSLASH_S "subdir").data(), FTS_D); - auto pool = CreateThreadPool(2); - auto chmodFuture = NThreading::Async([name = tmpPath] { - UNIT_ASSERT_C(!chmod(name.data(), 0), "Errno: " << errno); - }, *pool); - auto childrenFuture = NThreading::Async([&] { - yfts_children(fileTree(), 0); - }, *pool); - childrenFuture.Wait(); - chmodFuture.Wait(); -} - +class TTempDirWithLostAccess: public TTempDir { +public: + ~TTempDirWithLostAccess() { + chmod(Name().data(), 0777); + } +}; + +// https://st.yandex-team.ru/YQ-318 +// Test that detects memory leak in case of error in chdir in fts_build function. 
+void TFtsTest::TestNoLeakChangingAccessToFolder() { + TTempDirWithLostAccess tempDir; + TString tmpPath = tempDir(); + if (tmpPath.EndsWith(LOCSLASH_S)) { + tmpPath.resize(tmpPath.size() - 1); + } + MakeDirIfNotExist((tmpPath + LOCSLASH_S + "subdir").data()); + + const char* path[2] = {tmpPath.data(), nullptr}; + TFileTree fileTree((char* const*)path, FTS_SEEDOT, FtsCmp); + UNIT_ASSERT(fileTree()); + + CheckEnt(yfts_read(fileTree()), tmpPath.data(), FTS_D); +#ifndef _win32_ + CheckEnt(yfts_read(fileTree()), (tmpPath + LOCSLASH_S ".").data(), FTS_DOT); +#endif // _win32_ + CheckEnt(yfts_read(fileTree()), (tmpPath + LOCSLASH_S "..").data(), FTS_DOT); + CheckEnt(yfts_read(fileTree()), (tmpPath + LOCSLASH_S "subdir").data(), FTS_D); + auto pool = CreateThreadPool(2); + auto chmodFuture = NThreading::Async([name = tmpPath] { + UNIT_ASSERT_C(!chmod(name.data(), 0), "Errno: " << errno); + }, *pool); + auto childrenFuture = NThreading::Async([&] { + yfts_children(fileTree(), 0); + }, *pool); + childrenFuture.Wait(); + chmodFuture.Wait(); +} + UNIT_TEST_SUITE_REGISTRATION(TFtsTest); diff --git a/util/folder/iterator_ut.cpp b/util/folder/iterator_ut.cpp index 936becd139b..e4c49c93107 100644 --- a/util/folder/iterator_ut.cpp +++ b/util/folder/iterator_ut.cpp @@ -81,7 +81,7 @@ private: inline ~TDirHier() { for (size_t i = 0; i < Paths_.size(); ++i) { - NFs::Remove(Paths_[Paths_.size() - i - 1].Path); + NFs::Remove(Paths_[Paths_.size() - i - 1].Path); } } diff --git a/util/folder/path.cpp b/util/folder/path.cpp index bfe0c67d687..e16a979fc1d 100644 --- a/util/folder/path.cpp +++ b/util/folder/path.cpp @@ -289,7 +289,7 @@ void TFsPath::RenameTo(const TString& newPath) const { ythrow TIoException() << "bad new file name"; } if (!NFs::Rename(Path_, newPath)) { - ythrow TIoSystemError() << "failed to rename " << Path_ << " to " << newPath; + ythrow TIoSystemError() << "failed to rename " << Path_ << " to " << newPath; } } @@ -326,7 +326,7 @@ TFsPath TFsPath::ReadLink() const { ythrow TIoException() << "not a symlink " << *this; } - return NFs::ReadLink(*this); + return NFs::ReadLink(*this); } bool TFsPath::Exists() const { diff --git a/util/folder/tempdir.cpp b/util/folder/tempdir.cpp index 6fdf8f753c1..2587cd3f52c 100644 --- a/util/folder/tempdir.cpp +++ b/util/folder/tempdir.cpp @@ -25,7 +25,7 @@ TTempDir::TTempDir(const TString& tempDir) : TempDir(tempDir) , Remove(true) { - NFs::Remove(TempDir); + NFs::Remove(TempDir); MakeDirIfNotExist(TempDir.c_str()); } diff --git a/util/generic/algorithm.h b/util/generic/algorithm.h index badfb889933..81104697708 100644 --- a/util/generic/algorithm.h +++ b/util/generic/algorithm.h @@ -1,7 +1,7 @@ #pragma once -#include "is_in.h" -#include "utility.h" +#include "is_in.h" +#include "utility.h" #include <util/system/defaults.h> #include <util/generic/fwd.h> diff --git a/util/generic/cast.h b/util/generic/cast.h index 0d4a41f385b..573e2fc1a84 100644 --- a/util/generic/cast.h +++ b/util/generic/cast.h @@ -117,13 +117,13 @@ inline std::enable_if_t<!::NPrivate::TSafelyConvertible<TSmall, TLarge>::Result, using TLargeInt = ::NPrivate::TUnderlyingTypeOrSelf<TLarge>; if (std::is_unsigned<TSmallInt>::value && std::is_signed<TLargeInt>::value) { - if (IsNegative(largeInt)) { + if (IsNegative(largeInt)) { ythrow TBadCastException() << "Conversion '" << TypeName<TLarge>() << '{' << TLargeInt(largeInt) << "}' to '" << TypeName<TSmallInt>() << "', negative value converted to unsigned"; - } - } - + } + } + TSmallInt smallInt = TSmallInt(largeInt); if 
(std::is_signed<TSmallInt>::value && std::is_unsigned<TLargeInt>::value) { diff --git a/util/generic/cast_ut.cpp b/util/generic/cast_ut.cpp index 718a8de79de..41130522024 100644 --- a/util/generic/cast_ut.cpp +++ b/util/generic/cast_ut.cpp @@ -16,7 +16,7 @@ private: virtual ~TAaa() = default; }; struct TBbb: public TAaa {}; - + inline void TestVerifyDynamicCast() { TBbb bbb; TAaa* aaa = &bbb; diff --git a/util/generic/is_in.h b/util/generic/is_in.h index 4f175ea5ebd..5f1c6fc3b2d 100644 --- a/util/generic/is_in.h +++ b/util/generic/is_in.h @@ -1,24 +1,24 @@ -#pragma once - -#include "typetraits.h" +#pragma once +#include "typetraits.h" + #include <algorithm> #include <initializer_list> - -template <class I, class T> + +template <class I, class T> static inline bool IsIn(I f, I l, const T& v); - -template <class C, class T> + +template <class C, class T> static inline bool IsIn(const C& c, const T& e); - -namespace NIsInHelper { + +namespace NIsInHelper { Y_HAS_MEMBER(find, FindMethod); Y_HAS_SUBTYPE(const_iterator, ConstIterator); Y_HAS_SUBTYPE(key_type, KeyType); - + template <class T> using TIsAssocCont = TConjunction<THasFindMethod<T>, THasConstIterator<T>, THasKeyType<T>>; - + template <class C, class T, bool isAssoc> struct TIsInTraits { static bool IsIn(const C& c, const T& e) { @@ -27,7 +27,7 @@ namespace NIsInHelper { return ::IsIn(begin(c), end(c), e); } }; - + template <class C, class T> struct TIsInTraits<C, T, true> { static bool IsIn(const C& c, const T& e) { @@ -35,17 +35,17 @@ namespace NIsInHelper { } }; } - -template <class I, class T> + +template <class I, class T> static inline bool IsIn(I f, I l, const T& v) { return std::find(f, l, v) != l; -} - -template <class C, class T> +} + +template <class C, class T> static inline bool IsIn(const C& c, const T& e) { - using namespace NIsInHelper; + using namespace NIsInHelper; return TIsInTraits<C, T, TIsAssocCont<C>::value>::IsIn(c, e); -} +} template <class T, class U> static inline bool IsIn(std::initializer_list<T> l, const U& e) { diff --git a/util/generic/is_in_ut.cpp b/util/generic/is_in_ut.cpp index c668bce8078..49c14bff12c 100644 --- a/util/generic/is_in_ut.cpp +++ b/util/generic/is_in_ut.cpp @@ -1,5 +1,5 @@ #include <library/cpp/testing/unittest/registar.h> - + #include "algorithm.h" #include "hash.h" #include "hash_set.h" @@ -8,68 +8,68 @@ #include "set.h" #include "strbuf.h" #include "string.h" - + Y_UNIT_TEST_SUITE(TIsIn) { template <class TCont, class T> - void TestIsInWithCont(const T& elem) { + void TestIsInWithCont(const T& elem) { class TMapMock: public TCont { - public: - typename TCont::const_iterator find(const typename TCont::key_type& k) const { - ++FindCalled; - return TCont::find(k); - } - - typename TCont::iterator find(const typename TCont::key_type& k) { - ++FindCalled; - return TCont::find(k); - } - - mutable size_t FindCalled = 1; - }; - - TMapMock m; - m.insert(elem); - - // use more effective find method - UNIT_ASSERT(IsIn(m, "found")); - UNIT_ASSERT(m.FindCalled); - m.FindCalled = 0; - - UNIT_ASSERT(!IsIn(m, "not found")); - UNIT_ASSERT(m.FindCalled); - m.FindCalled = 0; - } - + public: + typename TCont::const_iterator find(const typename TCont::key_type& k) const { + ++FindCalled; + return TCont::find(k); + } + + typename TCont::iterator find(const typename TCont::key_type& k) { + ++FindCalled; + return TCont::find(k); + } + + mutable size_t FindCalled = 1; + }; + + TMapMock m; + m.insert(elem); + + // use more effective find method + UNIT_ASSERT(IsIn(m, "found")); + UNIT_ASSERT(m.FindCalled); + 
m.FindCalled = 0; + + UNIT_ASSERT(!IsIn(m, "not found")); + UNIT_ASSERT(m.FindCalled); + m.FindCalled = 0; + } + Y_UNIT_TEST(IsInTest) { TestIsInWithCont<TMap<TString, TString>>(std::make_pair("found", "1")); TestIsInWithCont<TMultiMap<TString, TString>>(std::make_pair("found", "1")); TestIsInWithCont<THashMap<TString, TString>>(std::make_pair("found", "1")); TestIsInWithCont<THashMultiMap<TString, TString>>(std::make_pair("found", "1")); - + TestIsInWithCont<TSet<TString>>("found"); TestIsInWithCont<TMultiSet<TString>>("found"); TestIsInWithCont<THashSet<TString>>("found"); TestIsInWithCont<THashMultiSet<TString>>("found"); - - // vector also compiles and works + + // vector also compiles and works TVector<TString> v; - v.push_back("found"); - UNIT_ASSERT(IsIn(v, "found")); - UNIT_ASSERT(!IsIn(v, "not found")); - - // iterators interface - UNIT_ASSERT(IsIn(v.begin(), v.end(), "found")); - UNIT_ASSERT(!IsIn(v.begin(), v.end(), "not found")); - + v.push_back("found"); + UNIT_ASSERT(IsIn(v, "found")); + UNIT_ASSERT(!IsIn(v, "not found")); + + // iterators interface + UNIT_ASSERT(IsIn(v.begin(), v.end(), "found")); + UNIT_ASSERT(!IsIn(v.begin(), v.end(), "not found")); + // Works with TString (it has find, but find is not used) TString s = "found"; - UNIT_ASSERT(IsIn(s, 'f')); - UNIT_ASSERT(!IsIn(s, 'z')); - - TStringBuf b = "found"; - UNIT_ASSERT(IsIn(b, 'f')); - UNIT_ASSERT(!IsIn(b, 'z')); - } + UNIT_ASSERT(IsIn(s, 'f')); + UNIT_ASSERT(!IsIn(s, 'z')); + + TStringBuf b = "found"; + UNIT_ASSERT(IsIn(b, 'f')); + UNIT_ASSERT(!IsIn(b, 'z')); + } Y_UNIT_TEST(IsInInitListTest) { const char* abc = "abc"; @@ -113,4 +113,4 @@ Y_UNIT_TEST_SUITE(TIsIn) { UNIT_ASSERT(!IsIn(array, "c")); UNIT_ASSERT(IsIn(array, TStringBuf("d"))); } -} +} diff --git a/util/generic/mem_copy_ut.cpp b/util/generic/mem_copy_ut.cpp index 8b55a11cf68..050fd7fa186 100644 --- a/util/generic/mem_copy_ut.cpp +++ b/util/generic/mem_copy_ut.cpp @@ -3,12 +3,12 @@ #include <library/cpp/testing/unittest/registar.h> namespace { - class TAssignBCalled: public yexception { - }; - + class TAssignBCalled: public yexception { + }; + struct TB { inline TB& operator=(const TB&) { - throw TAssignBCalled(); + throw TAssignBCalled(); return *this; } @@ -69,7 +69,7 @@ Y_UNIT_TEST_SUITE(TestMemCopy) { TC c1[5]; TC c2[5]; - UNIT_ASSERT_EXCEPTION(MemCopy(c2, c1, 5), TAssignBCalled); + UNIT_ASSERT_EXCEPTION(MemCopy(c2, c1, 5), TAssignBCalled); } template <class T> diff --git a/util/generic/ptr.h b/util/generic/ptr.h index 19db0e3ec55..49a6fca6aee 100644 --- a/util/generic/ptr.h +++ b/util/generic/ptr.h @@ -602,17 +602,17 @@ private: mutable T* T_; }; -template <class T, class Ops> +template <class T, class Ops> struct THash<TIntrusivePtr<T, Ops>>: THash<const T*> { - using THash<const T*>::operator(); - inline size_t operator()(const TIntrusivePtr<T, Ops>& ptr) const { - return THash<const T*>::operator()(ptr.Get()); - } -}; - + using THash<const T*>::operator(); + inline size_t operator()(const TIntrusivePtr<T, Ops>& ptr) const { + return THash<const T*>::operator()(ptr.Get()); + } +}; + // Behaves like TIntrusivePtr but returns const T* to prevent user from accidentally modifying the referenced object. template <class T, class Ops> -class TIntrusiveConstPtr: public TPointerBase<TIntrusiveConstPtr<T, Ops>, const T> { +class TIntrusiveConstPtr: public TPointerBase<TIntrusiveConstPtr<T, Ops>, const T> { public: inline TIntrusiveConstPtr(T* t = nullptr) noexcept // we need a non-const pointer to Ref(), UnRef() and eventually delete it. 
: T_(t) @@ -718,13 +718,13 @@ private: template <class T, class Ops> struct THash<TIntrusiveConstPtr<T, Ops>>: THash<const T*> { - using THash<const T*>::operator(); - inline size_t operator()(const TIntrusiveConstPtr<T, Ops>& ptr) const { - return THash<const T*>::operator()(ptr.Get()); - } -}; - -template <class T, class Ops> + using THash<const T*>::operator(); + inline size_t operator()(const TIntrusiveConstPtr<T, Ops>& ptr) const { + return THash<const T*>::operator()(ptr.Get()); + } +}; + +template <class T, class Ops> class TSimpleIntrusiveOps { using TFunc = void (*)(T*) #if __cplusplus >= 201703 @@ -932,14 +932,14 @@ private: C* C_; }; -template <class T, class C, class D> +template <class T, class C, class D> struct THash<TSharedPtr<T, C, D>>: THash<const T*> { - using THash<const T*>::operator(); - inline size_t operator()(const TSharedPtr<T, C, D>& ptr) const { - return THash<const T*>::operator()(ptr.Get()); - } -}; - + using THash<const T*>::operator(); + inline size_t operator()(const TSharedPtr<T, C, D>& ptr) const { + return THash<const T*>::operator()(ptr.Get()); + } +}; + template <class T, class D = TDelete> using TAtomicSharedPtr = TSharedPtr<T, TAtomicCounter, D>; diff --git a/util/generic/ptr_ut.cpp b/util/generic/ptr_ut.cpp index c2dcff23f6b..210e562a36c 100644 --- a/util/generic/ptr_ut.cpp +++ b/util/generic/ptr_ut.cpp @@ -4,8 +4,8 @@ #include <library/cpp/testing/unittest/registar.h> -#include <util/generic/hash_set.h> -#include <util/generic/is_in.h> +#include <util/generic/hash_set.h> +#include <util/generic/is_in.h> #include <util/stream/output.h> #include <util/system/thread.h> @@ -31,9 +31,9 @@ class TPointerTest: public TTestBase { UNIT_TEST(TestCopyOnWritePtr2); UNIT_TEST(TestOperatorBool); UNIT_TEST(TestMakeShared); - UNIT_TEST(TestComparison); + UNIT_TEST(TestComparison); UNIT_TEST(TestSimpleIntrusivePtrCtorTsan); - UNIT_TEST(TestRefCountedPtrsInHashSet) + UNIT_TEST(TestRefCountedPtrsInHashSet) UNIT_TEST_SUITE_END(); private: @@ -83,10 +83,10 @@ private: void TestCopyOnWritePtr2(); void TestOperatorBool(); void TestMakeShared(); - void TestComparison(); - template <class T, class TRefCountedPtr> - void TestRefCountedPtrsInHashSetImpl(); - void TestRefCountedPtrsInHashSet(); + void TestComparison(); + template <class T, class TRefCountedPtr> + void TestRefCountedPtrsInHashSetImpl(); + void TestRefCountedPtrsInHashSet(); }; UNIT_TEST_SUITE_REGISTRATION(TPointerTest); @@ -709,73 +709,73 @@ void TPointerTest::TestMakeShared() { } } -template <class TPtr> -void TestPtrComparison(const TPtr& ptr) { - UNIT_ASSERT(ptr == ptr); - UNIT_ASSERT(!(ptr != ptr)); - UNIT_ASSERT(ptr == ptr.Get()); - UNIT_ASSERT(!(ptr != ptr.Get())); -} - -void TPointerTest::TestComparison() { +template <class TPtr> +void TestPtrComparison(const TPtr& ptr) { + UNIT_ASSERT(ptr == ptr); + UNIT_ASSERT(!(ptr != ptr)); + UNIT_ASSERT(ptr == ptr.Get()); + UNIT_ASSERT(!(ptr != ptr.Get())); +} + +void TPointerTest::TestComparison() { THolder<A> ptr1(new A); TAutoPtr<A> ptr2; TSimpleSharedPtr<int> ptr3(new int(6)); TIntrusivePtr<A> ptr4; - TIntrusiveConstPtr<A> ptr5 = ptr4; + TIntrusiveConstPtr<A> ptr5 = ptr4; UNIT_ASSERT(ptr1 != nullptr); UNIT_ASSERT(ptr2 == nullptr); UNIT_ASSERT(ptr3 != nullptr); UNIT_ASSERT(ptr4 == nullptr); - UNIT_ASSERT(ptr5 == nullptr); - - TestPtrComparison(ptr1); - TestPtrComparison(ptr2); - TestPtrComparison(ptr3); - TestPtrComparison(ptr4); - TestPtrComparison(ptr5); + UNIT_ASSERT(ptr5 == nullptr); + + TestPtrComparison(ptr1); + TestPtrComparison(ptr2); + 
TestPtrComparison(ptr3); + TestPtrComparison(ptr4); + TestPtrComparison(ptr5); } - -template <class T, class TRefCountedPtr> -void TPointerTest::TestRefCountedPtrsInHashSetImpl() { - THashSet<TRefCountedPtr> hashSet; - TRefCountedPtr p1(new T()); - UNIT_ASSERT(!IsIn(hashSet, p1)); - UNIT_ASSERT(hashSet.insert(p1).second); - UNIT_ASSERT(IsIn(hashSet, p1)); - UNIT_ASSERT_VALUES_EQUAL(hashSet.size(), 1); - UNIT_ASSERT(!hashSet.insert(p1).second); - - TRefCountedPtr p2(new T()); - UNIT_ASSERT(!IsIn(hashSet, p2)); - UNIT_ASSERT(hashSet.insert(p2).second); - UNIT_ASSERT(IsIn(hashSet, p2)); - UNIT_ASSERT_VALUES_EQUAL(hashSet.size(), 2); -} - + +template <class T, class TRefCountedPtr> +void TPointerTest::TestRefCountedPtrsInHashSetImpl() { + THashSet<TRefCountedPtr> hashSet; + TRefCountedPtr p1(new T()); + UNIT_ASSERT(!IsIn(hashSet, p1)); + UNIT_ASSERT(hashSet.insert(p1).second); + UNIT_ASSERT(IsIn(hashSet, p1)); + UNIT_ASSERT_VALUES_EQUAL(hashSet.size(), 1); + UNIT_ASSERT(!hashSet.insert(p1).second); + + TRefCountedPtr p2(new T()); + UNIT_ASSERT(!IsIn(hashSet, p2)); + UNIT_ASSERT(hashSet.insert(p2).second); + UNIT_ASSERT(IsIn(hashSet, p2)); + UNIT_ASSERT_VALUES_EQUAL(hashSet.size(), 2); +} + struct TCustomIntrusivePtrOps: TDefaultIntrusivePtrOps<A> { -}; - +}; + struct TCustomDeleter: TDelete { -}; - +}; + struct TCustomCounter: TSimpleCounter { - using TSimpleCounterTemplate::TSimpleCounterTemplate; -}; - -void TPointerTest::TestRefCountedPtrsInHashSet() { - // test common case - TestRefCountedPtrsInHashSetImpl<TString, TSimpleSharedPtr<TString>>(); - TestRefCountedPtrsInHashSetImpl<TString, TAtomicSharedPtr<TString>>(); - TestRefCountedPtrsInHashSetImpl<A, TIntrusivePtr<A>>(); - TestRefCountedPtrsInHashSetImpl<A, TIntrusiveConstPtr<A>>(); - - // test with custom ops - TestRefCountedPtrsInHashSetImpl<TString, TSharedPtr<TString, TCustomCounter, TCustomDeleter>>(); - TestRefCountedPtrsInHashSetImpl<A, TIntrusivePtr<A, TCustomIntrusivePtrOps>>(); - TestRefCountedPtrsInHashSetImpl<A, TIntrusiveConstPtr<A, TCustomIntrusivePtrOps>>(); -} + using TSimpleCounterTemplate::TSimpleCounterTemplate; +}; + +void TPointerTest::TestRefCountedPtrsInHashSet() { + // test common case + TestRefCountedPtrsInHashSetImpl<TString, TSimpleSharedPtr<TString>>(); + TestRefCountedPtrsInHashSetImpl<TString, TAtomicSharedPtr<TString>>(); + TestRefCountedPtrsInHashSetImpl<A, TIntrusivePtr<A>>(); + TestRefCountedPtrsInHashSetImpl<A, TIntrusiveConstPtr<A>>(); + + // test with custom ops + TestRefCountedPtrsInHashSetImpl<TString, TSharedPtr<TString, TCustomCounter, TCustomDeleter>>(); + TestRefCountedPtrsInHashSetImpl<A, TIntrusivePtr<A, TCustomIntrusivePtrOps>>(); + TestRefCountedPtrsInHashSetImpl<A, TIntrusiveConstPtr<A, TCustomIntrusivePtrOps>>(); +} class TRefCountedWithStatistics: public TNonCopyable { public: diff --git a/util/generic/strbuf.h b/util/generic/strbuf.h index 70b9360d580..390f30e53d4 100644 --- a/util/generic/strbuf.h +++ b/util/generic/strbuf.h @@ -456,13 +456,13 @@ public: // string subsequences return *this; } - // coverity[exn_spec_violation] + // coverity[exn_spec_violation] inline TdSelf& Trunc(size_t targetSize) noexcept { - // Coverity false positive issue - // exn_spec_violation: An exception of type "std::out_of_range" is thrown but the exception specification "noexcept" doesn't allow it to be thrown. This will result in a call to terminate(). - // fun_call_w_exception: Called function TStringView::substr throws an exception of type "std::out_of_range". 
- // Suppress this issue because we pass argument pos=0 and string_view can't throw std::out_of_range. - *this = TStringView::substr(0, targetSize); //WARN: removing TStringView:: will lead to an infinite recursion + // Coverity false positive issue + // exn_spec_violation: An exception of type "std::out_of_range" is thrown but the exception specification "noexcept" doesn't allow it to be thrown. This will result in a call to terminate(). + // fun_call_w_exception: Called function TStringView::substr throws an exception of type "std::out_of_range". + // Suppress this issue because we pass argument pos=0 and string_view can't throw std::out_of_range. + *this = TStringView::substr(0, targetSize); //WARN: removing TStringView:: will lead to an infinite recursion return *this; } diff --git a/util/generic/strbuf_ut.cpp b/util/generic/strbuf_ut.cpp index 69cde785af0..01d88d1b9f1 100644 --- a/util/generic/strbuf_ut.cpp +++ b/util/generic/strbuf_ut.cpp @@ -332,20 +332,20 @@ Y_UNIT_TEST_SUITE(TStrBufTest) { char data[] = "Hello\0word"; PassByConstReference(data); } - - Y_UNIT_TEST(TestTruncate) { - TStringBuf s = "123"; - s.Trunc(5); - UNIT_ASSERT_STRINGS_EQUAL(s, "123"); - s.Trunc(3); - UNIT_ASSERT_STRINGS_EQUAL(s, "123"); - s.Trunc(1); - UNIT_ASSERT_STRINGS_EQUAL(s, "1"); - s.Trunc(0); - UNIT_ASSERT_STRINGS_EQUAL(s, ""); - s.Trunc(0); - UNIT_ASSERT_STRINGS_EQUAL(s, ""); - } + + Y_UNIT_TEST(TestTruncate) { + TStringBuf s = "123"; + s.Trunc(5); + UNIT_ASSERT_STRINGS_EQUAL(s, "123"); + s.Trunc(3); + UNIT_ASSERT_STRINGS_EQUAL(s, "123"); + s.Trunc(1); + UNIT_ASSERT_STRINGS_EQUAL(s, "1"); + s.Trunc(0); + UNIT_ASSERT_STRINGS_EQUAL(s, ""); + s.Trunc(0); + UNIT_ASSERT_STRINGS_EQUAL(s, ""); + } } Y_UNIT_TEST_SUITE(TWtrBufTest) { diff --git a/util/generic/typetraits.h b/util/generic/typetraits.h index d165bd1a068..26ddb039d2f 100644 --- a/util/generic/typetraits.h +++ b/util/generic/typetraits.h @@ -198,9 +198,9 @@ class TTypeTraits<void>: public TTypeTraitsBase<void> {}; struct THas##name: std::false_type {}; \ template <class T> \ struct THas##name<T, ::TVoidT<typename T::subtype>>: std::true_type {}; - + #define Y_HAS_SUBTYPE_IMPL_1(name) Y_HAS_SUBTYPE_IMPL_2(name, name) - + /* @def Y_HAS_SUBTYPE * * This macro should be used to define compile-time introspection helper classes for template diff --git a/util/generic/yexception_ut.cpp b/util/generic/yexception_ut.cpp index cb3e29fed84..f8560cde234 100644 --- a/util/generic/yexception_ut.cpp +++ b/util/generic/yexception_ut.cpp @@ -1,11 +1,11 @@ #include "yexception.h" static inline void Throw1DontMove() { - ythrow yexception() << "blabla"; // don't move this line + ythrow yexception() << "blabla"; // don't move this line } static inline void Throw2DontMove() { - ythrow yexception() << 1 << " qw " << 12.1; // don't move this line + ythrow yexception() << 1 << " qw " << 12.1; // don't move this line } #include <library/cpp/testing/unittest/registar.h> @@ -14,7 +14,7 @@ static inline void Throw2DontMove() { #include <util/memory/tempbuf.h> #include <util/random/mersenne.h> #include <util/stream/output.h> -#include <util/string/subst.h> +#include <util/string/subst.h> #include "yexception_ut.h" #include "bt_exception.h" @@ -218,18 +218,18 @@ private: } } - static inline void CheckCurrentExceptionContains(const char* message) { + static inline void CheckCurrentExceptionContains(const char* message) { TString err = CurrentExceptionMessage(); - SubstGlobal(err, '\\', '/'); // remove backslashes from path in message - UNIT_ASSERT(err.Contains(message)); - } - + 
SubstGlobal(err, '\\', '/'); // remove backslashes from path in message + UNIT_ASSERT(err.Contains(message)); + } + inline void TestRaise1() { try { Throw2DontMove(); UNIT_ASSERT(false); } catch (...) { - CheckCurrentExceptionContains("util/generic/yexception_ut.cpp:8: 1 qw 12.1"); + CheckCurrentExceptionContains("util/generic/yexception_ut.cpp:8: 1 qw 12.1"); } } @@ -240,9 +240,9 @@ private: inline void TestLineInfo() { try { Throw1DontMove(); - UNIT_ASSERT(false); + UNIT_ASSERT(false); } catch (...) { - CheckCurrentExceptionContains("util/generic/yexception_ut.cpp:4: blabla"); + CheckCurrentExceptionContains("util/generic/yexception_ut.cpp:4: blabla"); throw; } diff --git a/util/stream/zlib.cpp b/util/stream/zlib.cpp index 60f4e9439f0..177fb20b05e 100644 --- a/util/stream/zlib.cpp +++ b/util/stream/zlib.cpp @@ -1,7 +1,7 @@ #include "zlib.h" #include <util/memory/addstorage.h> -#include <util/generic/scope.h> +#include <util/generic/scope.h> #include <util/generic/utility.h> #include <contrib/libs/zlib/zlib.h> diff --git a/util/string/builder.h b/util/string/builder.h index 7b548211513..11eb840e23b 100644 --- a/util/string/builder.h +++ b/util/string/builder.h @@ -4,14 +4,14 @@ #include <utility> #include <util/generic/string.h> -namespace NPrivateStringBuilder { +namespace NPrivateStringBuilder { class TStringBuilder: public TString { public: inline TStringBuilder() : Out(*this) { } - + TStringBuilder(TStringBuilder&& rhs) : TString(std::move(rhs)) , Out(*this) @@ -31,9 +31,9 @@ namespace NPrivateStringBuilder { template <class T> static inline TStringBuilder&& operator<<(TStringBuilder&& builder, const T& t) { builder.Out << t; - + return std::move(builder); } -} - -using TStringBuilder = NPrivateStringBuilder::TStringBuilder; +} + +using TStringBuilder = NPrivateStringBuilder::TStringBuilder; diff --git a/util/string/builder_ut.cpp b/util/string/builder_ut.cpp index 22def683ec2..28777c87c1e 100644 --- a/util/string/builder_ut.cpp +++ b/util/string/builder_ut.cpp @@ -37,27 +37,27 @@ Y_UNIT_TEST_SUITE(TStringBuilderTest) { out << sb; TestEquals("a", s); } - + Y_UNIT_TEST(TestStringBuilderRValue) { - struct TRValueAcceptTester { + struct TRValueAcceptTester { static bool IsRValue(const TString&) { - return false; - } - + return false; + } + static bool IsRValue(TString&&) { - return true; - } - }; - - UNIT_ASSERT(TRValueAcceptTester::IsRValue(TStringBuilder() << "a" << 1)); - - TStringBuilder b; - UNIT_ASSERT(!TRValueAcceptTester::IsRValue(b << "a" << 1)); - TStringBuilder b2; - UNIT_ASSERT(!TRValueAcceptTester::IsRValue(b2 << "a" << 1 << TStringBuilder() << "a")); - UNIT_ASSERT_VALUES_EQUAL("a1a", b2); - - UNIT_ASSERT(TRValueAcceptTester::IsRValue(TStringBuilder() << b2)); - UNIT_ASSERT_VALUES_EQUAL("a1a", TStringBuilder() << b2); - } + return true; + } + }; + + UNIT_ASSERT(TRValueAcceptTester::IsRValue(TStringBuilder() << "a" << 1)); + + TStringBuilder b; + UNIT_ASSERT(!TRValueAcceptTester::IsRValue(b << "a" << 1)); + TStringBuilder b2; + UNIT_ASSERT(!TRValueAcceptTester::IsRValue(b2 << "a" << 1 << TStringBuilder() << "a")); + UNIT_ASSERT_VALUES_EQUAL("a1a", b2); + + UNIT_ASSERT(TRValueAcceptTester::IsRValue(TStringBuilder() << b2)); + UNIT_ASSERT_VALUES_EQUAL("a1a", TStringBuilder() << b2); + } } diff --git a/util/string/cast_ut.cpp b/util/string/cast_ut.cpp index 033450c38c4..4e53d6bf7a8 100644 --- a/util/string/cast_ut.cpp +++ b/util/string/cast_ut.cpp @@ -5,8 +5,8 @@ #include <util/charset/wide.h> #include <util/system/defaults.h> -#include <limits> - +#include <limits> + // 
positive test (return true or no exception) #define test1(t, v) \ F<t>().CheckTryOK(v); \ @@ -279,14 +279,14 @@ Y_UNIT_TEST_SUITE(TCastTest) { UNIT_ASSERT_VALUES_EQUAL(FloatToString(1.2345678901234567), "1.2345678901234567"); // no truncation UNIT_ASSERT_VALUES_EQUAL(FloatToString(5e-324), "5e-324"); // denormalized UNIT_ASSERT_VALUES_EQUAL(FloatToString(-0.0), "-0"); // sign must be preserved - - UNIT_ASSERT_STRINGS_EQUAL(FloatToString(std::numeric_limits<double>::quiet_NaN()), "nan"); - UNIT_ASSERT_STRINGS_EQUAL(FloatToString(std::numeric_limits<double>::infinity()), "inf"); + + UNIT_ASSERT_STRINGS_EQUAL(FloatToString(std::numeric_limits<double>::quiet_NaN()), "nan"); + UNIT_ASSERT_STRINGS_EQUAL(FloatToString(std::numeric_limits<double>::infinity()), "inf"); UNIT_ASSERT_STRINGS_EQUAL(FloatToString(-std::numeric_limits<double>::infinity()), "-inf"); UNIT_ASSERT_STRINGS_EQUAL(FloatToString(std::numeric_limits<float>::quiet_NaN()), "nan"); UNIT_ASSERT_STRINGS_EQUAL(FloatToString(std::numeric_limits<float>::infinity()), "inf"); - UNIT_ASSERT_STRINGS_EQUAL(FloatToString(-std::numeric_limits<float>::infinity()), "-inf"); + UNIT_ASSERT_STRINGS_EQUAL(FloatToString(-std::numeric_limits<float>::infinity()), "-inf"); } Y_UNIT_TEST(TestReadFloats) { diff --git a/util/string/strip.h b/util/string/strip.h index d5ef6da96db..c9172ef19a9 100644 --- a/util/string/strip.h +++ b/util/string/strip.h @@ -69,11 +69,11 @@ struct TStripImpl { const size_t oldLen = e - b; if (stripBeg) { - StripRangeBegin(b, e, criterion); + StripRangeBegin(b, e, criterion); } if (stripEnd) { - StripRangeEnd(b, e, criterion); + StripRangeEnd(b, e, criterion); } const size_t newLen = e - b; @@ -85,7 +85,7 @@ struct TStripImpl { auto b = from.begin(); auto e = from.end(); - if (StripRange(b, e, criterion)) { + if (StripRange(b, e, criterion)) { to = T(b, e - b); return true; @@ -99,7 +99,7 @@ struct TStripImpl { template <class T, class TStripCriterion> static inline T StripString(const T& from, TStripCriterion&& criterion) { T ret; - StripString(from, ret, criterion); + StripString(from, ret, criterion); return ret; } @@ -111,7 +111,7 @@ struct TStripImpl { template <class It, class TStripCriterion> inline bool StripRange(It& b, It& e, TStripCriterion&& criterion) noexcept { - return TStripImpl<true, true>::StripRange(b, e, criterion); + return TStripImpl<true, true>::StripRange(b, e, criterion); } template <class It> @@ -123,7 +123,7 @@ template <class It, class TStripCriterion> inline bool Strip(It& b, size_t& len, TStripCriterion&& criterion) noexcept { It e = b + len; - if (StripRange(b, e, criterion)) { + if (StripRange(b, e, criterion)) { len = e - b; return true; @@ -139,7 +139,7 @@ inline bool Strip(It& b, size_t& len) noexcept { template <class T, class TStripCriterion> static inline bool StripString(const T& from, T& to, TStripCriterion&& criterion) { - return TStripImpl<true, true>::StripString(from, to, criterion); + return TStripImpl<true, true>::StripString(from, to, criterion); } template <class T> @@ -149,7 +149,7 @@ static inline bool StripString(const T& from, T& to) { template <class T, class TStripCriterion> static inline T StripString(const T& from, TStripCriterion&& criterion) { - return TStripImpl<true, true>::StripString(from, criterion); + return TStripImpl<true, true>::StripString(from, criterion); } template <class T> @@ -169,12 +169,12 @@ static inline T StripStringRight(const T& from) { template <class T, class TStripCriterion> static inline T StripStringLeft(const T& from, TStripCriterion&& 
criterion) { - return TStripImpl<true, false>::StripString(from, criterion); + return TStripImpl<true, false>::StripString(from, criterion); } template <class T, class TStripCriterion> static inline T StripStringRight(const T& from, TStripCriterion&& criterion) { - return TStripImpl<false, true>::StripString(from, criterion); + return TStripImpl<false, true>::StripString(from, criterion); } /// Copies the given string removing leading and trailing spaces. diff --git a/util/string/subst_ut.cpp b/util/string/subst_ut.cpp index 21eccef7795..60ea6aabba6 100644 --- a/util/string/subst_ut.cpp +++ b/util/string/subst_ut.cpp @@ -130,8 +130,8 @@ Y_UNIT_TEST_SUITE(TStringSubst) { s = "aaa"; SubstGlobal(s, "a", ""); UNIT_ASSERT_EQUAL(s, TString("")); - s = "abcdefbcbcdfb"; - SubstGlobal(s, "bc", "bbc", 2); + s = "abcdefbcbcdfb"; + SubstGlobal(s, "bc", "bbc", 2); UNIT_ASSERT_EQUAL(s, TString("abcdefbbcbbcdfb")); s = "Москва ~ Париж"; SubstGlobal(s, " ~ ", " "); @@ -166,7 +166,7 @@ Y_UNIT_TEST_SUITE(TStringSubst) { UNIT_ASSERT_EQUAL(w, u"abcdaBcd"); TString s = "aaa"; - SubstGlobal(s, 'a', 'b', 1); + SubstGlobal(s, 'a', 'b', 1); UNIT_ASSERT_EQUAL(s, TString("abb")); } diff --git a/util/system/execpath.cpp b/util/system/execpath.cpp index 33198af58b0..1c91cd19020 100644 --- a/util/system/execpath.cpp +++ b/util/system/execpath.cpp @@ -64,7 +64,7 @@ static inline TString FreeBSDGetExecPath() { * https://www.freebsd.org/cgi/man.cgi?query=procfs&sektion=5&format=html */ TString path("/proc/curproc/file"); - return NFs::ReadLink(path); + return NFs::ReadLink(path); } else { return TString(); } @@ -129,7 +129,7 @@ static TString GetExecPathImpl() { } #elif defined(_linux_) || defined(_cygwin_) TString path("/proc/self/exe"); - return NFs::ReadLink(path); + return NFs::ReadLink(path); // TODO(yoda): check if the filename ends with " (deleted)" #elif defined(_freebsd_) TString execPath = FreeBSDGetExecPath(); diff --git a/util/system/file.cpp b/util/system/file.cpp index 4a261d020cb..6bf3ae81fd6 100644 --- a/util/system/file.cpp +++ b/util/system/file.cpp @@ -134,7 +134,7 @@ TFileHandle::TFileHandle(const TString& fName, EOpenMode oMode) noexcept { attrMode |= /*FILE_FLAG_NO_BUFFERING |*/ FILE_FLAG_WRITE_THROUGH; } - Fd_ = NFsPrivate::CreateFileWithUtf8Name(fName, faMode, shMode, fcMode, attrMode, inheritHandle); + Fd_ = NFsPrivate::CreateFileWithUtf8Name(fName, faMode, shMode, fcMode, attrMode, inheritHandle); if ((oMode & ::ForAppend) && (Fd_ != INVALID_FHANDLE)) { ::SetFilePointer(Fd_, 0, 0, FILE_END); diff --git a/util/system/file_ut.cpp b/util/system/file_ut.cpp index 941e6a50f3d..83e6eb9c0d9 100644 --- a/util/system/file_ut.cpp +++ b/util/system/file_ut.cpp @@ -210,7 +210,7 @@ void TFileTest::TestLocale() { UNIT_ASSERT_VALUES_EQUAL(f.GetName(), "Имя.txt"); UNIT_ASSERT_VALUES_EQUAL(f.GetLength(), 0); f.Close(); - UNIT_ASSERT(NFs::Remove("Имя.txt")); + UNIT_ASSERT(NFs::Remove("Имя.txt")); #ifdef _unix_ setlocale(LC_CTYPE, loc); #endif diff --git a/util/system/fs.cpp b/util/system/fs.cpp index d2611a8ccc4..e62be57d81a 100644 --- a/util/system/fs.cpp +++ b/util/system/fs.cpp @@ -18,7 +18,7 @@ bool NFs::Remove(const TString& path) { #if defined(_win_) - return NFsPrivate::WinRemove(path); + return NFsPrivate::WinRemove(path); #else return ::remove(path.data()) == 0; #endif @@ -86,21 +86,21 @@ bool NFs::MakeDirectoryRecursive(const TString& path, EFilePermissions mode, boo bool NFs::Rename(const TString& oldPath, const TString& newPath) { #if defined(_win_) - return NFsPrivate::WinRename(oldPath, newPath); + 
return NFsPrivate::WinRename(oldPath, newPath); #else return ::rename(oldPath.data(), newPath.data()) == 0; #endif } void NFs::HardLinkOrCopy(const TString& existingPath, const TString& newPath) { - if (!NFs::HardLink(existingPath, newPath)) { - Copy(existingPath, newPath); + if (!NFs::HardLink(existingPath, newPath)) { + Copy(existingPath, newPath); } } bool NFs::HardLink(const TString& existingPath, const TString& newPath) { #if defined(_win_) - return NFsPrivate::WinHardLink(existingPath, newPath); + return NFsPrivate::WinHardLink(existingPath, newPath); #elif defined(_unix_) return (0 == link(existingPath.data(), newPath.data())); #endif @@ -108,7 +108,7 @@ bool NFs::HardLink(const TString& existingPath, const TString& newPath) { bool NFs::SymLink(const TString& targetPath, const TString& linkPath) { #if defined(_win_) - return NFsPrivate::WinSymLink(targetPath, linkPath); + return NFsPrivate::WinSymLink(targetPath, linkPath); #elif defined(_unix_) return 0 == symlink(targetPath.data(), linkPath.data()); #endif @@ -116,7 +116,7 @@ bool NFs::SymLink(const TString& targetPath, const TString& linkPath) { TString NFs::ReadLink(const TString& path) { #if defined(_win_) - return NFsPrivate::WinReadLink(path); + return NFsPrivate::WinReadLink(path); #elif defined(_unix_) TTempBuf buf; while (true) { @@ -148,7 +148,7 @@ void NFs::Copy(const TString& existingPath, const TString& newPath) { bool NFs::Exists(const TString& path) { #if defined(_win_) - return NFsPrivate::WinExists(path); + return NFsPrivate::WinExists(path); #elif defined(_unix_) return access(path.data(), F_OK) == 0; #endif diff --git a/util/system/fs.h b/util/system/fs.h index 237daf2d2d7..11161c3a794 100644 --- a/util/system/fs.h +++ b/util/system/fs.h @@ -4,7 +4,7 @@ #include <util/generic/string.h> #include <util/generic/yexception.h> -namespace NFs { +namespace NFs { enum EFilePermission { FP_ALL_EXEC = 01, FP_ALL_WRITE = 02, @@ -23,11 +23,11 @@ namespace NFs { Y_DECLARE_FLAGS(EFilePermissions, EFilePermission); - /// Remove a file or empty directory - /// - /// @param[in] path Path to file or directory - /// @returns true on success or false otherwise - /// LastSystemError() is set in case of failure + /// Remove a file or empty directory + /// + /// @param[in] path Path to file or directory + /// @returns true on success or false otherwise + /// LastSystemError() is set in case of failure bool Remove(const TString& path); /// Remove a file or directory with contents @@ -77,54 +77,54 @@ namespace NFs { return MakeDirectoryRecursive(path, FP_COMMON_FILE, false); } - /// Rename a file or directory. - /// Removes newPath if it exists - /// - /// @param[in] oldPath Path to file or directory to rename - /// @param[in] newPath New path of file or directory - /// @returns true on success or false otherwise - /// LastSystemError() is set in case of failure + /// Rename a file or directory. 
+ /// Removes newPath if it exists + /// + /// @param[in] oldPath Path to file or directory to rename + /// @param[in] newPath New path of file or directory + /// @returns true on success or false otherwise + /// LastSystemError() is set in case of failure bool Rename(const TString& oldPath, const TString& newPath); - - /// Creates a new directory entry for a file - /// or creates a new one with the same content - /// - /// @param[in] existingPath Path to an existing file - /// @param[in] newPath New path of file + + /// Creates a new directory entry for a file + /// or creates a new one with the same content + /// + /// @param[in] existingPath Path to an existing file + /// @param[in] newPath New path of file void HardLinkOrCopy(const TString& existingPath, const TString& newPath); - - /// Creates a new directory entry for a file - /// - /// @param[in] existingPath Path to an existing file - /// @param[in] newPath New path of file - /// @returns true if new link was created or false otherwise - /// LastSystemError() is set in case of failure + + /// Creates a new directory entry for a file + /// + /// @param[in] existingPath Path to an existing file + /// @param[in] newPath New path of file + /// @returns true if new link was created or false otherwise + /// LastSystemError() is set in case of failure bool HardLink(const TString& existingPath, const TString& newPath); - - /// Creates a symlink to a file - /// - /// @param[in] targetPath Path to a target file - /// @param[in] linkPath Path of symlink - /// @returns true if new link was created or false otherwise - /// LastSystemError() is set in case of failure + + /// Creates a symlink to a file + /// + /// @param[in] targetPath Path to a target file + /// @param[in] linkPath Path of symlink + /// @returns true if new link was created or false otherwise + /// LastSystemError() is set in case of failure bool SymLink(const TString& targetPath, const TString& linkPath); - - /// Reads value of a symbolic link - /// - /// @param[in] path Path to a symlink - /// @returns File path that a symlink points to + + /// Reads value of a symbolic link + /// + /// @param[in] path Path to a symlink + /// @returns File path that a symlink points to TString ReadLink(const TString& path); - - /// Append contents of a file to a new file - /// - /// @param[in] dstPath Path to a destination file - /// @param[in] srcPath Path to a source file + + /// Append contents of a file to a new file + /// + /// @param[in] dstPath Path to a destination file + /// @param[in] srcPath Path to a source file void Cat(const TString& dstPath, const TString& srcPath); - - /// Copy contents of a file to a new file - /// - /// @param[in] existingPath Path to an existing file - /// @param[in] newPath New path of file + + /// Copy contents of a file to a new file + /// + /// @param[in] existingPath Path to an existing file + /// @param[in] newPath New path of file void Copy(const TString& existingPath, const TString& newPath); /// Returns path to the current working directory diff --git a/util/system/fs_ut.cpp b/util/system/fs_ut.cpp index de071ebf55b..7739866acba 100644 --- a/util/system/fs_ut.cpp +++ b/util/system/fs_ut.cpp @@ -6,7 +6,7 @@ #include "sysstat.h" #include "fstat.h" #include <util/folder/dirut.h> -#include <util/folder/path.h> +#include <util/folder/path.h> //WARNING: on windows the test must be run with administative rules @@ -114,7 +114,7 @@ void RunRenameTest(TFsPath src, TFsPath dst) { file.Write("123", 3); } - UNIT_ASSERT(NFs::Rename(src, dst)); + 
UNIT_ASSERT(NFs::Rename(src, dst)); UNIT_ASSERT(NFs::Exists(dst)); UNIT_ASSERT(!NFs::Exists(src)); @@ -128,7 +128,7 @@ void RunRenameTest(TFsPath src, TFsPath dst) { TFile file(dir1 / src, CreateNew | WrOnly); file.Write("123", 3); } - UNIT_ASSERT(NFs::Rename(dir1, dir2)); + UNIT_ASSERT(NFs::Rename(dir1, dir2)); UNIT_ASSERT(NFs::Exists(dir2 / src)); UNIT_ASSERT(!NFs::Exists(dir1)); @@ -137,11 +137,11 @@ void RunRenameTest(TFsPath src, TFsPath dst) { UNIT_ASSERT_VALUES_EQUAL(file.GetLength(), 3); } - UNIT_ASSERT(!NFs::Remove(src)); - UNIT_ASSERT(NFs::Remove(dst)); - UNIT_ASSERT(!NFs::Remove(dir1)); - UNIT_ASSERT(NFs::Remove(dir2 / src)); - UNIT_ASSERT(NFs::Remove(dir2)); + UNIT_ASSERT(!NFs::Remove(src)); + UNIT_ASSERT(NFs::Remove(dst)); + UNIT_ASSERT(!NFs::Remove(dir1)); + UNIT_ASSERT(NFs::Remove(dir2 / src)); + UNIT_ASSERT(NFs::Remove(dir2)); } void TFsTest::TestRename() { @@ -182,8 +182,8 @@ static void RunHardlinkTest(const TFsPath& src, const TFsPath& dst) { UNIT_ASSERT_VALUES_EQUAL(file.GetLength(), 5); } - UNIT_ASSERT(NFs::Remove(dst)); - UNIT_ASSERT(NFs::Remove(src)); + UNIT_ASSERT(NFs::Remove(dst)); + UNIT_ASSERT(NFs::Remove(src)); } void TFsTest::TestHardlink() { @@ -260,20 +260,20 @@ static void RunSymLinkTest(TString fileLocalName, TString symLinkName) { UNIT_ASSERT(fs.IsSymlink()); } - UNIT_ASSERT(NFs::Remove(symLinkName)); + UNIT_ASSERT(NFs::Remove(symLinkName)); UNIT_ASSERT(NFs::Exists(srcFile)); - UNIT_ASSERT(NFs::Remove(linkD1)); + UNIT_ASSERT(NFs::Remove(linkD1)); UNIT_ASSERT(NFs::Exists(srcFile)); - UNIT_ASSERT(!NFs::Remove(subDir)); + UNIT_ASSERT(!NFs::Remove(subDir)); - UNIT_ASSERT(NFs::Remove(srcFile)); - UNIT_ASSERT(NFs::Remove(linkD2)); - UNIT_ASSERT(NFs::Remove(dangling)); - UNIT_ASSERT(NFs::Remove(subsubDir1)); - UNIT_ASSERT(NFs::Remove(subsubDir2)); - UNIT_ASSERT(NFs::Remove(subDir)); + UNIT_ASSERT(NFs::Remove(srcFile)); + UNIT_ASSERT(NFs::Remove(linkD2)); + UNIT_ASSERT(NFs::Remove(dangling)); + UNIT_ASSERT(NFs::Remove(subsubDir1)); + UNIT_ASSERT(NFs::Remove(subsubDir2)); + UNIT_ASSERT(NFs::Remove(subDir)); } void TFsTest::TestSymlink() { diff --git a/util/system/fs_win.cpp b/util/system/fs_win.cpp index a410ccac06d..020d3ca4536 100644 --- a/util/system/fs_win.cpp +++ b/util/system/fs_win.cpp @@ -8,7 +8,7 @@ #include <winioctl.h> -namespace NFsPrivate { +namespace NFsPrivate { static LPCWSTR UTF8ToWCHAR(const TStringBuf str, TUtf16String& wstr) { wstr.resize(str.size()); size_t written = 0; @@ -41,31 +41,31 @@ namespace NFsPrivate { bool WinRename(const TString& oldPath, const TString& newPath) { TUtf16String op, np; - LPCWSTR opPtr = UTF8ToWCHAR(oldPath, op); - LPCWSTR npPtr = UTF8ToWCHAR(newPath, np); + LPCWSTR opPtr = UTF8ToWCHAR(oldPath, op); + LPCWSTR npPtr = UTF8ToWCHAR(newPath, np); if (!opPtr || !npPtr) { ::SetLastError(ERROR_INVALID_NAME); - return false; + return false; } - return MoveFileExW(opPtr, npPtr, MOVEFILE_REPLACE_EXISTING) != 0; + return MoveFileExW(opPtr, npPtr, MOVEFILE_REPLACE_EXISTING) != 0; } bool WinRemove(const TString& path) { TUtf16String wstr; - LPCWSTR wname = UTF8ToWCHAR(path, wstr); + LPCWSTR wname = UTF8ToWCHAR(path, wstr); if (!wname) { ::SetLastError(ERROR_INVALID_NAME); - return false; + return false; } WIN32_FILE_ATTRIBUTE_DATA fad; if (::GetFileAttributesExW(wname, GetFileExInfoStandard, &fad)) { if (fad.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) - return ::RemoveDirectoryW(wname) != 0; - return ::DeleteFileW(wname) != 0; + return ::RemoveDirectoryW(wname) != 0; + return ::DeleteFileW(wname) != 0; } - return false; + return 
false; } bool WinSymLink(const TString& targetName, const TString& linkName) { @@ -78,7 +78,7 @@ namespace NFsPrivate { TUtf16String tstr; LPCWSTR wname = UTF8ToWCHAR(tName, tstr); TUtf16String lstr; - LPCWSTR lname = UTF8ToWCHAR(linkName, lstr); + LPCWSTR lname = UTF8ToWCHAR(linkName, lstr); // we can't create a dangling link to a dir in this way ui32 attr = ::GetFileAttributesW(wname); @@ -103,8 +103,8 @@ namespace NFsPrivate { bool WinHardLink(const TString& existingPath, const TString& newPath) { TUtf16String ep, np; - LPCWSTR epPtr = UTF8ToWCHAR(existingPath, ep); - LPCWSTR npPtr = UTF8ToWCHAR(newPath, np); + LPCWSTR epPtr = UTF8ToWCHAR(existingPath, ep); + LPCWSTR npPtr = UTF8ToWCHAR(newPath, np); if (!epPtr || !npPtr) { ::SetLastError(ERROR_INVALID_NAME); return false; @@ -115,7 +115,7 @@ namespace NFsPrivate { bool WinExists(const TString& path) { TUtf16String buf; - LPCWSTR ptr = UTF8ToWCHAR(path, buf); + LPCWSTR ptr = UTF8ToWCHAR(path, buf); return ::GetFileAttributesW(ptr) != INVALID_FILE_ATTRIBUTES; } @@ -181,7 +181,7 @@ namespace NFsPrivate { // the end of edited part of <Ntifs.h> TString WinReadLink(const TString& name) { - TFileHandle h = CreateFileWithUtf8Name(name, GENERIC_READ, FILE_SHARE_READ, OPEN_EXISTING, + TFileHandle h = CreateFileWithUtf8Name(name, GENERIC_READ, FILE_SHARE_READ, OPEN_EXISTING, FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, true); TTempBuf buf; while (true) { @@ -204,7 +204,7 @@ namespace NFsPrivate { if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) { buf = TTempBuf(buf.Size() * 2); } else { - ythrow yexception() << "can't read link " << name; + ythrow yexception() << "can't read link " << name; } } } diff --git a/util/system/fstat.cpp b/util/system/fstat.cpp index 81e98cbc6bb..5db39741ae2 100644 --- a/util/system/fstat.cpp +++ b/util/system/fstat.cpp @@ -84,7 +84,7 @@ static bool GetStatByHandle(TSystemFStat& fs, FHANDLE f) { static bool GetStatByName(TSystemFStat& fs, const char* fileName, bool nofollow) { #ifdef _win_ - TFileHandle h = NFsPrivate::CreateFileWithUtf8Name(fileName, FILE_READ_ATTRIBUTES | FILE_READ_EA, FILE_SHARE_READ | FILE_SHARE_WRITE, + TFileHandle h = NFsPrivate::CreateFileWithUtf8Name(fileName, FILE_READ_ATTRIBUTES | FILE_READ_EA, FILE_SHARE_READ | FILE_SHARE_WRITE, OPEN_EXISTING, (nofollow ? 
FILE_FLAG_OPEN_REPARSE_POINT : 0) | FILE_FLAG_BACKUP_SEMANTICS, true); if (!h.IsOpen()) { diff --git a/util/system/fstat_ut.cpp b/util/system/fstat_ut.cpp index 160ecd936e6..938e0178399 100644 --- a/util/system/fstat_ut.cpp +++ b/util/system/fstat_ut.cpp @@ -53,7 +53,7 @@ Y_UNIT_TEST_SUITE(TestFileStat) { UNIT_ASSERT(fs.IsDir()); UNIT_ASSERT(!fs.IsSymlink()); //UNIT_ASSERT(fs.Size == 0); // it fails under unix - UNIT_ASSERT(NFs::Remove("tmpd")); + UNIT_ASSERT(NFs::Remove("tmpd")); fs = TFileStat("tmpd"); UNIT_ASSERT(!fs.IsFile()); UNIT_ASSERT(!fs.IsDir()); diff --git a/util/system/shellcommand.cpp b/util/system/shellcommand.cpp index b1989b5c8c3..47eefde0cd6 100644 --- a/util/system/shellcommand.cpp +++ b/util/system/shellcommand.cpp @@ -218,7 +218,7 @@ private: bool QuoteArguments = false; bool DetachSession = false; bool CloseStreams = false; - TAtomic ShouldCloseInput; + TAtomic ShouldCloseInput; TShellCommandOptions::EHandleMode InputMode = TShellCommandOptions::HANDLE_STREAM; TShellCommandOptions::EHandleMode OutputMode = TShellCommandOptions::HANDLE_STREAM; TShellCommandOptions::EHandleMode ErrorMode = TShellCommandOptions::HANDLE_STREAM; @@ -415,7 +415,7 @@ public: } inline void CloseInput() { - AtomicSet(ShouldCloseInput, true); + AtomicSet(ShouldCloseInput, true); } inline static bool TerminateIsRequired(void* processInfo) { @@ -1035,7 +1035,7 @@ void TShellCommand::TImpl::Communicate(TProcessInfo* pi) { if (!bytesToWrite) { bytesToWrite = input->Read(inputBuffer.Data(), inputBuffer.Capacity()); if (bytesToWrite == 0) { - if (AtomicGet(pi->Parent->ShouldCloseInput)) { + if (AtomicGet(pi->Parent->ShouldCloseInput)) { input = nullptr; } continue; diff --git a/util/system/shellcommand_ut.cpp b/util/system/shellcommand_ut.cpp index 9d849279d23..283bbd92833 100644 --- a/util/system/shellcommand_ut.cpp +++ b/util/system/shellcommand_ut.cpp @@ -1,16 +1,16 @@ #include "shellcommand.h" -#include "compat.h" +#include "compat.h" #include "defaults.h" -#include "fs.h" +#include "fs.h" #include "sigset.h" -#include "spinlock.h" - +#include "spinlock.h" + #include <library/cpp/testing/unittest/env.h> #include <library/cpp/testing/unittest/registar.h> - -#include <util/folder/dirut.h> -#include <util/random/random.h> + +#include <util/folder/dirut.h> +#include <util/random/random.h> #include <util/stream/file.h> #include <util/stream/str.h> #include <util/stream/mem.h> @@ -28,37 +28,37 @@ const size_t textSize = 20000; #endif class TGuardedStringStream: public IInputStream, public IOutputStream { -public: - TGuardedStringStream() { +public: + TGuardedStringStream() { Stream_.Reserve(100); - } - + } + TString Str() const { with_lock (Lock_) { return Stream_.Str(); - } + } return TString(); // line for compiler - } - -protected: - size_t DoRead(void* buf, size_t len) override { + } + +protected: + size_t DoRead(void* buf, size_t len) override { with_lock (Lock_) { return Stream_.Read(buf, len); - } - return 0; // line for compiler - } - - void DoWrite(const void* buf, size_t len) override { + } + return 0; // line for compiler + } + + void DoWrite(const void* buf, size_t len) override { with_lock (Lock_) { return Stream_.Write(buf, len); - } - } - -private: + } + } + +private: TAdaptiveLock Lock_; TStringStream Stream_; -}; - +}; + Y_UNIT_TEST_SUITE(TShellQuoteTest) { Y_UNIT_TEST(TestQuoteArg) { TString cmd; @@ -232,9 +232,9 @@ Y_UNIT_TEST_SUITE(TShellCommandTest) { options.SetClearSignalMask(true); options.SetCloseAllFdsOnExec(true); options.SetCloseInput(false); - TGuardedStringStream write; + 
TGuardedStringStream write; options.SetInputStream(&write); - TGuardedStringStream read; + TGuardedStringStream read; options.SetOutputStream(&read); options.SetUseShell(true); @@ -283,12 +283,12 @@ Y_UNIT_TEST_SUITE(TShellCommandTest) { Y_UNIT_TEST(TestInterruptSimple) { TShellCommandOptions options; options.SetAsync(true); - options.SetCloseInput(false); - TGuardedStringStream write; - options.SetInputStream(&write); // set input stream that will be waited by cat + options.SetCloseInput(false); + TGuardedStringStream write; + options.SetInputStream(&write); // set input stream that will be waited by cat TShellCommand cmd(catCommand, options); cmd.Run(); - sleep(1); + sleep(1); UNIT_ASSERT(TShellCommand::SHELL_RUNNING == cmd.GetStatus()); cmd.Terminate(); cmd.Wait(); diff --git a/util/system/tempfile.h b/util/system/tempfile.h index de249c129da..6fd736ad7b4 100644 --- a/util/system/tempfile.h +++ b/util/system/tempfile.h @@ -14,7 +14,7 @@ public: } inline ~TTempFile() { - NFs::Remove(Name()); + NFs::Remove(Name()); } inline const TString& Name() const noexcept { diff --git a/ydb/core/actorlib_impl/destruct_actor.h b/ydb/core/actorlib_impl/destruct_actor.h index adc236911d5..d706537b28d 100644 --- a/ydb/core/actorlib_impl/destruct_actor.h +++ b/ydb/core/actorlib_impl/destruct_actor.h @@ -10,7 +10,7 @@ namespace NActors { class TDestructActor: public TActor<TDestructActor> { public: static constexpr auto ActorActivityType() { - return NKikimrServices::TActivity::INTERCONNECT_DESTRUCT_ACTOR; + return NKikimrServices::TActivity::INTERCONNECT_DESTRUCT_ACTOR; } TDestructActor() noexcept @@ -19,7 +19,7 @@ public: private: - STATEFN(WorkingState) { + STATEFN(WorkingState) { /* Destroy event and eventhandle */ ev.Reset(); } diff --git a/ydb/core/base/events.h b/ydb/core/base/events.h index f5fedfe19b2..dea72e7db63 100644 --- a/ydb/core/base/events.h +++ b/ydb/core/base/events.h @@ -134,7 +134,7 @@ struct TKikimrEvents : TEvents { ES_IAM_TOKEN_SERVICE, ES_HEALTH_CHECK, ES_DQ = NYql::NDq::TDqEvents::ES_DQ_COMPUTE, // 4212 - ES_YQ, // 4213 + ES_YQ, // 4213 ES_CHANGE_EXCHANGE, ES_DATABASE_SERVICE, //4215 ES_SEQUENCESHARD, // 4216 diff --git a/ydb/core/base/quoter.h b/ydb/core/base/quoter.h index 41a19775bc8..87f8a4bb4cb 100644 --- a/ydb/core/base/quoter.h +++ b/ydb/core/base/quoter.h @@ -42,8 +42,8 @@ struct TEvQuota { const ui64 Amount; const bool IsUsedAmount; - TResourceLeaf(const TResourceLeaf&) = default; - + TResourceLeaf(const TResourceLeaf&) = default; + TResourceLeaf(ui64 quoterId, ui64 resourceId, ui64 amount, bool isUsedAmount = false) : QuoterId(quoterId) , ResourceId(resourceId) @@ -96,7 +96,7 @@ struct TEvQuota { }; // when cookie present - cancel one request - // when cookie omitted - cancel all requests from sender + // when cookie omitted - cancel all requests from sender struct TEvCancelRequest : public TEventLocal<TEvClearance, EvCancelRequest> {}; // b/w service and quoter proxy @@ -145,12 +145,12 @@ struct TEvQuota { const ui64 ResourceId; const ui64 Tick; - const double Consumed; + const double Consumed; const TTimeSeriesMap<double> History; const ui64 QueueSize; - const double QueueWeight; - const double ExpectedRate; - const double Cap; + const double QueueWeight; + const double ExpectedRate; + const double Cap; TProxyStat(ui64 id, ui64 tick, double consumed, const TTimeSeriesMap<double>& history, ui64 queueSize, double queueWeight, double rate, double cap) : ResourceId(id) @@ -158,7 +158,7 @@ struct TEvQuota { , Consumed(consumed) , History(history) , QueueSize(queueSize) - , 
QueueWeight(queueWeight) + , QueueWeight(queueWeight) , ExpectedRate(rate) , Cap(cap) {} @@ -193,17 +193,17 @@ struct TEvQuota { struct TUpdateTick { ui32 Channel; ui32 Ticks; - double Rate; + double Rate; ETickPolicy Policy; TUpdateTick() : Channel(0) , Ticks(0) - , Rate(0.0) + , Rate(0.0) , Policy(ETickPolicy::Sustained) {} - TUpdateTick(ui32 channel, ui32 ticks, double rate, ETickPolicy policy) + TUpdateTick(ui32 channel, ui32 ticks, double rate, ETickPolicy policy) : Channel(channel) , Ticks(ticks) , Rate(rate) @@ -222,11 +222,11 @@ struct TEvQuota { struct TProxyResourceUpdate { const ui64 ResourceId; - const double SustainedRate; + const double SustainedRate; const TVector<TUpdateTick> Update; const EUpdateState ResourceState; - TProxyResourceUpdate(ui64 resourceId, double sustainedRate, TVector<TUpdateTick> &&update, EUpdateState resState) + TProxyResourceUpdate(ui64 resourceId, double sustainedRate, TVector<TUpdateTick> &&update, EUpdateState resState) : ResourceId(resourceId) , SustainedRate(sustainedRate) , Update(std::move(update)) diff --git a/ydb/core/base/ya.make b/ydb/core/base/ya.make index 83db5825c3a..32886899b59 100644 --- a/ydb/core/base/ya.make +++ b/ydb/core/base/ya.make @@ -118,8 +118,8 @@ RESOURCE( ydb/core/base/kikimr_issue.txt kikimr_issue.txt ) -GENERATE_ENUM_SERIALIZATION(quoter.h) - +GENERATE_ENUM_SERIALIZATION(quoter.h) + END() RECURSE_FOR_TESTS( diff --git a/ydb/core/blobstorage/lwtrace_probes/blobstorage_probes.h b/ydb/core/blobstorage/lwtrace_probes/blobstorage_probes.h index 7e20a7bd81a..a1a181c04ae 100644 --- a/ydb/core/blobstorage/lwtrace_probes/blobstorage_probes.h +++ b/ydb/core/blobstorage/lwtrace_probes/blobstorage_probes.h @@ -7,26 +7,26 @@ // Helper class for printing pdisk id in the same was as it done for counters struct TPDiskIdField { typedef ui32 TStoreType; - typedef ui32 TFuncParam; - + typedef ui32 TFuncParam; + static void ToString(ui32 value, TString* out) { *out = Sprintf("%09" PRIu32, value); } - static ui32 ToStoreType(ui32 value) { - return value; - } + static ui32 ToStoreType(ui32 value) { + return value; + } }; namespace NKikimr { namespace NPDisk { struct TRequestTypeField { typedef ui32 TStoreType; - typedef ui32 TFuncParam; - + typedef ui32 TFuncParam; + static void ToString(ui32 value, TString* out); - static ui32 ToStoreType(ui32 value) { - return value; - } + static ui32 ToStoreType(ui32 value) { + return value; + } }; }} @@ -35,22 +35,22 @@ namespace NKikimr { struct TBlobPutTactics { typedef ui64 TStoreType; - typedef ui64 TFuncParam; - + typedef ui64 TFuncParam; + static void ToString(ui64 value, TString* out); - static ui64 ToStoreType(ui64 value) { - return value; - } + static ui64 ToStoreType(ui64 value) { + return value; + } }; struct TEventTypeField { typedef ui64 TStoreType; - typedef ui64 TFuncParam; - + typedef ui64 TFuncParam; + static void ToString(ui64 value, TString* out); - static ui64 ToStoreType(ui64 value) { - return value; - } + static ui64 ToStoreType(ui64 value) { + return value; + } }; } diff --git a/ydb/core/client/flat_ut_client.h b/ydb/core/client/flat_ut_client.h index 07f5b30cc6b..0ed26b833ce 100644 --- a/ydb/core/client/flat_ut_client.h +++ b/ydb/core/client/flat_ut_client.h @@ -13,12 +13,12 @@ namespace NFlatTests { class TFlatMsgBusClient : public Tests::TClient { public: - TFlatMsgBusClient(const Tests::TServerSettings& settings) - : TClient(settings) - {} - + TFlatMsgBusClient(const Tests::TServerSettings& settings) + : TClient(settings) + {} + TFlatMsgBusClient(ui16 port) - : 
TFlatMsgBusClient(Tests::TServerSettings(port)) + : TFlatMsgBusClient(Tests::TServerSettings(port)) {} void InitRoot() { diff --git a/ydb/core/client/minikql_compile/yql_expr_minikql.cpp b/ydb/core/client/minikql_compile/yql_expr_minikql.cpp index 76f38bf35cb..867fa8206f7 100644 --- a/ydb/core/client/minikql_compile/yql_expr_minikql.cpp +++ b/ydb/core/client/minikql_compile/yql_expr_minikql.cpp @@ -1532,10 +1532,10 @@ private: ++i; } - if (!resolveErrors.empty()) { + if (!resolveErrors.empty()) { TMiniKQLCompileResult result(resolveErrors); - return SendResponseAndDie(result, std::move(compileResolveCookies), ctx); - } + return SendResponseAndDie(result, std::move(compileResolveCookies), ctx); + } TMiniKQLCompileResult result; diff --git a/ydb/core/client/server/msgbus_server_persqueue.cpp b/ydb/core/client/server/msgbus_server_persqueue.cpp index 78b83ef3a3b..27dd82102bc 100644 --- a/ydb/core/client/server/msgbus_server_persqueue.cpp +++ b/ydb/core/client/server/msgbus_server_persqueue.cpp @@ -2,23 +2,23 @@ #include "msgbus_server_persqueue.h" #include "msgbus_server_pq_metacache.h" -#include "msgbus_server_pq_metarequest.h" +#include "msgbus_server_pq_metarequest.h" #include <library/cpp/actors/core/interconnect.h> #include <library/cpp/actors/interconnect/interconnect.h> #include <ydb/core/persqueue/events/global.h> #include <ydb/core/base/appdata.h> #include <ydb/core/tx/tx_proxy/proxy.h> -#include <util/generic/is_in.h> - +#include <util/generic/is_in.h> + namespace NKikimr { namespace NMsgBusProxy { using namespace NSchemeCache; using namespace NPqMetaCacheV2; -const TDuration TPersQueueBaseRequestProcessor::TIMEOUT = TDuration::MilliSeconds(90000); - +const TDuration TPersQueueBaseRequestProcessor::TIMEOUT = TDuration::MilliSeconds(90000); + namespace { const ui32 DefaultTimeout = 90000; const TDuration CHECK_INFLY_SEMAPHORE_DURATION = TDuration::Seconds(1); @@ -131,12 +131,12 @@ TProcessingResult ProcessMetaCacheSingleTopicsResponse( } NKikimrClient::TResponse CreateErrorReply(EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) { - NKikimrClient::TResponse rec; - rec.SetStatus(status); - rec.SetErrorCode(code); - rec.SetErrorReason(errorReason); - return rec; -} + NKikimrClient::TResponse rec; + rec.SetStatus(status); + rec.SetErrorCode(code); + rec.SetErrorReason(errorReason); + return rec; +} struct TTopicInfo { TVector<ui64> Tablets; @@ -165,90 +165,90 @@ struct TTabletInfo { }; TPersQueueBaseRequestProcessor::TPersQueueBaseRequestProcessor(const NKikimrClient::TPersQueueRequest& request, const TActorId& pqMetaCacheId, bool listNodes) - : RequestProto(new NKikimrClient::TPersQueueRequest(request)) - , RequestId(RequestProto->HasRequestId() ? RequestProto->GetRequestId() : "<none>") + : RequestProto(new NKikimrClient::TPersQueueRequest(request)) + , RequestId(RequestProto->HasRequestId() ? 
RequestProto->GetRequestId() : "<none>") , PqMetaCache(pqMetaCacheId) - , ListNodes(listNodes) -{ -} + , ListNodes(listNodes) +{ +} void TPersQueueBaseRequestProcessor::SendErrorReplyAndDie(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) { - SendReplyAndDie(CreateErrorReply(status, code, errorReason), ctx); -} + SendReplyAndDie(CreateErrorReply(status, code, errorReason), ctx); +} bool TPersQueueBaseRequestProcessor::ReadyForAnswer(const TActorContext& ) { return ReadyToCreateChildren() && NeedChildrenCreation == false && ChildrenAnswered == Children.size(); -} - -void TPersQueueBaseRequestProcessor::AnswerAndDie(const TActorContext& ctx) { - try { - SendReplyAndDie(MergeSubactorReplies(), ctx); - } catch (const std::exception& ex) { +} + +void TPersQueueBaseRequestProcessor::AnswerAndDie(const TActorContext& ctx) { + try { + SendReplyAndDie(MergeSubactorReplies(), ctx); + } catch (const std::exception& ex) { SendErrorReplyAndDie(ctx, MSTATUS_ERROR, NPersQueue::NErrorCode::ERROR, ex.what()); - } -} - -void TPersQueueBaseRequestProcessor::Bootstrap(const TActorContext& ctx) { - LOG_INFO_S(ctx, NKikimrServices::PERSQUEUE, "proxy got request " << RequestId); - + } +} + +void TPersQueueBaseRequestProcessor::Bootstrap(const TActorContext& ctx) { + LOG_INFO_S(ctx, NKikimrServices::PERSQUEUE, "proxy got request " << RequestId); + StartTimestamp = ctx.Now(); ctx.Send(PqMetaCache, new NPqMetaCacheV2::TEvPqNewMetaCache::TEvDescribeAllTopicsRequest(TopicPrefix(ctx))); - if (ListNodes) { + if (ListNodes) { const TActorId nameserviceId = GetNameserviceActorId(); - ctx.Send(nameserviceId, new TEvInterconnect::TEvListNodes()); - } - + ctx.Send(nameserviceId, new TEvInterconnect::TEvListNodes()); + } + Become(&TPersQueueBaseRequestProcessor::StateFunc, ctx, CHECK_INFLY_SEMAPHORE_DURATION, new TEvents::TEvWakeup()); -} - -void TPersQueueBaseRequestProcessor::Die(const TActorContext& ctx) { - // Clear children - for (const auto& child : Children) { - ctx.Send(child.first, new TEvents::TEvPoisonPill()); - } - TActorBootstrapped::Die(ctx); -} - -STFUNC(TPersQueueBaseRequestProcessor::StateFunc) { - switch (ev->GetTypeRewrite()) { - HFunc(TEvInterconnect::TEvNodesInfo, Handle); +} + +void TPersQueueBaseRequestProcessor::Die(const TActorContext& ctx) { + // Clear children + for (const auto& child : Children) { + ctx.Send(child.first, new TEvents::TEvPoisonPill()); + } + TActorBootstrapped::Die(ctx); +} + +STFUNC(TPersQueueBaseRequestProcessor::StateFunc) { + switch (ev->GetTypeRewrite()) { + HFunc(TEvInterconnect::TEvNodesInfo, Handle); HFunc(NPqMetaCacheV2::TEvPqNewMetaCache::TEvDescribeTopicsResponse, Handle); HFunc(NPqMetaCacheV2::TEvPqNewMetaCache::TEvDescribeAllTopicsResponse, Handle); - HFunc(TEvPersQueue::TEvResponse, Handle); - CFunc(TEvents::TSystem::Wakeup, HandleTimeout); - CFunc(NActors::TEvents::TSystem::PoisonPill, Die); - } -} - -void TPersQueueBaseRequestProcessor::Handle(TEvPersQueue::TEvResponse::TPtr& ev, const TActorContext& ctx) { - if (ev->Get()->Record.GetStatus() != MSTATUS_OK) { - return SendReplyAndDie(std::move(ev->Get()->Record), ctx); - } - auto answeredChild = Children.find(ev->Sender); - Y_VERIFY(answeredChild != Children.end()); - Y_VERIFY(!answeredChild->second->ActorAnswered); - answeredChild->second->Response.Swap(&ev->Get()->Record); - answeredChild->second->ActorAnswered = true; - ++ChildrenAnswered; - Y_VERIFY(ChildrenAnswered <= Children.size()); - + HFunc(TEvPersQueue::TEvResponse, Handle); + 
CFunc(TEvents::TSystem::Wakeup, HandleTimeout); + CFunc(NActors::TEvents::TSystem::PoisonPill, Die); + } +} + +void TPersQueueBaseRequestProcessor::Handle(TEvPersQueue::TEvResponse::TPtr& ev, const TActorContext& ctx) { + if (ev->Get()->Record.GetStatus() != MSTATUS_OK) { + return SendReplyAndDie(std::move(ev->Get()->Record), ctx); + } + auto answeredChild = Children.find(ev->Sender); + Y_VERIFY(answeredChild != Children.end()); + Y_VERIFY(!answeredChild->second->ActorAnswered); + answeredChild->second->Response.Swap(&ev->Get()->Record); + answeredChild->second->ActorAnswered = true; + ++ChildrenAnswered; + Y_VERIFY(ChildrenAnswered <= Children.size()); + if (ReadyForAnswer(ctx)) { - return AnswerAndDie(ctx); - } -} - -void TPersQueueBaseRequestProcessor::Handle(TEvInterconnect::TEvNodesInfo::TPtr& ev, const TActorContext& ctx) { - Y_VERIFY(ListNodes); + return AnswerAndDie(ctx); + } +} + +void TPersQueueBaseRequestProcessor::Handle(TEvInterconnect::TEvNodesInfo::TPtr& ev, const TActorContext& ctx) { + Y_VERIFY(ListNodes); NodesInfo.reset(new TNodesInfo(ev->Release())); - if (ReadyToCreateChildren()) { - if (CreateChildren(ctx)) { - return; - } - } -} - -void TPersQueueBaseRequestProcessor::HandleTimeout(const TActorContext& ctx) { + if (ReadyToCreateChildren()) { + if (CreateChildren(ctx)) { + return; + } + } +} + +void TPersQueueBaseRequestProcessor::HandleTimeout(const TActorContext& ctx) { if (ctx.Now() - StartTimestamp > TIMEOUT) { SendErrorReplyAndDie(ctx, MSTATUS_TIMEOUT, NPersQueue::NErrorCode::ERROR, "Timeout while waiting for response, may be just slow, Marker# PQ16"); return; @@ -257,70 +257,70 @@ void TPersQueueBaseRequestProcessor::HandleTimeout(const TActorContext& ctx) { CreateChildrenIfNeeded(ctx); } ctx.Schedule(CHECK_INFLY_SEMAPHORE_DURATION, new TEvents::TEvWakeup()); -} - +} + void TPersQueueBaseRequestProcessor::GetTopicsListOrThrow(const ::google::protobuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& requests, THashMap<TString, std::shared_ptr<THashSet<ui64>>>& partitionsToRequest) { - for (const auto& topicRequest : requests) { - if (topicRequest.GetTopic().empty()) { - throw std::runtime_error("TopicRequest must have Topic field."); - } + for (const auto& topicRequest : requests) { + if (topicRequest.GetTopic().empty()) { + throw std::runtime_error("TopicRequest must have Topic field."); + } std::shared_ptr<THashSet<ui64>> partitionsToRequestOnTopic(new THashSet<ui64>()); // nonconst - partitionsToRequest[topicRequest.GetTopic()] = partitionsToRequestOnTopic; - for (ui32 partition : topicRequest.GetPartition()) { - const bool inserted = partitionsToRequestOnTopic->insert(partition).second; - if (!inserted) { - TStringBuilder desc; - desc << "multiple partition " << partition - << " in TopicRequest for topic '" << topicRequest.GetTopic() << "'"; - throw std::runtime_error(desc); - } - } - - const bool res = TopicsToRequest.insert(topicRequest.GetTopic()).second; - if (!res) { - TStringBuilder desc; - desc << "multiple TopicRequest for topic '" << topicRequest.GetTopic() << "'"; - throw std::runtime_error(desc); - } - } - -} - + partitionsToRequest[topicRequest.GetTopic()] = partitionsToRequestOnTopic; + for (ui32 partition : topicRequest.GetPartition()) { + const bool inserted = partitionsToRequestOnTopic->insert(partition).second; + if (!inserted) { + TStringBuilder desc; + desc << "multiple partition " << partition + << " in TopicRequest for topic '" << topicRequest.GetTopic() << "'"; + throw std::runtime_error(desc); + } + } + + const 
bool res = TopicsToRequest.insert(topicRequest.GetTopic()).second; + if (!res) { + TStringBuilder desc; + desc << "multiple TopicRequest for topic '" << topicRequest.GetTopic() << "'"; + throw std::runtime_error(desc); + } + } + +} + void TPersQueueBaseRequestProcessor::Handle( NPqMetaCacheV2::TEvPqNewMetaCache::TEvDescribeTopicsResponse::TPtr&, const TActorContext& ) { Y_FAIL(); } - + void TPersQueueBaseRequestProcessor::Handle( NPqMetaCacheV2::TEvPqNewMetaCache::TEvDescribeAllTopicsResponse::TPtr& ev, const TActorContext& ctx ) { auto& path = ev->Get()->Path; if (!ev->Get()->Success) { return SendErrorReplyAndDie(ctx, MSTATUS_ERROR, NPersQueue::NErrorCode::UNKNOWN_TOPIC, - TStringBuilder() << "no path '" << path << "', Marker# PQ17"); - } - + TStringBuilder() << "no path '" << path << "', Marker# PQ17"); + } + if (path != TopicPrefix(ctx)) { return SendErrorReplyAndDie(ctx, MSTATUS_ERROR, NPersQueue::NErrorCode::UNKNOWN_TOPIC, TStringBuilder() << "path '" << path << "' has no correct root prefix '" << TopicPrefix(ctx) - << "', Marker# PQ18"); - } - + << "', Marker# PQ18"); + } + SchemeCacheResponse = std::move(ev->Get()->Result); - if (ReadyToCreateChildren()) { - if (CreateChildren(ctx)) { - return; - } - } -} - -bool TPersQueueBaseRequestProcessor::ReadyToCreateChildren() const { + if (ReadyToCreateChildren()) { + if (CreateChildren(ctx)) { + return; + } + } +} + +bool TPersQueueBaseRequestProcessor::ReadyToCreateChildren() const { return SchemeCacheResponse && (!ListNodes || NodesInfo.get() != nullptr); -} - -bool TPersQueueBaseRequestProcessor::CreateChildren(const TActorContext& ctx) { +} + +bool TPersQueueBaseRequestProcessor::CreateChildren(const TActorContext& ctx) { for (const auto& child : SchemeCacheResponse->ResultSet) { if (child.Kind == TSchemeCacheNavigate::EKind::KindTopic) { TString name = child.PQGroupInfo->Description.GetName(); @@ -328,11 +328,11 @@ bool TPersQueueBaseRequestProcessor::CreateChildren(const TActorContext& ctx) { name = child.Path.back(); } if (!TopicsToRequest.empty() && !IsIn(TopicsToRequest, name)) { - continue; - } + continue; + } ChildrenToCreate.emplace_back(new TPerTopicInfo(child)); - } - } + } + } NeedChildrenCreation = true; return CreateChildrenIfNeeded(ctx); } @@ -367,74 +367,74 @@ bool TPersQueueBaseRequestProcessor::CreateChildrenIfNeeded(const TActorContext& } } - Y_VERIFY(topics.size() == Children.size()); - - if (!TopicsToRequest.empty() && TopicsToRequest.size() != topics.size()) { - // Write helpful error description - Y_VERIFY(topics.size() < TopicsToRequest.size()); - TStringBuilder errorDesc; - errorDesc << "the following topics are not created: "; - for (const TString& topic : TopicsToRequest) { - if (!IsIn(topics, topic)) { - errorDesc << topic << ", "; - } - } + Y_VERIFY(topics.size() == Children.size()); + + if (!TopicsToRequest.empty() && TopicsToRequest.size() != topics.size()) { + // Write helpful error description + Y_VERIFY(topics.size() < TopicsToRequest.size()); + TStringBuilder errorDesc; + errorDesc << "the following topics are not created: "; + for (const TString& topic : TopicsToRequest) { + if (!IsIn(topics, topic)) { + errorDesc << topic << ", "; + } + } SendErrorReplyAndDie(ctx, MSTATUS_ERROR, NPersQueue::NErrorCode::UNKNOWN_TOPIC, - errorDesc << "Marker# PQ95"); - return true; - } + errorDesc << "Marker# PQ95"); + return true; + } if (ReadyForAnswer(ctx)) { - AnswerAndDie(ctx); - return true; - } - return false; -} - -NKikimrClient::TResponse TPersQueueBaseRequestProcessor::MergeSubactorReplies() { - 
NKikimrClient::TResponse response; - response.SetStatus(MSTATUS_OK); // We need to have status event if we have no children + AnswerAndDie(ctx); + return true; + } + return false; +} + +NKikimrClient::TResponse TPersQueueBaseRequestProcessor::MergeSubactorReplies() { + NKikimrClient::TResponse response; + response.SetStatus(MSTATUS_OK); // We need to have status event if we have no children response.SetErrorCode(NPersQueue::NErrorCode::OK); - for (const auto& child : Children) { - response.MergeFrom(child.second->Response); - } - return response; -} - -TPersQueueBaseRequestProcessor::TNodesInfo::TNodesInfo(THolder<TEvInterconnect::TEvNodesInfo> nodesInfoReply) - : NodesInfoReply(std::move(nodesInfoReply)) -{ - HostNames.reserve(NodesInfoReply->Nodes.size()); - for (const NActors::TEvInterconnect::TNodeInfo& info : NodesInfoReply->Nodes) { - HostNames.emplace(info.NodeId, info.Host); - } -} - + for (const auto& child : Children) { + response.MergeFrom(child.second->Response); + } + return response; +} + +TPersQueueBaseRequestProcessor::TNodesInfo::TNodesInfo(THolder<TEvInterconnect::TEvNodesInfo> nodesInfoReply) + : NodesInfoReply(std::move(nodesInfoReply)) +{ + HostNames.reserve(NodesInfoReply->Nodes.size()); + for (const NActors::TEvInterconnect::TNodeInfo& info : NodesInfoReply->Nodes) { + HostNames.emplace(info.NodeId, info.Host); + } +} + TTopicInfoBasedActor::TTopicInfoBasedActor(const TSchemeEntry& topicEntry, const TString& topicName) : TActorBootstrapped<TTopicInfoBasedActor>() , SchemeEntry(topicEntry) , Name(topicName) , ProcessingResult(ProcessMetaCacheSingleTopicsResponse(SchemeEntry)) -{ -} - +{ +} + void TTopicInfoBasedActor::Bootstrap(const TActorContext &ctx) { Become(&TTopicInfoBasedActor::StateFunc); BootstrapImpl(ctx); -} - +} + STFUNC(TTopicInfoBasedActor::StateFunc) { - switch (ev->GetTypeRewrite()) { - CFunc(NActors::TEvents::TSystem::PoisonPill, Die); - default: - LOG_WARN_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected event type: " << ev->GetTypeRewrite() << ", " << (ev->HasEvent() ? ev->GetBase()->ToString() : "<no data>")); - } -} - - + switch (ev->GetTypeRewrite()) { + CFunc(NActors::TEvents::TSystem::PoisonPill, Die); + default: + LOG_WARN_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected event type: " << ev->GetTypeRewrite() << ", " << (ev->HasEvent() ? 
ev->GetBase()->ToString() : "<no data>")); + } +} + + class TMessageBusServerPersQueueImpl : public TActorBootstrapped<TMessageBusServerPersQueueImpl> { using TEvAllTopicsDescribeRequest = NMsgBusProxy::NPqMetaCacheV2::TEvPqNewMetaCache::TEvDescribeAllTopicsRequest; using TEvAllTopicsDescribeResponse = NMsgBusProxy::NPqMetaCacheV2::TEvPqNewMetaCache::TEvDescribeAllTopicsResponse; - + protected: NKikimrClient::TPersQueueRequest RequestProto; const TString RequestId; @@ -456,7 +456,7 @@ protected: THashMap<ui64, TTabletInfo> TabletInfo; ui32 TopicsAnswered; - THashSet<ui64> TabletsDiscovered; + THashSet<ui64> TabletsDiscovered; THashSet<ui64> TabletsAnswered; ui32 AclRequests; ui32 DescribeRequests; @@ -471,7 +471,7 @@ public: virtual ~TMessageBusServerPersQueueImpl() {} - virtual void SendReplyAndDie(NKikimrClient::TResponse&& response, const TActorContext& ctx) = 0; + virtual void SendReplyAndDie(NKikimrClient::TResponse&& response, const TActorContext& ctx) = 0; TMessageBusServerPersQueueImpl(const NKikimrClient::TPersQueueRequest& request, const TActorId& schemeCache) : RequestProto(request) @@ -527,7 +527,7 @@ public: auto& d = meta.GetCmdGetReadSessionsInfo(); for (ui32 i = 0; i < d.TopicSize(); ++i) { if (d.GetTopic(i).empty()) { - ErrorReason = "empty topic in GetReadSessionsInfo request"; + ErrorReason = "empty topic in GetReadSessionsInfo request"; return; } TopicInfo[d.GetTopic(i)]; @@ -619,7 +619,7 @@ public: } } } - SendReplyAndDie(std::move(record), ctx); + SendReplyAndDie(std::move(record), ctx); } void AnswerGetTopicMetadata(const TActorContext& ctx) @@ -640,7 +640,7 @@ public: } } - SendReplyAndDie(std::move(record), ctx); + SendReplyAndDie(std::move(record), ctx); } void AnswerGetPartitionOffsets(const TActorContext& ctx) @@ -676,7 +676,7 @@ public: res->SetErrorReason("partition is not ready yet"); } } - SendReplyAndDie(std::move(record), ctx); + SendReplyAndDie(std::move(record), ctx); } void AnswerGetPartitionStatus(const TActorContext& ctx) @@ -712,7 +712,7 @@ public: res->SetStatus(NKikimrPQ::TStatusResponse::STATUS_UNKNOWN); } } - SendReplyAndDie(std::move(record), ctx); + SendReplyAndDie(std::move(record), ctx); } void AnswerGetReadSessionsInfo(const TActorContext& ctx) @@ -772,7 +772,7 @@ public: } } - SendReplyAndDie(std::move(record), ctx); + SendReplyAndDie(std::move(record), ctx); } @@ -785,7 +785,7 @@ public: return false; const auto& meta = RequestProto.GetMetaRequest(); if (meta.HasCmdGetPartitionLocations()) { - if (TopicsAnswered != TopicInfo.size() || TabletInfo.size() != TabletsDiscovered.size() || !NodesInfo) + if (TopicsAnswered != TopicInfo.size() || TabletInfo.size() != TabletsDiscovered.size() || !NodesInfo) return false; AnswerGetPartitionLocations(ctx); return true; @@ -822,7 +822,7 @@ public: ProcessFetchRequestResult(ev, ctx); return; } - SendReplyAndDie(std::move(ev->Get()->Record), ctx); + SendReplyAndDie(std::move(ev->Get()->Record), ctx); } void Handle(TEvPersQueue::TEvOffsetsResponse::TPtr& ev, const TActorContext& ctx) { @@ -899,7 +899,7 @@ public: const TString& topic = p.first; if (!p.second.BalancerTabletId) { - ErrorReason = Sprintf("topic '%s' is not created, Marker# PQ94", topic.c_str()); + ErrorReason = Sprintf("topic '%s' is not created, Marker# PQ94", topic.c_str()); return SendReplyAndDie(CreateErrorReply(MSTATUS_ERROR, NPersQueue::NErrorCode::UNKNOWN_TOPIC, ctx), ctx); } ProcessMetadata(p.first, p.second, ctx); @@ -1111,7 +1111,7 @@ public: if (it != TabletInfo.end()) { TabletsAnswered.insert(tabletId); if 
(RequestProto.HasMetaRequest() && (RequestProto.GetMetaRequest().HasCmdGetPartitionLocations() || RequestProto.GetMetaRequest().HasCmdGetReadSessionsInfo())) { - TabletsDiscovered.insert(tabletId); // Disconnect event can arrive after connect event and this hash set will take it into account. + TabletsDiscovered.insert(tabletId); // Disconnect event can arrive after connect event and this hash set will take it into account. } AnswerIfCanForMeta(ctx); return true; @@ -1133,7 +1133,7 @@ public: void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& ctx) { TEvTabletPipe::TEvClientConnected *msg = ev->Get(); - const ui64 tabletId = ev->Get()->TabletId; + const ui64 tabletId = ev->Get()->TabletId; if (msg->Status != NKikimrProto::OK) { if (HandlePipeError(tabletId, ctx)) @@ -1151,7 +1151,7 @@ public: if (it != TabletInfo.end()) { ui32 nodeId = ev->Get()->ServerId.NodeId(); it->second.NodeId = nodeId; - TabletsDiscovered.insert(tabletId); + TabletsDiscovered.insert(tabletId); AnswerIfCanForMeta(ctx); } @@ -1201,7 +1201,7 @@ public: record.MutableFetchResponse()->Swap(&FetchResponse); record.SetStatus(MSTATUS_OK); record.SetErrorCode(NPersQueue::NErrorCode::OK); - return SendReplyAndDie(std::move(record), ctx); + return SendReplyAndDie(std::move(record), ctx); } const auto& clientId = fetch.GetClientId(); Y_VERIFY(FetchRequestReadsDone < fetch.PartitionSize()); @@ -1335,7 +1335,7 @@ public: HFunc(TEvPersQueue::TEvHasDataInfoResponse, Handle); HFunc(TEvPersQueue::TEvReadSessionsInfoResponse, Handle); CFunc(TEvents::TSystem::Wakeup, HandleTimeout); - CFunc(NActors::TEvents::TSystem::PoisonPill, Die); + CFunc(NActors::TEvents::TSystem::PoisonPill, Die); ) private: bool GetTopicsList(const ::google::protobuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& requests) { @@ -1367,25 +1367,25 @@ private: } }; -class TErrorReplier : public TActorBootstrapped<TErrorReplier> { +class TErrorReplier : public TActorBootstrapped<TErrorReplier> { public: TErrorReplier(const NKikimrClient::TPersQueueRequest& request, const TActorId& /*schemeCache*/) - : RequestId(request.HasRequestId() ? request.GetRequestId() : "<none>") - { - } + : RequestId(request.HasRequestId() ? request.GetRequestId() : "<none>") + { + } - virtual void SendReplyAndDie(NKikimrClient::TResponse&& response, const TActorContext& ctx) = 0; + virtual void SendReplyAndDie(NKikimrClient::TResponse&& response, const TActorContext& ctx) = 0; - void Bootstrap(const TActorContext& ctx) { + void Bootstrap(const TActorContext& ctx) { SendReplyAndDie(CreateErrorReply(MSTATUS_ERROR, NPersQueue::NErrorCode::BAD_REQUEST, ErrorText), ctx); - } + } static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::PQ_BASE_REQUEST_PROCESSOR; - } + } - TString ErrorText; - TString RequestId; + TString ErrorText; + TString RequestId; }; template <template <class TImpl, class... TArgs> class TSenderImpl, class... T> @@ -1394,67 +1394,67 @@ IActor* CreatePersQueueRequestProcessor( std::shared_ptr<IPersQueueGetReadSessionsInfoWorkerFactory> pqReadSessionsInfoWorkerFactory, T&&... 
constructorParams ) { - try { - if (request.HasMetaRequest() + request.HasPartitionRequest() + request.HasFetchRequest() > 1) { - throw std::runtime_error("only one from meta partition or fetch requests must be filled"); - } - if (request.HasMetaRequest()) { - auto& meta = request.GetMetaRequest(); - const size_t count = meta.HasCmdGetPartitionLocations() + meta.HasCmdGetPartitionOffsets() + - meta.HasCmdGetTopicMetadata() + meta.HasCmdGetPartitionStatus() + meta.HasCmdGetReadSessionsInfo(); - if (count != 1) { - throw std::runtime_error("multiple or none requests in MetaRequest"); - } - if (meta.HasCmdGetPartitionLocations()) { - return new TSenderImpl<TPersQueueGetPartitionLocationsProcessor>(std::forward<T>(constructorParams)...); - } else if (meta.HasCmdGetPartitionOffsets()) { - return new TSenderImpl<TPersQueueGetPartitionOffsetsProcessor>(std::forward<T>(constructorParams)...); - } else if (meta.HasCmdGetTopicMetadata()) { - return new TSenderImpl<TPersQueueGetTopicMetadataProcessor>(std::forward<T>(constructorParams)...); - } else if (meta.HasCmdGetPartitionStatus()) { - return new TSenderImpl<TPersQueueGetPartitionStatusProcessor>(std::forward<T>(constructorParams)...); - } else if (meta.HasCmdGetReadSessionsInfo()) { + try { + if (request.HasMetaRequest() + request.HasPartitionRequest() + request.HasFetchRequest() > 1) { + throw std::runtime_error("only one from meta partition or fetch requests must be filled"); + } + if (request.HasMetaRequest()) { + auto& meta = request.GetMetaRequest(); + const size_t count = meta.HasCmdGetPartitionLocations() + meta.HasCmdGetPartitionOffsets() + + meta.HasCmdGetTopicMetadata() + meta.HasCmdGetPartitionStatus() + meta.HasCmdGetReadSessionsInfo(); + if (count != 1) { + throw std::runtime_error("multiple or none requests in MetaRequest"); + } + if (meta.HasCmdGetPartitionLocations()) { + return new TSenderImpl<TPersQueueGetPartitionLocationsProcessor>(std::forward<T>(constructorParams)...); + } else if (meta.HasCmdGetPartitionOffsets()) { + return new TSenderImpl<TPersQueueGetPartitionOffsetsProcessor>(std::forward<T>(constructorParams)...); + } else if (meta.HasCmdGetTopicMetadata()) { + return new TSenderImpl<TPersQueueGetTopicMetadataProcessor>(std::forward<T>(constructorParams)...); + } else if (meta.HasCmdGetPartitionStatus()) { + return new TSenderImpl<TPersQueueGetPartitionStatusProcessor>(std::forward<T>(constructorParams)...); + } else if (meta.HasCmdGetReadSessionsInfo()) { return new TSenderImpl<TPersQueueGetReadSessionsInfoProcessor>( std::forward<T>(constructorParams)..., pqReadSessionsInfoWorkerFactory ); - } else { - throw std::runtime_error("Not implemented yet"); - } - } else if (request.HasPartitionRequest()) { - return new TSenderImpl<TMessageBusServerPersQueueImpl>(std::forward<T>(constructorParams)...); - } else if (request.HasFetchRequest()) { - return new TSenderImpl<TMessageBusServerPersQueueImpl>(std::forward<T>(constructorParams)...); - } else { - throw std::runtime_error("empty request"); - } - } catch (const std::exception& ex) { - auto* replier = new TSenderImpl<TErrorReplier>(std::forward<T>(constructorParams)...); - replier->ErrorText = ex.what(); - return replier; - } + } else { + throw std::runtime_error("Not implemented yet"); + } + } else if (request.HasPartitionRequest()) { + return new TSenderImpl<TMessageBusServerPersQueueImpl>(std::forward<T>(constructorParams)...); + } else if (request.HasFetchRequest()) { + return new TSenderImpl<TMessageBusServerPersQueueImpl>(std::forward<T>(constructorParams)...); + } 
else { + throw std::runtime_error("empty request"); + } + } catch (const std::exception& ex) { + auto* replier = new TSenderImpl<TErrorReplier>(std::forward<T>(constructorParams)...); + replier->ErrorText = ex.what(); + return replier; + } } -template <class TImplActor> -class TMessageBusServerPersQueue : public TImplActor, TMessageBusSessionIdentHolder { +template <class TImplActor> +class TMessageBusServerPersQueue : public TImplActor, TMessageBusSessionIdentHolder { public: template <class... T> TMessageBusServerPersQueue(TBusMessageContext& msg, T&&... constructorParams) : TImplActor(static_cast<TBusPersQueue*>(msg.GetMessage())->Record, std::forward<T>(constructorParams)...) - , TMessageBusSessionIdentHolder(msg) + , TMessageBusSessionIdentHolder(msg) {} - virtual ~TMessageBusServerPersQueue() = default; + virtual ~TMessageBusServerPersQueue() = default; - void SendReplyAndDie(NKikimrClient::TResponse&& record, const TActorContext& ctx) override { - THolder<TBusResponse> result(new TBusResponse()); - result->Record.Swap(&record); - LOG_INFO_S(ctx, NKikimrServices::PERSQUEUE, "proxy answer " << TImplActor::RequestId); + void SendReplyAndDie(NKikimrClient::TResponse&& record, const TActorContext& ctx) override { + THolder<TBusResponse> result(new TBusResponse()); + result->Record.Swap(&record); + LOG_INFO_S(ctx, NKikimrServices::PERSQUEUE, "proxy answer " << TImplActor::RequestId); - SendReplyMove(result.Release()); - - TImplActor::Die(ctx); + SendReplyMove(result.Release()); + + TImplActor::Die(ctx); } }; @@ -1464,15 +1464,15 @@ IActor* CreateMessageBusServerPersQueue( const TActorId& schemeCache, std::shared_ptr<IPersQueueGetReadSessionsInfoWorkerFactory> pqReadSessionsInfoWorkerFactory ) { - const NKikimrClient::TPersQueueRequest& request = static_cast<TBusPersQueue*>(msg.GetMessage())->Record; + const NKikimrClient::TPersQueueRequest& request = static_cast<TBusPersQueue*>(msg.GetMessage())->Record; return CreatePersQueueRequestProcessor<TMessageBusServerPersQueue>( request, pqReadSessionsInfoWorkerFactory, msg, schemeCache ); -} - +} + IActor* CreateActorServerPersQueue( const TActorId& parentId, const NKikimrClient::TPersQueueRequest& request, diff --git a/ydb/core/client/server/msgbus_server_persqueue.h b/ydb/core/client/server/msgbus_server_persqueue.h index 52c67832e4e..ee5f8e53d57 100644 --- a/ydb/core/client/server/msgbus_server_persqueue.h +++ b/ydb/core/client/server/msgbus_server_persqueue.h @@ -6,12 +6,12 @@ #include <ydb/core/base/tablet_pipe.h> #include <ydb/core/persqueue/events/global.h> #include <ydb/core/tx/scheme_cache/scheme_cache.h> - + #include <library/cpp/actors/core/interconnect.h> - -#include <util/generic/ptr.h> -#include <util/system/compiler.h> - + +#include <util/generic/ptr.h> +#include <util/system/compiler.h> + namespace NKikimr { namespace NMsgBusProxy { @@ -27,7 +27,7 @@ struct TProcessingResult { TProcessingResult ProcessMetaCacheAllTopicsResponse(NPqMetaCacheV2::TEvPqNewMetaCache::TEvDescribeAllTopicsResponse::TPtr& response); TProcessingResult ProcessMetaCacheSingleTopicsResponse(const NSchemeCache::TSchemeCacheNavigate::TEntry& entry); -// Worker actor creation +// Worker actor creation IActor* CreateMessageBusServerPersQueue( TBusMessageContext& msg, const TActorId& schemeCache, @@ -40,55 +40,55 @@ IActor* CreateActorServerPersQueue( std::shared_ptr<IPersQueueGetReadSessionsInfoWorkerFactory> pqReadSessionsInfoWorkerFactory = nullptr ); - + NKikimrClient::TResponse CreateErrorReply(EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, 
const TString& errorReason); - -template <class TResponseEvent> -inline ui64 GetTabletId(const TResponseEvent* ev) { - return ev->Record.GetTabletId(); + +template <class TResponseEvent> +inline ui64 GetTabletId(const TResponseEvent* ev) { + return ev->Record.GetTabletId(); } - -template <> -inline ui64 GetTabletId<TEvTabletPipe::TEvClientConnected>(const TEvTabletPipe::TEvClientConnected* ev) { - return ev->TabletId; + +template <> +inline ui64 GetTabletId<TEvTabletPipe::TEvClientConnected>(const TEvTabletPipe::TEvClientConnected* ev) { + return ev->TabletId; } - -// Base class for PQ requests. It requests EvGetNode and creates worker actors for concrete topics. -// Than it starts merge over children responses. -// To use actor you need to: -// 1. Inherit from it. -// 2. Implement CreateTopicSubactor() and, optionally, MergeSubactorReplies() methods. -class TPersQueueBaseRequestProcessor : public TActorBootstrapped<TPersQueueBaseRequestProcessor> { -protected: + +// Base class for PQ requests. It requests EvGetNode and creates worker actors for concrete topics. +// Than it starts merge over children responses. +// To use actor you need to: +// 1. Inherit from it. +// 2. Implement CreateTopicSubactor() and, optionally, MergeSubactorReplies() methods. +class TPersQueueBaseRequestProcessor : public TActorBootstrapped<TPersQueueBaseRequestProcessor> { +protected: using TSchemeEntry = NSchemeCache::TSchemeCacheNavigate::TEntry; using TPQGroupInfoPtr = TIntrusiveConstPtr<NSchemeCache::TSchemeCacheNavigate::TPQGroupInfo>; using ESchemeStatus = NSchemeCache::TSchemeCacheNavigate::EStatus; - struct TPerTopicInfo { + struct TPerTopicInfo { TPerTopicInfo() { } explicit TPerTopicInfo(const TSchemeEntry& topicEntry) - : TopicEntry(topicEntry) - { - } - + : TopicEntry(topicEntry) + { + } + TActorId ActorId; TSchemeEntry TopicEntry; - NKikimrClient::TResponse Response; - bool ActorAnswered = false; - }; - -public: - struct TNodesInfo { - THolder<TEvInterconnect::TEvNodesInfo> NodesInfoReply; - THashMap<ui32, TString> HostNames; - - explicit TNodesInfo(THolder<TEvInterconnect::TEvNodesInfo> nodesInfoReply); - }; - -public: - static const TDuration TIMEOUT; - + NKikimrClient::TResponse Response; + bool ActorAnswered = false; + }; + +public: + struct TNodesInfo { + THolder<TEvInterconnect::TEvNodesInfo> NodesInfoReply; + THashMap<ui32, TString> HostNames; + + explicit TNodesInfo(THolder<TEvInterconnect::TEvNodesInfo> nodesInfoReply); + }; + +public: + static const TDuration TIMEOUT; + TInstant StartTimestamp = TInstant::Zero(); bool NeedChildrenCreation = false; @@ -98,77 +98,77 @@ public: static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::PQ_BASE_REQUEST_PROCESSOR; - } - -protected: + } + +protected: TPersQueueBaseRequestProcessor(const NKikimrClient::TPersQueueRequest& request, const TActorId& pqMetaCacheId, bool listNodes); - + ~TPersQueueBaseRequestProcessor(); -public: - void Bootstrap(const TActorContext& ctx); - -protected: +public: + void Bootstrap(const TActorContext& ctx); + +protected: bool CreateChildrenIfNeeded(const TActorContext& ctx); virtual THolder<IActor> CreateTopicSubactor(const TSchemeEntry& topicEntry, const TString& name) = 0; // Creates actor for processing one concrete topic. 
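    // Usage sketch (illustrative; TExampleProcessor and TExampleTopicWorker are hypothetical names).
    // A concrete processor derives from TPersQueueBaseRequestProcessor, forwards the request to the
    // protected base constructor and overrides CreateTopicSubactor(); SendReplyAndDie() remains pure
    // virtual and is supplied by the transport wrapper (e.g. TMessageBusServerPersQueue):
    //
    //   class TExampleProcessor : public TPersQueueBaseRequestProcessor {
    //   public:
    //       TExampleProcessor(const NKikimrClient::TPersQueueRequest& request, const TActorId& pqMetaCache)
    //           : TPersQueueBaseRequestProcessor(request, pqMetaCache, /*listNodes=*/false)
    //       {}
    //   protected:
    //       THolder<IActor> CreateTopicSubactor(const TSchemeEntry& topicEntry, const TString& name) override {
    //           // One worker actor per topic; the default MergeSubactorReplies() then merges
    //           // the per-topic responses into a single TResponse.
    //           return MakeHolder<TExampleTopicWorker>(SelfId(), topicEntry, name);
    //       }
    //   };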
- virtual NKikimrClient::TResponse MergeSubactorReplies(); - - virtual void SendReplyAndDie(NKikimrClient::TResponse&& record, const TActorContext& ctx) = 0; + virtual NKikimrClient::TResponse MergeSubactorReplies(); + + virtual void SendReplyAndDie(NKikimrClient::TResponse&& record, const TActorContext& ctx) = 0; void SendErrorReplyAndDie(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason); - - bool ReadyToCreateChildren() const; - - // true returned from this function means that we called Die(). + + bool ReadyToCreateChildren() const; + + // true returned from this function means that we called Die(). [[nodiscard]] bool CreateChildren(const TActorContext& ctx); - + virtual bool ReadyForAnswer(const TActorContext& ctx); - void AnswerAndDie(const TActorContext& ctx); + void AnswerAndDie(const TActorContext& ctx); void GetTopicsListOrThrow(const ::google::protobuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& requests, THashMap<TString, std::shared_ptr<THashSet<ui64>>>& partitionsToRequest); - + virtual STFUNC(StateFunc); - - void Handle(TEvInterconnect::TEvNodesInfo::TPtr& ev, const TActorContext& ctx); + + void Handle(TEvInterconnect::TEvNodesInfo::TPtr& ev, const TActorContext& ctx); void Handle(NPqMetaCacheV2::TEvPqNewMetaCache::TEvDescribeTopicsResponse::TPtr& ev, const TActorContext& ctx); void Handle(NPqMetaCacheV2::TEvPqNewMetaCache::TEvDescribeAllTopicsResponse::TPtr& ev, const TActorContext& ctx); - void Handle(TEvPersQueue::TEvResponse::TPtr& ev, const TActorContext& ctx); - void HandleTimeout(const TActorContext& ctx); - - void Die(const TActorContext& ctx) override; - -protected: - // Request + void Handle(TEvPersQueue::TEvResponse::TPtr& ev, const TActorContext& ctx); + void HandleTimeout(const TActorContext& ctx); + + void Die(const TActorContext& ctx) override; + +protected: + // Request std::shared_ptr<const NKikimrClient::TPersQueueRequest> RequestProto; - const TString RequestId; - THashSet<TString> TopicsToRequest; // Topics that we need to request. If this set id empty, we are interested in all existing topics. - + const TString RequestId; + THashSet<TString> TopicsToRequest; // Topics that we need to request. If this set id empty, we are interested in all existing topics. + const TActorId PqMetaCache; THashMap<TActorId, THolder<TPerTopicInfo>> Children; - size_t ChildrenAnswered = 0; + size_t ChildrenAnswered = 0; THolder<NSchemeCache::TSchemeCacheNavigate> SchemeCacheResponse; - - // Nodes info - const bool ListNodes; + + // Nodes info + const bool ListNodes; std::shared_ptr<const TNodesInfo> NodesInfo; -}; - -// Helper actor that sends TEvGetBalancerDescribe and checks ACL (ACL is not implemented yet). +}; + +// Helper actor that sends TEvGetBalancerDescribe and checks ACL (ACL is not implemented yet). 
class TTopicInfoBasedActor : public TActorBootstrapped<TTopicInfoBasedActor> { -protected: +protected: using TSchemeEntry = NSchemeCache::TSchemeCacheNavigate::TEntry; using TPQGroupInfoPtr = TIntrusiveConstPtr<NSchemeCache::TSchemeCacheNavigate::TPQGroupInfo>; using ESchemeStatus = NSchemeCache::TSchemeCacheNavigate::EStatus; - + TTopicInfoBasedActor(const TSchemeEntry& topicEntry, const TString& topicName); - + virtual void BootstrapImpl(const TActorContext& ctx) = 0; virtual void Answer(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) = 0; - - virtual void SendReplyAndDie(NKikimrClient::TResponse&& record, const TActorContext& ctx) = 0; - - STFUNC(StateFunc); - -protected: + + virtual void SendReplyAndDie(NKikimrClient::TResponse&& record, const TActorContext& ctx) = 0; + + STFUNC(StateFunc); + +protected: TActorId SchemeCache; TSchemeEntry SchemeEntry; TString Name; @@ -179,194 +179,194 @@ public: static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::PQ_BASE_REQUEST_PROCESSOR; } -}; - -template <class TBase> -class TReplierToParent : public TBase { -public: - template <class... T> +}; + +template <class TBase> +class TReplierToParent : public TBase { +public: + template <class... T> explicit TReplierToParent(const TActorId& parent, T&&... t) - : TBase(std::forward<T>(t)...) - , Parent(parent) - { - } - -protected: - void SendReplyAndDie(NKikimrClient::TResponse&& record, const TActorContext& ctx) override { - THolder<TEvPersQueue::TEvResponse> result(new TEvPersQueue::TEvResponse()); - result->Record.Swap(&record); - - ctx.Send(Parent, result.Release()); - + : TBase(std::forward<T>(t)...) + , Parent(parent) + { + } + +protected: + void SendReplyAndDie(NKikimrClient::TResponse&& record, const TActorContext& ctx) override { + THolder<TEvPersQueue::TEvResponse> result(new TEvPersQueue::TEvResponse()); + result->Record.Swap(&record); + + ctx.Send(Parent, result.Release()); + Die(ctx); } void Die(const TActorContext& ctx) override { - TBase::Die(ctx); - } - + TBase::Die(ctx); + } + void SendErrorReplyAndDie(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) { - SendReplyAndDie(CreateErrorReply(status, code, errorReason), ctx); - } - -protected: + SendReplyAndDie(CreateErrorReply(status, code, errorReason), ctx); + } + +protected: const TActorId Parent; -}; - -// Pipe client helpers -template <class TBase, class TPipeEvent> -class TPipesWaiterActor : public TBase { -protected: - template <class... T> - explicit TPipesWaiterActor(T&&... t) - : TBase(std::forward<T>(t)...) - { - } - +}; + +// Pipe client helpers +template <class TBase, class TPipeEvent> +class TPipesWaiterActor : public TBase { +protected: + template <class... T> + explicit TPipesWaiterActor(T&&... t) + : TBase(std::forward<T>(t)...) 
+ { + } + TActorId CreatePipe(ui64 tabletId, const TActorContext& ctx) { - NTabletPipe::TClientConfig clientConfig; + NTabletPipe::TClientConfig clientConfig; const TActorId pipe = ctx.RegisterWithSameMailbox(NTabletPipe::CreateClient(ctx.SelfID, tabletId, clientConfig)); - Y_VERIFY(Pipes.emplace(tabletId, pipe).second); - return pipe; - } - - bool HasTabletPipe(ui64 tabletId) const { - return IsIn(Pipes, tabletId); - } - - template <class TEventPtr> + Y_VERIFY(Pipes.emplace(tabletId, pipe).second); + return pipe; + } + + bool HasTabletPipe(ui64 tabletId) const { + return IsIn(Pipes, tabletId); + } + + template <class TEventPtr> TActorId CreatePipeAndSend(ui64 tabletId, const TActorContext& ctx, TEventPtr ev) { const TActorId pipe = CreatePipe(tabletId, ctx); - NTabletPipe::SendData(ctx, pipe, ev.Release()); - return pipe; - } - - // Wait in case TPipeEvent is not TEvTabletPipe::TEvClientConnected. - // true returned from this function means that we called Die(). + NTabletPipe::SendData(ctx, pipe, ev.Release()); + return pipe; + } + + // Wait in case TPipeEvent is not TEvTabletPipe::TEvClientConnected. + // true returned from this function means that we called Die(). [[nodiscard]] bool WaitAllPipeEvents(const TActorContext& ctx) { - static_assert(TPipeEvent::EventType != TEvTabletPipe::TEvClientConnected::EventType, "Use WaitAllConnections()"); - - if (EventsAreReady()) { - if (OnPipeEventsAreReady(ctx)) { - return true; - } - } else { - TBase::Become(&TPipesWaiterActor::WaitAllPipeEventsStateFunc); - } - return false; - } - - // Wait in case TPipeEvent is TEvTabletPipe::TEvClientConnected. - // true returned from this function means that we called Die(). + static_assert(TPipeEvent::EventType != TEvTabletPipe::TEvClientConnected::EventType, "Use WaitAllConnections()"); + + if (EventsAreReady()) { + if (OnPipeEventsAreReady(ctx)) { + return true; + } + } else { + TBase::Become(&TPipesWaiterActor::WaitAllPipeEventsStateFunc); + } + return false; + } + + // Wait in case TPipeEvent is TEvTabletPipe::TEvClientConnected. + // true returned from this function means that we called Die(). [[nodiscard]] bool WaitAllConnections(const TActorContext& ctx) { - static_assert(TPipeEvent::EventType == TEvTabletPipe::TEvClientConnected::EventType, "Use WaitAllPipeEvents()"); - - if (EventsAreReady()) { - if (OnPipeEventsAreReady(ctx)) { - return true; - } - } else { - TBase::Become(&TPipesWaiterActor::WaitAllConnectionsStateFunc); - } - return false; - } - - // true returned from this function means that we called Die(). + static_assert(TPipeEvent::EventType == TEvTabletPipe::TEvClientConnected::EventType, "Use WaitAllPipeEvents()"); + + if (EventsAreReady()) { + if (OnPipeEventsAreReady(ctx)) { + return true; + } + } else { + TBase::Become(&TPipesWaiterActor::WaitAllConnectionsStateFunc); + } + return false; + } + + // true returned from this function means that we called Die(). [[nodiscard]] virtual bool OnPipeEvent(ui64 tabletId, typename TPipeEvent::TPtr& ev, const TActorContext& /*ctx*/) { - Y_VERIFY(!IsIn(PipeAnswers, tabletId) || !PipeAnswers.find(tabletId)->second); - PipeAnswers[tabletId] = ev; - return false; - } - - // true returned from this function means that we called Die(). + Y_VERIFY(!IsIn(PipeAnswers, tabletId) || !PipeAnswers.find(tabletId)->second); + PipeAnswers[tabletId] = ev; + return false; + } + + // true returned from this function means that we called Die(). 
[[nodiscard]] virtual bool OnClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& /*ev*/, const TActorContext& /*ctx*/) { - return false; - } - - // true returned from this function means that we called Die(). + return false; + } + + // true returned from this function means that we called Die(). [[nodiscard]] virtual bool OnPipeEventsAreReady(const TActorContext& ctx) = 0; - - void Die(const TActorContext& ctx) override { - for (const auto& pipe : Pipes) { - NTabletPipe::CloseClient(ctx, pipe.second); - } - TBase::Die(ctx); - } - - STFUNC(WaitAllPipeEventsStateFunc) { - static_assert(TPipeEvent::EventType != TEvTabletPipe::TEvClientConnected::EventType, "Use WaitAllConnectionsStateFunc"); - switch (ev->GetTypeRewrite()) { - HFunc(TEvTabletPipe::TEvClientDestroyed, Handle); - HFunc(TEvTabletPipe::TEvClientConnected, Handle); - HFunc(TPipeEvent, HandlePipeEvent); - CFunc(NActors::TEvents::TSystem::PoisonPill, Die); - default: - LOG_WARN_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected event type: " << ev->GetTypeRewrite() << ", " << (ev->HasEvent() ? ev->GetBase()->ToString() : "<no data>")); - } - } - - STFUNC(WaitAllConnectionsStateFunc) { - static_assert(TPipeEvent::EventType == TEvTabletPipe::TEvClientConnected::EventType, "Use WaitAllPipeEventsStateFunc"); - switch (ev->GetTypeRewrite()) { - HFunc(TEvTabletPipe::TEvClientDestroyed, Handle); - HFunc(TEvTabletPipe::TEvClientConnected, HandlePipeEvent); - CFunc(NActors::TEvents::TSystem::PoisonPill, Die); - default: - LOG_WARN_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected event type: " << ev->GetTypeRewrite() << ", " << (ev->HasEvent() ? ev->GetBase()->ToString() : "<no data>")); - } - } - - void HandlePipeEvent(typename TPipeEvent::TPtr& ev, const TActorContext& ctx) { - const ui64 tabletId = GetTabletId(ev->Get()); - Y_VERIFY(tabletId != 0); - if (OnPipeEvent(tabletId, ev, ctx)) { - return; - } - if (EventsAreReady()) { - if (OnPipeEventsAreReady(ctx)) { - return; - } - } - } - - void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& ctx) { - TEvTabletPipe::TEvClientConnected* msg = ev->Get(); - const ui64 tabletId = GetTabletId(msg); - Y_VERIFY(tabletId != 0); - if (msg->Status != NKikimrProto::OK) { - // Create record for answer - PipeAnswers[tabletId]; - if (EventsAreReady()) { - if (OnPipeEventsAreReady(ctx)) { - return; - } - } - } else { - if (OnClientConnected(ev, ctx)) { - return; - } - } - } - - void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev, const TActorContext& ctx) { - // Create record for answer - const ui64 tabletId = ev->Get()->TabletId; - Y_VERIFY(tabletId != 0); - PipeAnswers[tabletId]; - if (EventsAreReady()) { - if (OnPipeEventsAreReady(ctx)) { - return; - } - } - } - - bool EventsAreReady() const { - return Pipes.size() == PipeAnswers.size(); - } - -protected: + + void Die(const TActorContext& ctx) override { + for (const auto& pipe : Pipes) { + NTabletPipe::CloseClient(ctx, pipe.second); + } + TBase::Die(ctx); + } + + STFUNC(WaitAllPipeEventsStateFunc) { + static_assert(TPipeEvent::EventType != TEvTabletPipe::TEvClientConnected::EventType, "Use WaitAllConnectionsStateFunc"); + switch (ev->GetTypeRewrite()) { + HFunc(TEvTabletPipe::TEvClientDestroyed, Handle); + HFunc(TEvTabletPipe::TEvClientConnected, Handle); + HFunc(TPipeEvent, HandlePipeEvent); + CFunc(NActors::TEvents::TSystem::PoisonPill, Die); + default: + LOG_WARN_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected event type: " << ev->GetTypeRewrite() << ", " << (ev->HasEvent() ? 
ev->GetBase()->ToString() : "<no data>")); + } + } + + STFUNC(WaitAllConnectionsStateFunc) { + static_assert(TPipeEvent::EventType == TEvTabletPipe::TEvClientConnected::EventType, "Use WaitAllPipeEventsStateFunc"); + switch (ev->GetTypeRewrite()) { + HFunc(TEvTabletPipe::TEvClientDestroyed, Handle); + HFunc(TEvTabletPipe::TEvClientConnected, HandlePipeEvent); + CFunc(NActors::TEvents::TSystem::PoisonPill, Die); + default: + LOG_WARN_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected event type: " << ev->GetTypeRewrite() << ", " << (ev->HasEvent() ? ev->GetBase()->ToString() : "<no data>")); + } + } + + void HandlePipeEvent(typename TPipeEvent::TPtr& ev, const TActorContext& ctx) { + const ui64 tabletId = GetTabletId(ev->Get()); + Y_VERIFY(tabletId != 0); + if (OnPipeEvent(tabletId, ev, ctx)) { + return; + } + if (EventsAreReady()) { + if (OnPipeEventsAreReady(ctx)) { + return; + } + } + } + + void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& ctx) { + TEvTabletPipe::TEvClientConnected* msg = ev->Get(); + const ui64 tabletId = GetTabletId(msg); + Y_VERIFY(tabletId != 0); + if (msg->Status != NKikimrProto::OK) { + // Create record for answer + PipeAnswers[tabletId]; + if (EventsAreReady()) { + if (OnPipeEventsAreReady(ctx)) { + return; + } + } + } else { + if (OnClientConnected(ev, ctx)) { + return; + } + } + } + + void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev, const TActorContext& ctx) { + // Create record for answer + const ui64 tabletId = ev->Get()->TabletId; + Y_VERIFY(tabletId != 0); + PipeAnswers[tabletId]; + if (EventsAreReady()) { + if (OnPipeEventsAreReady(ctx)) { + return; + } + } + } + + bool EventsAreReady() const { + return Pipes.size() == PipeAnswers.size(); + } + +protected: THashMap<ui64, TActorId> Pipes; // Tablet id -> pipe - THashMap<ui64, typename TPipeEvent::TPtr> PipeAnswers; // Mapped by tablet id -}; - -} // namespace NMsgBusProxy -} // namespace NKikimr + THashMap<ui64, typename TPipeEvent::TPtr> PipeAnswers; // Mapped by tablet id +}; + +} // namespace NMsgBusProxy +} // namespace NKikimr diff --git a/ydb/core/client/server/msgbus_server_pq_metarequest.cpp b/ydb/core/client/server/msgbus_server_pq_metarequest.cpp index 32f37251cc5..5280a4a11f2 100644 --- a/ydb/core/client/server/msgbus_server_pq_metarequest.cpp +++ b/ydb/core/client/server/msgbus_server_pq_metarequest.cpp @@ -1,11 +1,11 @@ -#include "msgbus_server_pq_metarequest.h" +#include "msgbus_server_pq_metarequest.h" #include "msgbus_server_pq_read_session_info.h" - -namespace NKikimr { -namespace NMsgBusProxy { + +namespace NKikimr { +namespace NMsgBusProxy { using namespace NSchemeCache; - -template <class TTopicResult> + +template <class TTopicResult> void SetErrorCode( TTopicResult* topicResult, const TSchemeCacheNavigate::TEntry& topicEntry @@ -13,8 +13,8 @@ void SetErrorCode( NPersQueue::NErrorCode::EErrorCode code = NPersQueue::NErrorCode::OK; if (topicEntry.Status != TSchemeCacheNavigate::EStatus::Ok || !topicEntry.PQGroupInfo) { code = NPersQueue::NErrorCode::UNKNOWN_TOPIC; - } - topicResult->SetErrorCode(code); + } + topicResult->SetErrorCode(code); if (code == NPersQueue::NErrorCode::UNKNOWN_TOPIC) { topicResult->SetErrorReason("topic not found"); } else if (code == NPersQueue::NErrorCode::INITIALIZING) { @@ -22,36 +22,36 @@ void SetErrorCode( } else if (code != NPersQueue::NErrorCode::OK) { topicResult->SetErrorReason("internal error"); } -} - -// -// GetTopicMetadata command -// - +} + +// +// GetTopicMetadata command +// + 
TPersQueueGetTopicMetadataProcessor::TPersQueueGetTopicMetadataProcessor(const NKikimrClient::TPersQueueRequest& request, const TActorId& schemeCache) - : TPersQueueBaseRequestProcessor(request, schemeCache, false) -{ - const auto& topics = RequestProto->GetMetaRequest().GetCmdGetTopicMetadata().GetTopic(); - TopicsToRequest.insert(topics.begin(), topics.end()); - if (IsIn(TopicsToRequest, TString())) { - throw std::runtime_error("empty topic in GetTopicMetadata request"); - } -} - + : TPersQueueBaseRequestProcessor(request, schemeCache, false) +{ + const auto& topics = RequestProto->GetMetaRequest().GetCmdGetTopicMetadata().GetTopic(); + TopicsToRequest.insert(topics.begin(), topics.end()); + if (IsIn(TopicsToRequest, TString())) { + throw std::runtime_error("empty topic in GetTopicMetadata request"); + } +} + THolder<IActor> TPersQueueGetTopicMetadataProcessor::CreateTopicSubactor( const TSchemeEntry& topicEntry, const TString& name ) { return MakeHolder<TPersQueueGetTopicMetadataTopicWorker>(SelfId(), topicEntry, name); -} - +} + TPersQueueGetTopicMetadataTopicWorker::TPersQueueGetTopicMetadataTopicWorker( const TActorId& parent, const TSchemeEntry& topicEntry, const TString& name ) : TReplierToParent<TTopicInfoBasedActor>(parent, topicEntry, name) -{ +{ SetActivityType(NKikimrServices::TActivity::PQ_META_REQUEST_PROCESSOR); -} - +} + void TPersQueueGetTopicMetadataTopicWorker::BootstrapImpl(const TActorContext& ctx) { auto processingResult = ProcessMetaCacheSingleTopicsResponse(SchemeEntry); @@ -59,7 +59,7 @@ void TPersQueueGetTopicMetadataTopicWorker::BootstrapImpl(const TActorContext& c } void TPersQueueGetTopicMetadataTopicWorker::Answer(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) { - NKikimrClient::TResponse response; + NKikimrClient::TResponse response; response.SetStatus(status); response.SetErrorCode(code); if (!errorReason.empty()) @@ -73,73 +73,73 @@ void TPersQueueGetTopicMetadataTopicWorker::Answer(const TActorContext& ctx, ERe topicInfo->MutableConfig()->CopyFrom(desc.GetPQTabletConfig()); topicInfo->MutableConfig()->SetVersion(desc.GetAlterVersion()); topicInfo->SetNumPartitions(desc.PartitionsSize()); - } - } - SendReplyAndDie(std::move(response), ctx); -} - - -// -// GetPartitionOffsets command -// - + } + } + SendReplyAndDie(std::move(response), ctx); +} + + +// +// GetPartitionOffsets command +// + TPersQueueGetPartitionOffsetsProcessor::TPersQueueGetPartitionOffsetsProcessor( const NKikimrClient::TPersQueueRequest& request, const TActorId& metaCacheId ) : TPersQueueBaseRequestProcessor(request, metaCacheId, false) -{ - GetTopicsListOrThrow(RequestProto->GetMetaRequest().GetCmdGetPartitionOffsets().GetTopicRequest(), PartitionsToRequest); -} - +{ + GetTopicsListOrThrow(RequestProto->GetMetaRequest().GetCmdGetPartitionOffsets().GetTopicRequest(), PartitionsToRequest); +} + THolder<IActor> TPersQueueGetPartitionOffsetsProcessor::CreateTopicSubactor( const TSchemeEntry& topicEntry, const TString& name ) { return MakeHolder<TPersQueueGetPartitionOffsetsTopicWorker>( SelfId(), topicEntry, name, PartitionsToRequest[topicEntry.PQGroupInfo->Description.GetName()], RequestProto ); -} - +} + TPersQueueGetPartitionOffsetsTopicWorker::TPersQueueGetPartitionOffsetsTopicWorker( const TActorId& parent, const TSchemeEntry& topicEntry, const TString& name, const std::shared_ptr<THashSet<ui64>>& partitionsToRequest, const std::shared_ptr<const NKikimrClient::TPersQueueRequest>& requestProto ) : 
TReplierToParent<TPipesWaiterActor<TTopicInfoBasedActor, TEvPersQueue::TEvOffsetsResponse>>(parent, topicEntry, name) - , PartitionsToRequest(partitionsToRequest) - , RequestProto(requestProto) -{ + , PartitionsToRequest(partitionsToRequest) + , RequestProto(requestProto) +{ SetActivityType(NKikimrServices::TActivity::PQ_META_REQUEST_PROCESSOR); -} - +} + void TPersQueueGetPartitionOffsetsTopicWorker::BootstrapImpl(const TActorContext &ctx) { - size_t partitionsAsked = 0; + size_t partitionsAsked = 0; THashSet<ui64> parts; if (SchemeEntry.PQGroupInfo) { const auto& pqDescr = SchemeEntry.PQGroupInfo->Description; for (const auto& partition : pqDescr.GetPartitions()) { const ui32 partIndex = partition.GetPartitionId(); - const ui64 tabletId = partition.GetTabletId(); + const ui64 tabletId = partition.GetTabletId(); if (PartitionsToRequest.get() != nullptr && !PartitionsToRequest->empty() && !PartitionsToRequest->contains(partIndex)) { - continue; - } + continue; + } parts.insert(partIndex); - ++partitionsAsked; - if (HasTabletPipe(tabletId)) { // Take all partitions for tablet from one TEvOffsetsResponse event - continue; - } - THolder<TEvPersQueue::TEvOffsets> ev(new TEvPersQueue::TEvOffsets()); - const TString& clientId = RequestProto->GetMetaRequest().GetCmdGetPartitionOffsets().GetClientId(); - if (!clientId.empty()) { - ev->Record.SetClientId(clientId); - } - CreatePipeAndSend(tabletId, ctx, std::move(ev)); - } - } + ++partitionsAsked; + if (HasTabletPipe(tabletId)) { // Take all partitions for tablet from one TEvOffsetsResponse event + continue; + } + THolder<TEvPersQueue::TEvOffsets> ev(new TEvPersQueue::TEvOffsets()); + const TString& clientId = RequestProto->GetMetaRequest().GetCmdGetPartitionOffsets().GetClientId(); + if (!clientId.empty()) { + ev->Record.SetClientId(clientId); + } + CreatePipeAndSend(tabletId, ctx, std::move(ev)); + } + } if (PartitionsToRequest.get() != nullptr && !PartitionsToRequest->empty() && PartitionsToRequest->size() != partitionsAsked) { SendErrorReplyAndDie(ctx, MSTATUS_ERROR, NPersQueue::NErrorCode::UNKNOWN_TOPIC, TStringBuilder() << "no one of requested partitions in topic '" << Name << "', Marker# PQ96"); return; - } + } if (!PartitionsToRequest.get() || PartitionsToRequest->empty()) { PartitionsToRequest.reset(new THashSet<ui64>()); PartitionsToRequest->swap(parts); @@ -147,9 +147,9 @@ void TPersQueueGetPartitionOffsetsTopicWorker::BootstrapImpl(const TActorContext if(WaitAllPipeEvents(ctx)) { return; } -} - -bool TPersQueueGetPartitionOffsetsTopicWorker::OnPipeEventsAreReady(const TActorContext& ctx) { +} + +bool TPersQueueGetPartitionOffsetsTopicWorker::OnPipeEventsAreReady(const TActorContext& ctx) { auto processResult = ProcessMetaCacheSingleTopicsResponse(SchemeEntry); Answer(ctx, processResult.Status, processResult.ErrorCode, processResult.Reason); @@ -157,7 +157,7 @@ bool TPersQueueGetPartitionOffsetsTopicWorker::OnPipeEventsAreReady(const TActor } void TPersQueueGetPartitionOffsetsTopicWorker::Answer(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) { - NKikimrClient::TResponse response; + NKikimrClient::TResponse response; response.SetStatus(status); response.SetErrorCode(code); if (!errorReason.empty()) @@ -173,11 +173,11 @@ void TPersQueueGetPartitionOffsetsTopicWorker::Answer(const TActorContext& ctx, const ui64 partitionIndex = partResult.GetPartition(); if (PartitionsToRequest.get() == nullptr || PartitionsToRequest->empty() || 
PartitionsToRequest->contains(partitionIndex)) { topicResult.AddPartitionResult()->Swap(&partResult); - partitionsInserted.insert(partitionIndex); - } - } - } - } + partitionsInserted.insert(partitionIndex); + } + } + } + } if (PartitionsToRequest.get() != nullptr && !PartitionsToRequest->empty() && PartitionsToRequest->size() != partitionsInserted.size() && topicResult.GetErrorCode() == (ui32)NPersQueue::NErrorCode::OK) { const TString reason = "partition is not ready yet"; for (ui64 partitionIndex : *PartitionsToRequest) { @@ -187,73 +187,73 @@ void TPersQueueGetPartitionOffsetsTopicWorker::Answer(const TActorContext& ctx, res->SetErrorCode(NPersQueue::NErrorCode::INITIALIZING); res->SetErrorReason(reason); } - } - } - } - SendReplyAndDie(std::move(response), ctx); -} - - -// -// GetPartitionStatus command -// - + } + } + } + SendReplyAndDie(std::move(response), ctx); +} + + +// +// GetPartitionStatus command +// + TPersQueueGetPartitionStatusProcessor::TPersQueueGetPartitionStatusProcessor(const NKikimrClient::TPersQueueRequest& request, const TActorId& schemeCache) - : TPersQueueBaseRequestProcessor(request, schemeCache, false) -{ - GetTopicsListOrThrow(RequestProto->GetMetaRequest().GetCmdGetPartitionStatus().GetTopicRequest(), PartitionsToRequest); -} - + : TPersQueueBaseRequestProcessor(request, schemeCache, false) +{ + GetTopicsListOrThrow(RequestProto->GetMetaRequest().GetCmdGetPartitionStatus().GetTopicRequest(), PartitionsToRequest); +} + THolder<IActor> TPersQueueGetPartitionStatusProcessor::CreateTopicSubactor( const TSchemeEntry& topicEntry, const TString& name ) { return MakeHolder<TPersQueueGetPartitionStatusTopicWorker>( SelfId(), topicEntry, name, PartitionsToRequest[topicEntry.PQGroupInfo->Description.GetName()], RequestProto ); -} - +} + TPersQueueGetPartitionStatusTopicWorker::TPersQueueGetPartitionStatusTopicWorker( const TActorId& parent, const TSchemeEntry& topicEntry, const TString& name, const std::shared_ptr<THashSet<ui64>>& partitionsToRequest, const std::shared_ptr<const NKikimrClient::TPersQueueRequest>& requestProto ) : TReplierToParent<TPipesWaiterActor<TTopicInfoBasedActor, TEvPersQueue::TEvStatusResponse>>(parent, topicEntry, name) - , PartitionsToRequest(partitionsToRequest) - , RequestProto(requestProto) -{ + , PartitionsToRequest(partitionsToRequest) + , RequestProto(requestProto) +{ SetActivityType(NKikimrServices::TActivity::PQ_META_REQUEST_PROCESSOR); -} - +} + void TPersQueueGetPartitionStatusTopicWorker::BootstrapImpl(const TActorContext &ctx) { - size_t partitionsAsked = 0; + size_t partitionsAsked = 0; THashSet<ui64> parts; if (!ProcessingResult.IsFatal) { const auto& pqDescr = SchemeEntry.PQGroupInfo->Description; for (const auto& partition : pqDescr.GetPartitions()) { const ui32 partIndex = partition.GetPartitionId(); - const ui64 tabletId = partition.GetTabletId(); + const ui64 tabletId = partition.GetTabletId(); if (PartitionsToRequest != nullptr && !PartitionsToRequest->empty() && !PartitionsToRequest->contains(partIndex)) { - continue; - } + continue; + } parts.insert(partIndex); - ++partitionsAsked; - if (HasTabletPipe(tabletId)) { // Take all partitions for tablet from one TEvStatusResponse event - continue; - } - THolder<TEvPersQueue::TEvStatus> ev(new TEvPersQueue::TEvStatus()); + ++partitionsAsked; + if (HasTabletPipe(tabletId)) { // Take all partitions for tablet from one TEvStatusResponse event + continue; + } + THolder<TEvPersQueue::TEvStatus> ev(new TEvPersQueue::TEvStatus()); if 
(RequestProto->GetMetaRequest().GetCmdGetPartitionStatus().HasClientId()) ev->Record.SetClientId(RequestProto->GetMetaRequest().GetCmdGetPartitionStatus().GetClientId()); - CreatePipeAndSend(tabletId, ctx, std::move(ev)); - } + CreatePipeAndSend(tabletId, ctx, std::move(ev)); + } } else { SendErrorReplyAndDie(ctx, ProcessingResult.Status, ProcessingResult.ErrorCode, ProcessingResult.Reason); return; - } + } if (PartitionsToRequest != nullptr && !PartitionsToRequest->empty() && PartitionsToRequest->size() != partitionsAsked) { SendErrorReplyAndDie(ctx, MSTATUS_ERROR, NPersQueue::NErrorCode::UNKNOWN_TOPIC, TStringBuilder() << "no one of requested partitions in topic '" << Name << "', Marker# PQ97"); return; - } + } if (!PartitionsToRequest.get() || PartitionsToRequest->empty()) { PartitionsToRequest.reset(new THashSet<ui64>()); PartitionsToRequest->swap(parts); @@ -261,9 +261,9 @@ void TPersQueueGetPartitionStatusTopicWorker::BootstrapImpl(const TActorContext if (WaitAllPipeEvents(ctx)) return; -} - -bool TPersQueueGetPartitionStatusTopicWorker::OnPipeEventsAreReady(const TActorContext& ctx) { +} + +bool TPersQueueGetPartitionStatusTopicWorker::OnPipeEventsAreReady(const TActorContext& ctx) { auto processResult = ProcessMetaCacheSingleTopicsResponse(SchemeEntry); Answer(ctx, processResult.Status, processResult.ErrorCode, processResult.Reason); return true; @@ -273,7 +273,7 @@ void TPersQueueGetPartitionStatusTopicWorker::Answer( const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason ) { - NKikimrClient::TResponse response; + NKikimrClient::TResponse response; response.SetStatus(status); response.SetErrorCode(code); if (!errorReason.empty()) @@ -292,10 +292,10 @@ void TPersQueueGetPartitionStatusTopicWorker::Answer( if (PartitionsToRequest.get() != nullptr && !PartitionsToRequest->empty()) { partitionsInserted.insert(partitionIndex); } - } - } - } - } + } + } + } + } if (PartitionsToRequest.get() != nullptr && !PartitionsToRequest->empty() && PartitionsToRequest->size() != partitionsInserted.size() && topicResult.GetErrorCode() == (ui32)NPersQueue::NErrorCode::OK) { const TString reason = "partition is not ready yet"; for (ui64 partitionIndex : *PartitionsToRequest) { @@ -304,25 +304,25 @@ void TPersQueueGetPartitionStatusTopicWorker::Answer( res->SetPartition(partitionIndex); res->SetStatus(NKikimrPQ::TStatusResponse::STATUS_UNKNOWN); } - } - } - } - SendReplyAndDie(std::move(response), ctx); -} - - -// -// GetPartitionLocations command -// - + } + } + } + SendReplyAndDie(std::move(response), ctx); +} + + +// +// GetPartitionLocations command +// + TPersQueueGetPartitionLocationsProcessor::TPersQueueGetPartitionLocationsProcessor( const NKikimrClient::TPersQueueRequest& request, const TActorId& schemeCache ) - : TPersQueueBaseRequestProcessor(request, schemeCache, true) -{ - GetTopicsListOrThrow(RequestProto->GetMetaRequest().GetCmdGetPartitionLocations().GetTopicRequest(), PartitionsToRequest); -} - + : TPersQueueBaseRequestProcessor(request, schemeCache, true) +{ + GetTopicsListOrThrow(RequestProto->GetMetaRequest().GetCmdGetPartitionLocations().GetTopicRequest(), PartitionsToRequest); +} + THolder<IActor> TPersQueueGetPartitionLocationsProcessor::CreateTopicSubactor( const TSchemeEntry& topicEntry, const TString& name ) { @@ -331,8 +331,8 @@ THolder<IActor> TPersQueueGetPartitionLocationsProcessor::CreateTopicSubactor( SelfId(), topicEntry, name, PartitionsToRequest[topicEntry.PQGroupInfo->Description.GetName()], RequestProto, 
NodesInfo ); -} - +} + TPersQueueGetPartitionLocationsTopicWorker::TPersQueueGetPartitionLocationsTopicWorker( const TActorId& parent, const TSchemeEntry& topicEntry, const TString& name, @@ -341,38 +341,38 @@ TPersQueueGetPartitionLocationsTopicWorker::TPersQueueGetPartitionLocationsTopic std::shared_ptr<const TPersQueueBaseRequestProcessor::TNodesInfo> nodesInfo ) : TReplierToParent<TPipesWaiterActor<TTopicInfoBasedActor, TEvTabletPipe::TEvClientConnected>>(parent, topicEntry, name) - , PartitionsToRequest(partitionsToRequest) - , RequestProto(requestProto) - , NodesInfo(nodesInfo) -{ + , PartitionsToRequest(partitionsToRequest) + , RequestProto(requestProto) + , NodesInfo(nodesInfo) +{ SetActivityType(NKikimrServices::TActivity::PQ_META_REQUEST_PROCESSOR); -} - +} + void TPersQueueGetPartitionLocationsTopicWorker::BootstrapImpl(const TActorContext& ctx) { - size_t partitionsAsked = 0; + size_t partitionsAsked = 0; THashSet<ui64> parts; if (SchemeEntry.PQGroupInfo) { const auto& pqDescr = SchemeEntry.PQGroupInfo->Description; for (const auto& partition : pqDescr.GetPartitions()) { const ui32 partIndex = partition.GetPartitionId(); - const ui64 tabletId = partition.GetTabletId(); + const ui64 tabletId = partition.GetTabletId(); if (PartitionsToRequest.get() != nullptr && !PartitionsToRequest->empty() && !PartitionsToRequest->contains(partIndex)) { - continue; - } - PartitionToTablet[partIndex] = tabletId; - ++partitionsAsked; + continue; + } + PartitionToTablet[partIndex] = tabletId; + ++partitionsAsked; parts.insert(partIndex); - if (HasTabletPipe(tabletId)) { // Take all partitions for tablet from one TEvStatusResponse event - continue; - } - CreatePipe(tabletId, ctx); - } - } + if (HasTabletPipe(tabletId)) { // Take all partitions for tablet from one TEvStatusResponse event + continue; + } + CreatePipe(tabletId, ctx); + } + } if (PartitionsToRequest.get() != nullptr && !PartitionsToRequest->empty() && PartitionsToRequest->size() != partitionsAsked) { SendErrorReplyAndDie(ctx, MSTATUS_ERROR, NPersQueue::NErrorCode::UNKNOWN_TOPIC, TStringBuilder() << "no one of requested partitions in topic '" << Name << "', Marker# PQ98"); return; - } + } if (!PartitionsToRequest.get() || PartitionsToRequest->empty()) { PartitionsToRequest.reset(new THashSet<ui64>()); PartitionsToRequest->swap(parts); @@ -380,9 +380,9 @@ void TPersQueueGetPartitionLocationsTopicWorker::BootstrapImpl(const TActorConte if(WaitAllConnections(ctx)) return; -} - -bool TPersQueueGetPartitionLocationsTopicWorker::OnPipeEventsAreReady(const TActorContext& ctx) { +} + +bool TPersQueueGetPartitionLocationsTopicWorker::OnPipeEventsAreReady(const TActorContext& ctx) { auto processResult = ProcessMetaCacheSingleTopicsResponse(SchemeEntry); Answer(ctx, processResult.Status, processResult.ErrorCode, processResult.Reason); return true; @@ -392,13 +392,13 @@ void TPersQueueGetPartitionLocationsTopicWorker::Answer( const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason ) { - NKikimrClient::TResponse response; + NKikimrClient::TResponse response; response.SetStatus(status); response.SetErrorCode(code); if (!errorReason.empty()) response.SetErrorReason(errorReason); if (code == NPersQueue::NErrorCode::OK) { - + auto& topicResult = *response.MutableMetaResponse()->MutableCmdGetPartitionLocationsResult()->AddTopicResult(); topicResult.SetTopic(Name); SetErrorCode(&topicResult, SchemeEntry); @@ -421,116 +421,116 @@ void TPersQueueGetPartitionLocationsTopicWorker::Answer( } else { 
statusInitializing = true; } - } else { - statusInitializing = true; - } + } else { + statusInitializing = true; + } if (statusInitializing) { location.SetErrorCode(NPersQueue::NErrorCode::INITIALIZING); location.SetErrorReason("Tablet for that partition is not running"); } - } - } - SendReplyAndDie(std::move(response), ctx); -} - - -// -// GetReadSessionsInfo command -// - + } + } + SendReplyAndDie(std::move(response), ctx); +} + + +// +// GetReadSessionsInfo command +// + TPersQueueGetReadSessionsInfoProcessor::TPersQueueGetReadSessionsInfoProcessor( const NKikimrClient::TPersQueueRequest& request, const TActorId& schemeCache, std::shared_ptr<IPersQueueGetReadSessionsInfoWorkerFactory> pqReadSessionsInfoWorkerFactory ) - : TPersQueueBaseRequestProcessor(request, schemeCache, true) + : TPersQueueBaseRequestProcessor(request, schemeCache, true) , PQReadSessionsInfoWorkerFactory(pqReadSessionsInfoWorkerFactory) -{ - const auto& cmd = RequestProto->GetMetaRequest().GetCmdGetReadSessionsInfo(); - const auto& topics = cmd.GetTopic(); - TopicsToRequest.insert(topics.begin(), topics.end()); - if (IsIn(TopicsToRequest, TString())) { - throw std::runtime_error("empty topic in GetReadSessionsInfo request"); - } - - if (!cmd.HasClientId()) { - throw std::runtime_error("No clientId specified in CmdGetReadSessionsInfo"); - } -} - +{ + const auto& cmd = RequestProto->GetMetaRequest().GetCmdGetReadSessionsInfo(); + const auto& topics = cmd.GetTopic(); + TopicsToRequest.insert(topics.begin(), topics.end()); + if (IsIn(TopicsToRequest, TString())) { + throw std::runtime_error("empty topic in GetReadSessionsInfo request"); + } + + if (!cmd.HasClientId()) { + throw std::runtime_error("No clientId specified in CmdGetReadSessionsInfo"); + } +} + THolder<IActor> TPersQueueGetReadSessionsInfoProcessor::CreateTopicSubactor( const TSchemeEntry& topicEntry, const TString& name ) { Y_VERIFY(NodesInfo.get() != nullptr); return MakeHolder<TPersQueueGetReadSessionsInfoTopicWorker>( SelfId(), topicEntry, name, RequestProto, NodesInfo); -} - +} + TPersQueueGetReadSessionsInfoTopicWorker::TPersQueueGetReadSessionsInfoTopicWorker( const TActorId& parent, const TSchemeEntry& topicEntry, const TString& name, const std::shared_ptr<const NKikimrClient::TPersQueueRequest>& requestProto, std::shared_ptr<const TPersQueueBaseRequestProcessor::TNodesInfo> nodesInfo ) : TReplierToParent<TPipesWaiterActor<TTopicInfoBasedActor, TEvPersQueue::TEvOffsetsResponse>>(parent, topicEntry, name) - , RequestProto(requestProto) - , NodesInfo(nodesInfo) -{ + , RequestProto(requestProto) + , NodesInfo(nodesInfo) +{ SetActivityType(NKikimrServices::TActivity::PQ_META_REQUEST_PROCESSOR); -} - -void TPersQueueGetReadSessionsInfoTopicWorker::Die(const TActorContext& ctx) { - if (BalancerPipe) { - NTabletPipe::CloseClient(ctx, BalancerPipe); - } - TReplierToParent::Die(ctx); -} - -void TPersQueueGetReadSessionsInfoTopicWorker::SendReadSessionsInfoToBalancer(const TActorContext& ctx) { - NTabletPipe::TClientConfig clientConfig; +} + +void TPersQueueGetReadSessionsInfoTopicWorker::Die(const TActorContext& ctx) { + if (BalancerPipe) { + NTabletPipe::CloseClient(ctx, BalancerPipe); + } + TReplierToParent::Die(ctx); +} + +void TPersQueueGetReadSessionsInfoTopicWorker::SendReadSessionsInfoToBalancer(const TActorContext& ctx) { + NTabletPipe::TClientConfig clientConfig; BalancerPipe = ctx.RegisterWithSameMailbox( NTabletPipe::CreateClient(ctx.SelfID, SchemeEntry.PQGroupInfo->Description.GetBalancerTabletID(), clientConfig) ); - - 
THolder<TEvPersQueue::TEvGetReadSessionsInfo> ev(new TEvPersQueue::TEvGetReadSessionsInfo()); - ev->Record.SetClientId(RequestProto->GetMetaRequest().GetCmdGetReadSessionsInfo().GetClientId()); - NTabletPipe::SendData(ctx, BalancerPipe, ev.Release()); -} - + + THolder<TEvPersQueue::TEvGetReadSessionsInfo> ev(new TEvPersQueue::TEvGetReadSessionsInfo()); + ev->Record.SetClientId(RequestProto->GetMetaRequest().GetCmdGetReadSessionsInfo().GetClientId()); + NTabletPipe::SendData(ctx, BalancerPipe, ev.Release()); +} + void TPersQueueGetReadSessionsInfoTopicWorker::BootstrapImpl(const TActorContext &ctx) { if (!ProcessingResult.IsFatal) { SendReadSessionsInfoToBalancer(ctx); for (const auto& partition : SchemeEntry.PQGroupInfo->Description.GetPartitions()) { const ui32 partitionIndex = partition.GetPartitionId(); - const ui64 tabletId = partition.GetTabletId(); - const bool inserted = PartitionToTablet.emplace(partitionIndex, tabletId).second; - Y_VERIFY(inserted); - - if (HasTabletPipe(tabletId)) { - continue; - } - - THolder<TEvPersQueue::TEvOffsets> ev(new TEvPersQueue::TEvOffsets()); - const TString& clientId = RequestProto->GetMetaRequest().GetCmdGetReadSessionsInfo().GetClientId(); - if (!clientId.empty()) { - ev->Record.SetClientId(clientId); - } - CreatePipeAndSend(tabletId, ctx, std::move(ev)); - } + const ui64 tabletId = partition.GetTabletId(); + const bool inserted = PartitionToTablet.emplace(partitionIndex, tabletId).second; + Y_VERIFY(inserted); + + if (HasTabletPipe(tabletId)) { + continue; + } + + THolder<TEvPersQueue::TEvOffsets> ev(new TEvPersQueue::TEvOffsets()); + const TString& clientId = RequestProto->GetMetaRequest().GetCmdGetReadSessionsInfo().GetClientId(); + if (!clientId.empty()) { + ev->Record.SetClientId(clientId); + } + CreatePipeAndSend(tabletId, ctx, std::move(ev)); + } } else { Answer(ctx, ProcessingResult.Status, ProcessingResult.ErrorCode, ProcessingResult.Reason); - } + } if(WaitAllPipeEvents(ctx)) return; -} - -bool TPersQueueGetReadSessionsInfoTopicWorker::WaitAllPipeEvents(const TActorContext& ctx) { - if (TPipesWaiterActor::WaitAllPipeEvents(ctx)) { - return true; - } - Become(&TPersQueueGetReadSessionsInfoTopicWorker::WaitAllPipeEventsStateFunc); - return false; -} - +} + +bool TPersQueueGetReadSessionsInfoTopicWorker::WaitAllPipeEvents(const TActorContext& ctx) { + if (TPipesWaiterActor::WaitAllPipeEvents(ctx)) { + return true; + } + Become(&TPersQueueGetReadSessionsInfoTopicWorker::WaitAllPipeEventsStateFunc); + return false; +} + THolder<IActor> TPersQueueGetReadSessionsInfoProcessor::CreateSessionsSubactor( const THashMap<TString, TActorId>&& readSessions ) { @@ -543,78 +543,78 @@ THolder<IActor> TPersQueueGetReadSessionsInfoProcessor::CreateSessionsSubactor( STFUNC(TPersQueueGetReadSessionsInfoTopicWorker::WaitAllPipeEventsStateFunc) { switch (ev->GetTypeRewrite()) { HFunc(TEvPersQueue::TEvReadSessionsInfoResponse, Handle); - case TEvTabletPipe::TEvClientDestroyed::EventType: - if (!HandleDestroy(ev->Get<TEvTabletPipe::TEvClientDestroyed>(), ctx)) { - TPipesWaiterActor::WaitAllPipeEventsStateFunc(ev, ctx); - } - break; - case TEvTabletPipe::TEvClientConnected::EventType: - if (!HandleConnect(ev->Get<TEvTabletPipe::TEvClientConnected>(), ctx)) { - TPipesWaiterActor::WaitAllPipeEventsStateFunc(ev, ctx); - } - break; - default: - TPipesWaiterActor::WaitAllPipeEventsStateFunc(ev, ctx); - } -} - -bool TPersQueueGetReadSessionsInfoTopicWorker::HandleConnect(TEvTabletPipe::TEvClientConnected* ev, const TActorContext& ctx) { - if (ev->ClientId != 
BalancerPipe) { - return false; - } - if (ev->Status != NKikimrProto::OK) { - BalancerReplied = true; - if (ReadyToAnswer()) { + case TEvTabletPipe::TEvClientDestroyed::EventType: + if (!HandleDestroy(ev->Get<TEvTabletPipe::TEvClientDestroyed>(), ctx)) { + TPipesWaiterActor::WaitAllPipeEventsStateFunc(ev, ctx); + } + break; + case TEvTabletPipe::TEvClientConnected::EventType: + if (!HandleConnect(ev->Get<TEvTabletPipe::TEvClientConnected>(), ctx)) { + TPipesWaiterActor::WaitAllPipeEventsStateFunc(ev, ctx); + } + break; + default: + TPipesWaiterActor::WaitAllPipeEventsStateFunc(ev, ctx); + } +} + +bool TPersQueueGetReadSessionsInfoTopicWorker::HandleConnect(TEvTabletPipe::TEvClientConnected* ev, const TActorContext& ctx) { + if (ev->ClientId != BalancerPipe) { + return false; + } + if (ev->Status != NKikimrProto::OK) { + BalancerReplied = true; + if (ReadyToAnswer()) { Answer(ctx, ProcessingResult.Status, ProcessingResult.ErrorCode, ProcessingResult.Reason); - } - } else { - TabletNodes[GetTabletId(ev)] = ev->ServerId.NodeId(); - } - return true; -} - -bool TPersQueueGetReadSessionsInfoTopicWorker::HandleDestroy(TEvTabletPipe::TEvClientDestroyed* ev, const TActorContext& ctx) { - if (ev->ClientId != BalancerPipe) { - return false; - } - BalancerReplied = true; - if (ReadyToAnswer()) { + } + } else { + TabletNodes[GetTabletId(ev)] = ev->ServerId.NodeId(); + } + return true; +} + +bool TPersQueueGetReadSessionsInfoTopicWorker::HandleDestroy(TEvTabletPipe::TEvClientDestroyed* ev, const TActorContext& ctx) { + if (ev->ClientId != BalancerPipe) { + return false; + } + BalancerReplied = true; + if (ReadyToAnswer()) { Answer(ctx, ProcessingResult.Status, ProcessingResult.ErrorCode, ProcessingResult.Reason); - } - return true; -} - -void TPersQueueGetReadSessionsInfoTopicWorker::Handle(TEvPersQueue::TEvReadSessionsInfoResponse::TPtr& ev, const TActorContext& ctx) { - BalancerReplied = true; + } + return true; +} + +void TPersQueueGetReadSessionsInfoTopicWorker::Handle(TEvPersQueue::TEvReadSessionsInfoResponse::TPtr& ev, const TActorContext& ctx) { + BalancerReplied = true; BalancerResponse = ev; - if (ReadyToAnswer()) { + if (ReadyToAnswer()) { Answer(ctx, MSTATUS_OK, NPersQueue::NErrorCode::OK, ""); - } -} - + } +} + -bool TPersQueueGetReadSessionsInfoTopicWorker::OnPipeEventsAreReady(const TActorContext& ctx) { - PipeEventsAreReady = true; - if (ReadyToAnswer()) { +bool TPersQueueGetReadSessionsInfoTopicWorker::OnPipeEventsAreReady(const TActorContext& ctx) { + PipeEventsAreReady = true; + if (ReadyToAnswer()) { Answer(ctx, ProcessingResult.Status, ProcessingResult.ErrorCode, ProcessingResult.Reason); - return true; - } - return false; -} - -bool TPersQueueGetReadSessionsInfoTopicWorker::ReadyToAnswer() const { + return true; + } + return false; +} + +bool TPersQueueGetReadSessionsInfoTopicWorker::ReadyToAnswer() const { return PipeEventsAreReady && BalancerReplied; -} - - -TString TPersQueueGetReadSessionsInfoTopicWorker::GetHostName(ui32 hostId) const { - const auto host = NodesInfo->HostNames.find(hostId); - return host != NodesInfo->HostNames.end() ? host->second : TString(); -} - +} + + +TString TPersQueueGetReadSessionsInfoTopicWorker::GetHostName(ui32 hostId) const { + const auto host = NodesInfo->HostNames.find(hostId); + return host != NodesInfo->HostNames.end() ? 
host->second : TString(); +} + void TPersQueueGetReadSessionsInfoTopicWorker::Answer(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) { - NKikimrClient::TResponse response; + NKikimrClient::TResponse response; response.SetStatus(status); response.SetErrorCode(code); if (!errorReason.empty()) @@ -656,11 +656,11 @@ void TPersQueueGetReadSessionsInfoTopicWorker::Answer(const TActorContext& ctx, } SendReplyAndDie(std::move(response), ctx); return; - } + } for (const auto& pipeAnswer : PipeAnswers) { if (!pipeAnswer.second) { - continue; - } + continue; + } const auto& offsetResp = pipeAnswer.second->Get()->Record; const ui64 tabletId = pipeAnswer.first; for (const auto& partResult : offsetResp.GetPartResult()) { @@ -691,15 +691,15 @@ void TPersQueueGetReadSessionsInfoTopicWorker::Answer(const TActorContext& ctx, res->SetErrorReason("Tablet for partition is not running"); } } - } - } - SendReplyAndDie(std::move(response), ctx); -} - -bool TPersQueueGetReadSessionsInfoTopicWorker::OnClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& /*ctx*/) { - TabletNodes[GetTabletId(ev->Get())] = ev->Get()->ServerId.NodeId(); - return false; -} - -} // namespace NMsgBusProxy -} // namespace NKikimr + } + } + SendReplyAndDie(std::move(response), ctx); +} + +bool TPersQueueGetReadSessionsInfoTopicWorker::OnClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& /*ctx*/) { + TabletNodes[GetTabletId(ev->Get())] = ev->Get()->ServerId.NodeId(); + return false; +} + +} // namespace NMsgBusProxy +} // namespace NKikimr diff --git a/ydb/core/client/server/msgbus_server_pq_metarequest.h b/ydb/core/client/server/msgbus_server_pq_metarequest.h index 1d56c28dc09..0f390a71e14 100644 --- a/ydb/core/client/server/msgbus_server_pq_metarequest.h +++ b/ydb/core/client/server/msgbus_server_pq_metarequest.h @@ -1,142 +1,142 @@ -#pragma once -#include "msgbus_server_persqueue.h" - -namespace NKikimr { -namespace NMsgBusProxy { - -// -// GetTopicMetadata command -// +#pragma once +#include "msgbus_server_persqueue.h" + +namespace NKikimr { +namespace NMsgBusProxy { + +// +// GetTopicMetadata command +// // -class TPersQueueGetTopicMetadataProcessor : public TPersQueueBaseRequestProcessor { -public: +class TPersQueueGetTopicMetadataProcessor : public TPersQueueBaseRequestProcessor { +public: TPersQueueGetTopicMetadataProcessor(const NKikimrClient::TPersQueueRequest& request, const TActorId& schemeCache); - -private: + +private: THolder<IActor> CreateTopicSubactor(const TSchemeEntry& topicEntry, const TString& name) override; -}; - +}; + class TPersQueueGetTopicMetadataTopicWorker : public TReplierToParent<TTopicInfoBasedActor> { -public: +public: TPersQueueGetTopicMetadataTopicWorker(const TActorId& parent, const TSchemeEntry& topicEntry, const TString& name); - + void BootstrapImpl(const TActorContext& ctx) override; void Answer(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) override; -}; - - -// -// GetPartitionOffsets command -// - -class TPersQueueGetPartitionOffsetsProcessor : public TPersQueueBaseRequestProcessor { -public: +}; + + +// +// GetPartitionOffsets command +// + +class TPersQueueGetPartitionOffsetsProcessor : public TPersQueueBaseRequestProcessor { +public: TPersQueueGetPartitionOffsetsProcessor(const NKikimrClient::TPersQueueRequest& request, const TActorId& metaCacheId); - -private: + +private: THolder<IActor> 
CreateTopicSubactor(const TSchemeEntry& topicEntry, const TString& name) override; - -private: + +private: THashMap<TString, std::shared_ptr<THashSet<ui64>>> PartitionsToRequest; -}; - +}; + class TPersQueueGetPartitionOffsetsTopicWorker : public TReplierToParent<TPipesWaiterActor<TTopicInfoBasedActor, TEvPersQueue::TEvOffsetsResponse>> { -public: +public: TPersQueueGetPartitionOffsetsTopicWorker(const TActorId& parent, const TSchemeEntry& topicEntry, const TString& name, const std::shared_ptr<THashSet<ui64>>& partitionsToRequest, const std::shared_ptr<const NKikimrClient::TPersQueueRequest>& requestProto); - + void BootstrapImpl(const TActorContext& ctx) override; - bool OnPipeEventsAreReady(const TActorContext& ctx) override; + bool OnPipeEventsAreReady(const TActorContext& ctx) override; void Answer(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) override; - -private: + +private: std::shared_ptr<THashSet<ui64>> PartitionsToRequest; std::shared_ptr<const NKikimrClient::TPersQueueRequest> RequestProto; -}; - - -// -// GetPartitionStatus command -// - -class TPersQueueGetPartitionStatusProcessor : public TPersQueueBaseRequestProcessor { -public: +}; + + +// +// GetPartitionStatus command +// + +class TPersQueueGetPartitionStatusProcessor : public TPersQueueBaseRequestProcessor { +public: TPersQueueGetPartitionStatusProcessor(const NKikimrClient::TPersQueueRequest& request, const TActorId& schemeCache); - -private: + +private: THolder<IActor> CreateTopicSubactor(const TSchemeEntry& topicEntry, const TString& name) override; - -private: + +private: THashMap<TString, std::shared_ptr<THashSet<ui64>>> PartitionsToRequest; -}; - +}; + class TPersQueueGetPartitionStatusTopicWorker : public TReplierToParent<TPipesWaiterActor<TTopicInfoBasedActor, TEvPersQueue::TEvStatusResponse>> { -public: +public: TPersQueueGetPartitionStatusTopicWorker(const TActorId& parent, const TTopicInfoBasedActor::TSchemeEntry& topicEntry, const TString& name, const std::shared_ptr<THashSet<ui64>>& partitionsToRequest, const std::shared_ptr<const NKikimrClient::TPersQueueRequest>& requestProto); - + void BootstrapImpl(const TActorContext& ctx) override; - bool OnPipeEventsAreReady(const TActorContext& ctx) override; + bool OnPipeEventsAreReady(const TActorContext& ctx) override; void Answer(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) override; - -private: + +private: std::shared_ptr<THashSet<ui64>> PartitionsToRequest; std::shared_ptr<const NKikimrClient::TPersQueueRequest> RequestProto; -}; - - -// -// GetPartitionLocations command -// - -class TPersQueueGetPartitionLocationsProcessor : public TPersQueueBaseRequestProcessor { -public: +}; + + +// +// GetPartitionLocations command +// + +class TPersQueueGetPartitionLocationsProcessor : public TPersQueueBaseRequestProcessor { +public: TPersQueueGetPartitionLocationsProcessor(const NKikimrClient::TPersQueueRequest& request, const TActorId& schemeCache); - -private: + +private: THolder<IActor> CreateTopicSubactor(const TSchemeEntry& topicEntry, const TString& name) override; - -private: + +private: THashMap<TString, std::shared_ptr<THashSet<ui64>>> PartitionsToRequest; -}; - +}; + class TPersQueueGetPartitionLocationsTopicWorker : public TReplierToParent<TPipesWaiterActor<TTopicInfoBasedActor, TEvTabletPipe::TEvClientConnected>> { -public: +public: TPersQueueGetPartitionLocationsTopicWorker(const TActorId& parent, const 
TTopicInfoBasedActor::TSchemeEntry& topicEntry, const TString& name, const std::shared_ptr<THashSet<ui64>>& partitionsToRequest, const std::shared_ptr<const NKikimrClient::TPersQueueRequest>& requestProto, std::shared_ptr<const TPersQueueBaseRequestProcessor::TNodesInfo> nodesInfo); - + void BootstrapImpl(const TActorContext& ctx) override; - bool OnPipeEventsAreReady(const TActorContext& ctx) override; + bool OnPipeEventsAreReady(const TActorContext& ctx) override; void Answer(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) override; - -private: + +private: std::shared_ptr<THashSet<ui64>> PartitionsToRequest; std::shared_ptr<const NKikimrClient::TPersQueueRequest> RequestProto; - THashMap<ui32, ui64> PartitionToTablet; + THashMap<ui32, ui64> PartitionToTablet; std::shared_ptr<const TPersQueueBaseRequestProcessor::TNodesInfo> NodesInfo; -}; - - -// -// GetReadSessionsInfo command -// - -class TPersQueueGetReadSessionsInfoProcessor : public TPersQueueBaseRequestProcessor { -public: +}; + + +// +// GetReadSessionsInfo command +// + +class TPersQueueGetReadSessionsInfoProcessor : public TPersQueueBaseRequestProcessor { +public: TPersQueueGetReadSessionsInfoProcessor( const NKikimrClient::TPersQueueRequest& request, const TActorId& schemeCache, std::shared_ptr<IPersQueueGetReadSessionsInfoWorkerFactory> pqReadSessionsInfoWorkerFactory ); - + bool ReadyForAnswer(const TActorContext& ctx) override { if (TPersQueueBaseRequestProcessor::ReadyForAnswer(ctx)) { if (HasSessionsRequest || ReadSessions.empty()) { @@ -167,7 +167,7 @@ public: } } -private: +private: THolder<IActor> CreateTopicSubactor(const TSchemeEntry& topicEntry, const TString& name) override; THolder<IActor> CreateSessionsSubactor(const THashMap<TString, TActorId>&& readSessions); @@ -175,45 +175,45 @@ private: std::shared_ptr<IPersQueueGetReadSessionsInfoWorkerFactory> PQReadSessionsInfoWorkerFactory; mutable bool HasSessionsRequest = false; THashMap<TString, TActorId> ReadSessions; -}; - +}; + class TPersQueueGetReadSessionsInfoTopicWorker : public TReplierToParent<TPipesWaiterActor<TTopicInfoBasedActor, TEvPersQueue::TEvOffsetsResponse>> { -public: +public: TPersQueueGetReadSessionsInfoTopicWorker(const TActorId& parent, const TTopicInfoBasedActor::TSchemeEntry& topicEntry, const TString& name, const std::shared_ptr<const NKikimrClient::TPersQueueRequest>& requestProto, std::shared_ptr<const TPersQueueBaseRequestProcessor::TNodesInfo> nodesInfo); - + void BootstrapImpl(const TActorContext& ctx) override; void Answer(const TActorContext& ctx, EResponseStatus status, NPersQueue::NErrorCode::EErrorCode code, const TString& errorReason) override; - bool OnPipeEventsAreReady(const TActorContext& ctx) override; - - void Die(const TActorContext& ctx) override; - - void SendReadSessionsInfoToBalancer(const TActorContext& ctx); - bool ReadyToAnswer() const; - - // true returned from this function means that we called Die(). + bool OnPipeEventsAreReady(const TActorContext& ctx) override; + + void Die(const TActorContext& ctx) override; + + void SendReadSessionsInfoToBalancer(const TActorContext& ctx); + bool ReadyToAnswer() const; + + // true returned from this function means that we called Die(). 
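// (As defined in the .cpp diff above: WaitAllPipeEvents() first defers to TPipesWaiterActor::WaitAllPipeEvents();
//  when that returns false it switches to WaitAllPipeEventsStateFunc so that balancer pipe events are still
//  intercepted while the remaining per-tablet answers arrive.)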
[[nodiscard]] bool WaitAllPipeEvents(const TActorContext& ctx); - STFUNC(WaitAllPipeEventsStateFunc); - - void Handle(TEvPersQueue::TEvReadSessionsInfoResponse::TPtr& ev, const TActorContext& ctx); - TString GetHostName(ui32 hostId) const; - bool OnClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& ctx) override; - bool HandleConnect(TEvTabletPipe::TEvClientConnected* ev, const TActorContext& ctx); - bool HandleDestroy(TEvTabletPipe::TEvClientDestroyed* ev, const TActorContext& ctx); - -private: + STFUNC(WaitAllPipeEventsStateFunc); + + void Handle(TEvPersQueue::TEvReadSessionsInfoResponse::TPtr& ev, const TActorContext& ctx); + TString GetHostName(ui32 hostId) const; + bool OnClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& ctx) override; + bool HandleConnect(TEvTabletPipe::TEvClientConnected* ev, const TActorContext& ctx); + bool HandleDestroy(TEvTabletPipe::TEvClientDestroyed* ev, const TActorContext& ctx); + +private: std::shared_ptr<const NKikimrClient::TPersQueueRequest> RequestProto; TActorId BalancerPipe; - TEvPersQueue::TEvReadSessionsInfoResponse::TPtr BalancerResponse; - bool BalancerReplied = false; - bool PipeEventsAreReady = false; - THashMap<ui32, ui64> PartitionToTablet; - THashMap<ui64, ui32> TabletNodes; + TEvPersQueue::TEvReadSessionsInfoResponse::TPtr BalancerResponse; + bool BalancerReplied = false; + bool PipeEventsAreReady = false; + THashMap<ui32, ui64> PartitionToTablet; + THashMap<ui64, ui32> TabletNodes; std::shared_ptr<const TPersQueueBaseRequestProcessor::TNodesInfo> NodesInfo; -}; - -} // namespace NMsgBusProxy -} // namespace NKikimr +}; + +} // namespace NMsgBusProxy +} // namespace NKikimr diff --git a/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp b/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp index 237bba147b9..2e59ae3ce23 100644 --- a/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp +++ b/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp @@ -1,6 +1,6 @@ -#include "msgbus_server_persqueue.h" +#include "msgbus_server_persqueue.h" #include "msgbus_server_pq_read_session_info.h" - + #include <ydb/core/base/tabletid.h> #include <ydb/core/engine/minikql/flat_local_tx_factory.h> #include <ydb/core/keyvalue/keyvalue_events.h> @@ -11,261 +11,261 @@ #include <ydb/core/testlib/fake_scheme_shard.h> #include <ydb/core/testlib/mock_pq_metacache.h> #include <ydb/core/testlib/tablet_helpers.h> - + #include <library/cpp/testing/unittest/registar.h> - -#include <util/generic/is_in.h> -#include <util/string/join.h> + +#include <util/generic/is_in.h> +#include <util/string/join.h> #include <util/system/type_name.h> - -#include <list> -#include <typeinfo> - -namespace NKikimr { -namespace NMsgBusProxy { - -using namespace testing; + +#include <list> +#include <typeinfo> + +namespace NKikimr { +namespace NMsgBusProxy { + +using namespace testing; using namespace NSchemeCache; - + void FillValidTopicRequest(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request, ui64 topicsCount); -void MakeEmptyTopic(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request); -void MakeDuplicatedTopic(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request); -void MakeDuplicatedPartition(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request); - -// Base test class with useful helpers for constructing all you need to test pq requests. 
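// The helpers below (see the class body) provide: a TTestBasicRuntime with observer hooks, an edge actor for
// grabbing responses, a mock PQ meta cache, a fake scheme shard, balancer/PQ tablet bootstrap
// (PrepareBalancer / PreparePQTablet), pausing and replaying of events addressed to the actor under test,
// and assertions over the resulting TEvPersQueue::TEvResponse.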
-class TMessageBusServerPersQueueRequestTestBase: public TTestBase { -protected: - void SetUp() override { - TTestBase::SetUp(); - - //TTestActorRuntime::SetVerbose(true); // debug events - - // Initialize runtime +void MakeEmptyTopic(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request); +void MakeDuplicatedTopic(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request); +void MakeDuplicatedPartition(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request); + +// Base test class with useful helpers for constructing all you need to test pq requests. +class TMessageBusServerPersQueueRequestTestBase: public TTestBase { +protected: + void SetUp() override { + TTestBase::SetUp(); + + //TTestActorRuntime::SetVerbose(true); // debug events + + // Initialize runtime Runtime = MakeHolder<TTestBasicRuntime>(); Runtime->SetObserverFunc([this](TTestActorRuntimeBase&, TAutoPtr<IEventHandle>& event) { - return EventsObserver(event); - }); + return EventsObserver(event); + }); Runtime->SetRegistrationObserverFunc([this](TTestActorRuntimeBase& runtime, const TActorId& parentId, const TActorId& actorId) { - TTestActorRuntime::DefaultRegistrationObserver(runtime, parentId, actorId); - return RegistrationObserver(parentId, actorId); - }); + TTestActorRuntime::DefaultRegistrationObserver(runtime, parentId, actorId); + return RegistrationObserver(parentId, actorId); + }); SetupTabletServices(*Runtime); // Calls Runtime->Initialize(); - - // Edge actor - EdgeActorId = Runtime->AllocateEdgeActor(); - - // Logging - Runtime->SetLogPriority(NKikimrServices::PERSQUEUE, NLog::PRI_DEBUG); - } - - void TearDown() override { - // Assertions - try { - AssertTestActorsDestroyed(); - UNIT_ASSERT(TestMainActorHasAnswered); - } catch (...) { - // If assertions will throw, we need to clear all resources - } - - // Cleanup - Runtime.Reset(); - MockPQMetaCache = nullptr; - Actor = nullptr; - TestMainActorHasAnswered = false; + + // Edge actor + EdgeActorId = Runtime->AllocateEdgeActor(); + + // Logging + Runtime->SetLogPriority(NKikimrServices::PERSQUEUE, NLog::PRI_DEBUG); + } + + void TearDown() override { + // Assertions + try { + AssertTestActorsDestroyed(); + UNIT_ASSERT(TestMainActorHasAnswered); + } catch (...) 
{ + // If assertions will throw, we need to clear all resources + } + + // Cleanup + Runtime.Reset(); + MockPQMetaCache = nullptr; + Actor = nullptr; + TestMainActorHasAnswered = false; EdgeActorId = TActorId(); TestMainActorId = TActorId(); - EdgeEventHandle.Reset(); - LoadedFakeSchemeShard = false; - TestActors.clear(); - PausedEventTypes.clear(); - PausedEvents.clear(); - TTestBase::TearDown(); - } - - // - // Helpers - // - - TMockPQMetaCache& GetMockPQMetaCache() { - if (!MockPQMetaCache) { - MockPQMetaCache = new TMockPQMetaCache(); - Runtime->Register(MockPQMetaCache); - } - return *MockPQMetaCache; - } - - void EnsureHasFakeSchemeShard() { - if (!LoadedFakeSchemeShard) { - TFakeSchemeShardState::TPtr state{new TFakeSchemeShardState()}; - BootFakeSchemeShard(*Runtime, 123, state); - LoadedFakeSchemeShard = true; - } - } - - THolder<TEvPersQueue::TEvUpdateBalancerConfig> MakeUpdateBalancerConfigRequest(const TString& topic, const TVector<std::pair<ui32, ui64>>& partitionsToTablets, const ui64 schemeShardId = 123) { - static int version = 0; - ++version; - - THolder<TEvPersQueue::TEvUpdateBalancerConfig> request = MakeHolder<TEvPersQueue::TEvUpdateBalancerConfig>(); - for (const auto& p : partitionsToTablets) { - auto* part = request->Record.AddPartitions(); - part->SetPartition(p.first); - part->SetTabletId(p.second); - } - request->Record.SetTxId(12345); - request->Record.SetPathId(1); - request->Record.SetVersion(version); - request->Record.SetTopicName(topic); - request->Record.SetPath("path"); - request->Record.SetSchemeShardId(schemeShardId); - return request; - } - + EdgeEventHandle.Reset(); + LoadedFakeSchemeShard = false; + TestActors.clear(); + PausedEventTypes.clear(); + PausedEvents.clear(); + TTestBase::TearDown(); + } + + // + // Helpers + // + + TMockPQMetaCache& GetMockPQMetaCache() { + if (!MockPQMetaCache) { + MockPQMetaCache = new TMockPQMetaCache(); + Runtime->Register(MockPQMetaCache); + } + return *MockPQMetaCache; + } + + void EnsureHasFakeSchemeShard() { + if (!LoadedFakeSchemeShard) { + TFakeSchemeShardState::TPtr state{new TFakeSchemeShardState()}; + BootFakeSchemeShard(*Runtime, 123, state); + LoadedFakeSchemeShard = true; + } + } + + THolder<TEvPersQueue::TEvUpdateBalancerConfig> MakeUpdateBalancerConfigRequest(const TString& topic, const TVector<std::pair<ui32, ui64>>& partitionsToTablets, const ui64 schemeShardId = 123) { + static int version = 0; + ++version; + + THolder<TEvPersQueue::TEvUpdateBalancerConfig> request = MakeHolder<TEvPersQueue::TEvUpdateBalancerConfig>(); + for (const auto& p : partitionsToTablets) { + auto* part = request->Record.AddPartitions(); + part->SetPartition(p.first); + part->SetTabletId(p.second); + } + request->Record.SetTxId(12345); + request->Record.SetPathId(1); + request->Record.SetVersion(version); + request->Record.SetTopicName(topic); + request->Record.SetPath("path"); + request->Record.SetSchemeShardId(schemeShardId); + return request; + } + TActorId StartBalancer(ui64 balancerTabletId) { TActorId id = CreateTestBootstrapper(*Runtime, CreateTestTabletInfo(balancerTabletId, TTabletTypes::PERSQUEUE_READ_BALANCER, TErasureType::ErasureNone), - &CreatePersQueueReadBalancer); - - TDispatchOptions options; - options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot)); - Runtime->DispatchEvents(options); - return id; - } - + &CreatePersQueueReadBalancer); + + TDispatchOptions options; + options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot)); + 
Runtime->DispatchEvents(options); + return id; + } + TActorId PrepareBalancer(const TString& topic, ui64 balancerTabletId, const TVector<std::pair<ui32, ui64>>& partitionsToTablets, const ui64 schemeShardId = 123) { - EnsureHasFakeSchemeShard(); + EnsureHasFakeSchemeShard(); TActorId id = StartBalancer(balancerTabletId); - - THolder<TEvPersQueue::TEvUpdateBalancerConfig> request = MakeUpdateBalancerConfigRequest(topic, partitionsToTablets, schemeShardId); - - Runtime->SendToPipe(balancerTabletId, EdgeActorId, request.Release(), 0, GetPipeConfigWithRetries()); - TAutoPtr<IEventHandle> handle; - TEvPersQueue::TEvUpdateConfigResponse* result = Runtime->GrabEdgeEvent<TEvPersQueue::TEvUpdateConfigResponse>(handle); - - UNIT_ASSERT(result != nullptr); - const auto& rec = result->Record; - UNIT_ASSERT(rec.HasStatus() && rec.GetStatus() == NKikimrPQ::OK); - UNIT_ASSERT(rec.HasTxId() && rec.GetTxId() == 12345); - UNIT_ASSERT(rec.HasOrigin() && result->GetOrigin() == balancerTabletId); - + + THolder<TEvPersQueue::TEvUpdateBalancerConfig> request = MakeUpdateBalancerConfigRequest(topic, partitionsToTablets, schemeShardId); + + Runtime->SendToPipe(balancerTabletId, EdgeActorId, request.Release(), 0, GetPipeConfigWithRetries()); + TAutoPtr<IEventHandle> handle; + TEvPersQueue::TEvUpdateConfigResponse* result = Runtime->GrabEdgeEvent<TEvPersQueue::TEvUpdateConfigResponse>(handle); + + UNIT_ASSERT(result != nullptr); + const auto& rec = result->Record; + UNIT_ASSERT(rec.HasStatus() && rec.GetStatus() == NKikimrPQ::OK); + UNIT_ASSERT(rec.HasTxId() && rec.GetTxId() == 12345); + UNIT_ASSERT(rec.HasOrigin() && result->GetOrigin() == balancerTabletId); + ForwardToTablet(*Runtime, balancerTabletId, EdgeActorId, new TEvents::TEvPoisonPill()); - TDispatchOptions rebootOptions; - rebootOptions.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvRestored, 2)); - Runtime->DispatchEvents(rebootOptions); - return id; - } - - THolder<TEvPersQueue::TEvUpdateConfig> MakeUpdatePQRequest(const TString& topic, const TVector<size_t>& partitions) { - static int version = 0; - ++version; - - THolder<TEvPersQueue::TEvUpdateConfig> request(new TEvPersQueue::TEvUpdateConfig()); - for (size_t i : partitions) { + TDispatchOptions rebootOptions; + rebootOptions.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvRestored, 2)); + Runtime->DispatchEvents(rebootOptions); + return id; + } + + THolder<TEvPersQueue::TEvUpdateConfig> MakeUpdatePQRequest(const TString& topic, const TVector<size_t>& partitions) { + static int version = 0; + ++version; + + THolder<TEvPersQueue::TEvUpdateConfig> request(new TEvPersQueue::TEvUpdateConfig()); + for (size_t i : partitions) { request->Record.MutableTabletConfig()->AddPartitionIds(i); - } - request->Record.MutableTabletConfig()->SetCacheSize(10*1024*1024); - request->Record.SetTxId(12345); - auto tabletConfig = request->Record.MutableTabletConfig(); + } + request->Record.MutableTabletConfig()->SetCacheSize(10*1024*1024); + request->Record.SetTxId(12345); + auto tabletConfig = request->Record.MutableTabletConfig(); tabletConfig->SetTopicName(topic); - tabletConfig->SetVersion(version); - auto config = tabletConfig->MutablePartitionConfig(); - config->SetMaxCountInPartition(20000000); - config->SetMaxSizeInPartition(100 * 1024 * 1024); - config->SetLifetimeSeconds(0); + tabletConfig->SetVersion(version); + auto config = tabletConfig->MutablePartitionConfig(); + config->SetMaxCountInPartition(20000000); + config->SetMaxSizeInPartition(100 * 1024 * 1024); + 
config->SetLifetimeSeconds(0); config->SetSourceIdLifetimeSeconds(1*60*60); - config->SetMaxWriteInflightSize(90000000); - config->SetLowWatermark(6*1024*1024); - - return request; - } - + config->SetMaxWriteInflightSize(90000000); + config->SetLowWatermark(6*1024*1024); + + return request; + } + TActorId StartPQTablet(ui64 tabletId) { TActorId id = CreateTestBootstrapper(*Runtime, CreateTestTabletInfo(tabletId, TTabletTypes::PERSQUEUE, TErasureType::ErasureNone), - &CreatePersQueue); - - TDispatchOptions options; - options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot)); - Runtime->DispatchEvents(options); - return id; - } - + &CreatePersQueue); + + TDispatchOptions options; + options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot)); + Runtime->DispatchEvents(options); + return id; + } + TActorId PreparePQTablet(const TString& topic, ui64 tabletId, const TVector<size_t>& partitions) { - EnsureHasFakeSchemeShard(); + EnsureHasFakeSchemeShard(); TActorId id = StartPQTablet(tabletId); - - TAutoPtr<IEventHandle> handle; - { - THolder<TEvPersQueue::TEvUpdateConfig> request = MakeUpdatePQRequest(topic, partitions); - Runtime->SendToPipe(tabletId, EdgeActorId, request.Release(), 0, GetPipeConfigWithRetries()); - TEvPersQueue::TEvUpdateConfigResponse* result = Runtime->GrabEdgeEvent<TEvPersQueue::TEvUpdateConfigResponse>(handle); - - UNIT_ASSERT(result); - auto& rec = result->Record; - UNIT_ASSERT_C(rec.HasStatus() && rec.GetStatus() == NKikimrPQ::OK, "rec: " << rec); - UNIT_ASSERT_C(rec.HasTxId() && rec.GetTxId() == 12345, "rec: " << rec); - UNIT_ASSERT_C(rec.HasOrigin() && result->GetOrigin() == tabletId, "rec: " << rec); - } - - { - THolder<TEvKeyValue::TEvRequest> request; - request.Reset(new TEvKeyValue::TEvRequest); - auto read = request->Record.AddCmdRead(); - read->SetKey("_config"); - - Runtime->SendToPipe(tabletId, EdgeActorId, request.Release(), 0, GetPipeConfigWithRetries()); - TEvKeyValue::TEvResponse* result = Runtime->GrabEdgeEvent<TEvKeyValue::TEvResponse>(handle); - - UNIT_ASSERT(result); - UNIT_ASSERT(result->Record.HasStatus()); - UNIT_ASSERT_EQUAL(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_OK); - } - return id; - } - - // Register tested actor - void RegisterActor(const NKikimrClient::TPersQueueRequest& request) { + + TAutoPtr<IEventHandle> handle; + { + THolder<TEvPersQueue::TEvUpdateConfig> request = MakeUpdatePQRequest(topic, partitions); + Runtime->SendToPipe(tabletId, EdgeActorId, request.Release(), 0, GetPipeConfigWithRetries()); + TEvPersQueue::TEvUpdateConfigResponse* result = Runtime->GrabEdgeEvent<TEvPersQueue::TEvUpdateConfigResponse>(handle); + + UNIT_ASSERT(result); + auto& rec = result->Record; + UNIT_ASSERT_C(rec.HasStatus() && rec.GetStatus() == NKikimrPQ::OK, "rec: " << rec); + UNIT_ASSERT_C(rec.HasTxId() && rec.GetTxId() == 12345, "rec: " << rec); + UNIT_ASSERT_C(rec.HasOrigin() && result->GetOrigin() == tabletId, "rec: " << rec); + } + + { + THolder<TEvKeyValue::TEvRequest> request; + request.Reset(new TEvKeyValue::TEvRequest); + auto read = request->Record.AddCmdRead(); + read->SetKey("_config"); + + Runtime->SendToPipe(tabletId, EdgeActorId, request.Release(), 0, GetPipeConfigWithRetries()); + TEvKeyValue::TEvResponse* result = Runtime->GrabEdgeEvent<TEvKeyValue::TEvResponse>(handle); + + UNIT_ASSERT(result); + UNIT_ASSERT(result->Record.HasStatus()); + UNIT_ASSERT_EQUAL(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_OK); + } + return id; + } + + // Register tested actor + void 
RegisterActor(const NKikimrClient::TPersQueueRequest& request) { Actor = CreateActorServerPersQueue( EdgeActorId, request, GetMockPQMetaCache().SelfId(), nullptr ); - TestMainActorId = Runtime->Register(Actor); - TestActors.insert(TestMainActorId); - } - - TEvPersQueue::TEvResponse* GrabResponseEvent() { - TEvPersQueue::TEvResponse* responseEvent = Runtime->GrabEdgeEvent<TEvPersQueue::TEvResponse>(EdgeEventHandle); - UNIT_ASSERT(responseEvent); - TestMainActorHasAnswered = true; - return responseEvent; - } - - const TEvPersQueue::TEvResponse* GetResponse() { - UNIT_ASSERT(EdgeEventHandle.Get() != nullptr); - return EdgeEventHandle->Get<TEvPersQueue::TEvResponse>(); - } - - size_t ResponseFieldsCount() { - const TEvPersQueue::TEvResponse* resp = GetResponse(); - UNIT_ASSERT(resp != nullptr); - const auto& r = resp->Record; - const auto& m = r.GetMetaResponse(); - const auto& p = r.GetPartitionResponse(); - return r.HasFetchResponse() - + m.HasCmdGetPartitionOffsetsResult() - + m.HasCmdGetTopicMetadataResult() - + m.HasCmdGetPartitionLocationsResult() - + m.HasCmdGetPartitionStatusResult() - + m.HasCmdGetReadSessionsInfoResult() - + (p.CmdWriteResultSize() > 0) - + p.HasCmdGetMaxSeqNoResult() - + p.HasCmdReadResult() - + p.HasCmdGetClientOffsetResult() - + p.HasCmdGetOwnershipResult(); - } - + TestMainActorId = Runtime->Register(Actor); + TestActors.insert(TestMainActorId); + } + + TEvPersQueue::TEvResponse* GrabResponseEvent() { + TEvPersQueue::TEvResponse* responseEvent = Runtime->GrabEdgeEvent<TEvPersQueue::TEvResponse>(EdgeEventHandle); + UNIT_ASSERT(responseEvent); + TestMainActorHasAnswered = true; + return responseEvent; + } + + const TEvPersQueue::TEvResponse* GetResponse() { + UNIT_ASSERT(EdgeEventHandle.Get() != nullptr); + return EdgeEventHandle->Get<TEvPersQueue::TEvResponse>(); + } + + size_t ResponseFieldsCount() { + const TEvPersQueue::TEvResponse* resp = GetResponse(); + UNIT_ASSERT(resp != nullptr); + const auto& r = resp->Record; + const auto& m = r.GetMetaResponse(); + const auto& p = r.GetPartitionResponse(); + return r.HasFetchResponse() + + m.HasCmdGetPartitionOffsetsResult() + + m.HasCmdGetTopicMetadataResult() + + m.HasCmdGetPartitionLocationsResult() + + m.HasCmdGetPartitionStatusResult() + + m.HasCmdGetReadSessionsInfoResult() + + (p.CmdWriteResultSize() > 0) + + p.HasCmdGetMaxSeqNoResult() + + p.HasCmdReadResult() + + p.HasCmdGetClientOffsetResult() + + p.HasCmdGetOwnershipResult(); + } + template<class T> bool AssertTopicResponsesImpl(const T& t, const TString& topic, NPersQueue::NErrorCode::EErrorCode code, ui32 numParts) { @@ -313,151 +313,151 @@ protected: void AssertFailedResponse(NPersQueue::NErrorCode::EErrorCode code, const THashSet<TString>& markers = {}, EResponseStatus status = MSTATUS_ERROR) { - const TEvPersQueue::TEvResponse* resp = GetResponse(); + const TEvPersQueue::TEvResponse* resp = GetResponse(); Cerr << "Assert failed: Check response: " << resp->Record << Endl; - UNIT_ASSERT(resp != nullptr); - UNIT_ASSERT_C(resp->Record.HasStatus(), "Response: " << resp->Record); - UNIT_ASSERT_UNEQUAL_C(resp->Record.GetStatus(), 1, "Response: " << resp->Record); + UNIT_ASSERT(resp != nullptr); + UNIT_ASSERT_C(resp->Record.HasStatus(), "Response: " << resp->Record); + UNIT_ASSERT_UNEQUAL_C(resp->Record.GetStatus(), 1, "Response: " << resp->Record); UNIT_ASSERT_EQUAL_C(resp->Record.GetErrorCode(), code, "code: " << (ui32)code << " Response: " << resp->Record); - UNIT_ASSERT_C(!resp->Record.GetErrorReason().empty(), "Response: " << resp->Record); - 
UNIT_ASSERT_VALUES_EQUAL_C(resp->Record.GetStatus(), status, "Response: " << resp->Record); - if (!markers.empty()) { - const TString reason = resp->Record.GetErrorReason(); - UNIT_ASSERT_STRING_CONTAINS_C(reason, "Marker# ", reason << " doesn't contain any marker, but it should."); - const size_t markerPos = reason.find("Marker# "); - UNIT_ASSERT_UNEQUAL(markerPos, TString::npos); - const TString marker = reason.substr(markerPos); - UNIT_ASSERT_C(IsIn(markers, marker), marker << " is not in the specified set: {" << JoinSeq(", ", markers) << "}"); - } - UNIT_ASSERT_VALUES_EQUAL_C(ResponseFieldsCount(), 0, "Response: " << resp->Record); - } - + UNIT_ASSERT_C(!resp->Record.GetErrorReason().empty(), "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(resp->Record.GetStatus(), status, "Response: " << resp->Record); + if (!markers.empty()) { + const TString reason = resp->Record.GetErrorReason(); + UNIT_ASSERT_STRING_CONTAINS_C(reason, "Marker# ", reason << " doesn't contain any marker, but it should."); + const size_t markerPos = reason.find("Marker# "); + UNIT_ASSERT_UNEQUAL(markerPos, TString::npos); + const TString marker = reason.substr(markerPos); + UNIT_ASSERT_C(IsIn(markers, marker), marker << " is not in the specified set: {" << JoinSeq(", ", markers) << "}"); + } + UNIT_ASSERT_VALUES_EQUAL_C(ResponseFieldsCount(), 0, "Response: " << resp->Record); + } + void AssertFailedResponse(NPersQueue::NErrorCode::EErrorCode code, const char* marker, EResponseStatus status = MSTATUS_ERROR) { - AssertFailedResponse(code, THashSet<TString>({marker}), status); - } - - void AssertSucceededResponse() { - const TEvPersQueue::TEvResponse* resp = GetResponse(); - UNIT_ASSERT(resp != nullptr); - UNIT_ASSERT_VALUES_EQUAL_C(resp->Record.GetStatus(), 1, "Response: " << resp->Record); - UNIT_ASSERT_C(resp->Record.HasErrorCode(), "Response: " << resp->Record); + AssertFailedResponse(code, THashSet<TString>({marker}), status); + } + + void AssertSucceededResponse() { + const TEvPersQueue::TEvResponse* resp = GetResponse(); + UNIT_ASSERT(resp != nullptr); + UNIT_ASSERT_VALUES_EQUAL_C(resp->Record.GetStatus(), 1, "Response: " << resp->Record); + UNIT_ASSERT_C(resp->Record.HasErrorCode(), "Response: " << resp->Record); UNIT_ASSERT_EQUAL_C(resp->Record.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(resp->Record.GetErrorReason().empty(), "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(ResponseFieldsCount(), 1, "Response: " << resp->Record); - } - - TTestActorRuntime::EEventAction EventsObserver(TAutoPtr<IEventHandle>& event) { - switch (event->Type) { - case NKikimr::TEvPersQueue::EvResponse: - { - if (event->Sender == TestMainActorId) { - TestMainActorHasAnswered = true; - UNIT_ASSERT_EQUAL(event->Recipient, EdgeActorId); - } - break; - } - } - if ((IsIn(PausedEventTypes, event->Type) || IsIn(PausedEventTypes, 0)) && IsIn(TestActors, event->Recipient)) { - PausedEvents.push_back(event); - return TTestActorRuntime::EEventAction::DROP; - } - return TTestActorRuntime::EEventAction::PROCESS; - } - + UNIT_ASSERT_C(resp->Record.GetErrorReason().empty(), "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(ResponseFieldsCount(), 1, "Response: " << resp->Record); + } + + TTestActorRuntime::EEventAction EventsObserver(TAutoPtr<IEventHandle>& event) { + switch (event->Type) { + case NKikimr::TEvPersQueue::EvResponse: + { + if (event->Sender == TestMainActorId) { + TestMainActorHasAnswered = true; + UNIT_ASSERT_EQUAL(event->Recipient, EdgeActorId); + } + 
break; + } + } + if ((IsIn(PausedEventTypes, event->Type) || IsIn(PausedEventTypes, 0)) && IsIn(TestActors, event->Recipient)) { + PausedEvents.push_back(event); + return TTestActorRuntime::EEventAction::DROP; + } + return TTestActorRuntime::EEventAction::PROCESS; + } + void RegistrationObserver(const TActorId& parentId, const TActorId& actorId) { - if (IsIn(TestActors, parentId)) { - IActor* child = Runtime->FindActor(actorId); - UNIT_ASSERT(child); - UNIT_ASSERT(TestActors.insert(actorId).second); - } - } - - // TODO: move this code to test actor runtime - void AssertTestActorsDestroyed() { - auto events = Runtime->CaptureEvents(); + if (IsIn(TestActors, parentId)) { + IActor* child = Runtime->FindActor(actorId); + UNIT_ASSERT(child); + UNIT_ASSERT(TestActors.insert(actorId).second); + } + } + + // TODO: move this code to test actor runtime + void AssertTestActorsDestroyed() { + auto events = Runtime->CaptureEvents(); THashSet<TActorId> destroyedActors; - for (const auto& event : events) { - if (event->Type == TEvents::TSystem::PoisonPill) { - destroyedActors.insert(event->Recipient); - } - } + for (const auto& event : events) { + if (event->Type == TEvents::TSystem::PoisonPill) { + destroyedActors.insert(event->Recipient); + } + } for (const TActorId& actorId : TestActors) { - IActor* actor = Runtime->FindActor(actorId); - if (actor != nullptr) { + IActor* actor = Runtime->FindActor(actorId); + if (actor != nullptr) { const bool isPipe = actor->ActivityType == NKikimrServices::TActivity::TABLET_PIPE_CLIENT; - if (isPipe) { - UNIT_ASSERT_C(IsIn(destroyedActors, actorId), - "Pipe client was not destroyed after test actor worked. Pipe client actor id: " << actorId); - } else { - UNIT_ASSERT_C(IsIn(destroyedActors, actorId), - "Test actor or its child wasn't destroyed. Actor id: " << actorId + if (isPipe) { + UNIT_ASSERT_C(IsIn(destroyedActors, actorId), + "Pipe client was not destroyed after test actor worked. Pipe client actor id: " << actorId); + } else { + UNIT_ASSERT_C(IsIn(destroyedActors, actorId), + "Test actor or its child wasn't destroyed. Actor id: " << actorId << ". 
Type: " << TypeName(*actor)); - } - } - } - } - - // TODO: move this code to test actor runtime - void PauseInputForTestActors(ui64 eventType = 0) { - PausedEventTypes.insert(eventType); - } - - template <class TEvent> - void PauseInputForTestActors() { - PauseInputForTestActors(TEvent::EventType); - } - - void ResumeEventsForTestActors() { - PausedEventTypes.clear(); - for (TAutoPtr<IEventHandle>& event : PausedEvents) { - if (event.Get() != nullptr) { - Runtime->Send(event.Release()); - } - } - PausedEvents.clear(); - } - -protected: + } + } + } + } + + // TODO: move this code to test actor runtime + void PauseInputForTestActors(ui64 eventType = 0) { + PausedEventTypes.insert(eventType); + } + + template <class TEvent> + void PauseInputForTestActors() { + PauseInputForTestActors(TEvent::EventType); + } + + void ResumeEventsForTestActors() { + PausedEventTypes.clear(); + for (TAutoPtr<IEventHandle>& event : PausedEvents) { + if (event.Get() != nullptr) { + Runtime->Send(event.Release()); + } + } + PausedEvents.clear(); + } + +protected: TActorId EdgeActorId; - IActor* Actor = nullptr; + IActor* Actor = nullptr; TActorId TestMainActorId; - bool TestMainActorHasAnswered = false; - TMockPQMetaCache* MockPQMetaCache = nullptr; - TAutoPtr<IEventHandle> EdgeEventHandle; - bool LoadedFakeSchemeShard = false; + bool TestMainActorHasAnswered = false; + TMockPQMetaCache* MockPQMetaCache = nullptr; + TAutoPtr<IEventHandle> EdgeEventHandle; + bool LoadedFakeSchemeShard = false; THashSet<TActorId> TestActors; // Actor and its children - THashSet<ui64> PausedEventTypes; - std::list<TAutoPtr<IEventHandle>> PausedEvents; - THolder<TTestActorRuntime> Runtime; -}; - -// Common tests that are actual to all pq requests. -// To run these tests you need to insert COMMON_TESTS_LIST() macro in particular command's test. -class TMessageBusServerPersQueueRequestCommonTest: public TMessageBusServerPersQueueRequestTestBase { -public: -#define COMMON_TESTS_LIST() \ - UNIT_TEST(HandlesTimeout) \ + THashSet<ui64> PausedEventTypes; + std::list<TAutoPtr<IEventHandle>> PausedEvents; + THolder<TTestActorRuntime> Runtime; +}; + +// Common tests that are actual to all pq requests. +// To run these tests you need to insert COMMON_TESTS_LIST() macro in particular command's test. 
+class TMessageBusServerPersQueueRequestCommonTest: public TMessageBusServerPersQueueRequestTestBase { +public: +#define COMMON_TESTS_LIST() \ + UNIT_TEST(HandlesTimeout) \ UNIT_TEST(FailsOnFailedGetAllTopicsRequest) \ UNIT_TEST(FailsOnBadRootStatusInGetNodeRequest) \ UNIT_TEST(FailesOnNotATopic) \ - UNIT_TEST(FailsOnNotOkStatusInGetNodeRequest) \ - UNIT_TEST(FailsOnNoBalancerInGetNodeRequest) \ - UNIT_TEST(FailsOnZeroBalancerTabletIdInGetNodeRequest) \ - UNIT_TEST(FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly) \ - /**/ - + UNIT_TEST(FailsOnNotOkStatusInGetNodeRequest) \ + UNIT_TEST(FailsOnNoBalancerInGetNodeRequest) \ + UNIT_TEST(FailsOnZeroBalancerTabletIdInGetNodeRequest) \ + UNIT_TEST(FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly) \ + /**/ + virtual NKikimrClient::TPersQueueRequest MakeValidRequest(ui64 topicsCount = 2) = 0; - + void SetBalancerId(TSchemeCacheNavigate::TResultSet& resultSet, ui64 index, const TMaybe<ui64>& tabletId) { auto* newInfo = new TSchemeCacheNavigate::TPQGroupInfo(*resultSet[index].PQGroupInfo); if (tabletId.Defined()) { newInfo->Description.SetBalancerTabletID(*tabletId); } else { newInfo->Description.ClearBalancerTabletID(); - } + } resultSet[index].PQGroupInfo.Reset(newInfo); } - + TSchemeCacheNavigate::TEntry MakeEntry( ui64 topicId, TSchemeCacheNavigate::EStatus status = TSchemeCacheNavigate::EStatus::Ok, @@ -472,7 +472,7 @@ public: || !makePQDescription ) { return entry; - } + } auto *pqInfo = new TSchemeCacheNavigate::TPQGroupInfo(); pqInfo->Kind = TSchemeCacheNavigate::KindTopic; auto &descr = pqInfo->Description; @@ -499,8 +499,8 @@ public: } entry.PQGroupInfo.Reset(pqInfo); return entry; - } - + } + TSchemeCacheNavigate::TResultSet MakeResultSet(bool valid = true) { TSchemeCacheNavigate::TResultSet resultSet; if (valid) { @@ -510,862 +510,862 @@ public: resultSet.emplace_back(std::move( MakeEntry(2) )); - } + } return resultSet; - } - - void HandlesTimeout() { + } + + void HandlesTimeout() { EXPECT_CALL(GetMockPQMetaCache(), HandleDescribeAllTopics(_, _)); // gets request and doesn't reply - NKikimrClient::TPersQueueRequest request = MakeValidRequest(); - RegisterActor(request); - Runtime->EnableScheduleForActor(Actor->SelfId()); - - TDispatchOptions options; + NKikimrClient::TPersQueueRequest request = MakeValidRequest(); + RegisterActor(request); + Runtime->EnableScheduleForActor(Actor->SelfId()); + + TDispatchOptions options; options.FinalEvents.emplace_back([](IEventHandle& h) { return h.Type == TEvPqMetaCache::TEvDescribeAllTopicsRequest::EventType; }, 1); - Runtime->DispatchEvents(options); - - Runtime->UpdateCurrentTime(Runtime->GetCurrentTime() + TDuration::MilliSeconds(90000 + 1)); - - GrabResponseEvent(); + Runtime->DispatchEvents(options); + + Runtime->UpdateCurrentTime(Runtime->GetCurrentTime() + TDuration::MilliSeconds(90000 + 1)); + + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::ERROR, {"Marker# PQ11", "Marker# PQ16"}, MSTATUS_TIMEOUT); - } - + } + void FailsOnFailedGetAllTopicsRequest() { GetMockPQMetaCache().SetAllTopicsAnswer(false); - - NKikimrClient::TPersQueueRequest request = MakeValidRequest(); - RegisterActor(request); - - GrabResponseEvent(); + + NKikimrClient::TPersQueueRequest request = MakeValidRequest(); + RegisterActor(request); + + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::UNKNOWN_TOPIC, {"Marker# PQ15", "Marker# PQ17"}); - } - - void FailsOnNotOkStatusInGetNodeRequest() { + } + + void FailsOnNotOkStatusInGetNodeRequest() { auto entry = 
MakeEntry(1); entry.Status = TSchemeCacheNavigate::EStatus::PathErrorUnknown; - + GetMockPQMetaCache().SetAllTopicsAnswer(true, TSchemeCacheNavigate::TResultSet{entry}); NKikimrClient::TPersQueueRequest request = MakeValidRequest(1); - RegisterActor(request); - - GrabResponseEvent(); + RegisterActor(request); + + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::UNKNOWN_TOPIC, "Marker# PQ150"); - } - + } + void FailsOnBadRootStatusInGetNodeRequest() { auto resultSet = MakeResultSet(); resultSet[0].Status = ESchemeStatus::RootUnknown; - + GetMockPQMetaCache().SetAllTopicsAnswer(true, std::move(resultSet)); - NKikimrClient::TPersQueueRequest request = MakeValidRequest(); - RegisterActor(request); - - GrabResponseEvent(); + NKikimrClient::TPersQueueRequest request = MakeValidRequest(); + RegisterActor(request); + + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::UNKNOWN_TOPIC, {"Marker# PQ1", "Marker# PQ14"}); - } - + } + void FailesOnNotATopic() { auto resultSet = MakeResultSet(); resultSet[1].Kind = TSchemeCacheNavigate::KindPath; resultSet[1].PQGroupInfo = nullptr; - + GetMockPQMetaCache().SetAllTopicsAnswer(true, std::move(resultSet)); - NKikimrClient::TPersQueueRequest request = MakeValidRequest(); - RegisterActor(request); - - GrabResponseEvent(); + NKikimrClient::TPersQueueRequest request = MakeValidRequest(); + RegisterActor(request); + + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::UNKNOWN_TOPIC, {"Marker# PQ95", "Marker# PQ13"}); - } - - void FailsOnNoBalancerInGetNodeRequest() { + } + + void FailsOnNoBalancerInGetNodeRequest() { auto resultSet = MakeResultSet(); SetBalancerId(resultSet, 0, Nothing()); GetMockPQMetaCache().SetAllTopicsAnswer(true, std::move(resultSet)); - - NKikimrClient::TPersQueueRequest request = MakeValidRequest(); - RegisterActor(request); - - GrabResponseEvent(); + + NKikimrClient::TPersQueueRequest request = MakeValidRequest(); + RegisterActor(request); + + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::UNKNOWN_TOPIC, {"Marker# PQ93", "Marker# PQ193"}); - } - - void FailsOnZeroBalancerTabletIdInGetNodeRequest() { + } + + void FailsOnZeroBalancerTabletIdInGetNodeRequest() { auto resultSet = MakeResultSet(); SetBalancerId(resultSet, 0, 0); GetMockPQMetaCache().SetAllTopicsAnswer(true, std::move(resultSet)); - - NKikimrClient::TPersQueueRequest request = MakeValidRequest(); - RegisterActor(request); - - GrabResponseEvent(); + + NKikimrClient::TPersQueueRequest request = MakeValidRequest(); + RegisterActor(request); + + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::UNKNOWN_TOPIC, {"Marker# PQ94", "Marker# PQ22"}); - } - - void FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly() { + } + + void FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly() { auto resultSet = MakeResultSet(); resultSet[1].Status = TSchemeCacheNavigate::EStatus::LookupError; //SetBalancerId(resultSet, 1, 0); GetMockPQMetaCache().SetAllTopicsAnswer(true, resultSet); - - NKikimrClient::TPersQueueRequest request = MakeValidRequest(); - RegisterActor(request); - - GrabResponseEvent(); + + NKikimrClient::TPersQueueRequest request = MakeValidRequest(); + RegisterActor(request); + + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::ERROR, "Marker# PQ1"); - } - - // Implementation details for test with pipe disconnection for inheritance - enum class EDisconnectionMode { - DisconnectionComesFirst, - DisconnectionComesSecond, - AnswerDoesNotArrive, - }; - - template 
<class TResponseEvent> - void HandlesPipeDisconnectionImpl(EDisconnectionMode disconnectionMode, std::function<void(EDisconnectionMode disconnectionMode)> dataValidationFunction, bool requestTheWholeTopic = false) { + } + + // Implementation details for test with pipe disconnection for inheritance + enum class EDisconnectionMode { + DisconnectionComesFirst, + DisconnectionComesSecond, + AnswerDoesNotArrive, + }; + + template <class TResponseEvent> + void HandlesPipeDisconnectionImpl(EDisconnectionMode disconnectionMode, std::function<void(EDisconnectionMode disconnectionMode)> dataValidationFunction, bool requestTheWholeTopic = false) { GetMockPQMetaCache().SetAllTopicsAnswer(true, std::forward<TSchemeCacheNavigate::TResultSet>(MakeResultSet())); - - PrepareBalancer("topic1", MakeTabletID(0, 0, 100), {{1, MakeTabletID(0, 0, 101)}}); - PreparePQTablet("topic1", MakeTabletID(0, 0, 101), {0}); - - PrepareBalancer("topic2", MakeTabletID(0, 0, 200), {{1, MakeTabletID(0, 0, 201)}, {2, MakeTabletID(0, 0, 202)}, {3, MakeTabletID(0, 0, 203)}}); - PreparePQTablet("topic2", MakeTabletID(0, 0, 201), {0}); - PreparePQTablet("topic2", MakeTabletID(0, 0, 202), {1}); - PreparePQTablet("topic2", MakeTabletID(0, 0, 203), {2}); - - const ui64 tabletToDestroy = MakeTabletID(0, 0, 203); - - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - RegisterActor(req); - - // Pause responses with status - PauseInputForTestActors<TResponseEvent>(); - const size_t expectedEventsCount = requestTheWholeTopic ? 4 : 3; // When request is about the whole topic (not for particular partitions), - // we expect one more event for partition 0 of topic2 - // Wait that pause events are sent - { - TDispatchOptions options; - options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition([this, expectedEventsCount](IEventHandle&){ return PausedEvents.size() == expectedEventsCount; })); - Runtime->DispatchEvents(options); - } - UNIT_ASSERT_VALUES_EQUAL(PausedEvents.size(), expectedEventsCount); - - // Destroy one tablet and wait corresponding event from pipe - PauseInputForTestActors(NKikimr::TEvTabletPipe::EvClientDestroyed); - Runtime->SendToPipe(tabletToDestroy, EdgeActorId, new TEvents::TEvPoisonPill()); - { - TDispatchOptions options; - options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition([this, expectedEventsCount](IEventHandle&){ return PausedEvents.size() == expectedEventsCount + 1; })); - Runtime->DispatchEvents(options); - } - UNIT_ASSERT_VALUES_EQUAL(PausedEvents.size(), expectedEventsCount + 1); - - // Save paused events locally - // Later we will change order of them - std::list<TAutoPtr<IEventHandle>> pausedEvents; - pausedEvents.swap(PausedEvents); - ResumeEventsForTestActors(); - - TAutoPtr<IEventHandle> disconnectEvent = pausedEvents.back(); - pausedEvents.pop_back(); - TAutoPtr<IEventHandle> answerEvent; - { - bool foundAnswerEvent = false; - for (auto i = pausedEvents.begin(); i != pausedEvents.end(); ++i) { - auto& ev = *i; - UNIT_ASSERT(ev.Get() != nullptr); - if (GetTabletId(ev->Get<TResponseEvent>()) == tabletToDestroy) { - foundAnswerEvent = true; - answerEvent = ev; - i = pausedEvents.erase(i); - } - } - UNIT_ASSERT(foundAnswerEvent); - } - - switch (disconnectionMode) { - case EDisconnectionMode::DisconnectionComesFirst: - Runtime->Send(disconnectEvent.Release()); - Runtime->Send(answerEvent.Release()); - break; - case EDisconnectionMode::DisconnectionComesSecond: - Runtime->Send(answerEvent.Release()); - Runtime->Send(disconnectEvent.Release()); - break; - case 
EDisconnectionMode::AnswerDoesNotArrive: - Runtime->Send(disconnectEvent.Release()); - break; - default: - UNIT_FAIL("Unknown disconnection mode"); - } - - // Resend the rest paused events - for (auto& ev : pausedEvents) { - UNIT_ASSERT(ev.Get() != nullptr); - Runtime->Send(ev.Release()); - } - pausedEvents.clear(); - - GrabResponseEvent(); - - // Validate result - dataValidationFunction(disconnectionMode); - } -}; - -class TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest: public TMessageBusServerPersQueueRequestCommonTest { -public: - UNIT_TEST_SUITE(TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest) - COMMON_TESTS_LIST() - UNIT_TEST(FailsOnEmptyTopicName) - UNIT_TEST(SuccessfullyReplies) - UNIT_TEST_SUITE_END(); - + + PrepareBalancer("topic1", MakeTabletID(0, 0, 100), {{1, MakeTabletID(0, 0, 101)}}); + PreparePQTablet("topic1", MakeTabletID(0, 0, 101), {0}); + + PrepareBalancer("topic2", MakeTabletID(0, 0, 200), {{1, MakeTabletID(0, 0, 201)}, {2, MakeTabletID(0, 0, 202)}, {3, MakeTabletID(0, 0, 203)}}); + PreparePQTablet("topic2", MakeTabletID(0, 0, 201), {0}); + PreparePQTablet("topic2", MakeTabletID(0, 0, 202), {1}); + PreparePQTablet("topic2", MakeTabletID(0, 0, 203), {2}); + + const ui64 tabletToDestroy = MakeTabletID(0, 0, 203); + + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + RegisterActor(req); + + // Pause responses with status + PauseInputForTestActors<TResponseEvent>(); + const size_t expectedEventsCount = requestTheWholeTopic ? 4 : 3; // When request is about the whole topic (not for particular partitions), + // we expect one more event for partition 0 of topic2 + // Wait that pause events are sent + { + TDispatchOptions options; + options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition([this, expectedEventsCount](IEventHandle&){ return PausedEvents.size() == expectedEventsCount; })); + Runtime->DispatchEvents(options); + } + UNIT_ASSERT_VALUES_EQUAL(PausedEvents.size(), expectedEventsCount); + + // Destroy one tablet and wait corresponding event from pipe + PauseInputForTestActors(NKikimr::TEvTabletPipe::EvClientDestroyed); + Runtime->SendToPipe(tabletToDestroy, EdgeActorId, new TEvents::TEvPoisonPill()); + { + TDispatchOptions options; + options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition([this, expectedEventsCount](IEventHandle&){ return PausedEvents.size() == expectedEventsCount + 1; })); + Runtime->DispatchEvents(options); + } + UNIT_ASSERT_VALUES_EQUAL(PausedEvents.size(), expectedEventsCount + 1); + + // Save paused events locally + // Later we will change order of them + std::list<TAutoPtr<IEventHandle>> pausedEvents; + pausedEvents.swap(PausedEvents); + ResumeEventsForTestActors(); + + TAutoPtr<IEventHandle> disconnectEvent = pausedEvents.back(); + pausedEvents.pop_back(); + TAutoPtr<IEventHandle> answerEvent; + { + bool foundAnswerEvent = false; + for (auto i = pausedEvents.begin(); i != pausedEvents.end(); ++i) { + auto& ev = *i; + UNIT_ASSERT(ev.Get() != nullptr); + if (GetTabletId(ev->Get<TResponseEvent>()) == tabletToDestroy) { + foundAnswerEvent = true; + answerEvent = ev; + i = pausedEvents.erase(i); + } + } + UNIT_ASSERT(foundAnswerEvent); + } + + switch (disconnectionMode) { + case EDisconnectionMode::DisconnectionComesFirst: + Runtime->Send(disconnectEvent.Release()); + Runtime->Send(answerEvent.Release()); + break; + case EDisconnectionMode::DisconnectionComesSecond: + Runtime->Send(answerEvent.Release()); + Runtime->Send(disconnectEvent.Release()); + break; + case 
EDisconnectionMode::AnswerDoesNotArrive: + Runtime->Send(disconnectEvent.Release()); + break; + default: + UNIT_FAIL("Unknown disconnection mode"); + } + + // Resend the rest paused events + for (auto& ev : pausedEvents) { + UNIT_ASSERT(ev.Get() != nullptr); + Runtime->Send(ev.Release()); + } + pausedEvents.clear(); + + GrabResponseEvent(); + + // Validate result + dataValidationFunction(disconnectionMode); + } +}; + +class TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest: public TMessageBusServerPersQueueRequestCommonTest { +public: + UNIT_TEST_SUITE(TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest) + COMMON_TESTS_LIST() + UNIT_TEST(FailsOnEmptyTopicName) + UNIT_TEST(SuccessfullyReplies) + UNIT_TEST_SUITE_END(); + NKikimrClient::TPersQueueRequest MakeValidRequest(ui64 topicsCount = 2) override { - NKikimrClient::TPersQueueRequest persQueueRequest; + NKikimrClient::TPersQueueRequest persQueueRequest; persQueueRequest.SetTicket("client_id@" BUILTIN_ACL_DOMAIN); - - auto& req = *persQueueRequest.MutableMetaRequest()->MutableCmdGetTopicMetadata(); - req.AddTopic("topic1"); + + auto& req = *persQueueRequest.MutableMetaRequest()->MutableCmdGetTopicMetadata(); + req.AddTopic("topic1"); if (topicsCount > 1) req.AddTopic("topic2"); - return persQueueRequest; - } - - void FailsOnEmptyTopicName() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - req.MutableMetaRequest()->MutableCmdGetTopicMetadata()->AddTopic(""); - RegisterActor(req); - GrabResponseEvent(); + return persQueueRequest; + } + + void FailsOnEmptyTopicName() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + req.MutableMetaRequest()->MutableCmdGetTopicMetadata()->AddTopic(""); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "empty topic in GetTopicMetadata request"); - } - - void SuccessfullyReplies() { + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "empty topic in GetTopicMetadata request"); + } + + void SuccessfullyReplies() { GetMockPQMetaCache().SetAllTopicsAnswer(true, MakeResultSet()); - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - RegisterActor(req); - - GrabResponseEvent(); - AssertSucceededResponse(); - - const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted - UNIT_ASSERT_C(resp->Record.GetMetaResponse().HasCmdGetTopicMetadataResult(), "Response: " << resp->Record); - - const auto& res = resp->Record.GetMetaResponse().GetCmdGetTopicMetadataResult(); - UNIT_ASSERT_VALUES_EQUAL_C(res.TopicInfoSize(), 2, "Response: " << resp->Record); - - { - const auto& topic1 = res.GetTopicInfo(0).GetTopic() == "topic1" ? res.GetTopicInfo(0) : res.GetTopicInfo(1); - UNIT_ASSERT_STRINGS_EQUAL_C(topic1.GetTopic(), "topic1", "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(topic1.GetNumPartitions(), 1, "Response: " << resp->Record); - UNIT_ASSERT_C(topic1.HasConfig(), "Response: " << resp->Record); - UNIT_ASSERT_C(topic1.GetConfig().HasVersion(), "Response: " << resp->Record); - } - - { - const auto& topic2 = res.GetTopicInfo(0).GetTopic() == "topic2" ? 
res.GetTopicInfo(0) : res.GetTopicInfo(1); - UNIT_ASSERT_STRINGS_EQUAL_C(topic2.GetTopic(), "topic2", "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(topic2.GetNumPartitions(), 3, "Response: " << resp->Record); - UNIT_ASSERT_C(topic2.HasConfig(), "Response: " << resp->Record); - UNIT_ASSERT_C(topic2.GetConfig().HasVersion(), "Response: " << resp->Record); - } - } -}; - + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + RegisterActor(req); + + GrabResponseEvent(); + AssertSucceededResponse(); + + const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted + UNIT_ASSERT_C(resp->Record.GetMetaResponse().HasCmdGetTopicMetadataResult(), "Response: " << resp->Record); + + const auto& res = resp->Record.GetMetaResponse().GetCmdGetTopicMetadataResult(); + UNIT_ASSERT_VALUES_EQUAL_C(res.TopicInfoSize(), 2, "Response: " << resp->Record); + + { + const auto& topic1 = res.GetTopicInfo(0).GetTopic() == "topic1" ? res.GetTopicInfo(0) : res.GetTopicInfo(1); + UNIT_ASSERT_STRINGS_EQUAL_C(topic1.GetTopic(), "topic1", "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(topic1.GetNumPartitions(), 1, "Response: " << resp->Record); + UNIT_ASSERT_C(topic1.HasConfig(), "Response: " << resp->Record); + UNIT_ASSERT_C(topic1.GetConfig().HasVersion(), "Response: " << resp->Record); + } + + { + const auto& topic2 = res.GetTopicInfo(0).GetTopic() == "topic2" ? res.GetTopicInfo(0) : res.GetTopicInfo(1); + UNIT_ASSERT_STRINGS_EQUAL_C(topic2.GetTopic(), "topic2", "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(topic2.GetNumPartitions(), 3, "Response: " << resp->Record); + UNIT_ASSERT_C(topic2.HasConfig(), "Response: " << resp->Record); + UNIT_ASSERT_C(topic2.GetConfig().HasVersion(), "Response: " << resp->Record); + } + } +}; + void FillValidTopicRequest(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request, ui64 topicsCount = 2) { - { - auto& topic1 = *request.Add(); - topic1.SetTopic("topic1"); - } - + { + auto& topic1 = *request.Add(); + topic1.SetTopic("topic1"); + } + if (topicsCount > 1){ - auto& topic2 = *request.Add(); - topic2.SetTopic("topic2"); - topic2.AddPartition(1); - topic2.AddPartition(2); - } -} - -void MakeEmptyTopic(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request) { - UNIT_ASSERT_UNEQUAL(request.size(), 0); // filled in - request.Mutable(0)->SetTopic(""); -} - -void MakeDuplicatedTopic(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request) { - UNIT_ASSERT_UNEQUAL(request.size(), 0); // filled in - request.Mutable(1)->SetTopic("topic1"); -} - -void MakeDuplicatedPartition(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request) { - UNIT_ASSERT_UNEQUAL(request.size(), 0); // filled in - request.Mutable(1)->AddPartition(2); -} - -class TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest: public TMessageBusServerPersQueueRequestCommonTest { -public: - UNIT_TEST_SUITE(TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest) - COMMON_TESTS_LIST() - UNIT_TEST(FailsOnEmptyTopicName) - UNIT_TEST(FailsOnDuplicatedTopicName) - UNIT_TEST(FailsOnDuplicatedPartition) - UNIT_TEST(SuccessfullyPassesResponsesFromTablets) - UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesFirst) - UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesSecond) - UNIT_TEST(HandlesPipeDisconnection_AnswerDoesNotArrive) - UNIT_TEST_SUITE_END(); - + auto& topic2 = *request.Add(); + 
topic2.SetTopic("topic2"); + topic2.AddPartition(1); + topic2.AddPartition(2); + } +} + +void MakeEmptyTopic(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request) { + UNIT_ASSERT_UNEQUAL(request.size(), 0); // filled in + request.Mutable(0)->SetTopic(""); +} + +void MakeDuplicatedTopic(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request) { + UNIT_ASSERT_UNEQUAL(request.size(), 0); // filled in + request.Mutable(1)->SetTopic("topic1"); +} + +void MakeDuplicatedPartition(NProtoBuf::RepeatedPtrField<::NKikimrClient::TPersQueueMetaRequest::TTopicRequest>& request) { + UNIT_ASSERT_UNEQUAL(request.size(), 0); // filled in + request.Mutable(1)->AddPartition(2); +} + +class TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest: public TMessageBusServerPersQueueRequestCommonTest { +public: + UNIT_TEST_SUITE(TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest) + COMMON_TESTS_LIST() + UNIT_TEST(FailsOnEmptyTopicName) + UNIT_TEST(FailsOnDuplicatedTopicName) + UNIT_TEST(FailsOnDuplicatedPartition) + UNIT_TEST(SuccessfullyPassesResponsesFromTablets) + UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesFirst) + UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesSecond) + UNIT_TEST(HandlesPipeDisconnection_AnswerDoesNotArrive) + UNIT_TEST_SUITE_END(); + NKikimrClient::TPersQueueRequest MakeValidRequest(ui64 topicsCount = 2) override { - NKikimrClient::TPersQueueRequest persQueueRequest; + NKikimrClient::TPersQueueRequest persQueueRequest; persQueueRequest.SetTicket("client_id@" BUILTIN_ACL_DOMAIN); - - auto& req = *persQueueRequest.MutableMetaRequest()->MutableCmdGetPartitionLocations(); + + auto& req = *persQueueRequest.MutableMetaRequest()->MutableCmdGetPartitionLocations(); FillValidTopicRequest(*req.MutableTopicRequest(), topicsCount); - return persQueueRequest; - } - - void FailsOnEmptyTopicName() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - MakeEmptyTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionLocations()->MutableTopicRequest()); - RegisterActor(req); - GrabResponseEvent(); + return persQueueRequest; + } + + void FailsOnEmptyTopicName() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + MakeEmptyTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionLocations()->MutableTopicRequest()); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "TopicRequest must have Topic field"); - } - - void FailsOnDuplicatedTopicName() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - MakeDuplicatedTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionLocations()->MutableTopicRequest()); - RegisterActor(req); - GrabResponseEvent(); + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "TopicRequest must have Topic field"); + } + + void FailsOnDuplicatedTopicName() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + MakeDuplicatedTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionLocations()->MutableTopicRequest()); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple TopicRequest"); - } - - void FailsOnDuplicatedPartition() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - 
MakeDuplicatedPartition(*req.MutableMetaRequest()->MutableCmdGetPartitionLocations()->MutableTopicRequest()); - RegisterActor(req); - GrabResponseEvent(); + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple TopicRequest"); + } + + void FailsOnDuplicatedPartition() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + MakeDuplicatedPartition(*req.MutableMetaRequest()->MutableCmdGetPartitionLocations()->MutableTopicRequest()); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple partition"); - } - - void SuccessfullyPassesResponsesFromTablets() { + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple partition"); + } + + void SuccessfullyPassesResponsesFromTablets() { GetMockPQMetaCache().SetAllTopicsAnswer(true, MakeResultSet()); - PrepareBalancer("topic1", MakeTabletID(0, 0, 100), {{1, MakeTabletID(0, 0, 101)}}); - PreparePQTablet("topic1", MakeTabletID(0, 0, 101), {0}); - - PrepareBalancer("topic2", MakeTabletID(0, 0, 200), {{1, MakeTabletID(0, 0, 201)}, {2, MakeTabletID(0, 0, 202)}, {3, MakeTabletID(0, 0, 203)}}); - // Don't prepare partition 0 because it is not required in request - // Don't prepare partition 1 to ensure that response is successfull despite the tablet is down - PreparePQTablet("topic2", MakeTabletID(0, 0, 203), {2}); - - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - RegisterActor(req); - GrabResponseEvent(); - AssertSucceededResponse(); - - // Check response - const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted - UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionLocationsResult()); - auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionLocationsResult().GetTopicResult(); - UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); - - { - const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionLocationSize(), 1, "Response: " << resp->Record); - const auto& partition1 = topic1Result.GetPartitionLocation(0); - UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 0, "Response: " << resp->Record); + PrepareBalancer("topic1", MakeTabletID(0, 0, 100), {{1, MakeTabletID(0, 0, 101)}}); + PreparePQTablet("topic1", MakeTabletID(0, 0, 101), {0}); + + PrepareBalancer("topic2", MakeTabletID(0, 0, 200), {{1, MakeTabletID(0, 0, 201)}, {2, MakeTabletID(0, 0, 202)}, {3, MakeTabletID(0, 0, 203)}}); + // Don't prepare partition 0 because it is not required in request + // Don't prepare partition 1 to ensure that response is successfull despite the tablet is down + PreparePQTablet("topic2", MakeTabletID(0, 0, 203), {2}); + + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + RegisterActor(req); + GrabResponseEvent(); + AssertSucceededResponse(); + + // Check response + const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted + UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionLocationsResult()); + auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionLocationsResult().GetTopicResult(); + UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); + + { + const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? 
perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionLocationSize(), 1, "Response: " << resp->Record); + const auto& partition1 = topic1Result.GetPartitionLocation(0); + UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 0, "Response: " << resp->Record); UNIT_ASSERT_EQUAL_C(partition1.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(!partition1.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(!partition1.GetHost().empty(), "Response: " << resp->Record); - } - { - const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); - UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionLocationSize(), 2, "Response: " << resp->Record); - - // Partitions (order is not specified) - const auto& partition1 = topic2Result.GetPartitionLocation(0).GetPartition() == 1 ? topic2Result.GetPartitionLocation(0) : topic2Result.GetPartitionLocation(1); - const auto& partition2 = topic2Result.GetPartitionLocation(0).GetPartition() == 2 ? topic2Result.GetPartitionLocation(0) : topic2Result.GetPartitionLocation(1); - UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); - + UNIT_ASSERT_C(!partition1.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(!partition1.GetHost().empty(), "Response: " << resp->Record); + } + { + const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); + UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionLocationSize(), 2, "Response: " << resp->Record); + + // Partitions (order is not specified) + const auto& partition1 = topic2Result.GetPartitionLocation(0).GetPartition() == 1 ? topic2Result.GetPartitionLocation(0) : topic2Result.GetPartitionLocation(1); + const auto& partition2 = topic2Result.GetPartitionLocation(0).GetPartition() == 2 ? 
topic2Result.GetPartitionLocation(0) : topic2Result.GetPartitionLocation(1); + UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); + UNIT_ASSERT_UNEQUAL_C(partition1.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(partition1.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(partition1.GetHost().empty(), "Response: " << resp->Record); // No data - + UNIT_ASSERT_C(partition1.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(partition1.GetHost().empty(), "Response: " << resp->Record); // No data + UNIT_ASSERT_EQUAL_C(partition2.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(!partition2.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(!partition2.GetHost().empty(), "Response: " << resp->Record); // Data was passed - } - } - - void HandlesPipeDisconnection_DisconnectionComesFirst() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesFirst); - } - - void HandlesPipeDisconnection_DisconnectionComesSecond() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesSecond); - } - - void HandlesPipeDisconnection_AnswerDoesNotArrive() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::AnswerDoesNotArrive); - } - - void HandlesPipeDisconnectionImpl(EDisconnectionMode disconnectionMode) { - auto validation = [this](EDisconnectionMode disconnectionMode) { - AssertSucceededResponse(); - - // Check response - const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted - UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionLocationsResult()); - auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionLocationsResult().GetTopicResult(); - UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); - - { - const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? 
perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionLocationSize(), 1, "Response: " << resp->Record); - const auto& partition1 = topic1Result.GetPartitionLocation(0); - UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 0, "Response: " << resp->Record); + UNIT_ASSERT_C(!partition2.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(!partition2.GetHost().empty(), "Response: " << resp->Record); // Data was passed + } + } + + void HandlesPipeDisconnection_DisconnectionComesFirst() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesFirst); + } + + void HandlesPipeDisconnection_DisconnectionComesSecond() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesSecond); + } + + void HandlesPipeDisconnection_AnswerDoesNotArrive() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::AnswerDoesNotArrive); + } + + void HandlesPipeDisconnectionImpl(EDisconnectionMode disconnectionMode) { + auto validation = [this](EDisconnectionMode disconnectionMode) { + AssertSucceededResponse(); + + // Check response + const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted + UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionLocationsResult()); + auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionLocationsResult().GetTopicResult(); + UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); + + { + const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionLocationSize(), 1, "Response: " << resp->Record); + const auto& partition1 = topic1Result.GetPartitionLocation(0); + UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 0, "Response: " << resp->Record); UNIT_ASSERT_EQUAL_C(partition1.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(!partition1.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(!partition1.GetHost().empty(), "Response: " << resp->Record); - } - { - const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); - UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionLocationSize(), 2, "Response: " << resp->Record); - - // Partitions (order is not specified) - const auto& partition1 = topic2Result.GetPartitionLocation(0).GetPartition() == 1 ? topic2Result.GetPartitionLocation(0) : topic2Result.GetPartitionLocation(1); - const auto& partition2 = topic2Result.GetPartitionLocation(0).GetPartition() == 2 ? topic2Result.GetPartitionLocation(0) : topic2Result.GetPartitionLocation(1); - UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); - + UNIT_ASSERT_C(!partition1.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(!partition1.GetHost().empty(), "Response: " << resp->Record); + } + { + const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? 
perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); + UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionLocationSize(), 2, "Response: " << resp->Record); + + // Partitions (order is not specified) + const auto& partition1 = topic2Result.GetPartitionLocation(0).GetPartition() == 1 ? topic2Result.GetPartitionLocation(0) : topic2Result.GetPartitionLocation(1); + const auto& partition2 = topic2Result.GetPartitionLocation(0).GetPartition() == 2 ? topic2Result.GetPartitionLocation(0) : topic2Result.GetPartitionLocation(1); + UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); + UNIT_ASSERT_EQUAL_C(partition1.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(!partition1.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(!partition1.GetHost().empty(), "Response: " << resp->Record); - - if (disconnectionMode == EDisconnectionMode::AnswerDoesNotArrive) { + UNIT_ASSERT_C(!partition1.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(!partition1.GetHost().empty(), "Response: " << resp->Record); + + if (disconnectionMode == EDisconnectionMode::AnswerDoesNotArrive) { UNIT_ASSERT_UNEQUAL_C(partition2.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(partition2.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(partition2.GetHost().empty(), "Response: " << resp->Record); // Data was passed - } else { + UNIT_ASSERT_C(partition2.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(partition2.GetHost().empty(), "Response: " << resp->Record); // Data was passed + } else { UNIT_ASSERT_EQUAL_C(partition2.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(!partition2.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(!partition2.GetHost().empty(), "Response: " << resp->Record); // Data was passed - } - } - }; - TMessageBusServerPersQueueRequestCommonTest::HandlesPipeDisconnectionImpl<TEvTabletPipe::TEvClientConnected>(disconnectionMode, validation); - } -}; - -class TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest: public TMessageBusServerPersQueueRequestCommonTest { -public: - UNIT_TEST_SUITE(TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest) - COMMON_TESTS_LIST() - UNIT_TEST(FailsOnEmptyTopicName) - UNIT_TEST(FailsOnDuplicatedTopicName) - UNIT_TEST(FailsOnDuplicatedPartition) - UNIT_TEST(SuccessfullyPassesResponsesFromTablets) - UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesFirst) - UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesSecond) - UNIT_TEST(HandlesPipeDisconnection_AnswerDoesNotArrive) - UNIT_TEST_SUITE_END(); - + UNIT_ASSERT_C(!partition2.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(!partition2.GetHost().empty(), "Response: " << resp->Record); // Data was passed + } + } + }; + TMessageBusServerPersQueueRequestCommonTest::HandlesPipeDisconnectionImpl<TEvTabletPipe::TEvClientConnected>(disconnectionMode, validation); + } +}; + +class TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest: public TMessageBusServerPersQueueRequestCommonTest { +public: + UNIT_TEST_SUITE(TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest) + COMMON_TESTS_LIST() + UNIT_TEST(FailsOnEmptyTopicName) + UNIT_TEST(FailsOnDuplicatedTopicName) + UNIT_TEST(FailsOnDuplicatedPartition) + 
UNIT_TEST(SuccessfullyPassesResponsesFromTablets) + UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesFirst) + UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesSecond) + UNIT_TEST(HandlesPipeDisconnection_AnswerDoesNotArrive) + UNIT_TEST_SUITE_END(); + NKikimrClient::TPersQueueRequest MakeValidRequest(ui64 topicsCount = 2) override { - NKikimrClient::TPersQueueRequest persQueueRequest; + NKikimrClient::TPersQueueRequest persQueueRequest; persQueueRequest.SetTicket("client_id@" BUILTIN_ACL_DOMAIN); - - auto& req = *persQueueRequest.MutableMetaRequest()->MutableCmdGetPartitionOffsets(); + + auto& req = *persQueueRequest.MutableMetaRequest()->MutableCmdGetPartitionOffsets(); FillValidTopicRequest(*req.MutableTopicRequest(), topicsCount); - return persQueueRequest; - } - - void FailsOnEmptyTopicName() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - MakeEmptyTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionOffsets()->MutableTopicRequest()); - RegisterActor(req); - GrabResponseEvent(); + return persQueueRequest; + } + + void FailsOnEmptyTopicName() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + MakeEmptyTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionOffsets()->MutableTopicRequest()); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "TopicRequest must have Topic field"); - } - - void FailsOnDuplicatedTopicName() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - MakeDuplicatedTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionOffsets()->MutableTopicRequest()); - RegisterActor(req); - GrabResponseEvent(); + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "TopicRequest must have Topic field"); + } + + void FailsOnDuplicatedTopicName() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + MakeDuplicatedTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionOffsets()->MutableTopicRequest()); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple TopicRequest"); - } - - void FailsOnDuplicatedPartition() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - MakeDuplicatedPartition(*req.MutableMetaRequest()->MutableCmdGetPartitionOffsets()->MutableTopicRequest()); - RegisterActor(req); - GrabResponseEvent(); + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple TopicRequest"); + } + + void FailsOnDuplicatedPartition() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + MakeDuplicatedPartition(*req.MutableMetaRequest()->MutableCmdGetPartitionOffsets()->MutableTopicRequest()); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple partition"); - } - - void SuccessfullyPassesResponsesFromTablets() { + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple partition"); + } + + void SuccessfullyPassesResponsesFromTablets() { GetMockPQMetaCache().SetAllTopicsAnswer(true, MakeResultSet()); - PrepareBalancer("topic1", MakeTabletID(0, 0, 100), {{1, MakeTabletID(0, 0, 101)}}); - PreparePQTablet("topic1", MakeTabletID(0, 0, 101), {0}); - - PrepareBalancer("topic2", MakeTabletID(0, 0, 200), {{1, MakeTabletID(0, 0, 201)}, {2, MakeTabletID(0, 0, 202)}, {3, 
MakeTabletID(0, 0, 203)}}); - // Don't prepare partition 0 because it is not required in request - // Don't prepare partition 1 to ensure that response is successfull despite the tablet is down - PreparePQTablet("topic2", MakeTabletID(0, 0, 203), {2}); - - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - RegisterActor(req); - GrabResponseEvent(); - AssertSucceededResponse(); - - // Check response - const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted - UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionOffsetsResult()); - auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionOffsetsResult().GetTopicResult(); - UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); - - { - const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.GetPartitionResult(0).GetPartition(), 0, "Response: " << resp->Record); + PrepareBalancer("topic1", MakeTabletID(0, 0, 100), {{1, MakeTabletID(0, 0, 101)}}); + PreparePQTablet("topic1", MakeTabletID(0, 0, 101), {0}); + + PrepareBalancer("topic2", MakeTabletID(0, 0, 200), {{1, MakeTabletID(0, 0, 201)}, {2, MakeTabletID(0, 0, 202)}, {3, MakeTabletID(0, 0, 203)}}); + // Don't prepare partition 0 because it is not required in request + // Don't prepare partition 1 to ensure that response is successfull despite the tablet is down + PreparePQTablet("topic2", MakeTabletID(0, 0, 203), {2}); + + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + RegisterActor(req); + GrabResponseEvent(); + AssertSucceededResponse(); + + // Check response + const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted + UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionOffsetsResult()); + auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionOffsetsResult().GetTopicResult(); + UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); + + { + const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.GetPartitionResult(0).GetPartition(), 0, "Response: " << resp->Record); UNIT_ASSERT_EQUAL_C(topic1Result.GetPartitionResult(0).GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(!topic1Result.GetPartitionResult(0).HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(topic1Result.GetPartitionResult(0).HasStartOffset(), "Response: " << resp->Record); - } - { - const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); - UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), 2, "Response: " << resp->Record); - - // Partitions (order is not specified) - const auto& partition1 = topic2Result.GetPartitionResult(0).GetPartition() == 1 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); - const auto& partition2 = topic2Result.GetPartitionResult(0).GetPartition() == 2 ? 
topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); - UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); - + UNIT_ASSERT_C(!topic1Result.GetPartitionResult(0).HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(topic1Result.GetPartitionResult(0).HasStartOffset(), "Response: " << resp->Record); + } + { + const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); + UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), 2, "Response: " << resp->Record); + + // Partitions (order is not specified) + const auto& partition1 = topic2Result.GetPartitionResult(0).GetPartition() == 1 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); + const auto& partition2 = topic2Result.GetPartitionResult(0).GetPartition() == 2 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); + UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); + UNIT_ASSERT_UNEQUAL_C(partition1.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(partition1.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(!partition1.HasStartOffset(), "Response: " << resp->Record); // No data - + UNIT_ASSERT_C(partition1.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(!partition1.HasStartOffset(), "Response: " << resp->Record); // No data + UNIT_ASSERT_EQUAL_C(partition2.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(!partition2.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(partition2.HasStartOffset(), "Response: " << resp->Record); // Data was passed - } - } - - void HandlesPipeDisconnection_DisconnectionComesFirst() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesFirst); - } - - void HandlesPipeDisconnection_DisconnectionComesSecond() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesSecond); - } - - void HandlesPipeDisconnection_AnswerDoesNotArrive() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::AnswerDoesNotArrive); - } - - void HandlesPipeDisconnectionImpl(EDisconnectionMode disconnectionMode) { - auto validation = [this](EDisconnectionMode disconnectionMode) { - AssertSucceededResponse(); - - // Check response - const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted - UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionOffsetsResult()); - auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionOffsetsResult().GetTopicResult(); - UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); - - { - const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? 
perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.GetPartitionResult(0).GetPartition(), 0, "Response: " << resp->Record); + UNIT_ASSERT_C(!partition2.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(partition2.HasStartOffset(), "Response: " << resp->Record); // Data was passed + } + } + + void HandlesPipeDisconnection_DisconnectionComesFirst() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesFirst); + } + + void HandlesPipeDisconnection_DisconnectionComesSecond() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesSecond); + } + + void HandlesPipeDisconnection_AnswerDoesNotArrive() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::AnswerDoesNotArrive); + } + + void HandlesPipeDisconnectionImpl(EDisconnectionMode disconnectionMode) { + auto validation = [this](EDisconnectionMode disconnectionMode) { + AssertSucceededResponse(); + + // Check response + const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted + UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionOffsetsResult()); + auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionOffsetsResult().GetTopicResult(); + UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); + + { + const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.GetPartitionResult(0).GetPartition(), 0, "Response: " << resp->Record); UNIT_ASSERT_EQUAL_C(topic1Result.GetPartitionResult(0).GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(!topic1Result.GetPartitionResult(0).HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(topic1Result.GetPartitionResult(0).HasStartOffset(), "Response: " << resp->Record); - } - { - const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); - UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), 2, "Response: " << resp->Record); - - // Partitions (order is not specified) - const auto& partition1 = topic2Result.GetPartitionResult(0).GetPartition() == 1 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); - const auto& partition2 = topic2Result.GetPartitionResult(0).GetPartition() == 2 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); - UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); - + UNIT_ASSERT_C(!topic1Result.GetPartitionResult(0).HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(topic1Result.GetPartitionResult(0).HasStartOffset(), "Response: " << resp->Record); + } + { + const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? 
perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); + UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), 2, "Response: " << resp->Record); + + // Partitions (order is not specified) + const auto& partition1 = topic2Result.GetPartitionResult(0).GetPartition() == 1 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); + const auto& partition2 = topic2Result.GetPartitionResult(0).GetPartition() == 2 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); + UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); + UNIT_ASSERT_EQUAL_C(partition1.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(!partition1.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(partition1.HasStartOffset(), "Response: " << resp->Record); // Data was passed - - if (disconnectionMode == EDisconnectionMode::AnswerDoesNotArrive) { + UNIT_ASSERT_C(!partition1.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(partition1.HasStartOffset(), "Response: " << resp->Record); // Data was passed + + if (disconnectionMode == EDisconnectionMode::AnswerDoesNotArrive) { UNIT_ASSERT_UNEQUAL_C(partition2.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(partition2.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(!partition2.HasStartOffset(), "Response: " << resp->Record); // Data was passed - } else { + UNIT_ASSERT_C(partition2.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(!partition2.HasStartOffset(), "Response: " << resp->Record); // Data was passed + } else { UNIT_ASSERT_EQUAL_C(partition2.GetErrorCode(), NPersQueue::NErrorCode::OK, "Response: " << resp->Record); - UNIT_ASSERT_C(!partition2.HasErrorReason(), "Response: " << resp->Record); - UNIT_ASSERT_C(partition2.HasStartOffset(), "Response: " << resp->Record); // Data was passed - } - } - }; - TMessageBusServerPersQueueRequestCommonTest::HandlesPipeDisconnectionImpl<TEvPersQueue::TEvOffsetsResponse>(disconnectionMode, validation); - } -}; - -class TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest: public TMessageBusServerPersQueueRequestCommonTest { -public: - UNIT_TEST_SUITE(TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest) - COMMON_TESTS_LIST() - UNIT_TEST(FailsOnEmptyTopicName) - UNIT_TEST(FailsOnDuplicatedTopicName) - UNIT_TEST(FailsOnDuplicatedPartition) - UNIT_TEST(SuccessfullyPassesResponsesFromTablets) - UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesFirst) - UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesSecond) - UNIT_TEST(HandlesPipeDisconnection_AnswerDoesNotArrive) - UNIT_TEST_SUITE_END(); - + UNIT_ASSERT_C(!partition2.HasErrorReason(), "Response: " << resp->Record); + UNIT_ASSERT_C(partition2.HasStartOffset(), "Response: " << resp->Record); // Data was passed + } + } + }; + TMessageBusServerPersQueueRequestCommonTest::HandlesPipeDisconnectionImpl<TEvPersQueue::TEvOffsetsResponse>(disconnectionMode, validation); + } +}; + +class TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest: public TMessageBusServerPersQueueRequestCommonTest { +public: + UNIT_TEST_SUITE(TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest) + COMMON_TESTS_LIST() + UNIT_TEST(FailsOnEmptyTopicName) + UNIT_TEST(FailsOnDuplicatedTopicName) + UNIT_TEST(FailsOnDuplicatedPartition) + 
UNIT_TEST(SuccessfullyPassesResponsesFromTablets) + UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesFirst) + UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesSecond) + UNIT_TEST(HandlesPipeDisconnection_AnswerDoesNotArrive) + UNIT_TEST_SUITE_END(); + NKikimrClient::TPersQueueRequest MakeValidRequest(ui64 topicsCount = 2) override { - NKikimrClient::TPersQueueRequest persQueueRequest; + NKikimrClient::TPersQueueRequest persQueueRequest; persQueueRequest.SetTicket("client_id@" BUILTIN_ACL_DOMAIN); - - auto& req = *persQueueRequest.MutableMetaRequest()->MutableCmdGetPartitionStatus(); + + auto& req = *persQueueRequest.MutableMetaRequest()->MutableCmdGetPartitionStatus(); FillValidTopicRequest(*req.MutableTopicRequest(), topicsCount); - return persQueueRequest; - } - - void FailsOnEmptyTopicName() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - MakeEmptyTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionStatus()->MutableTopicRequest()); - RegisterActor(req); - GrabResponseEvent(); + return persQueueRequest; + } + + void FailsOnEmptyTopicName() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + MakeEmptyTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionStatus()->MutableTopicRequest()); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "TopicRequest must have Topic field"); - } - - void FailsOnDuplicatedTopicName() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - MakeDuplicatedTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionStatus()->MutableTopicRequest()); - RegisterActor(req); - GrabResponseEvent(); + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "TopicRequest must have Topic field"); + } + + void FailsOnDuplicatedTopicName() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + MakeDuplicatedTopic(*req.MutableMetaRequest()->MutableCmdGetPartitionStatus()->MutableTopicRequest()); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple TopicRequest"); - } - - void FailsOnDuplicatedPartition() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - MakeDuplicatedPartition(*req.MutableMetaRequest()->MutableCmdGetPartitionStatus()->MutableTopicRequest()); - RegisterActor(req); - GrabResponseEvent(); + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple TopicRequest"); + } + + void FailsOnDuplicatedPartition() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + MakeDuplicatedPartition(*req.MutableMetaRequest()->MutableCmdGetPartitionStatus()->MutableTopicRequest()); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple partition"); - } - - void SuccessfullyPassesResponsesFromTablets() { + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "multiple partition"); + } + + void SuccessfullyPassesResponsesFromTablets() { GetMockPQMetaCache().SetAllTopicsAnswer(true, MakeResultSet()); - - PrepareBalancer("topic1", MakeTabletID(0, 0, 100), {{1, MakeTabletID(0, 0, 101)}}); - PreparePQTablet("topic1", MakeTabletID(0, 0, 101), {0}); - - PrepareBalancer("topic2", MakeTabletID(0, 0, 200), {{1, MakeTabletID(0, 0, 201)}, {2, MakeTabletID(0, 0, 202)}, {3, MakeTabletID(0, 
0, 203)}}); - // Don't prepare partition 0 because it is not required in request - // Don't prepare partition 1 to ensure that response is successfull despite the tablet is down - PreparePQTablet("topic2", MakeTabletID(0, 0, 203), {2}); - - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - RegisterActor(req); - GrabResponseEvent(); - AssertSucceededResponse(); - - // Check response - const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted - UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionStatusResult()); - auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionStatusResult().GetTopicResult(); - UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); - - { - const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.GetPartitionResult(0).GetPartition(), 0, "Response: " << resp->Record); - } - { - const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); - UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), 2, "Response: " << resp->Record); - - // Partitions (order is not specified) - const auto& partition1 = topic2Result.GetPartitionResult(0).GetPartition() == 1 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); - const auto& partition2 = topic2Result.GetPartitionResult(0).GetPartition() == 2 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); - UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); - - UNIT_ASSERT_EQUAL_C(partition1.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_UNKNOWN, "Response: " << resp->Record); - UNIT_ASSERT_EQUAL_C(partition2.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_OK, "Response: " << resp->Record); - - UNIT_ASSERT_C(!partition1.HasLastInitDurationSeconds(), "Response: " << resp->Record); // No data - UNIT_ASSERT_C(partition2.HasLastInitDurationSeconds(), "Response: " << resp->Record); // Data was passed - } - } - - void HandlesPipeDisconnection_DisconnectionComesFirst() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesFirst); - } - - void HandlesPipeDisconnection_DisconnectionComesSecond() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesSecond); - } - - void HandlesPipeDisconnection_AnswerDoesNotArrive() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::AnswerDoesNotArrive); - } - - void HandlesPipeDisconnectionImpl(EDisconnectionMode disconnectionMode) { - auto validation = [this](EDisconnectionMode disconnectionMode) { - AssertSucceededResponse(); - - // Check response - const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted - UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionStatusResult()); - auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionStatusResult().GetTopicResult(); - UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); - - { - const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? 
perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); - const auto& partition = topic1Result.GetPartitionResult(0); - UNIT_ASSERT_VALUES_EQUAL_C(partition.GetPartition(), 0, "Response: " << resp->Record); - UNIT_ASSERT_EQUAL_C(partition.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_OK, "Response: " << resp->Record); - UNIT_ASSERT_C(partition.HasLastInitDurationSeconds(), "Response: " << resp->Record); // Data was passed - } - { - const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); - UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), 2, "Response: " << resp->Record); - - // Partitions (order is not specified) - const auto& partition1 = topic2Result.GetPartitionResult(0).GetPartition() == 1 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); - const auto& partition2 = topic2Result.GetPartitionResult(0).GetPartition() == 2 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); - UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); - - UNIT_ASSERT_EQUAL_C(partition1.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_OK, "Response: " << resp->Record); - UNIT_ASSERT_C(partition1.HasLastInitDurationSeconds(), "Response: " << resp->Record); // Data was passed - - if (disconnectionMode == EDisconnectionMode::AnswerDoesNotArrive) { - UNIT_ASSERT_EQUAL_C(partition2.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_UNKNOWN, "Response: " << resp->Record); - UNIT_ASSERT_C(!partition2.HasLastInitDurationSeconds(), "Response: " << resp->Record); // Data was passed - } else { - UNIT_ASSERT_EQUAL_C(partition2.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_OK, "Response: " << resp->Record); - UNIT_ASSERT_C(partition2.HasLastInitDurationSeconds(), "Response: " << resp->Record); // Data was passed - } - } - }; - TMessageBusServerPersQueueRequestCommonTest::HandlesPipeDisconnectionImpl<TEvPersQueue::TEvStatusResponse>(disconnectionMode, validation); - } -}; - -class TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest: public TMessageBusServerPersQueueRequestCommonTest { -public: - UNIT_TEST_SUITE(TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest) - COMMON_TESTS_LIST() - UNIT_TEST(FailsOnEmptyTopicName) - UNIT_TEST(FailsOnNoClientSpecified) - UNIT_TEST(SuccessfullyPassesResponsesFromTablets) - UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesFirst) - UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesSecond) - UNIT_TEST(HandlesPipeDisconnection_AnswerDoesNotArrive) - UNIT_TEST_SUITE_END(); - + + PrepareBalancer("topic1", MakeTabletID(0, 0, 100), {{1, MakeTabletID(0, 0, 101)}}); + PreparePQTablet("topic1", MakeTabletID(0, 0, 101), {0}); + + PrepareBalancer("topic2", MakeTabletID(0, 0, 200), {{1, MakeTabletID(0, 0, 201)}, {2, MakeTabletID(0, 0, 202)}, {3, MakeTabletID(0, 0, 203)}}); + // Don't prepare partition 0 because it is not required in request + // Don't prepare partition 1 to ensure that response is successfull despite the tablet is down + PreparePQTablet("topic2", MakeTabletID(0, 0, 203), {2}); + + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + RegisterActor(req); + GrabResponseEvent(); + 
AssertSucceededResponse(); + + // Check response + const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted + UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionStatusResult()); + auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionStatusResult().GetTopicResult(); + UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); + + { + const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.GetPartitionResult(0).GetPartition(), 0, "Response: " << resp->Record); + } + { + const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); + UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), 2, "Response: " << resp->Record); + + // Partitions (order is not specified) + const auto& partition1 = topic2Result.GetPartitionResult(0).GetPartition() == 1 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); + const auto& partition2 = topic2Result.GetPartitionResult(0).GetPartition() == 2 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); + UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); + + UNIT_ASSERT_EQUAL_C(partition1.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_UNKNOWN, "Response: " << resp->Record); + UNIT_ASSERT_EQUAL_C(partition2.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_OK, "Response: " << resp->Record); + + UNIT_ASSERT_C(!partition1.HasLastInitDurationSeconds(), "Response: " << resp->Record); // No data + UNIT_ASSERT_C(partition2.HasLastInitDurationSeconds(), "Response: " << resp->Record); // Data was passed + } + } + + void HandlesPipeDisconnection_DisconnectionComesFirst() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesFirst); + } + + void HandlesPipeDisconnection_DisconnectionComesSecond() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesSecond); + } + + void HandlesPipeDisconnection_AnswerDoesNotArrive() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::AnswerDoesNotArrive); + } + + void HandlesPipeDisconnectionImpl(EDisconnectionMode disconnectionMode) { + auto validation = [this](EDisconnectionMode disconnectionMode) { + AssertSucceededResponse(); + + // Check response + const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted + UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetPartitionStatusResult()); + auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetPartitionStatusResult().GetTopicResult(); + UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); + + { + const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? 
perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); + const auto& partition = topic1Result.GetPartitionResult(0); + UNIT_ASSERT_VALUES_EQUAL_C(partition.GetPartition(), 0, "Response: " << resp->Record); + UNIT_ASSERT_EQUAL_C(partition.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_OK, "Response: " << resp->Record); + UNIT_ASSERT_C(partition.HasLastInitDurationSeconds(), "Response: " << resp->Record); // Data was passed + } + { + const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); + UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), 2, "Response: " << resp->Record); + + // Partitions (order is not specified) + const auto& partition1 = topic2Result.GetPartitionResult(0).GetPartition() == 1 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); + const auto& partition2 = topic2Result.GetPartitionResult(0).GetPartition() == 2 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); + UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); + + UNIT_ASSERT_EQUAL_C(partition1.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_OK, "Response: " << resp->Record); + UNIT_ASSERT_C(partition1.HasLastInitDurationSeconds(), "Response: " << resp->Record); // Data was passed + + if (disconnectionMode == EDisconnectionMode::AnswerDoesNotArrive) { + UNIT_ASSERT_EQUAL_C(partition2.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_UNKNOWN, "Response: " << resp->Record); + UNIT_ASSERT_C(!partition2.HasLastInitDurationSeconds(), "Response: " << resp->Record); // Data was passed + } else { + UNIT_ASSERT_EQUAL_C(partition2.GetStatus(), NKikimrPQ::TStatusResponse::STATUS_OK, "Response: " << resp->Record); + UNIT_ASSERT_C(partition2.HasLastInitDurationSeconds(), "Response: " << resp->Record); // Data was passed + } + } + }; + TMessageBusServerPersQueueRequestCommonTest::HandlesPipeDisconnectionImpl<TEvPersQueue::TEvStatusResponse>(disconnectionMode, validation); + } +}; + +class TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest: public TMessageBusServerPersQueueRequestCommonTest { +public: + UNIT_TEST_SUITE(TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest) + COMMON_TESTS_LIST() + UNIT_TEST(FailsOnEmptyTopicName) + UNIT_TEST(FailsOnNoClientSpecified) + UNIT_TEST(SuccessfullyPassesResponsesFromTablets) + UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesFirst) + UNIT_TEST(HandlesPipeDisconnection_DisconnectionComesSecond) + UNIT_TEST(HandlesPipeDisconnection_AnswerDoesNotArrive) + UNIT_TEST_SUITE_END(); + NKikimrClient::TPersQueueRequest MakeValidRequest(ui64 topicsCount = 2) override { - NKikimrClient::TPersQueueRequest persQueueRequest; + NKikimrClient::TPersQueueRequest persQueueRequest; persQueueRequest.SetTicket("client_id@" BUILTIN_ACL_DOMAIN); - - auto& req = *persQueueRequest.MutableMetaRequest()->MutableCmdGetReadSessionsInfo(); - req.SetClientId("client_id"); - req.AddTopic("topic1"); + + auto& req = *persQueueRequest.MutableMetaRequest()->MutableCmdGetReadSessionsInfo(); + req.SetClientId("client_id"); + req.AddTopic("topic1"); if (topicsCount > 1) req.AddTopic("topic2"); - return persQueueRequest; - } - - void 
FailsOnEmptyTopicName() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - req.MutableMetaRequest()->MutableCmdGetReadSessionsInfo()->AddTopic(""); - RegisterActor(req); - GrabResponseEvent(); + return persQueueRequest; + } + + void FailsOnEmptyTopicName() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + req.MutableMetaRequest()->MutableCmdGetReadSessionsInfo()->AddTopic(""); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "empty topic in GetReadSessionsInfo request"); - } - - void FailsOnNoClientSpecified() { - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - req.MutableMetaRequest()->MutableCmdGetReadSessionsInfo()->ClearClientId(); - RegisterActor(req); - GrabResponseEvent(); + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "empty topic in GetReadSessionsInfo request"); + } + + void FailsOnNoClientSpecified() { + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + req.MutableMetaRequest()->MutableCmdGetReadSessionsInfo()->ClearClientId(); + RegisterActor(req); + GrabResponseEvent(); AssertFailedResponse(NPersQueue::NErrorCode::BAD_REQUEST); - UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "No clientId specified in CmdGetReadSessionsInfo"); - } - - void SuccessfullyPassesResponsesFromTablets() { + UNIT_ASSERT_STRING_CONTAINS(GetResponse()->Record.GetErrorReason(), "No clientId specified in CmdGetReadSessionsInfo"); + } + + void SuccessfullyPassesResponsesFromTablets() { GetMockPQMetaCache().SetAllTopicsAnswer(true, MakeResultSet()); - - PrepareBalancer("topic1", MakeTabletID(0, 0, 100), {{1, MakeTabletID(0, 0, 101)}}); - PreparePQTablet("topic1", MakeTabletID(0, 0, 101), {0}); - - PrepareBalancer("topic2", MakeTabletID(0, 0, 200), {{1, MakeTabletID(0, 0, 201)}, {2, MakeTabletID(0, 0, 202)}, {3, MakeTabletID(0, 0, 203)}}); - // Don't prepare partition 0 to test its failure processing - PreparePQTablet("topic2", MakeTabletID(0, 0, 202), {1}); - PreparePQTablet("topic2", MakeTabletID(0, 0, 203), {2}); - - NKikimrClient::TPersQueueRequest req = MakeValidRequest(); - RegisterActor(req); - GrabResponseEvent(); - AssertSucceededResponse(); - - // Check response - const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted - UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetReadSessionsInfoResult()); - auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetReadSessionsInfoResult().GetTopicResult(); - UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); - - { - const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? 
perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.GetPartitionResult(0).GetPartition(), 0, "Response: " << resp->Record); + + PrepareBalancer("topic1", MakeTabletID(0, 0, 100), {{1, MakeTabletID(0, 0, 101)}}); + PreparePQTablet("topic1", MakeTabletID(0, 0, 101), {0}); + + PrepareBalancer("topic2", MakeTabletID(0, 0, 200), {{1, MakeTabletID(0, 0, 201)}, {2, MakeTabletID(0, 0, 202)}, {3, MakeTabletID(0, 0, 203)}}); + // Don't prepare partition 0 to test its failure processing + PreparePQTablet("topic2", MakeTabletID(0, 0, 202), {1}); + PreparePQTablet("topic2", MakeTabletID(0, 0, 203), {2}); + + NKikimrClient::TPersQueueRequest req = MakeValidRequest(); + RegisterActor(req); + GrabResponseEvent(); + AssertSucceededResponse(); + + // Check response + const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted + UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetReadSessionsInfoResult()); + auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetReadSessionsInfoResult().GetTopicResult(); + UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); + + { + const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.GetPartitionResult(0).GetPartition(), 0, "Response: " << resp->Record); UNIT_ASSERT_C(topic1Result.GetPartitionResult(0).GetErrorCode() == (ui32)NPersQueue::NErrorCode::INITIALIZING, "Response: " << resp->Record); - } - { - const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); + } + { + const auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), 3, "Response: " << resp->Record); - - // Partitions (order is not specified) + + // Partitions (order is not specified) // const auto& partition1 = topic2Result.GetPartitionResult(0).GetPartition() == 1 ? topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); // const auto& partition2 = topic2Result.GetPartitionResult(0).GetPartition() == 2 ? 
topic2Result.GetPartitionResult(0) : topic2Result.GetPartitionResult(1); // UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); // UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); - + UNIT_ASSERT_C(topic2Result.GetPartitionResult(0).GetErrorCode() == (ui32)NPersQueue::NErrorCode::INITIALIZING, "Response: " << resp->Record); UNIT_ASSERT_C(topic2Result.GetPartitionResult(1).GetErrorCode() == (ui32)NPersQueue::NErrorCode::INITIALIZING, "Response: " << resp->Record); UNIT_ASSERT_C(topic2Result.GetPartitionResult(2).GetErrorCode() == (ui32)NPersQueue::NErrorCode::INITIALIZING, "Response: " << resp->Record); @@ -1373,75 +1373,75 @@ public: // UNIT_ASSERT_C(partition1.HasStartOffset(), "Response: " << resp->Record); // UNIT_ASSERT_C(partition2.HasStartOffset(), "Response: " << resp->Record); - } - } - - void HandlesPipeDisconnection_DisconnectionComesFirst() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesFirst); - } - - void HandlesPipeDisconnection_DisconnectionComesSecond() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesSecond); - } - - void HandlesPipeDisconnection_AnswerDoesNotArrive() { - HandlesPipeDisconnectionImpl(EDisconnectionMode::AnswerDoesNotArrive); - } - - void HandlesPipeDisconnectionImpl(EDisconnectionMode disconnectionMode) { - auto validation = [this](EDisconnectionMode disconnectionMode) { - AssertSucceededResponse(); - - // Check response - const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted - UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetReadSessionsInfoResult()); - auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetReadSessionsInfoResult().GetTopicResult(); - UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); - - { - const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? perTopicResults.Get(0) : perTopicResults.Get(1); - UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); - UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); - const auto& partition = topic1Result.GetPartitionResult(0); - UNIT_ASSERT_VALUES_EQUAL_C(partition.GetPartition(), 0, "Response: " << resp->Record); + } + } + + void HandlesPipeDisconnection_DisconnectionComesFirst() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesFirst); + } + + void HandlesPipeDisconnection_DisconnectionComesSecond() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::DisconnectionComesSecond); + } + + void HandlesPipeDisconnection_AnswerDoesNotArrive() { + HandlesPipeDisconnectionImpl(EDisconnectionMode::AnswerDoesNotArrive); + } + + void HandlesPipeDisconnectionImpl(EDisconnectionMode disconnectionMode) { + auto validation = [this](EDisconnectionMode disconnectionMode) { + AssertSucceededResponse(); + + // Check response + const TEvPersQueue::TEvResponse* resp = GetResponse(); // not nullptr is already asserted + UNIT_ASSERT(resp->Record.GetMetaResponse().HasCmdGetReadSessionsInfoResult()); + auto perTopicResults = resp->Record.GetMetaResponse().GetCmdGetReadSessionsInfoResult().GetTopicResult(); + UNIT_ASSERT_VALUES_EQUAL(perTopicResults.size(), 2); + + { + const auto& topic1Result = perTopicResults.Get(0).GetTopic() == "topic1" ? 
perTopicResults.Get(0) : perTopicResults.Get(1); + UNIT_ASSERT_STRINGS_EQUAL(topic1Result.GetTopic(), "topic1"); + UNIT_ASSERT_VALUES_EQUAL_C(topic1Result.PartitionResultSize(), 1, "Response: " << resp->Record); + const auto& partition = topic1Result.GetPartitionResult(0); + UNIT_ASSERT_VALUES_EQUAL_C(partition.GetPartition(), 0, "Response: " << resp->Record); UNIT_ASSERT_C(partition.GetErrorCode() == (ui32)NPersQueue::NErrorCode::INITIALIZING, "Response: " << resp->Record); - } - { - auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? *perTopicResults.Mutable(0) : *perTopicResults.Mutable(1); - UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); + } + { + auto& topic2Result = perTopicResults.Get(0).GetTopic() == "topic2" ? *perTopicResults.Mutable(0) : *perTopicResults.Mutable(1); + UNIT_ASSERT_STRINGS_EQUAL(topic2Result.GetTopic(), "topic2"); const size_t expectedPartitionsSize = 3; //disconnectionMode == EDisconnectionMode::AnswerDoesNotArrive ? 2 : 3; - UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), expectedPartitionsSize, "Response: " << resp->Record); - - // Partitions (order is not specified) - std::sort(topic2Result.MutablePartitionResult()->begin(), - topic2Result.MutablePartitionResult()->end(), - [](const auto& p1, const auto& p2) { - return p1.GetPartition() < p2.GetPartition(); - }); - const auto& partition0 = topic2Result.GetPartitionResult(0); - const auto& partition1 = topic2Result.GetPartitionResult(1); + UNIT_ASSERT_VALUES_EQUAL_C(topic2Result.PartitionResultSize(), expectedPartitionsSize, "Response: " << resp->Record); + + // Partitions (order is not specified) + std::sort(topic2Result.MutablePartitionResult()->begin(), + topic2Result.MutablePartitionResult()->end(), + [](const auto& p1, const auto& p2) { + return p1.GetPartition() < p2.GetPartition(); + }); + const auto& partition0 = topic2Result.GetPartitionResult(0); + const auto& partition1 = topic2Result.GetPartitionResult(1); const auto& partition2 = topic2Result.GetPartitionResult(2); - - UNIT_ASSERT_VALUES_EQUAL_C(partition0.GetPartition(), 0, "Response: " << resp->Record); - UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); + + UNIT_ASSERT_VALUES_EQUAL_C(partition0.GetPartition(), 0, "Response: " << resp->Record); + UNIT_ASSERT_VALUES_EQUAL_C(partition1.GetPartition(), 1, "Response: " << resp->Record); UNIT_ASSERT_VALUES_EQUAL_C(partition2.GetPartition(), 2, "Response: " << resp->Record); - + UNIT_ASSERT_C(partition0.GetErrorCode() == (ui32)NPersQueue::NErrorCode::INITIALIZING, "Response: " << resp->Record); UNIT_ASSERT_C(partition1.GetErrorCode() == (ui32)NPersQueue::NErrorCode::INITIALIZING, "Response: " << resp->Record); UNIT_ASSERT_C(partition2.GetErrorCode() == (ui32)NPersQueue::NErrorCode::INITIALIZING, "Response: " << resp->Record); Y_UNUSED(disconnectionMode); - } - }; - TMessageBusServerPersQueueRequestCommonTest::HandlesPipeDisconnectionImpl<TEvPersQueue::TEvOffsetsResponse>(disconnectionMode, validation, true); - } -}; - -UNIT_TEST_SUITE_REGISTRATION(TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest); -UNIT_TEST_SUITE_REGISTRATION(TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest); -UNIT_TEST_SUITE_REGISTRATION(TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest); -UNIT_TEST_SUITE_REGISTRATION(TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest); -UNIT_TEST_SUITE_REGISTRATION(TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest); -} // namespace NMsgBusProxy -} // namespace 
NKikimr + } + }; + TMessageBusServerPersQueueRequestCommonTest::HandlesPipeDisconnectionImpl<TEvPersQueue::TEvOffsetsResponse>(disconnectionMode, validation, true); + } +}; + +UNIT_TEST_SUITE_REGISTRATION(TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest); +UNIT_TEST_SUITE_REGISTRATION(TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest); +UNIT_TEST_SUITE_REGISTRATION(TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest); +UNIT_TEST_SUITE_REGISTRATION(TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest); +UNIT_TEST_SUITE_REGISTRATION(TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest); +} // namespace NMsgBusProxy +} // namespace NKikimr diff --git a/ydb/core/client/server/msgbus_server_sqs.cpp b/ydb/core/client/server/msgbus_server_sqs.cpp index 82981f571ad..ea0383f448f 100644 --- a/ydb/core/client/server/msgbus_server_sqs.cpp +++ b/ydb/core/client/server/msgbus_server_sqs.cpp @@ -16,7 +16,7 @@ public: { } - void DoSendReply(const NKikimrClient::TSqsResponse& resp) override { + void DoSendReply(const NKikimrClient::TSqsResponse& resp) override { auto response = MakeHolder<NMsgBusProxy::TBusSqsResponse>(); response->Record.CopyFrom(resp); SendReplyMove(response.Release()); @@ -27,11 +27,11 @@ public: IActor* CreateMessageBusSqsRequest(NMsgBusProxy::TBusMessageContext& msg) { - NKikimrClient::TSqsRequest record + NKikimrClient::TSqsRequest record = static_cast<NMsgBusProxy::TBusSqsRequest*>(msg.GetMessage())->Record; - record.SetRequestId(CreateGuidAsString()); + record.SetRequestId(CreateGuidAsString()); - return CreateProxyActionActor(record, MakeHolder<TMessageBusCallback>(msg), true); + return CreateProxyActionActor(record, MakeHolder<TMessageBusCallback>(msg), true); } } // namespace NMsgBusProxy diff --git a/ydb/core/client/server/ut/ya.make b/ydb/core/client/server/ut/ya.make index 409979461df..d61b1c9215d 100644 --- a/ydb/core/client/server/ut/ya.make +++ b/ydb/core/client/server/ut/ya.make @@ -1,7 +1,7 @@ UNITTEST_FOR(ydb/core/client/server) - -OWNER(g:kikimr) - + +OWNER(g:kikimr) + FORK_SUBTESTS() SPLIT_FACTOR(20) @@ -10,18 +10,18 @@ TIMEOUT(600) SIZE(MEDIUM) -PEERDIR( +PEERDIR( library/cpp/testing/gmock_in_unittest ydb/core/persqueue ydb/core/tablet_flat ydb/core/testlib ydb/core/testlib/actors -) - +) + YQL_LAST_ABI_VERSION() -SRCS( - msgbus_server_pq_metarequest_ut.cpp -) - -END() +SRCS( + msgbus_server_pq_metarequest_ut.cpp +) + +END() diff --git a/ydb/core/client/server/ya.make b/ydb/core/client/server/ya.make index 8cbeec21b77..43d0c0ac330 100644 --- a/ydb/core/client/server/ya.make +++ b/ydb/core/client/server/ya.make @@ -29,8 +29,8 @@ SRCS( msgbus_server_persqueue.h msgbus_server_pq_metacache.h msgbus_server_pq_metacache.cpp - msgbus_server_pq_metarequest.h - msgbus_server_pq_metarequest.cpp + msgbus_server_pq_metarequest.h + msgbus_server_pq_metarequest.cpp msgbus_server_pq_read_session_info.cpp msgbus_server_resolve_node.cpp msgbus_server_ic_debug.cpp diff --git a/ydb/core/cms/console/console_configs_subscriber.cpp b/ydb/core/cms/console/console_configs_subscriber.cpp index 20f3969f86f..537a0d4e50b 100644 --- a/ydb/core/cms/console/console_configs_subscriber.cpp +++ b/ydb/core/cms/console/console_configs_subscriber.cpp @@ -47,10 +47,10 @@ public: , LastOrder(0) , CurrentConfig(currentConfig) {} - static constexpr NKikimrServices::TActivity::EType ActorActivityType() { - return NKikimrServices::TActivity::CMS_CONFIGS_SUBSCRIBER; - } - + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { + return 
NKikimrServices::TActivity::CMS_CONFIGS_SUBSCRIBER; + } + void Bootstrap(const TActorContext &ctx) { auto dinfo = AppData(ctx)->DomainsInfo; if (dinfo->Domains.size() != 1) { diff --git a/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp b/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp index 6766dd31719..36a39e94bda 100644 --- a/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp +++ b/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp @@ -525,7 +525,7 @@ protected: if (MonitoringAddress) AppConfig.MutableMonitoringConfig()->SetMonitoringAddress(MonitoringAddress); if (SqsHttpPort) - RunConfig.AppConfig.MutableSqsConfig()->MutableHttpServerConfig()->SetPort(SqsHttpPort); + RunConfig.AppConfig.MutableSqsConfig()->MutableHttpServerConfig()->SetPort(SqsHttpPort); if (GRpcPort) { auto& conf = *AppConfig.MutableGRpcConfig(); conf.SetStartGRpcProxy(true); diff --git a/ydb/core/driver_lib/run/factories.cpp b/ydb/core/driver_lib/run/factories.cpp index b09f2266c24..4f4b01b4b84 100644 --- a/ydb/core/driver_lib/run/factories.cpp +++ b/ydb/core/driver_lib/run/factories.cpp @@ -1,11 +1,11 @@ -#include "factories.h" - -namespace NKikimr { - -TModuleFactories::~TModuleFactories() { - if (PqCmConnections) { - PqCmConnections->Stop(true); - } -} - -} // namespace NKikimr +#include "factories.h" + +namespace NKikimr { + +TModuleFactories::~TModuleFactories() { + if (PqCmConnections) { + PqCmConnections->Stop(true); + } +} + +} // namespace NKikimr diff --git a/ydb/core/driver_lib/run/factories.h b/ydb/core/driver_lib/run/factories.h index 41a45b44e08..bf7570cf21c 100644 --- a/ydb/core/driver_lib/run/factories.h +++ b/ydb/core/driver_lib/run/factories.h @@ -15,7 +15,7 @@ #include <ydb/core/yq/libs/config/protos/audit.pb.h> #include <ydb/library/yql/providers/pq/cm_client/interface/client.h> - + #include <library/cpp/actors/core/actorsystem.h> #include <ydb/library/security/ydb_credentials_provider_factory.h> @@ -57,7 +57,7 @@ struct TModuleFactories { std::function<NActors::TMon* (NActors::TMon::TConfig)> MonitoringFactory; std::shared_ptr<NSQS::IAuthFactory> SqsAuthFactory; - ~TModuleFactories(); + ~TModuleFactories(); }; } // NKikimr diff --git a/ydb/core/driver_lib/run/kikimr_services_initializers.cpp b/ydb/core/driver_lib/run/kikimr_services_initializers.cpp index 819c1478d19..7b6cd07e44c 100644 --- a/ydb/core/driver_lib/run/kikimr_services_initializers.cpp +++ b/ydb/core/driver_lib/run/kikimr_services_initializers.cpp @@ -949,11 +949,11 @@ void TLocalServiceInitializer::InitializeServices( TActorId(), TActorSetupCmd(CreateLabelsMaintainer(Config.GetMonitoringConfig()), TMailboxType::ReadAsFilled, 0))); - + setup->LocalServices.emplace_back(NTestShard::MakeStateServerInterfaceActorId(), TActorSetupCmd( NTestShard::CreateStateServerInterfaceActor(), TMailboxType::ReadAsFilled, 0)); - NKesus::AddKesusProbesList(); + NKesus::AddKesusProbesList(); } // TSharedCacheInitializer @@ -1493,17 +1493,17 @@ void TGRpcServicesInitializer::InitializeServices(NActors::TActorSystemSetup* se NMsgBusProxy::CreateMsgBusProxyId(), TActorSetupCmd(proxy, TMailboxType::ReadAsFilled, appData->UserPoolId)); - if (appData->PQConfig.GetEnabled()) { - - TDuration pqMetaRefresh = TDuration::Seconds(NMsgBusProxy::PQ_METACACHE_REFRESH_INTERVAL_SECONDS); + if (appData->PQConfig.GetEnabled()) { + + TDuration pqMetaRefresh = TDuration::Seconds(NMsgBusProxy::PQ_METACACHE_REFRESH_INTERVAL_SECONDS); IActor * cache = NMsgBusProxy::NPqMetaCacheV2::CreatePQMetaCache( appData->Counters, pqMetaRefresh ); - Y_VERIFY(cache); - 
setup->LocalServices.emplace_back( - NMsgBusProxy::CreatePersQueueMetaCacheV2Id(), - TActorSetupCmd(cache, TMailboxType::ReadAsFilled, appData->UserPoolId)); - } + Y_VERIFY(cache); + setup->LocalServices.emplace_back( + NMsgBusProxy::CreatePersQueueMetaCacheV2Id(), + TActorSetupCmd(cache, TMailboxType::ReadAsFilled, appData->UserPoolId)); + } } if (!IsServiceInitialized(setup, NGRpcService::CreateGRpcRequestProxyId())) { @@ -2091,27 +2091,27 @@ TSqsServiceInitializer::TSqsServiceInitializer(const TKikimrRunConfig& runConfig } void TSqsServiceInitializer::InitializeServices(NActors::TActorSystemSetup* setup, const NKikimr::TAppData* appData) { - if (Config.GetSqsConfig().GetEnableSqs()) { + if (Config.GetSqsConfig().GetEnableSqs()) { ui32 grpcPort = 0; if (Config.HasGRpcConfig()) grpcPort = Config.GetGRpcConfig().GetPort(); - { + { IActor* actor = NSQS::CreateSqsService(grpcPort); - setup->LocalServices.emplace_back( - NSQS::MakeSqsServiceID(NodeId), - TActorSetupCmd(actor, TMailboxType::HTSwap, appData->UserPoolId)); - } - - { - IActor* actor = NSQS::CreateSqsProxyService(); - setup->LocalServices.emplace_back( - NSQS::MakeSqsProxyServiceID(NodeId), - TActorSetupCmd(actor, TMailboxType::HTSwap, appData->UserPoolId)); - } + setup->LocalServices.emplace_back( + NSQS::MakeSqsServiceID(NodeId), + TActorSetupCmd(actor, TMailboxType::HTSwap, appData->UserPoolId)); + } + + { + IActor* actor = NSQS::CreateSqsProxyService(); + setup->LocalServices.emplace_back( + NSQS::MakeSqsProxyServiceID(NodeId), + TActorSetupCmd(actor, TMailboxType::HTSwap, appData->UserPoolId)); + } Factories->SqsAuthFactory->Initialize( setup->LocalServices, *appData, Config.GetSqsConfig()); - } + } } TConfigsDispatcherInitializer::TConfigsDispatcherInitializer(const TKikimrRunConfig& runConfig) @@ -2250,10 +2250,10 @@ void THealthCheckInitializer::InitializeServices(TActorSystemSetup* setup, const TActorSetupCmd(NHealthCheck::CreateHealthCheckService(), TMailboxType::HTSwap, appData->UserPoolId))); } -TYandexQueryInitializer::TYandexQueryInitializer(const TKikimrRunConfig& runConfig, std::shared_ptr<TModuleFactories> factories, NYq::IYqSharedResources::TPtr yqSharedResources) +TYandexQueryInitializer::TYandexQueryInitializer(const TKikimrRunConfig& runConfig, std::shared_ptr<TModuleFactories> factories, NYq::IYqSharedResources::TPtr yqSharedResources) : IKikimrServicesInitializer(runConfig) - , Factories(std::move(factories)) - , YqSharedResources(std::move(yqSharedResources)) + , Factories(std::move(factories)) + , YqSharedResources(std::move(yqSharedResources)) { } diff --git a/ydb/core/driver_lib/run/kikimr_services_initializers.h b/ydb/core/driver_lib/run/kikimr_services_initializers.h index 407ce1bb7ba..ccc9560529c 100644 --- a/ydb/core/driver_lib/run/kikimr_services_initializers.h +++ b/ydb/core/driver_lib/run/kikimr_services_initializers.h @@ -15,7 +15,7 @@ #include <ydb/public/lib/base/msgbus.h> #include <ydb/core/yq/libs/shared_resources/interface/shared_resources.h> - + #include <library/cpp/actors/core/defs.h> #include <library/cpp/actors/core/actorsystem.h> #include <library/cpp/actors/core/log_settings.h> @@ -488,14 +488,14 @@ public: class TYandexQueryInitializer : public IKikimrServicesInitializer { public: - TYandexQueryInitializer(const TKikimrRunConfig& runConfig, std::shared_ptr<TModuleFactories> factories, NYq::IYqSharedResources::TPtr yqSharedResources); + TYandexQueryInitializer(const TKikimrRunConfig& runConfig, std::shared_ptr<TModuleFactories> factories, NYq::IYqSharedResources::TPtr 
yqSharedResources); void InitializeServices(NActors::TActorSystemSetup* setup, const NKikimr::TAppData* appData) override; static void SetIcPort(ui32 icPort); -private: - std::shared_ptr<TModuleFactories> Factories; - NYq::IYqSharedResources::TPtr YqSharedResources; +private: + std::shared_ptr<TModuleFactories> Factories; + NYq::IYqSharedResources::TPtr YqSharedResources; static ui32 IcPort; }; diff --git a/ydb/core/driver_lib/run/run.cpp b/ydb/core/driver_lib/run/run.cpp index a4f74aa4e0f..46efd03b985 100644 --- a/ydb/core/driver_lib/run/run.cpp +++ b/ydb/core/driver_lib/run/run.cpp @@ -102,7 +102,7 @@ #include <ydb/services/yq/grpc_service.h> #include <ydb/core/yq/libs/init/init.h> - + #include <library/cpp/logger/global/global.h> #include <library/cpp/monlib/messagebus/mon_messagebus.h> #include <library/cpp/sighandler/async_signals_handler.h> @@ -521,7 +521,7 @@ void TKikimrRunner::InitializeGRpc(const TKikimrRunConfig& runConfig) { names["experimental"] = &hasExperimental; bool hasClickhouseInternal = services.empty(); names["clickhouse_internal"] = &hasClickhouseInternal; - bool hasRateLimiter = false; + bool hasRateLimiter = false; names["rate_limiter"] = &hasRateLimiter; bool hasLongTx = false; names["long_tx"] = &hasLongTx; @@ -696,10 +696,10 @@ void TKikimrRunner::InitializeGRpc(const TKikimrRunConfig& runConfig) { if (hasDiscovery) { server.AddService(new NGRpcService::TGRpcDiscoveryService(ActorSystem.Get(), Counters, grpcRequestProxyId)); } - + if (hasRateLimiter) { server.AddService(new NQuoter::TRateLimiterGRpcService(ActorSystem.Get(), Counters, grpcRequestProxyId)); - } + } if (hasMonitoring) { server.AddService(new NGRpcService::TGRpcMonitoringService(ActorSystem.Get(), Counters, grpcRequestProxyId)); @@ -1113,7 +1113,7 @@ void TKikimrRunner::InitializeActorSystem( AppData->UserPoolId); } } - + if (runConfig.AppConfig.HasGRpcConfig()) { if (const ui32 grpcPort = runConfig.AppConfig.GetGRpcConfig().GetPort()) { auto driverConfig = NYdb::TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << grpcPort); @@ -1122,9 +1122,9 @@ void TKikimrRunner::InitializeActorSystem( } } - if (YqSharedResources) { - YqSharedResources->Init(ActorSystem.Get()); - } + if (YqSharedResources) { + YqSharedResources->Init(ActorSystem.Get()); + } } TIntrusivePtr<TServiceInitializersList> TKikimrRunner::CreateServiceInitializersList( @@ -1318,7 +1318,7 @@ TIntrusivePtr<TServiceInitializersList> TKikimrRunner::CreateServiceInitializers if (serviceMask.EnableYandexQuery && runConfig.AppConfig.GetYandexQueryConfig().GetEnabled()) { YqSharedResources = NYq::CreateYqSharedResources(runConfig.AppConfig.GetYandexQueryConfig(), ModuleFactories->YdbCredentialProviderFactory, Counters->GetSubgroup("counters", "yq")); - sil->AddServiceInitializer(new TYandexQueryInitializer(runConfig, ModuleFactories, YqSharedResources)); + sil->AddServiceInitializer(new TYandexQueryInitializer(runConfig, ModuleFactories, YqSharedResources)); } if (serviceMask.EnableSequenceProxyService) { @@ -1477,10 +1477,10 @@ void TKikimrRunner::KikimrStop(bool graceful) { Bus.Drop(); } - if (YqSharedResources) { - YqSharedResources->Stop(); - } - + if (YqSharedResources) { + YqSharedResources->Stop(); + } + if (ActorSystem) { ActorSystem->Cleanup(); } diff --git a/ydb/core/driver_lib/run/run.h b/ydb/core/driver_lib/run/run.h index dfd93a24e71..b808a4f7343 100644 --- a/ydb/core/driver_lib/run/run.h +++ b/ydb/core/driver_lib/run/run.h @@ -33,7 +33,7 @@ protected: TIntrusivePtr<NScheme::TTypeRegistry> TypeRegistry; 
TIntrusivePtr<NMiniKQL::IMutableFunctionRegistry> FunctionRegistry; TIntrusivePtr<TFormatFactory> FormatFactory; - NYq::IYqSharedResources::TPtr YqSharedResources; + NYq::IYqSharedResources::TPtr YqSharedResources; TAutoPtr<TMon> Monitoring; NMonitoring::TDynamicCounterPtr Counters; @@ -41,7 +41,7 @@ protected: bool EnabledGrpcService = false; bool GracefulShutdownSupported = false; - THolder<NSQS::TAsyncHttpServer> SqsHttp; + THolder<NSQS::TAsyncHttpServer> SqsHttp; THolder<NYdb::TDriver> YdbDriver; diff --git a/ydb/core/driver_lib/run/ya.make b/ydb/core/driver_lib/run/ya.make index f410ef42f20..3cc649a2e27 100644 --- a/ydb/core/driver_lib/run/ya.make +++ b/ydb/core/driver_lib/run/ya.make @@ -26,7 +26,7 @@ SRCS( dummy.cpp dummy.h factories.h - factories.cpp + factories.cpp kikimr_services_initializers.cpp kikimr_services_initializers.h log_backend.cpp diff --git a/ydb/core/grpc_services/base/base.h b/ydb/core/grpc_services/base/base.h index 44b25c4a5f9..f2f0a0ab4b6 100644 --- a/ydb/core/grpc_services/base/base.h +++ b/ydb/core/grpc_services/base/base.h @@ -108,11 +108,11 @@ struct TRpcServices { EvBulkUpsert, EvWhoAmI, EvKikhouseDescribeTable, - EvCreateRateLimiterResource, - EvAlterRateLimiterResource, - EvDropRateLimiterResource, - EvListRateLimiterResources, - EvDescribeRateLimiterResource, + EvCreateRateLimiterResource, + EvAlterRateLimiterResource, + EvDropRateLimiterResource, + EvListRateLimiterResources, + EvDescribeRateLimiterResource, EvAcquireRateLimiterResource, EvKikhouseCreateSnapshot, EvKikhouseRefreshSnapshot, diff --git a/ydb/core/grpc_services/grpc_request_proxy.h b/ydb/core/grpc_services/grpc_request_proxy.h index 97315f6e9fb..d75e3e6be56 100644 --- a/ydb/core/grpc_services/grpc_request_proxy.h +++ b/ydb/core/grpc_services/grpc_request_proxy.h @@ -103,11 +103,11 @@ protected: void Handle(TEvDiscoverPQClustersRequest::TPtr& ev, const TActorContext& ctx); void Handle(TEvBulkUpsertRequest::TPtr& ev, const TActorContext& ctx); void Handle(TEvWhoAmIRequest::TPtr& ev, const TActorContext& ctx); - void Handle(TEvCreateRateLimiterResource::TPtr& ev, const TActorContext& ctx); - void Handle(TEvAlterRateLimiterResource::TPtr& ev, const TActorContext& ctx); - void Handle(TEvDropRateLimiterResource::TPtr& ev, const TActorContext& ctx); - void Handle(TEvListRateLimiterResources::TPtr& ev, const TActorContext& ctx); - void Handle(TEvDescribeRateLimiterResource::TPtr& ev, const TActorContext& ctx); + void Handle(TEvCreateRateLimiterResource::TPtr& ev, const TActorContext& ctx); + void Handle(TEvAlterRateLimiterResource::TPtr& ev, const TActorContext& ctx); + void Handle(TEvDropRateLimiterResource::TPtr& ev, const TActorContext& ctx); + void Handle(TEvListRateLimiterResources::TPtr& ev, const TActorContext& ctx); + void Handle(TEvDescribeRateLimiterResource::TPtr& ev, const TActorContext& ctx); void Handle(TEvAcquireRateLimiterResource::TPtr& ev, const TActorContext& ctx); void Handle(TEvKikhouseCreateSnapshotRequest::TPtr& ev, const TActorContext& ctx); void Handle(TEvKikhouseRefreshSnapshotRequest::TPtr& ev, const TActorContext& ctx); diff --git a/ydb/core/grpc_services/rpc_calls.cpp b/ydb/core/grpc_services/rpc_calls.cpp index b94d168147c..d8c6cb6eef1 100644 --- a/ydb/core/grpc_services/rpc_calls.cpp +++ b/ydb/core/grpc_services/rpc_calls.cpp @@ -13,7 +13,7 @@ void FillYdbStatus(Ydb::PersQueue::V1::StreamingWriteServerMessage& resp, const } template <> -void FillYdbStatus(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage& resp, const NYql::TIssues& issues, 
Ydb::StatusIds::StatusCode status) { +void FillYdbStatus(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage& resp, const NYql::TIssues& issues, Ydb::StatusIds::StatusCode status) { resp.set_status(status); NYql::IssuesToMessage(issues, resp.mutable_issues()); } diff --git a/ydb/core/grpc_services/rpc_calls.h b/ydb/core/grpc_services/rpc_calls.h index 43136c2f4ad..f6260a70e89 100644 --- a/ydb/core/grpc_services/rpc_calls.h +++ b/ydb/core/grpc_services/rpc_calls.h @@ -86,7 +86,7 @@ using TEvS3ListingRequest = TGRpcRequestWrapper<TRpcServices::EvS3Listing, Ydb:: using TEvBiStreamPingRequest = TGRpcRequestBiStreamWrapper<TRpcServices::EvBiStreamPing, Draft::Dummy::PingRequest, Draft::Dummy::PingResponse>; using TEvExperimentalStreamQueryRequest = TGRpcRequestWrapper<TRpcServices::EvExperimentalStreamQuery, Ydb::Experimental::ExecuteStreamQueryRequest, Ydb::Experimental::ExecuteStreamQueryResponse, false>; using TEvStreamPQWriteRequest = TGRpcRequestBiStreamWrapper<TRpcServices::EvStreamPQWrite, Ydb::PersQueue::V1::StreamingWriteClientMessage, Ydb::PersQueue::V1::StreamingWriteServerMessage>; -using TEvStreamPQReadRequest = TGRpcRequestBiStreamWrapper<TRpcServices::EvStreamPQRead, Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>; +using TEvStreamPQReadRequest = TGRpcRequestBiStreamWrapper<TRpcServices::EvStreamPQRead, Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>; using TEvPQReadInfoRequest = TGRpcRequestWrapper<TRpcServices::EvPQReadInfo, Ydb::PersQueue::V1::ReadInfoRequest, Ydb::PersQueue::V1::ReadInfoResponse, true>; using TEvPQDropTopicRequest = TGRpcRequestValidationWrapper<TRpcServices::EvPQDropTopic, Ydb::PersQueue::V1::DropTopicRequest, Ydb::PersQueue::V1::DropTopicResponse, true>; using TEvPQCreateTopicRequest = TGRpcRequestValidationWrapper<TRpcServices::EvPQCreateTopic, Ydb::PersQueue::V1::CreateTopicRequest, Ydb::PersQueue::V1::CreateTopicResponse, true>; diff --git a/ydb/core/grpc_services/rpc_deferrable.h b/ydb/core/grpc_services/rpc_deferrable.h index 644e7b84c40..f6a1f54fd79 100644 --- a/ydb/core/grpc_services/rpc_deferrable.h +++ b/ydb/core/grpc_services/rpc_deferrable.h @@ -188,12 +188,12 @@ protected: this->Die(ctx); } - void Reply(Ydb::StatusIds::StatusCode status, const TString& message, NKikimrIssues::TIssuesIds::EIssueCode issueCode, const TActorContext& ctx) { - NYql::TIssues issues; - issues.AddIssue(MakeIssue(issueCode, message)); - Reply(status, issues, ctx); - } - + void Reply(Ydb::StatusIds::StatusCode status, const TString& message, NKikimrIssues::TIssuesIds::EIssueCode issueCode, const TActorContext& ctx) { + NYql::TIssues issues; + issues.AddIssue(MakeIssue(issueCode, message)); + Reply(status, issues, ctx); + } + void Reply(Ydb::StatusIds::StatusCode status, const TActorContext& ctx) { Request_->ReplyWithYdbStatus(status); this->Die(ctx); diff --git a/ydb/core/grpc_services/rpc_yq.cpp b/ydb/core/grpc_services/rpc_yq.cpp index 6fe5f868d6b..482b707aca1 100644 --- a/ydb/core/grpc_services/rpc_yq.cpp +++ b/ydb/core/grpc_services/rpc_yq.cpp @@ -34,7 +34,7 @@ public: using TBase::Request_; using TBase::GetProtoRequest; -protected: +protected: TString Token; TString FolderId; TString User; @@ -61,7 +61,7 @@ public: TMaybe<TString> authToken = proxyCtx->GetYdbToken(); if (!authToken) { - ReplyWithStatus("Token is empty", StatusIds::BAD_REQUEST); + ReplyWithStatus("Token is empty", StatusIds::BAD_REQUEST); return; } Token = *authToken; 
@@ -80,12 +80,12 @@ public: FolderId = path.back(); if (!FolderId) { - ReplyWithStatus("Folder id is empty", StatusIds::BAD_REQUEST); + ReplyWithStatus("Folder id is empty", StatusIds::BAD_REQUEST); return; } if (FolderId.length() > 1024) { - ReplyWithStatus("Folder id length greater than 1024 characters: " + FolderId, StatusIds::BAD_REQUEST); + ReplyWithStatus("Folder id length greater than 1024 characters: " + FolderId, StatusIds::BAD_REQUEST); return; } @@ -106,18 +106,18 @@ public: return; } - const auto* req = GetProtoRequest(); + const auto* req = GetProtoRequest(); auto ev = MakeHolder<EvRequestType>(FolderId, *req, User, Token, permissions); Send(NYq::ControlPlaneProxyActorId(), ev.Release()); Become(&TYandexQueryRequestRPC<RpcRequestType, EvRequestType, EvResponseType>::StateFunc); } -protected: - void ReplyWithStatus(const TString& issueMessage, StatusIds::StatusCode status) { +protected: + void ReplyWithStatus(const TString& issueMessage, StatusIds::StatusCode status) { Request_->RaiseIssue(NYql::TIssue(issueMessage)); - Request_->ReplyWithYdbStatus(status); - PassAway(); - } + Request_->ReplyWithYdbStatus(status); + PassAway(); + } STRICT_STFUNC(StateFunc, hFunc(EvResponseType, Handle); @@ -130,7 +130,7 @@ protected: req.ReplyWithYdbStatus(StatusIds::BAD_REQUEST); } else { req.SendResult(response.Result, StatusIds::SUCCESS); - } + } } template <typename TResponse, typename TReq> requires requires (TResponse r) { r.AuditDetails; } @@ -140,7 +140,7 @@ protected: req.ReplyWithYdbStatus(StatusIds::BAD_REQUEST); } else { req.SendResult(response.Result, StatusIds::SUCCESS); - } + } NYq::TEvAuditService::TExtraInfo extraInfo{ .Token = Token, diff --git a/ydb/core/kesus/tablet/events.h b/ydb/core/kesus/tablet/events.h index 872f0149c95..814061c067f 100644 --- a/ydb/core/kesus/tablet/events.h +++ b/ydb/core/kesus/tablet/events.h @@ -8,9 +8,9 @@ namespace NKikimr { namespace NKesus { -TString CanonizeQuoterResourcePath(const TVector<TString>& path); -TString CanonizeQuoterResourcePath(const TString& path); - +TString CanonizeQuoterResourcePath(const TVector<TString>& path); +TString CanonizeQuoterResourcePath(const TString& path); + struct TEvKesus { enum EEv { EvBegin = EventSpaceBegin(TKikimrEvents::ES_KESUS), @@ -66,30 +66,30 @@ struct TEvKesus { Deprecated_EvJobStop, EvDescribeSemaphoreChanged, - // Quoter API - // Control API - EvDescribeQuoterResources = EvBegin + 1024, - EvDescribeQuoterResourcesResult, - EvAddQuoterResource, - EvAddQuoterResourceResult, - EvUpdateQuoterResource, - EvUpdateQuoterResourceResult, - EvDeleteQuoterResource, - EvDeleteQuoterResourceResult, - // Runtime API - EvSubscribeOnResources = EvBegin + 1024 + 512, - EvSubscribeOnResourcesResult, - EvUpdateConsumptionState, - EvUpdateConsumptionStateAck, - EvResourcesAllocated, - EvResourcesAllocatedAck, - EvProxyResourceConsumptionStatistics, - EvAggregatedResourceConsumptionStatistics, - EvGetQuoterResourceCounters, - EvGetQuoterResourceCountersResult, + // Quoter API + // Control API + EvDescribeQuoterResources = EvBegin + 1024, + EvDescribeQuoterResourcesResult, + EvAddQuoterResource, + EvAddQuoterResourceResult, + EvUpdateQuoterResource, + EvUpdateQuoterResourceResult, + EvDeleteQuoterResource, + EvDeleteQuoterResourceResult, + // Runtime API + EvSubscribeOnResources = EvBegin + 1024 + 512, + EvSubscribeOnResourcesResult, + EvUpdateConsumptionState, + EvUpdateConsumptionStateAck, + EvResourcesAllocated, + EvResourcesAllocatedAck, + EvProxyResourceConsumptionStatistics, + 
EvAggregatedResourceConsumptionStatistics, + EvGetQuoterResourceCounters, + EvGetQuoterResourceCountersResult, EvAccountResources, EvAccountResourcesAck, - + EvEnd }; @@ -432,104 +432,104 @@ struct TEvKesus { Record.SetOwnersChanged(ownersChanged); } }; - - // Quoter API - - struct TEvDescribeQuoterResources : public TEventPB<TEvDescribeQuoterResources, NKikimrKesus::TEvDescribeQuoterResources, EvDescribeQuoterResources> { - using TEventPB::TEventPB; - }; - - struct TEvDescribeQuoterResourcesResult : public TEventPB<TEvDescribeQuoterResourcesResult, NKikimrKesus::TEvDescribeQuoterResourcesResult, EvDescribeQuoterResourcesResult> { - using TEventPB::TEventPB; - - TEvDescribeQuoterResourcesResult() = default; - - TEvDescribeQuoterResourcesResult(Ydb::StatusIds::StatusCode status, const TString& reason) { - FillError(Record.MutableError(), status, reason); - } - }; - - struct TEvAddQuoterResource : public TEventPB<TEvAddQuoterResource, NKikimrKesus::TEvAddQuoterResource, EvAddQuoterResource> { - using TEventPB::TEventPB; - }; - - struct TEvAddQuoterResourceResult : public TEventPB<TEvAddQuoterResourceResult, NKikimrKesus::TEvAddQuoterResourceResult, EvAddQuoterResourceResult> { - using TEventPB::TEventPB; - - TEvAddQuoterResourceResult() = default; - - TEvAddQuoterResourceResult(Ydb::StatusIds::StatusCode status, const TString& reason) { - FillError(Record.MutableError(), status, reason); - } - }; - - struct TEvUpdateQuoterResource : public TEventPB<TEvUpdateQuoterResource, NKikimrKesus::TEvUpdateQuoterResource, EvUpdateQuoterResource> { - using TEventPB::TEventPB; - }; - - struct TEvUpdateQuoterResourceResult : public TEventPB<TEvUpdateQuoterResourceResult, NKikimrKesus::TEvUpdateQuoterResourceResult, EvUpdateQuoterResourceResult> { - using TEventPB::TEventPB; - - TEvUpdateQuoterResourceResult() = default; - - TEvUpdateQuoterResourceResult(Ydb::StatusIds::StatusCode status, const TString& reason) { - FillError(Record.MutableError(), status, reason); - } - }; - - struct TEvDeleteQuoterResource : public TEventPB<TEvDeleteQuoterResource, NKikimrKesus::TEvDeleteQuoterResource, EvDeleteQuoterResource> { - using TEventPB::TEventPB; - }; - - struct TEvDeleteQuoterResourceResult : public TEventPB<TEvDeleteQuoterResourceResult, NKikimrKesus::TEvDeleteQuoterResourceResult, EvDeleteQuoterResourceResult> { - using TEventPB::TEventPB; - - TEvDeleteQuoterResourceResult() = default; - - TEvDeleteQuoterResourceResult(Ydb::StatusIds::StatusCode status, const TString& reason) { - FillError(Record.MutableError(), status, reason); - } - }; - - struct TEvSubscribeOnResources : public TEventPBWithArena<TEvSubscribeOnResources, NKikimrKesus::TEvSubscribeOnResources, EvSubscribeOnResources> { - using TBaseEvent = TEventPBWithArena<TEvSubscribeOnResources, NKikimrKesus::TEvSubscribeOnResources, EvSubscribeOnResources>; - using TBaseEvent::TBaseEvent; - }; - - struct TEvSubscribeOnResourcesResult : public TEventPBWithArena<TEvSubscribeOnResourcesResult, NKikimrKesus::TEvSubscribeOnResourcesResult, EvSubscribeOnResourcesResult> { - using TBaseEvent = TEventPBWithArena<TEvSubscribeOnResourcesResult, NKikimrKesus::TEvSubscribeOnResourcesResult, EvSubscribeOnResourcesResult>; - using TBaseEvent::TBaseEvent; - }; - - struct TEvUpdateConsumptionState : public TEventPBWithArena<TEvUpdateConsumptionState, NKikimrKesus::TEvUpdateConsumptionState, EvUpdateConsumptionState> { - using TBaseEvent = TEventPBWithArena<TEvUpdateConsumptionState, NKikimrKesus::TEvUpdateConsumptionState, EvUpdateConsumptionState>; - using 
TBaseEvent::TBaseEvent; - }; - - struct TEvUpdateConsumptionStateAck : public TEventPBWithArena<TEvUpdateConsumptionStateAck, NKikimrKesus::TEvUpdateConsumptionStateAck, EvUpdateConsumptionStateAck> { - using TBaseEvent = TEventPBWithArena<TEvUpdateConsumptionStateAck, NKikimrKesus::TEvUpdateConsumptionStateAck, EvUpdateConsumptionStateAck>; - using TBaseEvent::TBaseEvent; - }; - - struct TEvResourcesAllocated : public TEventPBWithArena<TEvResourcesAllocated, NKikimrKesus::TEvResourcesAllocated, EvResourcesAllocated> { - using TBaseEvent = TEventPBWithArena<TEvResourcesAllocated, NKikimrKesus::TEvResourcesAllocated, EvResourcesAllocated>; - using TBaseEvent::TBaseEvent; - }; - - struct TEvResourcesAllocatedAck : public TEventPBWithArena<TEvResourcesAllocatedAck, NKikimrKesus::TEvResourcesAllocatedAck, EvResourcesAllocatedAck> { - using TBaseEvent = TEventPBWithArena<TEvResourcesAllocatedAck, NKikimrKesus::TEvResourcesAllocatedAck, EvResourcesAllocatedAck>; - using TBaseEvent::TBaseEvent; - }; - - struct TEvGetQuoterResourceCounters : public TEventPBWithArena<TEvGetQuoterResourceCounters, NKikimrKesus::TEvGetQuoterResourceCounters, EvGetQuoterResourceCounters> { - using TBaseEvent = TEventPBWithArena<TEvGetQuoterResourceCounters, NKikimrKesus::TEvGetQuoterResourceCounters, EvGetQuoterResourceCounters>; - using TBaseEvent::TBaseEvent; - }; - - struct TEvGetQuoterResourceCountersResult : public TEventPBWithArena<TEvGetQuoterResourceCountersResult, NKikimrKesus::TEvGetQuoterResourceCountersResult, EvGetQuoterResourceCountersResult> { - using TBaseEvent = TEventPBWithArena<TEvGetQuoterResourceCountersResult, NKikimrKesus::TEvGetQuoterResourceCountersResult, EvGetQuoterResourceCountersResult>; - using TBaseEvent::TBaseEvent; - }; + + // Quoter API + + struct TEvDescribeQuoterResources : public TEventPB<TEvDescribeQuoterResources, NKikimrKesus::TEvDescribeQuoterResources, EvDescribeQuoterResources> { + using TEventPB::TEventPB; + }; + + struct TEvDescribeQuoterResourcesResult : public TEventPB<TEvDescribeQuoterResourcesResult, NKikimrKesus::TEvDescribeQuoterResourcesResult, EvDescribeQuoterResourcesResult> { + using TEventPB::TEventPB; + + TEvDescribeQuoterResourcesResult() = default; + + TEvDescribeQuoterResourcesResult(Ydb::StatusIds::StatusCode status, const TString& reason) { + FillError(Record.MutableError(), status, reason); + } + }; + + struct TEvAddQuoterResource : public TEventPB<TEvAddQuoterResource, NKikimrKesus::TEvAddQuoterResource, EvAddQuoterResource> { + using TEventPB::TEventPB; + }; + + struct TEvAddQuoterResourceResult : public TEventPB<TEvAddQuoterResourceResult, NKikimrKesus::TEvAddQuoterResourceResult, EvAddQuoterResourceResult> { + using TEventPB::TEventPB; + + TEvAddQuoterResourceResult() = default; + + TEvAddQuoterResourceResult(Ydb::StatusIds::StatusCode status, const TString& reason) { + FillError(Record.MutableError(), status, reason); + } + }; + + struct TEvUpdateQuoterResource : public TEventPB<TEvUpdateQuoterResource, NKikimrKesus::TEvUpdateQuoterResource, EvUpdateQuoterResource> { + using TEventPB::TEventPB; + }; + + struct TEvUpdateQuoterResourceResult : public TEventPB<TEvUpdateQuoterResourceResult, NKikimrKesus::TEvUpdateQuoterResourceResult, EvUpdateQuoterResourceResult> { + using TEventPB::TEventPB; + + TEvUpdateQuoterResourceResult() = default; + + TEvUpdateQuoterResourceResult(Ydb::StatusIds::StatusCode status, const TString& reason) { + FillError(Record.MutableError(), status, reason); + } + }; + + struct TEvDeleteQuoterResource : public 
TEventPB<TEvDeleteQuoterResource, NKikimrKesus::TEvDeleteQuoterResource, EvDeleteQuoterResource> { + using TEventPB::TEventPB; + }; + + struct TEvDeleteQuoterResourceResult : public TEventPB<TEvDeleteQuoterResourceResult, NKikimrKesus::TEvDeleteQuoterResourceResult, EvDeleteQuoterResourceResult> { + using TEventPB::TEventPB; + + TEvDeleteQuoterResourceResult() = default; + + TEvDeleteQuoterResourceResult(Ydb::StatusIds::StatusCode status, const TString& reason) { + FillError(Record.MutableError(), status, reason); + } + }; + + struct TEvSubscribeOnResources : public TEventPBWithArena<TEvSubscribeOnResources, NKikimrKesus::TEvSubscribeOnResources, EvSubscribeOnResources> { + using TBaseEvent = TEventPBWithArena<TEvSubscribeOnResources, NKikimrKesus::TEvSubscribeOnResources, EvSubscribeOnResources>; + using TBaseEvent::TBaseEvent; + }; + + struct TEvSubscribeOnResourcesResult : public TEventPBWithArena<TEvSubscribeOnResourcesResult, NKikimrKesus::TEvSubscribeOnResourcesResult, EvSubscribeOnResourcesResult> { + using TBaseEvent = TEventPBWithArena<TEvSubscribeOnResourcesResult, NKikimrKesus::TEvSubscribeOnResourcesResult, EvSubscribeOnResourcesResult>; + using TBaseEvent::TBaseEvent; + }; + + struct TEvUpdateConsumptionState : public TEventPBWithArena<TEvUpdateConsumptionState, NKikimrKesus::TEvUpdateConsumptionState, EvUpdateConsumptionState> { + using TBaseEvent = TEventPBWithArena<TEvUpdateConsumptionState, NKikimrKesus::TEvUpdateConsumptionState, EvUpdateConsumptionState>; + using TBaseEvent::TBaseEvent; + }; + + struct TEvUpdateConsumptionStateAck : public TEventPBWithArena<TEvUpdateConsumptionStateAck, NKikimrKesus::TEvUpdateConsumptionStateAck, EvUpdateConsumptionStateAck> { + using TBaseEvent = TEventPBWithArena<TEvUpdateConsumptionStateAck, NKikimrKesus::TEvUpdateConsumptionStateAck, EvUpdateConsumptionStateAck>; + using TBaseEvent::TBaseEvent; + }; + + struct TEvResourcesAllocated : public TEventPBWithArena<TEvResourcesAllocated, NKikimrKesus::TEvResourcesAllocated, EvResourcesAllocated> { + using TBaseEvent = TEventPBWithArena<TEvResourcesAllocated, NKikimrKesus::TEvResourcesAllocated, EvResourcesAllocated>; + using TBaseEvent::TBaseEvent; + }; + + struct TEvResourcesAllocatedAck : public TEventPBWithArena<TEvResourcesAllocatedAck, NKikimrKesus::TEvResourcesAllocatedAck, EvResourcesAllocatedAck> { + using TBaseEvent = TEventPBWithArena<TEvResourcesAllocatedAck, NKikimrKesus::TEvResourcesAllocatedAck, EvResourcesAllocatedAck>; + using TBaseEvent::TBaseEvent; + }; + + struct TEvGetQuoterResourceCounters : public TEventPBWithArena<TEvGetQuoterResourceCounters, NKikimrKesus::TEvGetQuoterResourceCounters, EvGetQuoterResourceCounters> { + using TBaseEvent = TEventPBWithArena<TEvGetQuoterResourceCounters, NKikimrKesus::TEvGetQuoterResourceCounters, EvGetQuoterResourceCounters>; + using TBaseEvent::TBaseEvent; + }; + + struct TEvGetQuoterResourceCountersResult : public TEventPBWithArena<TEvGetQuoterResourceCountersResult, NKikimrKesus::TEvGetQuoterResourceCountersResult, EvGetQuoterResourceCountersResult> { + using TBaseEvent = TEventPBWithArena<TEvGetQuoterResourceCountersResult, NKikimrKesus::TEvGetQuoterResourceCountersResult, EvGetQuoterResourceCountersResult>; + using TBaseEvent::TBaseEvent; + }; struct TEvAccountResources : public TEventPBWithArena<TEvAccountResources, NKikimrKesus::TEvAccountResources, EvAccountResources> { using TBaseEvent = TEventPBWithArena<TEvAccountResources, NKikimrKesus::TEvAccountResources, EvAccountResources>; diff --git a/ydb/core/kesus/tablet/probes.cpp 
b/ydb/core/kesus/tablet/probes.cpp index 0bf599b04b6..df2d18c8a6e 100644 --- a/ydb/core/kesus/tablet/probes.cpp +++ b/ydb/core/kesus/tablet/probes.cpp @@ -1,3 +1,3 @@ -#include "probes.h" - -LWTRACE_DEFINE_PROVIDER(KESUS_QUOTER_PROVIDER); +#include "probes.h" + +LWTRACE_DEFINE_PROVIDER(KESUS_QUOTER_PROVIDER); diff --git a/ydb/core/kesus/tablet/probes.h b/ydb/core/kesus/tablet/probes.h index 077375fb368..d970b8d780a 100644 --- a/ydb/core/kesus/tablet/probes.h +++ b/ydb/core/kesus/tablet/probes.h @@ -1,42 +1,42 @@ -#pragma once -#include <library/cpp/actors/core/actorid.h> +#pragma once +#include <library/cpp/actors/core/actorid.h> #include <library/cpp/lwtrace/all.h> - -#include <util/string/builder.h> - -#include <type_traits> -#include <limits> - -struct TActorIdParam { - using TStoreType = TString; + +#include <util/string/builder.h> + +#include <type_traits> +#include <limits> + +struct TActorIdParam { + using TStoreType = TString; using TFuncParam = typename TTypeTraits<NActors::TActorId>::TFuncParam; - - inline static void ToString(typename TTypeTraits<TStoreType>::TFuncParam stored, TString* out) { - *out = stored; - } - - inline static TStoreType ToStoreType(TFuncParam v) { - return TStringBuilder() << v; - } -}; - -#define KESUS_QUOTER_PROVIDER(PROBE, EVENT, GROUPS, TYPES, NAMES) \ - PROBE(ResourceProcess, GROUPS("QuoterResource"), \ - TYPES(TString, TString, TInstant, bool, size_t), \ - NAMES("quoter", "resource", "timestampSec", "active", "activeChildren")) \ - PROBE(ResourceAccumulateResource, GROUPS("QuoterResource"), \ - TYPES(TString, TString, TInstant, bool, double), \ - NAMES("quoter", "resource", "timestampSec", "active", "spent")) \ - PROBE(ResourceActivate, GROUPS("QuoterResource"), \ - TYPES(TString, TString), \ - NAMES("quoter", "resource")) \ - PROBE(ResourceDeactivate, GROUPS("QuoterResource"), \ - TYPES(TString, TString), \ - NAMES("quoter", "resource")) \ - PROBE(ResourceGiveToChild, GROUPS("QuoterResource"), \ - TYPES(TString, TString, TInstant, double, ui32), \ - NAMES("quoter", "resource", "timestampSec", "giveAmount", "childWeight")) \ - \ + + inline static void ToString(typename TTypeTraits<TStoreType>::TFuncParam stored, TString* out) { + *out = stored; + } + + inline static TStoreType ToStoreType(TFuncParam v) { + return TStringBuilder() << v; + } +}; + +#define KESUS_QUOTER_PROVIDER(PROBE, EVENT, GROUPS, TYPES, NAMES) \ + PROBE(ResourceProcess, GROUPS("QuoterResource"), \ + TYPES(TString, TString, TInstant, bool, size_t), \ + NAMES("quoter", "resource", "timestampSec", "active", "activeChildren")) \ + PROBE(ResourceAccumulateResource, GROUPS("QuoterResource"), \ + TYPES(TString, TString, TInstant, bool, double), \ + NAMES("quoter", "resource", "timestampSec", "active", "spent")) \ + PROBE(ResourceActivate, GROUPS("QuoterResource"), \ + TYPES(TString, TString), \ + NAMES("quoter", "resource")) \ + PROBE(ResourceDeactivate, GROUPS("QuoterResource"), \ + TYPES(TString, TString), \ + NAMES("quoter", "resource")) \ + PROBE(ResourceGiveToChild, GROUPS("QuoterResource"), \ + TYPES(TString, TString, TInstant, double, ui32), \ + NAMES("quoter", "resource", "timestampSec", "giveAmount", "childWeight")) \ + \ PROBE(ResourceBillSend, GROUPS("QuoterResource", "RateAccounting"), \ TYPES(TString, TString, TString, ui64, TInstant, TInstant, TString, TString, TString, TString, TString, TString), \ NAMES("quoter", "resource", "category", "quantity", "billStartSec", "billEndSec", "version", "schema", "cloudId", "folderId", "resourceId", "sourceId")) \ @@ -68,24 +68,24 @@ 
struct TActorIdParam { TYPES(TString, TString, TInstant, TInstant), \ NAMES("quoter", "resource", "accountTillSec", "accountedSec")) \ \ - PROBE(SessionProcess, GROUPS("QuoterSession"), \ - TYPES(TString, TString, TActorIdParam, TInstant, bool), \ - NAMES("quoter", "resource", "session", "timestampSec", "active")) \ - PROBE(SessionAccumulateResource, GROUPS("QuoterSession"), \ - TYPES(TString, TString, TActorIdParam, TInstant, bool, double), \ - NAMES("quoter", "resource", "session", "timestampSec", "active", "spent")) \ - PROBE(SessionActivate, GROUPS("QuoterSession"), \ - TYPES(TString, TString, TActorIdParam), \ - NAMES("quoter", "resource", "session")) \ - PROBE(SessionDeactivate, GROUPS("QuoterSession"), \ - TYPES(TString, TString, TActorIdParam), \ - NAMES("quoter", "resource", "session")) \ - PROBE(SessionUpdateConsumptionState, GROUPS("QuoterSession"), \ - TYPES(TString, TString, TActorIdParam, bool, double), \ - NAMES("quoter", "resource", "session", "consume", "amount")) \ - PROBE(SessionSend, GROUPS("QuoterSession"), \ - TYPES(TString, TString, TActorIdParam, double), \ - NAMES("quoter", "resource", "session", "amount")) \ - /**/ - -LWTRACE_DECLARE_PROVIDER(KESUS_QUOTER_PROVIDER) + PROBE(SessionProcess, GROUPS("QuoterSession"), \ + TYPES(TString, TString, TActorIdParam, TInstant, bool), \ + NAMES("quoter", "resource", "session", "timestampSec", "active")) \ + PROBE(SessionAccumulateResource, GROUPS("QuoterSession"), \ + TYPES(TString, TString, TActorIdParam, TInstant, bool, double), \ + NAMES("quoter", "resource", "session", "timestampSec", "active", "spent")) \ + PROBE(SessionActivate, GROUPS("QuoterSession"), \ + TYPES(TString, TString, TActorIdParam), \ + NAMES("quoter", "resource", "session")) \ + PROBE(SessionDeactivate, GROUPS("QuoterSession"), \ + TYPES(TString, TString, TActorIdParam), \ + NAMES("quoter", "resource", "session")) \ + PROBE(SessionUpdateConsumptionState, GROUPS("QuoterSession"), \ + TYPES(TString, TString, TActorIdParam, bool, double), \ + NAMES("quoter", "resource", "session", "consume", "amount")) \ + PROBE(SessionSend, GROUPS("QuoterSession"), \ + TYPES(TString, TString, TActorIdParam, double), \ + NAMES("quoter", "resource", "session", "amount")) \ + /**/ + +LWTRACE_DECLARE_PROVIDER(KESUS_QUOTER_PROVIDER) diff --git a/ydb/core/kesus/tablet/quoter_performance_test/main.cpp b/ydb/core/kesus/tablet/quoter_performance_test/main.cpp index 0fca0239d72..5e64810a390 100644 --- a/ydb/core/kesus/tablet/quoter_performance_test/main.cpp +++ b/ydb/core/kesus/tablet/quoter_performance_test/main.cpp @@ -1,40 +1,40 @@ #include <ydb/core/kesus/tablet/ut_helpers.h> -#include "options.h" -#include "session.h" -#include "test_state.h" - -#include <util/generic/ptr.h> -#include <util/generic/yexception.h> - -using namespace NKikimr; -using namespace NKikimr::NKesus; - -void Test(const TOptions& options) { - Cerr << "Run test with " << options.ResourcesCount << " resources and " << options.SessionsCountPerResource << " sessions per each resource." << Endl; - Cerr << "Test time: " << options.TestTime << "." 
<< Endl; - - TTestContext ctx; - ctx.Setup(1, true); - ctx.Runtime->SetLogPriority(NKikimrServices::KESUS_TABLET, NLog::PRI_WARN); - - ctx.Runtime->SetDispatchTimeout(TDuration::Minutes(10)); - - auto state = MakeIntrusive<TTestState>(options, ctx); - for (size_t session = 0; session < state->Options.SessionsCountPerResource; ++session) { - ctx.Runtime->Register(new TSessionActor(state)); - } - - for (size_t session = 0; session < state->Options.SessionsCountPerResource; ++session) { - ctx.ExpectEdgeEvent<TEvents::TEvWakeup>(state->EdgeActorId); - } - - for (size_t res = 0; res < options.ResourcesCount; ++res) { - Cerr << "\"" << GetResourceName(res) << "\": " << state->ResourcesState[res].ConsumedAmount << " units." << Endl; - } - Cerr << "Expected: " << (options.TestTime.Seconds() * options.MaxUnitsPerSecond) << " units per resource." << Endl; -} - -int main(int argc, const char** argv) { - TOptions options(argc, argv); - Test(options); -} +#include "options.h" +#include "session.h" +#include "test_state.h" + +#include <util/generic/ptr.h> +#include <util/generic/yexception.h> + +using namespace NKikimr; +using namespace NKikimr::NKesus; + +void Test(const TOptions& options) { + Cerr << "Run test with " << options.ResourcesCount << " resources and " << options.SessionsCountPerResource << " sessions per each resource." << Endl; + Cerr << "Test time: " << options.TestTime << "." << Endl; + + TTestContext ctx; + ctx.Setup(1, true); + ctx.Runtime->SetLogPriority(NKikimrServices::KESUS_TABLET, NLog::PRI_WARN); + + ctx.Runtime->SetDispatchTimeout(TDuration::Minutes(10)); + + auto state = MakeIntrusive<TTestState>(options, ctx); + for (size_t session = 0; session < state->Options.SessionsCountPerResource; ++session) { + ctx.Runtime->Register(new TSessionActor(state)); + } + + for (size_t session = 0; session < state->Options.SessionsCountPerResource; ++session) { + ctx.ExpectEdgeEvent<TEvents::TEvWakeup>(state->EdgeActorId); + } + + for (size_t res = 0; res < options.ResourcesCount; ++res) { + Cerr << "\"" << GetResourceName(res) << "\": " << state->ResourcesState[res].ConsumedAmount << " units." << Endl; + } + Cerr << "Expected: " << (options.TestTime.Seconds() * options.MaxUnitsPerSecond) << " units per resource." 
<< Endl; +} + +int main(int argc, const char** argv) { + TOptions options(argc, argv); + Test(options); +} diff --git a/ydb/core/kesus/tablet/quoter_performance_test/options.h b/ydb/core/kesus/tablet/quoter_performance_test/options.h index 5f12305ed7e..ba22d6a3bb0 100644 --- a/ydb/core/kesus/tablet/quoter_performance_test/options.h +++ b/ydb/core/kesus/tablet/quoter_performance_test/options.h @@ -1,59 +1,59 @@ -#pragma once +#pragma once #include <library/cpp/getopt/opt.h> - -#include <util/datetime/base.h> -#include <util/generic/string.h> -#include <util/generic/yexception.h> - -struct TOptions { - size_t ResourcesCount = 0; - size_t SessionsCountPerResource = 0; - TDuration TestTime; - - double MaxUnitsPerSecond; - -public: - TOptions(int argc, const char** argv) { - try { - ParseOptions(argc, argv); - ValidateOptions(); - } catch (const std::exception&) { - Cerr << "Failed to get options: " << CurrentExceptionMessage() << Endl; - exit(1); - } - } - -private: - void ParseOptions(int argc, const char** argv) { - NLastGetopt::TOpts opts; - opts.SetTitle("Quoter performance test program"); - opts.SetFreeArgsNum(0); - opts.AddHelpOption('h'); - opts.AddVersionOption(); - - opts.AddLongOption('r', "resources-count", "resources count in test") - .DefaultValue(100) - .RequiredArgument("COUNT") - .StoreResult(&ResourcesCount); - opts.AddLongOption('s', "sessions-count", "sessions count per resource") - .DefaultValue(100) - .RequiredArgument("COUNT") - .StoreResult(&SessionsCountPerResource); - opts.AddLongOption('t', "test-time", "test time") - .DefaultValue(TDuration::Seconds(10)) - .RequiredArgument("TIME") - .StoreResult(&TestTime); - opts.AddLongOption('u', "max-units-per-second", "quota value") - .DefaultValue(100.0) - .StoreResult(&MaxUnitsPerSecond); - - NLastGetopt::TOptsParseResult res(&opts, argc, argv); - } - - void ValidateOptions() { - Y_ENSURE(ResourcesCount > 0); - Y_ENSURE(ResourcesCount < 10000000); - Y_ENSURE(SessionsCountPerResource > 0); - Y_ENSURE(SessionsCountPerResource < 10000000); - } -}; + +#include <util/datetime/base.h> +#include <util/generic/string.h> +#include <util/generic/yexception.h> + +struct TOptions { + size_t ResourcesCount = 0; + size_t SessionsCountPerResource = 0; + TDuration TestTime; + + double MaxUnitsPerSecond; + +public: + TOptions(int argc, const char** argv) { + try { + ParseOptions(argc, argv); + ValidateOptions(); + } catch (const std::exception&) { + Cerr << "Failed to get options: " << CurrentExceptionMessage() << Endl; + exit(1); + } + } + +private: + void ParseOptions(int argc, const char** argv) { + NLastGetopt::TOpts opts; + opts.SetTitle("Quoter performance test program"); + opts.SetFreeArgsNum(0); + opts.AddHelpOption('h'); + opts.AddVersionOption(); + + opts.AddLongOption('r', "resources-count", "resources count in test") + .DefaultValue(100) + .RequiredArgument("COUNT") + .StoreResult(&ResourcesCount); + opts.AddLongOption('s', "sessions-count", "sessions count per resource") + .DefaultValue(100) + .RequiredArgument("COUNT") + .StoreResult(&SessionsCountPerResource); + opts.AddLongOption('t', "test-time", "test time") + .DefaultValue(TDuration::Seconds(10)) + .RequiredArgument("TIME") + .StoreResult(&TestTime); + opts.AddLongOption('u', "max-units-per-second", "quota value") + .DefaultValue(100.0) + .StoreResult(&MaxUnitsPerSecond); + + NLastGetopt::TOptsParseResult res(&opts, argc, argv); + } + + void ValidateOptions() { + Y_ENSURE(ResourcesCount > 0); + Y_ENSURE(ResourcesCount < 10000000); + Y_ENSURE(SessionsCountPerResource > 0); + 
Y_ENSURE(SessionsCountPerResource < 10000000); + } +}; diff --git a/ydb/core/kesus/tablet/quoter_performance_test/session.h b/ydb/core/kesus/tablet/quoter_performance_test/session.h index 99457ca6346..2680bef6d45 100644 --- a/ydb/core/kesus/tablet/quoter_performance_test/session.h +++ b/ydb/core/kesus/tablet/quoter_performance_test/session.h @@ -1,97 +1,97 @@ -#pragma once -#include "test_state.h" +#pragma once +#include "test_state.h" #include <ydb/core/kesus/tablet/ut_helpers.h> - -#include <util/generic/yexception.h> - -#include <limits> - -using namespace NKikimr; -using namespace NKikimr::NKesus; - -class TSessionActor : public TActorBootstrapped<TSessionActor> { -public: - TSessionActor(TIntrusivePtr<TTestState> state) - : State(std::move(state)) - , ResState(State->Options.ResourcesCount) - { - } - - void Bootstrap(const NActors::TActorContext&) { - Become(&TSessionActor::StateFunc); - TabletPipe = Register(NTabletPipe::CreateClient(SelfId(), State->TabletId)); - Subscribe(); - } - - STFUNC(StateFunc) { - Y_UNUSED(ctx); - switch (ev->GetTypeRewrite()) { - hFunc(TEvKesus::TEvSubscribeOnResourcesResult, Handle); - hFunc(TEvKesus::TEvResourcesAllocated, Handle); - cFunc(TEvents::TEvWakeup::EventType, EndSessionAndDie); - hFunc(TEvTabletPipe::TEvClientDestroyed, Handle); - hFunc(TEvTabletPipe::TEvClientConnected, Handle); - } - } - - void Subscribe() { - auto req = MakeHolder<TEvKesus::TEvSubscribeOnResources>(); + +#include <util/generic/yexception.h> + +#include <limits> + +using namespace NKikimr; +using namespace NKikimr::NKesus; + +class TSessionActor : public TActorBootstrapped<TSessionActor> { +public: + TSessionActor(TIntrusivePtr<TTestState> state) + : State(std::move(state)) + , ResState(State->Options.ResourcesCount) + { + } + + void Bootstrap(const NActors::TActorContext&) { + Become(&TSessionActor::StateFunc); + TabletPipe = Register(NTabletPipe::CreateClient(SelfId(), State->TabletId)); + Subscribe(); + } + + STFUNC(StateFunc) { + Y_UNUSED(ctx); + switch (ev->GetTypeRewrite()) { + hFunc(TEvKesus::TEvSubscribeOnResourcesResult, Handle); + hFunc(TEvKesus::TEvResourcesAllocated, Handle); + cFunc(TEvents::TEvWakeup::EventType, EndSessionAndDie); + hFunc(TEvTabletPipe::TEvClientDestroyed, Handle); + hFunc(TEvTabletPipe::TEvClientConnected, Handle); + } + } + + void Subscribe() { + auto req = MakeHolder<TEvKesus::TEvSubscribeOnResources>(); ActorIdToProto(SelfId(), req->Record.MutableActorID()); - req->Record.MutableResources()->Reserve(State->Options.ResourcesCount); - - //const double amount = State->Options.MaxUnitsPerSecond * State->Options.TestTime.Seconds() * 2.0; - const double amount = std::numeric_limits<double>::infinity(); - for (size_t res = 0; res < State->Options.ResourcesCount; ++res) { - auto* reqRes = req->Record.AddResources(); - reqRes->SetResourcePath(GetResourceName(res)); - reqRes->SetStartConsuming(true); - reqRes->SetInitialAmount(amount); - } - NTabletPipe::SendData(SelfId(), TabletPipe, req.Release()); - } - - void Handle(TEvKesus::TEvSubscribeOnResourcesResult::TPtr& ev) { - Y_ENSURE(ev->Get()->Record.ResultsSize() == State->Options.ResourcesCount); - ScheduleStop(); - } - - void ScheduleStop() { - Schedule(State->Options.TestTime, new TEvents::TEvWakeup()); - } - - void Handle(TEvKesus::TEvResourcesAllocated::TPtr& ev) { - for (const auto& res : ev->Get()->Record.GetResourcesInfo()) { - Y_ENSURE(res.GetStateNotification().GetStatus() == Ydb::StatusIds::SUCCESS); - - const auto resIndex = State->ResId2StateIndex.find(res.GetResourceId()); - 
Y_ENSURE(resIndex != State->ResId2StateIndex.end()); - ResState[resIndex->second].ConsumedAmount += res.GetAmount(); - //Cerr << ResState[resIndex->second].ConsumedAmount << Endl; - } - } - - void EndSessionAndDie() { - with_lock (State->Mutex) { - for (size_t res = 0; res < State->Options.ResourcesCount; ++res) { - State->ResourcesState[res].ConsumedAmount += ResState[res].ConsumedAmount; - } - } - Send(State->EdgeActorId, new TEvents::TEvWakeup()); - NTabletPipe::CloseClient(SelfId(), TabletPipe); - PassAway(); - } - - void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr&) { - Y_FAIL(); - } - - void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev) { - Y_VERIFY(ev->Get()->Status == NKikimrProto::OK); - } - -private: - TIntrusivePtr<TTestState> State; - TInstant StartSessionTime; + req->Record.MutableResources()->Reserve(State->Options.ResourcesCount); + + //const double amount = State->Options.MaxUnitsPerSecond * State->Options.TestTime.Seconds() * 2.0; + const double amount = std::numeric_limits<double>::infinity(); + for (size_t res = 0; res < State->Options.ResourcesCount; ++res) { + auto* reqRes = req->Record.AddResources(); + reqRes->SetResourcePath(GetResourceName(res)); + reqRes->SetStartConsuming(true); + reqRes->SetInitialAmount(amount); + } + NTabletPipe::SendData(SelfId(), TabletPipe, req.Release()); + } + + void Handle(TEvKesus::TEvSubscribeOnResourcesResult::TPtr& ev) { + Y_ENSURE(ev->Get()->Record.ResultsSize() == State->Options.ResourcesCount); + ScheduleStop(); + } + + void ScheduleStop() { + Schedule(State->Options.TestTime, new TEvents::TEvWakeup()); + } + + void Handle(TEvKesus::TEvResourcesAllocated::TPtr& ev) { + for (const auto& res : ev->Get()->Record.GetResourcesInfo()) { + Y_ENSURE(res.GetStateNotification().GetStatus() == Ydb::StatusIds::SUCCESS); + + const auto resIndex = State->ResId2StateIndex.find(res.GetResourceId()); + Y_ENSURE(resIndex != State->ResId2StateIndex.end()); + ResState[resIndex->second].ConsumedAmount += res.GetAmount(); + //Cerr << ResState[resIndex->second].ConsumedAmount << Endl; + } + } + + void EndSessionAndDie() { + with_lock (State->Mutex) { + for (size_t res = 0; res < State->Options.ResourcesCount; ++res) { + State->ResourcesState[res].ConsumedAmount += ResState[res].ConsumedAmount; + } + } + Send(State->EdgeActorId, new TEvents::TEvWakeup()); + NTabletPipe::CloseClient(SelfId(), TabletPipe); + PassAway(); + } + + void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr&) { + Y_FAIL(); + } + + void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev) { + Y_VERIFY(ev->Get()->Status == NKikimrProto::OK); + } + +private: + TIntrusivePtr<TTestState> State; + TInstant StartSessionTime; TActorId TabletPipe; - std::vector<TTestState::TResourceState> ResState; -}; + std::vector<TTestState::TResourceState> ResState; +}; diff --git a/ydb/core/kesus/tablet/quoter_performance_test/test_state.h b/ydb/core/kesus/tablet/quoter_performance_test/test_state.h index e4d4766ef44..9abdb5a385a 100644 --- a/ydb/core/kesus/tablet/quoter_performance_test/test_state.h +++ b/ydb/core/kesus/tablet/quoter_performance_test/test_state.h @@ -1,54 +1,54 @@ -#pragma once -#include "options.h" +#pragma once +#include "options.h" #include <ydb/core/kesus/tablet/ut_helpers.h> - -#include <util/generic/ptr.h> -#include <util/system/mutex.h> - -using namespace NKikimr; -using namespace NKikimr::NKesus; - -inline TString GetResourceName(size_t i) { - return TStringBuilder() << "/Root/Res" << i; -} - -struct TTestState : public TAtomicRefCount<TTestState> { - TTestState(const 
TOptions& options, TTestContext& ctx) - : Options(options) - , ResourcesState(options.ResourcesCount) - , EdgeActorId(ctx.Runtime->AllocateEdgeActor()) - { - TabletId = ctx.TabletId; - Y_ENSURE(TabletId != 0); - CreateResources(ctx); - } - - void CreateResources(TTestContext& ctx) { - { // Root - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(Options.MaxUnitsPerSecond * Options.ResourcesCount); - Cerr << "Add resource \"/Root\": " << cfg << "." << Endl; - ctx.AddQuoterResource("/Root", cfg); - } - - for (size_t res = 0; res < Options.ResourcesCount; ++res) { - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(Options.MaxUnitsPerSecond); - Cerr << "Add resource \"" << GetResourceName(res) << "\": " << cfg << "." << Endl; - ResourcesState[res].ResourceId = ctx.AddQuoterResource(GetResourceName(res), cfg); - ResId2StateIndex[ResourcesState[res].ResourceId] = res; - } - } - - struct TResourceState { - ui64 ResourceId = 0; - double ConsumedAmount = 0; - }; - - const TOptions& Options; - ui64 TabletId = 0; - std::vector<TResourceState> ResourcesState; + +#include <util/generic/ptr.h> +#include <util/system/mutex.h> + +using namespace NKikimr; +using namespace NKikimr::NKesus; + +inline TString GetResourceName(size_t i) { + return TStringBuilder() << "/Root/Res" << i; +} + +struct TTestState : public TAtomicRefCount<TTestState> { + TTestState(const TOptions& options, TTestContext& ctx) + : Options(options) + , ResourcesState(options.ResourcesCount) + , EdgeActorId(ctx.Runtime->AllocateEdgeActor()) + { + TabletId = ctx.TabletId; + Y_ENSURE(TabletId != 0); + CreateResources(ctx); + } + + void CreateResources(TTestContext& ctx) { + { // Root + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(Options.MaxUnitsPerSecond * Options.ResourcesCount); + Cerr << "Add resource \"/Root\": " << cfg << "." << Endl; + ctx.AddQuoterResource("/Root", cfg); + } + + for (size_t res = 0; res < Options.ResourcesCount; ++res) { + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(Options.MaxUnitsPerSecond); + Cerr << "Add resource \"" << GetResourceName(res) << "\": " << cfg << "." 
<< Endl; + ResourcesState[res].ResourceId = ctx.AddQuoterResource(GetResourceName(res), cfg); + ResId2StateIndex[ResourcesState[res].ResourceId] = res; + } + } + + struct TResourceState { + ui64 ResourceId = 0; + double ConsumedAmount = 0; + }; + + const TOptions& Options; + ui64 TabletId = 0; + std::vector<TResourceState> ResourcesState; TActorId EdgeActorId; - THashMap<ui64, size_t> ResId2StateIndex; - TMutex Mutex; -}; + THashMap<ui64, size_t> ResId2StateIndex; + TMutex Mutex; +}; diff --git a/ydb/core/kesus/tablet/quoter_performance_test/ya.make b/ydb/core/kesus/tablet/quoter_performance_test/ya.make index c256ff5875f..c21ff249b45 100644 --- a/ydb/core/kesus/tablet/quoter_performance_test/ya.make +++ b/ydb/core/kesus/tablet/quoter_performance_test/ya.make @@ -1,24 +1,24 @@ -PROGRAM() - +PROGRAM() + OWNER( galaxycrab g:kikimr ) - + SRCDIR(ydb/core/kesus/tablet) - -PEERDIR( + +PEERDIR( library/cpp/getopt library/cpp/testing/unittest ADDINCL ydb/core/kesus/tablet ydb/core/testlib -) - +) + YQL_LAST_ABI_VERSION() -SRCS( - main.cpp - ut_helpers.cpp -) - -END() +SRCS( + main.cpp + ut_helpers.cpp +) + +END() diff --git a/ydb/core/kesus/tablet/quoter_resource_tree.cpp b/ydb/core/kesus/tablet/quoter_resource_tree.cpp index 3f9d6e56530..33682c35834 100644 --- a/ydb/core/kesus/tablet/quoter_resource_tree.cpp +++ b/ydb/core/kesus/tablet/quoter_resource_tree.cpp @@ -1,593 +1,593 @@ -#include "quoter_resource_tree.h" - -#include "probes.h" +#include "quoter_resource_tree.h" +#include "probes.h" + #include <ydb/core/base/path.h> - -#include <util/string/builder.h> + +#include <util/string/builder.h> #include <util/generic/maybe.h> - -#include <algorithm> -#include <cmath> -#include <iterator> -#include <vector> -#include <limits> - -LWTRACE_USING(KESUS_QUOTER_PROVIDER); - -namespace NKikimr { -namespace NKesus { - -TString CanonizeQuoterResourcePath(const TVector<TString>& path) { - return JoinPath(path); // Like canonic kikimr path, but without first slash -} - -TString CanonizeQuoterResourcePath(const TString& path) { - return CanonizeQuoterResourcePath(SplitPath(path)); -} - -namespace { - -static constexpr double TICKS_PER_SECOND = 10.0; // every 100 ms -static constexpr double RESOURCE_BURST_COEFFICIENT = 0.0; -static constexpr double EPSILON_COEFFICIENT = 0.000001; -static constexpr int64_t ULPS_ACCURACY = 4; -static const TString RESOURCE_COUNTERS_LABEL = "resource"; -static const TString ALLOCATED_COUNTER_NAME = "Allocated"; -static const TString SESSIONS_COUNTER_NAME = "Sessions"; -static const TString ACTIVE_SESSIONS_COUNTER_NAME = "ActiveSessions"; -static const TString LIMIT_COUNTER_NAME = "Limit"; -static const TString RESOURCE_SUBSCRIPTIONS_COUNTER_NAME = "ResourceSubscriptions"; -static const TString UNKNOWN_RESOURCE_SUBSCRIPTIONS_COUNTER_NAME = "UnknownResourceSubscriptions"; -static const TString RESOURCE_CONSUMPTION_STARTS_COUNTER_NAME = "ResourceConsumptionStarts"; -static const TString RESOURCE_CONSUMPTION_STOPS_COUNTER_NAME = "ResourceConsumptionStops"; -static const TString ELAPSED_MICROSEC_ON_RESOURCE_ALLOCATION_COUNTER_NAME = "ElapsedMicrosecOnResourceAllocation"; -static const TString TICK_PROCESSOR_TASKS_PROCESSED_COUNTER_NAME = "TickProcessorTasksProcessed"; -static const TString ELAPSED_MICROSEC_WHEN_RESOURCE_ACTIVE_COUNTER_NAME = "ElapsedMicrosecWhenResourceActive"; - -bool ValidResourcePathSymbols[256] = {}; - -bool MakeValidResourcePathSymbols() { + +#include <algorithm> +#include <cmath> +#include <iterator> +#include <vector> +#include <limits> + 
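// A minimal sketch of the pacing arithmetic implied by the constants defined
// further down in this file (SketchTickQuantum is an illustrative name, not a
// helper that exists here): with TICKS_PER_SECOND == 10.0 a tick is 100 ms, so a
// resource with MaxUnitsPerSecond == 100.0 refills 10 units per tick, its filling
// epsilon is quantum * EPSILON_COEFFICIENT = 1e-5 units, and NextTick() below
// rounds a timestamp up to the next tick boundary (123 ms -> 200 ms; a time
// already on a boundary still advances one full tick).
constexpr double SketchTickQuantum(double maxUnitsPerSecond, double ticksPerSecond = 10.0) {
    return maxUnitsPerSecond / ticksPerSecond; // 100.0 units/s -> 10 units per 100 ms tick
}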
+LWTRACE_USING(KESUS_QUOTER_PROVIDER); + +namespace NKikimr { +namespace NKesus { + +TString CanonizeQuoterResourcePath(const TVector<TString>& path) { + return JoinPath(path); // Like canonic kikimr path, but without first slash +} + +TString CanonizeQuoterResourcePath(const TString& path) { + return CanonizeQuoterResourcePath(SplitPath(path)); +} + +namespace { + +static constexpr double TICKS_PER_SECOND = 10.0; // every 100 ms +static constexpr double RESOURCE_BURST_COEFFICIENT = 0.0; +static constexpr double EPSILON_COEFFICIENT = 0.000001; +static constexpr int64_t ULPS_ACCURACY = 4; +static const TString RESOURCE_COUNTERS_LABEL = "resource"; +static const TString ALLOCATED_COUNTER_NAME = "Allocated"; +static const TString SESSIONS_COUNTER_NAME = "Sessions"; +static const TString ACTIVE_SESSIONS_COUNTER_NAME = "ActiveSessions"; +static const TString LIMIT_COUNTER_NAME = "Limit"; +static const TString RESOURCE_SUBSCRIPTIONS_COUNTER_NAME = "ResourceSubscriptions"; +static const TString UNKNOWN_RESOURCE_SUBSCRIPTIONS_COUNTER_NAME = "UnknownResourceSubscriptions"; +static const TString RESOURCE_CONSUMPTION_STARTS_COUNTER_NAME = "ResourceConsumptionStarts"; +static const TString RESOURCE_CONSUMPTION_STOPS_COUNTER_NAME = "ResourceConsumptionStops"; +static const TString ELAPSED_MICROSEC_ON_RESOURCE_ALLOCATION_COUNTER_NAME = "ElapsedMicrosecOnResourceAllocation"; +static const TString TICK_PROCESSOR_TASKS_PROCESSED_COUNTER_NAME = "TickProcessorTasksProcessed"; +static const TString ELAPSED_MICROSEC_WHEN_RESOURCE_ACTIVE_COUNTER_NAME = "ElapsedMicrosecWhenResourceActive"; + +bool ValidResourcePathSymbols[256] = {}; + +bool MakeValidResourcePathSymbols() { char symbols[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-/:#"; - for (size_t i = 0; i < Y_ARRAY_SIZE(symbols) - 1; ++i) { - ValidResourcePathSymbols[static_cast<unsigned char>(symbols[i])] = true; - } - return true; -} - -const bool ValidResourcePathSymbolsAreInitialized = MakeValidResourcePathSymbols(); - -TInstant NextTick(TInstant time, TDuration tickSize) { - const ui64 timeUs = time.MicroSeconds(); - const ui64 tickUs = tickSize.MicroSeconds(); - const ui64 r = timeUs % tickUs; - const TInstant next = TInstant::MicroSeconds(timeUs - r + tickUs); - Y_ASSERT(next > time); - return next; -} - -// Doubles equality comparison -// See details in https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ -union TDoubleUnion { - TDoubleUnion(double value) - : FloatValue(value) - { - } - - bool IsNegative() const { - return IntValue < 0; - } - - int64_t IntValue; - double FloatValue; - static_assert(sizeof(IntValue) == sizeof(FloatValue)); -}; - -bool AlmostEqualUlpsAndAbs(double a, double b, double maxDiff, int64_t maxUlpsDiff) { - // Check if the numbers are really close -- needed - // when comparing numbers near zero. - const double absDiff = std::abs(a - b); - if (absDiff <= maxDiff) - return true; - - const TDoubleUnion uA(a); - const TDoubleUnion uB(b); - - // Different signs means they do not match. - if (uA.IsNegative() != uB.IsNegative()) - return false; - - // Find the difference in ULPs. 
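// For two finite doubles of the same sign, the reinterpreted IntValue changes by
// exactly 1 between neighbouring representable values, so |IntValue(a) - IntValue(b)|
// counts how many representable doubles lie between a and b ("units in the last
// place"). With ULPS_ACCURACY == 4, values at most 4 representable doubles apart
// compare equal; e.g. 1.0 and std::nextafter(1.0, 2.0) differ by exactly 1 ULP.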
- const int64_t ulpsDiff = std::abs(uA.IntValue - uB.IntValue); - return ulpsDiff <= maxUlpsDiff; -} - -class TRoundRobinListItem { -public: - TRoundRobinListItem() - : Prev(this) - , Next(this) - { - } - - void DeleteFromRoundRobinList() { - Prev->Next = Next; - Next->Prev = Prev; - Prev = this; - Next = this; - } - - void InsertBeforeInRoundRobinList(TRoundRobinListItem* item) { - item->Prev = Prev; - item->Next = this; - Prev->Next = item; - Prev = item; - } - - template <class T> - T* GetNext() const { - return static_cast<T*>(Next); - } - -protected: - TRoundRobinListItem* Prev; - TRoundRobinListItem* Next; -}; - -// Child resource or session for Hierarchical DRR algorithm. -class THierarchicalDRRResourceConsumer : public TRoundRobinListItem { -public: - virtual ~THierarchicalDRRResourceConsumer() = default; - - virtual double AccumulateResource(double amount, TInstant now) = 0; // returns spent amount of resource. - - virtual ui32 GetWeight() const = 0; -}; - -// Resource in case of hierarchical DRR algorithm. -class THierarhicalDRRQuoterResourceTree : public TQuoterResourceTree, public THierarchicalDRRResourceConsumer { -public: - using TQuoterResourceTree::TQuoterResourceTree; - - THierarhicalDRRQuoterResourceTree* GetParent() { - return static_cast<THierarhicalDRRQuoterResourceTree*>(Parent); - } - - const THierarhicalDRRQuoterResourceTree* GetParent() const { - return static_cast<const THierarhicalDRRQuoterResourceTree*>(Parent); - } - - bool ValidateProps(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage) override; - - void CalcParameters() override; + for (size_t i = 0; i < Y_ARRAY_SIZE(symbols) - 1; ++i) { + ValidResourcePathSymbols[static_cast<unsigned char>(symbols[i])] = true; + } + return true; +} + +const bool ValidResourcePathSymbolsAreInitialized = MakeValidResourcePathSymbols(); + +TInstant NextTick(TInstant time, TDuration tickSize) { + const ui64 timeUs = time.MicroSeconds(); + const ui64 tickUs = tickSize.MicroSeconds(); + const ui64 r = timeUs % tickUs; + const TInstant next = TInstant::MicroSeconds(timeUs - r + tickUs); + Y_ASSERT(next > time); + return next; +} + +// Doubles equality comparison +// See details in https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ +union TDoubleUnion { + TDoubleUnion(double value) + : FloatValue(value) + { + } + + bool IsNegative() const { + return IntValue < 0; + } + + int64_t IntValue; + double FloatValue; + static_assert(sizeof(IntValue) == sizeof(FloatValue)); +}; + +bool AlmostEqualUlpsAndAbs(double a, double b, double maxDiff, int64_t maxUlpsDiff) { + // Check if the numbers are really close -- needed + // when comparing numbers near zero. + const double absDiff = std::abs(a - b); + if (absDiff <= maxDiff) + return true; + + const TDoubleUnion uA(a); + const TDoubleUnion uB(b); + + // Different signs means they do not match. + if (uA.IsNegative() != uB.IsNegative()) + return false; + + // Find the difference in ULPs. 
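// A self-contained sketch of the same check using std::bit_cast (C++20) instead of
// the type-punning union; AlmostEqualSketch and its includes are illustrative only.
#include <bit>
#include <cmath>
#include <cstdlib>
#include <cstdint>

bool AlmostEqualSketch(double a, double b, double maxDiff, int64_t maxUlps) {
    if (std::abs(a - b) <= maxDiff)
        return true;                          // near zero: absolute tolerance wins
    const auto ia = std::bit_cast<int64_t>(a);
    const auto ib = std::bit_cast<int64_t>(b);
    if ((ia < 0) != (ib < 0))
        return false;                         // different signs never match here
    return std::abs(ia - ib) <= maxUlps;      // ULP distance between the two values
}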
+ const int64_t ulpsDiff = std::abs(uA.IntValue - uB.IntValue); + return ulpsDiff <= maxUlpsDiff; +} + +class TRoundRobinListItem { +public: + TRoundRobinListItem() + : Prev(this) + , Next(this) + { + } + + void DeleteFromRoundRobinList() { + Prev->Next = Next; + Next->Prev = Prev; + Prev = this; + Next = this; + } + + void InsertBeforeInRoundRobinList(TRoundRobinListItem* item) { + item->Prev = Prev; + item->Next = this; + Prev->Next = item; + Prev = item; + } + + template <class T> + T* GetNext() const { + return static_cast<T*>(Next); + } + +protected: + TRoundRobinListItem* Prev; + TRoundRobinListItem* Next; +}; + +// Child resource or session for Hierarchical DRR algorithm. +class THierarchicalDRRResourceConsumer : public TRoundRobinListItem { +public: + virtual ~THierarchicalDRRResourceConsumer() = default; + + virtual double AccumulateResource(double amount, TInstant now) = 0; // returns spent amount of resource. + + virtual ui32 GetWeight() const = 0; +}; + +// Resource in case of hierarchical DRR algorithm. +class THierarhicalDRRQuoterResourceTree : public TQuoterResourceTree, public THierarchicalDRRResourceConsumer { +public: + using TQuoterResourceTree::TQuoterResourceTree; + + THierarhicalDRRQuoterResourceTree* GetParent() { + return static_cast<THierarhicalDRRQuoterResourceTree*>(Parent); + } + + const THierarhicalDRRQuoterResourceTree* GetParent() const { + return static_cast<const THierarhicalDRRQuoterResourceTree*>(Parent); + } + + bool ValidateProps(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage) override; + + void CalcParameters() override; void CalcParametersForAccounting(); - + THolder<TQuoterSession> DoCreateSession(const NActors::TActorId& clientId) override; - - void AddActiveChild(THierarchicalDRRResourceConsumer* child, TTickProcessorQueue& queue, TInstant now); - void RemoveActiveChild(THierarchicalDRRResourceConsumer* child); - - double GetBurst() const { - return Burst; - } - - bool IsFull() const { - return FreeResource >= Burst || AlmostEqualUlpsAndAbs(FreeResource, Burst, ResourceFillingEpsilon, ULPS_ACCURACY); - } - - double AccumulateResource(double amount, TInstant now) override; - void DoProcess(TTickProcessorQueue& queue, TInstant now) override; - - double GetResourceTickQuantum() const { - return ResourceTickQuantum; - } - - double GetResourceFillingEpsilon() const { - return ResourceFillingEpsilon; - } - - TDuration GetTickSize() const { - return TickSize; - } - - double GetMaxUnitsPerSecond() const { - return MaxUnitsPerSecond; - } - - ui32 GetWeight() const override { - return Weight; - } - - void ScheduleNextTick(TTickProcessorQueue& queue, TInstant now); - - bool HasActiveChildren() const { - return CurrentActiveChild != nullptr; - } - - void DeactivateIfFull(TInstant now); - - void SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters) override; - - void SetLimitCounter(); - - void RemoveChild(TQuoterResourceTree* child) override; - + + void AddActiveChild(THierarchicalDRRResourceConsumer* child, TTickProcessorQueue& queue, TInstant now); + void RemoveActiveChild(THierarchicalDRRResourceConsumer* child); + + double GetBurst() const { + return Burst; + } + + bool IsFull() const { + return FreeResource >= Burst || AlmostEqualUlpsAndAbs(FreeResource, Burst, ResourceFillingEpsilon, ULPS_ACCURACY); + } + + double AccumulateResource(double amount, TInstant now) override; + void DoProcess(TTickProcessorQueue& queue, TInstant now) override; + + double GetResourceTickQuantum() const { + return 
ResourceTickQuantum; + } + + double GetResourceFillingEpsilon() const { + return ResourceFillingEpsilon; + } + + TDuration GetTickSize() const { + return TickSize; + } + + double GetMaxUnitsPerSecond() const { + return MaxUnitsPerSecond; + } + + ui32 GetWeight() const override { + return Weight; + } + + void ScheduleNextTick(TTickProcessorQueue& queue, TInstant now); + + bool HasActiveChildren() const { + return CurrentActiveChild != nullptr; + } + + void DeactivateIfFull(TInstant now); + + void SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters) override; + + void SetLimitCounter(); + + void RemoveChild(TQuoterResourceTree* child) override; + TInstant Report(const NActors::TActorId& clientId, ui64 resourceId, TInstant start, TDuration interval, const double* values, size_t size, TTickProcessorQueue& queue, TInstant now); void RunAccounting(); -private: - double MaxUnitsPerSecond = 0.0; +private: + double MaxUnitsPerSecond = 0.0; double PrefetchCoefficient = 0.0; double PrefetchWatermark = 0.0; - ui32 Weight = 1; - TDuration TickSize; - ui64 ActiveChildrenWeight = 0; - - double ResourceTickQuantum = 0.0; // incoming quantum - - double Burst = 0.0; - - double ResourceFillingEpsilon = 0.0; - double FreeResource = 0.0; - - bool Active = false; - THierarchicalDRRResourceConsumer* CurrentActiveChild = nullptr; - size_t ActiveChildrenCount = 0; + ui32 Weight = 1; + TDuration TickSize; + ui64 ActiveChildrenWeight = 0; + + double ResourceTickQuantum = 0.0; // incoming quantum + + double Burst = 0.0; + + double ResourceFillingEpsilon = 0.0; + double FreeResource = 0.0; + + bool Active = false; + THierarchicalDRRResourceConsumer* CurrentActiveChild = nullptr; + size_t ActiveChildrenCount = 0; THolder<TRateAccounting> RateAccounting; bool ActiveAccounting = false; -}; - +}; + THolder<TQuoterResourceTree> CreateResource(ui64 resourceId, ui64 parentId, NActors::TActorId kesus, const IBillSink::TPtr& billSink, const NKikimrKesus::TStreamingQuoterResource& props) { - Y_VERIFY(resourceId != parentId); + Y_VERIFY(resourceId != parentId); return MakeHolder<THierarhicalDRRQuoterResourceTree>(resourceId, parentId, kesus, billSink, props); -} - -// Session in case of hierarchical DRR algorithm. -class THierarhicalDRRQuoterSession : public TQuoterSession, public THierarchicalDRRResourceConsumer { -public: +} + +// Session in case of hierarchical DRR algorithm. 
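// The session below is itself a THierarchicalDRRResourceConsumer: it sits in its
// resource's round-robin ring with weight 1, receives a share of the parent's free
// resource each tick via AccumulateResource(), and reports back how much it kept.
// A minimal sketch of that contract (TConsumerSketch is illustrative only; the real
// code additionally clamps sub-epsilon grants to zero):
struct TConsumerSketch {
    double Requested = 0.0;  // amount the client still waits for
    double Free = 0.0;       // granted but not yet delivered to the client
    double Accumulate(double amount, double burst) {
        const double newFree = std::min(Free + amount, Requested + burst);
        const double spent = newFree - Free;   // portion of the offer actually kept
        Free = newFree;
        return spent > 0.0 ? spent : 0.0;      // parent subtracts this from its pool
    }
};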
+class THierarhicalDRRQuoterSession : public TQuoterSession, public THierarchicalDRRResourceConsumer { +public: THierarhicalDRRQuoterSession(const NActors::TActorId& clientId, THierarhicalDRRQuoterResourceTree* resource) - : TQuoterSession(clientId, resource) - { - } - - THierarhicalDRRQuoterResourceTree* GetResource() { - return static_cast<THierarhicalDRRQuoterResourceTree*>(Resource); - } - - const THierarhicalDRRQuoterResourceTree* GetResource() const { - return static_cast<const THierarhicalDRRQuoterResourceTree*>(Resource); - } - - void UpdateConsumptionState(bool consume, double amount, TTickProcessorQueue& queue, TInstant now) override; + : TQuoterSession(clientId, resource) + { + } + + THierarhicalDRRQuoterResourceTree* GetResource() { + return static_cast<THierarhicalDRRQuoterResourceTree*>(Resource); + } + + const THierarhicalDRRQuoterResourceTree* GetResource() const { + return static_cast<const THierarhicalDRRQuoterResourceTree*>(Resource); + } + + void UpdateConsumptionState(bool consume, double amount, TTickProcessorQueue& queue, TInstant now) override; TInstant Account(TInstant start, TDuration interval, const double* values, size_t size, TTickProcessorQueue& queue, TInstant now) override; - void DoProcess(TTickProcessorQueue& queue, TInstant now) override; - - void ScheduleNextTick(TTickProcessorQueue& queue, TInstant now); - - double AccumulateResource(double amount, TInstant now) override; - - ui32 GetWeight() const override { - return 1; - } - - size_t GetLevel() const override { - return GetResource()->GetLevel() + 1; - } - - TTickProcessorId GetTickProcessorId() const override { - return {ClientId, Resource->GetResourceId()}; - } - - void Activate(TTickProcessorQueue& queue, TInstant now) { - Y_VERIFY(!Active); - LWPROBE(SessionActivate, - GetResource()->GetQuoterPath(), - GetResource()->GetPath(), - ClientId); - Active = true; - GetResource()->AddActiveChild(this, queue, now); - const NMonitoring::TDynamicCounters::TCounterPtr& activeSessions = GetResource()->GetCounters().ActiveSessions; - if (activeSessions) { - activeSessions->Inc(); - } - } - - void Deactivate() { - Y_VERIFY(Active); - LWPROBE(SessionDeactivate, - GetResource()->GetQuoterPath(), - GetResource()->GetPath(), - ClientId); - Active = false; - AmountRequested = 0.0; - GetResource()->RemoveActiveChild(this); - const NMonitoring::TDynamicCounters::TCounterPtr& activeSessions = GetResource()->GetCounters().ActiveSessions; - if (activeSessions) { - activeSessions->Dec(); - } - } - - bool IsFull() const { - const double burst = GetBurst(); - return FreeResource >= burst || AlmostEqualUlpsAndAbs(FreeResource, burst, GetResource()->GetResourceFillingEpsilon(), ULPS_ACCURACY); - } - - double GetBurst() const { - return GetResource()->GetBurst(); - } - - void CloseSession(Ydb::StatusIds::StatusCode status, const TString& reason) override; - - void SendAvailableResource(); - - void OnPropsChanged() override; - -private: - double FreeResource = 0.0; -}; - -double THierarhicalDRRQuoterSession::AccumulateResource(double amount, TInstant now) { - const double newFreeResource = Min(FreeResource + amount, AmountRequested + GetBurst()); - double spent = newFreeResource - FreeResource; - FreeResource = newFreeResource; - if (spent < GetResource()->GetResourceFillingEpsilon()) { - spent = 0.0; - } - - LWPROBE(SessionAccumulateResource, - GetResource()->GetQuoterPath(), - GetResource()->GetPath(), - ClientId, - now, - Active, - spent); - - if (AmountRequested < GetResource()->GetResourceFillingEpsilon() && IsFull()) { - 
Deactivate(); - } - - return spent; -} - -void THierarhicalDRRQuoterSession::CloseSession(Ydb::StatusIds::StatusCode status, const TString& reason) { - TQuoterSession::CloseSession(status, reason); - if (Active) { - Deactivate(); - } -} - -void THierarhicalDRRQuoterSession::UpdateConsumptionState(bool consume, double amount, TTickProcessorQueue& queue, TInstant now) { - LWPROBE(SessionUpdateConsumptionState, - GetResource()->GetQuoterPath(), - GetResource()->GetPath(), - ClientId, - consume, - amount); - if (consume) { - AmountRequested = Max(amount, 2.0 * GetResource()->GetResourceFillingEpsilon()); - if (!Active) { - Activate(queue, now); - ScheduleNextTick(queue, now); - } - SendAvailableResource(); - } else { - AmountRequested = 0.0; - const bool full = IsFull(); - if (Active && full) { - Deactivate(); - } else if (!Active && !full) { - Activate(queue, now); - ScheduleNextTick(queue, now); - } - } -} - + void DoProcess(TTickProcessorQueue& queue, TInstant now) override; + + void ScheduleNextTick(TTickProcessorQueue& queue, TInstant now); + + double AccumulateResource(double amount, TInstant now) override; + + ui32 GetWeight() const override { + return 1; + } + + size_t GetLevel() const override { + return GetResource()->GetLevel() + 1; + } + + TTickProcessorId GetTickProcessorId() const override { + return {ClientId, Resource->GetResourceId()}; + } + + void Activate(TTickProcessorQueue& queue, TInstant now) { + Y_VERIFY(!Active); + LWPROBE(SessionActivate, + GetResource()->GetQuoterPath(), + GetResource()->GetPath(), + ClientId); + Active = true; + GetResource()->AddActiveChild(this, queue, now); + const NMonitoring::TDynamicCounters::TCounterPtr& activeSessions = GetResource()->GetCounters().ActiveSessions; + if (activeSessions) { + activeSessions->Inc(); + } + } + + void Deactivate() { + Y_VERIFY(Active); + LWPROBE(SessionDeactivate, + GetResource()->GetQuoterPath(), + GetResource()->GetPath(), + ClientId); + Active = false; + AmountRequested = 0.0; + GetResource()->RemoveActiveChild(this); + const NMonitoring::TDynamicCounters::TCounterPtr& activeSessions = GetResource()->GetCounters().ActiveSessions; + if (activeSessions) { + activeSessions->Dec(); + } + } + + bool IsFull() const { + const double burst = GetBurst(); + return FreeResource >= burst || AlmostEqualUlpsAndAbs(FreeResource, burst, GetResource()->GetResourceFillingEpsilon(), ULPS_ACCURACY); + } + + double GetBurst() const { + return GetResource()->GetBurst(); + } + + void CloseSession(Ydb::StatusIds::StatusCode status, const TString& reason) override; + + void SendAvailableResource(); + + void OnPropsChanged() override; + +private: + double FreeResource = 0.0; +}; + +double THierarhicalDRRQuoterSession::AccumulateResource(double amount, TInstant now) { + const double newFreeResource = Min(FreeResource + amount, AmountRequested + GetBurst()); + double spent = newFreeResource - FreeResource; + FreeResource = newFreeResource; + if (spent < GetResource()->GetResourceFillingEpsilon()) { + spent = 0.0; + } + + LWPROBE(SessionAccumulateResource, + GetResource()->GetQuoterPath(), + GetResource()->GetPath(), + ClientId, + now, + Active, + spent); + + if (AmountRequested < GetResource()->GetResourceFillingEpsilon() && IsFull()) { + Deactivate(); + } + + return spent; +} + +void THierarhicalDRRQuoterSession::CloseSession(Ydb::StatusIds::StatusCode status, const TString& reason) { + TQuoterSession::CloseSession(status, reason); + if (Active) { + Deactivate(); + } +} + +void THierarhicalDRRQuoterSession::UpdateConsumptionState(bool 
consume, double amount, TTickProcessorQueue& queue, TInstant now) { + LWPROBE(SessionUpdateConsumptionState, + GetResource()->GetQuoterPath(), + GetResource()->GetPath(), + ClientId, + consume, + amount); + if (consume) { + AmountRequested = Max(amount, 2.0 * GetResource()->GetResourceFillingEpsilon()); + if (!Active) { + Activate(queue, now); + ScheduleNextTick(queue, now); + } + SendAvailableResource(); + } else { + AmountRequested = 0.0; + const bool full = IsFull(); + if (Active && full) { + Deactivate(); + } else if (!Active && !full) { + Activate(queue, now); + ScheduleNextTick(queue, now); + } + } +} + TInstant THierarhicalDRRQuoterSession::Account(TInstant start, TDuration interval, const double* values, size_t size, TTickProcessorQueue& queue, TInstant now) { return GetResource()->Report(ClientId, GetResource()->GetResourceId(), start, interval, values, size, queue, now); } -void THierarhicalDRRQuoterSession::SendAvailableResource() { - if (FreeResource >= GetResource()->GetResourceFillingEpsilon()) { - if (AmountRequested >= GetResource()->GetResourceFillingEpsilon()) { - const double spent = Min(AmountRequested, FreeResource); - Send(spent); - AmountRequested -= spent; - FreeResource -= spent; - } - if (AmountRequested < GetResource()->GetResourceFillingEpsilon()) { - AmountRequested = 0.0; - FreeResource = Min(FreeResource, GetBurst()); - if (IsFull()) { - Deactivate(); - } - } - } -} - -void THierarhicalDRRQuoterSession::DoProcess(TTickProcessorQueue& queue, TInstant now) { - LWPROBE(SessionProcess, - GetResource()->GetQuoterPath(), - GetResource()->GetPath(), - ClientId, - now, - Active); - if (Active) { - SendAvailableResource(); - if (Active) { - ScheduleNextTick(queue, now); - } - } -} - -void THierarhicalDRRQuoterSession::ScheduleNextTick(TTickProcessorQueue& queue, TInstant now) { - Schedule(queue, NextTick(now, GetResource()->GetTickSize())); -} - -void THierarhicalDRRQuoterSession::OnPropsChanged() { - FreeResource = Min(FreeResource, AmountRequested + GetBurst()); - TQuoterSession::OnPropsChanged(); -} - -} // anonymous namespace - +void THierarhicalDRRQuoterSession::SendAvailableResource() { + if (FreeResource >= GetResource()->GetResourceFillingEpsilon()) { + if (AmountRequested >= GetResource()->GetResourceFillingEpsilon()) { + const double spent = Min(AmountRequested, FreeResource); + Send(spent); + AmountRequested -= spent; + FreeResource -= spent; + } + if (AmountRequested < GetResource()->GetResourceFillingEpsilon()) { + AmountRequested = 0.0; + FreeResource = Min(FreeResource, GetBurst()); + if (IsFull()) { + Deactivate(); + } + } + } +} + +void THierarhicalDRRQuoterSession::DoProcess(TTickProcessorQueue& queue, TInstant now) { + LWPROBE(SessionProcess, + GetResource()->GetQuoterPath(), + GetResource()->GetPath(), + ClientId, + now, + Active); + if (Active) { + SendAvailableResource(); + if (Active) { + ScheduleNextTick(queue, now); + } + } +} + +void THierarhicalDRRQuoterSession::ScheduleNextTick(TTickProcessorQueue& queue, TInstant now) { + Schedule(queue, NextTick(now, GetResource()->GetTickSize())); +} + +void THierarhicalDRRQuoterSession::OnPropsChanged() { + FreeResource = Min(FreeResource, AmountRequested + GetBurst()); + TQuoterSession::OnPropsChanged(); +} + +} // anonymous namespace + TQuoterSession::TQuoterSession(const NActors::TActorId& clientId, TQuoterResourceTree* resource) - : Resource(resource) - , ClientId(clientId) -{ -} - -void TQuoterSession::CloseSession(Ydb::StatusIds::StatusCode status, const TString& reason) { - 
ResourceSink->CloseSession(GetResource()->GetResourceId(), status, reason); -} - -void TQuoterSession::Send(double spent) { - LWPROBE(SessionSend, - GetResource()->GetQuoterPath(), - GetResource()->GetPath(), - ClientId, - spent); - ResourceSink->Send(Resource->GetResourceId(), spent, NeedSendChangedProps ? &GetResource()->GetEffectiveProps() : nullptr); - NeedSendChangedProps = false; - TotalConsumed += spent; - AddAllocatedCounter(spent); -} - -void TQuoterSession::AddAllocatedCounter(double spent) { - TQuoterResourceTree* resource = GetResource(); - Y_ASSERT(resource != nullptr); - do { - resource->GetCounters().AddAllocated(spent); - resource = resource->GetParent(); - } while (resource != nullptr); -} - + : Resource(resource) + , ClientId(clientId) +{ +} + +void TQuoterSession::CloseSession(Ydb::StatusIds::StatusCode status, const TString& reason) { + ResourceSink->CloseSession(GetResource()->GetResourceId(), status, reason); +} + +void TQuoterSession::Send(double spent) { + LWPROBE(SessionSend, + GetResource()->GetQuoterPath(), + GetResource()->GetPath(), + ClientId, + spent); + ResourceSink->Send(Resource->GetResourceId(), spent, NeedSendChangedProps ? &GetResource()->GetEffectiveProps() : nullptr); + NeedSendChangedProps = false; + TotalConsumed += spent; + AddAllocatedCounter(spent); +} + +void TQuoterSession::AddAllocatedCounter(double spent) { + TQuoterResourceTree* resource = GetResource(); + Y_ASSERT(resource != nullptr); + do { + resource->GetCounters().AddAllocated(spent); + resource = resource->GetParent(); + } while (resource != nullptr); +} + TQuoterResourceTree::TQuoterResourceTree(ui64 resourceId, ui64 parentId, NActors::TActorId kesus, const IBillSink::TPtr& billSink, const NKikimrKesus::TStreamingQuoterResource& props) - : ResourceId(resourceId) - , ParentId(parentId) + : ResourceId(resourceId) + , ParentId(parentId) , Kesus(kesus) , BillSink(billSink) - , Props(props) - , EffectiveProps(props) -{ -} - -void TQuoterResourceTree::AddChild(TQuoterResourceTree* child) { - Y_VERIFY(child->Parent == nullptr); - Children.insert(child); - child->Parent = this; -} - -void TQuoterResourceTree::RemoveChild(TQuoterResourceTree* child) { - Y_VERIFY(child->Parent == this); - const auto childIt = Children.find(child); - Y_VERIFY(childIt != Children.end()); - Children.erase(childIt); - child->Parent = nullptr; -} - -bool TQuoterResourceTree::Update(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage) { - if (!ValidateProps(props, errorMessage)) { - return false; - } - const ui64 id = GetResourceId(); - const TString path = GetPath(); - Props = props; - Props.SetResourceId(id); - Props.SetResourcePath(path); - EffectiveProps = Props; - CalcParameters(); - return true; -} - -bool TQuoterResourceTree::ValidateProps(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage) { + , Props(props) + , EffectiveProps(props) +{ +} + +void TQuoterResourceTree::AddChild(TQuoterResourceTree* child) { + Y_VERIFY(child->Parent == nullptr); + Children.insert(child); + child->Parent = this; +} + +void TQuoterResourceTree::RemoveChild(TQuoterResourceTree* child) { + Y_VERIFY(child->Parent == this); + const auto childIt = Children.find(child); + Y_VERIFY(childIt != Children.end()); + Children.erase(childIt); + child->Parent = nullptr; +} + +bool TQuoterResourceTree::Update(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage) { + if (!ValidateProps(props, errorMessage)) { + return false; + } + const ui64 id = GetResourceId(); + const TString 
path = GetPath(); + Props = props; + Props.SetResourceId(id); + Props.SetResourcePath(path); + EffectiveProps = Props; + CalcParameters(); + return true; +} + +bool TQuoterResourceTree::ValidateProps(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage) { Y_UNUSED(props, errorMessage); - return true; -} - -void TQuoterResourceTree::CalcParameters() { - ResourceLevel = 0; - if (Parent) { - ResourceLevel = Parent->ResourceLevel + 1; - } - + return true; +} + +void TQuoterResourceTree::CalcParameters() { + ResourceLevel = 0; + if (Parent) { + ResourceLevel = Parent->ResourceLevel + 1; + } + // Recurse into children - for (TQuoterResourceTree* child : Children) { - child->CalcParameters(); - } -} - -void TQuoterResourceTree::SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters) { - Counters.SetResourceCounters(std::move(resourceCounters)); -} - -void TQuoterResourceTree::UpdateActiveTime(TInstant now) { - if (StartActiveTime && Counters.ElapsedMicrosecWhenResourceActive && now > StartActiveTime) { - const TDuration diff = now - StartActiveTime; - *Counters.ElapsedMicrosecWhenResourceActive += diff.MicroSeconds(); - } - StartActiveTime = now; -} - -void TQuoterResourceTree::StopActiveTime(TInstant now) { - UpdateActiveTime(now); - StartActiveTime = TInstant::Zero(); -} - -void TQuoterResourceTree::TCounters::SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters) { - ResourceCounters = std::move(resourceCounters); - if (ResourceCounters) { - Allocated = ResourceCounters->GetCounter(ALLOCATED_COUNTER_NAME, true); - Sessions = ResourceCounters->GetExpiringCounter(SESSIONS_COUNTER_NAME, false); - ActiveSessions = ResourceCounters->GetExpiringCounter(ACTIVE_SESSIONS_COUNTER_NAME, false); - ElapsedMicrosecWhenResourceActive = ResourceCounters->GetCounter(ELAPSED_MICROSEC_WHEN_RESOURCE_ACTIVE_COUNTER_NAME, true); - } else { - Allocated = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - Sessions = MakeIntrusive<NMonitoring::TCounterForPtr>(false); - ActiveSessions = MakeIntrusive<NMonitoring::TCounterForPtr>(false); - ElapsedMicrosecWhenResourceActive = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - } -} - -void TQuoterResourceTree::TCounters::AddAllocated(double allocated) { - if (Allocated) { - allocated += AllocatedRemainder; - const double counterIncrease = std::floor(allocated); - AllocatedRemainder = allocated - counterIncrease; - Allocated->Add(counterIncrease); - } -} - -void TQuoterResourceTree::TCounters::SetLimit(TMaybe<double> limit) { - if (ResourceCounters) { - if (limit) { - if (!Limit) { - Limit = ResourceCounters->GetExpiringCounter(LIMIT_COUNTER_NAME, false); - } - *Limit = static_cast<i64>(*limit); - } else { - Limit = nullptr; - } - } -} - -bool THierarhicalDRRQuoterResourceTree::ValidateProps(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage) { - if (!props.HasHierarhicalDRRResourceConfig()) { - errorMessage = "No HierarhicalDRRResourceConfig specified."; - return false; - } - const auto& hdrrConfig = props.GetHierarhicalDRRResourceConfig(); - const double maxUnitsPerSecond = hdrrConfig.GetMaxUnitsPerSecond() ? 
- hdrrConfig.GetMaxUnitsPerSecond() : hdrrConfig.GetSpeedSettings().GetMaxUnitsPerSecond(); + for (TQuoterResourceTree* child : Children) { + child->CalcParameters(); + } +} + +void TQuoterResourceTree::SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters) { + Counters.SetResourceCounters(std::move(resourceCounters)); +} + +void TQuoterResourceTree::UpdateActiveTime(TInstant now) { + if (StartActiveTime && Counters.ElapsedMicrosecWhenResourceActive && now > StartActiveTime) { + const TDuration diff = now - StartActiveTime; + *Counters.ElapsedMicrosecWhenResourceActive += diff.MicroSeconds(); + } + StartActiveTime = now; +} + +void TQuoterResourceTree::StopActiveTime(TInstant now) { + UpdateActiveTime(now); + StartActiveTime = TInstant::Zero(); +} + +void TQuoterResourceTree::TCounters::SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters) { + ResourceCounters = std::move(resourceCounters); + if (ResourceCounters) { + Allocated = ResourceCounters->GetCounter(ALLOCATED_COUNTER_NAME, true); + Sessions = ResourceCounters->GetExpiringCounter(SESSIONS_COUNTER_NAME, false); + ActiveSessions = ResourceCounters->GetExpiringCounter(ACTIVE_SESSIONS_COUNTER_NAME, false); + ElapsedMicrosecWhenResourceActive = ResourceCounters->GetCounter(ELAPSED_MICROSEC_WHEN_RESOURCE_ACTIVE_COUNTER_NAME, true); + } else { + Allocated = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + Sessions = MakeIntrusive<NMonitoring::TCounterForPtr>(false); + ActiveSessions = MakeIntrusive<NMonitoring::TCounterForPtr>(false); + ElapsedMicrosecWhenResourceActive = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + } +} + +void TQuoterResourceTree::TCounters::AddAllocated(double allocated) { + if (Allocated) { + allocated += AllocatedRemainder; + const double counterIncrease = std::floor(allocated); + AllocatedRemainder = allocated - counterIncrease; + Allocated->Add(counterIncrease); + } +} + +void TQuoterResourceTree::TCounters::SetLimit(TMaybe<double> limit) { + if (ResourceCounters) { + if (limit) { + if (!Limit) { + Limit = ResourceCounters->GetExpiringCounter(LIMIT_COUNTER_NAME, false); + } + *Limit = static_cast<i64>(*limit); + } else { + Limit = nullptr; + } + } +} + +bool THierarhicalDRRQuoterResourceTree::ValidateProps(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage) { + if (!props.HasHierarhicalDRRResourceConfig()) { + errorMessage = "No HierarhicalDRRResourceConfig specified."; + return false; + } + const auto& hdrrConfig = props.GetHierarhicalDRRResourceConfig(); + const double maxUnitsPerSecond = hdrrConfig.GetMaxUnitsPerSecond() ? 
+ hdrrConfig.GetMaxUnitsPerSecond() : hdrrConfig.GetSpeedSettings().GetMaxUnitsPerSecond(); if (!std::isfinite(maxUnitsPerSecond)) { errorMessage = "MaxUnitsPerSecond must be finite."; return false; } - if (maxUnitsPerSecond < 0.0) { - errorMessage = "MaxUnitsPerSecond can't be less than 0."; - return false; - } - + if (maxUnitsPerSecond < 0.0) { + errorMessage = "MaxUnitsPerSecond can't be less than 0."; + return false; + } + // Validate prefetch settings const double prefetchCoefficient = hdrrConfig.GetPrefetchCoefficient(); if (!std::isfinite(prefetchCoefficient)) { @@ -608,37 +608,37 @@ bool THierarhicalDRRQuoterResourceTree::ValidateProps(const NKikimrKesus::TStrea return false; } - if (!ParentId && !maxUnitsPerSecond) { - errorMessage = "No MaxUnitsPerSecond parameter in root resource."; - return false; - } + if (!ParentId && !maxUnitsPerSecond) { + errorMessage = "No MaxUnitsPerSecond parameter in root resource."; + return false; + } if (!TRateAccounting::ValidateProps(props, errorMessage)) { return false; } return TQuoterResourceTree::ValidateProps(props, errorMessage); -} - -void THierarhicalDRRQuoterResourceTree::CalcParameters() { - // compatibility - if (!Props.GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond() && Props.GetHierarhicalDRRResourceConfig().GetSpeedSettings().GetMaxUnitsPerSecond()) { - Props.MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(Props.GetHierarhicalDRRResourceConfig().GetSpeedSettings().GetMaxUnitsPerSecond()); - } - +} + +void THierarhicalDRRQuoterResourceTree::CalcParameters() { + // compatibility + if (!Props.GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond() && Props.GetHierarhicalDRRResourceConfig().GetSpeedSettings().GetMaxUnitsPerSecond()) { + Props.MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(Props.GetHierarhicalDRRResourceConfig().GetSpeedSettings().GetMaxUnitsPerSecond()); + } + // speed settings - THierarhicalDRRQuoterResourceTree* const parent = GetParent(); - const auto& config = GetProps().GetHierarhicalDRRResourceConfig(); - if (config.GetMaxUnitsPerSecond()) { - MaxUnitsPerSecond = config.GetMaxUnitsPerSecond(); - } else if (parent) { - MaxUnitsPerSecond = parent->MaxUnitsPerSecond; - } - - if (parent && MaxUnitsPerSecond > parent->MaxUnitsPerSecond) { - MaxUnitsPerSecond = parent->MaxUnitsPerSecond; - } - + THierarhicalDRRQuoterResourceTree* const parent = GetParent(); + const auto& config = GetProps().GetHierarhicalDRRResourceConfig(); + if (config.GetMaxUnitsPerSecond()) { + MaxUnitsPerSecond = config.GetMaxUnitsPerSecond(); + } else if (parent) { + MaxUnitsPerSecond = parent->MaxUnitsPerSecond; + } + + if (parent && MaxUnitsPerSecond > parent->MaxUnitsPerSecond) { + MaxUnitsPerSecond = parent->MaxUnitsPerSecond; + } + // prefetch settings if (config.GetPrefetchCoefficient()) { PrefetchCoefficient = config.GetPrefetchCoefficient(); @@ -651,36 +651,36 @@ void THierarhicalDRRQuoterResourceTree::CalcParameters() { PrefetchWatermark = parent->PrefetchWatermark; } - ResourceTickQuantum = MaxUnitsPerSecond >= 0.0 ? MaxUnitsPerSecond / TICKS_PER_SECOND : 0.0; - ResourceFillingEpsilon = ResourceTickQuantum * EPSILON_COEFFICIENT; - TickSize = TDuration::Seconds(1) / TICKS_PER_SECOND; - - Burst = ResourceTickQuantum * RESOURCE_BURST_COEFFICIENT; - - const ui32 oldWeight = Weight; - Weight = config.GetWeight() ? 
config.GetWeight() : 1; - const i64 weightDiff = static_cast<i64>(Weight) - static_cast<i64>(oldWeight); - if (Active && parent && weightDiff) { - parent->ActiveChildrenWeight += weightDiff; - } - - FreeResource = Min(FreeResource, HasActiveChildren() ? ResourceTickQuantum : GetBurst()); - - // Update in props - auto* effectiveConfig = EffectiveProps.MutableHierarhicalDRRResourceConfig(); - effectiveConfig->SetMaxUnitsPerSecond(MaxUnitsPerSecond); - effectiveConfig->SetWeight(Weight); - effectiveConfig->SetMaxBurstSizeCoefficient(1); + ResourceTickQuantum = MaxUnitsPerSecond >= 0.0 ? MaxUnitsPerSecond / TICKS_PER_SECOND : 0.0; + ResourceFillingEpsilon = ResourceTickQuantum * EPSILON_COEFFICIENT; + TickSize = TDuration::Seconds(1) / TICKS_PER_SECOND; + + Burst = ResourceTickQuantum * RESOURCE_BURST_COEFFICIENT; + + const ui32 oldWeight = Weight; + Weight = config.GetWeight() ? config.GetWeight() : 1; + const i64 weightDiff = static_cast<i64>(Weight) - static_cast<i64>(oldWeight); + if (Active && parent && weightDiff) { + parent->ActiveChildrenWeight += weightDiff; + } + + FreeResource = Min(FreeResource, HasActiveChildren() ? ResourceTickQuantum : GetBurst()); + + // Update in props + auto* effectiveConfig = EffectiveProps.MutableHierarhicalDRRResourceConfig(); + effectiveConfig->SetMaxUnitsPerSecond(MaxUnitsPerSecond); + effectiveConfig->SetWeight(Weight); + effectiveConfig->SetMaxBurstSizeCoefficient(1); effectiveConfig->SetPrefetchCoefficient(PrefetchCoefficient); effectiveConfig->SetPrefetchWatermark(PrefetchWatermark); - - SetLimitCounter(); - + + SetLimitCounter(); + CalcParametersForAccounting(); - TQuoterResourceTree::CalcParameters(); // recalc for children -} - + TQuoterResourceTree::CalcParameters(); // recalc for children +} + void THierarhicalDRRQuoterResourceTree::CalcParametersForAccounting() { const auto* accCfgParent = Parent ? &Parent->GetEffectiveProps().GetAccountingConfig() : nullptr; auto* accCfg = EffectiveProps.MutableAccountingConfig(); @@ -737,109 +737,109 @@ void THierarhicalDRRQuoterResourceTree::CalcParametersForAccounting() { } } -void THierarhicalDRRQuoterResourceTree::RemoveChild(TQuoterResourceTree* childBase) { - THierarhicalDRRQuoterResourceTree* child = static_cast<THierarhicalDRRQuoterResourceTree*>(childBase); - if (child->Active) { - child->Active = false; - RemoveActiveChild(child); - } - TQuoterResourceTree::RemoveChild(childBase); -} - -void THierarhicalDRRQuoterResourceTree::DeactivateIfFull(TInstant now) { - if (!HasActiveChildren() && IsFull()) { - Active = false; - LWPROBE(ResourceDeactivate, - QuoterPath, - GetPath()); - StopActiveTime(now); - if (GetParent()) { - GetParent()->RemoveActiveChild(this); - } - } -} - -double THierarhicalDRRQuoterResourceTree::AccumulateResource(double amount, TInstant now) { - amount = Min(amount, ResourceTickQuantum); - const double newFreeResource = Min(FreeResource + amount, HasActiveChildren() ? 
ResourceTickQuantum : GetBurst()); - double spent = newFreeResource - FreeResource; - FreeResource = newFreeResource; - if (spent < ResourceFillingEpsilon) { - spent = 0.0; - } - - LWPROBE(ResourceAccumulateResource, - QuoterPath, - GetPath(), - now, - Active, - spent); - - DeactivateIfFull(now); - return spent; -} - -void THierarhicalDRRQuoterResourceTree::DoProcess(TTickProcessorQueue& queue, TInstant now) { - LWPROBE(ResourceProcess, - QuoterPath, - GetPath(), - now, - Active, - ActiveChildrenCount); - if (Active) { - if (Parent == nullptr) { // Root resource - AccumulateResource(ResourceTickQuantum, now); - } - - UpdateActiveTime(now); - if (HasActiveChildren()) { - const ui64 sumWeights = ActiveChildrenWeight; - const double quantum = Max(FreeResource / static_cast<double>(sumWeights), ResourceFillingEpsilon); - const size_t activeChildrenCount = ActiveChildrenCount; // This count will be nonincreasing during cycle. - size_t childrenProcessed = 0; - double freeResourceBeforeCycle = FreeResource; - while (FreeResource >= ResourceFillingEpsilon && HasActiveChildren()) { - THierarchicalDRRResourceConsumer* child = CurrentActiveChild; - CurrentActiveChild = CurrentActiveChild->GetNext<THierarchicalDRRResourceConsumer>(); - const ui32 weight = child->GetWeight(); - double amount = quantum; - if (weight != 1) { - amount *= static_cast<double>(weight); - } - const double giveAmount = std::clamp(amount, ResourceFillingEpsilon, FreeResource); - LWPROBE(ResourceGiveToChild, - QuoterPath, - GetPath(), - now, - giveAmount, - weight); - const double spent = child->AccumulateResource(giveAmount, now); - FreeResource -= spent; - - ++childrenProcessed; - if (childrenProcessed == activeChildrenCount) { // All children are processed, check whether FreeResource didn't change (so, there was no progress). - if (AlmostEqualUlpsAndAbs(FreeResource, freeResourceBeforeCycle, ResourceFillingEpsilon, ULPS_ACCURACY)) { - // Nothing has changed when all sessions/resources were processed. Break cycle. - break; - } - childrenProcessed = 0; - freeResourceBeforeCycle = FreeResource; - } - } - } - - DeactivateIfFull(now); - } - +void THierarhicalDRRQuoterResourceTree::RemoveChild(TQuoterResourceTree* childBase) { + THierarhicalDRRQuoterResourceTree* child = static_cast<THierarhicalDRRQuoterResourceTree*>(childBase); + if (child->Active) { + child->Active = false; + RemoveActiveChild(child); + } + TQuoterResourceTree::RemoveChild(childBase); +} + +void THierarhicalDRRQuoterResourceTree::DeactivateIfFull(TInstant now) { + if (!HasActiveChildren() && IsFull()) { + Active = false; + LWPROBE(ResourceDeactivate, + QuoterPath, + GetPath()); + StopActiveTime(now); + if (GetParent()) { + GetParent()->RemoveActiveChild(this); + } + } +} + +double THierarhicalDRRQuoterResourceTree::AccumulateResource(double amount, TInstant now) { + amount = Min(amount, ResourceTickQuantum); + const double newFreeResource = Min(FreeResource + amount, HasActiveChildren() ? 
ResourceTickQuantum : GetBurst()); + double spent = newFreeResource - FreeResource; + FreeResource = newFreeResource; + if (spent < ResourceFillingEpsilon) { + spent = 0.0; + } + + LWPROBE(ResourceAccumulateResource, + QuoterPath, + GetPath(), + now, + Active, + spent); + + DeactivateIfFull(now); + return spent; +} + +void THierarhicalDRRQuoterResourceTree::DoProcess(TTickProcessorQueue& queue, TInstant now) { + LWPROBE(ResourceProcess, + QuoterPath, + GetPath(), + now, + Active, + ActiveChildrenCount); + if (Active) { + if (Parent == nullptr) { // Root resource + AccumulateResource(ResourceTickQuantum, now); + } + + UpdateActiveTime(now); + if (HasActiveChildren()) { + const ui64 sumWeights = ActiveChildrenWeight; + const double quantum = Max(FreeResource / static_cast<double>(sumWeights), ResourceFillingEpsilon); + const size_t activeChildrenCount = ActiveChildrenCount; // This count will be nonincreasing during cycle. + size_t childrenProcessed = 0; + double freeResourceBeforeCycle = FreeResource; + while (FreeResource >= ResourceFillingEpsilon && HasActiveChildren()) { + THierarchicalDRRResourceConsumer* child = CurrentActiveChild; + CurrentActiveChild = CurrentActiveChild->GetNext<THierarchicalDRRResourceConsumer>(); + const ui32 weight = child->GetWeight(); + double amount = quantum; + if (weight != 1) { + amount *= static_cast<double>(weight); + } + const double giveAmount = std::clamp(amount, ResourceFillingEpsilon, FreeResource); + LWPROBE(ResourceGiveToChild, + QuoterPath, + GetPath(), + now, + giveAmount, + weight); + const double spent = child->AccumulateResource(giveAmount, now); + FreeResource -= spent; + + ++childrenProcessed; + if (childrenProcessed == activeChildrenCount) { // All children are processed, check whether FreeResource didn't change (so, there was no progress). + if (AlmostEqualUlpsAndAbs(FreeResource, freeResourceBeforeCycle, ResourceFillingEpsilon, ULPS_ACCURACY)) { + // Nothing has changed when all sessions/resources were processed. Break cycle. + break; + } + childrenProcessed = 0; + freeResourceBeforeCycle = FreeResource; + } + } + } + + DeactivateIfFull(now); + } + if (ActiveAccounting) { RunAccounting(); - } + } if (Active || ActiveAccounting) { ScheduleNextTick(queue, now); } -} - +} + TInstant THierarhicalDRRQuoterResourceTree::Report( const NActors::TActorId& clientId, ui64 resourceId, @@ -871,458 +871,458 @@ void THierarhicalDRRQuoterResourceTree::RunAccounting() { } } -void THierarhicalDRRQuoterResourceTree::AddActiveChild(THierarchicalDRRResourceConsumer* child, TTickProcessorQueue& queue, TInstant now) { - UpdateActiveTime(now); - if (!HasActiveChildren()) { - CurrentActiveChild = child; - ActiveChildrenCount = 1; - - Active = true; - LWPROBE(ResourceActivate, - QuoterPath, - GetPath()); - - ScheduleNextTick(queue, now); - if (GetParent()) { - GetParent()->AddActiveChild(this, queue, now); - } - - // Update sum of active children weights - Y_ASSERT(ActiveChildrenWeight == 0); - ActiveChildrenWeight = child->GetWeight(); - - } else { - if (child->GetNext<THierarchicalDRRResourceConsumer>() == child && CurrentActiveChild != child) { // Not in list. 
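// A consumer that belongs to no ring is self-linked (Prev == Next == this), so
// GetNext() == child means "not yet in the active list"; the extra
// CurrentActiveChild != child guard covers the one-element ring, whose sole member
// is also self-linked and must not be inserted a second time.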
- CurrentActiveChild->InsertBeforeInRoundRobinList(child); - ++ActiveChildrenCount; - - // Update sum of active children weights - ActiveChildrenWeight += child->GetWeight(); - } - } -} - -void THierarhicalDRRQuoterResourceTree::RemoveActiveChild(THierarchicalDRRResourceConsumer* child) { - if (HasActiveChildren()) { - if (child == CurrentActiveChild) { - CurrentActiveChild = CurrentActiveChild->GetNext<THierarchicalDRRResourceConsumer>(); - } - child->DeleteFromRoundRobinList(); - --ActiveChildrenCount; - if (child == CurrentActiveChild) { - CurrentActiveChild = nullptr; - Y_ASSERT(ActiveChildrenCount == 0); - } - - // Update sum of active children weights - Y_ASSERT(ActiveChildrenWeight >= child->GetWeight()); - ActiveChildrenWeight -= child->GetWeight(); - Y_ASSERT(ActiveChildrenCount > 0 || ActiveChildrenWeight == 0); - } -} - -void THierarhicalDRRQuoterResourceTree::ScheduleNextTick(TTickProcessorQueue& queue, TInstant now) { - Schedule(queue, NextTick(now, TickSize)); -} - +void THierarhicalDRRQuoterResourceTree::AddActiveChild(THierarchicalDRRResourceConsumer* child, TTickProcessorQueue& queue, TInstant now) { + UpdateActiveTime(now); + if (!HasActiveChildren()) { + CurrentActiveChild = child; + ActiveChildrenCount = 1; + + Active = true; + LWPROBE(ResourceActivate, + QuoterPath, + GetPath()); + + ScheduleNextTick(queue, now); + if (GetParent()) { + GetParent()->AddActiveChild(this, queue, now); + } + + // Update sum of active children weights + Y_ASSERT(ActiveChildrenWeight == 0); + ActiveChildrenWeight = child->GetWeight(); + + } else { + if (child->GetNext<THierarchicalDRRResourceConsumer>() == child && CurrentActiveChild != child) { // Not in list. + CurrentActiveChild->InsertBeforeInRoundRobinList(child); + ++ActiveChildrenCount; + + // Update sum of active children weights + ActiveChildrenWeight += child->GetWeight(); + } + } +} + +void THierarhicalDRRQuoterResourceTree::RemoveActiveChild(THierarchicalDRRResourceConsumer* child) { + if (HasActiveChildren()) { + if (child == CurrentActiveChild) { + CurrentActiveChild = CurrentActiveChild->GetNext<THierarchicalDRRResourceConsumer>(); + } + child->DeleteFromRoundRobinList(); + --ActiveChildrenCount; + if (child == CurrentActiveChild) { + CurrentActiveChild = nullptr; + Y_ASSERT(ActiveChildrenCount == 0); + } + + // Update sum of active children weights + Y_ASSERT(ActiveChildrenWeight >= child->GetWeight()); + ActiveChildrenWeight -= child->GetWeight(); + Y_ASSERT(ActiveChildrenCount > 0 || ActiveChildrenWeight == 0); + } +} + +void THierarhicalDRRQuoterResourceTree::ScheduleNextTick(TTickProcessorQueue& queue, TInstant now) { + Schedule(queue, NextTick(now, TickSize)); +} + THolder<TQuoterSession> THierarhicalDRRQuoterResourceTree::DoCreateSession(const NActors::TActorId& clientId) { - return MakeHolder<THierarhicalDRRQuoterSession>(clientId, this); -} - -void THierarhicalDRRQuoterResourceTree::SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters) { - TQuoterResourceTree::SetResourceCounters(std::move(resourceCounters)); + return MakeHolder<THierarhicalDRRQuoterSession>(clientId, this); +} + +void THierarhicalDRRQuoterResourceTree::SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters) { + TQuoterResourceTree::SetResourceCounters(std::move(resourceCounters)); if (RateAccounting) { RateAccounting->SetResourceCounters(Counters.ResourceCounters); } - SetLimitCounter(); -} - -void THierarhicalDRRQuoterResourceTree::SetLimitCounter() { - const double speedLimit = 
GetProps().GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(); - if (speedLimit) { - Counters.SetLimit(speedLimit); - } else { - Counters.SetLimit(Nothing()); - } -} - -bool TQuoterResources::Exists(ui64 resourceId) const { - return ResourcesById.find(resourceId) != ResourcesById.end(); -} - -TQuoterResourceTree* TQuoterResources::LoadResource(ui64 resourceId, ui64 parentId, const NKikimrKesus::TStreamingQuoterResource& props) { + SetLimitCounter(); +} + +void THierarhicalDRRQuoterResourceTree::SetLimitCounter() { + const double speedLimit = GetProps().GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(); + if (speedLimit) { + Counters.SetLimit(speedLimit); + } else { + Counters.SetLimit(Nothing()); + } +} + +bool TQuoterResources::Exists(ui64 resourceId) const { + return ResourcesById.find(resourceId) != ResourcesById.end(); +} + +TQuoterResourceTree* TQuoterResources::LoadResource(ui64 resourceId, ui64 parentId, const NKikimrKesus::TStreamingQuoterResource& props) { auto resource = CreateResource(resourceId, parentId, Kesus, BillSink, props); - Y_VERIFY(!Exists(resource->GetResourceId()), - "Resource \"%s\" has duplicated id: %" PRIu64, resource->GetPath().c_str(), resourceId); - Y_VERIFY(!props.GetResourcePath().empty(), - "Resource %" PRIu64 " has empty path", resourceId); - TQuoterResourceTree* res = resource.Get(); - ResourcesByPath.emplace(props.GetResourcePath(), resource.Get()); - ResourcesById.emplace(resourceId, std::move(resource)); - SetResourceCounters(res); - res->SetQuoterPath(QuoterPath); - return res; -} - -TQuoterResourceTree* TQuoterResources::AddResource(ui64 resourceId, const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage) { - // validate - if (ResourcesById.find(resourceId) != ResourcesById.end()) { - errorMessage = TStringBuilder() << "Resource with id " << resourceId << " already exists."; - return nullptr; - } - - const TVector<TString> path = SplitPath(props.GetResourcePath()); - if (path.empty()) { - errorMessage = "Empty resource path is specified."; - return nullptr; - } - const TString& canonPath = CanonizeQuoterResourcePath(path); - - if (ResourcesByPath.find(canonPath) != ResourcesByPath.end()) { - errorMessage = TStringBuilder() << "Resource with path \"" << canonPath << "\" already exists."; - return nullptr; - } - - // find parent - TQuoterResourceTree* parent = nullptr; - if (path.size() > 1) { - const TVector<TString> parentPath(path.begin(), path.end() - 1); - TString canonParentPath = CanonizeQuoterResourcePath(parentPath); - parent = FindPathImpl(canonParentPath); - if (!parent) { - errorMessage = TStringBuilder() << "Parent resource \"" << canonParentPath << "\" doesn't exist."; - return nullptr; - } - } - - // create and finally validate props - NKikimrKesus::TStreamingQuoterResource resProps = props; - resProps.SetResourceId(resourceId); - resProps.SetResourcePath(canonPath); - const ui64 parentId = parent ? 
parent->GetResourceId() : 0; + Y_VERIFY(!Exists(resource->GetResourceId()), + "Resource \"%s\" has duplicated id: %" PRIu64, resource->GetPath().c_str(), resourceId); + Y_VERIFY(!props.GetResourcePath().empty(), + "Resource %" PRIu64 " has empty path", resourceId); + TQuoterResourceTree* res = resource.Get(); + ResourcesByPath.emplace(props.GetResourcePath(), resource.Get()); + ResourcesById.emplace(resourceId, std::move(resource)); + SetResourceCounters(res); + res->SetQuoterPath(QuoterPath); + return res; +} + +TQuoterResourceTree* TQuoterResources::AddResource(ui64 resourceId, const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage) { + // validate + if (ResourcesById.find(resourceId) != ResourcesById.end()) { + errorMessage = TStringBuilder() << "Resource with id " << resourceId << " already exists."; + return nullptr; + } + + const TVector<TString> path = SplitPath(props.GetResourcePath()); + if (path.empty()) { + errorMessage = "Empty resource path is specified."; + return nullptr; + } + const TString& canonPath = CanonizeQuoterResourcePath(path); + + if (ResourcesByPath.find(canonPath) != ResourcesByPath.end()) { + errorMessage = TStringBuilder() << "Resource with path \"" << canonPath << "\" already exists."; + return nullptr; + } + + // find parent + TQuoterResourceTree* parent = nullptr; + if (path.size() > 1) { + const TVector<TString> parentPath(path.begin(), path.end() - 1); + TString canonParentPath = CanonizeQuoterResourcePath(parentPath); + parent = FindPathImpl(canonParentPath); + if (!parent) { + errorMessage = TStringBuilder() << "Parent resource \"" << canonParentPath << "\" doesn't exist."; + return nullptr; + } + } + + // create and finally validate props + NKikimrKesus::TStreamingQuoterResource resProps = props; + resProps.SetResourceId(resourceId); + resProps.SetResourcePath(canonPath); + const ui64 parentId = parent ? 
parent->GetResourceId() : 0; THolder<TQuoterResourceTree> resource = CreateResource(resourceId, parentId, Kesus, BillSink, resProps); - if (!resource->ValidateProps(resProps, errorMessage)) { - return nullptr; - } - - // insert - TQuoterResourceTree* resourcePtr = resource.Get(); - if (parent) { - parent->AddChild(resourcePtr); - } - ResourcesByPath[canonPath] = resourcePtr; - ResourcesById[resourceId] = std::move(resource); - SetResourceCounters(resourcePtr); - resourcePtr->SetQuoterPath(QuoterPath); - resourcePtr->CalcParameters(); - - return resourcePtr; -} - -bool TQuoterResources::DeleteResource(TQuoterResourceTree* resource, TString& errorMessage) { - if (!resource->GetChildren().empty()) { - errorMessage = TStringBuilder() << "Resource \"" << resource->GetPath() << "\" has children."; - return false; - } - - if (resource->GetParent()) { - resource->GetParent()->RemoveChild(resource); - } - - const auto sessions = resource->GetSessions(); - TStringBuilder closeReason; - closeReason << "Resource \"" << resource->GetPath() << "\" was deleted."; + if (!resource->ValidateProps(resProps, errorMessage)) { + return nullptr; + } + + // insert + TQuoterResourceTree* resourcePtr = resource.Get(); + if (parent) { + parent->AddChild(resourcePtr); + } + ResourcesByPath[canonPath] = resourcePtr; + ResourcesById[resourceId] = std::move(resource); + SetResourceCounters(resourcePtr); + resourcePtr->SetQuoterPath(QuoterPath); + resourcePtr->CalcParameters(); + + return resourcePtr; +} + +bool TQuoterResources::DeleteResource(TQuoterResourceTree* resource, TString& errorMessage) { + if (!resource->GetChildren().empty()) { + errorMessage = TStringBuilder() << "Resource \"" << resource->GetPath() << "\" has children."; + return false; + } + + if (resource->GetParent()) { + resource->GetParent()->RemoveChild(resource); + } + + const auto sessions = resource->GetSessions(); + TStringBuilder closeReason; + closeReason << "Resource \"" << resource->GetPath() << "\" was deleted."; for (const NActors::TActorId& clientId : sessions) { - const auto sessionId = TQuoterSessionId{clientId, resource->GetResourceId()}; - const auto sessionIt = Sessions.find(sessionId); - Y_VERIFY(sessionIt != Sessions.end()); - TQuoterSession* session = sessionIt->second.Get(); - session->CloseSession(Ydb::StatusIds::NOT_FOUND, closeReason); + const auto sessionId = TQuoterSessionId{clientId, resource->GetResourceId()}; + const auto sessionIt = Sessions.find(sessionId); + Y_VERIFY(sessionIt != Sessions.end()); + TQuoterSession* session = sessionIt->second.Get(); + session->CloseSession(Ydb::StatusIds::NOT_FOUND, closeReason); const NActors::TActorId pipeServerId = session->SetPipeServerId({}); - SetPipeServerId(sessionId, pipeServerId, {}); // Erase pipeServerId from index. - Sessions.erase(sessionIt); - } - - const auto resByPathIt = ResourcesByPath.find(resource->GetPath()); - Y_VERIFY(resByPathIt != ResourcesByPath.end()); - Y_VERIFY(resByPathIt->second == resource); - ResourcesByPath.erase(resByPathIt); - - const auto resByIdIt = ResourcesById.find(resource->GetResourceId()); - Y_VERIFY(resByIdIt != ResourcesById.end()); - Y_VERIFY(resByIdIt->second.Get() == resource); - ResourcesById.erase(resByIdIt); - return true; -} - + SetPipeServerId(sessionId, pipeServerId, {}); // Erase pipeServerId from index. 
+ Sessions.erase(sessionIt); + } + + const auto resByPathIt = ResourcesByPath.find(resource->GetPath()); + Y_VERIFY(resByPathIt != ResourcesByPath.end()); + Y_VERIFY(resByPathIt->second == resource); + ResourcesByPath.erase(resByPathIt); + + const auto resByIdIt = ResourcesById.find(resource->GetResourceId()); + Y_VERIFY(resByIdIt != ResourcesById.end()); + Y_VERIFY(resByIdIt->second.Get() == resource); + ResourcesById.erase(resByIdIt); + return true; +} + void TQuoterResources::SetupBilling(NActors::TActorId kesus, const IBillSink::TPtr& billSink) { Kesus = kesus; BillSink = billSink; } -void TQuoterResources::ConstructTrees() { - // connect with parents - std::vector<TQuoterResourceTree*> roots; - for (auto&& [id, resource] : ResourcesById) { - if (resource->GetParentId()) { - const auto parent = ResourcesById.find(resource->GetParentId()); - Y_VERIFY(parent != ResourcesById.end(), - "Parent %" PRIu64 " was not found for resource %" PRIu64 " (\"%s\")", - resource->GetParentId(), resource->GetResourceId(), resource->GetPath().c_str()); - parent->second->AddChild(resource.Get()); - } else { - roots.push_back(resource.Get()); - } - } - for (TQuoterResourceTree* root : roots) { - root->CalcParameters(); - } -} - -bool TQuoterResources::IsResourcePathValid(const TString& path) { - for (const char c : path) { - if (!ValidResourcePathSymbols[static_cast<unsigned char>(c)]) { - return false; - } - } - return true; -} - -TQuoterResourceTree* TQuoterResources::FindPath(const TString& resourcePath) { - return FindPathImpl(CanonizeQuoterResourcePath(resourcePath)); -} - -TQuoterResourceTree* TQuoterResources::FindId(ui64 resourceId) { - const auto res = ResourcesById.find(resourceId); - return res != ResourcesById.end() ? res->second.Get() : nullptr; -} - -TQuoterResourceTree* TQuoterResources::FindPathImpl(const TString& resourcePath) { - const auto res = ResourcesByPath.find(resourcePath); - return res != ResourcesByPath.end() ? 
res->second : nullptr; -} - -void TQuoterResources::ProcessTick(const TTickProcessorTask& task, TTickProcessorQueue& queue) { - TTickProcessor* processor = nullptr; - if (task.Processor.first) { // session - auto sessionIt = Sessions.find(task.Processor); - if (sessionIt != Sessions.end()) { - processor = sessionIt->second.Get(); - } - } else { // resource - processor = FindId(task.Processor.second); - } - if (processor) { - processor->Process(queue, task.Time); - } -} - +void TQuoterResources::ConstructTrees() { + // connect with parents + std::vector<TQuoterResourceTree*> roots; + for (auto&& [id, resource] : ResourcesById) { + if (resource->GetParentId()) { + const auto parent = ResourcesById.find(resource->GetParentId()); + Y_VERIFY(parent != ResourcesById.end(), + "Parent %" PRIu64 " was not found for resource %" PRIu64 " (\"%s\")", + resource->GetParentId(), resource->GetResourceId(), resource->GetPath().c_str()); + parent->second->AddChild(resource.Get()); + } else { + roots.push_back(resource.Get()); + } + } + for (TQuoterResourceTree* root : roots) { + root->CalcParameters(); + } +} + +bool TQuoterResources::IsResourcePathValid(const TString& path) { + for (const char c : path) { + if (!ValidResourcePathSymbols[static_cast<unsigned char>(c)]) { + return false; + } + } + return true; +} + +TQuoterResourceTree* TQuoterResources::FindPath(const TString& resourcePath) { + return FindPathImpl(CanonizeQuoterResourcePath(resourcePath)); +} + +TQuoterResourceTree* TQuoterResources::FindId(ui64 resourceId) { + const auto res = ResourcesById.find(resourceId); + return res != ResourcesById.end() ? res->second.Get() : nullptr; +} + +TQuoterResourceTree* TQuoterResources::FindPathImpl(const TString& resourcePath) { + const auto res = ResourcesByPath.find(resourcePath); + return res != ResourcesByPath.end() ? res->second : nullptr; +} + +void TQuoterResources::ProcessTick(const TTickProcessorTask& task, TTickProcessorQueue& queue) { + TTickProcessor* processor = nullptr; + if (task.Processor.first) { // session + auto sessionIt = Sessions.find(task.Processor); + if (sessionIt != Sessions.end()) { + processor = sessionIt->second.Get(); + } + } else { // resource + processor = FindId(task.Processor.second); + } + if (processor) { + processor->Process(queue, task.Time); + } +} + TQuoterSession* TQuoterResources::GetOrCreateSession(const NActors::TActorId& clientId, TQuoterResourceTree* resource) { - const ui64 resourceId = resource->GetResourceId(); - if (TQuoterSession* session = FindSession(clientId, resourceId)) { - return session; - } else { - const auto newSessionIt = Sessions.emplace(TQuoterSessionId{clientId, resourceId}, resource->CreateSession(clientId)).first; - return newSessionIt->second.Get(); - } -} - + const ui64 resourceId = resource->GetResourceId(); + if (TQuoterSession* session = FindSession(clientId, resourceId)) { + return session; + } else { + const auto newSessionIt = Sessions.emplace(TQuoterSessionId{clientId, resourceId}, resource->CreateSession(clientId)).first; + return newSessionIt->second.Get(); + } +} + TQuoterSession* TQuoterResources::FindSession(const NActors::TActorId& clientId, ui64 resourceId) { - const auto sessionIt = Sessions.find(TQuoterSessionId{clientId, resourceId}); - return sessionIt != Sessions.end() ? sessionIt->second.Get() : nullptr; -} - + const auto sessionIt = Sessions.find(TQuoterSessionId{clientId, resourceId}); + return sessionIt != Sessions.end() ? 
sessionIt->second.Get() : nullptr; +} + const TQuoterSession* TQuoterResources::FindSession(const NActors::TActorId& clientId, ui64 resourceId) const { - const auto sessionIt = Sessions.find(TQuoterSessionId{clientId, resourceId}); - return sessionIt != Sessions.end() ? sessionIt->second.Get() : nullptr; -} - -void TQuoterResources::OnUpdateResourceProps(TQuoterResourceTree* rootResource) { - const ui64 resId = rootResource->GetResourceId(); + const auto sessionIt = Sessions.find(TQuoterSessionId{clientId, resourceId}); + return sessionIt != Sessions.end() ? sessionIt->second.Get() : nullptr; +} + +void TQuoterResources::OnUpdateResourceProps(TQuoterResourceTree* rootResource) { + const ui64 resId = rootResource->GetResourceId(); for (const NActors::TActorId& sessionActor : rootResource->GetSessions()) { - TQuoterSession* session = FindSession(sessionActor, resId); - Y_VERIFY(session); - session->OnPropsChanged(); - } - for (TQuoterResourceTree* child : rootResource->GetChildren()) { - OnUpdateResourceProps(child); - } -} - -void TQuoterResources::EnableDetailedCountersMode(bool enable) { - Counters.DetailedCountersMode = enable; - - ReinitResourceCounters(); -} - -void TQuoterResources::SetResourceCounters(TQuoterResourceTree* res) { - res->SetResourceCounters( - Counters.QuoterCounters && (Counters.DetailedCountersMode || res->GetParentId() == 0) ? - Counters.QuoterCounters->GetSubgroup(RESOURCE_COUNTERS_LABEL, res->GetProps().GetResourcePath()) : - nullptr - ); -} - -void TQuoterResources::SetQuoterCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> quoterCounters) { - Counters.QuoterCounters = std::move(quoterCounters); - - ReinitResourceCounters(); -} - -void TQuoterResources::ReinitResourceCounters() { - if (Counters.QuoterCounters) { - Counters.ResourceSubscriptions = Counters.QuoterCounters->GetCounter(RESOURCE_SUBSCRIPTIONS_COUNTER_NAME, true); - Counters.UnknownResourceSubscriptions = Counters.QuoterCounters->GetCounter(UNKNOWN_RESOURCE_SUBSCRIPTIONS_COUNTER_NAME, true); - Counters.ResourceConsumptionStarts = Counters.QuoterCounters->GetCounter(RESOURCE_CONSUMPTION_STARTS_COUNTER_NAME, true); - Counters.ResourceConsumptionStops = Counters.QuoterCounters->GetCounter(RESOURCE_CONSUMPTION_STOPS_COUNTER_NAME, true); - Counters.ElapsedMicrosecOnResourceAllocation = Counters.QuoterCounters->GetCounter(ELAPSED_MICROSEC_ON_RESOURCE_ALLOCATION_COUNTER_NAME, true); - Counters.TickProcessorTasksProcessed = Counters.QuoterCounters->GetCounter(TICK_PROCESSOR_TASKS_PROCESSED_COUNTER_NAME, true); - } else { - Counters.ResourceSubscriptions = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - Counters.UnknownResourceSubscriptions = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - Counters.ResourceConsumptionStarts = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - Counters.ResourceConsumptionStops = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - Counters.ElapsedMicrosecOnResourceAllocation = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - Counters.TickProcessorTasksProcessed = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - } - - for (auto&& [id, res] : ResourcesById) { - SetResourceCounters(res.Get()); - } -} - -void TQuoterResources::FillCounters(NKikimrKesus::TEvGetQuoterResourceCountersResult& counters) { - for (auto&& [path, res] : ResourcesByPath) { - auto* resCounter = counters.AddResourceCounters(); - resCounter->SetResourcePath(path); - resCounter->SetAllocated(res->GetCounters().GetAllocated()); - } -} - + TQuoterSession* session = FindSession(sessionActor, 
resId); + Y_VERIFY(session); + session->OnPropsChanged(); + } + for (TQuoterResourceTree* child : rootResource->GetChildren()) { + OnUpdateResourceProps(child); + } +} + +void TQuoterResources::EnableDetailedCountersMode(bool enable) { + Counters.DetailedCountersMode = enable; + + ReinitResourceCounters(); +} + +void TQuoterResources::SetResourceCounters(TQuoterResourceTree* res) { + res->SetResourceCounters( + Counters.QuoterCounters && (Counters.DetailedCountersMode || res->GetParentId() == 0) ? + Counters.QuoterCounters->GetSubgroup(RESOURCE_COUNTERS_LABEL, res->GetProps().GetResourcePath()) : + nullptr + ); +} + +void TQuoterResources::SetQuoterCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> quoterCounters) { + Counters.QuoterCounters = std::move(quoterCounters); + + ReinitResourceCounters(); +} + +void TQuoterResources::ReinitResourceCounters() { + if (Counters.QuoterCounters) { + Counters.ResourceSubscriptions = Counters.QuoterCounters->GetCounter(RESOURCE_SUBSCRIPTIONS_COUNTER_NAME, true); + Counters.UnknownResourceSubscriptions = Counters.QuoterCounters->GetCounter(UNKNOWN_RESOURCE_SUBSCRIPTIONS_COUNTER_NAME, true); + Counters.ResourceConsumptionStarts = Counters.QuoterCounters->GetCounter(RESOURCE_CONSUMPTION_STARTS_COUNTER_NAME, true); + Counters.ResourceConsumptionStops = Counters.QuoterCounters->GetCounter(RESOURCE_CONSUMPTION_STOPS_COUNTER_NAME, true); + Counters.ElapsedMicrosecOnResourceAllocation = Counters.QuoterCounters->GetCounter(ELAPSED_MICROSEC_ON_RESOURCE_ALLOCATION_COUNTER_NAME, true); + Counters.TickProcessorTasksProcessed = Counters.QuoterCounters->GetCounter(TICK_PROCESSOR_TASKS_PROCESSED_COUNTER_NAME, true); + } else { + Counters.ResourceSubscriptions = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + Counters.UnknownResourceSubscriptions = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + Counters.ResourceConsumptionStarts = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + Counters.ResourceConsumptionStops = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + Counters.ElapsedMicrosecOnResourceAllocation = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + Counters.TickProcessorTasksProcessed = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + } + + for (auto&& [id, res] : ResourcesById) { + SetResourceCounters(res.Get()); + } +} + +void TQuoterResources::FillCounters(NKikimrKesus::TEvGetQuoterResourceCountersResult& counters) { + for (auto&& [path, res] : ResourcesByPath) { + auto* resCounter = counters.AddResourceCounters(); + resCounter->SetResourcePath(path); + resCounter->SetAllocated(res->GetCounters().GetAllocated()); + } +} + void TQuoterResources::SetPipeServerId(TQuoterSessionId sessionId, const NActors::TActorId& prevId, const NActors::TActorId& id) { - if (prevId) { - auto [prevIt, prevItEnd] = PipeServerIdToSession.equal_range(prevId); - for (; prevIt != prevItEnd; ++prevIt) { - if (prevIt->second.second == sessionId.second) { // compare resource id - PipeServerIdToSession.erase(prevIt); - break; - } - } - } - if (id) { - PipeServerIdToSession.emplace(id, sessionId); - } -} - + if (prevId) { + auto [prevIt, prevItEnd] = PipeServerIdToSession.equal_range(prevId); + for (; prevIt != prevItEnd; ++prevIt) { + if (prevIt->second.second == sessionId.second) { // compare resource id + PipeServerIdToSession.erase(prevIt); + break; + } + } + } + if (id) { + PipeServerIdToSession.emplace(id, sessionId); + } +} + void TQuoterResources::DisconnectSession(const NActors::TActorId& pipeServerId) { - auto [pipeToSessionItBegin, pipeToSessionItEnd] = 
PipeServerIdToSession.equal_range(pipeServerId); - for (auto pipeToSessionIt = pipeToSessionItBegin; pipeToSessionIt != pipeToSessionItEnd; ++pipeToSessionIt) { - const TQuoterSessionId sessionId = pipeToSessionIt->second; + auto [pipeToSessionItBegin, pipeToSessionItEnd] = PipeServerIdToSession.equal_range(pipeServerId); + for (auto pipeToSessionIt = pipeToSessionItBegin; pipeToSessionIt != pipeToSessionItEnd; ++pipeToSessionIt) { + const TQuoterSessionId sessionId = pipeToSessionIt->second; const NActors::TActorId sessionClientId = sessionId.first; - - { - const auto sessionIter = Sessions.find(sessionId); - Y_VERIFY(sessionIter != Sessions.end()); - TQuoterSession* session = sessionIter->second.Get(); - session->GetResource()->OnSessionDisconnected(sessionClientId); - session->CloseSession(Ydb::StatusIds::SESSION_EXPIRED, "Disconected."); - Sessions.erase(sessionIter); - } - } - PipeServerIdToSession.erase(pipeToSessionItBegin, pipeToSessionItEnd); -} - -void TQuoterResources::SetQuoterPath(const TString& quoterPath) { - QuoterPath = quoterPath; - for (auto&& [id, resource] : ResourcesById) { - resource->SetQuoterPath(QuoterPath); - } -} - -void TTickProcessorQueue::Push(const TTickProcessorTask& task) { - if (!Empty()) { - if (Sorted) { - if (task < Tasks.back()) { - Sorted = false; - } - } - if (!Sorted && task < Top()) { - TopIndex = Tasks.size(); - } - } - Tasks.push_back(task); -} - -void TTickProcessorQueue::Pop() { - ++FirstIndex; - ++TopIndex; - Y_ASSERT(FirstIndex <= Tasks.size()); -} - -const TTickProcessorTask& TTickProcessorQueue::Top() const { - return Tasks[TopIndex]; -} - -bool TTickProcessorQueue::Empty() const { - return FirstIndex == Tasks.size(); -} - -void TTickProcessorQueue::Merge(TTickProcessorQueue&& from) { - Y_ASSERT(from.FirstIndex == 0); - Sort(); - from.Sort(); - - if (Empty()) { - std::swap(Tasks, from.Tasks); - FirstIndex = 0; - TopIndex = 0; - return; - } else if (from.Empty()) { - return; - } - - if (Tasks.back() <= from.Tasks.front()) { - if (FirstIndex > 0) { - auto to = Tasks.begin(); - auto from = Tasks.begin() + FirstIndex; - const size_t count = Tasks.size() - FirstIndex; - if (2 * count < Tasks.size()) { - for (size_t i = 0; i < count; ++i, ++to, ++from) { - *to = std::move(*from); - } - FirstIndex = 0; - TopIndex = 0; - Tasks.resize(count); - } - } - Tasks.reserve(Tasks.size() + from.Tasks.size()); - Tasks.insert(Tasks.end(), std::make_move_iterator(from.Tasks.begin()), std::make_move_iterator(from.Tasks.end())); - return; - } - - std::vector<TTickProcessorTask> dest; - dest.reserve(Tasks.size() - FirstIndex + from.Tasks.size()); - auto current = Tasks.begin() + FirstIndex; - auto end = Tasks.end(); - auto fromCurrent = from.Tasks.begin(); - auto fromEnd = from.Tasks.end(); - std::merge(std::make_move_iterator(current), - std::make_move_iterator(end), - std::make_move_iterator(fromCurrent), - std::make_move_iterator(fromEnd), - std::back_inserter(dest)); - std::swap(Tasks, dest); - TopIndex = 0; - FirstIndex = 0; -} - -void TTickProcessorQueue::Sort() { - if (!Sorted) { - std::sort(Tasks.begin() + FirstIndex, Tasks.end()); - TopIndex = FirstIndex; - Sorted = true; - } -} - -} -} + + { + const auto sessionIter = Sessions.find(sessionId); + Y_VERIFY(sessionIter != Sessions.end()); + TQuoterSession* session = sessionIter->second.Get(); + session->GetResource()->OnSessionDisconnected(sessionClientId); + session->CloseSession(Ydb::StatusIds::SESSION_EXPIRED, "Disconected."); + Sessions.erase(sessionIter); + } + } + 
PipeServerIdToSession.erase(pipeToSessionItBegin, pipeToSessionItEnd); +} + +void TQuoterResources::SetQuoterPath(const TString& quoterPath) { + QuoterPath = quoterPath; + for (auto&& [id, resource] : ResourcesById) { + resource->SetQuoterPath(QuoterPath); + } +} + +void TTickProcessorQueue::Push(const TTickProcessorTask& task) { + if (!Empty()) { + if (Sorted) { + if (task < Tasks.back()) { + Sorted = false; + } + } + if (!Sorted && task < Top()) { + TopIndex = Tasks.size(); + } + } + Tasks.push_back(task); +} + +void TTickProcessorQueue::Pop() { + ++FirstIndex; + ++TopIndex; + Y_ASSERT(FirstIndex <= Tasks.size()); +} + +const TTickProcessorTask& TTickProcessorQueue::Top() const { + return Tasks[TopIndex]; +} + +bool TTickProcessorQueue::Empty() const { + return FirstIndex == Tasks.size(); +} + +void TTickProcessorQueue::Merge(TTickProcessorQueue&& from) { + Y_ASSERT(from.FirstIndex == 0); + Sort(); + from.Sort(); + + if (Empty()) { + std::swap(Tasks, from.Tasks); + FirstIndex = 0; + TopIndex = 0; + return; + } else if (from.Empty()) { + return; + } + + if (Tasks.back() <= from.Tasks.front()) { + if (FirstIndex > 0) { + auto to = Tasks.begin(); + auto from = Tasks.begin() + FirstIndex; + const size_t count = Tasks.size() - FirstIndex; + if (2 * count < Tasks.size()) { + for (size_t i = 0; i < count; ++i, ++to, ++from) { + *to = std::move(*from); + } + FirstIndex = 0; + TopIndex = 0; + Tasks.resize(count); + } + } + Tasks.reserve(Tasks.size() + from.Tasks.size()); + Tasks.insert(Tasks.end(), std::make_move_iterator(from.Tasks.begin()), std::make_move_iterator(from.Tasks.end())); + return; + } + + std::vector<TTickProcessorTask> dest; + dest.reserve(Tasks.size() - FirstIndex + from.Tasks.size()); + auto current = Tasks.begin() + FirstIndex; + auto end = Tasks.end(); + auto fromCurrent = from.Tasks.begin(); + auto fromEnd = from.Tasks.end(); + std::merge(std::make_move_iterator(current), + std::make_move_iterator(end), + std::make_move_iterator(fromCurrent), + std::make_move_iterator(fromEnd), + std::back_inserter(dest)); + std::swap(Tasks, dest); + TopIndex = 0; + FirstIndex = 0; +} + +void TTickProcessorQueue::Sort() { + if (!Sorted) { + std::sort(Tasks.begin() + FirstIndex, Tasks.end()); + TopIndex = FirstIndex; + Sorted = true; + } +} + +} +} diff --git a/ydb/core/kesus/tablet/quoter_resource_tree.h b/ydb/core/kesus/tablet/quoter_resource_tree.h index bc28e518c3f..a59a9070659 100644 --- a/ydb/core/kesus/tablet/quoter_resource_tree.h +++ b/ydb/core/kesus/tablet/quoter_resource_tree.h @@ -1,408 +1,408 @@ -#pragma once +#pragma once #include "rate_accounting.h" #include <ydb/core/protos/kesus.pb.h> - + #include <library/cpp/actors/core/actor.h> #include <library/cpp/monlib/dynamic_counters/counters.h> - -#include <util/datetime/base.h> -#include <util/generic/hash.h> -#include <util/generic/hash_set.h> -#include <util/generic/ptr.h> -#include <util/generic/string.h> -#include <util/generic/yexception.h> - -#include <queue> - -namespace NKikimr { - -namespace NKesus { - + +#include <util/datetime/base.h> +#include <util/generic/hash.h> +#include <util/generic/hash_set.h> +#include <util/generic/ptr.h> +#include <util/generic/string.h> +#include <util/generic/yexception.h> + +#include <queue> + +namespace NKikimr { + +namespace NKesus { + using TQuoterSessionId = std::pair<NActors::TActorId, ui64>; // client id, resource id using TTickProcessorId = std::pair<NActors::TActorId, ui64>; // == TQuoterSessionId for sessions. == ResourceId for resources (with empty actor id). 
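The queue shown above (Push/Pop/Top/Empty/Merge with lazy sorting) is meant to be drained one tick at a time: tasks scheduled while processing go into a fresh local queue and are merged back afterwards, which is the pattern ProcessTick and the unit test further below rely on. A condensed sketch, assuming the declarations from quoter_resource_tree.h; the helper name and the exact loop shape are illustrative, not part of the interface.

#include "quoter_resource_tree.h"

#include <utility>

using namespace NKikimr::NKesus;

// Process every task scheduled for the earliest pending tick.
void DrainOneTick(TQuoterResources& resources, TTickProcessorQueue& queue) {
    if (queue.Empty()) {
        return;
    }
    const TInstant tick = queue.Top().Time;
    TTickProcessorQueue next;              // follow-up tasks land here, not in the queue being drained
    while (!queue.Empty() && queue.Top().Time == tick) {
        const TTickProcessorTask task = queue.Top();
        queue.Pop();
        resources.ProcessTick(task, next); // resolves task.Processor to a resource or session and calls Process()
    }
    queue.Merge(std::move(next));          // merged tasks keep the (Time, ProcessorLevel) ordering for the next tick
}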
-class TQuoterResourceTree; - -TString CanonizeQuoterResourcePath(const TVector<TString>& path); -TString CanonizeQuoterResourcePath(const TString& path); - -// Member of tick processor queue. -// Contains ids that are sufficient to find processor, -// and fields for proper ordering in priority queue (time and level). -struct TTickProcessorTask { - TTickProcessorId Processor; - size_t ProcessorLevel; - TInstant Time; - - bool operator<(const TTickProcessorTask& task) const { - return std::tie(Time, ProcessorLevel) < std::tie(task.Time, task.ProcessorLevel); - } - - bool operator<=(const TTickProcessorTask& task) const { - return !task.operator<(*this); - } -}; - -// Queue for scheduling ticks for resource filling. -// This queue has interface of priority queue, -// but it performs better than priority queue -// in our load profile. -// KIKIMR-7381 -class TTickProcessorQueue { -public: - TTickProcessorQueue() = default; - - void Push(const TTickProcessorTask& task); - void Pop(); - const TTickProcessorTask& Top() const; - bool Empty() const; - void Merge(TTickProcessorQueue&& from); - -private: - void Sort(); - -private: - std::vector<TTickProcessorTask> Tasks; - size_t TopIndex = 0; - bool Sorted = true; - size_t FirstIndex = 0; -}; - -// Parent interface for tick processor. -// Can be resource or sessions connected to resource. -class TTickProcessor { -public: - virtual ~TTickProcessor() = default; - - virtual size_t GetLevel() const = 0; // Level in the tree for proper ordering. - virtual TTickProcessorId GetTickProcessorId() const = 0; - - void Schedule(TTickProcessorQueue& queue, TInstant time) { - if (!Scheduled) { - Scheduled = true; - queue.Push({GetTickProcessorId(), GetLevel(), time}); - } - } - - void Process(TTickProcessorQueue& queue, TInstant now) { - Scheduled = false; - DoProcess(queue, now); - } - -private: - virtual void DoProcess(TTickProcessorQueue& queue, TInstant now) = 0; - -private: - bool Scheduled = false; -}; - -// Resource sink - encapsulates send method (to tablet pipe in fact). -// When session is reconnected, this object is changed to new one -// with different actor to send resource to. -class IResourceSink : public TThrRefBase { -public: - using TPtr = TIntrusivePtr<IResourceSink>; - - // Successful resource allocation notification. - virtual void Send(ui64 resourceId, double amount, const NKikimrKesus::TStreamingQuoterResource* props) = 0; - - // Notification about resource allocation error. For example, when resource was deleted during usage. - // This notification means, that Kesus will not process resource with given id. - // Other resources from this client will be continued processing as usual. - virtual void CloseSession(ui64 resourceId, Ydb::StatusIds::StatusCode status, const TString& reason) = 0; -}; - -// Common interface for session to resource. -class TQuoterSession : public TTickProcessor { -public: +class TQuoterResourceTree; + +TString CanonizeQuoterResourcePath(const TVector<TString>& path); +TString CanonizeQuoterResourcePath(const TString& path); + +// Member of tick processor queue. +// Contains ids that are sufficient to find processor, +// and fields for proper ordering in priority queue (time and level). 
+struct TTickProcessorTask { + TTickProcessorId Processor; + size_t ProcessorLevel; + TInstant Time; + + bool operator<(const TTickProcessorTask& task) const { + return std::tie(Time, ProcessorLevel) < std::tie(task.Time, task.ProcessorLevel); + } + + bool operator<=(const TTickProcessorTask& task) const { + return !task.operator<(*this); + } +}; + +// Queue for scheduling ticks for resource filling. +// This queue has interface of priority queue, +// but it performs better than priority queue +// in our load profile. +// KIKIMR-7381 +class TTickProcessorQueue { +public: + TTickProcessorQueue() = default; + + void Push(const TTickProcessorTask& task); + void Pop(); + const TTickProcessorTask& Top() const; + bool Empty() const; + void Merge(TTickProcessorQueue&& from); + +private: + void Sort(); + +private: + std::vector<TTickProcessorTask> Tasks; + size_t TopIndex = 0; + bool Sorted = true; + size_t FirstIndex = 0; +}; + +// Parent interface for tick processor. +// Can be resource or sessions connected to resource. +class TTickProcessor { +public: + virtual ~TTickProcessor() = default; + + virtual size_t GetLevel() const = 0; // Level in the tree for proper ordering. + virtual TTickProcessorId GetTickProcessorId() const = 0; + + void Schedule(TTickProcessorQueue& queue, TInstant time) { + if (!Scheduled) { + Scheduled = true; + queue.Push({GetTickProcessorId(), GetLevel(), time}); + } + } + + void Process(TTickProcessorQueue& queue, TInstant now) { + Scheduled = false; + DoProcess(queue, now); + } + +private: + virtual void DoProcess(TTickProcessorQueue& queue, TInstant now) = 0; + +private: + bool Scheduled = false; +}; + +// Resource sink - encapsulates send method (to tablet pipe in fact). +// When session is reconnected, this object is changed to new one +// with different actor to send resource to. +class IResourceSink : public TThrRefBase { +public: + using TPtr = TIntrusivePtr<IResourceSink>; + + // Successful resource allocation notification. + virtual void Send(ui64 resourceId, double amount, const NKikimrKesus::TStreamingQuoterResource* props) = 0; + + // Notification about resource allocation error. For example, when resource was deleted during usage. + // This notification means, that Kesus will not process resource with given id. + // Other resources from this client will be continued processing as usual. + virtual void CloseSession(ui64 resourceId, Ydb::StatusIds::StatusCode status, const TString& reason) = 0; +}; + +// Common interface for session to resource. +class TQuoterSession : public TTickProcessor { +public: TQuoterSession(const NActors::TActorId& clientId, TQuoterResourceTree* resource); - - virtual ~TQuoterSession() = default; - - TQuoterResourceTree* GetResource() { - return Resource; - } - - const TQuoterResourceTree* GetResource() const { - return Resource; - } - - // Client actor id (remote client: quoter proxy). + + virtual ~TQuoterSession() = default; + + TQuoterResourceTree* GetResource() { + return Resource; + } + + const TQuoterResourceTree* GetResource() const { + return Resource; + } + + // Client actor id (remote client: quoter proxy). const NActors::TActorId& GetClientId() const { - return ClientId; - } - - // Set new sink or change in case of reconnection. - void SetResourceSink(const IResourceSink::TPtr& sink) { - ResourceSink = sink; - } - - virtual void Send(double spent); - - // Reaction for quoter runtime events: TEvSubscribeOnResources and TEvUpdateConsumptionState. 
- virtual void UpdateConsumptionState(bool consume, double amount, TTickProcessorQueue& queue, TInstant now) = 0; - + return ClientId; + } + + // Set new sink or change in case of reconnection. + void SetResourceSink(const IResourceSink::TPtr& sink) { + ResourceSink = sink; + } + + virtual void Send(double spent); + + // Reaction for quoter runtime events: TEvSubscribeOnResources and TEvUpdateConsumptionState. + virtual void UpdateConsumptionState(bool consume, double amount, TTickProcessorQueue& queue, TInstant now) = 0; + // Reaction for quoter runtime event TEvAccountResources. virtual TInstant Account(TInstant start, TDuration interval, const double* values, size_t size, TTickProcessorQueue& queue, TInstant now) = 0; - // Close session when resource is deleted. - virtual void CloseSession(Ydb::StatusIds::StatusCode status, const TString& reason); - - // Properties for viewer - virtual bool IsActive() const { - return Active; - } - - virtual double GetTotalConsumed() const { - return TotalConsumed; - } - - virtual double GetAmountRequested() const { - return AmountRequested; - } - - virtual void OnPropsChanged() { - NeedSendChangedProps = true; - Send(0); // Update props immediately // KIKIMR-8563 - } - + // Close session when resource is deleted. + virtual void CloseSession(Ydb::StatusIds::StatusCode status, const TString& reason); + + // Properties for viewer + virtual bool IsActive() const { + return Active; + } + + virtual double GetTotalConsumed() const { + return TotalConsumed; + } + + virtual double GetAmountRequested() const { + return AmountRequested; + } + + virtual void OnPropsChanged() { + NeedSendChangedProps = true; + Send(0); // Update props immediately // KIKIMR-8563 + } + NActors::TActorId SetPipeServerId(const NActors::TActorId& pipeServerId) { const NActors::TActorId prevId = PipeServerId; - PipeServerId = pipeServerId; - return prevId; - } - + PipeServerId = pipeServerId; + return prevId; + } + NActors::TActorId GetPipeServerId() const { - return PipeServerId; - } - -protected: - void AddAllocatedCounter(double spent); - -protected: - TQuoterResourceTree* Resource = nullptr; + return PipeServerId; + } + +protected: + void AddAllocatedCounter(double spent); + +protected: + TQuoterResourceTree* Resource = nullptr; NActors::TActorId ClientId; NActors::TActorId PipeServerId; - double AmountRequested = 0.0; - double TotalConsumed = 0.0; // Only for session statistics. Accuracy of this variable will degrade in time. - bool Active = false; - bool NeedSendChangedProps = false; - IResourceSink::TPtr ResourceSink; -}; - -// Common interface for hierarchical quoter resource. -class TQuoterResourceTree : public TTickProcessor { -public: + double AmountRequested = 0.0; + double TotalConsumed = 0.0; // Only for session statistics. Accuracy of this variable will degrade in time. + bool Active = false; + bool NeedSendChangedProps = false; + IResourceSink::TPtr ResourceSink; +}; + +// Common interface for hierarchical quoter resource. 
+class TQuoterResourceTree : public TTickProcessor { +public: TQuoterResourceTree(ui64 resourceId, ui64 parentId, NActors::TActorId kesus, const IBillSink::TPtr& billSink, const NKikimrKesus::TStreamingQuoterResource& props); - TQuoterResourceTree(TQuoterResourceTree&&) = delete; - TQuoterResourceTree(const TQuoterResourceTree&) = delete; - - virtual ~TQuoterResourceTree() = default; - - virtual void SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters); - - ui64 GetResourceId() const { - return ResourceId; - } - - ui64 GetParentId() const { - return ParentId; - } - - TQuoterResourceTree* GetParent() { - return Parent; - } - - const TQuoterResourceTree* GetParent() const { - return Parent; - } - - const NKikimrKesus::TStreamingQuoterResource& GetProps() const { - return Props; - } - - const NKikimrKesus::TStreamingQuoterResource& GetEffectiveProps() const { - return EffectiveProps; - } - - const TString& GetPath() const { - return GetProps().GetResourcePath(); - } - - const THashSet<TQuoterResourceTree*>& GetChildren() const { - return Children; - } - - // Static children manipulation. - virtual void AddChild(TQuoterResourceTree* child); - virtual void RemoveChild(TQuoterResourceTree* child); - - virtual bool Update(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage); - - virtual bool ValidateProps(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage); - - // Runtime algorithm entry points. - virtual void CalcParameters(); // Recursively calculates all parameters for runtime algorithm. - + TQuoterResourceTree(TQuoterResourceTree&&) = delete; + TQuoterResourceTree(const TQuoterResourceTree&) = delete; + + virtual ~TQuoterResourceTree() = default; + + virtual void SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters); + + ui64 GetResourceId() const { + return ResourceId; + } + + ui64 GetParentId() const { + return ParentId; + } + + TQuoterResourceTree* GetParent() { + return Parent; + } + + const TQuoterResourceTree* GetParent() const { + return Parent; + } + + const NKikimrKesus::TStreamingQuoterResource& GetProps() const { + return Props; + } + + const NKikimrKesus::TStreamingQuoterResource& GetEffectiveProps() const { + return EffectiveProps; + } + + const TString& GetPath() const { + return GetProps().GetResourcePath(); + } + + const THashSet<TQuoterResourceTree*>& GetChildren() const { + return Children; + } + + // Static children manipulation. + virtual void AddChild(TQuoterResourceTree* child); + virtual void RemoveChild(TQuoterResourceTree* child); + + virtual bool Update(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage); + + virtual bool ValidateProps(const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage); + + // Runtime algorithm entry points. + virtual void CalcParameters(); // Recursively calculates all parameters for runtime algorithm. 
+ virtual THolder<TQuoterSession> DoCreateSession(const NActors::TActorId& clientId) = 0; - + THolder<TQuoterSession> CreateSession(const NActors::TActorId& clientId) { - THolder<TQuoterSession> session = DoCreateSession(clientId); - if (session) { - Sessions.insert(clientId); - if (Counters.Sessions) { - Counters.Sessions->Inc(); - } - } - return session; - } - + THolder<TQuoterSession> session = DoCreateSession(clientId); + if (session) { + Sessions.insert(clientId); + if (Counters.Sessions) { + Counters.Sessions->Inc(); + } + } + return session; + } + const THashSet<NActors::TActorId>& GetSessions() const { - return Sessions; - } - + return Sessions; + } + void OnSessionDisconnected(const NActors::TActorId& clientId) { - Sessions.erase(clientId); - } - - // TTickProcessor interface implementation. - size_t GetLevel() const override { - return ResourceLevel; - } - - TTickProcessorId GetTickProcessorId() const override { + Sessions.erase(clientId); + } + + // TTickProcessor interface implementation. + size_t GetLevel() const override { + return ResourceLevel; + } + + TTickProcessorId GetTickProcessorId() const override { return {NActors::TActorId(), ResourceId}; - } - - class TCounters { - public: - TIntrusivePtr<NMonitoring::TDynamicCounters> ResourceCounters; - NMonitoring::TDynamicCounters::TCounterPtr Sessions; - NMonitoring::TDynamicCounters::TCounterPtr ActiveSessions; - NMonitoring::TDynamicCounters::TCounterPtr Limit; // Current limit according to settings. If resource has no explicit limit, the counter is nullptr. - NMonitoring::TDynamicCounters::TCounterPtr ElapsedMicrosecWhenResourceActive; - - void AddAllocated(double allocated); - ui64 GetAllocated() const { - return Allocated ? Allocated->Val() : 0; - } - void SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters); - void SetLimit(TMaybe<double> limit); - - private: - NMonitoring::TDynamicCounters::TCounterPtr Allocated; - double AllocatedRemainder = 0.0; - }; - - TCounters& GetCounters() { - return Counters; - } - - void UpdateActiveTime(TInstant now); - void StopActiveTime(TInstant now); - - void SetQuoterPath(const TString& quoterPath) { - QuoterPath = quoterPath; - } - - const TString& GetQuoterPath() const { - return QuoterPath; - } - -protected: - const ui64 ResourceId; - const ui64 ParentId; + } + + class TCounters { + public: + TIntrusivePtr<NMonitoring::TDynamicCounters> ResourceCounters; + NMonitoring::TDynamicCounters::TCounterPtr Sessions; + NMonitoring::TDynamicCounters::TCounterPtr ActiveSessions; + NMonitoring::TDynamicCounters::TCounterPtr Limit; // Current limit according to settings. If resource has no explicit limit, the counter is nullptr. + NMonitoring::TDynamicCounters::TCounterPtr ElapsedMicrosecWhenResourceActive; + + void AddAllocated(double allocated); + ui64 GetAllocated() const { + return Allocated ? 
Allocated->Val() : 0; + } + void SetResourceCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> resourceCounters); + void SetLimit(TMaybe<double> limit); + + private: + NMonitoring::TDynamicCounters::TCounterPtr Allocated; + double AllocatedRemainder = 0.0; + }; + + TCounters& GetCounters() { + return Counters; + } + + void UpdateActiveTime(TInstant now); + void StopActiveTime(TInstant now); + + void SetQuoterPath(const TString& quoterPath) { + QuoterPath = quoterPath; + } + + const TString& GetQuoterPath() const { + return QuoterPath; + } + +protected: + const ui64 ResourceId; + const ui64 ParentId; NActors::TActorId Kesus; IBillSink::TPtr BillSink; - TString QuoterPath; - size_t ResourceLevel = 0; - TQuoterResourceTree* Parent = nullptr; - THashSet<TQuoterResourceTree*> Children; + TString QuoterPath; + size_t ResourceLevel = 0; + TQuoterResourceTree* Parent = nullptr; + THashSet<TQuoterResourceTree*> Children; THashSet<NActors::TActorId> Sessions; - NKikimrKesus::TStreamingQuoterResource Props; - NKikimrKesus::TStreamingQuoterResource EffectiveProps; // Props with actual values taken from Props or from parent's Props or from defaults. - TCounters Counters; - TInstant StartActiveTime = TInstant::Zero(); -}; - -// All Kesus tablet resources container. -class TQuoterResources { -public: - struct TCounters { - TIntrusivePtr<NMonitoring::TDynamicCounters> QuoterCounters; - bool DetailedCountersMode = false; - - NMonitoring::TDynamicCounters::TCounterPtr ResourceSubscriptions = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - NMonitoring::TDynamicCounters::TCounterPtr UnknownResourceSubscriptions = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - NMonitoring::TDynamicCounters::TCounterPtr ResourceConsumptionStarts = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - NMonitoring::TDynamicCounters::TCounterPtr ResourceConsumptionStops = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - NMonitoring::TDynamicCounters::TCounterPtr ElapsedMicrosecOnResourceAllocation = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - NMonitoring::TDynamicCounters::TCounterPtr TickProcessorTasksProcessed = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - }; - -public: - TQuoterResources() = default; - - TQuoterResourceTree* FindPath(const TString& resourcePath); - TQuoterResourceTree* FindId(ui64 resourceId); - const THashMap<TString, TQuoterResourceTree*>& GetAllResources() const { - return ResourcesByPath; - } - bool Exists(ui64 resourceId) const; - TQuoterResourceTree* LoadResource(ui64 resourceId, ui64 parentId, const NKikimrKesus::TStreamingQuoterResource& props); // initialization - TQuoterResourceTree* AddResource(ui64 resourceId, const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage); - bool DeleteResource(TQuoterResourceTree* resource, TString& errorMessage); - - void SetQuoterCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> quoterCounters); - void EnableDetailedCountersMode(bool enable = true); - void FillCounters(NKikimrKesus::TEvGetQuoterResourceCountersResult& counters); - + NKikimrKesus::TStreamingQuoterResource Props; + NKikimrKesus::TStreamingQuoterResource EffectiveProps; // Props with actual values taken from Props or from parent's Props or from defaults. + TCounters Counters; + TInstant StartActiveTime = TInstant::Zero(); +}; + +// All Kesus tablet resources container. 
+class TQuoterResources { +public: + struct TCounters { + TIntrusivePtr<NMonitoring::TDynamicCounters> QuoterCounters; + bool DetailedCountersMode = false; + + NMonitoring::TDynamicCounters::TCounterPtr ResourceSubscriptions = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + NMonitoring::TDynamicCounters::TCounterPtr UnknownResourceSubscriptions = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + NMonitoring::TDynamicCounters::TCounterPtr ResourceConsumptionStarts = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + NMonitoring::TDynamicCounters::TCounterPtr ResourceConsumptionStops = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + NMonitoring::TDynamicCounters::TCounterPtr ElapsedMicrosecOnResourceAllocation = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + NMonitoring::TDynamicCounters::TCounterPtr TickProcessorTasksProcessed = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + }; + +public: + TQuoterResources() = default; + + TQuoterResourceTree* FindPath(const TString& resourcePath); + TQuoterResourceTree* FindId(ui64 resourceId); + const THashMap<TString, TQuoterResourceTree*>& GetAllResources() const { + return ResourcesByPath; + } + bool Exists(ui64 resourceId) const; + TQuoterResourceTree* LoadResource(ui64 resourceId, ui64 parentId, const NKikimrKesus::TStreamingQuoterResource& props); // initialization + TQuoterResourceTree* AddResource(ui64 resourceId, const NKikimrKesus::TStreamingQuoterResource& props, TString& errorMessage); + bool DeleteResource(TQuoterResourceTree* resource, TString& errorMessage); + + void SetQuoterCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> quoterCounters); + void EnableDetailedCountersMode(bool enable = true); + void FillCounters(NKikimrKesus::TEvGetQuoterResourceCountersResult& counters); + void SetupBilling(NActors::TActorId kesus, const IBillSink::TPtr& billSink); - void ConstructTrees(); // Constructs all trees during initialization. - - size_t GetResourcesCount() const { - return ResourcesById.size(); - } - - // Checks whether resource path consists of only valid characters. - static bool IsResourcePathValid(const TString& path); - - void ProcessTick(const TTickProcessorTask& task, TTickProcessorQueue& queue); - + void ConstructTrees(); // Constructs all trees during initialization. + + size_t GetResourcesCount() const { + return ResourcesById.size(); + } + + // Checks whether resource path consists of only valid characters. 
+ static bool IsResourcePathValid(const TString& path); + + void ProcessTick(const TTickProcessorTask& task, TTickProcessorQueue& queue); + TQuoterSession* GetOrCreateSession(const NActors::TActorId& clientId, TQuoterResourceTree* resource); TQuoterSession* FindSession(const NActors::TActorId& clientId, ui64 resourceId); const TQuoterSession* FindSession(const NActors::TActorId& clientId, ui64 resourceId) const; void DisconnectSession(const NActors::TActorId& pipeServerId); void SetPipeServerId(TQuoterSessionId sessionId, const NActors::TActorId& prevId, const NActors::TActorId& id); - - void OnUpdateResourceProps(TQuoterResourceTree* rootResource); - - const TCounters& GetCounters() { - return Counters; - } - - void SetQuoterPath(const TString& quoterPath); - - -private: - TQuoterResourceTree* FindPathImpl(const TString& resourcePath); // doesn't canonize path - - void SetResourceCounters(TQuoterResourceTree* res); - void ReinitResourceCounters(); - -private: - TString QuoterPath; + + void OnUpdateResourceProps(TQuoterResourceTree* rootResource); + + const TCounters& GetCounters() { + return Counters; + } + + void SetQuoterPath(const TString& quoterPath); + + +private: + TQuoterResourceTree* FindPathImpl(const TString& resourcePath); // doesn't canonize path + + void SetResourceCounters(TQuoterResourceTree* res); + void ReinitResourceCounters(); + +private: + TString QuoterPath; NActors::TActorId Kesus; IBillSink::TPtr BillSink; - - THashMap<ui64, THolder<TQuoterResourceTree>> ResourcesById; - THashMap<TString, TQuoterResourceTree*> ResourcesByPath; - THashMap<TQuoterSessionId, THolder<TQuoterSession>> Sessions; + + THashMap<ui64, THolder<TQuoterResourceTree>> ResourcesById; + THashMap<TString, TQuoterResourceTree*> ResourcesByPath; + THashMap<TQuoterSessionId, THolder<TQuoterSession>> Sessions; THashMultiMap<NActors::TActorId, TQuoterSessionId> PipeServerIdToSession; - - TCounters Counters; -}; - -} -} + + TCounters Counters; +}; + +} +} diff --git a/ydb/core/kesus/tablet/quoter_resource_tree_ut.cpp b/ydb/core/kesus/tablet/quoter_resource_tree_ut.cpp index b280b4b29b8..679e2368043 100644 --- a/ydb/core/kesus/tablet/quoter_resource_tree_ut.cpp +++ b/ydb/core/kesus/tablet/quoter_resource_tree_ut.cpp @@ -1,643 +1,643 @@ -#include "quoter_resource_tree.h" - +#include "quoter_resource_tree.h" + #include <library/cpp/testing/gmock_in_unittest/gmock.h> #include <library/cpp/testing/unittest/registar.h> - -namespace NKikimr { -namespace NKesus { - -using namespace testing; - -class THDRRQuoterResourceTreeRuntimeTest: public TTestBase { -public: - UNIT_TEST_SUITE(THDRRQuoterResourceTreeRuntimeTest) - UNIT_TEST(TestCreateInactiveSession) - UNIT_TEST(TestAllocateResource) - UNIT_TEST(TestDeleteResourceSessions) - UNIT_TEST(TestUpdateResourceSessions) - UNIT_TEST(TestStopConsuming) - UNIT_TEST(TestUpdateConsumptionState) - UNIT_TEST(TestUpdateConsumptionStateAfterAllResourceAllocated) - UNIT_TEST(TestAllocationGranularity) - UNIT_TEST(TestDistributeResourcesBetweenConsumers) - UNIT_TEST(TestHierarchicalQuotas) - UNIT_TEST(TestHangDefence) - UNIT_TEST(TestMoreStrongChildLimit) - UNIT_TEST(TestAmountIsLessThanEpsilon) - UNIT_TEST(TestEffectiveProps) - UNIT_TEST(TestWeights) - UNIT_TEST(TestWeightsChange) - UNIT_TEST(TestVerySmallSpeed) - UNIT_TEST(TestVeryBigWeights) - UNIT_TEST(TestDeleteResourceWithActiveChildren) - UNIT_TEST(TestActiveSessionDisconnectsAndThenConnectsAgain) - UNIT_TEST(TestInactiveSessionDisconnectsAndThenConnectsAgain) - 
UNIT_TEST(TestActiveMultiresourceSessionDisconnectsAndThenConnectsAgain) - UNIT_TEST(TestInactiveMultiresourceSessionDisconnectsAndThenConnectsAgain) - UNIT_TEST_SUITE_END(); - - void SetUp() override { - Resources = MakeHolder<TQuoterResources>(); - Queue = MakeHolder<TTickProcessorQueue>(); - NextResourceId = 1; - NextActorId = 1; - Time = TInstant::Now(); - } - - void TearDown() override { - Resources = nullptr; - Queue = nullptr; - } - - // Helpers - struct TTestResourceSink : public IResourceSink { - TTestResourceSink(ui64 resourceId = 0) - : ResourceId(resourceId) - { - } - - void Send(ui64 resourceId, double amount, const NKikimrKesus::TStreamingQuoterResource* props) override { - UNIT_ASSERT(!SessionClosed); - if (ResourceId) { - UNIT_ASSERT_VALUES_EQUAL(ResourceId, resourceId); - } - UNIT_ASSERT_GE_C(amount, 0.0, "Actual amount: " << amount); - if (!amount) { - UNIT_ASSERT(props != nullptr); - } - SumAmount += amount; - OnSend(resourceId, amount, props); - }; - - void CloseSession(ui64 resourceId, Ydb::StatusIds::StatusCode status, const TString& reason) override { - UNIT_ASSERT(!IsIn(SessionClosed, resourceId)); - if (ResourceId) { - UNIT_ASSERT_VALUES_EQUAL(ResourceId, resourceId); - } - UNIT_ASSERT_UNEQUAL(status, Ydb::StatusIds::SUCCESS); - UNIT_ASSERT(!reason.empty()); - SessionClosed.insert(resourceId); - if (status == Ydb::StatusIds::NOT_FOUND) { - OnNotFound(resourceId); - } else if (status == Ydb::StatusIds::SESSION_EXPIRED) { - OnSessionExpired(resourceId); - } else { - UNIT_ASSERT_C(false, "Unexpected status code: " << status); - } - } - + +namespace NKikimr { +namespace NKesus { + +using namespace testing; + +class THDRRQuoterResourceTreeRuntimeTest: public TTestBase { +public: + UNIT_TEST_SUITE(THDRRQuoterResourceTreeRuntimeTest) + UNIT_TEST(TestCreateInactiveSession) + UNIT_TEST(TestAllocateResource) + UNIT_TEST(TestDeleteResourceSessions) + UNIT_TEST(TestUpdateResourceSessions) + UNIT_TEST(TestStopConsuming) + UNIT_TEST(TestUpdateConsumptionState) + UNIT_TEST(TestUpdateConsumptionStateAfterAllResourceAllocated) + UNIT_TEST(TestAllocationGranularity) + UNIT_TEST(TestDistributeResourcesBetweenConsumers) + UNIT_TEST(TestHierarchicalQuotas) + UNIT_TEST(TestHangDefence) + UNIT_TEST(TestMoreStrongChildLimit) + UNIT_TEST(TestAmountIsLessThanEpsilon) + UNIT_TEST(TestEffectiveProps) + UNIT_TEST(TestWeights) + UNIT_TEST(TestWeightsChange) + UNIT_TEST(TestVerySmallSpeed) + UNIT_TEST(TestVeryBigWeights) + UNIT_TEST(TestDeleteResourceWithActiveChildren) + UNIT_TEST(TestActiveSessionDisconnectsAndThenConnectsAgain) + UNIT_TEST(TestInactiveSessionDisconnectsAndThenConnectsAgain) + UNIT_TEST(TestActiveMultiresourceSessionDisconnectsAndThenConnectsAgain) + UNIT_TEST(TestInactiveMultiresourceSessionDisconnectsAndThenConnectsAgain) + UNIT_TEST_SUITE_END(); + + void SetUp() override { + Resources = MakeHolder<TQuoterResources>(); + Queue = MakeHolder<TTickProcessorQueue>(); + NextResourceId = 1; + NextActorId = 1; + Time = TInstant::Now(); + } + + void TearDown() override { + Resources = nullptr; + Queue = nullptr; + } + + // Helpers + struct TTestResourceSink : public IResourceSink { + TTestResourceSink(ui64 resourceId = 0) + : ResourceId(resourceId) + { + } + + void Send(ui64 resourceId, double amount, const NKikimrKesus::TStreamingQuoterResource* props) override { + UNIT_ASSERT(!SessionClosed); + if (ResourceId) { + UNIT_ASSERT_VALUES_EQUAL(ResourceId, resourceId); + } + UNIT_ASSERT_GE_C(amount, 0.0, "Actual amount: " << amount); + if (!amount) { + UNIT_ASSERT(props != nullptr); + } 
+ SumAmount += amount; + OnSend(resourceId, amount, props); + }; + + void CloseSession(ui64 resourceId, Ydb::StatusIds::StatusCode status, const TString& reason) override { + UNIT_ASSERT(!IsIn(SessionClosed, resourceId)); + if (ResourceId) { + UNIT_ASSERT_VALUES_EQUAL(ResourceId, resourceId); + } + UNIT_ASSERT_UNEQUAL(status, Ydb::StatusIds::SUCCESS); + UNIT_ASSERT(!reason.empty()); + SessionClosed.insert(resourceId); + if (status == Ydb::StatusIds::NOT_FOUND) { + OnNotFound(resourceId); + } else if (status == Ydb::StatusIds::SESSION_EXPIRED) { + OnSessionExpired(resourceId); + } else { + UNIT_ASSERT_C(false, "Unexpected status code: " << status); + } + } + MOCK_METHOD(void, OnSend, (ui64 resourceId, double amount, const NKikimrKesus::TStreamingQuoterResource* props), ()); MOCK_METHOD(void, OnNotFound, (ui64 resourceId), ()); MOCK_METHOD(void, OnSessionExpired, (ui64 resourceId), ()); - - const ui64 ResourceId; - THashSet<ui64> SessionClosed; - double SumAmount = 0.0; - }; - - struct TTestSession { - TTestSession() = default; - - TTestSession(TQuoterSession* session, TIntrusivePtr<TTestResourceSink> sink) - : Session(session) - , Sink(std::move(sink)) - { - } - - TQuoterSession* Session; - TIntrusivePtr<TTestResourceSink> Sink; - }; - - TInstant ProcessOneTick() { - UNIT_ASSERT(!Queue->Empty()); - const TInstant time = Queue->Top().Time; - TInstant nextTime; - TTickProcessorQueue queue; - do { - const TTickProcessorTask task = Queue->Top(); - Queue->Pop(); - Resources->ProcessTick(task, queue); - nextTime = !Queue->Empty() ? Queue->Top().Time : TInstant::Max(); - } while (nextTime == time); - Queue->Merge(std::move(queue)); - Time = time; - return time; - } - - TInstant ProcessTicks(size_t count) { - TInstant time; - while (count--) { - time = ProcessOneTick(); - } - return time; - } - - size_t ProcessAllTicks() { - size_t ticksPassed = 0; - while (!Queue->Empty()) { - ProcessOneTick(); - ++ticksPassed; - } - return ticksPassed; - } - - void AssertQueueEmpty() { - UNIT_ASSERT(Queue->Empty()); - } - - void AssertQueueNotEmpty() { - UNIT_ASSERT(!Queue->Empty()); - } - - TQuoterResourceTree* AddResource(const TString& path, double maxUnitsPerSecond) { - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(maxUnitsPerSecond); - return AddResource(path, cfg); - } - - TQuoterResourceTree* AddResource(const TString& path, const NKikimrKesus::THierarchicalDRRResourceConfig& config = {}) { - NKikimrKesus::TStreamingQuoterResource cfg; - cfg.SetResourcePath(path); - *cfg.MutableHierarhicalDRRResourceConfig() = config; - TString errorMessage; - TQuoterResourceTree* res = Resources->AddResource(NextResourceId++, cfg, errorMessage); - UNIT_ASSERT_C(res, "Failed to add resource [" << path << "]: " << errorMessage); - return res; - } - + + const ui64 ResourceId; + THashSet<ui64> SessionClosed; + double SumAmount = 0.0; + }; + + struct TTestSession { + TTestSession() = default; + + TTestSession(TQuoterSession* session, TIntrusivePtr<TTestResourceSink> sink) + : Session(session) + , Sink(std::move(sink)) + { + } + + TQuoterSession* Session; + TIntrusivePtr<TTestResourceSink> Sink; + }; + + TInstant ProcessOneTick() { + UNIT_ASSERT(!Queue->Empty()); + const TInstant time = Queue->Top().Time; + TInstant nextTime; + TTickProcessorQueue queue; + do { + const TTickProcessorTask task = Queue->Top(); + Queue->Pop(); + Resources->ProcessTick(task, queue); + nextTime = !Queue->Empty() ? 
Queue->Top().Time : TInstant::Max(); + } while (nextTime == time); + Queue->Merge(std::move(queue)); + Time = time; + return time; + } + + TInstant ProcessTicks(size_t count) { + TInstant time; + while (count--) { + time = ProcessOneTick(); + } + return time; + } + + size_t ProcessAllTicks() { + size_t ticksPassed = 0; + while (!Queue->Empty()) { + ProcessOneTick(); + ++ticksPassed; + } + return ticksPassed; + } + + void AssertQueueEmpty() { + UNIT_ASSERT(Queue->Empty()); + } + + void AssertQueueNotEmpty() { + UNIT_ASSERT(!Queue->Empty()); + } + + TQuoterResourceTree* AddResource(const TString& path, double maxUnitsPerSecond) { + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(maxUnitsPerSecond); + return AddResource(path, cfg); + } + + TQuoterResourceTree* AddResource(const TString& path, const NKikimrKesus::THierarchicalDRRResourceConfig& config = {}) { + NKikimrKesus::TStreamingQuoterResource cfg; + cfg.SetResourcePath(path); + *cfg.MutableHierarhicalDRRResourceConfig() = config; + TString errorMessage; + TQuoterResourceTree* res = Resources->AddResource(NextResourceId++, cfg, errorMessage); + UNIT_ASSERT_C(res, "Failed to add resource [" << path << "]: " << errorMessage); + return res; + } + std::vector<TTestSession> CreateSession(std::vector<TQuoterResourceTree*> resources, bool consume, double amount, TIntrusivePtr<TTestResourceSink> sink = nullptr, NActors::TActorId clientId = {}, NActors::TActorId pipeServerId = {}) { - UNIT_ASSERT(!resources.empty()); - - if (!sink) { - sink = new TTestResourceSink(resources.size() == 1 ? resources[0]->GetResourceId() : 0); - } - const bool newClientId = !clientId; - if (!clientId) { - clientId = NewActorID(); - } - if (!pipeServerId) { - pipeServerId = NewActorID(); - } - TTickProcessorQueue queue; - std::vector<TTestSession> result; - result.reserve(resources.size()); - for (TQuoterResourceTree* resource : resources) { - TQuoterSession* session = Resources->GetOrCreateSession(clientId, resource); - UNIT_ASSERT(session); - session->SetResourceSink(sink); + UNIT_ASSERT(!resources.empty()); + + if (!sink) { + sink = new TTestResourceSink(resources.size() == 1 ? 
resources[0]->GetResourceId() : 0); + } + const bool newClientId = !clientId; + if (!clientId) { + clientId = NewActorID(); + } + if (!pipeServerId) { + pipeServerId = NewActorID(); + } + TTickProcessorQueue queue; + std::vector<TTestSession> result; + result.reserve(resources.size()); + for (TQuoterResourceTree* resource : resources) { + TQuoterSession* session = Resources->GetOrCreateSession(clientId, resource); + UNIT_ASSERT(session); + session->SetResourceSink(sink); const NActors::TActorId prevPipeServerId = session->SetPipeServerId(pipeServerId); - UNIT_ASSERT(!newClientId || !prevPipeServerId); - Resources->SetPipeServerId(TQuoterSessionId(clientId, resource->GetResourceId()), prevPipeServerId, pipeServerId); - session->UpdateConsumptionState(consume, amount, queue, Time); - result.emplace_back(session, sink); - } - Queue->Merge(std::move(queue)); - UNIT_ASSERT_VALUES_EQUAL(resources.size(), result.size()); - return result; - } - + UNIT_ASSERT(!newClientId || !prevPipeServerId); + Resources->SetPipeServerId(TQuoterSessionId(clientId, resource->GetResourceId()), prevPipeServerId, pipeServerId); + session->UpdateConsumptionState(consume, amount, queue, Time); + result.emplace_back(session, sink); + } + Queue->Merge(std::move(queue)); + UNIT_ASSERT_VALUES_EQUAL(resources.size(), result.size()); + return result; + } + TTestSession CreateSession(TQuoterResourceTree* resource, bool consume, double amount, TIntrusivePtr<TTestResourceSink> sink = nullptr, NActors::TActorId clientId = {}, NActors::TActorId pipeServerId = {}) { - return CreateSession(std::vector<TQuoterResourceTree*>(1, resource), consume, amount, sink, clientId, pipeServerId)[0]; - } - - void DisconnectSession(TQuoterSession* session) { - Resources->DisconnectSession(session->GetPipeServerId()); - } - + return CreateSession(std::vector<TQuoterResourceTree*>(1, resource), consume, amount, sink, clientId, pipeServerId)[0]; + } + + void DisconnectSession(TQuoterSession* session) { + Resources->DisconnectSession(session->GetPipeServerId()); + } + NActors::TActorId NewActorID() { - const ui64 x1 = NextActorId++; - const ui64 x2 = NextActorId++; + const ui64 x1 = NextActorId++; + const ui64 x2 = NextActorId++; return NActors::TActorId(x1, x2); - } - - // Tests - void TestCreateInactiveSession() { - auto* res = AddResource("/Root", 100); // with small burst - auto session = CreateSession(res, false, 0); - AssertQueueEmpty(); - } - - void TestAllocateResource() { - auto* res = AddResource("/Root", 10); - auto session = CreateSession(res, true, 10); - EXPECT_CALL(*session.Sink, OnSend(_, DoubleEq(1), nullptr)) - .Times(10); - UNIT_ASSERT_VALUES_EQUAL(ProcessAllTicks(), 11); - UNIT_ASSERT_DOUBLES_EQUAL(session.Sink->SumAmount, 10, 0.01); - } - - void TestDeleteResourceSessions() { - auto* res = AddResource("/Root", 10); - auto inactiveSession = CreateSession(res, false, 0); - auto activeSession = CreateSession(res, true, 10); - EXPECT_CALL(*inactiveSession.Sink, OnNotFound(_)); - EXPECT_CALL(*activeSession.Sink, OnNotFound(_)); - EXPECT_CALL(*activeSession.Sink, OnSend(_, DoubleNear(1, 0.01), nullptr)) - .Times(5); - ProcessTicks(5); - AssertQueueNotEmpty(); - TString msg; - UNIT_ASSERT(Resources->DeleteResource(res, msg)); - ProcessOneTick(); - AssertQueueEmpty(); - } - - void TestUpdateResourceSessions() { - auto* root = AddResource("/Root", 100); - auto* res = AddResource("/Root/Res"); - auto session = CreateSession(res, true, 60); - auto& oldSettingsAllocation = - EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), 
nullptr)) - .Times(2); - auto& updateSend = - EXPECT_CALL(*session.Sink, OnSend(_, 0.0, _)) - .After(oldSettingsAllocation) - .WillOnce(Invoke([](ui64, double, const NKikimrKesus::TStreamingQuoterResource* props) { - UNIT_ASSERT(props != nullptr); - UNIT_ASSERT_DOUBLES_EQUAL(props->GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), 400, 0.001); - })); - EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(40, 0.01), nullptr)) - .After(updateSend); - - ProcessTicks(2); - NKikimrKesus::TStreamingQuoterResource newProps; - newProps.MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(400); - TString msg; - UNIT_ASSERT(root->Update(newProps, msg)); - Resources->OnUpdateResourceProps(root); - ProcessAllTicks(); - } - - void TestStopConsuming() { - auto* res = AddResource("/Root", 100); - auto session = CreateSession(res, true, 100); - EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) - .Times(5); - const TInstant lastProcessed = ProcessTicks(5); - TTickProcessorQueue queue; - session.Session->UpdateConsumptionState(false, 0, queue, lastProcessed); - Queue->Merge(std::move(queue)); - UNIT_ASSERT_VALUES_EQUAL(ProcessAllTicks(), 1); - } - - void TestUpdateConsumptionState() { - auto* res = AddResource("/Root", 100); - auto session = CreateSession(res, true, 100); - EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) - .Times(5); - const TInstant lastProcessed = ProcessTicks(5); - session.Sink = new TTestResourceSink(res->GetResourceId()); - session.Session->SetResourceSink(session.Sink); - TTickProcessorQueue queue; - session.Session->UpdateConsumptionState(true, 20, queue, lastProcessed); - Queue->Merge(std::move(queue)); - EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) - .Times(2); - UNIT_ASSERT_VALUES_EQUAL(ProcessAllTicks(), 3); - } - - void TestUpdateConsumptionStateAfterAllResourceAllocated() { - auto* res = AddResource("/Root", 100); - auto session = CreateSession(res, true, 500); - EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) - .Times(50); - ProcessAllTicks(); - UNIT_ASSERT_DOUBLES_EQUAL(session.Sink->SumAmount, 500, 0.01); - - // Update state after all resource was allocated. - session.Sink = new TTestResourceSink(res->GetResourceId()); - session.Session->SetResourceSink(session.Sink); - EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) - .Times(50); - - TTickProcessorQueue queue; - session.Session->UpdateConsumptionState(true, 500, queue, Time); // Burst should be sent in UpdateConsumptionState() function. - Queue->Merge(std::move(queue)); - - ProcessAllTicks(); - UNIT_ASSERT_DOUBLES_EQUAL(session.Sink->SumAmount, 500, 0.01); - } - - void TestAllocationGranularity() { - auto* res = AddResource("/Root", 10); - auto session1 = CreateSession(res, true, 0.4); - auto session2 = CreateSession(res, true, 0.4); - // 1 resource per second, but granularity is 0.1 per second, so both sessions will be satisfied. 
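// A minimal standalone sketch of the arithmetic behind the two DoubleEq(0.4)
// expectations below. The ~0.1 s tick length is an assumption carried over
// from the rest of this suite (e.g. "0.3 resource in one tick" for
// MaxUnitsPerSecond = 3 in TestMoreStrongChildLimit).
#include <cassert>

int main() {
    const double maxUnitsPerSecond = 10.0;
    const double tickSeconds = 0.1;                            // assumed tick length
    const double tickBudget = maxUnitsPerSecond * tickSeconds; // ~1 unit per tick
    // Both sessions ask for 0.4; together that fits into a single tick's
    // budget, so each is expected to receive its full amount after the
    // first ProcessOneTick().
    assert(0.4 + 0.4 <= tickBudget);
}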
- EXPECT_CALL(*session1.Sink, OnSend(_, DoubleEq(0.4), nullptr)); - EXPECT_CALL(*session2.Sink, OnSend(_, DoubleEq(0.4), nullptr)); - ProcessOneTick(); - } - - - void TestDistributeResourcesBetweenConsumers() { - auto* res = AddResource("/Root", 10); - TTestSession sessions[] = { - CreateSession(res, true, 2), - CreateSession(res, true, 2), - CreateSession(res, true, 2), - CreateSession(res, true, 2), - }; - for (auto& session : sessions) { - EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(0.25, 0.01), nullptr)) - .Times(8); - } - - auto AssertSessionsAreEquallyFilled = [&]() { - for (size_t i = 1; i < Y_ARRAY_SIZE(sessions); ++i) { - UNIT_ASSERT_DOUBLES_EQUAL(sessions[i - 1].Sink->SumAmount, sessions[i].Sink->SumAmount, 0.01); - } - }; - AssertSessionsAreEquallyFilled(); - ProcessOneTick(); - AssertSessionsAreEquallyFilled(); - ProcessOneTick(); - AssertSessionsAreEquallyFilled(); - ProcessOneTick(); - AssertSessionsAreEquallyFilled(); - ProcessOneTick(); - AssertSessionsAreEquallyFilled(); - - ProcessOneTick(); - AssertSessionsAreEquallyFilled(); - ProcessOneTick(); - AssertSessionsAreEquallyFilled(); - ProcessOneTick(); - AssertSessionsAreEquallyFilled(); - ProcessOneTick(); - AssertSessionsAreEquallyFilled(); - } - - void TestHierarchicalQuotas() { - auto* root = AddResource("/Root", 10); - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(7); - auto* res1 = AddResource("/Root/Res1", cfg); - cfg.SetMaxUnitsPerSecond(1); - auto* res2 = AddResource("/Root/Res2", cfg); - - auto rootSession = CreateSession(root, true, 10); - auto res1Session = CreateSession(res1, true, 10); - auto res2Session = CreateSession(res2, true, 10); - - EXPECT_CALL(*rootSession.Sink, OnSend(_, Le(1.0), nullptr)) - .Times(AtLeast(1)); - EXPECT_CALL(*res1Session.Sink, OnSend(_, Le(0.7), nullptr)) - .Times(AtLeast(1)); - EXPECT_CALL(*res2Session.Sink, OnSend(_, Le(0.1), nullptr)) - .Times(AtLeast(1)); - - ProcessOneTick(); - ProcessOneTick(); - } - - void TestHangDefence() { - auto* root = AddResource("/Root", 100); - // All these sessions will be filled in one tick. Next resource accumulation will spend zero amount of resource. - // But despite that algorithm should detect it and finish. - auto session1 = CreateSession(root, true, 0.1); - auto session2 = CreateSession(root, true, 0.1); - auto session3 = CreateSession(root, true, 0.1); - ProcessAllTicks(); - } - - void TestMoreStrongChildLimit() { - AddResource("/Root", 100); - auto* res = AddResource("/Root/Res", 3); // 0.3 resource in one tick - auto session = CreateSession(res, true, 3); - - // Parent resource tick amount is 1, but our resource tick is 0.3 - EXPECT_CALL(*session.Sink, OnSend(_, DoubleEq(0.3), nullptr)) - .Times(10); - ProcessAllTicks(); - } - - void TestAmountIsLessThanEpsilon() { - auto* root = AddResource("/Root", 100); - auto session = CreateSession(root, true, 0.000001); - - // Session must not hang even if client requested too small resource. 
- EXPECT_CALL(*session.Sink, OnSend(_, Ge(0.000001), nullptr)); // send epsilon - ProcessAllTicks(); - } - - void TestEffectiveProps() { - NKikimrKesus::THierarchicalDRRResourceConfig rootCfg; - rootCfg.SetMaxUnitsPerSecond(100); - AddResource("/Root", rootCfg); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(200); - auto* res = AddResource("/Root/Res", cfg); - - UNIT_ASSERT_DOUBLES_EQUAL(res->GetEffectiveProps().GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), 100, 0.001); // min - - auto* res2 = AddResource("/Root/Res2"); - UNIT_ASSERT_DOUBLES_EQUAL(res2->GetEffectiveProps().GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), 100, 0.001); // inherits - } - - void TestWeights() { - AddResource("/Root", 10); - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetWeight(7); - auto* lightRes = AddResource("/Root/LightRes", cfg); - cfg.SetWeight(42); - auto* heavyRes = AddResource("/Root/HeavyRes", cfg); - - auto lightResSession = CreateSession(lightRes, true, 10); - auto heavyResSession = CreateSession(heavyRes, true, 10); - - EXPECT_CALL(*lightResSession.Sink, OnSend(_, DoubleNear(1.0 * 7.0 / 49.0, 0.001), nullptr)) - .Times(AtLeast(1)); - EXPECT_CALL(*heavyResSession.Sink, OnSend(_, DoubleNear(1.0 * 42.0 / 49.0, 0.001), nullptr)) - .Times(AtLeast(1)); - - ProcessOneTick(); - ProcessOneTick(); - } - - void TestWeightsChange() { - AddResource("/Root", 10); - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetWeight(7); - auto* lightRes = AddResource("/Root/LightRes", cfg); - cfg.SetWeight(42); - auto* heavyRes = AddResource("/Root/HeavyRes", cfg); - - auto lightResSession = CreateSession(lightRes, true, 10); - auto heavyResSession = CreateSession(heavyRes, true, 10); - - // Before weights change - auto& firstLightResAllocation = EXPECT_CALL(*lightResSession.Sink, OnSend(_, DoubleNear(1.0 * 7.0 / 49.0, 0.001), nullptr)) - .Times(2); - auto& firstHeavyResAllocation = EXPECT_CALL(*heavyResSession.Sink, OnSend(_, DoubleNear(1.0 * 42.0 / 49.0, 0.001), nullptr)) - .Times(2); - - auto& heaveResPropsChange = EXPECT_CALL(*heavyResSession.Sink, OnSend(_, 0.0, _)) - .Times(1) - .After(firstHeavyResAllocation); - - // After weights change - EXPECT_CALL(*lightResSession.Sink, OnSend(_, DoubleNear(1.0 * 7.0 / 8.0, 0.001), nullptr)) - .Times(2) - .After(firstLightResAllocation); - EXPECT_CALL(*heavyResSession.Sink, OnSend(_, DoubleNear(1.0 / 8.0, 0.001), nullptr)) - .Times(2) - .After(heaveResPropsChange); - - ProcessOneTick(); - ProcessOneTick(); - - // Update one weight. - // It is expected that a new weight will be applied in the next tick. 
- TString msg; - NKikimrKesus::TStreamingQuoterResource newPropsWithoutWeight; - newPropsWithoutWeight.MutableHierarhicalDRRResourceConfig(); - UNIT_ASSERT_C(heavyRes->Update(newPropsWithoutWeight, msg), msg); - Resources->OnUpdateResourceProps(heavyRes); - - ProcessOneTick(); - ProcessOneTick(); - } - - void TestVerySmallSpeed() { - auto* res = AddResource("/Root", 0.000000000000000000000000000000000000000001); - - auto session = CreateSession(res, true, 10); - - EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(0.000000000000000000000000000000000000000001, 0.000000000000000000000000000000000000000001), nullptr)) - .Times(1); - - ProcessOneTick(); - } - - void TestVeryBigWeights() { - AddResource("/Root", 10); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetWeight(std::numeric_limits<ui32>::max() - 3); - auto* res1 = AddResource("/Root/Res1", cfg); - auto* res2 = AddResource("/Root/Res1/Res2", cfg); - - auto session11 = CreateSession(res1, true, 10); - auto session12 = CreateSession(res1, true, 10); - auto session13 = CreateSession(res1, true, 10); - auto session14 = CreateSession(res1, true, 10); - auto session15 = CreateSession(res1, true, 10); - - auto session21 = CreateSession(res2, true, 10); - auto session22 = CreateSession(res2, true, 10); - - EXPECT_CALL(*session11.Sink, OnSend(_, DoubleNear(0.000001, 0.000001), nullptr)) - .Times(1); - EXPECT_CALL(*session12.Sink, OnSend(_, DoubleNear(0.000001, 0.000001), nullptr)) - .Times(1); - EXPECT_CALL(*session13.Sink, OnSend(_, DoubleNear(0.000001, 0.000001), nullptr)) - .Times(1); - EXPECT_CALL(*session14.Sink, OnSend(_, DoubleNear(0.000001, 0.000001), nullptr)) - .Times(1); - EXPECT_CALL(*session15.Sink, OnSend(_, DoubleNear(0.000001, 0.000001), nullptr)) - .Times(1); - EXPECT_CALL(*session21.Sink, OnSend(_, DoubleNear(0.5, 0.001), nullptr)) - .Times(1); - EXPECT_CALL(*session22.Sink, OnSend(_, DoubleNear(0.5, 0.001), nullptr)) - .Times(1); - - ProcessOneTick(); - } - - void TestDeleteResourceWithActiveChildren() { - AddResource("Root", 10); - auto* res1 = AddResource("Root/Res1"); - auto* res2 = AddResource("Root/Res1/Res2"); - - auto session1 = CreateSession(res1, true, std::numeric_limits<double>::infinity()); - auto session2 = CreateSession(res2, true, std::numeric_limits<double>::infinity()); - auto session3 = CreateSession(res2, true, std::numeric_limits<double>::infinity()); - - EXPECT_CALL(*session1.Sink, OnNotFound(_)); - EXPECT_CALL(*session2.Sink, OnNotFound(_)); - EXPECT_CALL(*session3.Sink, OnSessionExpired(_)); - - ProcessTicks(3); - - DisconnectSession(session3.Session); - - TString msg; - UNIT_ASSERT_C(Resources->DeleteResource(res2, msg), msg); - ProcessTicks(3); - - UNIT_ASSERT_C(Resources->DeleteResource(res1, msg), msg); - ProcessAllTicks(); - } - - void TestSessionDisconnectsAndThenConnectsAgainImpl(bool consumes, size_t resourcesCount = 1) { - UNIT_ASSERT(resourcesCount > 0); - - AddResource("Root", 100500); - + } + + // Tests + void TestCreateInactiveSession() { + auto* res = AddResource("/Root", 100); // with small burst + auto session = CreateSession(res, false, 0); + AssertQueueEmpty(); + } + + void TestAllocateResource() { + auto* res = AddResource("/Root", 10); + auto session = CreateSession(res, true, 10); + EXPECT_CALL(*session.Sink, OnSend(_, DoubleEq(1), nullptr)) + .Times(10); + UNIT_ASSERT_VALUES_EQUAL(ProcessAllTicks(), 11); + UNIT_ASSERT_DOUBLES_EQUAL(session.Sink->SumAmount, 10, 0.01); + } + + void TestDeleteResourceSessions() { + auto* res = AddResource("/Root", 10); + auto 
inactiveSession = CreateSession(res, false, 0); + auto activeSession = CreateSession(res, true, 10); + EXPECT_CALL(*inactiveSession.Sink, OnNotFound(_)); + EXPECT_CALL(*activeSession.Sink, OnNotFound(_)); + EXPECT_CALL(*activeSession.Sink, OnSend(_, DoubleNear(1, 0.01), nullptr)) + .Times(5); + ProcessTicks(5); + AssertQueueNotEmpty(); + TString msg; + UNIT_ASSERT(Resources->DeleteResource(res, msg)); + ProcessOneTick(); + AssertQueueEmpty(); + } + + void TestUpdateResourceSessions() { + auto* root = AddResource("/Root", 100); + auto* res = AddResource("/Root/Res"); + auto session = CreateSession(res, true, 60); + auto& oldSettingsAllocation = + EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) + .Times(2); + auto& updateSend = + EXPECT_CALL(*session.Sink, OnSend(_, 0.0, _)) + .After(oldSettingsAllocation) + .WillOnce(Invoke([](ui64, double, const NKikimrKesus::TStreamingQuoterResource* props) { + UNIT_ASSERT(props != nullptr); + UNIT_ASSERT_DOUBLES_EQUAL(props->GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), 400, 0.001); + })); + EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(40, 0.01), nullptr)) + .After(updateSend); + + ProcessTicks(2); + NKikimrKesus::TStreamingQuoterResource newProps; + newProps.MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(400); + TString msg; + UNIT_ASSERT(root->Update(newProps, msg)); + Resources->OnUpdateResourceProps(root); + ProcessAllTicks(); + } + + void TestStopConsuming() { + auto* res = AddResource("/Root", 100); + auto session = CreateSession(res, true, 100); + EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) + .Times(5); + const TInstant lastProcessed = ProcessTicks(5); + TTickProcessorQueue queue; + session.Session->UpdateConsumptionState(false, 0, queue, lastProcessed); + Queue->Merge(std::move(queue)); + UNIT_ASSERT_VALUES_EQUAL(ProcessAllTicks(), 1); + } + + void TestUpdateConsumptionState() { + auto* res = AddResource("/Root", 100); + auto session = CreateSession(res, true, 100); + EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) + .Times(5); + const TInstant lastProcessed = ProcessTicks(5); + session.Sink = new TTestResourceSink(res->GetResourceId()); + session.Session->SetResourceSink(session.Sink); + TTickProcessorQueue queue; + session.Session->UpdateConsumptionState(true, 20, queue, lastProcessed); + Queue->Merge(std::move(queue)); + EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) + .Times(2); + UNIT_ASSERT_VALUES_EQUAL(ProcessAllTicks(), 3); + } + + void TestUpdateConsumptionStateAfterAllResourceAllocated() { + auto* res = AddResource("/Root", 100); + auto session = CreateSession(res, true, 500); + EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) + .Times(50); + ProcessAllTicks(); + UNIT_ASSERT_DOUBLES_EQUAL(session.Sink->SumAmount, 500, 0.01); + + // Update state after all resource was allocated. + session.Sink = new TTestResourceSink(res->GetResourceId()); + session.Session->SetResourceSink(session.Sink); + EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(10, 0.01), nullptr)) + .Times(50); + + TTickProcessorQueue queue; + session.Session->UpdateConsumptionState(true, 500, queue, Time); // Burst should be sent in UpdateConsumptionState() function. 
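// The send-count arithmetic that the .Times(...) expectations in this suite
// rely on, as a minimal sketch; the 0.1 s tick is an assumed default, and
// ExpectedSends is a hypothetical helper, not part of the tested code.
#include <cassert>
#include <cmath>

long ExpectedSends(double requested, double maxUnitsPerSecond, double tickSeconds = 0.1) {
    // One send of roughly maxUnitsPerSecond * tickSeconds units per tick.
    return std::lround(requested / (maxUnitsPerSecond * tickSeconds));
}

int main() {
    assert(ExpectedSends(10, 10) == 10);   // TestAllocateResource: .Times(10)
    assert(ExpectedSends(500, 100) == 50); // the .Times(50) expectations above
}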
+ Queue->Merge(std::move(queue)); + + ProcessAllTicks(); + UNIT_ASSERT_DOUBLES_EQUAL(session.Sink->SumAmount, 500, 0.01); + } + + void TestAllocationGranularity() { + auto* res = AddResource("/Root", 10); + auto session1 = CreateSession(res, true, 0.4); + auto session2 = CreateSession(res, true, 0.4); + // 1 resource per second, but granularity is 0.1 per second, so both sessions will be satisfied. + EXPECT_CALL(*session1.Sink, OnSend(_, DoubleEq(0.4), nullptr)); + EXPECT_CALL(*session2.Sink, OnSend(_, DoubleEq(0.4), nullptr)); + ProcessOneTick(); + } + + + void TestDistributeResourcesBetweenConsumers() { + auto* res = AddResource("/Root", 10); + TTestSession sessions[] = { + CreateSession(res, true, 2), + CreateSession(res, true, 2), + CreateSession(res, true, 2), + CreateSession(res, true, 2), + }; + for (auto& session : sessions) { + EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(0.25, 0.01), nullptr)) + .Times(8); + } + + auto AssertSessionsAreEquallyFilled = [&]() { + for (size_t i = 1; i < Y_ARRAY_SIZE(sessions); ++i) { + UNIT_ASSERT_DOUBLES_EQUAL(sessions[i - 1].Sink->SumAmount, sessions[i].Sink->SumAmount, 0.01); + } + }; + AssertSessionsAreEquallyFilled(); + ProcessOneTick(); + AssertSessionsAreEquallyFilled(); + ProcessOneTick(); + AssertSessionsAreEquallyFilled(); + ProcessOneTick(); + AssertSessionsAreEquallyFilled(); + ProcessOneTick(); + AssertSessionsAreEquallyFilled(); + + ProcessOneTick(); + AssertSessionsAreEquallyFilled(); + ProcessOneTick(); + AssertSessionsAreEquallyFilled(); + ProcessOneTick(); + AssertSessionsAreEquallyFilled(); + ProcessOneTick(); + AssertSessionsAreEquallyFilled(); + } + + void TestHierarchicalQuotas() { + auto* root = AddResource("/Root", 10); + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(7); + auto* res1 = AddResource("/Root/Res1", cfg); + cfg.SetMaxUnitsPerSecond(1); + auto* res2 = AddResource("/Root/Res2", cfg); + + auto rootSession = CreateSession(root, true, 10); + auto res1Session = CreateSession(res1, true, 10); + auto res2Session = CreateSession(res2, true, 10); + + EXPECT_CALL(*rootSession.Sink, OnSend(_, Le(1.0), nullptr)) + .Times(AtLeast(1)); + EXPECT_CALL(*res1Session.Sink, OnSend(_, Le(0.7), nullptr)) + .Times(AtLeast(1)); + EXPECT_CALL(*res2Session.Sink, OnSend(_, Le(0.1), nullptr)) + .Times(AtLeast(1)); + + ProcessOneTick(); + ProcessOneTick(); + } + + void TestHangDefence() { + auto* root = AddResource("/Root", 100); + // All these sessions will be filled in one tick. Next resource accumulation will spend zero amount of resource. + // But despite that algorithm should detect it and finish. + auto session1 = CreateSession(root, true, 0.1); + auto session2 = CreateSession(root, true, 0.1); + auto session3 = CreateSession(root, true, 0.1); + ProcessAllTicks(); + } + + void TestMoreStrongChildLimit() { + AddResource("/Root", 100); + auto* res = AddResource("/Root/Res", 3); // 0.3 resource in one tick + auto session = CreateSession(res, true, 3); + + // Parent resource tick amount is 1, but our resource tick is 0.3 + EXPECT_CALL(*session.Sink, OnSend(_, DoubleEq(0.3), nullptr)) + .Times(10); + ProcessAllTicks(); + } + + void TestAmountIsLessThanEpsilon() { + auto* root = AddResource("/Root", 100); + auto session = CreateSession(root, true, 0.000001); + + // Session must not hang even if client requested too small resource. 
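// One plausible way the "must not hang" requirement and the Ge(0.000001)
// matcher below can both hold: clamp tiny requests up to an allocation
// epsilon so they are satisfied in a single send. This is a hypothetical
// sketch (kAllocationEpsilon and NextAllocation are made-up names), not the
// tablet's actual implementation.
#include <algorithm>

constexpr double kAllocationEpsilon = 1e-6;

double NextAllocation(double pendingAmount) {
    // Never hand out less than the epsilon, so the session terminates
    // even when the client asked for an arbitrarily small amount.
    return std::max(pendingAmount, kAllocationEpsilon);
}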
+ EXPECT_CALL(*session.Sink, OnSend(_, Ge(0.000001), nullptr)); // send epsilon + ProcessAllTicks(); + } + + void TestEffectiveProps() { + NKikimrKesus::THierarchicalDRRResourceConfig rootCfg; + rootCfg.SetMaxUnitsPerSecond(100); + AddResource("/Root", rootCfg); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(200); + auto* res = AddResource("/Root/Res", cfg); + + UNIT_ASSERT_DOUBLES_EQUAL(res->GetEffectiveProps().GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), 100, 0.001); // min + + auto* res2 = AddResource("/Root/Res2"); + UNIT_ASSERT_DOUBLES_EQUAL(res2->GetEffectiveProps().GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), 100, 0.001); // inherits + } + + void TestWeights() { + AddResource("/Root", 10); + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetWeight(7); + auto* lightRes = AddResource("/Root/LightRes", cfg); + cfg.SetWeight(42); + auto* heavyRes = AddResource("/Root/HeavyRes", cfg); + + auto lightResSession = CreateSession(lightRes, true, 10); + auto heavyResSession = CreateSession(heavyRes, true, 10); + + EXPECT_CALL(*lightResSession.Sink, OnSend(_, DoubleNear(1.0 * 7.0 / 49.0, 0.001), nullptr)) + .Times(AtLeast(1)); + EXPECT_CALL(*heavyResSession.Sink, OnSend(_, DoubleNear(1.0 * 42.0 / 49.0, 0.001), nullptr)) + .Times(AtLeast(1)); + + ProcessOneTick(); + ProcessOneTick(); + } + + void TestWeightsChange() { + AddResource("/Root", 10); + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetWeight(7); + auto* lightRes = AddResource("/Root/LightRes", cfg); + cfg.SetWeight(42); + auto* heavyRes = AddResource("/Root/HeavyRes", cfg); + + auto lightResSession = CreateSession(lightRes, true, 10); + auto heavyResSession = CreateSession(heavyRes, true, 10); + + // Before weights change + auto& firstLightResAllocation = EXPECT_CALL(*lightResSession.Sink, OnSend(_, DoubleNear(1.0 * 7.0 / 49.0, 0.001), nullptr)) + .Times(2); + auto& firstHeavyResAllocation = EXPECT_CALL(*heavyResSession.Sink, OnSend(_, DoubleNear(1.0 * 42.0 / 49.0, 0.001), nullptr)) + .Times(2); + + auto& heaveResPropsChange = EXPECT_CALL(*heavyResSession.Sink, OnSend(_, 0.0, _)) + .Times(1) + .After(firstHeavyResAllocation); + + // After weights change + EXPECT_CALL(*lightResSession.Sink, OnSend(_, DoubleNear(1.0 * 7.0 / 8.0, 0.001), nullptr)) + .Times(2) + .After(firstLightResAllocation); + EXPECT_CALL(*heavyResSession.Sink, OnSend(_, DoubleNear(1.0 / 8.0, 0.001), nullptr)) + .Times(2) + .After(heaveResPropsChange); + + ProcessOneTick(); + ProcessOneTick(); + + // Update one weight. + // It is expected that a new weight will be applied in the next tick. 
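// The weight-share arithmetic behind the DoubleNear matchers above, as a
// minimal sketch. The ~1 unit of parent budget per tick and the fallback to
// a default weight of 1 once the weight field is omitted are assumptions
// inferred from the matcher values (7/49 and 42/49 before, 7/8 and 1/8 after).
#include <cassert>
#include <cmath>

double PerTick(double weight, double totalWeight, double parentBudgetPerTick = 1.0) {
    return parentBudgetPerTick * weight / totalWeight;
}

int main() {
    assert(std::abs(PerTick(7, 7 + 42) - 7.0 / 49.0) < 1e-9);   // light, before update
    assert(std::abs(PerTick(42, 7 + 42) - 42.0 / 49.0) < 1e-9); // heavy, before update
    assert(std::abs(PerTick(7, 7 + 1) - 7.0 / 8.0) < 1e-9);     // light, after update
    assert(std::abs(PerTick(1, 7 + 1) - 1.0 / 8.0) < 1e-9);     // heavy, after update
}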
+ TString msg; + NKikimrKesus::TStreamingQuoterResource newPropsWithoutWeight; + newPropsWithoutWeight.MutableHierarhicalDRRResourceConfig(); + UNIT_ASSERT_C(heavyRes->Update(newPropsWithoutWeight, msg), msg); + Resources->OnUpdateResourceProps(heavyRes); + + ProcessOneTick(); + ProcessOneTick(); + } + + void TestVerySmallSpeed() { + auto* res = AddResource("/Root", 0.000000000000000000000000000000000000000001); + + auto session = CreateSession(res, true, 10); + + EXPECT_CALL(*session.Sink, OnSend(_, DoubleNear(0.000000000000000000000000000000000000000001, 0.000000000000000000000000000000000000000001), nullptr)) + .Times(1); + + ProcessOneTick(); + } + + void TestVeryBigWeights() { + AddResource("/Root", 10); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetWeight(std::numeric_limits<ui32>::max() - 3); + auto* res1 = AddResource("/Root/Res1", cfg); + auto* res2 = AddResource("/Root/Res1/Res2", cfg); + + auto session11 = CreateSession(res1, true, 10); + auto session12 = CreateSession(res1, true, 10); + auto session13 = CreateSession(res1, true, 10); + auto session14 = CreateSession(res1, true, 10); + auto session15 = CreateSession(res1, true, 10); + + auto session21 = CreateSession(res2, true, 10); + auto session22 = CreateSession(res2, true, 10); + + EXPECT_CALL(*session11.Sink, OnSend(_, DoubleNear(0.000001, 0.000001), nullptr)) + .Times(1); + EXPECT_CALL(*session12.Sink, OnSend(_, DoubleNear(0.000001, 0.000001), nullptr)) + .Times(1); + EXPECT_CALL(*session13.Sink, OnSend(_, DoubleNear(0.000001, 0.000001), nullptr)) + .Times(1); + EXPECT_CALL(*session14.Sink, OnSend(_, DoubleNear(0.000001, 0.000001), nullptr)) + .Times(1); + EXPECT_CALL(*session15.Sink, OnSend(_, DoubleNear(0.000001, 0.000001), nullptr)) + .Times(1); + EXPECT_CALL(*session21.Sink, OnSend(_, DoubleNear(0.5, 0.001), nullptr)) + .Times(1); + EXPECT_CALL(*session22.Sink, OnSend(_, DoubleNear(0.5, 0.001), nullptr)) + .Times(1); + + ProcessOneTick(); + } + + void TestDeleteResourceWithActiveChildren() { + AddResource("Root", 10); + auto* res1 = AddResource("Root/Res1"); + auto* res2 = AddResource("Root/Res1/Res2"); + + auto session1 = CreateSession(res1, true, std::numeric_limits<double>::infinity()); + auto session2 = CreateSession(res2, true, std::numeric_limits<double>::infinity()); + auto session3 = CreateSession(res2, true, std::numeric_limits<double>::infinity()); + + EXPECT_CALL(*session1.Sink, OnNotFound(_)); + EXPECT_CALL(*session2.Sink, OnNotFound(_)); + EXPECT_CALL(*session3.Sink, OnSessionExpired(_)); + + ProcessTicks(3); + + DisconnectSession(session3.Session); + + TString msg; + UNIT_ASSERT_C(Resources->DeleteResource(res2, msg), msg); + ProcessTicks(3); + + UNIT_ASSERT_C(Resources->DeleteResource(res1, msg), msg); + ProcessAllTicks(); + } + + void TestSessionDisconnectsAndThenConnectsAgainImpl(bool consumes, size_t resourcesCount = 1) { + UNIT_ASSERT(resourcesCount > 0); + + AddResource("Root", 100500); + const NActors::TActorId clientId = NewActorID(); const NActors::TActorId pipeServerId = NewActorID(); - std::vector<TQuoterResourceTree*> resources; - resources.reserve(resourcesCount); - for (size_t resourceIndex = 0; resourceIndex < resourcesCount; ++resourceIndex) { - resources.push_back(AddResource(TStringBuilder() << "Root/Res_" << resourceIndex, 10)); - } - - auto sessionsForResources = CreateSession(resources, consumes, std::numeric_limits<double>::infinity(), nullptr, clientId, pipeServerId); - - for (size_t resourceIndex = 0; resourceIndex < resourcesCount; ++resourceIndex) { - 
TTestSession& session = sessionsForResources[resourceIndex]; - const ui64 resourceId = resources[resourceIndex]->GetResourceId(); - if (consumes) { - EXPECT_CALL(*session.Sink, OnSend(resourceId, DoubleNear(1, 0.000001), nullptr)) - .Times(1); - } - EXPECT_CALL(*session.Sink, OnSessionExpired(resourceId)); - } - - if (consumes) { - AssertQueueNotEmpty(); - ProcessOneTick(); - } - Resources->DisconnectSession(pipeServerId); - - auto sessionsForResources2 = CreateSession(resources, true, std::numeric_limits<double>::infinity(), nullptr, clientId, pipeServerId); - - for (size_t resourceIndex = 0; resourceIndex < resourcesCount; ++resourceIndex) { - TTestSession& session = sessionsForResources2[resourceIndex]; - const ui64 resourceId = resources[resourceIndex]->GetResourceId(); - EXPECT_CALL(*session.Sink, OnSend(resourceId, DoubleNear(1, 0.000001), nullptr)) - .Times(1); - } - - ProcessOneTick(); - } - - void TestActiveSessionDisconnectsAndThenConnectsAgain() { - TestSessionDisconnectsAndThenConnectsAgainImpl(true); - } - - void TestInactiveSessionDisconnectsAndThenConnectsAgain() { - TestSessionDisconnectsAndThenConnectsAgainImpl(false); - } - - void TestActiveMultiresourceSessionDisconnectsAndThenConnectsAgain() { - TestSessionDisconnectsAndThenConnectsAgainImpl(true, 5); - } - - void TestInactiveMultiresourceSessionDisconnectsAndThenConnectsAgain() { - TestSessionDisconnectsAndThenConnectsAgainImpl(false, 5); - } - -private: - THolder<TQuoterResources> Resources; - THolder<TTickProcessorQueue> Queue; - ui64 NextResourceId = 1; - ui64 NextActorId = 1; - TInstant Time = TInstant::Now(); -}; - -UNIT_TEST_SUITE_REGISTRATION(THDRRQuoterResourceTreeRuntimeTest); - -} -} + std::vector<TQuoterResourceTree*> resources; + resources.reserve(resourcesCount); + for (size_t resourceIndex = 0; resourceIndex < resourcesCount; ++resourceIndex) { + resources.push_back(AddResource(TStringBuilder() << "Root/Res_" << resourceIndex, 10)); + } + + auto sessionsForResources = CreateSession(resources, consumes, std::numeric_limits<double>::infinity(), nullptr, clientId, pipeServerId); + + for (size_t resourceIndex = 0; resourceIndex < resourcesCount; ++resourceIndex) { + TTestSession& session = sessionsForResources[resourceIndex]; + const ui64 resourceId = resources[resourceIndex]->GetResourceId(); + if (consumes) { + EXPECT_CALL(*session.Sink, OnSend(resourceId, DoubleNear(1, 0.000001), nullptr)) + .Times(1); + } + EXPECT_CALL(*session.Sink, OnSessionExpired(resourceId)); + } + + if (consumes) { + AssertQueueNotEmpty(); + ProcessOneTick(); + } + Resources->DisconnectSession(pipeServerId); + + auto sessionsForResources2 = CreateSession(resources, true, std::numeric_limits<double>::infinity(), nullptr, clientId, pipeServerId); + + for (size_t resourceIndex = 0; resourceIndex < resourcesCount; ++resourceIndex) { + TTestSession& session = sessionsForResources2[resourceIndex]; + const ui64 resourceId = resources[resourceIndex]->GetResourceId(); + EXPECT_CALL(*session.Sink, OnSend(resourceId, DoubleNear(1, 0.000001), nullptr)) + .Times(1); + } + + ProcessOneTick(); + } + + void TestActiveSessionDisconnectsAndThenConnectsAgain() { + TestSessionDisconnectsAndThenConnectsAgainImpl(true); + } + + void TestInactiveSessionDisconnectsAndThenConnectsAgain() { + TestSessionDisconnectsAndThenConnectsAgainImpl(false); + } + + void TestActiveMultiresourceSessionDisconnectsAndThenConnectsAgain() { + TestSessionDisconnectsAndThenConnectsAgainImpl(true, 5); + } + + void 
TestInactiveMultiresourceSessionDisconnectsAndThenConnectsAgain() { + TestSessionDisconnectsAndThenConnectsAgainImpl(false, 5); + } + +private: + THolder<TQuoterResources> Resources; + THolder<TTickProcessorQueue> Queue; + ui64 NextResourceId = 1; + ui64 NextActorId = 1; + TInstant Time = TInstant::Now(); +}; + +UNIT_TEST_SUITE_REGISTRATION(THDRRQuoterResourceTreeRuntimeTest); + +} +} diff --git a/ydb/core/kesus/tablet/quoter_runtime.cpp b/ydb/core/kesus/tablet/quoter_runtime.cpp index d00c10739db..f5cacf685cb 100644 --- a/ydb/core/kesus/tablet/quoter_runtime.cpp +++ b/ydb/core/kesus/tablet/quoter_runtime.cpp @@ -1,170 +1,170 @@ -#include "tablet_impl.h" - -#include <util/system/datetime.h> - -namespace NKikimr { -namespace NKesus { - -#define TRACE_LOG_EVENT(tabletId, protoEventTypeStr, protoRecord, recipient, cookie) \ - LOG_TRACE_S( \ - TActivationContext::AsActorContext(), NKikimrServices::KESUS_TABLET, \ - "[" << (tabletId) << "] Send " protoEventTypeStr " to " << (recipient) \ - << ". Cookie: " << (cookie) << ". Data: " << (protoRecord) \ - ) - -class TKesusTablet::TQuoterResourceSink : public IResourceSink { -public: +#include "tablet_impl.h" + +#include <util/system/datetime.h> + +namespace NKikimr { +namespace NKesus { + +#define TRACE_LOG_EVENT(tabletId, protoEventTypeStr, protoRecord, recipient, cookie) \ + LOG_TRACE_S( \ + TActivationContext::AsActorContext(), NKikimrServices::KESUS_TABLET, \ + "[" << (tabletId) << "] Send " protoEventTypeStr " to " << (recipient) \ + << ". Cookie: " << (cookie) << ". Data: " << (protoRecord) \ + ) + +class TKesusTablet::TQuoterResourceSink : public IResourceSink { +public: TQuoterResourceSink(const TActorId& actor, TKesusTablet* kesus) - : Actor(actor) - , Kesus(kesus) - { - } - - void Send(ui64 resourceId, double amount, const NKikimrKesus::TStreamingQuoterResource* props) override { - Kesus->QuoterResourceSessionsAccumulator.Accumulate(Actor, resourceId, amount, props); - } - - void CloseSession(ui64 resourceId, Ydb::StatusIds::StatusCode status, const TString& reason) override { - THolder<TEvKesus::TEvResourcesAllocated> ev = MakeHolder<TEvKesus::TEvResourcesAllocated>(); - auto* info = ev->Record.AddResourcesInfo(); - info->SetResourceId(resourceId); - TEvKesus::FillError(info->MutableStateNotification(), status, reason); - TRACE_LOG_EVENT(Kesus->TabletID(), "TEvResourcesAllocated", ev->Record, Actor, 0); - Kesus->Send(Actor, std::move(ev)); - } - -private: + : Actor(actor) + , Kesus(kesus) + { + } + + void Send(ui64 resourceId, double amount, const NKikimrKesus::TStreamingQuoterResource* props) override { + Kesus->QuoterResourceSessionsAccumulator.Accumulate(Actor, resourceId, amount, props); + } + + void CloseSession(ui64 resourceId, Ydb::StatusIds::StatusCode status, const TString& reason) override { + THolder<TEvKesus::TEvResourcesAllocated> ev = MakeHolder<TEvKesus::TEvResourcesAllocated>(); + auto* info = ev->Record.AddResourcesInfo(); + info->SetResourceId(resourceId); + TEvKesus::FillError(info->MutableStateNotification(), status, reason); + TRACE_LOG_EVENT(Kesus->TabletID(), "TEvResourcesAllocated", ev->Record, Actor, 0); + Kesus->Send(Actor, std::move(ev)); + } + +private: TActorId Actor; - TKesusTablet* Kesus; -}; - + TKesusTablet* Kesus; +}; + void TKesusTablet::TQuoterResourceSessionsAccumulator::Accumulate(const TActorId& recipient, ui64 resourceId, double amount, const NKikimrKesus::TStreamingQuoterResource* props) { - TSendInfo& info = SendInfos[recipient]; - if (!info.Event) { - info.Event = 
MakeHolder<TEvKesus::TEvResourcesAllocated>(); - } - auto [indexIt, insertedNew] = info.ResIdIndex.try_emplace(resourceId, info.Event->Record.ResourcesInfoSize()); - NKikimrKesus::TEvResourcesAllocated::TResourceInfo* resInfo = nullptr; - if (insertedNew) { - resInfo = info.Event->Record.AddResourcesInfo(); - resInfo->SetResourceId(resourceId); - resInfo->SetAmount(amount); - resInfo->MutableStateNotification()->SetStatus(Ydb::StatusIds::SUCCESS); - } else { - Y_VERIFY(indexIt->second < info.Event->Record.ResourcesInfoSize()); - resInfo = info.Event->Record.MutableResourcesInfo(indexIt->second); - resInfo->SetAmount(resInfo->GetAmount() + amount); - } - - if (props) { - *resInfo->MutableEffectiveProps() = *props; - } -} - -void TKesusTablet::TQuoterResourceSessionsAccumulator::SendAll(const TActorContext& ctx, ui64 tabletId) { - for (auto infoIter = SendInfos.begin(), infoEnd = SendInfos.end(); infoIter != infoEnd; ++infoIter) { + TSendInfo& info = SendInfos[recipient]; + if (!info.Event) { + info.Event = MakeHolder<TEvKesus::TEvResourcesAllocated>(); + } + auto [indexIt, insertedNew] = info.ResIdIndex.try_emplace(resourceId, info.Event->Record.ResourcesInfoSize()); + NKikimrKesus::TEvResourcesAllocated::TResourceInfo* resInfo = nullptr; + if (insertedNew) { + resInfo = info.Event->Record.AddResourcesInfo(); + resInfo->SetResourceId(resourceId); + resInfo->SetAmount(amount); + resInfo->MutableStateNotification()->SetStatus(Ydb::StatusIds::SUCCESS); + } else { + Y_VERIFY(indexIt->second < info.Event->Record.ResourcesInfoSize()); + resInfo = info.Event->Record.MutableResourcesInfo(indexIt->second); + resInfo->SetAmount(resInfo->GetAmount() + amount); + } + + if (props) { + *resInfo->MutableEffectiveProps() = *props; + } +} + +void TKesusTablet::TQuoterResourceSessionsAccumulator::SendAll(const TActorContext& ctx, ui64 tabletId) { + for (auto infoIter = SendInfos.begin(), infoEnd = SendInfos.end(); infoIter != infoEnd; ++infoIter) { const TActorId& recipientId = infoIter->first; - auto& info = infoIter->second; - TRACE_LOG_EVENT(tabletId, "TEvResourcesAllocated", info.Event->Record, recipientId, 0); - ctx.Send(recipientId, std::move(info.Event)); - } - SendInfos.clear(); -} - -void TKesusTablet::Handle(TEvKesus::TEvSubscribeOnResources::TPtr& ev) { - THolder<TEvKesus::TEvSubscribeOnResourcesResult> reply = MakeHolder<TEvKesus::TEvSubscribeOnResourcesResult>(); + auto& info = infoIter->second; + TRACE_LOG_EVENT(tabletId, "TEvResourcesAllocated", info.Event->Record, recipientId, 0); + ctx.Send(recipientId, std::move(info.Event)); + } + SendInfos.clear(); +} + +void TKesusTablet::Handle(TEvKesus::TEvSubscribeOnResources::TPtr& ev) { + THolder<TEvKesus::TEvSubscribeOnResourcesResult> reply = MakeHolder<TEvKesus::TEvSubscribeOnResourcesResult>(); const TActorId clientId = ActorIdFromProto(ev->Get()->Record.GetActorID()); const TActorId pipeServerId = ev->Recipient; - reply->Record.MutableResults()->Reserve(ev->Get()->Record.ResourcesSize()); - IResourceSink::TPtr sink = new TQuoterResourceSink(ev->Sender, this); - const TInstant now = TActivationContext::Now(); - TTickProcessorQueue queue; - i64 subscriptions = 0; - i64 unknownSubscriptions = 0; - for (const NKikimrKesus::TEvSubscribeOnResources::TResourceSubscribeInfo& resource : ev->Get()->Record.GetResources()) { - NKikimrKesus::TEvSubscribeOnResourcesResult::TResourceSubscribeResult* result = reply->Record.AddResults(); - TQuoterResourceTree* resourceTree = QuoterResources.FindPath(resource.GetResourcePath()); - if (resourceTree) { - 
++subscriptions; - TQuoterSession* session = QuoterResources.GetOrCreateSession(clientId, resourceTree); - session->SetResourceSink(sink); + reply->Record.MutableResults()->Reserve(ev->Get()->Record.ResourcesSize()); + IResourceSink::TPtr sink = new TQuoterResourceSink(ev->Sender, this); + const TInstant now = TActivationContext::Now(); + TTickProcessorQueue queue; + i64 subscriptions = 0; + i64 unknownSubscriptions = 0; + for (const NKikimrKesus::TEvSubscribeOnResources::TResourceSubscribeInfo& resource : ev->Get()->Record.GetResources()) { + NKikimrKesus::TEvSubscribeOnResourcesResult::TResourceSubscribeResult* result = reply->Record.AddResults(); + TQuoterResourceTree* resourceTree = QuoterResources.FindPath(resource.GetResourcePath()); + if (resourceTree) { + ++subscriptions; + TQuoterSession* session = QuoterResources.GetOrCreateSession(clientId, resourceTree); + session->SetResourceSink(sink); const NActors::TActorId prevPipeServerId = session->SetPipeServerId(pipeServerId); - QuoterResources.SetPipeServerId(TQuoterSessionId(clientId, resourceTree->GetResourceId()), prevPipeServerId, pipeServerId); - session->UpdateConsumptionState(resource.GetStartConsuming(), resource.GetInitialAmount(), queue, now); - - result->MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); - result->SetResourceId(resourceTree->GetResourceId()); - *result->MutableEffectiveProps() = resourceTree->GetEffectiveProps(); - } else { - ++unknownSubscriptions; - TEvKesus::FillError(result->MutableError(), Ydb::StatusIds::NOT_FOUND, TStringBuilder() << "Resource \"" << resource.GetResourcePath() << "\" doesn't exist."); - } - } - if (subscriptions) { - *QuoterResources.GetCounters().ResourceSubscriptions += subscriptions; - } - if (unknownSubscriptions) { - *QuoterResources.GetCounters().UnknownResourceSubscriptions += unknownSubscriptions; - } - QuoterTickProcessorQueue.Merge(std::move(queue)); - TRACE_LOG_EVENT(TabletID(), "TEvSubscribeOnResourcesResult", reply->Record, ev->Sender, ev->Cookie); - Send(ev->Sender, std::move(reply), 0, ev->Cookie); - - LOG_DEBUG_S(TActivationContext::AsActorContext(), NKikimrServices::KESUS_TABLET, - "[" << TabletID() << "] Subscribe on quoter resources (sender=" << ev->Sender - << ", cookie=" << ev->Cookie << ")"); - - HandleQuoterTick(); -} - -void TKesusTablet::Handle(TEvKesus::TEvUpdateConsumptionState::TPtr& ev) { - THolder<TEvKesus::TEvResourcesAllocated> errors; + QuoterResources.SetPipeServerId(TQuoterSessionId(clientId, resourceTree->GetResourceId()), prevPipeServerId, pipeServerId); + session->UpdateConsumptionState(resource.GetStartConsuming(), resource.GetInitialAmount(), queue, now); + + result->MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); + result->SetResourceId(resourceTree->GetResourceId()); + *result->MutableEffectiveProps() = resourceTree->GetEffectiveProps(); + } else { + ++unknownSubscriptions; + TEvKesus::FillError(result->MutableError(), Ydb::StatusIds::NOT_FOUND, TStringBuilder() << "Resource \"" << resource.GetResourcePath() << "\" doesn't exist."); + } + } + if (subscriptions) { + *QuoterResources.GetCounters().ResourceSubscriptions += subscriptions; + } + if (unknownSubscriptions) { + *QuoterResources.GetCounters().UnknownResourceSubscriptions += unknownSubscriptions; + } + QuoterTickProcessorQueue.Merge(std::move(queue)); + TRACE_LOG_EVENT(TabletID(), "TEvSubscribeOnResourcesResult", reply->Record, ev->Sender, ev->Cookie); + Send(ev->Sender, std::move(reply), 0, ev->Cookie); + + LOG_DEBUG_S(TActivationContext::AsActorContext(), 
NKikimrServices::KESUS_TABLET, + "[" << TabletID() << "] Subscribe on quoter resources (sender=" << ev->Sender + << ", cookie=" << ev->Cookie << ")"); + + HandleQuoterTick(); +} + +void TKesusTablet::Handle(TEvKesus::TEvUpdateConsumptionState::TPtr& ev) { + THolder<TEvKesus::TEvResourcesAllocated> errors; const TActorId clientId = ActorIdFromProto(ev->Get()->Record.GetActorID()); - const TInstant now = TActivationContext::Now(); - IResourceSink::TPtr sink = new TQuoterResourceSink(ev->Sender, this); - TTickProcessorQueue queue; - i64 consumptionStarts = 0; - i64 consumptionStops = 0; - for (const NKikimrKesus::TEvUpdateConsumptionState::TResourceInfo& resource : ev->Get()->Record.GetResourcesInfo()) { - if (TQuoterSession* session = QuoterResources.FindSession(clientId, resource.GetResourceId())) { - if (resource.GetConsumeResource()) { - ++consumptionStarts; - } else { - ++consumptionStops; - } - session->SetResourceSink(sink); - session->UpdateConsumptionState(resource.GetConsumeResource(), resource.GetAmount(), queue, now); - } else { - if (!errors) { - errors = MakeHolder<TEvKesus::TEvResourcesAllocated>(); - } - auto* notification = errors->Record.AddResourcesInfo(); - notification->SetResourceId(resource.GetResourceId()); - TEvKesus::FillError(notification->MutableStateNotification(), Ydb::StatusIds::BAD_SESSION, "No such session exists."); - } - } - if (consumptionStarts) { - *QuoterResources.GetCounters().ResourceConsumptionStarts += consumptionStarts; - } - if (consumptionStops) { - *QuoterResources.GetCounters().ResourceConsumptionStops += consumptionStops; - } - QuoterTickProcessorQueue.Merge(std::move(queue)); - if (errors) { - TRACE_LOG_EVENT(TabletID(), "TEvResourcesAllocated", errors->Record, ev->Sender, 0); - Send(ev->Sender, std::move(errors)); - } - auto ack = MakeHolder<TEvKesus::TEvUpdateConsumptionStateAck>(); - TRACE_LOG_EVENT(TabletID(), "TEvUpdateConsumptionStateAck", ack->Record, ev->Sender, ev->Cookie); - Send(ev->Sender, std::move(ack), 0, ev->Cookie); - - LOG_DEBUG_S(TActivationContext::AsActorContext(), NKikimrServices::KESUS_TABLET, - "[" << TabletID() << "] Update quoter resources consumption state (sender=" << ev->Sender - << ", cookie=" << ev->Cookie << ")"); - - HandleQuoterTick(); -} - + const TInstant now = TActivationContext::Now(); + IResourceSink::TPtr sink = new TQuoterResourceSink(ev->Sender, this); + TTickProcessorQueue queue; + i64 consumptionStarts = 0; + i64 consumptionStops = 0; + for (const NKikimrKesus::TEvUpdateConsumptionState::TResourceInfo& resource : ev->Get()->Record.GetResourcesInfo()) { + if (TQuoterSession* session = QuoterResources.FindSession(clientId, resource.GetResourceId())) { + if (resource.GetConsumeResource()) { + ++consumptionStarts; + } else { + ++consumptionStops; + } + session->SetResourceSink(sink); + session->UpdateConsumptionState(resource.GetConsumeResource(), resource.GetAmount(), queue, now); + } else { + if (!errors) { + errors = MakeHolder<TEvKesus::TEvResourcesAllocated>(); + } + auto* notification = errors->Record.AddResourcesInfo(); + notification->SetResourceId(resource.GetResourceId()); + TEvKesus::FillError(notification->MutableStateNotification(), Ydb::StatusIds::BAD_SESSION, "No such session exists."); + } + } + if (consumptionStarts) { + *QuoterResources.GetCounters().ResourceConsumptionStarts += consumptionStarts; + } + if (consumptionStops) { + *QuoterResources.GetCounters().ResourceConsumptionStops += consumptionStops; + } + QuoterTickProcessorQueue.Merge(std::move(queue)); + if (errors) { + 
TRACE_LOG_EVENT(TabletID(), "TEvResourcesAllocated", errors->Record, ev->Sender, 0); + Send(ev->Sender, std::move(errors)); + } + auto ack = MakeHolder<TEvKesus::TEvUpdateConsumptionStateAck>(); + TRACE_LOG_EVENT(TabletID(), "TEvUpdateConsumptionStateAck", ack->Record, ev->Sender, ev->Cookie); + Send(ev->Sender, std::move(ack), 0, ev->Cookie); + + LOG_DEBUG_S(TActivationContext::AsActorContext(), NKikimrServices::KESUS_TABLET, + "[" << TabletID() << "] Update quoter resources consumption state (sender=" << ev->Sender + << ", cookie=" << ev->Cookie << ")"); + + HandleQuoterTick(); +} + void TKesusTablet::Handle(TEvKesus::TEvAccountResources::TPtr& ev) { auto ack = MakeHolder<TEvKesus::TEvAccountResourcesAck>(); const TActorId clientId = ActorIdFromProto(ev->Get()->Record.GetActorID()); @@ -196,68 +196,68 @@ void TKesusTablet::Handle(TEvKesus::TEvAccountResources::TPtr& ev) { HandleQuoterTick(); } -void TKesusTablet::Handle(TEvKesus::TEvResourcesAllocatedAck::TPtr& ev) { - Y_UNUSED(ev); -} - -void TKesusTablet::ScheduleQuoterTick() { - if (!QuoterTickProcessingIsScheduled && !QuoterTickProcessorQueue.Empty()) { - const TInstant time = QuoterTickProcessorQueue.Top().Time; - if (time < NextQuoterTickTime) { - const TInstant now = TActivationContext::Now(); - Schedule(time - now, new TEvents::TEvWakeup(QUOTER_TICK_PROCESSING_WAKEUP_TAG)); - QuoterTickProcessingIsScheduled = true; - NextQuoterTickTime = time; - } - } -} - -void TKesusTablet::HandleQuoterTick() { +void TKesusTablet::Handle(TEvKesus::TEvResourcesAllocatedAck::TPtr& ev) { + Y_UNUSED(ev); +} + +void TKesusTablet::ScheduleQuoterTick() { + if (!QuoterTickProcessingIsScheduled && !QuoterTickProcessorQueue.Empty()) { + const TInstant time = QuoterTickProcessorQueue.Top().Time; + if (time < NextQuoterTickTime) { + const TInstant now = TActivationContext::Now(); + Schedule(time - now, new TEvents::TEvWakeup(QUOTER_TICK_PROCESSING_WAKEUP_TAG)); + QuoterTickProcessingIsScheduled = true; + NextQuoterTickTime = time; + } + } +} + +void TKesusTablet::HandleQuoterTick() { const NHPTimer::STime hpprev = GetCycleCountFast(); - NextQuoterTickTime = TInstant::Max(); - i64 processedTasks = 0; - while (!QuoterTickProcessorQueue.Empty()) { - const TInstant now = TActivationContext::Now(); - bool processed = false; - const TInstant topTime = QuoterTickProcessorQueue.Top().Time; - if (now >= topTime) { - TTickProcessorQueue queue; - do { - QuoterResources.ProcessTick(QuoterTickProcessorQueue.Top(), queue); - QuoterTickProcessorQueue.Pop(); - processed = true; - ++processedTasks; - } while (!QuoterTickProcessorQueue.Empty() && QuoterTickProcessorQueue.Top().Time == topTime); - - if (processed) { - QuoterTickProcessorQueue.Merge(std::move(queue)); - } - } - - if (!processed) { - break; - } - } - ScheduleQuoterTick(); - QuoterResourceSessionsAccumulator.SendAll(TActivationContext::AsActorContext(), TabletID()); + NextQuoterTickTime = TInstant::Max(); + i64 processedTasks = 0; + while (!QuoterTickProcessorQueue.Empty()) { + const TInstant now = TActivationContext::Now(); + bool processed = false; + const TInstant topTime = QuoterTickProcessorQueue.Top().Time; + if (now >= topTime) { + TTickProcessorQueue queue; + do { + QuoterResources.ProcessTick(QuoterTickProcessorQueue.Top(), queue); + QuoterTickProcessorQueue.Pop(); + processed = true; + ++processedTasks; + } while (!QuoterTickProcessorQueue.Empty() && QuoterTickProcessorQueue.Top().Time == topTime); + + if (processed) { + QuoterTickProcessorQueue.Merge(std::move(queue)); + } + } + + if (!processed) { + 
break; + } + } + ScheduleQuoterTick(); + QuoterResourceSessionsAccumulator.SendAll(TActivationContext::AsActorContext(), TabletID()); const NHPTimer::STime hpnow = GetCycleCountFast(); - *QuoterResources.GetCounters().ElapsedMicrosecOnResourceAllocation += NHPTimer::GetSeconds(hpnow - hpprev) * 1000000; - if (processedTasks) { - *QuoterResources.GetCounters().TickProcessorTasksProcessed += processedTasks; - } -} - -void TKesusTablet::Handle(TEvKesus::TEvGetQuoterResourceCounters::TPtr& ev) { - THolder<TEvKesus::TEvGetQuoterResourceCountersResult> reply = MakeHolder<TEvKesus::TEvGetQuoterResourceCountersResult>(); - QuoterResources.FillCounters(reply->Record); - TRACE_LOG_EVENT(TabletID(), "TEvGetQuoterResourceCountersResult", reply->Record, ev->Sender, ev->Cookie); - Send(ev->Sender, std::move(reply), 0, ev->Cookie); -} - -void TKesusTablet::Handle(TEvTabletPipe::TEvServerDisconnected::TPtr& ev) { - LOG_TRACE_S(TActivationContext::AsActorContext(), NKikimrServices::KESUS_TABLET, "Got TEvServerDisconnected(" << ev->Get()->ServerId << ")"); - QuoterResources.DisconnectSession(ev->Get()->ServerId); -} - -} -} + *QuoterResources.GetCounters().ElapsedMicrosecOnResourceAllocation += NHPTimer::GetSeconds(hpnow - hpprev) * 1000000; + if (processedTasks) { + *QuoterResources.GetCounters().TickProcessorTasksProcessed += processedTasks; + } +} + +void TKesusTablet::Handle(TEvKesus::TEvGetQuoterResourceCounters::TPtr& ev) { + THolder<TEvKesus::TEvGetQuoterResourceCountersResult> reply = MakeHolder<TEvKesus::TEvGetQuoterResourceCountersResult>(); + QuoterResources.FillCounters(reply->Record); + TRACE_LOG_EVENT(TabletID(), "TEvGetQuoterResourceCountersResult", reply->Record, ev->Sender, ev->Cookie); + Send(ev->Sender, std::move(reply), 0, ev->Cookie); +} + +void TKesusTablet::Handle(TEvTabletPipe::TEvServerDisconnected::TPtr& ev) { + LOG_TRACE_S(TActivationContext::AsActorContext(), NKikimrServices::KESUS_TABLET, "Got TEvServerDisconnected(" << ev->Get()->ServerId << ")"); + QuoterResources.DisconnectSession(ev->Get()->ServerId); +} + +} +} diff --git a/ydb/core/kesus/tablet/schema.h b/ydb/core/kesus/tablet/schema.h index cc2c95e6959..faa630253bf 100644 --- a/ydb/core/kesus/tablet/schema.h +++ b/ydb/core/kesus/tablet/schema.h @@ -51,17 +51,17 @@ struct TKesusSchema : NIceDb::Schema { using TColumns = TableColumns<SessionId, SemaphoreId, OrderId, TimeoutMillis, Count, Data>; }; - struct QuoterResources : Table<5> { - struct Id : Column<1, NScheme::NTypeIds::Uint64> {}; - struct ParentId : Column<2, NScheme::NTypeIds::Uint64> {}; - struct Props : Column<3, NScheme::NTypeIds::String> { using Type = NKikimrKesus::TStreamingQuoterResource; }; - - using TKey = TableKey<Id>; - using TColumns = TableColumns<Id, ParentId, Props>; - }; - - using TTables = SchemaTables<SysParams, Sessions, Semaphores, SessionSemaphores, QuoterResources>; - + struct QuoterResources : Table<5> { + struct Id : Column<1, NScheme::NTypeIds::Uint64> {}; + struct ParentId : Column<2, NScheme::NTypeIds::Uint64> {}; + struct Props : Column<3, NScheme::NTypeIds::String> { using Type = NKikimrKesus::TStreamingQuoterResource; }; + + using TKey = TableKey<Id>; + using TColumns = TableColumns<Id, ParentId, Props>; + }; + + using TTables = SchemaTables<SysParams, Sessions, Semaphores, SessionSemaphores, QuoterResources>; + using TSettings = SchemaSettings< ExecutorLogBatching<true>, ExecutorLogFlushPeriod<500 /*500us*/>>; @@ -78,8 +78,8 @@ struct TKesusSchema : NIceDb::Schema { static constexpr ui64 SysParam_ReadConsistencyMode = 10; static 
constexpr ui64 SysParam_AttachConsistencyMode = 11; static constexpr ui64 SysParam_StrictMarkerCounter = 12; - static constexpr ui64 SysParam_NextQuoterResourceId = 13; - static constexpr ui64 SysParam_RateLimiterCountersMode = 14; + static constexpr ui64 SysParam_NextQuoterResourceId = 13; + static constexpr ui64 SysParam_RateLimiterCountersMode = 14; }; } diff --git a/ydb/core/kesus/tablet/tablet.cpp b/ydb/core/kesus/tablet/tablet.cpp index 3641f4df981..7a39cef0f1d 100644 --- a/ydb/core/kesus/tablet/tablet.cpp +++ b/ydb/core/kesus/tablet/tablet.cpp @@ -1,10 +1,10 @@ #include "tablet.h" -#include "probes.h" +#include "probes.h" #include "tablet_impl.h" #include <library/cpp/lwtrace/mon/mon_lwtrace.h> - + namespace NKikimr { namespace NKesus { @@ -12,9 +12,9 @@ IActor* CreateKesusTablet(const TActorId& tablet, TTabletStorageInfo* info) { return new TKesusTablet(tablet, info); } -void AddKesusProbesList() { - NLwTraceMonPage::ProbeRegistry().AddProbesList(LWTRACE_GET_PROBES(KESUS_QUOTER_PROVIDER)); -} - +void AddKesusProbesList() { + NLwTraceMonPage::ProbeRegistry().AddProbesList(LWTRACE_GET_PROBES(KESUS_QUOTER_PROVIDER)); } + } +} diff --git a/ydb/core/kesus/tablet/tablet.h b/ydb/core/kesus/tablet/tablet.h index 55fd609333c..ceb393e094d 100644 --- a/ydb/core/kesus/tablet/tablet.h +++ b/ydb/core/kesus/tablet/tablet.h @@ -9,7 +9,7 @@ namespace NKesus { IActor* CreateKesusTablet(const TActorId& tablet, TTabletStorageInfo* info); -void AddKesusProbesList(); - +void AddKesusProbesList(); + } } diff --git a/ydb/core/kesus/tablet/tablet_html.cpp b/ydb/core/kesus/tablet/tablet_html.cpp index 20d269bf65a..018d13e4fe1 100644 --- a/ydb/core/kesus/tablet/tablet_html.cpp +++ b/ydb/core/kesus/tablet/tablet_html.cpp @@ -3,7 +3,7 @@ #include <library/cpp/monlib/service/pages/templates.h> #include <util/string/escape.h> -#include <util/string/subst.h> +#include <util/string/subst.h> namespace NKikimr { namespace NKesus { @@ -340,109 +340,109 @@ struct TKesusTablet::THtmlRenderer { } } } - - void RenderQuoterResourceLink(IOutputStream& out, const TQuoterResourceTree* resource) { - if (resource) { - TCgiParameters params; - params.InsertUnescaped("quoter_resource", resource->GetPath()); - out << "<a href=\"app?TabletID=" << Self->TabletID() << "&" << params() << "\">" << resource->GetPath() << "</a>"; - } - } - - void RenderQuoterResourceTable(IOutputStream& out, std::vector<const TQuoterResourceTree*>& resources) { - std::sort(resources.begin(), resources.end(), - [](const TQuoterResourceTree* res1, const TQuoterResourceTree* res2) { - return res1->GetPath() < res2->GetPath(); - }); - HTML(out) { - TABLE_SORTABLE_CLASS("table") { - TABLEHEAD() { - TABLER() { - TABLEH() { out << "Path"; } - TABLEH() { out << "Props"; } - } - } - TABLEBODY() { - for (const TQuoterResourceTree* resource : resources) { - TABLER() { - TABLED() { RenderQuoterResourceLink(out, resource); } - TABLED() { out << resource->GetProps(); } - } - } - } - } - } - } - - void RenderQuoterResources(IOutputStream& out) { - HTML(out) { - H3() { out << "Quoter resources"; } - - std::vector<const TQuoterResourceTree*> resources; - resources.reserve(Self->QuoterResources.GetAllResources().size()); - for (auto&& [path, resource] : Self->QuoterResources.GetAllResources()) { - resources.push_back(resource); - } - - RenderQuoterResourceTable(out, resources); - } - } - - void RenderQuoterResourceDetails(IOutputStream& out, const TString& path) { - if (const TQuoterResourceTree* resource = Self->QuoterResources.FindPath(path)) { - HTML(out) { - H2() { 
out << "Kesus quoter resource " << EscapeC(path); } - - PRE() { - if (resource->GetParent()) { - out << "Parent: "; RenderQuoterResourceLink(out, resource->GetParent()); out << "\n"; - } - TString props = "\n"; - props += resource->GetProps().Utf8DebugString(); - SubstGlobal(props, "\n", "\n "); // make indent - out << "Props:" << props << "\n"; - } - - H3() { out << "Children resources"; } - - std::vector<const TQuoterResourceTree*> resources(resource->GetChildren().begin(), resource->GetChildren().end()); - RenderQuoterResourceTable(out, resources); - - H3() { out << "Sessions"; } - RenderQuoterResourceSessions(out, resource); - } - } else { - RenderError(out, TStringBuilder() << "Resource with path " << path << " not found"); - } - } - - void RenderQuoterResourceSessions(IOutputStream& out, const TQuoterResourceTree* resource) { - HTML(out) { - TABLE_SORTABLE_CLASS("table") { - TABLEHEAD() { - TABLER() { - TABLEH() { out << "Client"; } - TABLEH() { out << "Active"; } - TABLEH() { out << "Consumed"; } - TABLEH() { out << "Requested"; } - } - } - TABLEBODY() { - const auto& clients = resource->GetSessions(); + + void RenderQuoterResourceLink(IOutputStream& out, const TQuoterResourceTree* resource) { + if (resource) { + TCgiParameters params; + params.InsertUnescaped("quoter_resource", resource->GetPath()); + out << "<a href=\"app?TabletID=" << Self->TabletID() << "&" << params() << "\">" << resource->GetPath() << "</a>"; + } + } + + void RenderQuoterResourceTable(IOutputStream& out, std::vector<const TQuoterResourceTree*>& resources) { + std::sort(resources.begin(), resources.end(), + [](const TQuoterResourceTree* res1, const TQuoterResourceTree* res2) { + return res1->GetPath() < res2->GetPath(); + }); + HTML(out) { + TABLE_SORTABLE_CLASS("table") { + TABLEHEAD() { + TABLER() { + TABLEH() { out << "Path"; } + TABLEH() { out << "Props"; } + } + } + TABLEBODY() { + for (const TQuoterResourceTree* resource : resources) { + TABLER() { + TABLED() { RenderQuoterResourceLink(out, resource); } + TABLED() { out << resource->GetProps(); } + } + } + } + } + } + } + + void RenderQuoterResources(IOutputStream& out) { + HTML(out) { + H3() { out << "Quoter resources"; } + + std::vector<const TQuoterResourceTree*> resources; + resources.reserve(Self->QuoterResources.GetAllResources().size()); + for (auto&& [path, resource] : Self->QuoterResources.GetAllResources()) { + resources.push_back(resource); + } + + RenderQuoterResourceTable(out, resources); + } + } + + void RenderQuoterResourceDetails(IOutputStream& out, const TString& path) { + if (const TQuoterResourceTree* resource = Self->QuoterResources.FindPath(path)) { + HTML(out) { + H2() { out << "Kesus quoter resource " << EscapeC(path); } + + PRE() { + if (resource->GetParent()) { + out << "Parent: "; RenderQuoterResourceLink(out, resource->GetParent()); out << "\n"; + } + TString props = "\n"; + props += resource->GetProps().Utf8DebugString(); + SubstGlobal(props, "\n", "\n "); // make indent + out << "Props:" << props << "\n"; + } + + H3() { out << "Children resources"; } + + std::vector<const TQuoterResourceTree*> resources(resource->GetChildren().begin(), resource->GetChildren().end()); + RenderQuoterResourceTable(out, resources); + + H3() { out << "Sessions"; } + RenderQuoterResourceSessions(out, resource); + } + } else { + RenderError(out, TStringBuilder() << "Resource with path " << path << " not found"); + } + } + + void RenderQuoterResourceSessions(IOutputStream& out, const TQuoterResourceTree* resource) { + HTML(out) { + 
TABLE_SORTABLE_CLASS("table") { + TABLEHEAD() { + TABLER() { + TABLEH() { out << "Client"; } + TABLEH() { out << "Active"; } + TABLEH() { out << "Consumed"; } + TABLEH() { out << "Requested"; } + } + } + TABLEBODY() { + const auto& clients = resource->GetSessions(); for (const NActors::TActorId& clientId : clients) { - const TQuoterSession* session = Self->QuoterResources.FindSession(clientId, resource->GetResourceId()); - Y_VERIFY(session); - TABLER() { - TABLED() { out << clientId; } - TABLED() { out << (session->IsActive() ? "true" : "false"); } - TABLED() { out << session->GetTotalConsumed(); } - TABLED() { out << session->GetAmountRequested(); } - } - } - } - } - } - } + const TQuoterSession* session = Self->QuoterResources.FindSession(clientId, resource->GetResourceId()); + Y_VERIFY(session); + TABLER() { + TABLED() { out << clientId; } + TABLED() { out << (session->IsActive() ? "true" : "false"); } + TABLED() { out << session->GetTotalConsumed(); } + TABLED() { out << session->GetAmountRequested(); } + } + } + } + } + } + } }; bool TKesusTablet::OnRenderAppHtmlPage(NMon::TEvRemoteHttpInfo::TPtr ev, const TActorContext& ctx) { @@ -462,14 +462,14 @@ bool TKesusTablet::OnRenderAppHtmlPage(NMon::TEvRemoteHttpInfo::TPtr ev, const T renderer.RenderSessionDetails(out, params.Get("session")); } else if (params.Has("semaphore")) { renderer.RenderSemaphoreDetails(out, params.Get("semaphore")); - } else if (params.Has("quoter_resource")) { - renderer.RenderQuoterResourceDetails(out, params.Get("quoter_resource")); + } else if (params.Has("quoter_resource")) { + renderer.RenderQuoterResourceDetails(out, params.Get("quoter_resource")); } else { H2() { out << "Kesus " << EscapeC(KesusPath); } renderer.RenderProxyList(out); renderer.RenderSessionList(out); renderer.RenderSemaphoreList(out); - renderer.RenderQuoterResources(out); + renderer.RenderQuoterResources(out); } } diff --git a/ydb/core/kesus/tablet/tablet_impl.cpp b/ydb/core/kesus/tablet/tablet_impl.cpp index 245c4b91b8c..109d6d0a909 100644 --- a/ydb/core/kesus/tablet/tablet_impl.cpp +++ b/ydb/core/kesus/tablet/tablet_impl.cpp @@ -36,7 +36,7 @@ void TKesusTablet::ResetState() { SessionGracePeriod = TDuration::Seconds(10); ReadConsistencyMode = Ydb::Coordination::CONSISTENCY_MODE_RELAXED; AttachConsistencyMode = Ydb::Coordination::CONSISTENCY_MODE_STRICT; - RateLimiterCountersMode = Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_AGGREGATED; + RateLimiterCountersMode = Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_AGGREGATED; Sessions.clear(); Semaphores.clear(); @@ -44,8 +44,8 @@ void TKesusTablet::ResetState() { SelfCheckCounter = 0; StrictMarkerCounter = 0; - - NextQuoterResourceId = 1; + + NextQuoterResourceId = 1; } void TKesusTablet::ResetCounters() { @@ -55,7 +55,7 @@ void TKesusTablet::ResetCounters() { TabletCounters->Simple()[COUNTER_SEMAPHORE_COUNT].Set(0); TabletCounters->Simple()[COUNTER_SEMAPHORE_OWNER_COUNT].Set(0); TabletCounters->Simple()[COUNTER_SEMAPHORE_WAITER_COUNT].Set(0); - TabletCounters->Simple()[COUNTER_QUOTER_RESOURCE_COUNT].Set(0); + TabletCounters->Simple()[COUNTER_QUOTER_RESOURCE_COUNT].Set(0); } void TKesusTablet::OnDetach(const TActorContext& ctx) { @@ -145,16 +145,16 @@ void TKesusTablet::Handle(TEvInterconnect::TEvNodeDisconnected::TPtr& ev) { ProxiesByNode.erase(msg->NodeId); } -void TKesusTablet::Handle(TEvents::TEvWakeup::TPtr& ev) { - switch (ev->Get()->Tag) { - case QUOTER_TICK_PROCESSING_WAKEUP_TAG: - QuoterTickProcessingIsScheduled = false; - return HandleQuoterTick(); - default: - Y_VERIFY(false, 
"Unknown Wakeup event with tag #%" PRIu64, ev->Get()->Tag); - } -} - +void TKesusTablet::Handle(TEvents::TEvWakeup::TPtr& ev) { + switch (ev->Get()->Tag) { + case QUOTER_TICK_PROCESSING_WAKEUP_TAG: + QuoterTickProcessingIsScheduled = false; + return HandleQuoterTick(); + default: + Y_VERIFY(false, "Unknown Wakeup event with tag #%" PRIu64, ev->Get()->Tag); + } +} + void TKesusTablet::Handle(TEvKesus::TEvDescribeProxies::TPtr& ev) { const auto& record = ev->Get()->Record; VerifyKesusPath(record.GetKesusPath()); @@ -290,18 +290,18 @@ STFUNC(TKesusTablet::StateWork) { cFunc(TEvKesus::Deprecated_EvClientReady, HandleIgnored); cFunc(TEvKesus::Deprecated_EvJobStatus, HandleIgnored); - hFunc(TEvKesus::TEvDescribeQuoterResources, Handle); - hFunc(TEvKesus::TEvAddQuoterResource, Handle); - hFunc(TEvKesus::TEvUpdateQuoterResource, Handle); - hFunc(TEvKesus::TEvDeleteQuoterResource, Handle); - hFunc(TEvKesus::TEvSubscribeOnResources, Handle); - hFunc(TEvKesus::TEvUpdateConsumptionState, Handle); + hFunc(TEvKesus::TEvDescribeQuoterResources, Handle); + hFunc(TEvKesus::TEvAddQuoterResource, Handle); + hFunc(TEvKesus::TEvUpdateQuoterResource, Handle); + hFunc(TEvKesus::TEvDeleteQuoterResource, Handle); + hFunc(TEvKesus::TEvSubscribeOnResources, Handle); + hFunc(TEvKesus::TEvUpdateConsumptionState, Handle); hFunc(TEvKesus::TEvAccountResources, Handle); - hFunc(TEvKesus::TEvResourcesAllocatedAck, Handle); - hFunc(TEvKesus::TEvGetQuoterResourceCounters, Handle); - hFunc(TEvTabletPipe::TEvServerDisconnected, Handle); - hFunc(TEvents::TEvWakeup, Handle); - + hFunc(TEvKesus::TEvResourcesAllocatedAck, Handle); + hFunc(TEvKesus::TEvGetQuoterResourceCounters, Handle); + hFunc(TEvTabletPipe::TEvServerDisconnected, Handle); + hFunc(TEvents::TEvWakeup, Handle); + hFunc(TEvPrivate::TEvSelfCheckStart, Handle); hFunc(TEvPrivate::TEvSelfCheckTimeout, Handle); diff --git a/ydb/core/kesus/tablet/tablet_impl.h b/ydb/core/kesus/tablet/tablet_impl.h index 1607e352d05..4c9e5f0eaaf 100644 --- a/ydb/core/kesus/tablet/tablet_impl.h +++ b/ydb/core/kesus/tablet/tablet_impl.h @@ -3,7 +3,7 @@ #include "defs.h" #include "events.h" -#include "quoter_resource_tree.h" +#include "quoter_resource_tree.h" #include "schema.h" #include <ydb/core/base/path.h> @@ -50,14 +50,14 @@ private: struct TTxSemaphoreTimeout; struct TTxSemaphoreUpdate; - struct TTxQuoterResourceAdd; - struct TTxQuoterResourceUpdate; - struct TTxQuoterResourceDelete; - struct TTxQuoterResourceDescribe; - class TQuoterResourceSink; - - static constexpr ui64 QUOTER_TICK_PROCESSING_WAKEUP_TAG = 1; - + struct TTxQuoterResourceAdd; + struct TTxQuoterResourceUpdate; + struct TTxQuoterResourceDelete; + struct TTxQuoterResourceDescribe; + class TQuoterResourceSink; + + static constexpr ui64 QUOTER_TICK_PROCESSING_WAKEUP_TAG = 1; + static constexpr size_t MAX_SESSIONS_LIMIT = 1000000; // 1 million static constexpr size_t MAX_DESCRIPTION_SIZE = 1024; static constexpr size_t MAX_PROTECTION_KEY_SIZE = 16; @@ -216,18 +216,18 @@ private: void NotifyWatchers(TVector<TDelayedEvent>& events, bool dataChanged, bool ownerChanged); }; - struct TQuoterResourceSessionsAccumulator { + struct TQuoterResourceSessionsAccumulator { void Accumulate(const TActorId& recipient, ui64 resourceId, double amount, const NKikimrKesus::TStreamingQuoterResource* props); - void SendAll(const TActorContext& ctx, ui64 tabletId); - - struct TSendInfo { - THolder<TEvKesus::TEvResourcesAllocated> Event; - THashMap<ui64, size_t> ResIdIndex; - }; - + void SendAll(const TActorContext& ctx, ui64 tabletId); + + struct 
TSendInfo { + THolder<TEvKesus::TEvResourcesAllocated> Event; + THashMap<ui64, size_t> ResIdIndex; + }; + THashMap<TActorId, TSendInfo> SendInfos; - }; - + }; + struct TEvPrivate { enum EEv { EvSessionTimeout = EventSpaceBegin(TEvents::ES_PRIVATE), @@ -297,15 +297,15 @@ private: THashMap<ui32, THashSet<TProxyInfo*>> ProxiesByNode; THashMap<ui64, ui64> SessionsTxCount; - // Quoter support - ui64 NextQuoterResourceId; - TQuoterResources QuoterResources; - TInstant NextQuoterTickTime = TInstant::Max(); - TTickProcessorQueue QuoterTickProcessorQueue; - TQuoterResourceSessionsAccumulator QuoterResourceSessionsAccumulator; - Ydb::Coordination::RateLimiterCountersMode RateLimiterCountersMode; - bool QuoterTickProcessingIsScheduled = false; - + // Quoter support + ui64 NextQuoterResourceId; + TQuoterResources QuoterResources; + TInstant NextQuoterTickTime = TInstant::Max(); + TTickProcessorQueue QuoterTickProcessorQueue; + TQuoterResourceSessionsAccumulator QuoterResourceSessionsAccumulator; + Ydb::Coordination::RateLimiterCountersMode RateLimiterCountersMode; + bool QuoterTickProcessingIsScheduled = false; + // Counters support THolder<TTabletCountersBase> TabletCountersPtr; TTabletCountersBase* TabletCounters; @@ -371,7 +371,7 @@ private: bool ScheduleSessionTimeout(TSessionInfo* session, const TActorContext& ctx, TDuration gracePeriod = TDuration::Zero()); void ClearProxy(TProxyInfo* proxy, const TActorContext& ctx); void ForgetProxy(TProxyInfo* proxy); - void ScheduleQuoterTick(); + void ScheduleQuoterTick(); private: void VerifyKesusPath(const TString& kesusPath); @@ -380,7 +380,7 @@ private: void Handle(TEvents::TEvUndelivered::TPtr& ev); void Handle(TEvInterconnect::TEvNodeConnected::TPtr& ev); void Handle(TEvInterconnect::TEvNodeDisconnected::TPtr& ev); - void Handle(TEvents::TEvWakeup::TPtr& ev); + void Handle(TEvents::TEvWakeup::TPtr& ev); void Handle(TEvKesus::TEvDummyRequest::TPtr& ev); void Handle(TEvKesus::TEvSetConfig::TPtr& ev); @@ -399,20 +399,20 @@ private: void Handle(TEvKesus::TEvUpdateSemaphore::TPtr& ev); void Handle(TEvKesus::TEvDeleteSemaphore::TPtr& ev); - // Quoter API - void Handle(TEvKesus::TEvDescribeQuoterResources::TPtr& ev); - void Handle(TEvKesus::TEvAddQuoterResource::TPtr& ev); - void Handle(TEvKesus::TEvUpdateQuoterResource::TPtr& ev); - void Handle(TEvKesus::TEvDeleteQuoterResource::TPtr& ev); - // Quoter runtime - void Handle(TEvKesus::TEvSubscribeOnResources::TPtr& ev); - void Handle(TEvKesus::TEvUpdateConsumptionState::TPtr& ev); + // Quoter API + void Handle(TEvKesus::TEvDescribeQuoterResources::TPtr& ev); + void Handle(TEvKesus::TEvAddQuoterResource::TPtr& ev); + void Handle(TEvKesus::TEvUpdateQuoterResource::TPtr& ev); + void Handle(TEvKesus::TEvDeleteQuoterResource::TPtr& ev); + // Quoter runtime + void Handle(TEvKesus::TEvSubscribeOnResources::TPtr& ev); + void Handle(TEvKesus::TEvUpdateConsumptionState::TPtr& ev); void Handle(TEvKesus::TEvAccountResources::TPtr& ev); - void Handle(TEvKesus::TEvResourcesAllocatedAck::TPtr& ev); - void Handle(TEvKesus::TEvGetQuoterResourceCounters::TPtr& ev); - void Handle(TEvTabletPipe::TEvServerDisconnected::TPtr& ev); - void HandleQuoterTick(); - + void Handle(TEvKesus::TEvResourcesAllocatedAck::TPtr& ev); + void Handle(TEvKesus::TEvGetQuoterResourceCounters::TPtr& ev); + void Handle(TEvTabletPipe::TEvServerDisconnected::TPtr& ev); + void HandleQuoterTick(); + void Handle(TEvPrivate::TEvSessionTimeout::TPtr& ev); void Handle(TEvPrivate::TEvAcquireSemaphoreTimeout::TPtr& ev); void 
Handle(TEvPrivate::TEvSelfCheckStart::TPtr& ev); diff --git a/ydb/core/kesus/tablet/tablet_ut.cpp b/ydb/core/kesus/tablet/tablet_ut.cpp index 1cf1a80ca8e..0bf8fce2b8e 100644 --- a/ydb/core/kesus/tablet/tablet_ut.cpp +++ b/ydb/core/kesus/tablet/tablet_ut.cpp @@ -9,8 +9,8 @@ #include <util/random/random.h> -#include <limits> - +#include <limits> + namespace NKikimr { namespace NKesus { @@ -30,7 +30,7 @@ namespace { void EnableRelaxedAttach(TTestContext& ctx) { Ydb::Coordination::Config config; config.set_attach_consistency_mode(Ydb::Coordination::CONSISTENCY_MODE_RELAXED); - ctx.SetConfig(12345, config, 42); + ctx.SetConfig(12345, config, 42); } } @@ -39,44 +39,44 @@ Y_UNIT_TEST_SUITE(TKesusTest) { TTestContext ctx; ctx.Setup(); - ctx.SetConfig(12345, MakeConfig("/foo/bar/baz"), 42); - - { - const auto getConfigResult = ctx.GetConfig(); - UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetConfig().path(), "/foo/bar/baz"); - UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetVersion(), 42); - UNIT_ASSERT_EQUAL_C(getConfigResult.GetConfig().rate_limiter_counters_mode(), Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_AGGREGATED, "Record: " << getConfigResult); - } - + ctx.SetConfig(12345, MakeConfig("/foo/bar/baz"), 42); + { - - auto configWithModes = MakeConfig("/foo/bar/baz"); - configWithModes.set_rate_limiter_counters_mode(Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_DETAILED); - ctx.SetConfig(12345, configWithModes, 42); + const auto getConfigResult = ctx.GetConfig(); + UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetConfig().path(), "/foo/bar/baz"); + UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetVersion(), 42); + UNIT_ASSERT_EQUAL_C(getConfigResult.GetConfig().rate_limiter_counters_mode(), Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_AGGREGATED, "Record: " << getConfigResult); } { - const auto getConfigResult = ctx.GetConfig(); - UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetConfig().path(), "/foo/bar/baz"); - UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetVersion(), 42); - UNIT_ASSERT_EQUAL_C(getConfigResult.GetConfig().rate_limiter_counters_mode(), Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_DETAILED, "Record: " << getConfigResult); + + auto configWithModes = MakeConfig("/foo/bar/baz"); + configWithModes.set_rate_limiter_counters_mode(Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_DETAILED); + ctx.SetConfig(12345, configWithModes, 42); } + { + const auto getConfigResult = ctx.GetConfig(); + UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetConfig().path(), "/foo/bar/baz"); + UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetVersion(), 42); + UNIT_ASSERT_EQUAL_C(getConfigResult.GetConfig().rate_limiter_counters_mode(), Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_DETAILED, "Record: " << getConfigResult); + } + // Verify it is restored after reboot ctx.RebootTablet(); - + { - const auto getConfigResult = ctx.GetConfig(); - UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetConfig().path(), "/foo/bar/baz"); - UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetVersion(), 42); - UNIT_ASSERT_EQUAL_C(getConfigResult.GetConfig().rate_limiter_counters_mode(), Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_DETAILED, "Record: " << getConfigResult); + const auto getConfigResult = ctx.GetConfig(); + UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetConfig().path(), "/foo/bar/baz"); + UNIT_ASSERT_VALUES_EQUAL(getConfigResult.GetVersion(), 42); + UNIT_ASSERT_EQUAL_C(getConfigResult.GetConfig().rate_limiter_counters_mode(), Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_DETAILED, "Record: " << getConfigResult); } // Verify it is ok to repeat the event - 
ctx.SetConfig(12345, MakeConfig("/foo/bar/baz"), 42); + ctx.SetConfig(12345, MakeConfig("/foo/bar/baz"), 42); // Verify it is not ok to downgrade - ctx.SetConfig(12345, MakeConfig("/foo/bar/baz"), 41, Ydb::StatusIds::PRECONDITION_FAILED); + ctx.SetConfig(12345, MakeConfig("/foo/bar/baz"), 41, Ydb::StatusIds::PRECONDITION_FAILED); } Y_UNIT_TEST(TestRegisterProxy) { @@ -1310,371 +1310,371 @@ Y_UNIT_TEST_SUITE(TKesusTest) { ctx.MustRegisterProxy(proxy, 2); testAllFailures(Ydb::StatusIds::BAD_SESSION); } - - Y_UNIT_TEST(TestQuoterResourceDescribe) { - TTestContext ctx; - ctx.Setup(); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg1; - cfg1.SetMaxUnitsPerSecond(100500); - cfg1.SetMaxBurstSizeCoefficient(1.5); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg2; - cfg2.SetMaxUnitsPerSecond(10); - - ctx.AddQuoterResource("/Root", cfg1); // id=1 - ctx.AddQuoterResource("/Root/Folder", cfg1); // id=2 - ctx.AddQuoterResource("/Root/Q1", cfg2); // id=3 - ctx.AddQuoterResource("/Root/Folder/Q1", cfg2); // id=4 - ctx.AddQuoterResource("/Root/Folder/Q2", cfg2); // id=5 - ctx.AddQuoterResource("/Root/Folder/Q3", cfg2); // id=6 - - ctx.AddQuoterResource("/Root2", cfg1); // id=7 - ctx.AddQuoterResource("/Root2/Q", cfg2); // id=8 - - auto testDescribe = [&]() { - ctx.VerifyDescribeQuoterResources({100}, {}, false, Ydb::StatusIds::NOT_FOUND); // no such id - ctx.VerifyDescribeQuoterResources({}, {"Nonexistent/Path"}, false, Ydb::StatusIds::NOT_FOUND); // no such path - ctx.VerifyDescribeQuoterResources({}, {"/Root", ""}, false, Ydb::StatusIds::NOT_FOUND); // empty path - ctx.VerifyDescribeQuoterResources({1, 1}, {}, false); // two times is OK - ctx.VerifyDescribeQuoterResources({}, {"/Root2/Q", "/Root2/Q"}, false); // two times is OK - - { - // All - const auto resources = ctx.DescribeQuoterResources({}, {}, true); - UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 8); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(0).GetResourcePath(), "Root"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(1).GetResourcePath(), "Root/Folder"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(2).GetResourcePath(), "Root/Folder/Q1"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(3).GetResourcePath(), "Root/Folder/Q2"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(4).GetResourcePath(), "Root/Folder/Q3"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(5).GetResourcePath(), "Root/Q1"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(6).GetResourcePath(), "Root2"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(7).GetResourcePath(), "Root2/Q"); - - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(0).GetResourceId(), 1); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(1).GetResourceId(), 2); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(2).GetResourceId(), 4); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(3).GetResourceId(), 5); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(4).GetResourceId(), 6); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(5).GetResourceId(), 3); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(6).GetResourceId(), 7); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(7).GetResourceId(), 8); - - UNIT_ASSERT_DOUBLES_EQUAL( - resources.GetResources(1).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), - 100500, 0.001); - UNIT_ASSERT_DOUBLES_EQUAL( - resources.GetResources(6).GetHierarhicalDRRResourceConfig().GetMaxBurstSizeCoefficient(), - 1.5, 0.001); - UNIT_ASSERT_DOUBLES_EQUAL( - 
resources.GetResources(7).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), - 10, 0.001); - } - { - // All upper level - const auto resources = ctx.DescribeQuoterResources({}, {}, false); - UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 2); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(0).GetResourcePath(), "Root"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(1).GetResourcePath(), "Root2"); - - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(0).GetResourceId(), 1); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(1).GetResourceId(), 7); - } - { - // By id - const auto resources = ctx.DescribeQuoterResources({3, 2}, {}, true); - UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 5); - - const auto resources2 = ctx.DescribeQuoterResources({3, 2}, {}, false); - UNIT_ASSERT_VALUES_EQUAL(resources2.ResourcesSize(), 2); - } - { - // By path - const auto resources = ctx.DescribeQuoterResources({}, {"Root2/"}, true); - UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 2); - - const auto resources2 = ctx.DescribeQuoterResources({}, {"Root2/"}, false); - UNIT_ASSERT_VALUES_EQUAL(resources2.ResourcesSize(), 1); - } - }; - testDescribe(); - ctx.RebootTablet(); - testDescribe(); - } - - Y_UNIT_TEST(TestQuoterResourceCreation) { - TTestContext ctx; - ctx.Setup(); - - // validation - ctx.AddQuoterResource("/a/b", Ydb::StatusIds::BAD_REQUEST); // no parent resource - ctx.AddQuoterResource(":-)", Ydb::StatusIds::BAD_REQUEST); // invalid resource name - ctx.AddQuoterResource("", Ydb::StatusIds::BAD_REQUEST); // empty path - ctx.AddQuoterResource("/", Ydb::StatusIds::BAD_REQUEST); // empty path - ctx.AddQuoterResource("//", Ydb::StatusIds::BAD_REQUEST); // empty path - { - NKikimrKesus::TStreamingQuoterResource res; - res.SetResourceId(42); - res.SetResourcePath("/CorrentPath"); - res.MutableHierarhicalDRRResourceConfig(); - ctx.AddQuoterResource(res, Ydb::StatusIds::BAD_REQUEST); // resource id specified - } - { - NKikimrKesus::TStreamingQuoterResource res; - res.SetResourcePath("/CorrentPath"); - ctx.AddQuoterResource(res, Ydb::StatusIds::BAD_REQUEST); // DRR config is not specified - } - - ctx.AddQuoterResource("RootQuoter", 42.0); // OK - ctx.AddQuoterResource("/RootQuoter/", 42.0, Ydb::StatusIds::ALREADY_EXISTS); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(100500); - ctx.AddQuoterResource("/RootQuoter/", cfg, Ydb::StatusIds::BAD_REQUEST); // different settings - - ctx.AddQuoterResource("RootQuoter/SubQuoter"); - ctx.AddQuoterResource("/RootQuoter//OtherSubQuoter/", cfg); - - auto checkResources = [&]() { - const auto resources = ctx.DescribeQuoterResources({}, {}, true); - UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 3); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(0).GetResourcePath(), "RootQuoter"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(1).GetResourcePath(), "RootQuoter/OtherSubQuoter"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(2).GetResourcePath(), "RootQuoter/SubQuoter"); - - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(0).GetResourceId(), 1); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(1).GetResourceId(), 3); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(2).GetResourceId(), 2); - - UNIT_ASSERT_DOUBLES_EQUAL( - resources.GetResources(1).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), - 100500, 0.001); - }; - checkResources(); - ctx.RebootTablet(); - checkResources(); - - ctx.AddQuoterResource("/RootQuoter", 42.0, Ydb::StatusIds::ALREADY_EXISTS); // properly loaded - - // check 
that resource id was persisted - ctx.AddQuoterResource("OtherRootQuoter", 100.0); - const auto resources = ctx.DescribeQuoterResources({}, {"OtherRootQuoter"}, false); - UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(0).GetResourceId(), 4); - } - - Y_UNIT_TEST(TestQuoterHDRRParametersValidation) { - TTestContext ctx; - ctx.Setup(); - - { - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(-100); // negative - ctx.AddQuoterResource("/Res", cfg, Ydb::StatusIds::BAD_REQUEST); - } - - { - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - // no max units per second in root resource - ctx.AddQuoterResource("/ResWithoutMaxUnitsPerSecond", cfg, Ydb::StatusIds::BAD_REQUEST); - - cfg.SetMaxUnitsPerSecond(1); - ctx.AddQuoterResource("/ResWithMaxUnitsPerSecond", cfg, Ydb::StatusIds::SUCCESS); - - cfg.ClearMaxUnitsPerSecond(); // child can have no MaxUnitsPerSecond - ctx.AddQuoterResource("/ResWithMaxUnitsPerSecond/ChildWithoutMaxUnitsPerSecond", cfg, Ydb::StatusIds::SUCCESS); - } - } - - Y_UNIT_TEST(TestQuoterResourceModification) { - TTestContext ctx; - ctx.Setup(); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg1; - cfg1.SetMaxUnitsPerSecond(100); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg2; - cfg2.SetMaxUnitsPerSecond(5); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg3; - cfg3.SetMaxUnitsPerSecond(42); - - ctx.AddQuoterResource("/Root", cfg1); // id=1 - ctx.AddQuoterResource("/Root/Q", cfg1); // id=2 - - auto testBeforeModification = [&]() { - const auto resources = ctx.DescribeQuoterResources({}, {}, true); - UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 2); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(0).GetResourcePath(), "Root"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(1).GetResourcePath(), "Root/Q"); - - UNIT_ASSERT_DOUBLES_EQUAL( - resources.GetResources(0).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), - 100, 0.001); - UNIT_ASSERT_DOUBLES_EQUAL( - resources.GetResources(1).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), - 100, 0.001); - }; - testBeforeModification(); - ctx.RebootTablet(); - testBeforeModification(); - - auto testAfterModification = [&]() { - const auto resources = ctx.DescribeQuoterResources({}, {}, true); - UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 2); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(0).GetResourcePath(), "Root"); - UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(1).GetResourcePath(), "Root/Q"); - - UNIT_ASSERT_DOUBLES_EQUAL( - resources.GetResources(0).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), - 5, 0.001); - UNIT_ASSERT_DOUBLES_EQUAL( - resources.GetResources(1).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), - 5, 0.001); - }; - - ctx.UpdateQuoterResource(1, cfg2); - ctx.UpdateQuoterResource("/Root/Q", cfg2); - testAfterModification(); - ctx.RebootTablet(); - testAfterModification(); - - // test validation - { - NKikimrKesus::TStreamingQuoterResource req; - *req.MutableHierarhicalDRRResourceConfig() = cfg3; - - ctx.UpdateQuoterResource(req, Ydb::StatusIds::BAD_REQUEST); // no resource path and id - testAfterModification(); - ctx.RebootTablet(); - testAfterModification(); - + + Y_UNIT_TEST(TestQuoterResourceDescribe) { + TTestContext ctx; + ctx.Setup(); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg1; + cfg1.SetMaxUnitsPerSecond(100500); + cfg1.SetMaxBurstSizeCoefficient(1.5); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg2; + cfg2.SetMaxUnitsPerSecond(10); 
+ + ctx.AddQuoterResource("/Root", cfg1); // id=1 + ctx.AddQuoterResource("/Root/Folder", cfg1); // id=2 + ctx.AddQuoterResource("/Root/Q1", cfg2); // id=3 + ctx.AddQuoterResource("/Root/Folder/Q1", cfg2); // id=4 + ctx.AddQuoterResource("/Root/Folder/Q2", cfg2); // id=5 + ctx.AddQuoterResource("/Root/Folder/Q3", cfg2); // id=6 + + ctx.AddQuoterResource("/Root2", cfg1); // id=7 + ctx.AddQuoterResource("/Root2/Q", cfg2); // id=8 + + auto testDescribe = [&]() { + ctx.VerifyDescribeQuoterResources({100}, {}, false, Ydb::StatusIds::NOT_FOUND); // no such id + ctx.VerifyDescribeQuoterResources({}, {"Nonexistent/Path"}, false, Ydb::StatusIds::NOT_FOUND); // no such path + ctx.VerifyDescribeQuoterResources({}, {"/Root", ""}, false, Ydb::StatusIds::NOT_FOUND); // empty path + ctx.VerifyDescribeQuoterResources({1, 1}, {}, false); // two times is OK + ctx.VerifyDescribeQuoterResources({}, {"/Root2/Q", "/Root2/Q"}, false); // two times is OK + + { + // All + const auto resources = ctx.DescribeQuoterResources({}, {}, true); + UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 8); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(0).GetResourcePath(), "Root"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(1).GetResourcePath(), "Root/Folder"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(2).GetResourcePath(), "Root/Folder/Q1"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(3).GetResourcePath(), "Root/Folder/Q2"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(4).GetResourcePath(), "Root/Folder/Q3"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(5).GetResourcePath(), "Root/Q1"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(6).GetResourcePath(), "Root2"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(7).GetResourcePath(), "Root2/Q"); + + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(0).GetResourceId(), 1); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(1).GetResourceId(), 2); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(2).GetResourceId(), 4); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(3).GetResourceId(), 5); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(4).GetResourceId(), 6); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(5).GetResourceId(), 3); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(6).GetResourceId(), 7); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(7).GetResourceId(), 8); + + UNIT_ASSERT_DOUBLES_EQUAL( + resources.GetResources(1).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), + 100500, 0.001); + UNIT_ASSERT_DOUBLES_EQUAL( + resources.GetResources(6).GetHierarhicalDRRResourceConfig().GetMaxBurstSizeCoefficient(), + 1.5, 0.001); + UNIT_ASSERT_DOUBLES_EQUAL( + resources.GetResources(7).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), + 10, 0.001); + } + { + // All upper level + const auto resources = ctx.DescribeQuoterResources({}, {}, false); + UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 2); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(0).GetResourcePath(), "Root"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(1).GetResourcePath(), "Root2"); + + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(0).GetResourceId(), 1); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(1).GetResourceId(), 7); + } + { + // By id + const auto resources = ctx.DescribeQuoterResources({3, 2}, {}, true); + UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 5); + + const auto resources2 = ctx.DescribeQuoterResources({3, 2}, {}, false); + UNIT_ASSERT_VALUES_EQUAL(resources2.ResourcesSize(), 2); + } + { + // By 
path + const auto resources = ctx.DescribeQuoterResources({}, {"Root2/"}, true); + UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 2); + + const auto resources2 = ctx.DescribeQuoterResources({}, {"Root2/"}, false); + UNIT_ASSERT_VALUES_EQUAL(resources2.ResourcesSize(), 1); + } + }; + testDescribe(); + ctx.RebootTablet(); + testDescribe(); + } + + Y_UNIT_TEST(TestQuoterResourceCreation) { + TTestContext ctx; + ctx.Setup(); + + // validation + ctx.AddQuoterResource("/a/b", Ydb::StatusIds::BAD_REQUEST); // no parent resource + ctx.AddQuoterResource(":-)", Ydb::StatusIds::BAD_REQUEST); // invalid resource name + ctx.AddQuoterResource("", Ydb::StatusIds::BAD_REQUEST); // empty path + ctx.AddQuoterResource("/", Ydb::StatusIds::BAD_REQUEST); // empty path + ctx.AddQuoterResource("//", Ydb::StatusIds::BAD_REQUEST); // empty path + { + NKikimrKesus::TStreamingQuoterResource res; + res.SetResourceId(42); + res.SetResourcePath("/CorrentPath"); + res.MutableHierarhicalDRRResourceConfig(); + ctx.AddQuoterResource(res, Ydb::StatusIds::BAD_REQUEST); // resource id specified + } + { + NKikimrKesus::TStreamingQuoterResource res; + res.SetResourcePath("/CorrentPath"); + ctx.AddQuoterResource(res, Ydb::StatusIds::BAD_REQUEST); // DRR config is not specified + } + + ctx.AddQuoterResource("RootQuoter", 42.0); // OK + ctx.AddQuoterResource("/RootQuoter/", 42.0, Ydb::StatusIds::ALREADY_EXISTS); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(100500); + ctx.AddQuoterResource("/RootQuoter/", cfg, Ydb::StatusIds::BAD_REQUEST); // different settings + + ctx.AddQuoterResource("RootQuoter/SubQuoter"); + ctx.AddQuoterResource("/RootQuoter//OtherSubQuoter/", cfg); + + auto checkResources = [&]() { + const auto resources = ctx.DescribeQuoterResources({}, {}, true); + UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 3); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(0).GetResourcePath(), "RootQuoter"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(1).GetResourcePath(), "RootQuoter/OtherSubQuoter"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(2).GetResourcePath(), "RootQuoter/SubQuoter"); + + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(0).GetResourceId(), 1); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(1).GetResourceId(), 3); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(2).GetResourceId(), 2); + + UNIT_ASSERT_DOUBLES_EQUAL( + resources.GetResources(1).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), + 100500, 0.001); + }; + checkResources(); + ctx.RebootTablet(); + checkResources(); + + ctx.AddQuoterResource("/RootQuoter", 42.0, Ydb::StatusIds::ALREADY_EXISTS); // properly loaded + + // check that resource id was persisted + ctx.AddQuoterResource("OtherRootQuoter", 100.0); + const auto resources = ctx.DescribeQuoterResources({}, {"OtherRootQuoter"}, false); + UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(resources.GetResources(0).GetResourceId(), 4); + } + + Y_UNIT_TEST(TestQuoterHDRRParametersValidation) { + TTestContext ctx; + ctx.Setup(); + + { + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(-100); // negative + ctx.AddQuoterResource("/Res", cfg, Ydb::StatusIds::BAD_REQUEST); + } + + { + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + // no max units per second in root resource + ctx.AddQuoterResource("/ResWithoutMaxUnitsPerSecond", cfg, Ydb::StatusIds::BAD_REQUEST); + + cfg.SetMaxUnitsPerSecond(1); + ctx.AddQuoterResource("/ResWithMaxUnitsPerSecond", cfg, 
Ydb::StatusIds::SUCCESS); + + cfg.ClearMaxUnitsPerSecond(); // child can have no MaxUnitsPerSecond + ctx.AddQuoterResource("/ResWithMaxUnitsPerSecond/ChildWithoutMaxUnitsPerSecond", cfg, Ydb::StatusIds::SUCCESS); + } + } + + Y_UNIT_TEST(TestQuoterResourceModification) { + TTestContext ctx; + ctx.Setup(); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg1; + cfg1.SetMaxUnitsPerSecond(100); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg2; + cfg2.SetMaxUnitsPerSecond(5); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg3; + cfg3.SetMaxUnitsPerSecond(42); + + ctx.AddQuoterResource("/Root", cfg1); // id=1 + ctx.AddQuoterResource("/Root/Q", cfg1); // id=2 + + auto testBeforeModification = [&]() { + const auto resources = ctx.DescribeQuoterResources({}, {}, true); + UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 2); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(0).GetResourcePath(), "Root"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(1).GetResourcePath(), "Root/Q"); + + UNIT_ASSERT_DOUBLES_EQUAL( + resources.GetResources(0).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), + 100, 0.001); + UNIT_ASSERT_DOUBLES_EQUAL( + resources.GetResources(1).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), + 100, 0.001); + }; + testBeforeModification(); + ctx.RebootTablet(); + testBeforeModification(); + + auto testAfterModification = [&]() { + const auto resources = ctx.DescribeQuoterResources({}, {}, true); + UNIT_ASSERT_VALUES_EQUAL(resources.ResourcesSize(), 2); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(0).GetResourcePath(), "Root"); + UNIT_ASSERT_STRINGS_EQUAL(resources.GetResources(1).GetResourcePath(), "Root/Q"); + + UNIT_ASSERT_DOUBLES_EQUAL( + resources.GetResources(0).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), + 5, 0.001); + UNIT_ASSERT_DOUBLES_EQUAL( + resources.GetResources(1).GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), + 5, 0.001); + }; + + ctx.UpdateQuoterResource(1, cfg2); + ctx.UpdateQuoterResource("/Root/Q", cfg2); + testAfterModification(); + ctx.RebootTablet(); + testAfterModification(); + + // test validation + { + NKikimrKesus::TStreamingQuoterResource req; + *req.MutableHierarhicalDRRResourceConfig() = cfg3; + + ctx.UpdateQuoterResource(req, Ydb::StatusIds::BAD_REQUEST); // no resource path and id + testAfterModification(); + ctx.RebootTablet(); + testAfterModification(); + req.SetResourcePath("?Invalid/Path?"); - ctx.UpdateQuoterResource(req, Ydb::StatusIds::BAD_REQUEST); // invalid path - testAfterModification(); - ctx.RebootTablet(); - testAfterModification(); - - req.SetResourcePath("/Root/Q"); - req.ClearAlgorithmSpecificConfig(); - ctx.UpdateQuoterResource(req, Ydb::StatusIds::BAD_REQUEST); // no config - testAfterModification(); - ctx.RebootTablet(); - testAfterModification(); - - req.SetResourcePath("/Root/P"); - *req.MutableHierarhicalDRRResourceConfig() = cfg3; - ctx.UpdateQuoterResource(req, Ydb::StatusIds::NOT_FOUND); // no such resource - testAfterModification(); - ctx.RebootTablet(); - testAfterModification(); - - req.ClearResourcePath(); - req.SetResourceId(42); - ctx.UpdateQuoterResource(req, Ydb::StatusIds::NOT_FOUND); // no such resource - testAfterModification(); - ctx.RebootTablet(); - testAfterModification(); - } - } - - Y_UNIT_TEST(TestQuoterResourceDeletion) { - TTestContext ctx; - ctx.Setup(); - - ctx.AddQuoterResource("/Root", 1.0); // id=1 - ctx.AddQuoterResource("/Root/Q"); // id=2 - ctx.AddQuoterResource("/Root/Folder"); // id=3 - ctx.AddQuoterResource("/Root/Folder/Q1"); // 
id=4 - - UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); - - // validation - { - NKikimrKesus::TEvDeleteQuoterResource req; - ctx.DeleteQuoterResource(req, Ydb::StatusIds::BAD_REQUEST); // no resource - UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); - + ctx.UpdateQuoterResource(req, Ydb::StatusIds::BAD_REQUEST); // invalid path + testAfterModification(); + ctx.RebootTablet(); + testAfterModification(); + + req.SetResourcePath("/Root/Q"); + req.ClearAlgorithmSpecificConfig(); + ctx.UpdateQuoterResource(req, Ydb::StatusIds::BAD_REQUEST); // no config + testAfterModification(); + ctx.RebootTablet(); + testAfterModification(); + + req.SetResourcePath("/Root/P"); + *req.MutableHierarhicalDRRResourceConfig() = cfg3; + ctx.UpdateQuoterResource(req, Ydb::StatusIds::NOT_FOUND); // no such resource + testAfterModification(); + ctx.RebootTablet(); + testAfterModification(); + + req.ClearResourcePath(); + req.SetResourceId(42); + ctx.UpdateQuoterResource(req, Ydb::StatusIds::NOT_FOUND); // no such resource + testAfterModification(); + ctx.RebootTablet(); + testAfterModification(); + } + } + + Y_UNIT_TEST(TestQuoterResourceDeletion) { + TTestContext ctx; + ctx.Setup(); + + ctx.AddQuoterResource("/Root", 1.0); // id=1 + ctx.AddQuoterResource("/Root/Q"); // id=2 + ctx.AddQuoterResource("/Root/Folder"); // id=3 + ctx.AddQuoterResource("/Root/Folder/Q1"); // id=4 + + UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); + + // validation + { + NKikimrKesus::TEvDeleteQuoterResource req; + ctx.DeleteQuoterResource(req, Ydb::StatusIds::BAD_REQUEST); // no resource + UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); + req.SetResourcePath("?Invalid/Path?"); - ctx.DeleteQuoterResource(req, Ydb::StatusIds::BAD_REQUEST); // invalid path - UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); - - req.SetResourcePath("/Root/Folder/NonexistingRes"); - ctx.DeleteQuoterResource(req, Ydb::StatusIds::NOT_FOUND); - UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); - - req.ClearResourcePath(); - req.SetResourceId(100); - ctx.DeleteQuoterResource(req, Ydb::StatusIds::NOT_FOUND); - UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); - - ctx.DeleteQuoterResource(3, Ydb::StatusIds::BAD_REQUEST); // Folder is not empty - UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); - } - - ctx.DeleteQuoterResource("/Root/Folder/Q1"); // By name - UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 3); - ctx.RebootTablet(); - UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 3); - - ctx.DeleteQuoterResource(3); // By id - UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 2); - ctx.RebootTablet(); - UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 2); - } - - Y_UNIT_TEST(TestQuoterSubscribeOnResource) { - TTestContext ctx; - ctx.Setup(); - - ctx.AddQuoterResource("/Q1", 10.0); // id=1 - ctx.AddQuoterResource("/Q2", 10.0); // id=2 - - auto edge = ctx.Runtime->AllocateEdgeActor(); - auto client = ctx.Runtime->AllocateEdgeActor(); - auto ans = ctx.SubscribeOnResource(client, edge, "Q1", false); - UNIT_ASSERT_VALUES_EQUAL(ans.GetResults(0).GetResourceId(), 1); - - auto client2 = ctx.Runtime->AllocateEdgeActor(); - ans = 
ctx.SubscribeOnResources( - client2, - edge, - { - TTestContext::TResourceConsumingInfo("/Q1", false), - TTestContext::TResourceConsumingInfo("/Q2", false), - TTestContext::TResourceConsumingInfo("/Q3", false, 0.0, Ydb::StatusIds::NOT_FOUND), - } - ); - UNIT_ASSERT_VALUES_EQUAL(ans.GetResults(0).GetResourceId(), 1); - UNIT_ASSERT_VALUES_EQUAL(ans.GetResults(1).GetResourceId(), 2); - } - - Y_UNIT_TEST(TestAllocatesResources) { - TTestContext ctx; - ctx.Setup(); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(100.0); - ctx.AddQuoterResource("/Root", cfg); - ctx.AddQuoterResource("/Root/Res"); // With inherited settings. - - auto edge = ctx.Runtime->AllocateEdgeActor(); - auto client = ctx.Runtime->AllocateEdgeActor(); - const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult = ctx.SubscribeOnResource(client, edge, "/Root/Res", false, 0); - UNIT_ASSERT(subscribeResult.GetResults(0).HasEffectiveProps()); - ctx.UpdateConsumptionState(client, edge, subscribeResult.GetResults(0).GetResourceId(), true, 50.0); - - double allocated = 0.0; - do { - auto result = ctx.ExpectEdgeEvent<TEvKesus::TEvResourcesAllocated>(edge); - UNIT_ASSERT_VALUES_EQUAL(result->Record.ResourcesInfoSize(), 1); - const auto& info = result->Record.GetResourcesInfo(0); - UNIT_ASSERT_VALUES_EQUAL(info.GetStateNotification().GetStatus(), Ydb::StatusIds::SUCCESS); - allocated += info.GetAmount(); - } while (allocated < 49.99); - } - + ctx.DeleteQuoterResource(req, Ydb::StatusIds::BAD_REQUEST); // invalid path + UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); + + req.SetResourcePath("/Root/Folder/NonexistingRes"); + ctx.DeleteQuoterResource(req, Ydb::StatusIds::NOT_FOUND); + UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); + + req.ClearResourcePath(); + req.SetResourceId(100); + ctx.DeleteQuoterResource(req, Ydb::StatusIds::NOT_FOUND); + UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); + + ctx.DeleteQuoterResource(3, Ydb::StatusIds::BAD_REQUEST); // Folder is not empty + UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 4); + } + + ctx.DeleteQuoterResource("/Root/Folder/Q1"); // By name + UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 3); + ctx.RebootTablet(); + UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 3); + + ctx.DeleteQuoterResource(3); // By id + UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 2); + ctx.RebootTablet(); + UNIT_ASSERT_VALUES_EQUAL(ctx.DescribeQuoterResources({}, {}, true).ResourcesSize(), 2); + } + + Y_UNIT_TEST(TestQuoterSubscribeOnResource) { + TTestContext ctx; + ctx.Setup(); + + ctx.AddQuoterResource("/Q1", 10.0); // id=1 + ctx.AddQuoterResource("/Q2", 10.0); // id=2 + + auto edge = ctx.Runtime->AllocateEdgeActor(); + auto client = ctx.Runtime->AllocateEdgeActor(); + auto ans = ctx.SubscribeOnResource(client, edge, "Q1", false); + UNIT_ASSERT_VALUES_EQUAL(ans.GetResults(0).GetResourceId(), 1); + + auto client2 = ctx.Runtime->AllocateEdgeActor(); + ans = ctx.SubscribeOnResources( + client2, + edge, + { + TTestContext::TResourceConsumingInfo("/Q1", false), + TTestContext::TResourceConsumingInfo("/Q2", false), + TTestContext::TResourceConsumingInfo("/Q3", false, 0.0, Ydb::StatusIds::NOT_FOUND), + } + ); + UNIT_ASSERT_VALUES_EQUAL(ans.GetResults(0).GetResourceId(), 1); + 
UNIT_ASSERT_VALUES_EQUAL(ans.GetResults(1).GetResourceId(), 2); + } + + Y_UNIT_TEST(TestAllocatesResources) { + TTestContext ctx; + ctx.Setup(); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(100.0); + ctx.AddQuoterResource("/Root", cfg); + ctx.AddQuoterResource("/Root/Res"); // With inherited settings. + + auto edge = ctx.Runtime->AllocateEdgeActor(); + auto client = ctx.Runtime->AllocateEdgeActor(); + const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult = ctx.SubscribeOnResource(client, edge, "/Root/Res", false, 0); + UNIT_ASSERT(subscribeResult.GetResults(0).HasEffectiveProps()); + ctx.UpdateConsumptionState(client, edge, subscribeResult.GetResults(0).GetResourceId(), true, 50.0); + + double allocated = 0.0; + do { + auto result = ctx.ExpectEdgeEvent<TEvKesus::TEvResourcesAllocated>(edge); + UNIT_ASSERT_VALUES_EQUAL(result->Record.ResourcesInfoSize(), 1); + const auto& info = result->Record.GetResourcesInfo(0); + UNIT_ASSERT_VALUES_EQUAL(info.GetStateNotification().GetStatus(), Ydb::StatusIds::SUCCESS); + allocated += info.GetAmount(); + } while (allocated < 49.99); + } + Y_UNIT_TEST(TestQuoterAccountResourcesOnDemand) { TTestContext ctx; ctx.Setup(); @@ -2084,156 +2084,156 @@ Y_UNIT_TEST_SUITE(TKesusTest) { } } - Y_UNIT_TEST(TestPassesUpdatedPropsToSession) { - TTestContext ctx; - ctx.Setup(); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(100.0); - ctx.AddQuoterResource("/Root", cfg); - ctx.AddQuoterResource("/Root/Res"); - - auto edge = ctx.Runtime->AllocateEdgeActor(); - auto client = ctx.Runtime->AllocateEdgeActor(); - const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult = ctx.SubscribeOnResource(client, edge, "/Root/Res", false, 0); - UNIT_ASSERT(subscribeResult.GetResults(0).HasEffectiveProps()); - - // update - cfg.SetMaxUnitsPerSecond(150.0); - ctx.UpdateQuoterResource("/Root", cfg); - - ctx.UpdateConsumptionState(client, edge, subscribeResult.GetResults(0).GetResourceId(), true, 50.0); - - auto result = ctx.ExpectEdgeEvent<TEvKesus::TEvResourcesAllocated>(edge); - UNIT_ASSERT_VALUES_EQUAL(result->Record.ResourcesInfoSize(), 1); - const auto& info = result->Record.GetResourcesInfo(0); - UNIT_ASSERT_VALUES_EQUAL(info.GetStateNotification().GetStatus(), Ydb::StatusIds::SUCCESS); - UNIT_ASSERT_DOUBLES_EQUAL(info.GetEffectiveProps().GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), 150.0, 0.001); - } - - Y_UNIT_TEST(TestGetQuoterResourceCounters) { - TTestContext ctx; - ctx.Setup(); - //ctx.Runtime->SetLogPriority(NKikimrServices::KESUS_TABLET, NLog::PRI_TRACE); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(1000.0); - ctx.AddQuoterResource("/Root1", cfg); - ctx.AddQuoterResource("/Root1/Res"); - - ctx.AddQuoterResource("/Root2", cfg); - ctx.AddQuoterResource("/Root2/Res"); - ctx.AddQuoterResource("/Root2/Res/Subres"); - - auto edge = ctx.Runtime->AllocateEdgeActor(); - auto client = ctx.Runtime->AllocateEdgeActor(); - const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult = ctx.SubscribeOnResource(client, edge, "/Root1/Res", true, 300); - UNIT_ASSERT(subscribeResult.GetResults(0).HasEffectiveProps()); - + Y_UNIT_TEST(TestPassesUpdatedPropsToSession) { + TTestContext ctx; + ctx.Setup(); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(100.0); + ctx.AddQuoterResource("/Root", cfg); + ctx.AddQuoterResource("/Root/Res"); + + auto edge = ctx.Runtime->AllocateEdgeActor(); + auto client = 
ctx.Runtime->AllocateEdgeActor(); + const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult = ctx.SubscribeOnResource(client, edge, "/Root/Res", false, 0); + UNIT_ASSERT(subscribeResult.GetResults(0).HasEffectiveProps()); + + // update + cfg.SetMaxUnitsPerSecond(150.0); + ctx.UpdateQuoterResource("/Root", cfg); + + ctx.UpdateConsumptionState(client, edge, subscribeResult.GetResults(0).GetResourceId(), true, 50.0); + + auto result = ctx.ExpectEdgeEvent<TEvKesus::TEvResourcesAllocated>(edge); + UNIT_ASSERT_VALUES_EQUAL(result->Record.ResourcesInfoSize(), 1); + const auto& info = result->Record.GetResourcesInfo(0); + UNIT_ASSERT_VALUES_EQUAL(info.GetStateNotification().GetStatus(), Ydb::StatusIds::SUCCESS); + UNIT_ASSERT_DOUBLES_EQUAL(info.GetEffectiveProps().GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond(), 150.0, 0.001); + } + + Y_UNIT_TEST(TestGetQuoterResourceCounters) { + TTestContext ctx; + ctx.Setup(); + //ctx.Runtime->SetLogPriority(NKikimrServices::KESUS_TABLET, NLog::PRI_TRACE); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(1000.0); + ctx.AddQuoterResource("/Root1", cfg); + ctx.AddQuoterResource("/Root1/Res"); + + ctx.AddQuoterResource("/Root2", cfg); + ctx.AddQuoterResource("/Root2/Res"); + ctx.AddQuoterResource("/Root2/Res/Subres"); + + auto edge = ctx.Runtime->AllocateEdgeActor(); + auto client = ctx.Runtime->AllocateEdgeActor(); + const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult = ctx.SubscribeOnResource(client, edge, "/Root1/Res", true, 300); + UNIT_ASSERT(subscribeResult.GetResults(0).HasEffectiveProps()); + auto WaitAllocated = [&ctx](const TActorId edge, double amount) -> double { - double allocated = 0.0; - do { - auto result = ctx.ExpectEdgeEvent<TEvKesus::TEvResourcesAllocated>(edge); - UNIT_ASSERT_VALUES_EQUAL(result->Record.ResourcesInfoSize(), 1); - const auto& info = result->Record.GetResourcesInfo(0); - UNIT_ASSERT_VALUES_EQUAL(info.GetStateNotification().GetStatus(), Ydb::StatusIds::SUCCESS); - allocated += info.GetAmount(); - } while (allocated < amount - 0.01); - return allocated; - }; - - // Wait allocation - const double allocated1First = WaitAllocated(edge, 300); - - auto CheckCountersValues = [&ctx](ui64 v1, ui64 v2) { - auto counters = ctx.GetQuoterResourceCounters(); - UNIT_ASSERT_VALUES_EQUAL_C(counters.ResourceCountersSize(), 5, counters); - UNIT_ASSERT_VALUES_EQUAL(counters.GetResourceCounters(0).GetResourcePath(), "Root1"); - UNIT_ASSERT_VALUES_EQUAL(counters.GetResourceCounters(1).GetResourcePath(), "Root1/Res"); - UNIT_ASSERT_VALUES_EQUAL(counters.GetResourceCounters(2).GetResourcePath(), "Root2"); - UNIT_ASSERT_VALUES_EQUAL(counters.GetResourceCounters(3).GetResourcePath(), "Root2/Res"); - UNIT_ASSERT_VALUES_EQUAL(counters.GetResourceCounters(4).GetResourcePath(), "Root2/Res/Subres"); - UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(0).GetAllocated(), v1, counters); - UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(1).GetAllocated(), v1, counters); - UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(2).GetAllocated(), v2, counters); - UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(3).GetAllocated(), v2, counters); - UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(4).GetAllocated(), v2, counters); - }; - - CheckCountersValues(static_cast<ui64>(allocated1First), 0); - - auto edge2 = ctx.Runtime->AllocateEdgeActor(); - auto client2 = ctx.Runtime->AllocateEdgeActor(); - const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult2First = 
ctx.SubscribeOnResource(client2, edge2, "/Root2/Res/Subres", true, 200); - UNIT_ASSERT(subscribeResult2First.GetResults(0).HasEffectiveProps()); - - const double allocated2First = WaitAllocated(edge2, 200); - CheckCountersValues(static_cast<ui64>(allocated1First), static_cast<ui64>(allocated2First)); - - const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult1Second = ctx.SubscribeOnResource(client, edge, "/Root1/Res", true, 20); - UNIT_ASSERT(subscribeResult1Second.GetResults(0).HasEffectiveProps()); - - const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult2Second = ctx.SubscribeOnResource(client2, edge2, "/Root2/Res/Subres", true, 50); - UNIT_ASSERT(subscribeResult2Second.GetResults(0).HasEffectiveProps()); - - const double allocated1Second = WaitAllocated(edge, 20); - const double allocated2Second = WaitAllocated(edge2, 50); - - CheckCountersValues(static_cast<ui64>(allocated1First + allocated1Second), static_cast<ui64>(allocated2First + allocated2Second)); - } - - Y_UNIT_TEST(TestStopResourceAllocationWhenPipeDestroyed) { - TTestContext ctx; - ctx.Setup(); - - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(100.0); - ctx.AddQuoterResource("Root", cfg); - + double allocated = 0.0; + do { + auto result = ctx.ExpectEdgeEvent<TEvKesus::TEvResourcesAllocated>(edge); + UNIT_ASSERT_VALUES_EQUAL(result->Record.ResourcesInfoSize(), 1); + const auto& info = result->Record.GetResourcesInfo(0); + UNIT_ASSERT_VALUES_EQUAL(info.GetStateNotification().GetStatus(), Ydb::StatusIds::SUCCESS); + allocated += info.GetAmount(); + } while (allocated < amount - 0.01); + return allocated; + }; + + // Wait allocation + const double allocated1First = WaitAllocated(edge, 300); + + auto CheckCountersValues = [&ctx](ui64 v1, ui64 v2) { + auto counters = ctx.GetQuoterResourceCounters(); + UNIT_ASSERT_VALUES_EQUAL_C(counters.ResourceCountersSize(), 5, counters); + UNIT_ASSERT_VALUES_EQUAL(counters.GetResourceCounters(0).GetResourcePath(), "Root1"); + UNIT_ASSERT_VALUES_EQUAL(counters.GetResourceCounters(1).GetResourcePath(), "Root1/Res"); + UNIT_ASSERT_VALUES_EQUAL(counters.GetResourceCounters(2).GetResourcePath(), "Root2"); + UNIT_ASSERT_VALUES_EQUAL(counters.GetResourceCounters(3).GetResourcePath(), "Root2/Res"); + UNIT_ASSERT_VALUES_EQUAL(counters.GetResourceCounters(4).GetResourcePath(), "Root2/Res/Subres"); + UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(0).GetAllocated(), v1, counters); + UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(1).GetAllocated(), v1, counters); + UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(2).GetAllocated(), v2, counters); + UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(3).GetAllocated(), v2, counters); + UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(4).GetAllocated(), v2, counters); + }; + + CheckCountersValues(static_cast<ui64>(allocated1First), 0); + + auto edge2 = ctx.Runtime->AllocateEdgeActor(); + auto client2 = ctx.Runtime->AllocateEdgeActor(); + const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult2First = ctx.SubscribeOnResource(client2, edge2, "/Root2/Res/Subres", true, 200); + UNIT_ASSERT(subscribeResult2First.GetResults(0).HasEffectiveProps()); + + const double allocated2First = WaitAllocated(edge2, 200); + CheckCountersValues(static_cast<ui64>(allocated1First), static_cast<ui64>(allocated2First)); + + const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult1Second = ctx.SubscribeOnResource(client, edge, "/Root1/Res", true, 20); + 
UNIT_ASSERT(subscribeResult1Second.GetResults(0).HasEffectiveProps()); + + const NKikimrKesus::TEvSubscribeOnResourcesResult subscribeResult2Second = ctx.SubscribeOnResource(client2, edge2, "/Root2/Res/Subres", true, 50); + UNIT_ASSERT(subscribeResult2Second.GetResults(0).HasEffectiveProps()); + + const double allocated1Second = WaitAllocated(edge, 20); + const double allocated2Second = WaitAllocated(edge2, 50); + + CheckCountersValues(static_cast<ui64>(allocated1First + allocated1Second), static_cast<ui64>(allocated2First + allocated2Second)); + } + + Y_UNIT_TEST(TestStopResourceAllocationWhenPipeDestroyed) { + TTestContext ctx; + ctx.Setup(); + + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(100.0); + ctx.AddQuoterResource("Root", cfg); + auto CreateSession = [&]() -> std::pair<TActorId, TActorId> { TActorId edge = ctx.Runtime->AllocateEdgeActor(); const TActorId sessionPipe = ctx.Runtime->ConnectToPipe(ctx.TabletId, edge, 0, GetPipeConfigWithRetries()); - auto req = MakeHolder<TEvKesus::TEvSubscribeOnResources>(); + auto req = MakeHolder<TEvKesus::TEvSubscribeOnResources>(); ActorIdToProto(edge, req->Record.MutableActorID()); - auto* reqRes = req->Record.AddResources(); - reqRes->SetResourcePath("Root"); - reqRes->SetStartConsuming(true); - reqRes->SetInitialAmount(std::numeric_limits<double>::infinity()); - ctx.Runtime->SendToPipe( - ctx.TabletId, - edge, - req.Release(), - 0, - GetPipeConfigWithRetries(), - sessionPipe, - 0); - return std::make_pair(edge, sessionPipe); - }; - + auto* reqRes = req->Record.AddResources(); + reqRes->SetResourcePath("Root"); + reqRes->SetStartConsuming(true); + reqRes->SetInitialAmount(std::numeric_limits<double>::infinity()); + ctx.Runtime->SendToPipe( + ctx.TabletId, + edge, + req.Release(), + 0, + GetPipeConfigWithRetries(), + sessionPipe, + 0); + return std::make_pair(edge, sessionPipe); + }; + const std::pair<TActorId, TActorId> edgeAndSession1 = CreateSession(); const std::pair<TActorId, TActorId> edgeAndSession2 = CreateSession(); - + auto WaitAllocation = [&](TActorId edge, double expectedAmount) { - size_t attempts = 30; - do { - auto result = ctx.ExpectEdgeEvent<TEvKesus::TEvResourcesAllocated>(edge); - UNIT_ASSERT_VALUES_EQUAL(result->Record.ResourcesInfoSize(), 1); - const auto& info = result->Record.GetResourcesInfo(0); - UNIT_ASSERT_VALUES_EQUAL(info.GetStateNotification().GetStatus(), Ydb::StatusIds::SUCCESS); - if (std::abs(info.GetAmount() - expectedAmount) <= 0.01) { - break; // OK - } - } while (--attempts); - UNIT_ASSERT(attempts); - }; - - WaitAllocation(edgeAndSession1.first, 5); - WaitAllocation(edgeAndSession2.first, 5); - - // Kill pipe and then session must be deactivated on kesus. - ctx.Runtime->Send(new IEventHandle(edgeAndSession2.second, edgeAndSession2.first, new TEvents::TEvPoisonPill())); - WaitAllocation(edgeAndSession1.first, 10); // Now first session is the only active session and it receives all resource. 
- } + size_t attempts = 30; + do { + auto result = ctx.ExpectEdgeEvent<TEvKesus::TEvResourcesAllocated>(edge); + UNIT_ASSERT_VALUES_EQUAL(result->Record.ResourcesInfoSize(), 1); + const auto& info = result->Record.GetResourcesInfo(0); + UNIT_ASSERT_VALUES_EQUAL(info.GetStateNotification().GetStatus(), Ydb::StatusIds::SUCCESS); + if (std::abs(info.GetAmount() - expectedAmount) <= 0.01) { + break; // OK + } + } while (--attempts); + UNIT_ASSERT(attempts); + }; + + WaitAllocation(edgeAndSession1.first, 5); + WaitAllocation(edgeAndSession2.first, 5); + + // Kill pipe and then session must be deactivated on kesus. + ctx.Runtime->Send(new IEventHandle(edgeAndSession2.second, edgeAndSession2.first, new TEvents::TEvPoisonPill())); + WaitAllocation(edgeAndSession1.first, 10); // Now first session is the only active session and it receives all resource. + } } } diff --git a/ydb/core/kesus/tablet/tx_config_get.cpp b/ydb/core/kesus/tablet/tx_config_get.cpp index 06f81a1abe0..8d1c087e78f 100644 --- a/ydb/core/kesus/tablet/tx_config_get.cpp +++ b/ydb/core/kesus/tablet/tx_config_get.cpp @@ -32,7 +32,7 @@ struct TKesusTablet::TTxConfigGet : public TTxBase { config->set_session_grace_period_millis(Self->SessionGracePeriod.MilliSeconds()); config->set_read_consistency_mode(Self->ReadConsistencyMode); config->set_attach_consistency_mode(Self->AttachConsistencyMode); - config->set_rate_limiter_counters_mode(Self->RateLimiterCountersMode); + config->set_rate_limiter_counters_mode(Self->RateLimiterCountersMode); Reply->Record.SetVersion(Self->ConfigVersion); Reply->Record.SetPath(Self->KesusPath); return true; diff --git a/ydb/core/kesus/tablet/tx_config_set.cpp b/ydb/core/kesus/tablet/tx_config_set.cpp index 93eae8a9edf..a934ef03209 100644 --- a/ydb/core/kesus/tablet/tx_config_set.cpp +++ b/ydb/core/kesus/tablet/tx_config_set.cpp @@ -2,7 +2,7 @@ #include <ydb/core/base/appdata.h> #include <ydb/core/base/counters.h> - + namespace NKikimr { namespace NKesus { @@ -46,10 +46,10 @@ struct TKesusTablet::TTxConfigSet : public TTxBase { NIceDb::TNiceDb db(txc.DB); if (newPath) { - if (Self->KesusPath != newPath) { - Self->QuoterResources.SetQuoterCounters(GetServiceCounters(AppData()->Counters, "quoter_service")->GetSubgroup("quoter", newPath)); - Self->QuoterResources.SetQuoterPath(newPath); - } + if (Self->KesusPath != newPath) { + Self->QuoterResources.SetQuoterCounters(GetServiceCounters(AppData()->Counters, "quoter_service")->GetSubgroup("quoter", newPath)); + Self->QuoterResources.SetQuoterPath(newPath); + } Self->KesusPath = newPath; Self->PersistSysParam(db, Schema::SysParam_KesusPath, Self->KesusPath); } @@ -74,14 +74,14 @@ struct TKesusTablet::TTxConfigSet : public TTxBase { Self->PersistSysParam(db, Schema::SysParam_AttachConsistencyMode, ToString(static_cast<ui64>(Self->AttachConsistencyMode))); } - if (Record.GetConfig().rate_limiter_counters_mode()) { - Self->RateLimiterCountersMode = Record.GetConfig().rate_limiter_counters_mode(); - Self->PersistSysParam(db, Schema::SysParam_RateLimiterCountersMode, ToString(static_cast<ui64>(Self->RateLimiterCountersMode))); - - // Apply mode to resource tree - Self->QuoterResources.EnableDetailedCountersMode(Self->RateLimiterCountersMode == Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_DETAILED); - } - + if (Record.GetConfig().rate_limiter_counters_mode()) { + Self->RateLimiterCountersMode = Record.GetConfig().rate_limiter_counters_mode(); + Self->PersistSysParam(db, Schema::SysParam_RateLimiterCountersMode, 
ToString(static_cast<ui64>(Self->RateLimiterCountersMode))); + + // Apply mode to resource tree + Self->QuoterResources.EnableDetailedCountersMode(Self->RateLimiterCountersMode == Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_DETAILED); + } + Self->ConfigVersion = newVersion; Self->PersistSysParam(db, Schema::SysParam_ConfigVersion, ToString(Self->ConfigVersion)); diff --git a/ydb/core/kesus/tablet/tx_init.cpp b/ydb/core/kesus/tablet/tx_init.cpp index 1f3f96ff9f3..a04c1d9fb95 100644 --- a/ydb/core/kesus/tablet/tx_init.cpp +++ b/ydb/core/kesus/tablet/tx_init.cpp @@ -1,11 +1,11 @@ #include "tablet_impl.h" -#include "quoter_resource_tree.h" +#include "quoter_resource_tree.h" #include "schema.h" #include <ydb/core/base/counters.h> #include <ydb/core/base/appdata.h> - + #include <util/string/cast.h> namespace NKikimr { @@ -39,12 +39,12 @@ struct TKesusTablet::TTxInit : public TTxBase { auto sessionsRowset = db.Table<Schema::Sessions>().Range().Select(); auto semaphoresRowset = db.Table<Schema::Semaphores>().Range().Select(); auto sessionSemaphoresRowset = db.Table<Schema::SessionSemaphores>().Range().Select(); - auto quoterResourcesRowset = db.Table<Schema::QuoterResources>().Range().Select(); + auto quoterResourcesRowset = db.Table<Schema::QuoterResources>().Range().Select(); if (!sysParamsRowset.IsReady() || !sessionsRowset.IsReady() || !semaphoresRowset.IsReady() || - !sessionSemaphoresRowset.IsReady() || - !quoterResourcesRowset.IsReady()) + !sessionSemaphoresRowset.IsReady() || + !quoterResourcesRowset.IsReady()) { return false; } @@ -103,14 +103,14 @@ struct TKesusTablet::TTxInit : public TTxBase { case Schema::SysParam_StrictMarkerCounter: Self->StrictMarkerCounter = FromString<ui64>(value); break; - case Schema::SysParam_NextQuoterResourceId: - Self->NextQuoterResourceId = FromString<ui64>(value); - break; - case Schema::SysParam_RateLimiterCountersMode: - if (auto mode = FromString<ui64>(value)) { - Self->RateLimiterCountersMode = static_cast<Ydb::Coordination::RateLimiterCountersMode>(mode); - } - break; + case Schema::SysParam_NextQuoterResourceId: + Self->NextQuoterResourceId = FromString<ui64>(value); + break; + case Schema::SysParam_RateLimiterCountersMode: + if (auto mode = FromString<ui64>(value)) { + Self->RateLimiterCountersMode = static_cast<Ydb::Coordination::RateLimiterCountersMode>(mode); + } + break; default: Y_FAIL("Unexpected SysParam value %" PRIu64, id); } @@ -207,33 +207,33 @@ struct TKesusTablet::TTxInit : public TTxBase { } } - // Read QuoterResources - if (Self->KesusPath) { - Self->QuoterResources.SetQuoterCounters(GetServiceCounters(AppData()->Counters, "quoter_service")->GetSubgroup("quoter", Self->KesusPath)); - if (Self->RateLimiterCountersMode == Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_DETAILED) { - Self->QuoterResources.EnableDetailedCountersMode(); - } - Self->QuoterResources.SetQuoterPath(Self->KesusPath); - } - - { + // Read QuoterResources + if (Self->KesusPath) { + Self->QuoterResources.SetQuoterCounters(GetServiceCounters(AppData()->Counters, "quoter_service")->GetSubgroup("quoter", Self->KesusPath)); + if (Self->RateLimiterCountersMode == Ydb::Coordination::RATE_LIMITER_COUNTERS_MODE_DETAILED) { + Self->QuoterResources.EnableDetailedCountersMode(); + } + Self->QuoterResources.SetQuoterPath(Self->KesusPath); + } + + { Self->QuoterResources.SetupBilling(ctx.SelfID, MakeMeteringSink()); - auto quoterResourcesRowset = db.Table<Schema::QuoterResources>().Range().Select(); - if (!quoterResourcesRowset.IsReady()) - return false; - while 
(!quoterResourcesRowset.EndOfSet()) { - const ui64 id = quoterResourcesRowset.GetValue<Schema::QuoterResources::Id>(); - const ui64 parentId = quoterResourcesRowset.GetValue<Schema::QuoterResources::ParentId>(); - NKikimrKesus::TStreamingQuoterResource props = quoterResourcesRowset.GetValue<Schema::QuoterResources::Props>(); - props.SetResourcePath(CanonizeQuoterResourcePath(props.GetResourcePath())); - Self->QuoterResources.LoadResource(id, parentId, props); - Self->TabletCounters->Simple()[COUNTER_QUOTER_RESOURCE_COUNT].Add(1); - if (!quoterResourcesRowset.Next()) - return false; - } - Self->QuoterResources.ConstructTrees(); - } - + auto quoterResourcesRowset = db.Table<Schema::QuoterResources>().Range().Select(); + if (!quoterResourcesRowset.IsReady()) + return false; + while (!quoterResourcesRowset.EndOfSet()) { + const ui64 id = quoterResourcesRowset.GetValue<Schema::QuoterResources::Id>(); + const ui64 parentId = quoterResourcesRowset.GetValue<Schema::QuoterResources::ParentId>(); + NKikimrKesus::TStreamingQuoterResource props = quoterResourcesRowset.GetValue<Schema::QuoterResources::Props>(); + props.SetResourcePath(CanonizeQuoterResourcePath(props.GetResourcePath())); + Self->QuoterResources.LoadResource(id, parentId, props); + Self->TabletCounters->Simple()[COUNTER_QUOTER_RESOURCE_COUNT].Add(1); + if (!quoterResourcesRowset.Next()) + return false; + } + Self->QuoterResources.ConstructTrees(); + } + Self->PersistSysParam(db, Schema::SysParam_LastLeaderActor, ctx.SelfID.ToString()); return true; } diff --git a/ydb/core/kesus/tablet/tx_quoter_resource_add.cpp b/ydb/core/kesus/tablet/tx_quoter_resource_add.cpp index fa5755eb6ee..e4192b83564 100644 --- a/ydb/core/kesus/tablet/tx_quoter_resource_add.cpp +++ b/ydb/core/kesus/tablet/tx_quoter_resource_add.cpp @@ -1,128 +1,128 @@ -#include "tablet_impl.h" - +#include "tablet_impl.h" + #include <library/cpp/protobuf/util/is_equal.h> - -namespace NKikimr { -namespace NKesus { - -struct TKesusTablet::TTxQuoterResourceAdd : public TTxBase { + +namespace NKikimr { +namespace NKesus { + +struct TKesusTablet::TTxQuoterResourceAdd : public TTxBase { const TActorId Sender; - const ui64 Cookie; - NKikimrKesus::TEvAddQuoterResource Record; - - THolder<TEvKesus::TEvAddQuoterResourceResult> Reply; - + const ui64 Cookie; + NKikimrKesus::TEvAddQuoterResource Record; + + THolder<TEvKesus::TEvAddQuoterResourceResult> Reply; + TTxQuoterResourceAdd(TSelf* self, const TActorId& sender, ui64 cookie, const NKikimrKesus::TEvAddQuoterResource& record) - : TTxBase(self) - , Sender(sender) - , Cookie(cookie) - , Record(record) - { - } - - TTxType GetTxType() const override { return TXTYPE_QUOTER_RESOURCE_ADD; } - - void ReplyOk(ui64 quoterResourceId) { - NKikimrKesus::TEvAddQuoterResourceResult result; - result.SetResourceId(quoterResourceId); - result.MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); - Reply = MakeHolder<TEvKesus::TEvAddQuoterResourceResult>(result); - } - + : TTxBase(self) + , Sender(sender) + , Cookie(cookie) + , Record(record) + { + } + + TTxType GetTxType() const override { return TXTYPE_QUOTER_RESOURCE_ADD; } + + void ReplyOk(ui64 quoterResourceId) { + NKikimrKesus::TEvAddQuoterResourceResult result; + result.SetResourceId(quoterResourceId); + result.MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); + Reply = MakeHolder<TEvKesus::TEvAddQuoterResourceResult>(result); + } + bool Execute(TTransactionContext& txc, const TActorContext& ctx) override { - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] 
TTxQuoterResourceAdd::Execute (sender=" << Sender - << ", cookie=" << Cookie << ", path=\"" << Record.GetResource().GetResourcePath() - << "\", config=" << Record.GetResource().GetHierarhicalDRRResourceConfig() << ")"); - - const auto& resourceDesc = Record.GetResource(); - if (const TQuoterResourceTree* resource = Self->QuoterResources.FindPath(resourceDesc.GetResourcePath())) { - if (NProtoBuf::IsEqual(resource->GetProps().GetHierarhicalDRRResourceConfig(), resourceDesc.GetHierarhicalDRRResourceConfig())) { - THolder<TEvKesus::TEvAddQuoterResourceResult> reply = - MakeHolder<TEvKesus::TEvAddQuoterResourceResult>( - Ydb::StatusIds::ALREADY_EXISTS, - "Resource already exists and has same settings."); - reply->Record.SetResourceId(resource->GetResourceId()); - Reply = std::move(reply); - } else { - Reply = MakeHolder<TEvKesus::TEvAddQuoterResourceResult>( - Ydb::StatusIds::BAD_REQUEST, - "Resource already exists and has different settings."); - } - return true; - } - - Y_VERIFY(Self->NextQuoterResourceId > 0); - - TString errorMessage; - TQuoterResourceTree* resource = Self->QuoterResources.AddResource(Self->NextQuoterResourceId, Record.GetResource(), errorMessage); - if (!resource) { - Reply = MakeHolder<TEvKesus::TEvAddQuoterResourceResult>( - Ydb::StatusIds::BAD_REQUEST, - errorMessage); - return true; - } - ++Self->NextQuoterResourceId; - - NIceDb::TNiceDb db(txc.DB); - Self->PersistSysParam(db, Schema::SysParam_NextQuoterResourceId, ToString(Self->NextQuoterResourceId)); - db.Table<Schema::QuoterResources>().Key(resource->GetResourceId()).Update( - NIceDb::TUpdate<Schema::QuoterResources::ParentId>(resource->GetParentId()), - NIceDb::TUpdate<Schema::QuoterResources::Props>(resource->GetProps())); - - Self->TabletCounters->Simple()[COUNTER_QUOTER_RESOURCE_COUNT].Add(1); - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] Created new quoter resource " - << resource->GetResourceId() << " \"" << resource->GetProps().GetResourcePath() << "\""); - - ReplyOk(resource->GetResourceId()); - return true; - } - - void Complete(const TActorContext& ctx) override { - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] TTxQuoterResourceAdd::Complete (sender=" << Sender - << ", cookie=" << Cookie << ")"); - - Y_VERIFY(Reply); - ctx.Send(Sender, std::move(Reply), 0, Cookie); - } -}; - -void TKesusTablet::Handle(TEvKesus::TEvAddQuoterResource::TPtr& ev) { - const auto& record = ev->Get()->Record; - TabletCounters->Cumulative()[COUNTER_REQS_QUOTER_RESOURCE_ADD].Increment(1); - - const auto& resourceDesc = record.GetResource(); - if (!TQuoterResources::IsResourcePathValid(resourceDesc.GetResourcePath())) { - Send(ev->Sender, - new TEvKesus::TEvAddQuoterResourceResult( - Ydb::StatusIds::BAD_REQUEST, - "Invalid resource path."), - 0, ev->Cookie); - return; - } - - if (!resourceDesc.HasHierarhicalDRRResourceConfig()) { - Send(ev->Sender, - new TEvKesus::TEvAddQuoterResourceResult( - Ydb::StatusIds::BAD_REQUEST, - "Not supported resource type. 
Today's only supported resource type is hierarchical DRR resource."), - 0, ev->Cookie); - return; - } - - if (resourceDesc.GetResourceId()) { - Send(ev->Sender, - new TEvKesus::TEvAddQuoterResourceResult( - Ydb::StatusIds::BAD_REQUEST, - "ResourceId specified."), - 0, ev->Cookie); - return; - } - - Execute(new TTxQuoterResourceAdd(this, ev->Sender, ev->Cookie, record), TActivationContext::AsActorContext()); -} - -} -} + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] TTxQuoterResourceAdd::Execute (sender=" << Sender + << ", cookie=" << Cookie << ", path=\"" << Record.GetResource().GetResourcePath() + << "\", config=" << Record.GetResource().GetHierarhicalDRRResourceConfig() << ")"); + + const auto& resourceDesc = Record.GetResource(); + if (const TQuoterResourceTree* resource = Self->QuoterResources.FindPath(resourceDesc.GetResourcePath())) { + if (NProtoBuf::IsEqual(resource->GetProps().GetHierarhicalDRRResourceConfig(), resourceDesc.GetHierarhicalDRRResourceConfig())) { + THolder<TEvKesus::TEvAddQuoterResourceResult> reply = + MakeHolder<TEvKesus::TEvAddQuoterResourceResult>( + Ydb::StatusIds::ALREADY_EXISTS, + "Resource already exists and has same settings."); + reply->Record.SetResourceId(resource->GetResourceId()); + Reply = std::move(reply); + } else { + Reply = MakeHolder<TEvKesus::TEvAddQuoterResourceResult>( + Ydb::StatusIds::BAD_REQUEST, + "Resource already exists and has different settings."); + } + return true; + } + + Y_VERIFY(Self->NextQuoterResourceId > 0); + + TString errorMessage; + TQuoterResourceTree* resource = Self->QuoterResources.AddResource(Self->NextQuoterResourceId, Record.GetResource(), errorMessage); + if (!resource) { + Reply = MakeHolder<TEvKesus::TEvAddQuoterResourceResult>( + Ydb::StatusIds::BAD_REQUEST, + errorMessage); + return true; + } + ++Self->NextQuoterResourceId; + + NIceDb::TNiceDb db(txc.DB); + Self->PersistSysParam(db, Schema::SysParam_NextQuoterResourceId, ToString(Self->NextQuoterResourceId)); + db.Table<Schema::QuoterResources>().Key(resource->GetResourceId()).Update( + NIceDb::TUpdate<Schema::QuoterResources::ParentId>(resource->GetParentId()), + NIceDb::TUpdate<Schema::QuoterResources::Props>(resource->GetProps())); + + Self->TabletCounters->Simple()[COUNTER_QUOTER_RESOURCE_COUNT].Add(1); + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] Created new quoter resource " + << resource->GetResourceId() << " \"" << resource->GetProps().GetResourcePath() << "\""); + + ReplyOk(resource->GetResourceId()); + return true; + } + + void Complete(const TActorContext& ctx) override { + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] TTxQuoterResourceAdd::Complete (sender=" << Sender + << ", cookie=" << Cookie << ")"); + + Y_VERIFY(Reply); + ctx.Send(Sender, std::move(Reply), 0, Cookie); + } +}; + +void TKesusTablet::Handle(TEvKesus::TEvAddQuoterResource::TPtr& ev) { + const auto& record = ev->Get()->Record; + TabletCounters->Cumulative()[COUNTER_REQS_QUOTER_RESOURCE_ADD].Increment(1); + + const auto& resourceDesc = record.GetResource(); + if (!TQuoterResources::IsResourcePathValid(resourceDesc.GetResourcePath())) { + Send(ev->Sender, + new TEvKesus::TEvAddQuoterResourceResult( + Ydb::StatusIds::BAD_REQUEST, + "Invalid resource path."), + 0, ev->Cookie); + return; + } + + if (!resourceDesc.HasHierarhicalDRRResourceConfig()) { + Send(ev->Sender, + new TEvKesus::TEvAddQuoterResourceResult( + Ydb::StatusIds::BAD_REQUEST, + "Not supported resource type. 
Today's only supported resource type is hierarchical DRR resource."), + 0, ev->Cookie); + return; + } + + if (resourceDesc.GetResourceId()) { + Send(ev->Sender, + new TEvKesus::TEvAddQuoterResourceResult( + Ydb::StatusIds::BAD_REQUEST, + "ResourceId specified."), + 0, ev->Cookie); + return; + } + + Execute(new TTxQuoterResourceAdd(this, ev->Sender, ev->Cookie, record), TActivationContext::AsActorContext()); +} + +} +} diff --git a/ydb/core/kesus/tablet/tx_quoter_resource_delete.cpp b/ydb/core/kesus/tablet/tx_quoter_resource_delete.cpp index 33048ba7051..bd27a0e04ea 100644 --- a/ydb/core/kesus/tablet/tx_quoter_resource_delete.cpp +++ b/ydb/core/kesus/tablet/tx_quoter_resource_delete.cpp @@ -1,103 +1,103 @@ -#include "tablet_impl.h" - -namespace NKikimr { -namespace NKesus { - -struct TKesusTablet::TTxQuoterResourceDelete : public TTxBase { +#include "tablet_impl.h" + +namespace NKikimr { +namespace NKesus { + +struct TKesusTablet::TTxQuoterResourceDelete : public TTxBase { const TActorId Sender; - const ui64 Cookie; - NKikimrKesus::TEvDeleteQuoterResource Record; - - THolder<TEvKesus::TEvDeleteQuoterResourceResult> Reply; - + const ui64 Cookie; + NKikimrKesus::TEvDeleteQuoterResource Record; + + THolder<TEvKesus::TEvDeleteQuoterResourceResult> Reply; + TTxQuoterResourceDelete(TSelf* self, const TActorId& sender, ui64 cookie, const NKikimrKesus::TEvDeleteQuoterResource& record) - : TTxBase(self) - , Sender(sender) - , Cookie(cookie) - , Record(record) - { - } - - TTxType GetTxType() const override { return TXTYPE_QUOTER_RESOURCE_DELETE; } - - void ReplyOk() { - NKikimrKesus::TEvDeleteQuoterResourceResult result; - result.MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); - Reply = MakeHolder<TEvKesus::TEvDeleteQuoterResourceResult>(result); - } - + : TTxBase(self) + , Sender(sender) + , Cookie(cookie) + , Record(record) + { + } + + TTxType GetTxType() const override { return TXTYPE_QUOTER_RESOURCE_DELETE; } + + void ReplyOk() { + NKikimrKesus::TEvDeleteQuoterResourceResult result; + result.MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); + Reply = MakeHolder<TEvKesus::TEvDeleteQuoterResourceResult>(result); + } + bool Execute(TTransactionContext& txc, const TActorContext& ctx) override { - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] TTxQuoterResourceDelete::Execute (sender=" << Sender - << ", cookie=" << Cookie << ", id=" << Record.GetResourceId() << ", path=\"" << Record.GetResourcePath() << "\")"); - - TQuoterResourceTree* resource = Record.GetResourceId() ? 
- Self->QuoterResources.FindId(Record.GetResourceId()) : - Self->QuoterResources.FindPath(Record.GetResourcePath()); - if (!resource) { - Reply = MakeHolder<TEvKesus::TEvDeleteQuoterResourceResult>( - Ydb::StatusIds::NOT_FOUND, - "Resource doesn't exist."); - return true; - } - - const ui64 resourceId = resource->GetResourceId(); - const TString resourcePath = resource->GetPath(); - - TString errorMessage; - if (!Self->QuoterResources.DeleteResource(resource, errorMessage)) { - Reply = MakeHolder<TEvKesus::TEvDeleteQuoterResourceResult>( - Ydb::StatusIds::BAD_REQUEST, - errorMessage); - return true; - } - - NIceDb::TNiceDb db(txc.DB); - db.Table<Schema::QuoterResources>().Key(resourceId).Delete(); - - Self->TabletCounters->Simple()[COUNTER_QUOTER_RESOURCE_COUNT].Add(-1); - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] Deleted quoter resource " - << resourceId << " \"" << resourcePath << "\""); - - ReplyOk(); - return true; - } - - void Complete(const TActorContext& ctx) override { - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] TTxQuoterResourceDelete::Complete (sender=" << Sender - << ", cookie=" << Cookie << ")"); - - Y_VERIFY(Reply); - ctx.Send(Sender, std::move(Reply), 0, Cookie); - } -}; - -void TKesusTablet::Handle(TEvKesus::TEvDeleteQuoterResource::TPtr& ev) { - const auto& record = ev->Get()->Record; - TabletCounters->Cumulative()[COUNTER_REQS_QUOTER_RESOURCE_DELETE].Increment(1); - - if (record.GetResourcePath().empty() && !record.GetResourceId()) { - Send(ev->Sender, - new TEvKesus::TEvDeleteQuoterResourceResult( - Ydb::StatusIds::BAD_REQUEST, - "You should specify resource path or resource id."), - 0, ev->Cookie); - return; - } - - if (!record.GetResourcePath().empty() && !TQuoterResources::IsResourcePathValid(record.GetResourcePath())) { - Send(ev->Sender, - new TEvKesus::TEvDeleteQuoterResourceResult( - Ydb::StatusIds::BAD_REQUEST, - "Invalid resource path."), - 0, ev->Cookie); - return; - } - - Execute(new TTxQuoterResourceDelete(this, ev->Sender, ev->Cookie, record), TActivationContext::AsActorContext()); -} - -} -} + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] TTxQuoterResourceDelete::Execute (sender=" << Sender + << ", cookie=" << Cookie << ", id=" << Record.GetResourceId() << ", path=\"" << Record.GetResourcePath() << "\")"); + + TQuoterResourceTree* resource = Record.GetResourceId() ? 
+ Self->QuoterResources.FindId(Record.GetResourceId()) : + Self->QuoterResources.FindPath(Record.GetResourcePath()); + if (!resource) { + Reply = MakeHolder<TEvKesus::TEvDeleteQuoterResourceResult>( + Ydb::StatusIds::NOT_FOUND, + "Resource doesn't exist."); + return true; + } + + const ui64 resourceId = resource->GetResourceId(); + const TString resourcePath = resource->GetPath(); + + TString errorMessage; + if (!Self->QuoterResources.DeleteResource(resource, errorMessage)) { + Reply = MakeHolder<TEvKesus::TEvDeleteQuoterResourceResult>( + Ydb::StatusIds::BAD_REQUEST, + errorMessage); + return true; + } + + NIceDb::TNiceDb db(txc.DB); + db.Table<Schema::QuoterResources>().Key(resourceId).Delete(); + + Self->TabletCounters->Simple()[COUNTER_QUOTER_RESOURCE_COUNT].Add(-1); + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] Deleted quoter resource " + << resourceId << " \"" << resourcePath << "\""); + + ReplyOk(); + return true; + } + + void Complete(const TActorContext& ctx) override { + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] TTxQuoterResourceDelete::Complete (sender=" << Sender + << ", cookie=" << Cookie << ")"); + + Y_VERIFY(Reply); + ctx.Send(Sender, std::move(Reply), 0, Cookie); + } +}; + +void TKesusTablet::Handle(TEvKesus::TEvDeleteQuoterResource::TPtr& ev) { + const auto& record = ev->Get()->Record; + TabletCounters->Cumulative()[COUNTER_REQS_QUOTER_RESOURCE_DELETE].Increment(1); + + if (record.GetResourcePath().empty() && !record.GetResourceId()) { + Send(ev->Sender, + new TEvKesus::TEvDeleteQuoterResourceResult( + Ydb::StatusIds::BAD_REQUEST, + "You should specify resource path or resource id."), + 0, ev->Cookie); + return; + } + + if (!record.GetResourcePath().empty() && !TQuoterResources::IsResourcePathValid(record.GetResourcePath())) { + Send(ev->Sender, + new TEvKesus::TEvDeleteQuoterResourceResult( + Ydb::StatusIds::BAD_REQUEST, + "Invalid resource path."), + 0, ev->Cookie); + return; + } + + Execute(new TTxQuoterResourceDelete(this, ev->Sender, ev->Cookie, record), TActivationContext::AsActorContext()); +} + +} +} diff --git a/ydb/core/kesus/tablet/tx_quoter_resource_describe.cpp b/ydb/core/kesus/tablet/tx_quoter_resource_describe.cpp index 3e78d3c5102..877cddc5245 100644 --- a/ydb/core/kesus/tablet/tx_quoter_resource_describe.cpp +++ b/ydb/core/kesus/tablet/tx_quoter_resource_describe.cpp @@ -1,127 +1,127 @@ -#include "tablet_impl.h" - -#include <util/string/builder.h> - -namespace NKikimr { -namespace NKesus { - -struct TKesusTablet::TTxQuoterResourceDescribe : public TTxBase { +#include "tablet_impl.h" + +#include <util/string/builder.h> + +namespace NKikimr { +namespace NKesus { + +struct TKesusTablet::TTxQuoterResourceDescribe : public TTxBase { const TActorId Sender; - const ui64 Cookie; - const NKikimrKesus::TEvDescribeQuoterResources Record; - - THolder<TEvKesus::TEvDescribeQuoterResourcesResult> Reply; - + const ui64 Cookie; + const NKikimrKesus::TEvDescribeQuoterResources Record; + + THolder<TEvKesus::TEvDescribeQuoterResourcesResult> Reply; + TTxQuoterResourceDescribe(TSelf* self, const TActorId& sender, ui64 cookie, const NKikimrKesus::TEvDescribeQuoterResources& record) - : TTxBase(self) - , Sender(sender) - , Cookie(cookie) - , Record(record) - , Reply(MakeHolder<TEvKesus::TEvDescribeQuoterResourcesResult>()) - { - Reply->Record.MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); - } - - TTxType GetTxType() const override { return TXTYPE_QUOTER_RESOURCE_DESCRIBE; } - - template <class 
TRepeatedField> - static TString FormatResourcesToDescribe(const TRepeatedField& list) { - TStringBuilder ret; - ret << "["; - for (int i = 0; i < list.size(); ++i) { - if (i > 0) { - ret << ", "; - } - ret << list.Get(i); - } - ret << "]"; - return std::move(ret); - } - - TString FormatIdsToDescribe() const { - return FormatResourcesToDescribe(Record.GetResourceIds()); - } - - TString FormatPathsToDescribe() const { - return FormatResourcesToDescribe(Record.GetResourcePaths()); - } - - void AddToResult(const TQuoterResourceTree* resource) { - Y_VERIFY(resource); - *Reply->Record.AddResources() = resource->GetProps(); - } - - bool NeedToDescribeAll() const { - return Record.ResourcePathsSize() == 0 && Record.ResourceIdsSize() == 0; - } - - void WalkResource(const TQuoterResourceTree* resource) { - AddToResult(resource); - for (const TQuoterResourceTree* child : resource->GetChildren()) { - WalkResource(child); - } - } - - void ProcessResource(const TQuoterResourceTree* resource) { - if (Record.GetRecursive()) { - WalkResource(resource); - } else { - AddToResult(resource); - } - } - - bool Execute(TTransactionContext& txc, const TActorContext& ctx) override { - Y_UNUSED(txc); - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] TTxQuoterResourceDescribe::Execute (sender=" << Sender - << ", cookie=" << Cookie << ", ids=" << FormatIdsToDescribe() - << ", paths=" << FormatPathsToDescribe() << ", recursive=" << Record.GetRecursive() << ")"); - - if (NeedToDescribeAll()) { - for (auto&& [path, resource] : Self->QuoterResources.GetAllResources()) { - if (Record.GetRecursive() || resource->GetParentId() == 0) { - AddToResult(resource); - } - } - } else { - for (ui64 id : Record.GetResourceIds()) { - const TQuoterResourceTree* resource = Self->QuoterResources.FindId(id); - if (!resource) { - Reply = MakeHolder<TEvKesus::TEvDescribeQuoterResourcesResult>( - Ydb::StatusIds::NOT_FOUND, - TStringBuilder() << "Resource with id " << id << " doesn't exist."); - return true; - } - ProcessResource(resource); - } - for (const TString& path : Record.GetResourcePaths()) { - const TQuoterResourceTree* resource = Self->QuoterResources.FindPath(path); - if (!resource) { - Reply = MakeHolder<TEvKesus::TEvDescribeQuoterResourcesResult>( - Ydb::StatusIds::NOT_FOUND, - TStringBuilder() << "Resource with path \"" << path << "\" doesn't exist."); - return true; - } - ProcessResource(resource); - } - } - - return true; - } - - void Complete(const TActorContext& ctx) override { - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] TTxQuoterResourceDescribe::Complete (sender=" << Sender - << ", cookie=" << Cookie << ")"); - - Y_VERIFY(Reply); - ctx.Send(Sender, std::move(Reply), 0, Cookie); - } -}; - -void TKesusTablet::Handle(TEvKesus::TEvDescribeQuoterResources::TPtr& ev) { - Execute(new TTxQuoterResourceDescribe(this, ev->Sender, ev->Cookie, ev->Get()->Record), TActivationContext::AsActorContext()); -} - -} -} + : TTxBase(self) + , Sender(sender) + , Cookie(cookie) + , Record(record) + , Reply(MakeHolder<TEvKesus::TEvDescribeQuoterResourcesResult>()) + { + Reply->Record.MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); + } + + TTxType GetTxType() const override { return TXTYPE_QUOTER_RESOURCE_DESCRIBE; } + + template <class TRepeatedField> + static TString FormatResourcesToDescribe(const TRepeatedField& list) { + TStringBuilder ret; + ret << "["; + for (int i = 0; i < list.size(); ++i) { + if (i > 0) { + ret << ", "; + } + ret << list.Get(i); + } + ret << "]"; 
+ return std::move(ret); + } + + TString FormatIdsToDescribe() const { + return FormatResourcesToDescribe(Record.GetResourceIds()); + } + + TString FormatPathsToDescribe() const { + return FormatResourcesToDescribe(Record.GetResourcePaths()); + } + + void AddToResult(const TQuoterResourceTree* resource) { + Y_VERIFY(resource); + *Reply->Record.AddResources() = resource->GetProps(); + } + + bool NeedToDescribeAll() const { + return Record.ResourcePathsSize() == 0 && Record.ResourceIdsSize() == 0; + } + + void WalkResource(const TQuoterResourceTree* resource) { + AddToResult(resource); + for (const TQuoterResourceTree* child : resource->GetChildren()) { + WalkResource(child); + } + } + + void ProcessResource(const TQuoterResourceTree* resource) { + if (Record.GetRecursive()) { + WalkResource(resource); + } else { + AddToResult(resource); + } + } + + bool Execute(TTransactionContext& txc, const TActorContext& ctx) override { + Y_UNUSED(txc); + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] TTxQuoterResourceDescribe::Execute (sender=" << Sender + << ", cookie=" << Cookie << ", ids=" << FormatIdsToDescribe() + << ", paths=" << FormatPathsToDescribe() << ", recursive=" << Record.GetRecursive() << ")"); + + if (NeedToDescribeAll()) { + for (auto&& [path, resource] : Self->QuoterResources.GetAllResources()) { + if (Record.GetRecursive() || resource->GetParentId() == 0) { + AddToResult(resource); + } + } + } else { + for (ui64 id : Record.GetResourceIds()) { + const TQuoterResourceTree* resource = Self->QuoterResources.FindId(id); + if (!resource) { + Reply = MakeHolder<TEvKesus::TEvDescribeQuoterResourcesResult>( + Ydb::StatusIds::NOT_FOUND, + TStringBuilder() << "Resource with id " << id << " doesn't exist."); + return true; + } + ProcessResource(resource); + } + for (const TString& path : Record.GetResourcePaths()) { + const TQuoterResourceTree* resource = Self->QuoterResources.FindPath(path); + if (!resource) { + Reply = MakeHolder<TEvKesus::TEvDescribeQuoterResourcesResult>( + Ydb::StatusIds::NOT_FOUND, + TStringBuilder() << "Resource with path \"" << path << "\" doesn't exist."); + return true; + } + ProcessResource(resource); + } + } + + return true; + } + + void Complete(const TActorContext& ctx) override { + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] TTxQuoterResourceDescribe::Complete (sender=" << Sender + << ", cookie=" << Cookie << ")"); + + Y_VERIFY(Reply); + ctx.Send(Sender, std::move(Reply), 0, Cookie); + } +}; + +void TKesusTablet::Handle(TEvKesus::TEvDescribeQuoterResources::TPtr& ev) { + Execute(new TTxQuoterResourceDescribe(this, ev->Sender, ev->Cookie, ev->Get()->Record), TActivationContext::AsActorContext()); +} + +} +} diff --git a/ydb/core/kesus/tablet/tx_quoter_resource_update.cpp b/ydb/core/kesus/tablet/tx_quoter_resource_update.cpp index 71896120613..d4418ffa6db 100644 --- a/ydb/core/kesus/tablet/tx_quoter_resource_update.cpp +++ b/ydb/core/kesus/tablet/tx_quoter_resource_update.cpp @@ -1,118 +1,118 @@ -#include "tablet_impl.h" - -namespace NKikimr { -namespace NKesus { - -struct TKesusTablet::TTxQuoterResourceUpdate : public TTxBase { +#include "tablet_impl.h" + +namespace NKikimr { +namespace NKesus { + +struct TKesusTablet::TTxQuoterResourceUpdate : public TTxBase { const TActorId Sender; - const ui64 Cookie; - NKikimrKesus::TEvUpdateQuoterResource Record; - - THolder<TEvKesus::TEvUpdateQuoterResourceResult> Reply; - + const ui64 Cookie; + NKikimrKesus::TEvUpdateQuoterResource Record; + + 
THolder<TEvKesus::TEvUpdateQuoterResourceResult> Reply; + TTxQuoterResourceUpdate(TSelf* self, const TActorId& sender, ui64 cookie, const NKikimrKesus::TEvUpdateQuoterResource& record) - : TTxBase(self) - , Sender(sender) - , Cookie(cookie) - , Record(record) - { - } - - TTxType GetTxType() const override { return TXTYPE_QUOTER_RESOURCE_UPDATE; } - - void ReplyOk(ui64 quoterResourceId) { - NKikimrKesus::TEvUpdateQuoterResourceResult result; - result.SetResourceId(quoterResourceId); - result.MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); - Reply = MakeHolder<TEvKesus::TEvUpdateQuoterResourceResult>(result); - } - + : TTxBase(self) + , Sender(sender) + , Cookie(cookie) + , Record(record) + { + } + + TTxType GetTxType() const override { return TXTYPE_QUOTER_RESOURCE_UPDATE; } + + void ReplyOk(ui64 quoterResourceId) { + NKikimrKesus::TEvUpdateQuoterResourceResult result; + result.SetResourceId(quoterResourceId); + result.MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); + Reply = MakeHolder<TEvKesus::TEvUpdateQuoterResourceResult>(result); + } + bool Execute(TTransactionContext& txc, const TActorContext& ctx) override { - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] TTxQuoterResourceUpdate::Execute (sender=" << Sender - << ", cookie=" << Cookie << ", id=" << Record.GetResource().GetResourceId() << ", path=\"" << Record.GetResource().GetResourcePath() - << "\", config=" << Record.GetResource().GetHierarhicalDRRResourceConfig() << ")"); - - const auto& resourceDesc = Record.GetResource(); - TQuoterResourceTree* resource = resourceDesc.GetResourceId() ? - Self->QuoterResources.FindId(resourceDesc.GetResourceId()) : - Self->QuoterResources.FindPath(resourceDesc.GetResourcePath()); - if (!resource) { - Reply = MakeHolder<TEvKesus::TEvUpdateQuoterResourceResult>( - Ydb::StatusIds::NOT_FOUND, - "No resource found."); - return true; - } - TString errorMessage; - if (!resource->Update(resourceDesc, errorMessage)) { - Reply = MakeHolder<TEvKesus::TEvUpdateQuoterResourceResult>( - Ydb::StatusIds::BAD_REQUEST, - errorMessage); - return true; - } - Self->QuoterResources.OnUpdateResourceProps(resource); - - NIceDb::TNiceDb db(txc.DB); - db.Table<Schema::QuoterResources>().Key(resource->GetResourceId()).Update( - NIceDb::TUpdate<Schema::QuoterResources::Props>(resource->GetProps())); - - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] Updated quoter resource " - << resource->GetResourceId() << " \"" << resource->GetPath() << "\""); - - ReplyOk(resource->GetResourceId()); - - if (Self->QuoterTickProcessorQueue.Empty()) { // Ticks are not scheduled, so update all sessions with new props now. 
- Self->QuoterResourceSessionsAccumulator.SendAll(ctx, Self->TabletID()); - } - - return true; - } - - void Complete(const TActorContext& ctx) override { - LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] TTxQuoterResourceUpdate::Complete (sender=" << Sender - << ", cookie=" << Cookie << ")"); - - Y_VERIFY(Reply); - ctx.Send(Sender, std::move(Reply), 0, Cookie); - } -}; - -void TKesusTablet::Handle(TEvKesus::TEvUpdateQuoterResource::TPtr& ev) { - const auto& record = ev->Get()->Record; - TabletCounters->Cumulative()[COUNTER_REQS_QUOTER_RESOURCE_UPDATE].Increment(1); - - const auto& resourceDesc = record.GetResource(); - if (resourceDesc.GetResourcePath().empty() && !resourceDesc.GetResourceId()) { - Send(ev->Sender, - new TEvKesus::TEvUpdateQuoterResourceResult( - Ydb::StatusIds::BAD_REQUEST, - "You should specify resource path or resource id."), - 0, ev->Cookie); - return; - } - - if (!resourceDesc.GetResourcePath().empty() && !TQuoterResources::IsResourcePathValid(resourceDesc.GetResourcePath())) { - Send(ev->Sender, - new TEvKesus::TEvUpdateQuoterResourceResult( - Ydb::StatusIds::BAD_REQUEST, - "Invalid resource path."), - 0, ev->Cookie); - return; - } - - if (!resourceDesc.HasHierarhicalDRRResourceConfig()) { - Send(ev->Sender, - new TEvKesus::TEvUpdateQuoterResourceResult( - Ydb::StatusIds::BAD_REQUEST, - "No resource config."), - 0, ev->Cookie); - return; - } - - Execute(new TTxQuoterResourceUpdate(this, ev->Sender, ev->Cookie, record), TActivationContext::AsActorContext()); -} - -} -} + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] TTxQuoterResourceUpdate::Execute (sender=" << Sender + << ", cookie=" << Cookie << ", id=" << Record.GetResource().GetResourceId() << ", path=\"" << Record.GetResource().GetResourcePath() + << "\", config=" << Record.GetResource().GetHierarhicalDRRResourceConfig() << ")"); + + const auto& resourceDesc = Record.GetResource(); + TQuoterResourceTree* resource = resourceDesc.GetResourceId() ? + Self->QuoterResources.FindId(resourceDesc.GetResourceId()) : + Self->QuoterResources.FindPath(resourceDesc.GetResourcePath()); + if (!resource) { + Reply = MakeHolder<TEvKesus::TEvUpdateQuoterResourceResult>( + Ydb::StatusIds::NOT_FOUND, + "No resource found."); + return true; + } + TString errorMessage; + if (!resource->Update(resourceDesc, errorMessage)) { + Reply = MakeHolder<TEvKesus::TEvUpdateQuoterResourceResult>( + Ydb::StatusIds::BAD_REQUEST, + errorMessage); + return true; + } + Self->QuoterResources.OnUpdateResourceProps(resource); + + NIceDb::TNiceDb db(txc.DB); + db.Table<Schema::QuoterResources>().Key(resource->GetResourceId()).Update( + NIceDb::TUpdate<Schema::QuoterResources::Props>(resource->GetProps())); + + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] Updated quoter resource " + << resource->GetResourceId() << " \"" << resource->GetPath() << "\""); + + ReplyOk(resource->GetResourceId()); + + if (Self->QuoterTickProcessorQueue.Empty()) { // Ticks are not scheduled, so update all sessions with new props now. 
+ Self->QuoterResourceSessionsAccumulator.SendAll(ctx, Self->TabletID()); + } + + return true; + } + + void Complete(const TActorContext& ctx) override { + LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, + "[" << Self->TabletID() << "] TTxQuoterResourceUpdate::Complete (sender=" << Sender + << ", cookie=" << Cookie << ")"); + + Y_VERIFY(Reply); + ctx.Send(Sender, std::move(Reply), 0, Cookie); + } +}; + +void TKesusTablet::Handle(TEvKesus::TEvUpdateQuoterResource::TPtr& ev) { + const auto& record = ev->Get()->Record; + TabletCounters->Cumulative()[COUNTER_REQS_QUOTER_RESOURCE_UPDATE].Increment(1); + + const auto& resourceDesc = record.GetResource(); + if (resourceDesc.GetResourcePath().empty() && !resourceDesc.GetResourceId()) { + Send(ev->Sender, + new TEvKesus::TEvUpdateQuoterResourceResult( + Ydb::StatusIds::BAD_REQUEST, + "You should specify resource path or resource id."), + 0, ev->Cookie); + return; + } + + if (!resourceDesc.GetResourcePath().empty() && !TQuoterResources::IsResourcePathValid(resourceDesc.GetResourcePath())) { + Send(ev->Sender, + new TEvKesus::TEvUpdateQuoterResourceResult( + Ydb::StatusIds::BAD_REQUEST, + "Invalid resource path."), + 0, ev->Cookie); + return; + } + + if (!resourceDesc.HasHierarhicalDRRResourceConfig()) { + Send(ev->Sender, + new TEvKesus::TEvUpdateQuoterResourceResult( + Ydb::StatusIds::BAD_REQUEST, + "No resource config."), + 0, ev->Cookie); + return; + } + + Execute(new TTxQuoterResourceUpdate(this, ev->Sender, ev->Cookie, record), TActivationContext::AsActorContext()); +} + +} +} diff --git a/ydb/core/kesus/tablet/tx_sessions_describe.cpp b/ydb/core/kesus/tablet/tx_sessions_describe.cpp index 7f2ea8a0eb6..f77fa2c9d4a 100644 --- a/ydb/core/kesus/tablet/tx_sessions_describe.cpp +++ b/ydb/core/kesus/tablet/tx_sessions_describe.cpp @@ -44,7 +44,7 @@ struct TKesusTablet::TTxSessionsDescribe : public TTxBase { void Complete(const TActorContext& ctx) override { LOG_DEBUG_S(ctx, NKikimrServices::KESUS_TABLET, - "[" << Self->TabletID() << "] TTxSessionsDescribe::Complete (sender=" << Sender + "[" << Self->TabletID() << "] TTxSessionsDescribe::Complete (sender=" << Sender << ", cookie=" << Cookie << ")"); Y_VERIFY(Reply); diff --git a/ydb/core/kesus/tablet/ut/ya.make b/ydb/core/kesus/tablet/ut/ya.make index d9a9218c2b7..a4736e7b95f 100644 --- a/ydb/core/kesus/tablet/ut/ya.make +++ b/ydb/core/kesus/tablet/ut/ya.make @@ -22,7 +22,7 @@ YQL_LAST_ABI_VERSION() SRCS( tablet_ut.cpp - quoter_resource_tree_ut.cpp + quoter_resource_tree_ut.cpp ut_helpers.cpp ) diff --git a/ydb/core/kesus/tablet/ut_helpers.cpp b/ydb/core/kesus/tablet/ut_helpers.cpp index f559800a769..f4b4900d7f4 100644 --- a/ydb/core/kesus/tablet/ut_helpers.cpp +++ b/ydb/core/kesus/tablet/ut_helpers.cpp @@ -3,9 +3,9 @@ #include <ydb/core/metering/metering.h> #include <library/cpp/actors/core/event_pb.h> - -#include <algorithm> - + +#include <algorithm> + namespace NKikimr { namespace NKesus { @@ -70,9 +70,9 @@ TTestContext::TTestContext() , TabletId(MakeTabletID(0, 0, 1)) {} -void TTestContext::Setup(ui32 nodeCount, bool useRealThreads) { +void TTestContext::Setup(ui32 nodeCount, bool useRealThreads) { ProxyClients.clear(); - Runtime.Reset(new TTestBasicRuntime(nodeCount, useRealThreads)); + Runtime.Reset(new TTestBasicRuntime(nodeCount, useRealThreads)); SetupLogging(); SetupTabletServices(); @@ -96,7 +96,7 @@ void TTestContext::Finalize() { } void TTestContext::SetupLogging() { - Runtime->SetLogPriority(NKikimrServices::KESUS_TABLET, NLog::PRI_TRACE); + 
Runtime->SetLogPriority(NKikimrServices::KESUS_TABLET, NLog::PRI_TRACE); } void TTestContext::SetupTabletServices() { @@ -162,28 +162,28 @@ void TTestContext::SendFromProxy(const TActorId& proxy, ui64 generation, IEventB cookie); } -NKikimrKesus::TEvGetConfigResult TTestContext::GetConfig() { - const ui64 cookie = RandomNumber<ui64>(); - const auto edge = Runtime->AllocateEdgeActor(); - SendFromEdge(edge, new TEvKesus::TEvGetConfig(), cookie); - - auto result = ExpectEdgeEvent<TEvKesus::TEvGetConfigResult>(edge, cookie); - UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetConfig().path(), result->Record.GetPath(), "Record: " << result->Record); - return result->Record; -} - -NKikimrKesus::TEvSetConfigResult TTestContext::SetConfig(ui64 txId, const Ydb::Coordination::Config& config, ui64 version, Ydb::StatusIds::StatusCode status) { - const ui64 cookie = RandomNumber<ui64>(); - const auto edge = Runtime->AllocateEdgeActor(); - SendFromEdge(edge, new TEvKesus::TEvSetConfig(txId, config, version), cookie); - - auto result = ExpectEdgeEvent<TEvKesus::TEvSetConfigResult>(edge, cookie); - UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetError().GetStatus(), status, "Record: " << result->Record); - UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetTxId(), txId, "Record: " << result->Record); - UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetTabletId(), TabletId, "Record: " << result->Record); - return result->Record; -} - +NKikimrKesus::TEvGetConfigResult TTestContext::GetConfig() { + const ui64 cookie = RandomNumber<ui64>(); + const auto edge = Runtime->AllocateEdgeActor(); + SendFromEdge(edge, new TEvKesus::TEvGetConfig(), cookie); + + auto result = ExpectEdgeEvent<TEvKesus::TEvGetConfigResult>(edge, cookie); + UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetConfig().path(), result->Record.GetPath(), "Record: " << result->Record); + return result->Record; +} + +NKikimrKesus::TEvSetConfigResult TTestContext::SetConfig(ui64 txId, const Ydb::Coordination::Config& config, ui64 version, Ydb::StatusIds::StatusCode status) { + const ui64 cookie = RandomNumber<ui64>(); + const auto edge = Runtime->AllocateEdgeActor(); + SendFromEdge(edge, new TEvKesus::TEvSetConfig(txId, config, version), cookie); + + auto result = ExpectEdgeEvent<TEvKesus::TEvSetConfigResult>(edge, cookie); + UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetError().GetStatus(), status, "Record: " << result->Record); + UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetTxId(), txId, "Record: " << result->Record); + UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetTabletId(), TabletId, "Record: " << result->Record); + return result->Record; +} + void TTestContext::SyncProxy(const TActorId& proxy, ui64 generation, bool useTransactions) { ui64 cookie = RandomNumber<ui64>(); SendFromProxy(proxy, generation, new TEvKesus::TEvDummyRequest(useTransactions), cookie); @@ -672,188 +672,188 @@ TTestContext::TDescribeSemaphoreChanges TTestContext::ExpectDescribeSemaphoreCha return changes; } -THolder<TEvKesus::TEvDescribeQuoterResourcesResult> TTestContext::VerifyDescribeQuoterResources( - const NKikimrKesus::TEvDescribeQuoterResources& req, - Ydb::StatusIds::StatusCode status) -{ - ui64 cookie = RandomNumber<ui64>(); - auto edge = Runtime->AllocateEdgeActor(); - SendFromEdge(edge, MakeHolder<TEvKesus::TEvDescribeQuoterResources>(req), cookie); - auto result = ExpectEdgeEvent<TEvKesus::TEvDescribeQuoterResourcesResult>(edge, cookie); - UNIT_ASSERT_VALUES_EQUAL(result->Record.GetError().GetStatus(), status); - if (status != Ydb::StatusIds::SUCCESS) { - 
UNIT_ASSERT_VALUES_EQUAL(result->Record.ResourcesSize(), 0); - } - return result; -} - -THolder<TEvKesus::TEvDescribeQuoterResourcesResult> TTestContext::VerifyDescribeQuoterResources( - const std::vector<ui64>& resourceIds, - const std::vector<TString>& resourcePaths, - bool recursive, - Ydb::StatusIds::StatusCode status) -{ - NKikimrKesus::TEvDescribeQuoterResources req; - for (ui64 resourceId : resourceIds) { - req.AddResourceIds(resourceId); - } - for (const TString& resourcePath : resourcePaths) { - req.AddResourcePaths(resourcePath); - } - req.SetRecursive(recursive); - return VerifyDescribeQuoterResources(req, status); -} - -NKikimrKesus::TEvDescribeQuoterResourcesResult TTestContext::DescribeQuoterResources( - const std::vector<ui64>& resourceIds, - const std::vector<TString>& resourcePaths, - bool recursive) -{ - NKikimrKesus::TEvDescribeQuoterResourcesResult result = VerifyDescribeQuoterResources(resourceIds, resourcePaths, recursive)->Record; - std::sort(result.MutableResources()->begin(), result.MutableResources()->end(), - [](const NKikimrKesus::TStreamingQuoterResource& r1, const NKikimrKesus::TStreamingQuoterResource& r2) { - return r1.GetResourcePath() < r2.GetResourcePath(); - }); - return result; -} - -ui64 TTestContext::AddQuoterResource(const NKikimrKesus::TStreamingQuoterResource& resource, Ydb::StatusIds::StatusCode status) { - ui64 cookie = RandomNumber<ui64>(); - auto edge = Runtime->AllocateEdgeActor(); - auto req = MakeHolder<TEvKesus::TEvAddQuoterResource>(); - *req->Record.MutableResource() = resource; - SendFromEdge(edge, std::move(req), cookie); - auto result = ExpectEdgeEvent<TEvKesus::TEvAddQuoterResourceResult>(edge, cookie); - UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetError().GetStatus(), status, "Faied to create new quoter resource \"" << resource.GetResourcePath() << "\""); - if (status == Ydb::StatusIds::SUCCESS) { - UNIT_ASSERT(result->Record.GetResourceId()); - } - return status == Ydb::StatusIds::SUCCESS ? 
result->Record.GetResourceId() : 0; -} - -ui64 TTestContext::AddQuoterResource(const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status) { - NKikimrKesus::TStreamingQuoterResource resource; - resource.SetResourcePath(resourcePath); - *resource.MutableHierarhicalDRRResourceConfig() = config; - return AddQuoterResource(resource, status); -} - -void TTestContext::UpdateQuoterResource(const NKikimrKesus::TStreamingQuoterResource& resource, Ydb::StatusIds::StatusCode status) { - ui64 cookie = RandomNumber<ui64>(); - auto edge = Runtime->AllocateEdgeActor(); - auto req = MakeHolder<TEvKesus::TEvUpdateQuoterResource>(); - *req->Record.MutableResource() = resource; - SendFromEdge(edge, std::move(req), cookie); - auto result = ExpectEdgeEvent<TEvKesus::TEvUpdateQuoterResourceResult>(edge, cookie); - UNIT_ASSERT_VALUES_EQUAL(result->Record.GetError().GetStatus(), status); - if (status == Ydb::StatusIds::SUCCESS) { - UNIT_ASSERT(result->Record.GetResourceId()); - } -} - -void TTestContext::UpdateQuoterResource(const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status) { - NKikimrKesus::TStreamingQuoterResource resource; - resource.SetResourcePath(resourcePath); - *resource.MutableHierarhicalDRRResourceConfig() = config; - UpdateQuoterResource(resource, status); -} - -void TTestContext::UpdateQuoterResource(ui64 resourceId, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status) { - NKikimrKesus::TStreamingQuoterResource resource; - resource.SetResourceId(resourceId); - *resource.MutableHierarhicalDRRResourceConfig() = config; - UpdateQuoterResource(resource, status); -} - -void TTestContext::DeleteQuoterResource(const NKikimrKesus::TEvDeleteQuoterResource& req, Ydb::StatusIds::StatusCode status) { - ui64 cookie = RandomNumber<ui64>(); - auto edge = Runtime->AllocateEdgeActor(); - SendFromEdge(edge, MakeHolder<TEvKesus::TEvDeleteQuoterResource>(req), cookie); - auto result = ExpectEdgeEvent<TEvKesus::TEvDeleteQuoterResourceResult>(edge, cookie); - UNIT_ASSERT_VALUES_EQUAL(result->Record.GetError().GetStatus(), status); -} - -void TTestContext::DeleteQuoterResource(const TString& resourcePath, Ydb::StatusIds::StatusCode status) { - NKikimrKesus::TEvDeleteQuoterResource req; - req.SetResourcePath(resourcePath); - DeleteQuoterResource(req, status); -} - -void TTestContext::DeleteQuoterResource(ui64 resourceId, Ydb::StatusIds::StatusCode status) { - NKikimrKesus::TEvDeleteQuoterResource req; - req.SetResourceId(resourceId); - DeleteQuoterResource(req, status); -} - -TTestContext::TResourceConsumingInfo::TResourceConsumingInfo(const TString& path, bool consume, double amount, Ydb::StatusIds::StatusCode status) - : Path(path) - , Consume(consume) - , Amount(amount) - , ExpectedStatus(status) -{ -} - -TTestContext::TResourceConsumingInfo::TResourceConsumingInfo(ui64 id, bool consume, double amount, Ydb::StatusIds::StatusCode status) - : Id(id) - , Consume(consume) - , Amount(amount) - , ExpectedStatus(status) -{ -} - +THolder<TEvKesus::TEvDescribeQuoterResourcesResult> TTestContext::VerifyDescribeQuoterResources( + const NKikimrKesus::TEvDescribeQuoterResources& req, + Ydb::StatusIds::StatusCode status) +{ + ui64 cookie = RandomNumber<ui64>(); + auto edge = Runtime->AllocateEdgeActor(); + SendFromEdge(edge, MakeHolder<TEvKesus::TEvDescribeQuoterResources>(req), cookie); + auto result = 
ExpectEdgeEvent<TEvKesus::TEvDescribeQuoterResourcesResult>(edge, cookie); + UNIT_ASSERT_VALUES_EQUAL(result->Record.GetError().GetStatus(), status); + if (status != Ydb::StatusIds::SUCCESS) { + UNIT_ASSERT_VALUES_EQUAL(result->Record.ResourcesSize(), 0); + } + return result; +} + +THolder<TEvKesus::TEvDescribeQuoterResourcesResult> TTestContext::VerifyDescribeQuoterResources( + const std::vector<ui64>& resourceIds, + const std::vector<TString>& resourcePaths, + bool recursive, + Ydb::StatusIds::StatusCode status) +{ + NKikimrKesus::TEvDescribeQuoterResources req; + for (ui64 resourceId : resourceIds) { + req.AddResourceIds(resourceId); + } + for (const TString& resourcePath : resourcePaths) { + req.AddResourcePaths(resourcePath); + } + req.SetRecursive(recursive); + return VerifyDescribeQuoterResources(req, status); +} + +NKikimrKesus::TEvDescribeQuoterResourcesResult TTestContext::DescribeQuoterResources( + const std::vector<ui64>& resourceIds, + const std::vector<TString>& resourcePaths, + bool recursive) +{ + NKikimrKesus::TEvDescribeQuoterResourcesResult result = VerifyDescribeQuoterResources(resourceIds, resourcePaths, recursive)->Record; + std::sort(result.MutableResources()->begin(), result.MutableResources()->end(), + [](const NKikimrKesus::TStreamingQuoterResource& r1, const NKikimrKesus::TStreamingQuoterResource& r2) { + return r1.GetResourcePath() < r2.GetResourcePath(); + }); + return result; +} + +ui64 TTestContext::AddQuoterResource(const NKikimrKesus::TStreamingQuoterResource& resource, Ydb::StatusIds::StatusCode status) { + ui64 cookie = RandomNumber<ui64>(); + auto edge = Runtime->AllocateEdgeActor(); + auto req = MakeHolder<TEvKesus::TEvAddQuoterResource>(); + *req->Record.MutableResource() = resource; + SendFromEdge(edge, std::move(req), cookie); + auto result = ExpectEdgeEvent<TEvKesus::TEvAddQuoterResourceResult>(edge, cookie); + UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetError().GetStatus(), status, "Faied to create new quoter resource \"" << resource.GetResourcePath() << "\""); + if (status == Ydb::StatusIds::SUCCESS) { + UNIT_ASSERT(result->Record.GetResourceId()); + } + return status == Ydb::StatusIds::SUCCESS ? 
result->Record.GetResourceId() : 0; +} + +ui64 TTestContext::AddQuoterResource(const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status) { + NKikimrKesus::TStreamingQuoterResource resource; + resource.SetResourcePath(resourcePath); + *resource.MutableHierarhicalDRRResourceConfig() = config; + return AddQuoterResource(resource, status); +} + +void TTestContext::UpdateQuoterResource(const NKikimrKesus::TStreamingQuoterResource& resource, Ydb::StatusIds::StatusCode status) { + ui64 cookie = RandomNumber<ui64>(); + auto edge = Runtime->AllocateEdgeActor(); + auto req = MakeHolder<TEvKesus::TEvUpdateQuoterResource>(); + *req->Record.MutableResource() = resource; + SendFromEdge(edge, std::move(req), cookie); + auto result = ExpectEdgeEvent<TEvKesus::TEvUpdateQuoterResourceResult>(edge, cookie); + UNIT_ASSERT_VALUES_EQUAL(result->Record.GetError().GetStatus(), status); + if (status == Ydb::StatusIds::SUCCESS) { + UNIT_ASSERT(result->Record.GetResourceId()); + } +} + +void TTestContext::UpdateQuoterResource(const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status) { + NKikimrKesus::TStreamingQuoterResource resource; + resource.SetResourcePath(resourcePath); + *resource.MutableHierarhicalDRRResourceConfig() = config; + UpdateQuoterResource(resource, status); +} + +void TTestContext::UpdateQuoterResource(ui64 resourceId, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status) { + NKikimrKesus::TStreamingQuoterResource resource; + resource.SetResourceId(resourceId); + *resource.MutableHierarhicalDRRResourceConfig() = config; + UpdateQuoterResource(resource, status); +} + +void TTestContext::DeleteQuoterResource(const NKikimrKesus::TEvDeleteQuoterResource& req, Ydb::StatusIds::StatusCode status) { + ui64 cookie = RandomNumber<ui64>(); + auto edge = Runtime->AllocateEdgeActor(); + SendFromEdge(edge, MakeHolder<TEvKesus::TEvDeleteQuoterResource>(req), cookie); + auto result = ExpectEdgeEvent<TEvKesus::TEvDeleteQuoterResourceResult>(edge, cookie); + UNIT_ASSERT_VALUES_EQUAL(result->Record.GetError().GetStatus(), status); +} + +void TTestContext::DeleteQuoterResource(const TString& resourcePath, Ydb::StatusIds::StatusCode status) { + NKikimrKesus::TEvDeleteQuoterResource req; + req.SetResourcePath(resourcePath); + DeleteQuoterResource(req, status); +} + +void TTestContext::DeleteQuoterResource(ui64 resourceId, Ydb::StatusIds::StatusCode status) { + NKikimrKesus::TEvDeleteQuoterResource req; + req.SetResourceId(resourceId); + DeleteQuoterResource(req, status); +} + +TTestContext::TResourceConsumingInfo::TResourceConsumingInfo(const TString& path, bool consume, double amount, Ydb::StatusIds::StatusCode status) + : Path(path) + , Consume(consume) + , Amount(amount) + , ExpectedStatus(status) +{ +} + +TTestContext::TResourceConsumingInfo::TResourceConsumingInfo(ui64 id, bool consume, double amount, Ydb::StatusIds::StatusCode status) + : Id(id) + , Consume(consume) + , Amount(amount) + , ExpectedStatus(status) +{ +} + NKikimrKesus::TEvSubscribeOnResourcesResult TTestContext::SubscribeOnResources(const TActorId& client, const TActorId& edge, const std::vector<TResourceConsumingInfo>& info) { - const ui64 cookie = RandomNumber<ui64>(); - auto req = MakeHolder<TEvKesus::TEvSubscribeOnResources>(); + const ui64 cookie = RandomNumber<ui64>(); + auto req = MakeHolder<TEvKesus::TEvSubscribeOnResources>(); ActorIdToProto(client, 
req->Record.MutableActorID()); - req->Record.MutableResources()->Reserve(info.size()); - for (const TResourceConsumingInfo& res : info) { - auto* reqRes = req->Record.AddResources(); - reqRes->SetResourcePath(res.Path); - Y_ASSERT(!res.Id); // self check - reqRes->SetStartConsuming(res.Consume); - reqRes->SetInitialAmount(res.Amount); - } - - SendFromEdge(edge, std::move(req), cookie); - auto result = ExpectEdgeEvent<TEvKesus::TEvSubscribeOnResourcesResult>(edge, cookie); - UNIT_ASSERT_VALUES_EQUAL(result->Record.ResultsSize(), info.size()); - for (size_t i = 0; i < info.size(); ++i) { - UNIT_ASSERT_VALUES_EQUAL(result->Record.GetResults(i).GetError().GetStatus(), info[i].ExpectedStatus); - const auto status = result->Record.GetResults(i).GetError().GetStatus(); - if (status == Ydb::StatusIds::SUCCESS || status == Ydb::StatusIds::ALREADY_EXISTS) { - UNIT_ASSERT(result->Record.GetResults(i).GetResourceId() != 0); - } else { - UNIT_ASSERT_VALUES_EQUAL(result->Record.GetResults(i).GetResourceId(), 0); - } - } - return result->Record; -} - + req->Record.MutableResources()->Reserve(info.size()); + for (const TResourceConsumingInfo& res : info) { + auto* reqRes = req->Record.AddResources(); + reqRes->SetResourcePath(res.Path); + Y_ASSERT(!res.Id); // self check + reqRes->SetStartConsuming(res.Consume); + reqRes->SetInitialAmount(res.Amount); + } + + SendFromEdge(edge, std::move(req), cookie); + auto result = ExpectEdgeEvent<TEvKesus::TEvSubscribeOnResourcesResult>(edge, cookie); + UNIT_ASSERT_VALUES_EQUAL(result->Record.ResultsSize(), info.size()); + for (size_t i = 0; i < info.size(); ++i) { + UNIT_ASSERT_VALUES_EQUAL(result->Record.GetResults(i).GetError().GetStatus(), info[i].ExpectedStatus); + const auto status = result->Record.GetResults(i).GetError().GetStatus(); + if (status == Ydb::StatusIds::SUCCESS || status == Ydb::StatusIds::ALREADY_EXISTS) { + UNIT_ASSERT(result->Record.GetResults(i).GetResourceId() != 0); + } else { + UNIT_ASSERT_VALUES_EQUAL(result->Record.GetResults(i).GetResourceId(), 0); + } + } + return result->Record; +} + NKikimrKesus::TEvSubscribeOnResourcesResult TTestContext::SubscribeOnResource(const TActorId& client, const TActorId& edge, const TString& path, bool startConsuming, double amount, Ydb::StatusIds::StatusCode status) { - return SubscribeOnResources(client, edge, {TResourceConsumingInfo(path, startConsuming, amount, status)}); -} - + return SubscribeOnResources(client, edge, {TResourceConsumingInfo(path, startConsuming, amount, status)}); +} + void TTestContext::UpdateConsumptionState(const TActorId& client, const TActorId& edge, const std::vector<TResourceConsumingInfo>& info) { - const ui64 cookie = RandomNumber<ui64>(); - auto req = MakeHolder<TEvKesus::TEvUpdateConsumptionState>(); + const ui64 cookie = RandomNumber<ui64>(); + auto req = MakeHolder<TEvKesus::TEvUpdateConsumptionState>(); ActorIdToProto(client, req->Record.MutableActorID()); - req->Record.MutableResourcesInfo()->Reserve(info.size()); - for (const TResourceConsumingInfo& res : info) { - auto* reqRes = req->Record.AddResourcesInfo(); - reqRes->SetResourceId(res.Id); - Y_ASSERT(res.Path.empty()); // self check - reqRes->SetConsumeResource(res.Consume); - reqRes->SetAmount(res.Amount); - } - - SendFromEdge(edge, std::move(req), cookie); - ExpectEdgeEvent<TEvKesus::TEvUpdateConsumptionStateAck>(edge, cookie); -} - + req->Record.MutableResourcesInfo()->Reserve(info.size()); + for (const TResourceConsumingInfo& res : info) { + auto* reqRes = req->Record.AddResourcesInfo(); + 
reqRes->SetResourceId(res.Id); + Y_ASSERT(res.Path.empty()); // self check + reqRes->SetConsumeResource(res.Consume); + reqRes->SetAmount(res.Amount); + } + + SendFromEdge(edge, std::move(req), cookie); + ExpectEdgeEvent<TEvKesus::TEvUpdateConsumptionStateAck>(edge, cookie); +} + void TTestContext::UpdateConsumptionState(const TActorId& client, const TActorId& edge, ui64 id, bool consume, double amount, Ydb::StatusIds::StatusCode status) { - UpdateConsumptionState(client, edge, {TResourceConsumingInfo(id, consume, amount, status)}); -} - + UpdateConsumptionState(client, edge, {TResourceConsumingInfo(id, consume, amount, status)}); +} + void TTestContext::AccountResources(const TActorId& client, const TActorId& edge, const std::vector<TResourceAccountInfo>& info) { const ui64 cookie = RandomNumber<ui64>(); auto req = MakeHolder<TEvKesus::TEvAccountResources>(); @@ -877,19 +877,19 @@ void TTestContext::AccountResources(const TActorId& client, const TActorId& edge AccountResources(client, edge, {TResourceAccountInfo(id, start, interval, std::move(amount))}); } -NKikimrKesus::TEvGetQuoterResourceCountersResult TTestContext::GetQuoterResourceCounters() { - const ui64 cookie = RandomNumber<ui64>(); - const auto edge = Runtime->AllocateEdgeActor(); - SendFromEdge(edge, new TEvKesus::TEvGetQuoterResourceCounters(), cookie); - - auto result = ExpectEdgeEvent<TEvKesus::TEvGetQuoterResourceCountersResult>(edge, cookie); - std::sort(result->Record.MutableResourceCounters()->begin(), - result->Record.MutableResourceCounters()->end(), - [](const auto& c1, const auto c2) { - return c1.GetResourcePath() < c2.GetResourcePath(); - }); - return result->Record; -} - -} -} +NKikimrKesus::TEvGetQuoterResourceCountersResult TTestContext::GetQuoterResourceCounters() { + const ui64 cookie = RandomNumber<ui64>(); + const auto edge = Runtime->AllocateEdgeActor(); + SendFromEdge(edge, new TEvKesus::TEvGetQuoterResourceCounters(), cookie); + + auto result = ExpectEdgeEvent<TEvKesus::TEvGetQuoterResourceCountersResult>(edge, cookie); + std::sort(result->Record.MutableResourceCounters()->begin(), + result->Record.MutableResourceCounters()->end(), + [](const auto& c1, const auto c2) { + return c1.GetResourcePath() < c2.GetResourcePath(); + }); + return result->Record; +} + +} +} diff --git a/ydb/core/kesus/tablet/ut_helpers.h b/ydb/core/kesus/tablet/ut_helpers.h index 8f487a30332..3d1bec9b198 100644 --- a/ydb/core/kesus/tablet/ut_helpers.h +++ b/ydb/core/kesus/tablet/ut_helpers.h @@ -25,7 +25,7 @@ struct TTestContext { TTestContext(); - void Setup(ui32 nodeCount = 1, bool useRealThreads = false); + void Setup(ui32 nodeCount = 1, bool useRealThreads = false); void Finalize(); virtual void SetupLogging(); @@ -66,18 +66,18 @@ struct TTestContext { // Sends payload to tablet from edge, fresh pipe every time void SendFromEdge(const TActorId& edge, IEventBase* payload, ui64 cookie = 0); - template <class TEvent> + template <class TEvent> void SendFromEdge(const TActorId& edge, THolder<TEvent> payload, ui64 cookie = 0) { - SendFromEdge(edge, payload.Release(), cookie); - } - + SendFromEdge(edge, payload.Release(), cookie); + } + // Sends payload to tablet from proxy (caches pipe per proxy/generation pair) void SendFromProxy(const TActorId& proxy, ui64 generation, IEventBase* payload, ui64 cookie = 0); - // set/get config requests - NKikimrKesus::TEvGetConfigResult GetConfig(); - NKikimrKesus::TEvSetConfigResult SetConfig(ui64 txId, const Ydb::Coordination::Config& config, ui64 version, Ydb::StatusIds::StatusCode status = 
Ydb::StatusIds::SUCCESS); - + // set/get config requests + NKikimrKesus::TEvGetConfigResult GetConfig(); + NKikimrKesus::TEvSetConfigResult SetConfig(ui64 txId, const Ydb::Coordination::Config& config, ui64 version, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + // Makes a dummy request using this proxy/generation pair void SyncProxy(const TActorId& proxy, ui64 generation, bool useTransactions = false); @@ -241,56 +241,56 @@ struct TTestContext { ui64 reqId, const TActorId& proxy, ui64 generation, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); TDescribeSemaphoreChanges ExpectDescribeSemaphoreChanged(ui64 reqId, const TActorId& proxy, ui64 generation); - - // Quoter - THolder<TEvKesus::TEvDescribeQuoterResourcesResult> VerifyDescribeQuoterResources( - const NKikimrKesus::TEvDescribeQuoterResources& req, - Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - THolder<TEvKesus::TEvDescribeQuoterResourcesResult> VerifyDescribeQuoterResources( - const std::vector<ui64>& resourceIds, - const std::vector<TString>& resourcePaths, - bool recursive, - Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - NKikimrKesus::TEvDescribeQuoterResourcesResult DescribeQuoterResources( - const std::vector<ui64>& resourceIds, - const std::vector<TString>& resourcePaths, - bool recursive); - - ui64 AddQuoterResource(const NKikimrKesus::TStreamingQuoterResource& resource, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - ui64 AddQuoterResource(const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - ui64 AddQuoterResource(const TString& resourcePath, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS) { - return AddQuoterResource(resourcePath, NKikimrKesus::THierarchicalDRRResourceConfig(), status); - } - ui64 AddQuoterResource(const TString& resourcePath, double maxUnitsPerSecond, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS) { - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(maxUnitsPerSecond); - return AddQuoterResource(resourcePath, cfg, status); - } - - void UpdateQuoterResource(const NKikimrKesus::TStreamingQuoterResource& resource, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - void UpdateQuoterResource(const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - void UpdateQuoterResource(ui64 resourceId, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - void UpdateQuoterResource(ui64 resourceId, double maxUnitsPerSecond, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS) { - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(maxUnitsPerSecond); - return UpdateQuoterResource(resourceId, cfg, status); - } - - void DeleteQuoterResource(const NKikimrKesus::TEvDeleteQuoterResource& req, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - void DeleteQuoterResource(const TString& resourcePath, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - void DeleteQuoterResource(ui64 resourceId, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - - struct TResourceConsumingInfo { - TResourceConsumingInfo(const TString& path, bool consume, double amount = 0.0, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - TResourceConsumingInfo(ui64 id, bool 
consume, double amount = 0.0, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - - TString Path; - ui64 Id = 0; - bool Consume; - double Amount; - Ydb::StatusIds::StatusCode ExpectedStatus; - }; - + + // Quoter + THolder<TEvKesus::TEvDescribeQuoterResourcesResult> VerifyDescribeQuoterResources( + const NKikimrKesus::TEvDescribeQuoterResources& req, + Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + THolder<TEvKesus::TEvDescribeQuoterResourcesResult> VerifyDescribeQuoterResources( + const std::vector<ui64>& resourceIds, + const std::vector<TString>& resourcePaths, + bool recursive, + Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + NKikimrKesus::TEvDescribeQuoterResourcesResult DescribeQuoterResources( + const std::vector<ui64>& resourceIds, + const std::vector<TString>& resourcePaths, + bool recursive); + + ui64 AddQuoterResource(const NKikimrKesus::TStreamingQuoterResource& resource, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + ui64 AddQuoterResource(const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + ui64 AddQuoterResource(const TString& resourcePath, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS) { + return AddQuoterResource(resourcePath, NKikimrKesus::THierarchicalDRRResourceConfig(), status); + } + ui64 AddQuoterResource(const TString& resourcePath, double maxUnitsPerSecond, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS) { + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(maxUnitsPerSecond); + return AddQuoterResource(resourcePath, cfg, status); + } + + void UpdateQuoterResource(const NKikimrKesus::TStreamingQuoterResource& resource, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + void UpdateQuoterResource(const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + void UpdateQuoterResource(ui64 resourceId, const NKikimrKesus::THierarchicalDRRResourceConfig& config, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + void UpdateQuoterResource(ui64 resourceId, double maxUnitsPerSecond, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS) { + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(maxUnitsPerSecond); + return UpdateQuoterResource(resourceId, cfg, status); + } + + void DeleteQuoterResource(const NKikimrKesus::TEvDeleteQuoterResource& req, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + void DeleteQuoterResource(const TString& resourcePath, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + void DeleteQuoterResource(ui64 resourceId, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + + struct TResourceConsumingInfo { + TResourceConsumingInfo(const TString& path, bool consume, double amount = 0.0, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + TResourceConsumingInfo(ui64 id, bool consume, double amount = 0.0, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + + TString Path; + ui64 Id = 0; + bool Consume; + double Amount; + Ydb::StatusIds::StatusCode ExpectedStatus; + }; + struct TResourceAccountInfo { ui64 Id = 0; TInstant Start; @@ -307,14 +307,14 @@ struct TTestContext { NKikimrKesus::TEvSubscribeOnResourcesResult SubscribeOnResources(const TActorId& client, const TActorId& edge, const std::vector<TResourceConsumingInfo>& info); 
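// A minimal usage sketch (not part of the commit) for the quoter test helpers declared
// in this header, assuming the TTestContext fixture above and the unittest macros already
// used in ut_helpers.cpp; the resource names, rates and the expected resource count below
// are illustrative only.
void QuoterHelpersUsageSketch() {
    TTestContext ctx;
    ctx.Setup();
    const ui64 rootId = ctx.AddQuoterResource("Root", 10.0);   // hierarchical DRR root limited to 10 units/s
    ctx.AddQuoterResource("Root/Child");                       // child resource under the same root
    auto described = ctx.DescribeQuoterResources({}, {"Root"}, true /* recursive */);
    UNIT_ASSERT_VALUES_EQUAL(described.ResourcesSize(), 2);    // root plus child expected in this sketch
    ctx.UpdateQuoterResource(rootId, 5.0);                     // tighten the limit on the root
    ctx.DeleteQuoterResource("Root/Child");
    ctx.Finalize();
}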
NKikimrKesus::TEvSubscribeOnResourcesResult SubscribeOnResource(const TActorId& client, const TActorId& edge, const TString& path, bool startConsuming, double amount = 0.0, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - + void UpdateConsumptionState(const TActorId& client, const TActorId& edge, const std::vector<TResourceConsumingInfo>& info); void UpdateConsumptionState(const TActorId& client, const TActorId& edge, ui64 id, bool consume, double amount = 0.0, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - + void AccountResources(const TActorId& client, const TActorId& edge, const std::vector<TResourceAccountInfo>& info); void AccountResources(const TActorId& client, const TActorId& edge, ui64 id, TInstant start, TDuration interval, std::vector<double>&& amount); - NKikimrKesus::TEvGetQuoterResourceCountersResult GetQuoterResourceCounters(); + NKikimrKesus::TEvGetQuoterResourceCountersResult GetQuoterResourceCounters(); }; } diff --git a/ydb/core/kesus/tablet/ya.make b/ydb/core/kesus/tablet/ya.make index 02c34c7fceb..723a86904da 100644 --- a/ydb/core/kesus/tablet/ya.make +++ b/ydb/core/kesus/tablet/ya.make @@ -7,9 +7,9 @@ OWNER( SRCS( events.cpp - probes.cpp - quoter_resource_tree.cpp - quoter_runtime.cpp + probes.cpp + quoter_resource_tree.cpp + quoter_runtime.cpp rate_accounting.cpp schema.cpp tablet_db.cpp @@ -21,10 +21,10 @@ SRCS( tx_dummy.cpp tx_init_schema.cpp tx_init.cpp - tx_quoter_resource_add.cpp - tx_quoter_resource_delete.cpp - tx_quoter_resource_describe.cpp - tx_quoter_resource_update.cpp + tx_quoter_resource_add.cpp + tx_quoter_resource_delete.cpp + tx_quoter_resource_describe.cpp + tx_quoter_resource_update.cpp tx_self_check.cpp tx_semaphore_acquire.cpp tx_semaphore_create.cpp diff --git a/ydb/core/kqp/compute_actor/kqp_compute_actor.h b/ydb/core/kqp/compute_actor/kqp_compute_actor.h index 02b7b98029e..06faa6edd6d 100644 --- a/ydb/core/kqp/compute_actor/kqp_compute_actor.h +++ b/ydb/core/kqp/compute_actor/kqp_compute_actor.h @@ -15,14 +15,14 @@ TComputationNodeFactory GetKqpActorComputeFactory(TKqpScanComputeContext* comput namespace NKqp { -IActor* CreateKqpComputeActor(const TActorId& executerId, ui64 txId, NYql::NDqProto::TDqTask&& task, - NYql::NDq::IDqSourceActorFactory::TPtr sourceActorFactory, NYql::NDq::IDqSinkActorFactory::TPtr sinkActorFactory, +IActor* CreateKqpComputeActor(const TActorId& executerId, ui64 txId, NYql::NDqProto::TDqTask&& task, + NYql::NDq::IDqSourceActorFactory::TPtr sourceActorFactory, NYql::NDq::IDqSinkActorFactory::TPtr sinkActorFactory, const NYql::NDq::TComputeRuntimeSettings& settings, const NYql::NDq::TComputeMemoryLimits& memoryLimits); IActor* CreateKqpScanComputeActor(const NKikimrKqp::TKqpSnapshot& snapshot, const TActorId& executerId, ui64 txId, - NYql::NDqProto::TDqTask&& task, NYql::NDq::IDqSourceActorFactory::TPtr sourceActorFactory, NYql::NDq::IDqSinkActorFactory::TPtr sinkActorFactory, - const NYql::NDq::TComputeRuntimeSettings& settings, const NYql::NDq::TComputeMemoryLimits& memoryLimits, - TIntrusivePtr<TKqpCounters> counters); + NYql::NDqProto::TDqTask&& task, NYql::NDq::IDqSourceActorFactory::TPtr sourceActorFactory, NYql::NDq::IDqSinkActorFactory::TPtr sinkActorFactory, + const NYql::NDq::TComputeRuntimeSettings& settings, const NYql::NDq::TComputeMemoryLimits& memoryLimits, + TIntrusivePtr<TKqpCounters> counters); } // namespace NKqp diff --git a/ydb/core/kqp/compute_actor/kqp_compute_actor_impl.h b/ydb/core/kqp/compute_actor/kqp_compute_actor_impl.h index b32537d9514..dd52fd2f258 100644 
--- a/ydb/core/kqp/compute_actor/kqp_compute_actor_impl.h +++ b/ydb/core/kqp/compute_actor/kqp_compute_actor_impl.h @@ -23,9 +23,9 @@ public: IDqOutputConsumer::TPtr CreateOutputConsumer(const NDqProto::TTaskOutput& outputDesc, const NMiniKQL::TType* type, NUdf::IApplyContext* applyCtx, const NMiniKQL::TTypeEnvironment& typeEnv, - TVector<IDqOutput::TPtr>&& outputs) const override + TVector<IDqOutput::TPtr>&& outputs) const override { - return KqpBuildOutputConsumer(outputDesc, type, applyCtx, typeEnv, std::move(outputs)); + return KqpBuildOutputConsumer(outputDesc, type, applyCtx, typeEnv, std::move(outputs)); } IDqChannelStorage::TPtr CreateChannelStorage(ui64 channelId) const override { diff --git a/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp b/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp index 750207da8e4..23699c399c8 100644 --- a/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp +++ b/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp @@ -34,7 +34,7 @@ public: TKqpComputeActor(const TActorId& executerId, ui64 txId, NDqProto::TDqTask&& task, IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, - const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits) + const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits) : TBase(executerId, txId, std::move(task), std::move(sourceActorFactory), std::move(sinkActorFactory), settings, memoryLimits) , ComputeCtx(settings.StatsMode) { @@ -317,8 +317,8 @@ private: } // anonymous namespace IActor* CreateKqpComputeActor(const TActorId& executerId, ui64 txId, NDqProto::TDqTask&& task, - IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, - const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits) + IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, + const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits) { return new TKqpComputeActor(executerId, txId, std::move(task), std::move(sourceActorFactory), std::move(sinkActorFactory), settings, memoryLimits); diff --git a/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp b/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp index f8e3cd0c772..e5a6dbc5958 100644 --- a/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp +++ b/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp @@ -72,8 +72,8 @@ public: } TKqpScanComputeActor(const NKikimrKqp::TKqpSnapshot& snapshot, const TActorId& executerId, ui64 txId, - NDqProto::TDqTask&& task, IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, - const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits, TIntrusivePtr<TKqpCounters> counters) + NDqProto::TDqTask&& task, IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, + const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits, TIntrusivePtr<TKqpCounters> counters) : TBase(executerId, txId, std::move(task), std::move(sourceActorFactory), std::move(sinkActorFactory), settings, memoryLimits) , ComputeCtx(settings.StatsMode) , Snapshot(snapshot) @@ -1133,11 +1133,11 @@ private: } // anonymous namespace IActor* CreateKqpScanComputeActor(const NKikimrKqp::TKqpSnapshot& snapshot, const TActorId& executerId, ui64 txId, - NDqProto::TDqTask&& task, IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, - const 
TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits, TIntrusivePtr<TKqpCounters> counters) + NDqProto::TDqTask&& task, IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, + const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits, TIntrusivePtr<TKqpCounters> counters) { - return new TKqpScanComputeActor(snapshot, executerId, txId, std::move(task), std::move(sourceActorFactory), std::move(sinkActorFactory), - settings, memoryLimits, counters); + return new TKqpScanComputeActor(snapshot, executerId, txId, std::move(task), std::move(sourceActorFactory), std::move(sinkActorFactory), + settings, memoryLimits, counters); } } // namespace NKqp diff --git a/ydb/core/kqp/executer/kqp_data_executer.cpp b/ydb/core/kqp/executer/kqp_data_executer.cpp index 003d4ffa206..210d5fe14ea 100644 --- a/ydb/core/kqp/executer/kqp_data_executer.cpp +++ b/ydb/core/kqp/executer/kqp_data_executer.cpp @@ -1235,7 +1235,7 @@ private: return false; }; - auto computeActor = CreateKqpComputeActor(SelfId(), TxId, std::move(taskDesc), nullptr, nullptr, settings, limits); + auto computeActor = CreateKqpComputeActor(SelfId(), TxId, std::move(taskDesc), nullptr, nullptr, settings, limits); auto computeActorId = Register(computeActor); task.ComputeActorId = computeActorId; diff --git a/ydb/core/kqp/node/kqp_node.cpp b/ydb/core/kqp/node/kqp_node.cpp index 24f8f2ac79a..753b44d51f6 100644 --- a/ydb/core/kqp/node/kqp_node.cpp +++ b/ydb/core/kqp/node/kqp_node.cpp @@ -311,11 +311,11 @@ private: IActor* computeActor; if (tableKind == ETableKind::Datashard || tableKind == ETableKind::Olap) { computeActor = CreateKqpScanComputeActor(msg.GetSnapshot(), request.Executer, txId, std::move(dqTask), - nullptr, nullptr, runtimeSettings, memoryLimits, Counters); + nullptr, nullptr, runtimeSettings, memoryLimits, Counters); taskCtx.ComputeActorId = Register(computeActor); } else { if (Y_LIKELY(!CaFactory)) { - computeActor = CreateKqpComputeActor(request.Executer, txId, std::move(dqTask), nullptr, nullptr, runtimeSettings, + computeActor = CreateKqpComputeActor(request.Executer, txId, std::move(dqTask), nullptr, nullptr, runtimeSettings, memoryLimits); taskCtx.ComputeActorId = Register(computeActor); } else { diff --git a/ydb/core/kqp/runtime/kqp_channel_storage.cpp b/ydb/core/kqp/runtime/kqp_channel_storage.cpp index d39d0090905..3514c81dc44 100644 --- a/ydb/core/kqp/runtime/kqp_channel_storage.cpp +++ b/ydb/core/kqp/runtime/kqp_channel_storage.cpp @@ -55,8 +55,8 @@ public: Become(&TKqpChannelStorageActor::WorkState); } - static constexpr char ActorName[] = "KQP_CHANNEL_STORAGE"; - + static constexpr char ActorName[] = "KQP_CHANNEL_STORAGE"; + protected: void PassAway() override { Send(SpillingActorId, new TEvents::TEvPoison); diff --git a/ydb/core/kqp/runtime/kqp_output_stream.cpp b/ydb/core/kqp/runtime/kqp_output_stream.cpp index 640706d9b45..2de6c9363c3 100644 --- a/ydb/core/kqp/runtime/kqp_output_stream.cpp +++ b/ydb/core/kqp/runtime/kqp_output_stream.cpp @@ -20,10 +20,10 @@ using namespace NUdf; class TKqpOutputRangePartitionConsumer : public IDqOutputConsumer { public: TKqpOutputRangePartitionConsumer(const TTypeEnvironment& typeEnv, - TVector<NYql::NDq::IDqOutput::TPtr>&& outputs, TVector<TKqpRangePartition>&& partitions, + TVector<NYql::NDq::IDqOutput::TPtr>&& outputs, TVector<TKqpRangePartition>&& partitions, TVector<TDataTypeId>&& keyColumnTypes, TVector<ui32>&& keyColumnIndices) : TypeEnv(typeEnv) - , Outputs(std::move(outputs)) + , 
Outputs(std::move(outputs)) , Partitions(std::move(partitions)) , KeyColumnTypes(std::move(keyColumnTypes)) , KeyColumnIndices(std::move(keyColumnIndices)) @@ -35,25 +35,25 @@ public: } bool IsFull() const override { - return AnyOf(Outputs, [](const auto& output) { return output->IsFull(); }); + return AnyOf(Outputs, [](const auto& output) { return output->IsFull(); }); } void Consume(TUnboxedValue&& value) final { ui32 partitionIndex = FindKeyPartitionIndex(TypeEnv, value, Partitions, KeyColumnTypes, KeyColumnIndices, [](const auto& partition) { return partition.Range; }); - Outputs[partitionIndex]->Push(std::move(value)); + Outputs[partitionIndex]->Push(std::move(value)); } void Finish() final { - for (auto& output : Outputs) { - output->Finish(); + for (auto& output : Outputs) { + output->Finish(); } } private: const TTypeEnvironment& TypeEnv; - TVector<NYql::NDq::IDqOutput::TPtr> Outputs; + TVector<NYql::NDq::IDqOutput::TPtr> Outputs; TVector<TKqpRangePartition> Partitions; TVector<TDataTypeId> KeyColumnTypes; TVector<ui32> KeyColumnIndices; @@ -62,11 +62,11 @@ private: } // namespace NYql::NDq::IDqOutputConsumer::TPtr CreateOutputRangePartitionConsumer( - TVector<NYql::NDq::IDqOutput::TPtr>&& outputs, TVector<TKqpRangePartition>&& partitions, + TVector<NYql::NDq::IDqOutput::TPtr>&& outputs, TVector<TKqpRangePartition>&& partitions, TVector<NUdf::TDataTypeId>&& keyColumnTypes, TVector<ui32>&& keyColumnIndices, const NMiniKQL::TTypeEnvironment& typeEnv) { - return MakeIntrusive<TKqpOutputRangePartitionConsumer>(typeEnv, std::move(outputs), std::move(partitions), + return MakeIntrusive<TKqpOutputRangePartitionConsumer>(typeEnv, std::move(outputs), std::move(partitions), std::move(keyColumnTypes), std::move(keyColumnIndices)); } diff --git a/ydb/core/kqp/runtime/kqp_runtime_impl.h b/ydb/core/kqp/runtime/kqp_runtime_impl.h index 004af2a5d6d..ea22736770d 100644 --- a/ydb/core/kqp/runtime/kqp_runtime_impl.h +++ b/ydb/core/kqp/runtime/kqp_runtime_impl.h @@ -18,7 +18,7 @@ TTableId ParseTableId(const NMiniKQL::TRuntimeNode& node); NUdf::TDataTypeId UnwrapDataTypeFromStruct(const NMiniKQL::TStructType& structType, ui32 index); NYql::NDq::IDqOutputConsumer::TPtr CreateOutputRangePartitionConsumer( - TVector<NYql::NDq::IDqOutput::TPtr>&& outputs, TVector<TKqpRangePartition>&& partitions, + TVector<NYql::NDq::IDqOutput::TPtr>&& outputs, TVector<TKqpRangePartition>&& partitions, TVector<NUdf::TDataTypeId>&& keyColumnTypes, TVector<ui32>&& keyColumnIndices, const NMiniKQL::TTypeEnvironment& typeEnv); diff --git a/ydb/core/kqp/runtime/kqp_spilling_file.cpp b/ydb/core/kqp/runtime/kqp_spilling_file.cpp index 0dd0a4f7a3d..9ff1542d4d9 100644 --- a/ydb/core/kqp/runtime/kqp_spilling_file.cpp +++ b/ydb/core/kqp/runtime/kqp_spilling_file.cpp @@ -84,8 +84,8 @@ public: Become(&TKqpLocalFileSpillingActor::WorkState); } - static constexpr char ActorName[] = "KQP_LOCAL_FILE_SPILLING"; - + static constexpr char ActorName[] = "KQP_LOCAL_FILE_SPILLING"; + private: STRICT_STFUNC(WorkState, hFunc(TEvKqpSpilling::TEvWrite, HandleWork) @@ -229,8 +229,8 @@ public: Become(&TKqpLocalFileSpillingService::WorkState); } - static constexpr char ActorName[] = "KQP_LOCAL_FILE_SPILLING_SERVICE"; - + static constexpr char ActorName[] = "KQP_LOCAL_FILE_SPILLING_SERVICE"; + protected: void PassAway() override { IoThreadPool->Stop(); diff --git a/ydb/core/kqp/runtime/kqp_tasks_runner.cpp b/ydb/core/kqp/runtime/kqp_tasks_runner.cpp index 70b1038e6cd..c8a6e652548 100644 --- a/ydb/core/kqp/runtime/kqp_tasks_runner.cpp +++ 
b/ydb/core/kqp/runtime/kqp_tasks_runner.cpp @@ -22,7 +22,7 @@ using namespace NYql; using namespace NDq; IDqOutputConsumer::TPtr KqpBuildOutputConsumer(const NDqProto::TTaskOutput& outputDesc, const TType* type, - NUdf::IApplyContext* applyCtx, const TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& outputs) + NUdf::IApplyContext* applyCtx, const TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& outputs) { switch (outputDesc.GetTypeCase()) { case NDqProto::TTaskOutput::kRangePartition: { @@ -44,7 +44,7 @@ IDqOutputConsumer::TPtr KqpBuildOutputConsumer(const NDqProto::TTaskOutput& outp partitions.emplace_back(std::move(partition)); } - return CreateOutputRangePartitionConsumer(std::move(outputs), std::move(partitions), + return CreateOutputRangePartitionConsumer(std::move(outputs), std::move(partitions), std::move(keyColumnTypes), std::move(keyColumnIndices), typeEnv); } @@ -53,7 +53,7 @@ IDqOutputConsumer::TPtr KqpBuildOutputConsumer(const NDqProto::TTaskOutput& outp } default: { - return DqBuildOutputConsumer(outputDesc, type, typeEnv, std::move(outputs)); + return DqBuildOutputConsumer(outputDesc, type, typeEnv, std::move(outputs)); } } } diff --git a/ydb/core/kqp/runtime/kqp_tasks_runner.h b/ydb/core/kqp/runtime/kqp_tasks_runner.h index 6ae8bf1db75..b70da9f1490 100644 --- a/ydb/core/kqp/runtime/kqp_tasks_runner.h +++ b/ydb/core/kqp/runtime/kqp_tasks_runner.h @@ -8,7 +8,7 @@ namespace NKqp { NYql::NDq::IDqOutputConsumer::TPtr KqpBuildOutputConsumer(const NYql::NDqProto::TTaskOutput& outputDesc, const NMiniKQL::TType* type, NUdf::IApplyContext* applyCtx, const NMiniKQL::TTypeEnvironment& typeEnv, - TVector<NYql::NDq::IDqOutput::TPtr>&& outputs); + TVector<NYql::NDq::IDqOutput::TPtr>&& outputs); TIntrusivePtr<NYql::NDq::IDqTaskRunner> CreateKqpTaskRunner(const NYql::NDq::TDqTaskRunnerContext& execCtx, const NYql::NDq::TDqTaskRunnerSettings& settings, const NYql::NDq::TLogFunc& logFunc); diff --git a/ydb/core/persqueue/blob.h b/ydb/core/persqueue/blob.h index 633ec2499a3..6b6ae0b010a 100644 --- a/ydb/core/persqueue/blob.h +++ b/ydb/core/persqueue/blob.h @@ -250,7 +250,7 @@ struct THead { IOutputStream& operator <<(IOutputStream& out, const THead& value); -//stucture for tracking written KV-blobs, stored in memory parts of one partitioned blob +//stucture for tracking written KV-blobs, stored in memory parts of one partitioned blob class TPartitionedBlob { public: TPartitionedBlob& operator=(const TPartitionedBlob& x); diff --git a/ydb/core/persqueue/partition.cpp b/ydb/core/persqueue/partition.cpp index cc8e2419cf2..52df73c7eb8 100644 --- a/ydb/core/persqueue/partition.cpp +++ b/ydb/core/persqueue/partition.cpp @@ -42,8 +42,8 @@ static const ui32 LEVEL0 = 32; static const TDuration UPDATE_AVAIL_SIZE_INTERVAL = TDuration::MilliSeconds(100); -static const TString WRITE_QUOTA_ROOT_PATH = "write-quota"; - +static const TString WRITE_QUOTA_ROOT_PATH = "write-quota"; + struct TPartition::THasDataReq { ui64 Num; ui64 Offset; @@ -289,7 +289,7 @@ void TPartition::ReplyWrite( write->SetMaxSeqNo(maxSeqNo); write->SetOffset(offset); - write->SetPartitionQuotedTimeMs(partitionQuotedTime); + write->SetPartitionQuotedTimeMs(partitionQuotedTime); write->SetTopicQuotedTimeMs(topicQuotedTime.MilliSeconds()); write->SetTotalTimeInPartitionQueueMs(queueTime); write->SetWriteTimeMs(writeTime); @@ -515,8 +515,8 @@ TPartition::TPartition(ui64 tabletId, ui32 partition, const TActorId& tablet, co WriteTimestampEstimate = ManageWriteTimestampEstimate ? 
ctx.Now() : TInstant::Zero(); - CalcTopicWriteQuotaParams(); - + CalcTopicWriteQuotaParams(); + Counters.Populate(counters); } @@ -562,7 +562,7 @@ void TPartition::HandleMonitoring(TEvPQ::TEvMonRequest::TPtr& ev, const TActorCo res.push_back(out.Str()); out.Clear(); } for (auto& avg : AvgWriteBytes) { - out << "AvgWriteSize per " << avg.GetDuration().ToString() << " is " << avg.GetValue() << " bytes"; + out << "AvgWriteSize per " << avg.GetDuration().ToString() << " is " << avg.GetValue() << " bytes"; res.push_back(out.Str()); out.Clear(); } out << Config.DebugString(); res.push_back(out.Str()); out.Clear(); @@ -809,7 +809,7 @@ void TPartition::SetupTopicCounters(const TActorContext& ctx) { MsgsWritten = NKikimr::NPQ::TMultiCounter(GetServiceCounters(counters, "pqproxy|writeSession"), GetLabels(TopicName), {}, {"MessagesWritten" + suffix}, true); - + TVector<NPQ::TLabelsInfo> aggr = {{{{"Account", NPersQueue::GetAccount(TopicName)}}, {"total"}}}; ui32 border = AppData(ctx)->PQConfig.GetWriteLatencyBigMs(); auto subGroup = GetServiceCounters(counters, "pqproxy|SLI"); @@ -843,7 +843,7 @@ void TPartition::SetupTopicCounters(const TActorContext& ctx) { void TPartition::SetupStreamCounters(const TActorContext& ctx) { auto counters = AppData(ctx)->Counters; auto labels = NKikimr::NPQ::GetLabelsForStream(TopicName, CloudId, DbId, FolderId); - + WriteBufferIsFullCounter.SetCounter( GetCountersForStream(counters, "writingTime"), {{"host", DCId}, @@ -1967,10 +1967,10 @@ void TPartition::Handle(TEvPQ::TEvPartitionStatus::TPtr& ev, const TActorContext result.SetGapSize(headGapSize + GapSize); Y_VERIFY(AvgWriteBytes.size() == 4); - result.SetAvgWriteSpeedPerSec(AvgWriteBytes[0].GetValue()); - result.SetAvgWriteSpeedPerMin(AvgWriteBytes[1].GetValue()); - result.SetAvgWriteSpeedPerHour(AvgWriteBytes[2].GetValue()); - result.SetAvgWriteSpeedPerDay(AvgWriteBytes[3].GetValue()); + result.SetAvgWriteSpeedPerSec(AvgWriteBytes[0].GetValue()); + result.SetAvgWriteSpeedPerMin(AvgWriteBytes[1].GetValue()); + result.SetAvgWriteSpeedPerHour(AvgWriteBytes[2].GetValue()); + result.SetAvgWriteSpeedPerDay(AvgWriteBytes[3].GetValue()); Y_VERIFY(AvgQuotaBytes.size() == 4); result.SetAvgQuotaSpeedPerSec(AvgQuotaBytes[0].GetValue()); @@ -2686,8 +2686,8 @@ void TPartition::AnswerCurrentWrites(const TActorContext& ctx) { ui64 offset = EndOffset; while (!Responses.empty()) { - const ui64 quotedTime = Responses.front().QuotedTime; - const ui64 queueTime = Responses.front().QueueTime; + const ui64 quotedTime = Responses.front().QuotedTime; + const ui64 queueTime = Responses.front().QueueTime; const ui64 writeTime = ctx.Now().MilliSeconds() - Responses.front().WriteTime; if (Responses.front().IsWrite()) { @@ -2740,9 +2740,9 @@ void TPartition::AnswerCurrentWrites(const TActorContext& ctx) "Answering for message sourceid: '" << EscapeC(s) << "', Topic: '" << TopicName << "', Partition: " << Partition << ", SeqNo: " << seqNo << ", partNo: " << partNo << ", Offset: " << offset << " is " << (already ? 
"already written" : "stored on disk") ); - if (PartitionWriteQuotaWaitCounter) { - PartitionWriteQuotaWaitCounter->IncFor(quotedTime); - } + if (PartitionWriteQuotaWaitCounter) { + PartitionWriteQuotaWaitCounter->IncFor(quotedTime); + } if (!already && partNo + 1 == totalParts) ++offset; @@ -2793,7 +2793,7 @@ void TPartition::AnswerCurrentWrites(const TActorContext& ctx) } Responses.pop_front(); } - TopicQuotaWaitTimeForCurrentBlob = TDuration::Zero(); + TopicQuotaWaitTimeForCurrentBlob = TDuration::Zero(); } @@ -3251,7 +3251,7 @@ void TPartition::ReportLabeledCounters(const TActorContext& ctx) ui32 id = METRIC_TOTAL_WRITE_SPEED_1; for (ui32 i = 0; i < AvgWriteBytes.size(); ++i) { - ui64 avg = AvgWriteBytes[i].GetValue(); + ui64 avg = AvgWriteBytes[i].GetValue(); if (avg != PartitionLabeledCounters.GetCounters()[id].Get()) { haveChanges = true; PartitionLabeledCounters.GetCounters()[id].Set(avg); //total @@ -3510,12 +3510,12 @@ void TPartition::HandleWriteResponse(const TActorContext& ctx) { //All ok auto now = ctx.Now(); - const auto& quotingConfig = AppData()->PQConfig.GetQuotingConfig(); + const auto& quotingConfig = AppData()->PQConfig.GetQuotingConfig(); if (quotingConfig.GetTopicWriteQuotaEntityToLimit() == NKikimrPQ::TPQConfig::TQuotingConfig::USER_PAYLOAD_SIZE) { - WriteQuota.Exaust(WriteNewSize, now); - } else { - WriteQuota.Exaust(WriteCycleSize, now); - } + WriteQuota.Exaust(WriteNewSize, now); + } else { + WriteQuota.Exaust(WriteCycleSize, now); + } for (auto& avg : AvgWriteBytes) { avg.Update(WriteNewSize, now); } @@ -4448,18 +4448,18 @@ bool TPartition::ProcessWrites(TEvKeyValue::TEvRequest* request, const TActorCon if (!WriteQuota.CanExaust()) { // Waiting for partition quota. SetDeadlinesForWrites(ctx); - return false; - } + return false; + } if (WaitingForPreviousBlobQuota()) { // Waiting for topic quota. SetDeadlinesForWrites(ctx); - if (StartTopicQuotaWaitTimeForCurrentBlob == TInstant::Zero() && !Requests.empty()) { - StartTopicQuotaWaitTimeForCurrentBlob = TActivationContext::Now(); - } + if (StartTopicQuotaWaitTimeForCurrentBlob == TInstant::Zero() && !Requests.empty()) { + StartTopicQuotaWaitTimeForCurrentBlob = TActivationContext::Now(); + } return false; } - + QuotaDeadline = TInstant::Zero(); if (Requests.empty()) @@ -4550,14 +4550,14 @@ void TPartition::HandleWrites(const TActorContext& ctx) bool res = ProcessWrites(request.Get(), ctx); Y_VERIFY(!res); } - Y_VERIFY(Requests.empty() || !WriteQuota.CanExaust() || WaitingForPreviousBlobQuota()); //in this case all writes must be processed or no quota left + Y_VERIFY(Requests.empty() || !WriteQuota.CanExaust() || WaitingForPreviousBlobQuota()); //in this case all writes must be processed or no quota left AnswerCurrentWrites(ctx); //in case if all writes are already done - no answer will be called on kv write, no kv write at all BecomeIdle(ctx); return; } WritesTotal.Inc(); - WriteBlobWithQuota(std::move(request)); + WriteBlobWithQuota(std::move(request)); } @@ -4623,100 +4623,100 @@ void TPartition::ProcessRead(const TActorContext& ctx, TReadInfo&& info, const u ctx.Send(BlobCache, request.Release()); } -void TPartition::Handle(TEvQuota::TEvClearance::TPtr& ev, const TActorContext& ctx) -{ - const ui64 cookie = ev->Cookie; - LOG_DEBUG_S(ctx, NKikimrServices::PERSQUEUE, "Got quota. Topic: \"" << TopicName << "\". Partition: " - << Partition << ": " << ev->Get()->Result << ". 
Cookie: " << cookie); - // Check - if (Y_UNLIKELY(ev->Get()->Result != TEvQuota::TEvClearance::EResult::Success)) { - Y_VERIFY(ev->Get()->Result != TEvQuota::TEvClearance::EResult::Deadline); // We set deadline == inf in quota request. - LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "Got quota error. Topic: \"" << TopicName << "\". Partition " - << Partition << ": " << ev->Get()->Result); - ctx.Send(Tablet, new TEvents::TEvPoisonPill()); - return; - } - - // Search for proper request - Y_VERIFY(TopicQuotaRequestCookie == cookie); - TopicQuotaRequestCookie = 0; - Y_ASSERT(!WaitingForPreviousBlobQuota()); - - // Metrics - TopicQuotaWaitTimeForCurrentBlob = StartTopicQuotaWaitTimeForCurrentBlob ? TActivationContext::Now() - StartTopicQuotaWaitTimeForCurrentBlob : TDuration::Zero(); - if (TopicWriteQuotaWaitCounter) { - TopicWriteQuotaWaitCounter->IncFor(TopicQuotaWaitTimeForCurrentBlob.MilliSeconds()); - } - // Reset quota wait time - StartTopicQuotaWaitTimeForCurrentBlob = TInstant::Zero(); - +void TPartition::Handle(TEvQuota::TEvClearance::TPtr& ev, const TActorContext& ctx) +{ + const ui64 cookie = ev->Cookie; + LOG_DEBUG_S(ctx, NKikimrServices::PERSQUEUE, "Got quota. Topic: \"" << TopicName << "\". Partition: " + << Partition << ": " << ev->Get()->Result << ". Cookie: " << cookie); + // Check + if (Y_UNLIKELY(ev->Get()->Result != TEvQuota::TEvClearance::EResult::Success)) { + Y_VERIFY(ev->Get()->Result != TEvQuota::TEvClearance::EResult::Deadline); // We set deadline == inf in quota request. + LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "Got quota error. Topic: \"" << TopicName << "\". Partition " + << Partition << ": " << ev->Get()->Result); + ctx.Send(Tablet, new TEvents::TEvPoisonPill()); + return; + } + + // Search for proper request + Y_VERIFY(TopicQuotaRequestCookie == cookie); + TopicQuotaRequestCookie = 0; + Y_ASSERT(!WaitingForPreviousBlobQuota()); + + // Metrics + TopicQuotaWaitTimeForCurrentBlob = StartTopicQuotaWaitTimeForCurrentBlob ? TActivationContext::Now() - StartTopicQuotaWaitTimeForCurrentBlob : TDuration::Zero(); + if (TopicWriteQuotaWaitCounter) { + TopicWriteQuotaWaitCounter->IncFor(TopicQuotaWaitTimeForCurrentBlob.MilliSeconds()); + } + // Reset quota wait time + StartTopicQuotaWaitTimeForCurrentBlob = TInstant::Zero(); + if (CurrentStateFunc() == &TThis::StateIdle) - HandleWrites(ctx); -} - -size_t TPartition::GetQuotaRequestSize(const TEvKeyValue::TEvRequest& request) -{ - if (AppData()->PQConfig.GetQuotingConfig().GetTopicWriteQuotaEntityToLimit() == NKikimrPQ::TPQConfig::TQuotingConfig::USER_PAYLOAD_SIZE) { - return WriteNewSize; - } else { - size_t dataSize = 0; - for (const auto& cmdWrite : request.Record.GetCmdWrite()) { - dataSize += cmdWrite.GetValue().size(); - } - return dataSize; - } -} - -void TPartition::RequestQuotaForWriteBlobRequest(size_t dataSize, ui64 cookie) -{ - LOG_DEBUG_S(TActivationContext::AsActorContext(), NKikimrServices::PERSQUEUE, - "Send write quota request. Topic: \"" << TopicName << "\". Partition: " << Partition << ". Amount: " << dataSize << ". 
Cookie: " << cookie); - - Send(MakeQuoterServiceID(), - new TEvQuota::TEvRequest( - TEvQuota::EResourceOperator::And, - { TEvQuota::TResourceLeaf(TopicWriteQuoterPath, TopicWriteQuotaResourcePath, dataSize) }, - TDuration::Max()), - 0, - cookie); -} - -bool TPartition::WaitingForPreviousBlobQuota() const { - return TopicQuotaRequestCookie != 0; -} - + HandleWrites(ctx); +} + +size_t TPartition::GetQuotaRequestSize(const TEvKeyValue::TEvRequest& request) +{ + if (AppData()->PQConfig.GetQuotingConfig().GetTopicWriteQuotaEntityToLimit() == NKikimrPQ::TPQConfig::TQuotingConfig::USER_PAYLOAD_SIZE) { + return WriteNewSize; + } else { + size_t dataSize = 0; + for (const auto& cmdWrite : request.Record.GetCmdWrite()) { + dataSize += cmdWrite.GetValue().size(); + } + return dataSize; + } +} + +void TPartition::RequestQuotaForWriteBlobRequest(size_t dataSize, ui64 cookie) +{ + LOG_DEBUG_S(TActivationContext::AsActorContext(), NKikimrServices::PERSQUEUE, + "Send write quota request. Topic: \"" << TopicName << "\". Partition: " << Partition << ". Amount: " << dataSize << ". Cookie: " << cookie); + + Send(MakeQuoterServiceID(), + new TEvQuota::TEvRequest( + TEvQuota::EResourceOperator::And, + { TEvQuota::TResourceLeaf(TopicWriteQuoterPath, TopicWriteQuotaResourcePath, dataSize) }, + TDuration::Max()), + 0, + cookie); +} + +bool TPartition::WaitingForPreviousBlobQuota() const { + return TopicQuotaRequestCookie != 0; +} + void TPartition::WriteBlobWithQuota(THolder<TEvKeyValue::TEvRequest>&& request) -{ - // Request quota and write blob. - // Mirrored topics are not quoted in local dc. +{ + // Request quota and write blob. + // Mirrored topics are not quoted in local dc. const bool skip = !IsQuotingEnabled() || TopicWriteQuotaResourcePath.empty(); - if (size_t quotaRequestSize = skip ? 0 : GetQuotaRequestSize(*request)) { - // Request with data. We should check before attempting to write data whether we have enough quota. - Y_VERIFY(!WaitingForPreviousBlobQuota()); - - TopicQuotaRequestCookie = NextTopicWriteQuotaRequestCookie++; - RequestQuotaForWriteBlobRequest(quotaRequestSize, TopicQuotaRequestCookie); - } - + if (size_t quotaRequestSize = skip ? 0 : GetQuotaRequestSize(*request)) { + // Request with data. We should check before attempting to write data whether we have enough quota. + Y_VERIFY(!WaitingForPreviousBlobQuota()); + + TopicQuotaRequestCookie = NextTopicWriteQuotaRequestCookie++; + RequestQuotaForWriteBlobRequest(quotaRequestSize, TopicQuotaRequestCookie); + } + AddMetaKey(request.Get()); - WriteStartTime = TActivationContext::Now(); - // Write blob -#if 1 - // PQ -> CacheProxy -> KV - Send(BlobCache, request.Release()); -#else - Send(Tablet, request.Release()); -#endif -} - -void TPartition::CalcTopicWriteQuotaParams() -{ + WriteStartTime = TActivationContext::Now(); + // Write blob +#if 1 + // PQ -> CacheProxy -> KV + Send(BlobCache, request.Release()); +#else + Send(Tablet, request.Release()); +#endif +} + +void TPartition::CalcTopicWriteQuotaParams() +{ const auto& pqConfig = AppData()->PQConfig; const auto& quotingConfig = pqConfig.GetQuotingConfig(); if (IsQuotingEnabled()) { // Mirrored topics are not quoted in local dc. - Y_VERIFY(quotingConfig.GetTopicWriteQuotaEntityToLimit() != NKikimrPQ::TPQConfig::TQuotingConfig::UNSPECIFIED); - + Y_VERIFY(quotingConfig.GetTopicWriteQuotaEntityToLimit() != NKikimrPQ::TPQConfig::TQuotingConfig::UNSPECIFIED); + TString topicPath = TopicPath.empty() ? 
TopicName : TopicPath; TFsPath fsPath(topicPath); if (fsPath.IsSubpathOf(pqConfig.GetRoot())) { @@ -4725,21 +4725,21 @@ void TPartition::CalcTopicWriteQuotaParams() topicPath = NPersQueue::GetTopicPath(topicPath); auto topicParts = SplitPath(topicPath); // account/folder/topic // account is first element if (topicParts.size() < 2) { - LOG_WARN_S(TActivationContext::AsActorContext(), NKikimrServices::PERSQUEUE, + LOG_WARN_S(TActivationContext::AsActorContext(), NKikimrServices::PERSQUEUE, "tablet " << TabletID << " topic '" << topicPath << "' Bad topic name. Disable quoting for topic"); - return; - } - + return; + } + const TString account = topicParts[0]; topicParts[0] = WRITE_QUOTA_ROOT_PATH; // write-quota/folder/topic - + TopicWriteQuotaResourcePath = JoinPath(topicParts); - TopicWriteQuoterPath = TStringBuilder() << quotingConfig.GetQuotersDirectoryPath() << "/" << account; + TopicWriteQuoterPath = TStringBuilder() << quotingConfig.GetQuotersDirectoryPath() << "/" << account; LOG_DEBUG_S(TActivationContext::AsActorContext(), NKikimrServices::PERSQUEUE, "topicWriteQuutaResourcePath " << TopicWriteQuotaResourcePath << " topicWriteQuoterPath '" << TopicWriteQuoterPath << " account " << account); - } -} - + } +} + void TPartition::CreateMirrorerActor() { Mirrorer = MakeHolder<TMirrorerInfo>( Register(new TMirrorer(Tablet, SelfId(), TopicName, Partition, LocalDC, EndOffset, Config.GetPartitionConfig().GetMirrorFrom(), Counters)), diff --git a/ydb/core/persqueue/partition.h b/ydb/core/persqueue/partition.h index 2eec26f2516..18ad383218f 100644 --- a/ydb/core/persqueue/partition.h +++ b/ydb/core/persqueue/partition.h @@ -157,7 +157,7 @@ private: void HandleOnIdle(TEvPQ::TEvSplitMessageGroup::TPtr& ev, const TActorContext& ctx); void HandleOnWrite(TEvPQ::TEvSplitMessageGroup::TPtr& ev, const TActorContext& ctx); - void Handle(TEvQuota::TEvClearance::TPtr& ev, const TActorContext& ctx); + void Handle(TEvQuota::TEvClearance::TPtr& ev, const TActorContext& ctx); bool DropOldStuff(TEvKeyValue::TEvRequest* request, bool hasWrites, const TActorContext& ctx); //will fill sourceIds, request and NewHead @@ -245,8 +245,8 @@ private: template <typename TEv> TString EventStr(const char * func, const TEv& ev) { TStringStream ss; - ss << func << " event# " << ev->GetTypeRewrite() << " (" << ev->GetBase()->ToStringHeader() << "), Tablet " << Tablet << ", Partition " << Partition - << ", Sender " << ev->Sender.ToString() << ", Recipient " << ev->Recipient.ToString() << ", Cookie: " << ev->Cookie; + ss << func << " event# " << ev->GetTypeRewrite() << " (" << ev->GetBase()->ToStringHeader() << "), Tablet " << Tablet << ", Partition " << Partition + << ", Sender " << ev->Sender.ToString() << ", Recipient " << ev->Recipient.ToString() << ", Cookie: " << ev->Cookie; return ss.Str(); } @@ -311,7 +311,7 @@ private: HFuncTraced(TEvPQ::TEvUpdateAvailableSize, HandleOnIdle); HFuncTraced(TEvPQ::TEvReserveBytes, Handle); HFuncTraced(TEvPQ::TEvPipeDisconnected, Handle); - HFuncTraced(TEvQuota::TEvClearance, Handle); + HFuncTraced(TEvQuota::TEvClearance, Handle); HFuncTraced(TEvPQ::TEvQuotaDeadlineCheck, Handle); HFuncTraced(TEvPQ::TEvRegisterMessageGroup, HandleOnIdle); HFuncTraced(TEvPQ::TEvDeregisterMessageGroup, HandleOnIdle); @@ -360,7 +360,7 @@ private: HFuncTraced(TEvPQ::TEvPipeDisconnected, Handle); HFuncTraced(TEvPQ::TEvUpdateAvailableSize, HandleOnWrite); HFuncTraced(TEvPQ::TEvQuotaDeadlineCheck, Handle); - HFuncTraced(TEvQuota::TEvClearance, Handle); + HFuncTraced(TEvQuota::TEvClearance, Handle); 
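// A standalone sketch (not part of the commit) of the path mapping performed by
// CalcTopicWriteQuotaParams() in the partition.cpp hunk above, assuming a topic path of
// the form "account/folder/topic"; the struct and helper names here are illustrative.
#include <util/generic/string.h>
#include <util/generic/vector.h>
#include <util/string/join.h>

struct TTopicWriteQuotaPaths {
    TString QuoterPath;    // "<QuotersDirectoryPath>/account" - selects the per-account quoter
    TString ResourcePath;  // "write-quota/folder/topic" - resource inside that quoter
};

TTopicWriteQuotaPaths MakeTopicWriteQuotaPaths(const TString& quotersDir, const TVector<TString>& topicParts) {
    // topicParts is expected to be {account, folder, ..., topic} with at least two elements,
    // mirroring the SplitPath() result that CalcTopicWriteQuotaParams() validates.
    TVector<TString> resourceParts = topicParts;
    resourceParts[0] = "write-quota";  // same role as WRITE_QUOTA_ROOT_PATH in partition.cpp
    return {quotersDir + "/" + topicParts[0], JoinSeq("/", resourceParts)};
}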
HFuncTraced(TEvPQ::TEvRegisterMessageGroup, HandleOnWrite); HFuncTraced(TEvPQ::TEvDeregisterMessageGroup, HandleOnWrite); HFuncTraced(TEvPQ::TEvSplitMessageGroup, HandleOnWrite); @@ -387,11 +387,11 @@ private: void WriteBlobWithQuota(THolder<TEvKeyValue::TEvRequest>&& request); void AddMetaKey(TEvKeyValue::TEvRequest* request); - size_t GetQuotaRequestSize(const TEvKeyValue::TEvRequest& request); - void RequestQuotaForWriteBlobRequest(size_t dataSize, ui64 cookie); - void CalcTopicWriteQuotaParams(); - bool WaitingForPreviousBlobQuota() const; - + size_t GetQuotaRequestSize(const TEvKeyValue::TEvRequest& request); + void RequestQuotaForWriteBlobRequest(size_t dataSize, ui64 cookie); + void CalcTopicWriteQuotaParams(); + bool WaitingForPreviousBlobQuota() const; + private: void UpdateUserInfoEndOffset(const TInstant& now); @@ -615,10 +615,10 @@ private: ui64 HasDataReqNum; TQuotaTracker WriteQuota; - THolder<TPercentileCounter> PartitionWriteQuotaWaitCounter; + THolder<TPercentileCounter> PartitionWriteQuotaWaitCounter; TInstant QuotaDeadline = TInstant::Zero(); - - TVector<NSlidingWindow::TSlidingWindow<NSlidingWindow::TSumOperation<ui64>>> AvgWriteBytes; + + TVector<NSlidingWindow::TSlidingWindow<NSlidingWindow::TSumOperation<ui64>>> AvgWriteBytes; TVector<NSlidingWindow::TSlidingWindow<NSlidingWindow::TSumOperation<ui64>>> AvgQuotaBytes; @@ -644,18 +644,18 @@ private: NKikimr::NPQ::TMultiCounter BytesWrittenUncompressed; NKikimr::NPQ::TMultiCounter BytesWrittenComp; NKikimr::NPQ::TMultiCounter MsgsWritten; - - // Writing blob with topic quota variables - ui64 TopicQuotaRequestCookie = 0; - // Wait topic quota metrics - THolder<TPercentileCounter> TopicWriteQuotaWaitCounter; - TInstant StartTopicQuotaWaitTimeForCurrentBlob; + + // Writing blob with topic quota variables + ui64 TopicQuotaRequestCookie = 0; + // Wait topic quota metrics + THolder<TPercentileCounter> TopicWriteQuotaWaitCounter; + TInstant StartTopicQuotaWaitTimeForCurrentBlob; TInstant WriteStartTime; - TDuration TopicQuotaWaitTimeForCurrentBlob; - // Topic quota parameters - TString TopicWriteQuoterPath; - TString TopicWriteQuotaResourcePath; - ui64 NextTopicWriteQuotaRequestCookie = 1; + TDuration TopicQuotaWaitTimeForCurrentBlob; + // Topic quota parameters + TString TopicWriteQuoterPath; + TString TopicWriteQuotaResourcePath; + ui64 NextTopicWriteQuotaRequestCookie = 1; TDeque<NKikimrPQ::TStatusResponse::TErrorMessage> Errors; diff --git a/ydb/core/persqueue/percentile_counter.cpp b/ydb/core/persqueue/percentile_counter.cpp index a6414e70bd8..ca798d1b786 100644 --- a/ydb/core/persqueue/percentile_counter.cpp +++ b/ydb/core/persqueue/percentile_counter.cpp @@ -132,7 +132,7 @@ TMultiCounter::operator bool() { TPercentileCounter::TPercentileCounter(TIntrusivePtr<NMonitoring::TDynamicCounters> counters, const TVector<TLabelsInfo>& labels, const TVector<std::pair<TString, TString>>& subgroups, const TString& sensor, - const TVector<std::pair<ui64, TString>>& intervals, const bool deriv, bool expiring) + const TVector<std::pair<ui64, TString>>& intervals, const bool deriv, bool expiring) { Y_VERIFY(!intervals.empty()); Counters.reserve(intervals.size()); @@ -145,27 +145,27 @@ TPercentileCounter::TPercentileCounter(TIntrusivePtr<NMonitoring::TDynamicCounte } void TPercentileCounter::IncFor(ui64 key, ui64 value) { - if (!Ranges.empty()) { - ui32 i = 0; - // The last range value is Max<ui64>(). 
- while (Ranges[i] < key) { - ++i; - } - Y_ASSERT(i < Ranges.size()); + if (!Ranges.empty()) { + ui32 i = 0; + // The last range value is Max<ui64>(). + while (Ranges[i] < key) { + ++i; + } + Y_ASSERT(i < Ranges.size()); Counters[i].Inc(value); - } + } } void TPercentileCounter::DecFor(ui64 key, ui64 value) { - if (!Ranges.empty()) { - ui32 i = 0; - // The last range value is Max<ui64>(). - while (Ranges[i] < key) { - ++i; - } - Y_ASSERT(i < Ranges.size()); + if (!Ranges.empty()) { + ui32 i = 0; + // The last range value is Max<ui64>(). + while (Ranges[i] < key) { + ++i; + } + Y_ASSERT(i < Ranges.size()); Counters[i].Dec(value); - } + } } NKikimr::NPQ::TPercentileCounter CreateSLIDurationCounter(TIntrusivePtr<NMonitoring::TDynamicCounters> counters, TVector<NPQ::TLabelsInfo> aggr, const TString name, ui32 border, TVector<ui32> durations) diff --git a/ydb/core/persqueue/percentile_counter.h b/ydb/core/persqueue/percentile_counter.h index 74ee43d2872..66ef4b2ec40 100644 --- a/ydb/core/persqueue/percentile_counter.h +++ b/ydb/core/persqueue/percentile_counter.h @@ -55,8 +55,8 @@ public: const bool deriv, bool expiring = true); - void IncFor(ui64 key, ui64 value = 1); - void DecFor(ui64 key, ui64 value = 1); + void IncFor(ui64 key, ui64 value = 1); + void DecFor(ui64 key, ui64 value = 1); private: TVector<TMultiCounter> Counters; diff --git a/ydb/core/persqueue/user_info.h b/ydb/core/persqueue/user_info.h index 258e947c467..792e3118bbc 100644 --- a/ydb/core/persqueue/user_info.h +++ b/ydb/core/persqueue/user_info.h @@ -12,7 +12,7 @@ #include <ydb/library/persqueue/topic_parser/topic_parser.h> #include <library/cpp/sliding_window/sliding_window.h> - + #include <util/generic/set.h> namespace NKikimr { @@ -194,7 +194,7 @@ struct TUserInfo { ui32 Subscriptions; i64 EndOffset; - TVector<NSlidingWindow::TSlidingWindow<NSlidingWindow::TSumOperation<ui64>>> AvgReadBytes; + TVector<NSlidingWindow::TSlidingWindow<NSlidingWindow::TSumOperation<ui64>>> AvgReadBytes; NSlidingWindow::TSlidingWindow<NSlidingWindow::TMaxOperation<ui64>> WriteLagMs; @@ -277,7 +277,7 @@ struct TUserInfo { } ReadQuota.Exaust(readSize, now); for (auto& avg : AvgReadBytes) { - avg.Update(readSize, now); + avg.Update(readSize, now); } Y_VERIFY(ActiveReads > 0); --ActiveReads; diff --git a/ydb/core/protos/config.proto b/ydb/core/protos/config.proto index d64169d4fc0..75314aa4b40 100644 --- a/ydb/core/protos/config.proto +++ b/ydb/core/protos/config.proto @@ -679,30 +679,30 @@ message TFeatureFlags { } message TSqsConfig { - optional bool EnableSqs = 5; + optional bool EnableSqs = 5; optional string Root = 1 [default = "/Root/SQS"]; - // optional uint32 HttpPort = 2 [default = 8771]; // moved to HttpServerConfig - + // optional uint32 HttpPort = 2 [default = 8771]; // moved to HttpServerConfig + // URL that must be specified as an entry point to SQS cluster. // It must be used if SQS nodes are placed behind the balancer. // If this field is not specified, every node uses its own // fqdn here. 
optional string Endpoint = 3; - - optional THttpServerConfig HttpServerConfig = 4; - + + optional THttpServerConfig HttpServerConfig = 4; + // Turn on queue leaders option optional bool EnableQueueMaster = 6 [default = true]; // TODO: remove optional bool EnableQueueLeader = 68 [default = true]; - + // Time to live for queue attributes cache - optional uint64 QueueAttributesCacheTimeMs = 8 [default = 60000]; - + optional uint64 QueueAttributesCacheTimeMs = 8 [default = 60000]; + // Effective query execution time that is treated as slow query - optional uint64 SlowRequestTimeMs = 9 [default = 2500]; - + optional uint64 SlowRequestTimeMs = 9 [default = 2500]; + // Max time value for long polling (param WaitTimeSeconds for ReceiveMessage) - optional uint64 MaxWaitTimeoutMs = 10 [default = 20000]; + optional uint64 MaxWaitTimeoutMs = 10 [default = 20000]; optional uint32 SchemeCacheSoftRefreshTimeSeconds = 11 [default = 5]; optional uint32 SchemeCacheHardRefreshTimeSeconds = 12 [default = 10]; @@ -715,7 +715,7 @@ message TSqsConfig { optional string YandexCloudAccessServiceAddress = 29; // host:port optional string YandexCloudFolderServiceAddress = 30; // host:port optional string YandexCloudServiceRegion = 39 [default = "ru-central1"]; - + optional uint32 MeteringFlushingIntervalMs = 51 [default = 5000]; optional string MeteringLogFilePath = 52; @@ -723,120 +723,120 @@ message TSqsConfig { repeated string MeteringYandexNetCidr = 54; // TODO: remove both - optional uint32 MastersDescriberUpdateTimeMs = 17 [default = 10000]; - optional uint32 MasterConnectTimeoutMs = 18 [default = 10000]; - + optional uint32 MastersDescriberUpdateTimeMs = 17 [default = 10000]; + optional uint32 MasterConnectTimeoutMs = 18 [default = 10000]; + optional uint32 LeadersDescriberUpdateTimeMs = 69 [default = 10000]; optional uint32 LeaderConnectTimeoutMs = 70 [default = 10000]; - optional uint64 MinMessageRetentionPeriodMs = 19 [default = 60000]; - - // DB requests retries - optional uint64 TransactionTimeoutMs = 20 [default = 20000]; // If this amount of time elapsed, we don't retry - optional uint64 TransactionRetryWaitDurationMs = 21 [default = 500]; // First wait time before next retry (+ random component). Then - 2 * TransactionRetryWaitDurationMs and etc - optional uint64 TransactionMaxRetryWaitDurationMs = 22 [default = 2500]; // Max wait before next retry - - optional uint64 BackgroundMetricsUpdateTimeMs = 23 [default = 10000]; - - optional uint64 MaxNumberOfReceiveMessages = 24 [default = 10]; // MaxNumberOfMessages parameter in ReceiveMessage - - optional uint64 RequestTimeoutMs = 25 [default = 600000]; + optional uint64 MinMessageRetentionPeriodMs = 19 [default = 60000]; + + // DB requests retries + optional uint64 TransactionTimeoutMs = 20 [default = 20000]; // If this amount of time elapsed, we don't retry + optional uint64 TransactionRetryWaitDurationMs = 21 [default = 500]; // First wait time before next retry (+ random component). 
Then - 2 * TransactionRetryWaitDurationMs and etc + optional uint64 TransactionMaxRetryWaitDurationMs = 22 [default = 2500]; // Max wait before next retry + + optional uint64 BackgroundMetricsUpdateTimeMs = 23 [default = 10000]; + + optional uint64 MaxNumberOfReceiveMessages = 24 [default = 10]; // MaxNumberOfMessages parameter in ReceiveMessage + + optional uint64 RequestTimeoutMs = 25 [default = 600000]; optional bool ForceQueueCreationV2 = 26 [default = true]; // deprecated, TODO: remove from config optional bool ForceQueueDeletionV2 = 27 [default = true]; // deprecated, TODO: remove from config optional bool EnableDeadLetterQueues = 41 [default = false]; - - optional bool CreateLegacyDurationCounters = 28; - - optional uint64 MinTimeLeftForReceiveMessageWaitMs = 31 [default = 64]; // if wait deadline - now is less than this time, we don't wait in receive message - - optional bool CheckAllShardsInReceiveMessage = 32; // Check all shards in receive message event if deadline is expired now (setting for test) - - optional uint64 CleanupPeriodMs = 33 [default = 150000]; - optional uint64 CleanupBatchSize = 34 [default = 1000]; // Batch size for cleanup queries // Don't set big values - - optional uint64 DeduplicationPeriodMs = 35 [default = 300000]; // Period of time for which deduplication in fifo queues acts - optional uint64 GroupsReadAttemptIdsPeriodMs = 36 [default = 300000]; // Period of time for which group read attempt ids in fifo queues act - optional uint64 GroupSelectionBatchSize = 38 [default = 1000]; - - optional bool DoAutomaticMigration = 37 [default = true]; // Check and add new columns to queue tables during queue master start - - optional uint64 AddMesagesToInflyBatchSize = 40 [default = 2000]; - - message TBatchingPolicy { - // Optimal parameters: SQS-479 - optional uint64 BatchSize = 1 [default = 200]; - optional uint64 TransactionsMaxInflyPerShard = 2 [default = 20]; - } - - optional TBatchingPolicy StdQueueSendBatchingPolicy = 42; - optional TBatchingPolicy FifoQueueSendBatchingPolicy = 43; - optional TBatchingPolicy StdQueueDeleteBatchingPolicy = 44; - optional TBatchingPolicy FifoQueueDeleteBatchingPolicy = 45; - optional TBatchingPolicy StdQueueLoadBatchingPolicy = 46; - - message TQuotingConfig { - // Settings for quoting. 
- optional bool EnableQuoting = 1; - optional uint64 QuotaDeadlineMs = 2; - - message TDefaultActionsRates { - // Per queue - optional uint32 StdSendMessageRate = 1; // send message (+batch) - optional uint32 StdReceiveMessageRate = 2; // receive message - optional uint32 StdDeleteMessageRate = 3; // delete message (+batch) - optional uint32 StdChangeMessageVisibilityRate = 4; // change message visibility (+batch) - - optional uint32 FifoSendMessageRate = 5; // send message (+batch) - optional uint32 FifoReceiveMessageRate = 6; // receive message - optional uint32 FifoDeleteMessageRate = 7; // delete message (+batch) - optional uint32 FifoChangeMessageVisibilityRate = 8; // change message visibility (+batch) - - // Per user - optional uint32 CreateObjectsRate = 9; // create queue, create user - optional uint32 DeleteObjectsRate = 10; // delete queue, delete user - - optional uint32 OtherRequestsRate = 11; // control requests: get/set queue attributes, list queues, permissions modification, etc - } - - message TLocalRateLimiterConfig { - optional TDefaultActionsRates Rates = 1; - } - - message TKesusQuoterConfig { - // Not implemented - optional TDefaultActionsRates DefaultLimits = 1; - } - - // Only one of quoter configs could be set - optional TLocalRateLimiterConfig LocalRateLimiterConfig = 3; - optional TKesusQuoterConfig KesusQuoterConfig = 4; - } - - optional TQuotingConfig QuotingConfig = 47; + + optional bool CreateLegacyDurationCounters = 28; + + optional uint64 MinTimeLeftForReceiveMessageWaitMs = 31 [default = 64]; // if wait deadline - now is less than this time, we don't wait in receive message + + optional bool CheckAllShardsInReceiveMessage = 32; // Check all shards in receive message event if deadline is expired now (setting for test) + + optional uint64 CleanupPeriodMs = 33 [default = 150000]; + optional uint64 CleanupBatchSize = 34 [default = 1000]; // Batch size for cleanup queries // Don't set big values + + optional uint64 DeduplicationPeriodMs = 35 [default = 300000]; // Period of time for which deduplication in fifo queues acts + optional uint64 GroupsReadAttemptIdsPeriodMs = 36 [default = 300000]; // Period of time for which group read attempt ids in fifo queues act + optional uint64 GroupSelectionBatchSize = 38 [default = 1000]; + + optional bool DoAutomaticMigration = 37 [default = true]; // Check and add new columns to queue tables during queue master start + + optional uint64 AddMesagesToInflyBatchSize = 40 [default = 2000]; + + message TBatchingPolicy { + // Optimal parameters: SQS-479 + optional uint64 BatchSize = 1 [default = 200]; + optional uint64 TransactionsMaxInflyPerShard = 2 [default = 20]; + } + + optional TBatchingPolicy StdQueueSendBatchingPolicy = 42; + optional TBatchingPolicy FifoQueueSendBatchingPolicy = 43; + optional TBatchingPolicy StdQueueDeleteBatchingPolicy = 44; + optional TBatchingPolicy FifoQueueDeleteBatchingPolicy = 45; + optional TBatchingPolicy StdQueueLoadBatchingPolicy = 46; + + message TQuotingConfig { + // Settings for quoting. 
+ optional bool EnableQuoting = 1; + optional uint64 QuotaDeadlineMs = 2; + + message TDefaultActionsRates { + // Per queue + optional uint32 StdSendMessageRate = 1; // send message (+batch) + optional uint32 StdReceiveMessageRate = 2; // receive message + optional uint32 StdDeleteMessageRate = 3; // delete message (+batch) + optional uint32 StdChangeMessageVisibilityRate = 4; // change message visibility (+batch) + + optional uint32 FifoSendMessageRate = 5; // send message (+batch) + optional uint32 FifoReceiveMessageRate = 6; // receive message + optional uint32 FifoDeleteMessageRate = 7; // delete message (+batch) + optional uint32 FifoChangeMessageVisibilityRate = 8; // change message visibility (+batch) + + // Per user + optional uint32 CreateObjectsRate = 9; // create queue, create user + optional uint32 DeleteObjectsRate = 10; // delete queue, delete user + + optional uint32 OtherRequestsRate = 11; // control requests: get/set queue attributes, list queues, permissions modification, etc + } + + message TLocalRateLimiterConfig { + optional TDefaultActionsRates Rates = 1; + } + + message TKesusQuoterConfig { + // Not implemented + optional TDefaultActionsRates DefaultLimits = 1; + } + + // Only one of quoter configs could be set + optional TLocalRateLimiterConfig LocalRateLimiterConfig = 3; + optional TKesusQuoterConfig KesusQuoterConfig = 4; + } + + optional TQuotingConfig QuotingConfig = 47; optional bool EnableQueueAttributesValidation = 48 [default = true]; - - message TAccountSettingsDefaults { - optional int64 MaxQueuesCount = 1 [default = 50]; - } - - optional TAccountSettingsDefaults AccountSettingsDefaults = 49; - - optional bool AllowYandexAttributePrefix = 50; - - optional uint64 QueueCountersExportDelayMs = 55; // Export queue counters after queue creation with delay (milliseconds). - optional bool CreateLazyCounters = 56 [default = true]; - - optional uint64 UserSettingsUpdateTimeMs = 57 [default = 60000]; - optional uint64 UserSettingsReadBatchSize = 58 [default = 1000]; - optional uint64 QueuesListReadBatchSize = 59 [default = 1000]; - - optional bool ValidateMessageBody = 60; + + message TAccountSettingsDefaults { + optional int64 MaxQueuesCount = 1 [default = 50]; + } + + optional TAccountSettingsDefaults AccountSettingsDefaults = 49; + + optional bool AllowYandexAttributePrefix = 50; + + optional uint64 QueueCountersExportDelayMs = 55; // Export queue counters after queue creation with delay (milliseconds). 
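TSqsConfig's retry settings above (TransactionTimeoutMs, TransactionRetryWaitDurationMs, TransactionMaxRetryWaitDurationMs) describe a doubling backoff with a random component, capped per step and bounded by an overall deadline. A hedged sketch of how such a policy can be computed; the 50% jitter and the loop shape are assumptions for illustration, not the actual SQS proxy logic:

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <random>

using namespace std::chrono;

struct TRetryConfig {
    milliseconds TransactionTimeout{20000};             // stop retrying after this much total time
    milliseconds TransactionRetryWaitDuration{500};     // first wait, then doubled each retry
    milliseconds TransactionMaxRetryWaitDuration{2500}; // per-step cap
};

// Wait before retry number `attempt` (0-based): base doubles each time, capped,
// plus up to +50% random jitter (the jitter fraction is an assumption).
milliseconds BackoffDelay(const TRetryConfig& cfg, unsigned attempt, std::mt19937& rng) {
    auto base = cfg.TransactionRetryWaitDuration * (1u << std::min(attempt, 16u));
    base = std::min(base, cfg.TransactionMaxRetryWaitDuration);
    std::uniform_int_distribution<std::int64_t> jitter(0, base.count() / 2);
    return base + milliseconds(jitter(rng));
}

int main() {
    TRetryConfig cfg;
    std::mt19937 rng(42);
    milliseconds elapsed{0};
    for (unsigned attempt = 0; elapsed < cfg.TransactionTimeout && attempt < 6; ++attempt) {
        const auto wait = BackoffDelay(cfg, attempt, rng);
        elapsed += wait;  // ... re-issue the transaction after `wait` ...
    }
}

In the real proxy the deadline would be checked against wall-clock time rather than the sum of planned waits; the sum is used here only to keep the sketch self-contained.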
+ optional bool CreateLazyCounters = 56 [default = true]; + + optional uint64 UserSettingsUpdateTimeMs = 57 [default = 60000]; + optional uint64 UserSettingsReadBatchSize = 58 [default = 1000]; + optional uint64 QueuesListReadBatchSize = 59 [default = 1000]; + + optional bool ValidateMessageBody = 60; optional uint64 DlqNotificationGracePeriodMs = 61 [default = 60000]; - - optional uint64 AddMessagesToInflyCheckPeriodMs = 62 [default = 30000]; - optional uint64 AddMessagesToInflyMinCheckAttempts = 63 [default = 10]; + + optional uint64 AddMessagesToInflyCheckPeriodMs = 62 [default = 30000]; + optional uint64 AddMessagesToInflyMinCheckAttempts = 63 [default = 10]; optional uint64 MinimumGarbageAgeSeconds = 64 [default = 3600]; diff --git a/ydb/core/protos/counters_kesus.proto b/ydb/core/protos/counters_kesus.proto index 33df5de6266..a272555f0ce 100644 --- a/ydb/core/protos/counters_kesus.proto +++ b/ydb/core/protos/counters_kesus.proto @@ -15,7 +15,7 @@ enum ESimpleCounters { COUNTER_SEMAPHORE_COUNT = 4 [(CounterOpts) = {Name: "Semaphores"}]; COUNTER_SEMAPHORE_OWNER_COUNT = 5 [(CounterOpts) = {Name: "SemaphoreOwners"}]; COUNTER_SEMAPHORE_WAITER_COUNT = 6 [(CounterOpts) = {Name: "SemaphoreWaiters"}]; - COUNTER_QUOTER_RESOURCE_COUNT = 7 [(CounterOpts) = {Name: "QuoterResources"}]; + COUNTER_QUOTER_RESOURCE_COUNT = 7 [(CounterOpts) = {Name: "QuoterResources"}]; } enum ECumulativeCounters { @@ -37,10 +37,10 @@ enum ECumulativeCounters { COUNTER_REQS_SEMAPHORE_CREATE = 13 [(CounterOpts) = {Name: "Requests/SemaphoreCreate"}]; COUNTER_REQS_SEMAPHORE_DELETE = 14 [(CounterOpts) = {Name: "Requests/SemaphoreDelete"}]; COUNTER_REQS_SEMAPHORE_RELEASE = 15 [(CounterOpts) = {Name: "Requests/SemaphoreRelease"}]; - COUNTER_REQS_QUOTER_RESOURCE_DESCRIBE = 16 [(CounterOpts) = {Name: "Requests/QuoterResourceDescribe"}]; - COUNTER_REQS_QUOTER_RESOURCE_ADD = 17 [(CounterOpts) = {Name: "Requests/QuoterResourceAdd"}]; - COUNTER_REQS_QUOTER_RESOURCE_UPDATE = 18 [(CounterOpts) = {Name: "Requests/QuoterResourceUpdate"}]; - COUNTER_REQS_QUOTER_RESOURCE_DELETE = 19 [(CounterOpts) = {Name: "Requests/QuoterResourceDelete"}]; + COUNTER_REQS_QUOTER_RESOURCE_DESCRIBE = 16 [(CounterOpts) = {Name: "Requests/QuoterResourceDescribe"}]; + COUNTER_REQS_QUOTER_RESOURCE_ADD = 17 [(CounterOpts) = {Name: "Requests/QuoterResourceAdd"}]; + COUNTER_REQS_QUOTER_RESOURCE_UPDATE = 18 [(CounterOpts) = {Name: "Requests/QuoterResourceUpdate"}]; + COUNTER_REQS_QUOTER_RESOURCE_DELETE = 19 [(CounterOpts) = {Name: "Requests/QuoterResourceDelete"}]; } enum EPercentileCounters { @@ -71,8 +71,8 @@ enum ETxTypes { TXTYPE_SEMAPHORE_TIMEOUT = 15 [(TxTypeOpts) = {Name: "TxSemaphoreTimeout"}]; TXTYPE_SEMAPHORE_UPDATE = 16 [(TxTypeOpts) = {Name: "TxSemaphoreUpdate"}]; TXTYPE_SELF_CHECK = 17 [(TxTypeOpts) = {Name: "TxSelfCheck"}]; - TXTYPE_QUOTER_RESOURCE_DESCRIBE = 18 [(TxTypeOpts) = {Name: "TxQouterResourceDescribe"}]; - TXTYPE_QUOTER_RESOURCE_ADD = 19 [(TxTypeOpts) = {Name: "TxQouterResourceAdd"}]; - TXTYPE_QUOTER_RESOURCE_UPDATE = 20 [(TxTypeOpts) = {Name: "TxQouterResourceUpdate"}]; - TXTYPE_QUOTER_RESOURCE_DELETE = 21 [(TxTypeOpts) = {Name: "TxQouterResourceDelete"}]; + TXTYPE_QUOTER_RESOURCE_DESCRIBE = 18 [(TxTypeOpts) = {Name: "TxQouterResourceDescribe"}]; + TXTYPE_QUOTER_RESOURCE_ADD = 19 [(TxTypeOpts) = {Name: "TxQouterResourceAdd"}]; + TXTYPE_QUOTER_RESOURCE_UPDATE = 20 [(TxTypeOpts) = {Name: "TxQouterResourceUpdate"}]; + TXTYPE_QUOTER_RESOURCE_DELETE = 21 [(TxTypeOpts) = {Name: "TxQouterResourceDelete"}]; } diff --git 
a/ydb/core/protos/kesus.proto b/ydb/core/protos/kesus.proto index 7c03a3ccd95..9b4934a405a 100644 --- a/ydb/core/protos/kesus.proto +++ b/ydb/core/protos/kesus.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package NKikimrKesus; option java_package = "ru.yandex.kikimr.proto"; -option cc_enable_arenas = true; +option cc_enable_arenas = true; import "library/cpp/actors/protos/actors.proto"; import "ydb/public/api/protos/ydb_coordination.proto"; @@ -244,9 +244,9 @@ message TEvSessionStolen { uint64 SessionId = 2; } -// -// Quoter control API. -// +// +// Quoter control API. +// message TAccountingConfig { // Account consumed resources and send billing metrics. // Default value is false (not inherited). @@ -312,172 +312,172 @@ message TAccountingConfig { TMetric Overshoot = 10; } -message THierarchicalDRRResourceConfig { - // Resource consumption speed limit. - // Value is required for root resource. +message THierarchicalDRRResourceConfig { + // Resource consumption speed limit. + // Value is required for root resource. // 0 is equivalent to not set (inherit value from parent). - // Must be nonnegative. - double MaxUnitsPerSecond = 3; - - // Maximum burst size of resource consumption across the whole cluster - // divided by max_units_per_second. - // Default value is 1. - // This means that maximum burst size might be equal to max_units_per_second. - // 0 is equivalent to not set. - // Must be nonnegative. - double MaxBurstSizeCoefficient = 4; - - // Weight of this resource among resource siblings in current resource tree branch. - // This weight is applied during starvation to share resource among all resource siblings. - // Default value is 1. - uint32 Weight = 5; - + // Must be nonnegative. + double MaxUnitsPerSecond = 3; + + // Maximum burst size of resource consumption across the whole cluster + // divided by max_units_per_second. + // Default value is 1. + // This means that maximum burst size might be equal to max_units_per_second. + // 0 is equivalent to not set. + // Must be nonnegative. + double MaxBurstSizeCoefficient = 4; + + // Weight of this resource among resource siblings in current resource tree branch. + // This weight is applied during starvation to share resource among all resource siblings. + // Default value is 1. + uint32 Weight = 5; + // Prefetch in local bucket up to PrefetchCoefficient*MaxUnitsPerSecond units (full size). // Default value is inherited from parent or equals 0.2 for root. // Disables prefetching if any negative value is set // (It is useful to avoid bursts in case of large number of local buckets). double PrefetchCoefficient = 6; - + // Prefetching starts if there is less than PrefetchWatermark fraction of full local bucket left. // Default value is inherited from parent or equals 0.75 for root. // Must be nonnegative and less than or equal to 1. double PrefetchWatermark = 7; - - // deprecated settings { - message TSpeedSettings { - double MaxUnitsPerSecond = 1; // Max resource consuming speed (== quota). - } - TSpeedSettings SpeedSettings = 1; // If not set, value is inherited from parent's limit. Must be set in root node of the tree. - // deprecated settings } -} - -message TStreamingQuoterResource { - uint64 ResourceId = 1; // For describe request. - - // Resource path. Elements are separated by slash. - // The first symbol is not slash. - // The first element is root resource name. - // Resource path is the path of resource inside coordination node. - string ResourcePath = 2; - - oneof AlgorithmSpecificConfig { - // For Hierarhical DRR algorithm. 
- THierarchicalDRRResourceConfig HierarhicalDRRResourceConfig = 3; - } + + // deprecated settings { + message TSpeedSettings { + double MaxUnitsPerSecond = 1; // Max resource consuming speed (== quota). + } + TSpeedSettings SpeedSettings = 1; // If not set, value is inherited from parent's limit. Must be set in root node of the tree. + // deprecated settings } +} + +message TStreamingQuoterResource { + uint64 ResourceId = 1; // For describe request. + + // Resource path. Elements are separated by slash. + // The first symbol is not slash. + // The first element is root resource name. + // Resource path is the path of resource inside coordination node. + string ResourcePath = 2; + + oneof AlgorithmSpecificConfig { + // For Hierarhical DRR algorithm. + THierarchicalDRRResourceConfig HierarhicalDRRResourceConfig = 3; + } TAccountingConfig AccountingConfig = 4; -} - -message TEvDescribeQuoterResources { - // Specify ResourceId or ResourcePath to describe resources. - repeated uint64 ResourceIds = 1; - repeated string ResourcePaths = 2; - bool Recursive = 3; -} - -message TEvDescribeQuoterResourcesResult { - repeated TStreamingQuoterResource Resources = 2; - TKesusError Error = 3; -} - -message TEvAddQuoterResource { - TStreamingQuoterResource Resource = 3; -} - -message TEvAddQuoterResourceResult { - uint64 ResourceId = 1; - TKesusError Error = 2; -} - -message TEvUpdateQuoterResource { - TStreamingQuoterResource Resource = 3; -} - -message TEvUpdateQuoterResourceResult { - uint64 ResourceId = 1; - TKesusError Error = 2; -} - -message TEvDeleteQuoterResource { - // Specify ResourceId or ResourcePath. - uint64 ResourceId = 3; - string ResourcePath = 4; -} - -message TEvDeleteQuoterResourceResult { - TKesusError Error = 1; -} - -// -// Quoter runtime API. -// - -// The first event that is sent to Kesus for given resource. -// This creates session for specified resource if it is not created. -// Kesus will start allocating resource for this client (if Amount > 0). -message TEvSubscribeOnResources { - message TResourceSubscribeInfo { - string ResourcePath = 1; - bool StartConsuming = 2; // In case of StartConsuming == false this will create session, but doesn't start consuming of resource. - double InitialAmount = 3; - } - repeated TResourceSubscribeInfo Resources = 1; +} + +message TEvDescribeQuoterResources { + // Specify ResourceId or ResourcePath to describe resources. + repeated uint64 ResourceIds = 1; + repeated string ResourcePaths = 2; + bool Recursive = 3; +} + +message TEvDescribeQuoterResourcesResult { + repeated TStreamingQuoterResource Resources = 2; + TKesusError Error = 3; +} + +message TEvAddQuoterResource { + TStreamingQuoterResource Resource = 3; +} + +message TEvAddQuoterResourceResult { + uint64 ResourceId = 1; + TKesusError Error = 2; +} + +message TEvUpdateQuoterResource { + TStreamingQuoterResource Resource = 3; +} + +message TEvUpdateQuoterResourceResult { + uint64 ResourceId = 1; + TKesusError Error = 2; +} + +message TEvDeleteQuoterResource { + // Specify ResourceId or ResourcePath. + uint64 ResourceId = 3; + string ResourcePath = 4; +} + +message TEvDeleteQuoterResourceResult { + TKesusError Error = 1; +} + +// +// Quoter runtime API. +// + +// The first event that is sent to Kesus for given resource. +// This creates session for specified resource if it is not created. +// Kesus will start allocating resource for this client (if Amount > 0). 
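THierarchicalDRRResourceConfig's MaxUnitsPerSecond, PrefetchCoefficient and PrefetchWatermark feed the local bucket that TKesusQuoterProxy sizes further down in this commit: the full bucket is MaxUnitsPerSecond * PrefetchCoefficient, and a refill is requested once less than the watermark fraction of it remains. A minimal sketch of that arithmetic with the defaults quoted in the diff (0.2 and 0.75); the helper itself is illustrative, not the proxy code:

#include <algorithm>
#include <cstdio>

// Defaults quoted in kesus_quoter_proxy.cpp in this same commit.
constexpr double PrefetchCoefficientDefault = 0.20;
constexpr double PrefetchWatermarkDefault = 0.75;

struct TBucketSizes {
    double MaxSize;  // how much the proxy prefetches locally
    double MinSize;  // below this remainder it asks Kesus for more
};

TBucketSizes ComputeBucket(double maxUnitsPerSecond, double prefetchCoefficient, double prefetchWatermark) {
    const double prefetch = prefetchCoefficient != 0.0 ? prefetchCoefficient : PrefetchCoefficientDefault;
    const double watermark = std::clamp(prefetchWatermark != 0.0 ? prefetchWatermark : PrefetchWatermarkDefault, 0.0, 1.0);
    const double maxSize = std::max(0.0, maxUnitsPerSecond * prefetch);  // negative prefetch disables prefetching
    return {maxSize, maxSize * watermark};
}

int main() {
    const auto sizes = ComputeBucket(100.0, 0.0, 0.0);  // 0.0 means "not set", fall back to defaults
    std::printf("max=%.1f min=%.1f\n", sizes.MaxSize, sizes.MinSize);
}

With the quoted defaults and a 100 units/s resource this keeps roughly 20 units locally and asks Kesus for more once fewer than 15 remain.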
+message TEvSubscribeOnResources { + message TResourceSubscribeInfo { + string ResourcePath = 1; + bool StartConsuming = 2; // In case of StartConsuming == false this will create session, but doesn't start consuming of resource. + double InitialAmount = 3; + } + repeated TResourceSubscribeInfo Resources = 1; NActorsProto.TActorId ActorID = 2; -} - -message TEvSubscribeOnResourcesResult { - message TResourceSubscribeResult { - uint64 ResourceId = 1; - TKesusError Error = 2; - TStreamingQuoterResource EffectiveProps = 3; - } +} + +message TEvSubscribeOnResourcesResult { + message TResourceSubscribeResult { + uint64 ResourceId = 1; + TKesusError Error = 2; + TStreamingQuoterResource EffectiveProps = 3; + } repeated TResourceSubscribeResult Results = 1; // Resources are in the same order that they were specified in request. -} - -// Notification about resources consumption state update. -// Event is sent from client to Kesus. -// Has no result message. -message TEvUpdateConsumptionState { - message TResourceInfo { - uint64 ResourceId = 1; - bool ConsumeResource = 2; - double Amount = 3; - } - repeated TResourceInfo ResourcesInfo = 1; +} + +// Notification about resources consumption state update. +// Event is sent from client to Kesus. +// Has no result message. +message TEvUpdateConsumptionState { + message TResourceInfo { + uint64 ResourceId = 1; + bool ConsumeResource = 2; + double Amount = 3; + } + repeated TResourceInfo ResourcesInfo = 1; NActorsProto.TActorId ActorID = 2; -} - -// Ack for TEvUpdateConsumptionState event. -message TEvUpdateConsumptionStateAck { -} - -// Notification about new resource allocation. -// Event is sent from Kesus to client. -// Has no result message. -message TEvResourcesAllocated { - message TResourceInfo { - uint64 ResourceId = 1; - double Amount = 2; - TStreamingQuoterResource EffectiveProps = 3; // Props for resource if they has been changed - TKesusError StateNotification = 4; // Resource can be deleted during consuming. In this case Kesus will send an error for resource. - } - repeated TResourceInfo ResourcesInfo = 1; -} - -// Ack for TEvResourcesAllocated event. -message TEvResourcesAllocatedAck { -} - -message TEvGetQuoterResourceCounters { -} - -message TEvGetQuoterResourceCountersResult { - message TResourceCounters { - string ResourcePath = 1; - uint64 Allocated = 2; // Allocated amount from tablet start/resource creation for this resource and all its children. - } - - repeated TResourceCounters ResourceCounters = 1; -} +} + +// Ack for TEvUpdateConsumptionState event. +message TEvUpdateConsumptionStateAck { +} + +// Notification about new resource allocation. +// Event is sent from Kesus to client. +// Has no result message. +message TEvResourcesAllocated { + message TResourceInfo { + uint64 ResourceId = 1; + double Amount = 2; + TStreamingQuoterResource EffectiveProps = 3; // Props for resource if they has been changed + TKesusError StateNotification = 4; // Resource can be deleted during consuming. In this case Kesus will send an error for resource. + } + repeated TResourceInfo ResourcesInfo = 1; +} + +// Ack for TEvResourcesAllocated event. +message TEvResourcesAllocatedAck { +} + +message TEvGetQuoterResourceCounters { +} + +message TEvGetQuoterResourceCountersResult { + message TResourceCounters { + string ResourcePath = 1; + uint64 Allocated = 2; // Allocated amount from tablet start/resource creation for this resource and all its children. 
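The runtime messages above (TEvSubscribeOnResources, TEvUpdateConsumptionState, TEvResourcesAllocated) describe a push protocol: subscribe once per resource, toggle consumption, and accumulate whatever Kesus allocates. A rough client-side model of that bookkeeping in plain C++ (the real client is TKesusQuoterProxy later in this diff; the names and structure here are simplified assumptions):

#include <algorithm>
#include <cstdint>
#include <string>
#include <unordered_map>

// Client-side view of one subscribed resource, mirroring the proto comments:
// subscribe once, optionally start consuming immediately, then add whatever
// TEvResourcesAllocated grants to the locally available amount.
struct TResourceSession {
    std::uint64_t ResourceId = 0;
    bool Consuming = false;   // mirrors TEvUpdateConsumptionState.ConsumeResource
    double Pending = 0.0;     // amount asked for (InitialAmount / Amount)
    double Available = 0.0;   // granted so far via TEvResourcesAllocated
};

class TQuoterClientModel {
public:
    // TEvSubscribeOnResources: the first event for a resource; creates the session.
    TResourceSession& Subscribe(const std::string& path, std::uint64_t resourceId, bool startConsuming, double initialAmount) {
        auto& s = Sessions[path];
        s.ResourceId = resourceId;
        s.Consuming = startConsuming;
        s.Pending = initialAmount;
        return s;
    }

    // TEvUpdateConsumptionState: fire-and-forget apart from the ack.
    void UpdateConsumption(const std::string& path, bool consume, double amount) {
        auto& s = Sessions.at(path);
        s.Consuming = consume;
        s.Pending = amount;
    }

    // TEvResourcesAllocated: Kesus pushes granted amounts while consuming.
    void OnAllocated(const std::string& path, double amount) {
        auto& s = Sessions.at(path);
        s.Available += amount;
        s.Pending = std::max(0.0, s.Pending - amount);
    }

private:
    std::unordered_map<std::string, TResourceSession> Sessions;
};

The acks (TEvUpdateConsumptionStateAck, TEvResourcesAllocatedAck) are omitted here; in the proto they carry no payload and only confirm delivery.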
+ } + + repeated TResourceCounters ResourceCounters = 1; +} // Account consumed resources into metering. // Event is sent from client to Kesus. diff --git a/ydb/core/protos/msgbus.proto b/ydb/core/protos/msgbus.proto index df7cda59800..03eaea03a6a 100644 --- a/ydb/core/protos/msgbus.proto +++ b/ydb/core/protos/msgbus.proto @@ -610,14 +610,14 @@ message TSqsRequest { NKikimr.NSQS.TListUsersRequest ListUsers = 17; NKikimr.NSQS.TModifyPermissionsRequest ModifyPermissions = 18; NKikimr.NSQS.TListPermissionsRequest ListPermissions = 19; - NKikimr.NSQS.TDeleteQueueBatchRequest DeleteQueueBatch = 20; - NKikimr.NSQS.TPurgeQueueBatchRequest PurgeQueueBatch = 21; - NKikimr.NSQS.TGetQueueAttributesBatchRequest GetQueueAttributesBatch = 22; + NKikimr.NSQS.TDeleteQueueBatchRequest DeleteQueueBatch = 20; + NKikimr.NSQS.TPurgeQueueBatchRequest PurgeQueueBatch = 21; + NKikimr.NSQS.TGetQueueAttributesBatchRequest GetQueueAttributesBatch = 22; NKikimr.NSQS.TListDeadLetterSourceQueuesRequest ListDeadLetterSourceQueues = 23; NKikimr.NSQS.TCountQueuesRequest CountQueues = 24; } - optional string RequestId = 30; - optional bool RequestRateLimit = 31 [default = true]; + optional string RequestId = 30; + optional bool RequestRateLimit = 31 [default = true]; } message TSqsResponse { @@ -641,13 +641,13 @@ message TSqsResponse { NKikimr.NSQS.TListUsersResponse ListUsers = 17; NKikimr.NSQS.TModifyPermissionsResponse ModifyPermissions = 18; NKikimr.NSQS.TListPermissionsResponse ListPermissions = 19; - NKikimr.NSQS.TDeleteQueueBatchResponse DeleteQueueBatch = 20; - NKikimr.NSQS.TPurgeQueueBatchResponse PurgeQueueBatch = 21; - NKikimr.NSQS.TGetQueueAttributesBatchResponse GetQueueAttributesBatch = 22; + NKikimr.NSQS.TDeleteQueueBatchResponse DeleteQueueBatch = 20; + NKikimr.NSQS.TPurgeQueueBatchResponse PurgeQueueBatch = 21; + NKikimr.NSQS.TGetQueueAttributesBatchResponse GetQueueAttributesBatch = 22; NKikimr.NSQS.TListDeadLetterSourceQueuesResponse ListDeadLetterSourceQueues = 23; NKikimr.NSQS.TCountQueuesResponse CountQueues = 24; } - optional string RequestId = 30; + optional string RequestId = 30; optional string FolderId = 31; optional string ResourceId = 32; optional bool IsFifo = 33; diff --git a/ydb/core/protos/msgbus_pq.proto b/ydb/core/protos/msgbus_pq.proto index 849b292383a..54698225c57 100644 --- a/ydb/core/protos/msgbus_pq.proto +++ b/ydb/core/protos/msgbus_pq.proto @@ -405,7 +405,7 @@ message TPersQueuePartitionResponse { //write stat optional uint32 PartitionQuotedTimeMs = 8; - optional uint32 TopicQuotedTimeMs = 11; + optional uint32 TopicQuotedTimeMs = 11; optional uint32 TotalTimeInPartitionQueueMs = 9; optional uint32 WriteTimeMs = 10; } diff --git a/ydb/core/protos/pqconfig.proto b/ydb/core/protos/pqconfig.proto index 7c859274496..21eab91bace 100644 --- a/ydb/core/protos/pqconfig.proto +++ b/ydb/core/protos/pqconfig.proto @@ -47,7 +47,7 @@ message TPQConfig { optional string VersionTablePath = 16 [default = "/Root/PQ/Config/V2/Versions"]; optional uint32 ClustersUpdateTimeoutOnErrorSec = 17 [default = 1]; - + optional uint32 WriteInitLatencyBigMs = 19 [default = 900]; optional uint32 ReadInitLatencyBigMs = 20 [default = 900]; optional uint32 CommitLatencyBigMs = 21 [default = 900]; @@ -56,17 +56,17 @@ message TPQConfig { optional uint32 ReadLatencyFromDiskBigMs = 28 [default = 1000]; - message TQuotingConfig { - optional bool EnableQuoting = 1; - optional string QuotersDirectoryPath = 2 [default = "/Root/PersQueue/System/Quoters"]; - optional ELimitedEntity TopicWriteQuotaEntityToLimit = 3; - - // 
Enum for describing entities that quoter can limit. - enum ELimitedEntity { - UNSPECIFIED = 0; - WRITTEN_BLOB_SIZE = 1; // Written blob size, including additional data for compaction. - USER_PAYLOAD_SIZE = 2; // Message + source id size. - } + message TQuotingConfig { + optional bool EnableQuoting = 1; + optional string QuotersDirectoryPath = 2 [default = "/Root/PersQueue/System/Quoters"]; + optional ELimitedEntity TopicWriteQuotaEntityToLimit = 3; + + // Enum for describing entities that quoter can limit. + enum ELimitedEntity { + UNSPECIFIED = 0; + WRITTEN_BLOB_SIZE = 1; // Written blob size, including additional data for compaction. + USER_PAYLOAD_SIZE = 2; // Message + source id size. + } optional bool EnableReadQuoting = 4 [default = false]; optional uint64 ReadCreditBytes = 5 [default = 100000]; @@ -74,8 +74,8 @@ message TPQConfig { optional bool PartitionReadQuotaIsTwiceWriteQuota = 7 [default = false]; - } - optional TQuotingConfig QuotingConfig = 18; + } + optional TQuotingConfig QuotingConfig = 18; // Time duration that we wait before we consider remote cluster enabled for load balancing purposes optional uint32 RemoteClusterEnabledDelaySec = 24 [default = 300]; // 5 minutes diff --git a/ydb/core/protos/services.proto b/ydb/core/protos/services.proto index c17c8a7dc37..c3156860cf7 100644 --- a/ydb/core/protos/services.proto +++ b/ydb/core/protos/services.proto @@ -278,12 +278,12 @@ enum EServiceKikimr { BUILD_INDEX = 1000; METERING_WRITER = 1010; - - // Streaming - STREAMS = 1011; - STREAMS_SERVICE = 1012; - STREAMS_STORAGE_SERVICE = 1013; - STREAMS_SCHEDULER_SERVICE = 1014; + + // Streaming + STREAMS = 1011; + STREAMS_SERVICE = 1012; + STREAMS_STORAGE_SERVICE = 1013; + STREAMS_SCHEDULER_SERVICE = 1014; STREAMS_RESOURCE_SERVICE = 1015; STREAMS_CHECKPOINT_COORDINATOR = 1016; STREAMS_CONTROL_PLANE_SERVICE = 1017; @@ -298,8 +298,8 @@ enum EServiceKikimr { // Change exchange (async indexes & CDC) CHANGE_EXCHANGE = 1100; - - YDB_SDK = 1101; // Log component for logger in YDB SDK driver. + + YDB_SDK = 1101; // Log component for logger in YDB SDK driver. // Replication REPLICATION_CONTROLLER = 1200; diff --git a/ydb/core/protos/sqs.proto b/ydb/core/protos/sqs.proto index eae557a6241..f312d9bc0e9 100644 --- a/ydb/core/protos/sqs.proto +++ b/ydb/core/protos/sqs.proto @@ -21,7 +21,7 @@ message TError { optional uint32 Status = 1; optional string Message = 2; - optional string ErrorCode = 4; // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - list of error codes + optional string ErrorCode = 4; // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - list of error codes optional TDebug Debug = 3; } @@ -86,12 +86,12 @@ message TCreateQueueRequest { optional uint64 Shards = 4 [default = 4]; /// Number of partitions of data table. optional uint64 Partitions = 5 [default = 8]; - /// Autosplit partitions with data - optional bool EnableAutosplit = 8; - /// Size in bytes when partition will be splitted on two parts. - optional uint64 SizeToSplit = 9 [default = 1073741824]; // 1GB + /// Autosplit partitions with data + optional bool EnableAutosplit = 8; + /// Size in bytes when partition will be splitted on two parts. + optional uint64 SizeToSplit = 9 [default = 1073741824]; // 1GB /// Enable internal tables' transactions out of order execution. 
- optional bool EnableOutOfOrderTransactionsExecution = 6 [default = true]; + optional bool EnableOutOfOrderTransactionsExecution = 6 [default = true]; } message TCreateQueueResponse { @@ -106,7 +106,7 @@ message TCreateUserRequest { /// Sender's authentification. optional TAuthentification Auth = 1; /// Then user name we want to create. - optional string UserName = 2; + optional string UserName = 2; } message TCreateUserResponse { @@ -121,15 +121,15 @@ message TGetQueueAttributesRequest { optional string QueueName = 2; /// List of attribute's names to return. repeated string Names = 3; - /// Batch Id. Available only in case of batch request. - optional string Id = 4; + /// Batch Id. Available only in case of batch request. + optional string Id = 4; } message TGetQueueAttributesResponse { optional TError Error = 1; optional string RequestId = 2; - /// Batch Id. Available only in case of batch request. - optional string Id = 15; + /// Batch Id. Available only in case of batch request. + optional string Id = 15; /// The approximate number of visible messages in a queue. optional uint64 ApproximateNumberOfMessages = 3; /// The approximate number of messages that are waiting @@ -163,21 +163,21 @@ message TGetQueueAttributesResponse { optional string QueueArn = 17; } -message TGetQueueAttributesBatchRequest { - optional TCredentials Credentials = 100; - /// Sender's authentification. - optional TAuthentification Auth = 1; - /// List of attribute's names to return. - repeated string Names = 3; - repeated TGetQueueAttributesRequest Entries = 4; -} - -message TGetQueueAttributesBatchResponse { - optional TError Error = 1; - optional string RequestId = 2; - repeated TGetQueueAttributesResponse Entries = 3; -} - +message TGetQueueAttributesBatchRequest { + optional TCredentials Credentials = 100; + /// Sender's authentification. + optional TAuthentification Auth = 1; + /// List of attribute's names to return. + repeated string Names = 3; + repeated TGetQueueAttributesRequest Entries = 4; +} + +message TGetQueueAttributesBatchResponse { + optional TError Error = 1; + optional string RequestId = 2; + repeated TGetQueueAttributesResponse Entries = 3; +} + message TGetQueueUrlRequest { optional TCredentials Credentials = 100; /// Sender's authentification. @@ -228,36 +228,36 @@ message TDeleteQueueRequest { /// Sender's authentification. optional TAuthentification Auth = 1; optional string QueueName = 2; - /// Batch Id. Available only in case of batch response. - optional string Id = 3; + /// Batch Id. Available only in case of batch response. + optional string Id = 3; } message TDeleteQueueResponse { optional TError Error = 1; optional string RequestId = 2; - /// Batch Id. Available only in case of batch response. - optional string Id = 3; -} - -message TDeleteQueueBatchRequest { - optional TCredentials Credentials = 100; - /// Sender's authentification. - optional TAuthentification Auth = 1; - repeated TDeleteQueueRequest Entries = 2; -} - -message TDeleteQueueBatchResponse { - optional TError Error = 1; - optional string RequestId = 2; - repeated TDeleteQueueResponse Entries = 3; -} - + /// Batch Id. Available only in case of batch response. + optional string Id = 3; +} + +message TDeleteQueueBatchRequest { + optional TCredentials Credentials = 100; + /// Sender's authentification. 
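The batch messages in sqs.proto (TGetQueueAttributesBatch*, TDeleteQueueBatch* above and TPurgeQueueBatch* below) correlate each response entry with its request entry through the per-entry Id field noted in the comments ("Batch Id. Available only in case of batch request/response."). A tiny hypothetical helper showing that correlation; TBatchEntryResponse is a stand-in for illustration, not a generated proto type:

#include <string>
#include <unordered_map>
#include <vector>

// Each entry of a *BatchRequest carries a caller-chosen Id; the matching
// *BatchResponse repeats that Id, so results can be matched out of order.
struct TBatchEntryResponse {
    std::string Id;
    bool Ok = false;
    std::string Error;  // stands in for the TError message
};

std::unordered_map<std::string, TBatchEntryResponse>
IndexByEntryId(const std::vector<TBatchEntryResponse>& entries) {
    std::unordered_map<std::string, TBatchEntryResponse> byId;
    for (const auto& e : entries) {
        byId[e.Id] = e;  // the Id is the only correlation key the protocol promises
    }
    return byId;
}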
+ optional TAuthentification Auth = 1; + repeated TDeleteQueueRequest Entries = 2; +} + +message TDeleteQueueBatchResponse { + optional TError Error = 1; + optional string RequestId = 2; + repeated TDeleteQueueResponse Entries = 3; +} + message TDeleteUserRequest { optional TCredentials Credentials = 100; /// Sender's authentification. optional TAuthentification Auth = 1; /// Then user name we want to delete. - optional string UserName = 2; + optional string UserName = 2; } message TDeleteUserResponse { @@ -306,30 +306,30 @@ message TPurgeQueueRequest { /// Sender's authentification. optional TAuthentification Auth = 1; optional string QueueName = 2; - /// Batch Id. Available only in case of batch response. - optional string Id = 3; + /// Batch Id. Available only in case of batch response. + optional string Id = 3; } message TPurgeQueueResponse { optional TError Error = 1; optional string RequestId = 3; - /// Batch Id. Available only in case of batch response. - optional string Id = 4; -} - -message TPurgeQueueBatchRequest { - optional TCredentials Credentials = 100; - /// Sender's authentification. - optional TAuthentification Auth = 1; - repeated TPurgeQueueRequest Entries = 2; -} - -message TPurgeQueueBatchResponse { - optional TError Error = 1; - optional string RequestId = 2; - repeated TPurgeQueueResponse Entries = 3; -} - + /// Batch Id. Available only in case of batch response. + optional string Id = 4; +} + +message TPurgeQueueBatchRequest { + optional TCredentials Credentials = 100; + /// Sender's authentification. + optional TAuthentification Auth = 1; + repeated TPurgeQueueRequest Entries = 2; +} + +message TPurgeQueueBatchResponse { + optional TError Error = 1; + optional string RequestId = 2; + repeated TPurgeQueueResponse Entries = 3; +} + message TReceiveMessageRequest { optional TCredentials Credentials = 100; /// Sender's authentification. @@ -370,9 +370,9 @@ message TReceiveMessageResponse { optional string MessageGroupId = 5; /// An MD5 digest of the message body string. optional string MD5OfMessageBody = 6; - /// An MD5 digest of the non-URL-encoded message attribute string. - /// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html#sqs-attributes-md5-message-digest-calculation - optional string MD5OfMessageAttributes = 12; + /// An MD5 digest of the non-URL-encoded message attribute string. + /// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html#sqs-attributes-md5-message-digest-calculation + optional string MD5OfMessageAttributes = 12; /// Message's attributes. repeated TMessageAttribute MessageAttributes = 7; /// Message's data itself. @@ -421,7 +421,7 @@ message TSendMessageResponse { optional TError Error = 1; optional string RequestId = 2; /// An MD5 digest of the non-URL-encoded message attribute string. - /// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html#sqs-attributes-md5-message-digest-calculation + /// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html#sqs-attributes-md5-message-digest-calculation optional string MD5OfMessageAttributes = 3; /// An MD5 digest of the non-URL-encoded message body string. optional string MD5OfMessageBody = 4; @@ -525,7 +525,7 @@ message TListDeadLetterSourceQueuesRequest { optional TCredentials Credentials = 100; /// The name of the queue. 
optional string QueueName = 1; - optional TAuthentification Auth = 2; + optional TAuthentification Auth = 2; } message TListDeadLetterSourceQueuesResponse { diff --git a/ydb/core/quoter/debug_info.cpp b/ydb/core/quoter/debug_info.cpp index 569fc792f9c..8b2dc4f5520 100644 --- a/ydb/core/quoter/debug_info.cpp +++ b/ydb/core/quoter/debug_info.cpp @@ -1,11 +1,11 @@ -#include "debug_info.h" - -#ifdef QUOTER_SYSTEM_DEBUG_INFO - -namespace NKikimr::NQuoter { - -TDebugInfoHolder DebugInfo; - -} // namespace NKikimr::NQuoter - -#endif // QUOTER_SYSTEM_DEBUG_INFO +#include "debug_info.h" + +#ifdef QUOTER_SYSTEM_DEBUG_INFO + +namespace NKikimr::NQuoter { + +TDebugInfoHolder DebugInfo; + +} // namespace NKikimr::NQuoter + +#endif // QUOTER_SYSTEM_DEBUG_INFO diff --git a/ydb/core/quoter/debug_info.h b/ydb/core/quoter/debug_info.h index 9b529713ffb..da65e2f9175 100644 --- a/ydb/core/quoter/debug_info.h +++ b/ydb/core/quoter/debug_info.h @@ -1,60 +1,60 @@ -#pragma once -#define QUOTER_SYSTEM_DEBUG_INFO // uncomment this or build with -DQUOTER_SYSTEM_DEBUG_INFO - -#ifdef QUOTER_SYSTEM_DEBUG_INFO -#define QUOTER_SYSTEM_DEBUG(action) action -#else // QUOTER_SYSTEM_DEBUG_INFO -#define QUOTER_SYSTEM_DEBUG(action) -#endif // QUOTER_SYSTEM_DEBUG_INFO - -#ifdef QUOTER_SYSTEM_DEBUG_INFO - -#include <util/generic/hash.h> -#include <util/system/spinlock.h> - -namespace NKikimr::NQuoter { - -// Structure for using in debug purpuses with gdb. -// Can show all main Quoter System objects in coredump. -struct TDebugInfo { - class TQuoterService* QuoterService = nullptr; - THashMap<TString, class TKesusQuoterProxy*> KesusQuoterProxies; -}; - -// Helper for safe access to debug info. -class TDebugInfoHolder { -public: - class TAutoGuarder { - friend class TDebugInfoHolder; - - TAutoGuarder(TDebugInfoHolder& parent) - : Parent(parent) - , Guard(Parent.Lock) - { - } - - public: - TDebugInfo* operator->() { - return &Parent.DebugInfo; - } - - private: - TDebugInfoHolder& Parent; - TGuard<TAdaptiveLock> Guard; - }; - - // Returns safe (guarded) debug info to write to. - TAutoGuarder operator->() { - return { *this }; - } - -private: - TDebugInfo DebugInfo; - TAdaptiveLock Lock; -}; - -extern TDebugInfoHolder DebugInfo; - -} // namespace NKikimr::NQuoter - -#endif // QUOTER_SYSTEM_DEBUG_INFO +#pragma once +#define QUOTER_SYSTEM_DEBUG_INFO // uncomment this or build with -DQUOTER_SYSTEM_DEBUG_INFO + +#ifdef QUOTER_SYSTEM_DEBUG_INFO +#define QUOTER_SYSTEM_DEBUG(action) action +#else // QUOTER_SYSTEM_DEBUG_INFO +#define QUOTER_SYSTEM_DEBUG(action) +#endif // QUOTER_SYSTEM_DEBUG_INFO + +#ifdef QUOTER_SYSTEM_DEBUG_INFO + +#include <util/generic/hash.h> +#include <util/system/spinlock.h> + +namespace NKikimr::NQuoter { + +// Structure for using in debug purpuses with gdb. +// Can show all main Quoter System objects in coredump. +struct TDebugInfo { + class TQuoterService* QuoterService = nullptr; + THashMap<TString, class TKesusQuoterProxy*> KesusQuoterProxies; +}; + +// Helper for safe access to debug info. +class TDebugInfoHolder { +public: + class TAutoGuarder { + friend class TDebugInfoHolder; + + TAutoGuarder(TDebugInfoHolder& parent) + : Parent(parent) + , Guard(Parent.Lock) + { + } + + public: + TDebugInfo* operator->() { + return &Parent.DebugInfo; + } + + private: + TDebugInfoHolder& Parent; + TGuard<TAdaptiveLock> Guard; + }; + + // Returns safe (guarded) debug info to write to. 
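TDebugInfoHolder's operator-> hands out a temporary TAutoGuarder that grabs the TAdaptiveLock in its constructor and releases it when the temporary is destroyed at the end of the full expression, so each DebugInfo->... access is serialized. The same idiom expressed with standard-library types only (a sketch of the pattern, not the YDB class):

#include <mutex>

// Data we want to expose only under a lock.
struct TState {
    int Value = 0;
};

class TLockedState {
public:
    class TGuarded {
        friend class TLockedState;
        explicit TGuarded(TLockedState& parent)
            : Parent(parent)
            , Guard(parent.Lock)  // lock is held for the lifetime of this temporary
        {}
    public:
        TState* operator->() { return &Parent.State; }
    private:
        TLockedState& Parent;
        std::lock_guard<std::mutex> Guard;
    };

    // Each `holder->Member` expression locks, accesses, and unlocks.
    TGuarded operator->() { return TGuarded(*this); }

private:
    TState State;
    std::mutex Lock;
};

int main() {
    TLockedState holder;
    holder->Value = 42;  // lock is taken before the write and released right after
}

The YDB version uses TGuard<TAdaptiveLock> instead of std::lock_guard, but the lifetime trick that makes the guarded access safe is the same.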
+ TAutoGuarder operator->() { + return { *this }; + } + +private: + TDebugInfo DebugInfo; + TAdaptiveLock Lock; +}; + +extern TDebugInfoHolder DebugInfo; + +} // namespace NKikimr::NQuoter + +#endif // QUOTER_SYSTEM_DEBUG_INFO diff --git a/ydb/core/quoter/kesus_quoter_proxy.cpp b/ydb/core/quoter/kesus_quoter_proxy.cpp index 4d2d20691e9..dc95ae51411 100644 --- a/ydb/core/quoter/kesus_quoter_proxy.cpp +++ b/ydb/core/quoter/kesus_quoter_proxy.cpp @@ -1,173 +1,173 @@ -#include "kesus_quoter_proxy.h" +#include "kesus_quoter_proxy.h" #include "quoter_service_impl.h" -#include "debug_info.h" - +#include "debug_info.h" + #include <ydb/core/base/counters.h> #include <ydb/core/base/path.h> #include <ydb/core/kesus/tablet/events.h> #include <ydb/core/util/time_series_vec.h> - + #include <ydb/library/yql/public/issue/yql_issue_message.h> - + #include <library/cpp/actors/core/hfunc.h> #include <library/cpp/actors/core/log.h> #include <library/cpp/actors/core/actor_bootstrapped.h> - + #include <util/generic/map.h> #include <util/generic/hash.h> -#include <util/system/types.h> - -#include <limits> -#include <cmath> - -#if defined PLOG_TRACE || defined PLOG_DEBUG || defined PLOG_INFO || defined PLOG_WARN || defined PLOG_ERROR \ - || defined KESUS_PROXY_LOG_TRACE || defined KESUS_PROXY_LOG_DEBUG || defined KESUS_PROXY_LOG_INFO || defined KESUS_PROXY_LOG_WARN || defined KESUS_PROXY_LOG_ERROR -#error log macro definition clash -#endif - -#define PLOG_TRACE(stream) LOG_TRACE_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_PROXY, stream) -#define PLOG_DEBUG(stream) LOG_DEBUG_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_PROXY, stream) -#define PLOG_INFO(stream) LOG_INFO_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_PROXY, stream) -#define PLOG_WARN(stream) LOG_WARN_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_PROXY, stream) -#define PLOG_ERROR(stream) LOG_ERROR_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_PROXY, stream) - -#define KESUS_PROXY_LOG_TRACE(stream) PLOG_TRACE(LogPrefix << stream) -#define KESUS_PROXY_LOG_DEBUG(stream) PLOG_DEBUG(LogPrefix << stream) -#define KESUS_PROXY_LOG_INFO(stream) PLOG_INFO(LogPrefix << stream) -#define KESUS_PROXY_LOG_WARN(stream) PLOG_WARN(LogPrefix << stream) -#define KESUS_PROXY_LOG_ERROR(stream) PLOG_ERROR(LogPrefix << stream) - +#include <util/system/types.h> + +#include <limits> +#include <cmath> + +#if defined PLOG_TRACE || defined PLOG_DEBUG || defined PLOG_INFO || defined PLOG_WARN || defined PLOG_ERROR \ + || defined KESUS_PROXY_LOG_TRACE || defined KESUS_PROXY_LOG_DEBUG || defined KESUS_PROXY_LOG_INFO || defined KESUS_PROXY_LOG_WARN || defined KESUS_PROXY_LOG_ERROR +#error log macro definition clash +#endif + +#define PLOG_TRACE(stream) LOG_TRACE_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_PROXY, stream) +#define PLOG_DEBUG(stream) LOG_DEBUG_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_PROXY, stream) +#define PLOG_INFO(stream) LOG_INFO_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_PROXY, stream) +#define PLOG_WARN(stream) LOG_WARN_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_PROXY, stream) +#define PLOG_ERROR(stream) LOG_ERROR_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_PROXY, stream) + +#define KESUS_PROXY_LOG_TRACE(stream) PLOG_TRACE(LogPrefix << stream) +#define KESUS_PROXY_LOG_DEBUG(stream) PLOG_DEBUG(LogPrefix << stream) +#define 
KESUS_PROXY_LOG_INFO(stream) PLOG_INFO(LogPrefix << stream) +#define KESUS_PROXY_LOG_WARN(stream) PLOG_WARN(LogPrefix << stream) +#define KESUS_PROXY_LOG_ERROR(stream) PLOG_ERROR(LogPrefix << stream) + namespace NKikimr { namespace NQuoter { -static constexpr double FADING_ALLOCATION_COEFFICIENT = 0.999; +static constexpr double FADING_ALLOCATION_COEFFICIENT = 0.999; static constexpr double PREFETCH_COEFFICIENT_DEFAULT = 0.20; static constexpr double PREFETCH_WATERMARK_DEFAULT = 0.75; - -using NKesus::TEvKesus; - + +using NKesus::TEvKesus; + class TKesusQuoterProxy : public TActorBootstrapped<TKesusQuoterProxy> { struct TResourceState { const TString Resource; - ui64 ResId = Max<ui64>(); - - double Available = 0; - double QueueWeight = 0; - double ResourceBucketMaxSize = 0; - double ResourceBucketMinSize = 0; - bool SessionIsActive = false; - bool ProxySessionWasSent = false; - TInstant LastAllocated = TInstant::Zero(); - std::pair<TDuration, double> AverageAllocationParams = {TDuration::Zero(), 0.0}; - - NKikimrKesus::TStreamingQuoterResource Props; - bool InitedProps = false; - - TKesusResourceAllocationStatistics AllocStats; - + ui64 ResId = Max<ui64>(); + + double Available = 0; + double QueueWeight = 0; + double ResourceBucketMaxSize = 0; + double ResourceBucketMinSize = 0; + bool SessionIsActive = false; + bool ProxySessionWasSent = false; + TInstant LastAllocated = TInstant::Zero(); + std::pair<TDuration, double> AverageAllocationParams = {TDuration::Zero(), 0.0}; + + NKikimrKesus::TStreamingQuoterResource Props; + bool InitedProps = false; + + TKesusResourceAllocationStatistics AllocStats; + THolder<TTimeSeriesVec<double>> History; bool PendingReport = false; // History contains data to send TInstant HistoryAccepted; // Do not report history before this instant TInstant LastReport; // Aligned to `ReportPeriod` grid timestamp of last sent report TDuration ReportPeriod = TDuration::Max(); - struct TCounters { - class TDoubleCounter { - public: - TDoubleCounter() = default; - - TDoubleCounter(NMonitoring::TDynamicCounters::TCounterPtr counter) - : Counter(std::move(counter)) - { - } - - TDoubleCounter& operator=(NMonitoring::TDynamicCounters::TCounterPtr counter) { - Counter = std::move(counter); - return *this; - } - - TDoubleCounter& operator+=(double value) { - value += Remainder; - const double counterIncrease = std::floor(value); - Remainder = value - counterIncrease; - if (Counter) { - *Counter += static_cast<i64>(counterIncrease); - } - return *this; - } - - private: - NMonitoring::TDynamicCounters::TCounterPtr Counter; - double Remainder = 0.0; - }; - - std::vector<NMonitoring::TDynamicCounters::TCounterPtr> ParentConsumed; // Aggregated consumed counters for parent resources. 
- NMonitoring::TDynamicCounters::TCounterPtr QueueSize; - NMonitoring::TDynamicCounters::TCounterPtr QueueWeight; - NMonitoring::TDynamicCounters::TCounterPtr Dropped; - NMonitoring::TDynamicCounters::TCounterPtr Accumulated; - TDoubleCounter AllocatedOffline; - TDoubleCounter ReceivedFromKesus; - - TCounters(const TString& resource, const NMonitoring::TDynamicCounterPtr& quoterCounters) { - if (!quoterCounters) { - return; - } - - auto splittedPath = SplitPath(resource); - Y_VERIFY(!splittedPath.empty()); - ParentConsumed.reserve(splittedPath.size() - 1); - for (auto pathIter = splittedPath.begin() + 1; pathIter != splittedPath.end(); ++pathIter) { - const TString parentResourceName = NKesus::CanonizeQuoterResourcePath(TVector<TString>(splittedPath.begin(), pathIter)); - const auto resourceCounters = quoterCounters->GetSubgroup(RESOURCE_COUNTER_SENSOR_NAME, parentResourceName); - ParentConsumed.emplace_back(resourceCounters->GetCounter(CONSUMED_COUNTER_NAME, true)); - } - - const auto resourceCounters = quoterCounters->GetSubgroup(RESOURCE_COUNTER_SENSOR_NAME, NKesus::CanonizeQuoterResourcePath(splittedPath)); - QueueSize = resourceCounters->GetExpiringCounter(RESOURCE_QUEUE_SIZE_COUNTER_SENSOR_NAME, false); - QueueWeight = resourceCounters->GetExpiringCounter(RESOURCE_QUEUE_WEIGHT_COUNTER_SENSOR_NAME, false); - AllocatedOffline = resourceCounters->GetCounter(RESOURCE_ALLOCATED_OFFLINE_COUNTER_SENSOR_NAME, true); - Dropped = resourceCounters->GetCounter(RESOURCE_DROPPED_COUNTER_SENSOR_NAME, true); - Accumulated = resourceCounters->GetExpiringCounter(RESOURCE_ACCUMULATED_COUNTER_SENSOR_NAME, false); - ReceivedFromKesus = resourceCounters->GetCounter(RESOURCE_RECEIVED_FROM_KESUS_COUNTER_SENSOR_NAME, true); - } - - void AddConsumed(ui64 consumed) { - for (NMonitoring::TDynamicCounters::TCounterPtr& counter : ParentConsumed) { - *counter += consumed; - } - } - }; - - TCounters Counters; - - explicit TResourceState(const TString& resource, const NMonitoring::TDynamicCounterPtr& quoterCounters) - : Resource(resource) - , Counters(resource, quoterCounters) + struct TCounters { + class TDoubleCounter { + public: + TDoubleCounter() = default; + + TDoubleCounter(NMonitoring::TDynamicCounters::TCounterPtr counter) + : Counter(std::move(counter)) + { + } + + TDoubleCounter& operator=(NMonitoring::TDynamicCounters::TCounterPtr counter) { + Counter = std::move(counter); + return *this; + } + + TDoubleCounter& operator+=(double value) { + value += Remainder; + const double counterIncrease = std::floor(value); + Remainder = value - counterIncrease; + if (Counter) { + *Counter += static_cast<i64>(counterIncrease); + } + return *this; + } + + private: + NMonitoring::TDynamicCounters::TCounterPtr Counter; + double Remainder = 0.0; + }; + + std::vector<NMonitoring::TDynamicCounters::TCounterPtr> ParentConsumed; // Aggregated consumed counters for parent resources. 
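TDoubleCounter above bridges double-valued resource amounts and the integer monitoring sensor: each addition publishes only the integral part and carries the fractional remainder into the next call, so repeated sub-unit allocations are not lost to truncation. A standalone sketch of that accumulation rule, with a plain int64_t in place of the TDynamicCounters counter:

#include <cassert>
#include <cmath>
#include <cstdint>

// Accumulates doubles into an integer counter without losing the fractional part:
// only floor(pending) is published, the remainder is kept for the next call.
class TFractionalAccumulator {
public:
    void Add(double value) {
        value += Remainder;
        const double whole = std::floor(value);
        Remainder = value - whole;
        Published += static_cast<std::int64_t>(whole);
    }

    std::int64_t Value() const { return Published; }

private:
    std::int64_t Published = 0;
    double Remainder = 0.0;
};

int main() {
    TFractionalAccumulator acc;
    for (int i = 0; i < 10; ++i) {
        acc.Add(0.25);  // ten quarter-unit additions publish 2; 0.5 waits for the next call
    }
    assert(acc.Value() == 2);
}

Both AllocatedOffline and ReceivedFromKesus in the struct above are fed through this wrapper, so their sensors stay consistent even when individual allocations are fractions of a unit.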
+ NMonitoring::TDynamicCounters::TCounterPtr QueueSize; + NMonitoring::TDynamicCounters::TCounterPtr QueueWeight; + NMonitoring::TDynamicCounters::TCounterPtr Dropped; + NMonitoring::TDynamicCounters::TCounterPtr Accumulated; + TDoubleCounter AllocatedOffline; + TDoubleCounter ReceivedFromKesus; + + TCounters(const TString& resource, const NMonitoring::TDynamicCounterPtr& quoterCounters) { + if (!quoterCounters) { + return; + } + + auto splittedPath = SplitPath(resource); + Y_VERIFY(!splittedPath.empty()); + ParentConsumed.reserve(splittedPath.size() - 1); + for (auto pathIter = splittedPath.begin() + 1; pathIter != splittedPath.end(); ++pathIter) { + const TString parentResourceName = NKesus::CanonizeQuoterResourcePath(TVector<TString>(splittedPath.begin(), pathIter)); + const auto resourceCounters = quoterCounters->GetSubgroup(RESOURCE_COUNTER_SENSOR_NAME, parentResourceName); + ParentConsumed.emplace_back(resourceCounters->GetCounter(CONSUMED_COUNTER_NAME, true)); + } + + const auto resourceCounters = quoterCounters->GetSubgroup(RESOURCE_COUNTER_SENSOR_NAME, NKesus::CanonizeQuoterResourcePath(splittedPath)); + QueueSize = resourceCounters->GetExpiringCounter(RESOURCE_QUEUE_SIZE_COUNTER_SENSOR_NAME, false); + QueueWeight = resourceCounters->GetExpiringCounter(RESOURCE_QUEUE_WEIGHT_COUNTER_SENSOR_NAME, false); + AllocatedOffline = resourceCounters->GetCounter(RESOURCE_ALLOCATED_OFFLINE_COUNTER_SENSOR_NAME, true); + Dropped = resourceCounters->GetCounter(RESOURCE_DROPPED_COUNTER_SENSOR_NAME, true); + Accumulated = resourceCounters->GetExpiringCounter(RESOURCE_ACCUMULATED_COUNTER_SENSOR_NAME, false); + ReceivedFromKesus = resourceCounters->GetCounter(RESOURCE_RECEIVED_FROM_KESUS_COUNTER_SENSOR_NAME, true); + } + + void AddConsumed(ui64 consumed) { + for (NMonitoring::TDynamicCounters::TCounterPtr& counter : ParentConsumed) { + *counter += consumed; + } + } + }; + + TCounters Counters; + + explicit TResourceState(const TString& resource, const NMonitoring::TDynamicCounterPtr& quoterCounters) + : Resource(resource) + , Counters(resource, quoterCounters) {} - - void AddUpdate(TEvQuota::TEvProxyUpdate& ev) const { - TVector<TEvQuota::TUpdateTick> update; - double sustainedRate = 0.0; - if (Available > 0.0) { - constexpr double rateBurst = 2.0; - constexpr ui32 ticks = 2; - constexpr double ticksD = static_cast<double>(ticks); - update.emplace_back(0, ticks, Available * (rateBurst / ticksD), TEvQuota::ETickPolicy::Front); - sustainedRate = Available * rateBurst; - } else { - update.emplace_back(); - } - ev.Resources.emplace_back(ResId, sustainedRate, std::move(update), TEvQuota::EUpdateState::Normal); - } - - void SetProps(const NKikimrKesus::TStreamingQuoterResource& props) { - Props = props; + + void AddUpdate(TEvQuota::TEvProxyUpdate& ev) const { + TVector<TEvQuota::TUpdateTick> update; + double sustainedRate = 0.0; + if (Available > 0.0) { + constexpr double rateBurst = 2.0; + constexpr ui32 ticks = 2; + constexpr double ticksD = static_cast<double>(ticks); + update.emplace_back(0, ticks, Available * (rateBurst / ticksD), TEvQuota::ETickPolicy::Front); + sustainedRate = Available * rateBurst; + } else { + update.emplace_back(); + } + ev.Resources.emplace_back(ResId, sustainedRate, std::move(update), TEvQuota::EUpdateState::Normal); + } + + void SetProps(const NKikimrKesus::TStreamingQuoterResource& props) { + Props = props; const auto& cfg = Props.GetHierarhicalDRRResourceConfig(); const double speed = cfg.GetMaxUnitsPerSecond(); const double prefetch = cfg.GetPrefetchCoefficient() ? 
cfg.GetPrefetchCoefficient() : PREFETCH_COEFFICIENT_DEFAULT; const double watermark = std::clamp(cfg.GetPrefetchWatermark() ? cfg.GetPrefetchWatermark() : PREFETCH_WATERMARK_DEFAULT, 0.0, 1.0); - + const double prevBucketMaxSize = ResourceBucketMaxSize; ResourceBucketMaxSize = Max(0.0, speed * prefetch); ResourceBucketMinSize = ResourceBucketMaxSize * watermark; @@ -176,16 +176,16 @@ class TKesusQuoterProxy : public TActorBootstrapped<TKesusQuoterProxy> { // Decrease available resource if speed or prefetch settings have been changed. if (prefetch > 0.0) { // https://st.yandex-team.ru/RTMR-3774 if (InitedProps && ResourceBucketMaxSize < prevBucketMaxSize) { - if (const double maxAvailable = ResourceBucketMaxSize + QueueWeight; Available > maxAvailable) { - if (Counters.Dropped) { - const double dropped = Available - maxAvailable; - *Counters.Dropped += dropped; - } - SetAvailable(maxAvailable); // Update resource props with smaller quota. - } - } - } - + if (const double maxAvailable = ResourceBucketMaxSize + QueueWeight; Available > maxAvailable) { + if (Counters.Dropped) { + const double dropped = Available - maxAvailable; + *Counters.Dropped += dropped; + } + SetAvailable(maxAvailable); // Update resource props with smaller quota. + } + } + } + if (Props.GetAccountingConfig().GetEnabled()) { ReportPeriod = TDuration::MilliSeconds(Props.GetAccountingConfig().GetReportPeriodMs()); THolder<TTimeSeriesVec<double>> history(new TTimeSeriesVec<double>(Props.GetAccountingConfig().GetCollectPeriodSec())); @@ -198,246 +198,246 @@ class TKesusQuoterProxy : public TActorBootstrapped<TKesusQuoterProxy> { History.Destroy(); } - if (!InitedProps) { - InitedProps = true; - SetAvailable(ResourceBucketMaxSize); - } - AllocStats.SetProps(Props); - } - - void SetAvailable(double available) { - Available = available; - if (Counters.Accumulated) { - *Counters.Accumulated = static_cast<i64>(available); - } - } - }; - - struct TEvPrivate { - enum EEv { - EvOfflineResourceAllocation = EventSpaceBegin(TEvents::ES_PRIVATE), - - EvEnd - }; - - static_assert(EvEnd <= EventSpaceEnd(TKikimrEvents::ES_PRIVATE), - "expected EvEnd <= EventSpaceEnd(TKikimrEvents::ES_PRIVATE)"); - - struct TEvOfflineResourceAllocation : public TEventLocal<TEvOfflineResourceAllocation, EvOfflineResourceAllocation> { - struct TResourceInfo { - ui64 ResourceId; - double Amount; - - TResourceInfo(ui64 resId, double amount) - : ResourceId(resId) - , Amount(amount) - { - } - }; - - std::vector<TResourceInfo> Resources; - - TEvOfflineResourceAllocation() = default; - }; + if (!InitedProps) { + InitedProps = true; + SetAvailable(ResourceBucketMaxSize); + } + AllocStats.SetProps(Props); + } + + void SetAvailable(double available) { + Available = available; + if (Counters.Accumulated) { + *Counters.Accumulated = static_cast<i64>(available); + } + } }; + struct TEvPrivate { + enum EEv { + EvOfflineResourceAllocation = EventSpaceBegin(TEvents::ES_PRIVATE), + + EvEnd + }; + + static_assert(EvEnd <= EventSpaceEnd(TKikimrEvents::ES_PRIVATE), + "expected EvEnd <= EventSpaceEnd(TKikimrEvents::ES_PRIVATE)"); + + struct TEvOfflineResourceAllocation : public TEventLocal<TEvOfflineResourceAllocation, EvOfflineResourceAllocation> { + struct TResourceInfo { + ui64 ResourceId; + double Amount; + + TResourceInfo(ui64 resId, double amount) + : ResourceId(resId) + , Amount(amount) + { + } + }; + + std::vector<TResourceInfo> Resources; + + TEvOfflineResourceAllocation() = default; + }; + }; + const TActorId QuoterServiceId; const ui64 QuoterId; const TVector<TString> 
Path; - const TString LogPrefix; + const TString LogPrefix; TIntrusiveConstPtr<NSchemeCache::TSchemeCacheNavigate::TKesusInfo> KesusInfo; - THolder<ITabletPipeFactory> TabletPipeFactory; + THolder<ITabletPipeFactory> TabletPipeFactory; TActorId KesusPipeClient; - bool Connected = false; - TInstant DisconnectTime; - ui64 OfflineAllocationCookie = 0; + bool Connected = false; + TInstant DisconnectTime; + ui64 OfflineAllocationCookie = 0; - TMap<TString, THolder<TResourceState>> Resources; // Map because iterators are needed to remain valid during insertions. + TMap<TString, THolder<TResourceState>> Resources; // Map because iterators are needed to remain valid during insertions. THashMap<ui64, decltype(Resources)::iterator> ResIndex; - THashMap<ui64, std::vector<TString>> CookieToResourcePath; - ui64 NextCookie = 1; + THashMap<ui64, std::vector<TString>> CookieToResourcePath; + ui64 NextCookie = 1; - THolder<NKesus::TEvKesus::TEvUpdateConsumptionState> UpdateEv; + THolder<NKesus::TEvKesus::TEvUpdateConsumptionState> UpdateEv; THolder<NKesus::TEvKesus::TEvAccountResources> AccountEv; - THolder<TEvQuota::TEvProxyUpdate> ProxyUpdateEv; - THashMap<TDuration, THolder<TEvPrivate::TEvOfflineResourceAllocation>> OfflineAllocationEvSchedule; - - struct TCounters { - NMonitoring::TDynamicCounterPtr QuoterCounters; - - NMonitoring::TDynamicCounters::TCounterPtr Disconnects; - - void Init(const TString& quoterPath) { - TIntrusivePtr<NMonitoring::TDynamicCounters> serviceCounters = GetServiceCounters(AppData()->Counters, QUOTER_SERVICE_COUNTER_SENSOR_NAME); - if (serviceCounters) { - QuoterCounters = serviceCounters->GetSubgroup(QUOTER_COUNTER_SENSOR_NAME, quoterPath); - Disconnects = QuoterCounters->GetCounter(DISCONNECTS_COUNTER_SENSOR_NAME, true); - } - } - }; - - TCounters Counters; - -private: - ui64 NewCookieForRequest(TString resourcePath) { - Y_VERIFY(resourcePath); - std::vector<TString> paths = {std::move(resourcePath)}; - return NewCookieForRequest(std::move(paths)); - } - - ui64 NewCookieForRequest(std::vector<TString> resourcePaths) { - Y_VERIFY(!resourcePaths.empty()); - const ui64 cookie = NextCookie++; - Y_VERIFY(CookieToResourcePath.emplace(cookie, std::move(resourcePaths)).second); - return cookie; - } - - std::vector<TString> PopResourcePathsForRequest(ui64 cookie) { - auto resPathIt = CookieToResourcePath.find(cookie); - if (resPathIt != CookieToResourcePath.end()) { - std::vector<TString> ret = std::move(resPathIt->second); - CookieToResourcePath.erase(resPathIt); - return ret; - } else { - return {}; - } - } - - static TString KesusErrorToString(const NKikimrKesus::TKesusError& err) { - NYql::TIssues issues; - NYql::IssuesFromMessage(err.GetIssues(), issues); - return issues.ToString(); - } - - void SendProxySessionError(TEvQuota::TEvProxySession::EResult code, const TString& resourcePath) { - KESUS_PROXY_LOG_TRACE("ProxySession(\"" << resourcePath << "\", Error: " << code << ")"); - Send(QuoterServiceId, - new TEvQuota::TEvProxySession( - code, - QuoterId, - 0, - resourcePath, - TDuration::Zero(), - TEvQuota::EStatUpdatePolicy::Never - )); - } - - void ProcessSubscribeResourceError(Ydb::StatusIds::StatusCode code, TResourceState* resState) { - if (!resState->ProxySessionWasSent) { - resState->ProxySessionWasSent = true; - const TEvQuota::TEvProxySession::EResult sessionCode = code == Ydb::StatusIds::NOT_FOUND ? 
TEvQuota::TEvProxySession::UnknownResource : TEvQuota::TEvProxySession::GenericError; - SendProxySessionError(sessionCode, resState->Resource); - DeleteResourceInfo(resState->Resource, resState->ResId); - } else { - BreakResource(*resState, GetProxyUpdateEv()); - } - } - - void SendProxySessionIfNotSent(TResourceState* resState) { - if (!resState->ProxySessionWasSent) { - resState->ProxySessionWasSent = true; - KESUS_PROXY_LOG_TRACE("ProxySession(\"" << resState->Resource << "\", " << resState->ResId << ")"); - Send(QuoterServiceId, - new TEvQuota::TEvProxySession( - TEvQuota::TEvProxySession::Success, - QuoterId, - resState->ResId, - resState->Resource, - TDuration::MilliSeconds(100), - TEvQuota::EStatUpdatePolicy::EveryActiveTick - )); - } - } - - TResourceState* FindResource(ui64 id) { - const auto indexIt = ResIndex.find(id); - return indexIt != ResIndex.end() ? indexIt->second->second.Get() : nullptr; - } - - const TResourceState* FindResource(ui64 id) const { - const auto indexIt = ResIndex.find(id); - return indexIt != ResIndex.end() ? indexIt->second->second.Get() : nullptr; - } - - void Handle(TEvQuota::TEvProxyRequest::TPtr& ev) { - TEvQuota::TEvProxyRequest* msg = ev->Get(); - KESUS_PROXY_LOG_INFO("ProxyRequest \"" << msg->Resource << "\""); - Y_VERIFY(ev->Sender == QuoterServiceId); - - auto resourceIt = Resources.find(msg->Resource); - if (resourceIt == Resources.end()) { - const TString canonPath = NKesus::CanonizeQuoterResourcePath(msg->Resource); - if (canonPath != msg->Resource) { - KESUS_PROXY_LOG_WARN("Resource \"" << msg->Resource << "\" has incorrect name. Maybe this was some error on client side."); - SendProxySessionError(TEvQuota::TEvProxySession::GenericError, msg->Resource); - return; - } - - auto [iter, inserted] = Resources.emplace(msg->Resource, MakeHolder<TResourceState>(msg->Resource, Counters.QuoterCounters)); - Y_ASSERT(inserted); - resourceIt = iter; - } - Y_ASSERT(resourceIt != Resources.end()); - - TResourceState* const resState = resourceIt->second.Get(); - if (resState->ResId == Max<ui64>()) { - InitiateNewSessionToResource(resState->Resource); - } else { - // Already. Resend result. 
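// A standalone C++ sketch (illustrative only, not part of this commit) of the two-index
// layout the proxy relies on above: an ordered map keyed by resource path plus a secondary
// index by resource id that stores iterators into that map. Ordered-map iterators are not
// invalidated by later insertions, which is why TMap is used for Resources. All names and
// types below are hypothetical stand-ins for the YDB ones.
#include <cstdint>
#include <map>
#include <string>
#include <unordered_map>

struct TResStateSketch {
    std::string Path;
    uint64_t ResId = UINT64_MAX; // "not subscribed yet" sentinel, analogous to Max<ui64>()
};

class TResourceRegistrySketch {
public:
    // Find the state for a path or lazily create it, mirroring the find/emplace in Handle().
    TResStateSketch& GetOrCreate(const std::string& path) {
        auto [it, inserted] = Resources.try_emplace(path, TResStateSketch{path, UINT64_MAX});
        return it->second;
    }

    // Once Kesus assigns an id, remember the map iterator under that id.
    void IndexById(const std::string& path, uint64_t id) {
        auto it = Resources.find(path);
        if (it != Resources.end()) {
            it->second.ResId = id;
            ById[id] = it; // stays valid even after further insertions into Resources
        }
    }

    TResStateSketch* FindById(uint64_t id) {
        auto idxIt = ById.find(id);
        return idxIt != ById.end() ? &idxIt->second->second : nullptr;
    }

private:
    std::map<std::string, TResStateSketch> Resources; // keyed by path
    std::unordered_map<uint64_t, std::map<std::string, TResStateSketch>::iterator> ById;
};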
- resState->ProxySessionWasSent = false; - SendProxySessionIfNotSent(resState); - resState->AddUpdate(GetProxyUpdateEv()); - } - } - - void InitiateNewSessionToResource(const TString& resourcePath) { - if (Connected) { - KESUS_PROXY_LOG_DEBUG("Subscribe on resource \"" << resourcePath << "\""); - NKikimrKesus::TEvSubscribeOnResources req; + THolder<TEvQuota::TEvProxyUpdate> ProxyUpdateEv; + THashMap<TDuration, THolder<TEvPrivate::TEvOfflineResourceAllocation>> OfflineAllocationEvSchedule; + + struct TCounters { + NMonitoring::TDynamicCounterPtr QuoterCounters; + + NMonitoring::TDynamicCounters::TCounterPtr Disconnects; + + void Init(const TString& quoterPath) { + TIntrusivePtr<NMonitoring::TDynamicCounters> serviceCounters = GetServiceCounters(AppData()->Counters, QUOTER_SERVICE_COUNTER_SENSOR_NAME); + if (serviceCounters) { + QuoterCounters = serviceCounters->GetSubgroup(QUOTER_COUNTER_SENSOR_NAME, quoterPath); + Disconnects = QuoterCounters->GetCounter(DISCONNECTS_COUNTER_SENSOR_NAME, true); + } + } + }; + + TCounters Counters; + +private: + ui64 NewCookieForRequest(TString resourcePath) { + Y_VERIFY(resourcePath); + std::vector<TString> paths = {std::move(resourcePath)}; + return NewCookieForRequest(std::move(paths)); + } + + ui64 NewCookieForRequest(std::vector<TString> resourcePaths) { + Y_VERIFY(!resourcePaths.empty()); + const ui64 cookie = NextCookie++; + Y_VERIFY(CookieToResourcePath.emplace(cookie, std::move(resourcePaths)).second); + return cookie; + } + + std::vector<TString> PopResourcePathsForRequest(ui64 cookie) { + auto resPathIt = CookieToResourcePath.find(cookie); + if (resPathIt != CookieToResourcePath.end()) { + std::vector<TString> ret = std::move(resPathIt->second); + CookieToResourcePath.erase(resPathIt); + return ret; + } else { + return {}; + } + } + + static TString KesusErrorToString(const NKikimrKesus::TKesusError& err) { + NYql::TIssues issues; + NYql::IssuesFromMessage(err.GetIssues(), issues); + return issues.ToString(); + } + + void SendProxySessionError(TEvQuota::TEvProxySession::EResult code, const TString& resourcePath) { + KESUS_PROXY_LOG_TRACE("ProxySession(\"" << resourcePath << "\", Error: " << code << ")"); + Send(QuoterServiceId, + new TEvQuota::TEvProxySession( + code, + QuoterId, + 0, + resourcePath, + TDuration::Zero(), + TEvQuota::EStatUpdatePolicy::Never + )); + } + + void ProcessSubscribeResourceError(Ydb::StatusIds::StatusCode code, TResourceState* resState) { + if (!resState->ProxySessionWasSent) { + resState->ProxySessionWasSent = true; + const TEvQuota::TEvProxySession::EResult sessionCode = code == Ydb::StatusIds::NOT_FOUND ? TEvQuota::TEvProxySession::UnknownResource : TEvQuota::TEvProxySession::GenericError; + SendProxySessionError(sessionCode, resState->Resource); + DeleteResourceInfo(resState->Resource, resState->ResId); + } else { + BreakResource(*resState, GetProxyUpdateEv()); + } + } + + void SendProxySessionIfNotSent(TResourceState* resState) { + if (!resState->ProxySessionWasSent) { + resState->ProxySessionWasSent = true; + KESUS_PROXY_LOG_TRACE("ProxySession(\"" << resState->Resource << "\", " << resState->ResId << ")"); + Send(QuoterServiceId, + new TEvQuota::TEvProxySession( + TEvQuota::TEvProxySession::Success, + QuoterId, + resState->ResId, + resState->Resource, + TDuration::MilliSeconds(100), + TEvQuota::EStatUpdatePolicy::EveryActiveTick + )); + } + } + + TResourceState* FindResource(ui64 id) { + const auto indexIt = ResIndex.find(id); + return indexIt != ResIndex.end() ? 
indexIt->second->second.Get() : nullptr; + } + + const TResourceState* FindResource(ui64 id) const { + const auto indexIt = ResIndex.find(id); + return indexIt != ResIndex.end() ? indexIt->second->second.Get() : nullptr; + } + + void Handle(TEvQuota::TEvProxyRequest::TPtr& ev) { + TEvQuota::TEvProxyRequest* msg = ev->Get(); + KESUS_PROXY_LOG_INFO("ProxyRequest \"" << msg->Resource << "\""); + Y_VERIFY(ev->Sender == QuoterServiceId); + + auto resourceIt = Resources.find(msg->Resource); + if (resourceIt == Resources.end()) { + const TString canonPath = NKesus::CanonizeQuoterResourcePath(msg->Resource); + if (canonPath != msg->Resource) { + KESUS_PROXY_LOG_WARN("Resource \"" << msg->Resource << "\" has incorrect name. Maybe this was some error on client side."); + SendProxySessionError(TEvQuota::TEvProxySession::GenericError, msg->Resource); + return; + } + + auto [iter, inserted] = Resources.emplace(msg->Resource, MakeHolder<TResourceState>(msg->Resource, Counters.QuoterCounters)); + Y_ASSERT(inserted); + resourceIt = iter; + } + Y_ASSERT(resourceIt != Resources.end()); + + TResourceState* const resState = resourceIt->second.Get(); + if (resState->ResId == Max<ui64>()) { + InitiateNewSessionToResource(resState->Resource); + } else { + // Already. Resend result. + resState->ProxySessionWasSent = false; + SendProxySessionIfNotSent(resState); + resState->AddUpdate(GetProxyUpdateEv()); + } + } + + void InitiateNewSessionToResource(const TString& resourcePath) { + if (Connected) { + KESUS_PROXY_LOG_DEBUG("Subscribe on resource \"" << resourcePath << "\""); + NKikimrKesus::TEvSubscribeOnResources req; ActorIdToProto(SelfId(), req.MutableActorID()); - auto* res = req.AddResources(); - res->SetResourcePath(resourcePath); - NTabletPipe::SendData(SelfId(), KesusPipeClient, new TEvKesus::TEvSubscribeOnResources(std::move(req)), NewCookieForRequest(resourcePath)); - } - } - - void SubscribeToAllResources() { - Y_VERIFY(Connected); - if (Resources.empty()) { - return; - } - std::vector<TString> resourcePaths; - resourcePaths.reserve(Resources.size()); - NKikimrKesus::TEvSubscribeOnResources req; + auto* res = req.AddResources(); + res->SetResourcePath(resourcePath); + NTabletPipe::SendData(SelfId(), KesusPipeClient, new TEvKesus::TEvSubscribeOnResources(std::move(req)), NewCookieForRequest(resourcePath)); + } + } + + void SubscribeToAllResources() { + Y_VERIFY(Connected); + if (Resources.empty()) { + return; + } + std::vector<TString> resourcePaths; + resourcePaths.reserve(Resources.size()); + NKikimrKesus::TEvSubscribeOnResources req; ActorIdToProto(SelfId(), req.MutableActorID()); - for (auto&& [resourcePath, resInfo] : Resources) { - auto* res = req.AddResources(); - res->SetResourcePath(resourcePath); - if (resInfo->SessionIsActive) { - res->SetStartConsuming(true); - res->SetInitialAmount(std::numeric_limits<double>::infinity()); - } - resourcePaths.push_back(resourcePath); - } - NTabletPipe::SendData(SelfId(), KesusPipeClient, new TEvKesus::TEvSubscribeOnResources(std::move(req)), NewCookieForRequest(std::move(resourcePaths))); - } - - TEvQuota::TEvProxyUpdate& GetProxyUpdateEv() { - if (!ProxyUpdateEv) { - ProxyUpdateEv = CreateUpdateEvent(); - } - return *ProxyUpdateEv; - } - - void InitUpdateEv() { - if (!UpdateEv) { - UpdateEv = MakeHolder<NKesus::TEvKesus::TEvUpdateConsumptionState>(); + for (auto&& [resourcePath, resInfo] : Resources) { + auto* res = req.AddResources(); + res->SetResourcePath(resourcePath); + if (resInfo->SessionIsActive) { + res->SetStartConsuming(true); + 
res->SetInitialAmount(std::numeric_limits<double>::infinity()); + } + resourcePaths.push_back(resourcePath); + } + NTabletPipe::SendData(SelfId(), KesusPipeClient, new TEvKesus::TEvSubscribeOnResources(std::move(req)), NewCookieForRequest(std::move(resourcePaths))); + } + + TEvQuota::TEvProxyUpdate& GetProxyUpdateEv() { + if (!ProxyUpdateEv) { + ProxyUpdateEv = CreateUpdateEvent(); + } + return *ProxyUpdateEv; + } + + void InitUpdateEv() { + if (!UpdateEv) { + UpdateEv = MakeHolder<NKesus::TEvKesus::TEvUpdateConsumptionState>(); ActorIdToProto(SelfId(), UpdateEv->Record.MutableActorID()); - } - } - + } + } + void InitAccountEv() { if (!AccountEv) { AccountEv = MakeHolder<NKesus::TEvKesus::TEvAccountResources>(); @@ -445,98 +445,98 @@ private: } } - void SendDeferredEvents() { - if (Connected && UpdateEv) { - KESUS_PROXY_LOG_TRACE("UpdateConsumptionState(" << UpdateEv->Record << ")"); - NTabletPipe::SendData(SelfId(), KesusPipeClient, UpdateEv.Release()); - } - UpdateEv.Reset(); - + void SendDeferredEvents() { + if (Connected && UpdateEv) { + KESUS_PROXY_LOG_TRACE("UpdateConsumptionState(" << UpdateEv->Record << ")"); + NTabletPipe::SendData(SelfId(), KesusPipeClient, UpdateEv.Release()); + } + UpdateEv.Reset(); + if (Connected && AccountEv && AccountEv->Record.GetResourcesInfo().size() > 0) { KESUS_PROXY_LOG_TRACE("AccountResources(" << AccountEv->Record << ")"); NTabletPipe::SendData(SelfId(), KesusPipeClient, AccountEv.Release()); } AccountEv.Reset(); - if (ProxyUpdateEv && ProxyUpdateEv->Resources) { - SendToService(std::move(ProxyUpdateEv)); - } - } - - void ScheduleOfflineAllocation() { - if (OfflineAllocationEvSchedule.empty()) { - return; - } - - if (!Connected) { - for (auto&& alloc : OfflineAllocationEvSchedule) { - KESUS_PROXY_LOG_TRACE("Schedule offline allocation in " << alloc.first << ": " << PrintResources(*alloc.second)); - TAutoPtr<IEventHandle> h = new IEventHandle(SelfId(), SelfId(), alloc.second.Release(), 0, OfflineAllocationCookie); - TActivationContext::Schedule(alloc.first, std::move(h)); - } - } - OfflineAllocationEvSchedule.clear(); - } - - void MarkAllActiveResourcesForOfflineAllocation() { + if (ProxyUpdateEv && ProxyUpdateEv->Resources) { + SendToService(std::move(ProxyUpdateEv)); + } + } + + void ScheduleOfflineAllocation() { + if (OfflineAllocationEvSchedule.empty()) { + return; + } + + if (!Connected) { + for (auto&& alloc : OfflineAllocationEvSchedule) { + KESUS_PROXY_LOG_TRACE("Schedule offline allocation in " << alloc.first << ": " << PrintResources(*alloc.second)); + TAutoPtr<IEventHandle> h = new IEventHandle(SelfId(), SelfId(), alloc.second.Release(), 0, OfflineAllocationCookie); + TActivationContext::Schedule(alloc.first, std::move(h)); + } + } + OfflineAllocationEvSchedule.clear(); + } + + void MarkAllActiveResourcesForOfflineAllocation() { const TInstant now = TActivationContext::Now(); - for (auto&& [path, resState] : Resources) { - Y_UNUSED(path); - if (resState->ResId != Max<ui64>() && resState->SessionIsActive) { - resState->AverageAllocationParams = resState->AllocStats.GetAverageAllocationParams(); - MarkResourceForOfflineAllocation(*resState, now); - } - } - } - - void MarkResourceForOfflineAllocation(TResourceState& res, TInstant now) { - TDuration averageDuration; - double averageAmount; - std::tie(averageDuration, averageAmount) = res.AverageAllocationParams; - KESUS_PROXY_LOG_TRACE("Mark \"" << res.Resource << "\" for offline allocation. 
Connected: " << Connected - << ", SessionIsActive: " << res.SessionIsActive - << ", AverageDuration: " << averageDuration - << ", AverageAmount: " << averageAmount); - if (!Connected && res.SessionIsActive && averageDuration && averageAmount) { - const TDuration when = - res.LastAllocated + averageDuration <= now ? - TDuration::Zero() : - res.LastAllocated + averageDuration - now; - auto& event = OfflineAllocationEvSchedule[when]; - if (!event) { - event = MakeHolder<TEvPrivate::TEvOfflineResourceAllocation>(); - } - double amount = averageAmount; - if (when) { - const TDuration disconnected = now - DisconnectTime; - const double microseconds = static_cast<double>((when + disconnected).MicroSeconds()); - amount *= std::pow(FADING_ALLOCATION_COEFFICIENT, microseconds / 1000000.0); - } - event->Resources.emplace_back(res.ResId, amount); - } - } - - void ActivateSession(TResourceState& res, bool activate = true) { - Y_ASSERT(res.SessionIsActive != activate); - KESUS_PROXY_LOG_INFO((activate ? "Activate" : "Deactivate") << " session to \"" << res.Resource << "\". Connected: " << Connected); - - res.SessionIsActive = activate; - if (Connected) { - InitUpdateEv(); - auto* resInfo = UpdateEv->Record.AddResourcesInfo(); - resInfo->SetResourceId(res.ResId); - resInfo->SetConsumeResource(activate); - if (activate) { - resInfo->SetAmount(std::numeric_limits<double>::infinity()); - } - } else { - if (activate) { - res.AverageAllocationParams = res.AllocStats.GetAverageAllocationParams(); + for (auto&& [path, resState] : Resources) { + Y_UNUSED(path); + if (resState->ResId != Max<ui64>() && resState->SessionIsActive) { + resState->AverageAllocationParams = resState->AllocStats.GetAverageAllocationParams(); + MarkResourceForOfflineAllocation(*resState, now); + } + } + } + + void MarkResourceForOfflineAllocation(TResourceState& res, TInstant now) { + TDuration averageDuration; + double averageAmount; + std::tie(averageDuration, averageAmount) = res.AverageAllocationParams; + KESUS_PROXY_LOG_TRACE("Mark \"" << res.Resource << "\" for offline allocation. Connected: " << Connected + << ", SessionIsActive: " << res.SessionIsActive + << ", AverageDuration: " << averageDuration + << ", AverageAmount: " << averageAmount); + if (!Connected && res.SessionIsActive && averageDuration && averageAmount) { + const TDuration when = + res.LastAllocated + averageDuration <= now ? + TDuration::Zero() : + res.LastAllocated + averageDuration - now; + auto& event = OfflineAllocationEvSchedule[when]; + if (!event) { + event = MakeHolder<TEvPrivate::TEvOfflineResourceAllocation>(); + } + double amount = averageAmount; + if (when) { + const TDuration disconnected = now - DisconnectTime; + const double microseconds = static_cast<double>((when + disconnected).MicroSeconds()); + amount *= std::pow(FADING_ALLOCATION_COEFFICIENT, microseconds / 1000000.0); + } + event->Resources.emplace_back(res.ResId, amount); + } + } + + void ActivateSession(TResourceState& res, bool activate = true) { + Y_ASSERT(res.SessionIsActive != activate); + KESUS_PROXY_LOG_INFO((activate ? "Activate" : "Deactivate") << " session to \"" << res.Resource << "\". 
Connected: " << Connected); + + res.SessionIsActive = activate; + if (Connected) { + InitUpdateEv(); + auto* resInfo = UpdateEv->Record.AddResourcesInfo(); + resInfo->SetResourceId(res.ResId); + resInfo->SetConsumeResource(activate); + if (activate) { + resInfo->SetAmount(std::numeric_limits<double>::infinity()); + } + } else { + if (activate) { + res.AverageAllocationParams = res.AllocStats.GetAverageAllocationParams(); MarkResourceForOfflineAllocation(res, TActivationContext::Now()); - } - } - } - + } + } + } + void ReportSession(TResourceState& res) { if (Connected && res.History) { InitAccountEv(); @@ -570,189 +570,189 @@ private: } } - void Handle(TEvQuota::TEvProxyStats::TPtr& ev) { - TEvQuota::TEvProxyStats* msg = ev->Get(); - KESUS_PROXY_LOG_TRACE("ProxyStats(" << PrintResources(*ev->Get()) << ")"); - for (const TEvQuota::TProxyStat& stat : msg->Stats) { - const auto indexIt = ResIndex.find(stat.ResourceId); - if (indexIt != ResIndex.end()) { - TResourceState& res = *indexIt->second->second; - res.SetAvailable(res.Available - stat.Consumed); - res.QueueWeight = stat.QueueWeight; - res.Counters.AddConsumed(stat.Consumed); + void Handle(TEvQuota::TEvProxyStats::TPtr& ev) { + TEvQuota::TEvProxyStats* msg = ev->Get(); + KESUS_PROXY_LOG_TRACE("ProxyStats(" << PrintResources(*ev->Get()) << ")"); + for (const TEvQuota::TProxyStat& stat : msg->Stats) { + const auto indexIt = ResIndex.find(stat.ResourceId); + if (indexIt != ResIndex.end()) { + TResourceState& res = *indexIt->second->second; + res.SetAvailable(res.Available - stat.Consumed); + res.QueueWeight = stat.QueueWeight; + res.Counters.AddConsumed(stat.Consumed); if (res.History) { res.History->Add(stat.History); res.PendingReport = true; CheckReport(res, TActivationContext::Now()); } - if (res.Counters.QueueSize) { - *res.Counters.QueueSize = static_cast<i64>(stat.QueueSize); - *res.Counters.QueueWeight = static_cast<i64>(stat.QueueWeight); - } - KESUS_PROXY_LOG_TRACE("Set info for resource \"" << res.Resource << "\": { Available: " << res.Available << ", QueueWeight: " << res.QueueWeight << " }"); - CheckState(res); - res.AddUpdate(GetProxyUpdateEv()); - } - } - } - - void DeleteResourceInfo(const TString& resource, const ui64 resourceId) { - auto indexIt = ResIndex.find(resourceId); + if (res.Counters.QueueSize) { + *res.Counters.QueueSize = static_cast<i64>(stat.QueueSize); + *res.Counters.QueueWeight = static_cast<i64>(stat.QueueWeight); + } + KESUS_PROXY_LOG_TRACE("Set info for resource \"" << res.Resource << "\": { Available: " << res.Available << ", QueueWeight: " << res.QueueWeight << " }"); + CheckState(res); + res.AddUpdate(GetProxyUpdateEv()); + } + } + } + + void DeleteResourceInfo(const TString& resource, const ui64 resourceId) { + auto indexIt = ResIndex.find(resourceId); if (indexIt != ResIndex.end()) { - auto resIt = indexIt->second; - if (resIt != Resources.end()) { // else it is already new resource with same path. - TResourceState& res = *resIt->second; - if (res.SessionIsActive) { - ActivateSession(res, false); - } - Resources.erase(resIt); - } + auto resIt = indexIt->second; + if (resIt != Resources.end()) { // else it is already new resource with same path. 
+ TResourceState& res = *resIt->second; + if (res.SessionIsActive) { + ActivateSession(res, false); + } + Resources.erase(resIt); + } ResIndex.erase(indexIt); return; } - auto resIt = Resources.find(resource); + auto resIt = Resources.find(resource); if (resIt != Resources.end()) { - TResourceState& res = *resIt->second; - if (res.SessionIsActive) { - ActivateSession(res, false); - } - if (res.ResId != Max<ui64>()) { - ResIndex.erase(res.ResId); - } + TResourceState& res = *resIt->second; + if (res.SessionIsActive) { + ActivateSession(res, false); + } + if (res.ResId != Max<ui64>()) { + ResIndex.erase(res.ResId); + } Resources.erase(resIt); } } - - void Handle(TEvQuota::TEvProxyCloseSession::TPtr& ev) { - TEvQuota::TEvProxyCloseSession* msg = ev->Get(); - KESUS_PROXY_LOG_TRACE("ProxyCloseSession(\"" << msg->Resource << "\", " << msg->ResourceId << ")"); - DeleteResourceInfo(msg->Resource, msg->ResourceId); - } - - void BreakResource(TResourceState& res, TEvQuota::TEvProxyUpdate& ev) { - ev.Resources.emplace_back(res.ResId, 0.0, TVector<TEvQuota::TUpdateTick>(), TEvQuota::EUpdateState::Broken); - } - - void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev) { - if (ev->Get()->Status == NKikimrProto::OK) { - KESUS_PROXY_LOG_DEBUG("Successfully connected to tablet"); - Connected = true; - SubscribeToAllResources(); - } else { - if (ev->Get()->Dead) { - KESUS_PROXY_LOG_WARN("Tablet doesn't exist"); - SendToService(CreateUpdateEvent(TEvQuota::EUpdateState::Broken)); - } else { - KESUS_PROXY_LOG_WARN("Failed to connect to tablet. Status: " << ev->Get()->Status); - ConnectToKesus(true); - } - } - } - - void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev) { - Y_VERIFY(ev->Get()->TabletId == GetKesusTabletId(), - "Got EvClientDestroyed with tablet %" PRIu64 ", but kesus tablet is %" PRIu64, ev->Get()->TabletId, GetKesusTabletId()); - KESUS_PROXY_LOG_WARN("Disconnected from tablet"); - ConnectToKesus(true); + + void Handle(TEvQuota::TEvProxyCloseSession::TPtr& ev) { + TEvQuota::TEvProxyCloseSession* msg = ev->Get(); + KESUS_PROXY_LOG_TRACE("ProxyCloseSession(\"" << msg->Resource << "\", " << msg->ResourceId << ")"); + DeleteResourceInfo(msg->Resource, msg->ResourceId); + } + + void BreakResource(TResourceState& res, TEvQuota::TEvProxyUpdate& ev) { + ev.Resources.emplace_back(res.ResId, 0.0, TVector<TEvQuota::TUpdateTick>(), TEvQuota::EUpdateState::Broken); + } + + void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev) { + if (ev->Get()->Status == NKikimrProto::OK) { + KESUS_PROXY_LOG_DEBUG("Successfully connected to tablet"); + Connected = true; + SubscribeToAllResources(); + } else { + if (ev->Get()->Dead) { + KESUS_PROXY_LOG_WARN("Tablet doesn't exist"); + SendToService(CreateUpdateEvent(TEvQuota::EUpdateState::Broken)); + } else { + KESUS_PROXY_LOG_WARN("Failed to connect to tablet. 
Status: " << ev->Get()->Status); + ConnectToKesus(true); + } + } + } + + void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev) { + Y_VERIFY(ev->Get()->TabletId == GetKesusTabletId(), + "Got EvClientDestroyed with tablet %" PRIu64 ", but kesus tablet is %" PRIu64, ev->Get()->TabletId, GetKesusTabletId()); + KESUS_PROXY_LOG_WARN("Disconnected from tablet"); + ConnectToKesus(true); DisconnectTime = TActivationContext::Now(); - OfflineAllocationCookie = NextCookie++; - MarkAllActiveResourcesForOfflineAllocation(); - if (Counters.Disconnects) { - ++*Counters.Disconnects; - } - } - - void Handle(NKesus::TEvKesus::TEvSubscribeOnResourcesResult::TPtr& ev) { - const std::vector<TString> resourcePaths = PopResourcePathsForRequest(ev->Cookie); - if (!resourcePaths.empty()) { - const auto& result = ev->Get()->Record; - KESUS_PROXY_LOG_TRACE("SubscribeOnResourceResult(" << result << ")"); - Y_VERIFY(result.ResultsSize() == resourcePaths.size(), "Expected %" PRISZT " resources, but got %" PRISZT, resourcePaths.size(), result.ResultsSize()); - for (size_t i = 0; i < resourcePaths.size(); ++i) { - const auto& resResult = result.GetResults(i); - auto resourceIt = Resources.find(resourcePaths[i]); - if (resourceIt != Resources.end()) { - auto* resState = resourceIt->second.Get(); - Y_VERIFY(resState != nullptr); - if (resResult.GetError().GetStatus() == Ydb::StatusIds::SUCCESS) { - KESUS_PROXY_LOG_INFO("Initialized new session with resource \"" << resourcePaths[i] << "\""); - if (resState->ResId != Max<ui64>() && resState->ResId != resResult.GetResourceId()) { // Kesus was disconnected and then resource was recreated. - BreakResource(*resState, GetProxyUpdateEv()); - ResIndex[resState->ResId] = Resources.end(); - } - resState->ResId = resResult.GetResourceId(); - ResIndex[resState->ResId] = resourceIt; - resourceIt->second->SetProps(resResult.GetEffectiveProps()); - resState->AllocStats.OnConnected(); - resourceIt->second->AddUpdate(GetProxyUpdateEv()); - SendProxySessionIfNotSent(resState); - } else { - // TODO: make cache with error results. - KESUS_PROXY_LOG_WARN("Resource \"" << resourcePaths[i] << "\" session initialization error: " << KesusErrorToString(resResult.GetError())); - ProcessSubscribeResourceError(resResult.GetError().GetStatus(), resState); - } - } - } - } // else it was old request that was retried. 
- } - - void Handle(NKesus::TEvKesus::TEvResourcesAllocated::TPtr& ev) { - KESUS_PROXY_LOG_TRACE("ResourcesAllocated(" << ev->Get()->Record << ")"); + OfflineAllocationCookie = NextCookie++; + MarkAllActiveResourcesForOfflineAllocation(); + if (Counters.Disconnects) { + ++*Counters.Disconnects; + } + } + + void Handle(NKesus::TEvKesus::TEvSubscribeOnResourcesResult::TPtr& ev) { + const std::vector<TString> resourcePaths = PopResourcePathsForRequest(ev->Cookie); + if (!resourcePaths.empty()) { + const auto& result = ev->Get()->Record; + KESUS_PROXY_LOG_TRACE("SubscribeOnResourceResult(" << result << ")"); + Y_VERIFY(result.ResultsSize() == resourcePaths.size(), "Expected %" PRISZT " resources, but got %" PRISZT, resourcePaths.size(), result.ResultsSize()); + for (size_t i = 0; i < resourcePaths.size(); ++i) { + const auto& resResult = result.GetResults(i); + auto resourceIt = Resources.find(resourcePaths[i]); + if (resourceIt != Resources.end()) { + auto* resState = resourceIt->second.Get(); + Y_VERIFY(resState != nullptr); + if (resResult.GetError().GetStatus() == Ydb::StatusIds::SUCCESS) { + KESUS_PROXY_LOG_INFO("Initialized new session with resource \"" << resourcePaths[i] << "\""); + if (resState->ResId != Max<ui64>() && resState->ResId != resResult.GetResourceId()) { // Kesus was disconnected and then resource was recreated. + BreakResource(*resState, GetProxyUpdateEv()); + ResIndex[resState->ResId] = Resources.end(); + } + resState->ResId = resResult.GetResourceId(); + ResIndex[resState->ResId] = resourceIt; + resourceIt->second->SetProps(resResult.GetEffectiveProps()); + resState->AllocStats.OnConnected(); + resourceIt->second->AddUpdate(GetProxyUpdateEv()); + SendProxySessionIfNotSent(resState); + } else { + // TODO: make cache with error results. + KESUS_PROXY_LOG_WARN("Resource \"" << resourcePaths[i] << "\" session initialization error: " << KesusErrorToString(resResult.GetError())); + ProcessSubscribeResourceError(resResult.GetError().GetStatus(), resState); + } + } + } + } // else it was old request that was retried. 
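// A standalone worked example (illustrative, not from this commit) of the offline-allocation
// fading used by MarkResourceForOfflineAllocation above: the grant is delayed until the next
// average allocation would have arrived, and shrinks exponentially with the time spent
// disconnected. The coefficient value passed in is an assumption for the example; the real
// FADING_ALLOCATION_COEFFICIENT is defined elsewhere in this file.
#include <algorithm>
#include <chrono>
#include <cmath>

using TClock = std::chrono::steady_clock;
using TDur = TClock::duration;

struct TOfflineAllocationPlan {
    TDur Delay;    // when to self-schedule the local allocation event
    double Amount; // how much to grant without Kesus
};

TOfflineAllocationPlan PlanOfflineAllocation(TClock::time_point now,
                                             TClock::time_point lastAllocated,
                                             TClock::time_point disconnectTime,
                                             TDur avgPeriod, double avgAmount,
                                             double fadingCoefficient /* assumed, e.g. 0.5 */) {
    // Schedule at the moment the next average allocation would have been granted.
    const TDur delay = std::max(TDur::zero(), lastAllocated + avgPeriod - now);
    double amount = avgAmount;
    if (delay > TDur::zero()) {
        // amount *= coefficient ^ seconds_offline, matching pow(..., microseconds / 1e6) above.
        const std::chrono::duration<double> offline = delay + (now - disconnectTime);
        amount *= std::pow(fadingCoefficient, offline.count());
    }
    return {delay, amount};
}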
+ } + + void Handle(NKesus::TEvKesus::TEvResourcesAllocated::TPtr& ev) { + KESUS_PROXY_LOG_TRACE("ResourcesAllocated(" << ev->Get()->Record << ")"); const TInstant now = TActivationContext::Now(); - for (const NKikimrKesus::TEvResourcesAllocated::TResourceInfo& allocatedInfo : ev->Get()->Record.GetResourcesInfo()) { - TResourceState* res = FindResource(allocatedInfo.GetResourceId()); - if (!res) { - continue; - } - if (allocatedInfo.GetStateNotification().GetStatus() == Ydb::StatusIds::SUCCESS) { - const auto amount = allocatedInfo.GetAmount(); - KESUS_PROXY_LOG_TRACE("Kesus allocated {\"" << res->Resource << "\", " << amount << "}"); - if (allocatedInfo.HasEffectiveProps()) { // changed - res->SetProps(allocatedInfo.GetEffectiveProps()); - } - res->SetAvailable(res->Available + amount); - res->LastAllocated = now; - res->AllocStats.OnResourceAllocated(now, amount); - res->Counters.ReceivedFromKesus += amount; - CheckState(*res); - res->AddUpdate(GetProxyUpdateEv()); - } else { - KESUS_PROXY_LOG_WARN("Resource [" << res->Resource << "] is broken: " << KesusErrorToString(allocatedInfo.GetStateNotification())); - BreakResource(*res, GetProxyUpdateEv()); - } - } - } - - void Handle(TEvPrivate::TEvOfflineResourceAllocation::TPtr& ev) { - if (ev->Cookie != OfflineAllocationCookie) { // From previous disconnections - return; - } - - KESUS_PROXY_LOG_TRACE("OfflineResourceAllocation(" << PrintResources(*ev->Get()) << ")"); + for (const NKikimrKesus::TEvResourcesAllocated::TResourceInfo& allocatedInfo : ev->Get()->Record.GetResourcesInfo()) { + TResourceState* res = FindResource(allocatedInfo.GetResourceId()); + if (!res) { + continue; + } + if (allocatedInfo.GetStateNotification().GetStatus() == Ydb::StatusIds::SUCCESS) { + const auto amount = allocatedInfo.GetAmount(); + KESUS_PROXY_LOG_TRACE("Kesus allocated {\"" << res->Resource << "\", " << amount << "}"); + if (allocatedInfo.HasEffectiveProps()) { // changed + res->SetProps(allocatedInfo.GetEffectiveProps()); + } + res->SetAvailable(res->Available + amount); + res->LastAllocated = now; + res->AllocStats.OnResourceAllocated(now, amount); + res->Counters.ReceivedFromKesus += amount; + CheckState(*res); + res->AddUpdate(GetProxyUpdateEv()); + } else { + KESUS_PROXY_LOG_WARN("Resource [" << res->Resource << "] is broken: " << KesusErrorToString(allocatedInfo.GetStateNotification())); + BreakResource(*res, GetProxyUpdateEv()); + } + } + } + + void Handle(TEvPrivate::TEvOfflineResourceAllocation::TPtr& ev) { + if (ev->Cookie != OfflineAllocationCookie) { // From previous disconnections + return; + } + + KESUS_PROXY_LOG_TRACE("OfflineResourceAllocation(" << PrintResources(*ev->Get()) << ")"); const TInstant now = TActivationContext::Now(); - for (const TEvPrivate::TEvOfflineResourceAllocation::TResourceInfo& allocatedInfo : ev->Get()->Resources) { - TResourceState* res = FindResource(allocatedInfo.ResourceId); - if (!res) { - continue; - } - const bool wasActive = res->SessionIsActive; - KESUS_PROXY_LOG_TRACE("Allocated {\"" << res->Resource << "\", " << allocatedInfo.Amount << "} offline"); - res->SetAvailable(res->Available + allocatedInfo.Amount); - res->LastAllocated = now; - CheckState(*res); - res->AddUpdate(GetProxyUpdateEv()); - if (wasActive) { - MarkResourceForOfflineAllocation(*res, now); - } - - res->Counters.AllocatedOffline += allocatedInfo.Amount; - } - } - - void Handle(NKesus::TEvKesus::TEvUpdateConsumptionStateAck::TPtr&) { - } - + for (const TEvPrivate::TEvOfflineResourceAllocation::TResourceInfo& allocatedInfo : 
ev->Get()->Resources) { + TResourceState* res = FindResource(allocatedInfo.ResourceId); + if (!res) { + continue; + } + const bool wasActive = res->SessionIsActive; + KESUS_PROXY_LOG_TRACE("Allocated {\"" << res->Resource << "\", " << allocatedInfo.Amount << "} offline"); + res->SetAvailable(res->Available + allocatedInfo.Amount); + res->LastAllocated = now; + CheckState(*res); + res->AddUpdate(GetProxyUpdateEv()); + if (wasActive) { + MarkResourceForOfflineAllocation(*res, now); + } + + res->Counters.AllocatedOffline += allocatedInfo.Amount; + } + } + + void Handle(NKesus::TEvKesus::TEvUpdateConsumptionStateAck::TPtr&) { + } + void Handle(NKesus::TEvKesus::TEvAccountResourcesAck::TPtr& ev) { const auto& result = ev->Get()->Record; KESUS_PROXY_LOG_TRACE("AccountResourcesAck(" << result << ")"); @@ -764,82 +764,82 @@ private: } } - THolder<TEvQuota::TEvProxyUpdate> CreateUpdateEvent(TEvQuota::EUpdateState state = TEvQuota::EUpdateState::Normal) const { - return MakeHolder<TEvQuota::TEvProxyUpdate>(QuoterId, state); - } - - TString PrintResources(const TEvQuota::TEvProxyUpdate& ev) const { - TStringBuilder ret; - ret << "["; - for (size_t i = 0; i < ev.Resources.size(); ++i) { - ret << (i > 0 ? ", { " : "{ "); - const auto& update = ev.Resources[i]; - const TResourceState* res = FindResource(update.ResourceId); - if (res) { - ret << "\"" << res->Resource << "\""; - } else { - ret << update.ResourceId; - } - ret << ", " << update.ResourceState; - for (size_t j = 0; j < update.Update.size(); ++j) { - const auto& updateTick = update.Update[j]; - ret << ", {"<< updateTick.Channel << ": " << updateTick.Policy << "(" << updateTick.Rate << ", " << updateTick.Ticks << ")}"; - } - ret << " }"; - } - ret << "]"; - return std::move(ret); - } - - TString PrintResources(const TEvQuota::TEvProxyStats& stats) const { - TStringBuilder ret; - ret << "["; - bool first = true; - for (const TEvQuota::TProxyStat& stat : stats.Stats) { - ret << (first ? "{" : ", {"); - first = false; - if (const auto* res = FindResource(stat.ResourceId)) { - ret << "\"" << res->Resource << "\""; - } else { - ret << stat.ResourceId; - } - ret << ", Consumed: " << stat.Consumed << ", Queue: " << stat.QueueWeight << "}"; - } - ret << "]"; - return std::move(ret); - } - - TString PrintResources(const TEvPrivate::TEvOfflineResourceAllocation& alloc) { - TStringBuilder ret; - ret << "["; - bool first = true; - for (const TEvPrivate::TEvOfflineResourceAllocation::TResourceInfo& resInfo : alloc.Resources) { - ret << (first ? 
"{ " : ", { "); - first = false; - if (const auto* res = FindResource(resInfo.ResourceId)) { - ret << "\"" << res->Resource << "\""; - } else { - ret << resInfo.ResourceId; - } - ret << ", " << resInfo.Amount << " }"; - } - ret << "]"; - return std::move(ret); - } - - void SendToService(THolder<TEvQuota::TEvProxyUpdate>&& ev) { - KESUS_PROXY_LOG_TRACE("ProxyUpdate(" << ev->QuoterState << ", " << PrintResources(*ev) << ")"); - Send(QuoterServiceId, std::move(ev)); - } - - void CheckState(TResourceState& res) { - if (res.SessionIsActive && res.Available >= res.ResourceBucketMaxSize + res.QueueWeight) { - ActivateSession(res, false); - } else if (!res.SessionIsActive && res.Available < res.ResourceBucketMinSize + res.QueueWeight) { - ActivateSession(res); - } - } - + THolder<TEvQuota::TEvProxyUpdate> CreateUpdateEvent(TEvQuota::EUpdateState state = TEvQuota::EUpdateState::Normal) const { + return MakeHolder<TEvQuota::TEvProxyUpdate>(QuoterId, state); + } + + TString PrintResources(const TEvQuota::TEvProxyUpdate& ev) const { + TStringBuilder ret; + ret << "["; + for (size_t i = 0; i < ev.Resources.size(); ++i) { + ret << (i > 0 ? ", { " : "{ "); + const auto& update = ev.Resources[i]; + const TResourceState* res = FindResource(update.ResourceId); + if (res) { + ret << "\"" << res->Resource << "\""; + } else { + ret << update.ResourceId; + } + ret << ", " << update.ResourceState; + for (size_t j = 0; j < update.Update.size(); ++j) { + const auto& updateTick = update.Update[j]; + ret << ", {"<< updateTick.Channel << ": " << updateTick.Policy << "(" << updateTick.Rate << ", " << updateTick.Ticks << ")}"; + } + ret << " }"; + } + ret << "]"; + return std::move(ret); + } + + TString PrintResources(const TEvQuota::TEvProxyStats& stats) const { + TStringBuilder ret; + ret << "["; + bool first = true; + for (const TEvQuota::TProxyStat& stat : stats.Stats) { + ret << (first ? "{" : ", {"); + first = false; + if (const auto* res = FindResource(stat.ResourceId)) { + ret << "\"" << res->Resource << "\""; + } else { + ret << stat.ResourceId; + } + ret << ", Consumed: " << stat.Consumed << ", Queue: " << stat.QueueWeight << "}"; + } + ret << "]"; + return std::move(ret); + } + + TString PrintResources(const TEvPrivate::TEvOfflineResourceAllocation& alloc) { + TStringBuilder ret; + ret << "["; + bool first = true; + for (const TEvPrivate::TEvOfflineResourceAllocation::TResourceInfo& resInfo : alloc.Resources) { + ret << (first ? 
"{ " : ", { "); + first = false; + if (const auto* res = FindResource(resInfo.ResourceId)) { + ret << "\"" << res->Resource << "\""; + } else { + ret << resInfo.ResourceId; + } + ret << ", " << resInfo.Amount << " }"; + } + ret << "]"; + return std::move(ret); + } + + void SendToService(THolder<TEvQuota::TEvProxyUpdate>&& ev) { + KESUS_PROXY_LOG_TRACE("ProxyUpdate(" << ev->QuoterState << ", " << PrintResources(*ev) << ")"); + Send(QuoterServiceId, std::move(ev)); + } + + void CheckState(TResourceState& res) { + if (res.SessionIsActive && res.Available >= res.ResourceBucketMaxSize + res.QueueWeight) { + ActivateSession(res, false); + } else if (!res.SessionIsActive && res.Available < res.ResourceBucketMinSize + res.QueueWeight) { + ActivateSession(res); + } + } + void CheckReport(TResourceState& res, TInstant now) { if (res.LastReport + res.ReportPeriod < now && res.PendingReport) { ReportSession(res); @@ -851,40 +851,40 @@ private: } } - static TString GetLogPrefix(const TVector<TString>& path) { - return TStringBuilder() << "[" << CanonizePath(path) << "]: "; - } - + static TString GetLogPrefix(const TVector<TString>& path) { + return TStringBuilder() << "[" << CanonizePath(path) << "]: "; + } + public: static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::QUOTER_PROXY_ACTOR; } TKesusQuoterProxy(ui64 quoterId, const NSchemeCache::TSchemeCacheNavigate::TEntry& navEntry, const TActorId& quoterServiceId, THolder<ITabletPipeFactory> tabletPipeFactory) - : QuoterServiceId(quoterServiceId) - , QuoterId(quoterId) + : QuoterServiceId(quoterServiceId) + , QuoterId(quoterId) , Path(navEntry.Path) - , LogPrefix(GetLogPrefix(Path)) + , LogPrefix(GetLogPrefix(Path)) , KesusInfo(navEntry.KesusInfo) - , TabletPipeFactory(std::move(tabletPipeFactory)) + , TabletPipeFactory(std::move(tabletPipeFactory)) { - Y_VERIFY(KesusInfo); - Y_VERIFY(GetKesusTabletId()); - Y_VERIFY(TabletPipeFactory); + Y_VERIFY(KesusInfo); + Y_VERIFY(GetKesusTabletId()); + Y_VERIFY(TabletPipeFactory); Y_UNUSED(QuoterId); - - QUOTER_SYSTEM_DEBUG(DebugInfo->KesusQuoterProxies.emplace(CanonizePath(Path), this)); - } - - ~TKesusQuoterProxy() { - QUOTER_SYSTEM_DEBUG(DebugInfo->KesusQuoterProxies.erase(CanonizePath(Path))); + + QUOTER_SYSTEM_DEBUG(DebugInfo->KesusQuoterProxies.emplace(CanonizePath(Path), this)); } + ~TKesusQuoterProxy() { + QUOTER_SYSTEM_DEBUG(DebugInfo->KesusQuoterProxies.erase(CanonizePath(Path))); + } + void Bootstrap() { - KESUS_PROXY_LOG_INFO("Created kesus quoter proxy. Tablet id: " << GetKesusTabletId()); - Counters.Init(CanonizePath(Path)); + KESUS_PROXY_LOG_INFO("Created kesus quoter proxy. 
Tablet id: " << GetKesusTabletId()); + Counters.Init(CanonizePath(Path)); Become(&TThis::StateFunc); - ConnectToKesus(false); + ConnectToKesus(false); } STFUNC(StateFunc) { @@ -894,130 +894,130 @@ public: hFunc(TEvQuota::TEvProxyRequest, Handle); hFunc(TEvQuota::TEvProxyStats, Handle); hFunc(TEvQuota::TEvProxyCloseSession, Handle); - hFunc(TEvTabletPipe::TEvClientConnected, Handle); - hFunc(TEvTabletPipe::TEvClientDestroyed, Handle); - hFunc(NKesus::TEvKesus::TEvSubscribeOnResourcesResult, Handle); - hFunc(NKesus::TEvKesus::TEvResourcesAllocated, Handle); - hFunc(NKesus::TEvKesus::TEvUpdateConsumptionStateAck, Handle); + hFunc(TEvTabletPipe::TEvClientConnected, Handle); + hFunc(TEvTabletPipe::TEvClientDestroyed, Handle); + hFunc(NKesus::TEvKesus::TEvSubscribeOnResourcesResult, Handle); + hFunc(NKesus::TEvKesus::TEvResourcesAllocated, Handle); + hFunc(NKesus::TEvKesus::TEvUpdateConsumptionStateAck, Handle); hFunc(NKesus::TEvKesus::TEvAccountResourcesAck, Handle); - hFunc(TEvPrivate::TEvOfflineResourceAllocation, Handle); + hFunc(TEvPrivate::TEvOfflineResourceAllocation, Handle); default: - KESUS_PROXY_LOG_WARN("TKesusQuoterProxy::StateFunc unexpected event type# " + KESUS_PROXY_LOG_WARN("TKesusQuoterProxy::StateFunc unexpected event type# " << ev->GetTypeRewrite() << " event: " << TString(ev->HasEvent() ? ev->GetBase()->ToString() : "serialized?")); - Y_VERIFY_DEBUG(false, "Unknown event"); + Y_VERIFY_DEBUG(false, "Unknown event"); break; } - - ScheduleOfflineAllocation(); - SendDeferredEvents(); - } - - ui64 GetKesusTabletId() const { - return KesusInfo->Description.GetKesusTabletId(); - } - - NTabletPipe::TClientConfig GetPipeConnectionOptions(bool reconnection) { - NTabletPipe::TClientConfig cfg; - cfg.CheckAliveness = true; + + ScheduleOfflineAllocation(); + SendDeferredEvents(); + } + + ui64 GetKesusTabletId() const { + return KesusInfo->Description.GetKesusTabletId(); + } + + NTabletPipe::TClientConfig GetPipeConnectionOptions(bool reconnection) { + NTabletPipe::TClientConfig cfg; + cfg.CheckAliveness = true; cfg.RetryPolicy = { .RetryLimitCount = 3u, .DoFirstRetryInstantly = !reconnection }; - return cfg; - } - - void CleanupPreviousConnection() { - if (KesusPipeClient) { - NTabletPipe::CloseClient(SelfId(), KesusPipeClient); - } - CookieToResourcePath.clear(); // we will resend all requests with new cookies - } - - void ConnectToKesus(bool reconnection) { - if (reconnection) { - KESUS_PROXY_LOG_INFO("Reconnecting to kesus"); - } else { - KESUS_PROXY_LOG_DEBUG("Connecting to kesus"); - } - CleanupPreviousConnection(); - - KesusPipeClient = - Register( - TabletPipeFactory->CreateTabletPipe( - SelfId(), - GetKesusTabletId(), - GetPipeConnectionOptions(reconnection))); - Connected = false; - } - - void PassAway() override { - if (KesusPipeClient) { - NTabletPipe::CloseClient(SelfId(), KesusPipeClient); - } - TActorBootstrapped::PassAway(); - } + return cfg; + } + + void CleanupPreviousConnection() { + if (KesusPipeClient) { + NTabletPipe::CloseClient(SelfId(), KesusPipeClient); + } + CookieToResourcePath.clear(); // we will resend all requests with new cookies + } + + void ConnectToKesus(bool reconnection) { + if (reconnection) { + KESUS_PROXY_LOG_INFO("Reconnecting to kesus"); + } else { + KESUS_PROXY_LOG_DEBUG("Connecting to kesus"); + } + CleanupPreviousConnection(); + + KesusPipeClient = + Register( + TabletPipeFactory->CreateTabletPipe( + SelfId(), + GetKesusTabletId(), + GetPipeConnectionOptions(reconnection))); + Connected = false; + } + + void PassAway() override { + if 
(KesusPipeClient) { + NTabletPipe::CloseClient(SelfId(), KesusPipeClient); + } + TActorBootstrapped::PassAway(); + } }; -struct TDefaultTabletPipeFactory : public ITabletPipeFactory { +struct TDefaultTabletPipeFactory : public ITabletPipeFactory { IActor* CreateTabletPipe(const NActors::TActorId& owner, ui64 tabletId, const NKikimr::NTabletPipe::TClientConfig& config) override { - return NTabletPipe::CreateClient(owner, tabletId, config); - } -}; - -THolder<ITabletPipeFactory> ITabletPipeFactory::GetDefaultFactory() { - return MakeHolder<TDefaultTabletPipeFactory>(); + return NTabletPipe::CreateClient(owner, tabletId, config); + } +}; + +THolder<ITabletPipeFactory> ITabletPipeFactory::GetDefaultFactory() { + return MakeHolder<TDefaultTabletPipeFactory>(); } IActor* CreateKesusQuoterProxy(ui64 quoterId, const NSchemeCache::TSchemeCacheNavigate::TEntry& navEntry, const TActorId& quoterServiceId, THolder<ITabletPipeFactory> tabletPipeFactory) { - return new TKesusQuoterProxy(quoterId, navEntry, quoterServiceId, std::move(tabletPipeFactory)); -} - -TKesusResourceAllocationStatistics::TKesusResourceAllocationStatistics(size_t windowSize) - : BestPrevStat(windowSize) - , Stat(windowSize) -{ - Y_ASSERT(windowSize >= 2); -} - -void TKesusResourceAllocationStatistics::SetProps(const NKikimrKesus::TStreamingQuoterResource& props) { - DefaultAllocationDelta = TDuration::MilliSeconds(100); - DefaultAllocationAmount = props.GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond() / 10; -} - -void TKesusResourceAllocationStatistics::OnConnected() { - if (Stat.AvailSize() >= BestPrevStat.AvailSize()) { - BestPrevStat = std::move(Stat); - } - Stat.Clear(); -} - -void TKesusResourceAllocationStatistics::OnResourceAllocated(TInstant now, double amount) { - Stat.PushBack({now, amount}); -} - -std::pair<TDuration, double> TKesusResourceAllocationStatistics::GetAverageAllocationParams() const { - if (Stat.AvailSize() >= 2 && Stat.AvailSize() >= BestPrevStat.AvailSize()) { - return GetAverageAllocationParams(Stat); - } - if (BestPrevStat.AvailSize() >= 2) { - return GetAverageAllocationParams(BestPrevStat); - } - - Y_ASSERT(DefaultAllocationDelta != TDuration::Zero()); - Y_ASSERT(DefaultAllocationAmount > 0); - return {DefaultAllocationDelta, DefaultAllocationAmount}; -} - -std::pair<TDuration, double> TKesusResourceAllocationStatistics::GetAverageAllocationParams(const TSimpleRingBuffer<TStatItem>& stat) { - const TDuration window = stat[stat.TotalSize() - 1].Time - stat[stat.FirstIndex()].Time; - double totalAmount = 0; - for (size_t i = stat.FirstIndex(), size = stat.TotalSize(); i < size; ++i) { - totalAmount += stat[i].Amount; - } - return {window / (stat.AvailSize() - 1), totalAmount / stat.AvailSize()}; -} - + return new TKesusQuoterProxy(quoterId, navEntry, quoterServiceId, std::move(tabletPipeFactory)); } + +TKesusResourceAllocationStatistics::TKesusResourceAllocationStatistics(size_t windowSize) + : BestPrevStat(windowSize) + , Stat(windowSize) +{ + Y_ASSERT(windowSize >= 2); } + +void TKesusResourceAllocationStatistics::SetProps(const NKikimrKesus::TStreamingQuoterResource& props) { + DefaultAllocationDelta = TDuration::MilliSeconds(100); + DefaultAllocationAmount = props.GetHierarhicalDRRResourceConfig().GetMaxUnitsPerSecond() / 10; +} + +void TKesusResourceAllocationStatistics::OnConnected() { + if (Stat.AvailSize() >= BestPrevStat.AvailSize()) { + BestPrevStat = std::move(Stat); + } + Stat.Clear(); +} + +void TKesusResourceAllocationStatistics::OnResourceAllocated(TInstant now, double amount) { + 
Stat.PushBack({now, amount}); +} + +std::pair<TDuration, double> TKesusResourceAllocationStatistics::GetAverageAllocationParams() const { + if (Stat.AvailSize() >= 2 && Stat.AvailSize() >= BestPrevStat.AvailSize()) { + return GetAverageAllocationParams(Stat); + } + if (BestPrevStat.AvailSize() >= 2) { + return GetAverageAllocationParams(BestPrevStat); + } + + Y_ASSERT(DefaultAllocationDelta != TDuration::Zero()); + Y_ASSERT(DefaultAllocationAmount > 0); + return {DefaultAllocationDelta, DefaultAllocationAmount}; +} + +std::pair<TDuration, double> TKesusResourceAllocationStatistics::GetAverageAllocationParams(const TSimpleRingBuffer<TStatItem>& stat) { + const TDuration window = stat[stat.TotalSize() - 1].Time - stat[stat.FirstIndex()].Time; + double totalAmount = 0; + for (size_t i = stat.FirstIndex(), size = stat.TotalSize(); i < size; ++i) { + totalAmount += stat[i].Amount; + } + return {window / (stat.AvailSize() - 1), totalAmount / stat.AvailSize()}; +} + +} +} diff --git a/ydb/core/quoter/kesus_quoter_proxy.h b/ydb/core/quoter/kesus_quoter_proxy.h index ed79edfc9fd..4c35b5671f6 100644 --- a/ydb/core/quoter/kesus_quoter_proxy.h +++ b/ydb/core/quoter/kesus_quoter_proxy.h @@ -1,55 +1,55 @@ -#pragma once +#pragma once #include <ydb/core/base/tablet_pipe.h> #include <ydb/core/protos/kesus.pb.h> #include <ydb/core/tx/scheme_cache/scheme_cache.h> - + #include <library/cpp/actors/core/actor.h> #include <library/cpp/actors/core/actorid.h> #include <library/cpp/containers/ring_buffer/ring_buffer.h> - -#include <util/datetime/base.h> -#include <util/generic/ptr.h> -#include <util/system/types.h> - -namespace NKikimr { -namespace NQuoter { - -struct ITabletPipeFactory { + +#include <util/datetime/base.h> +#include <util/generic/ptr.h> +#include <util/system/types.h> + +namespace NKikimr { +namespace NQuoter { + +struct ITabletPipeFactory { virtual NActors::IActor* CreateTabletPipe(const NActors::TActorId& owner, ui64 tabletId, const NKikimr::NTabletPipe::TClientConfig& config = NKikimr::NTabletPipe::TClientConfig()) = 0; - - virtual ~ITabletPipeFactory() = default; - - static THolder<ITabletPipeFactory> GetDefaultFactory(); -}; - + + virtual ~ITabletPipeFactory() = default; + + static THolder<ITabletPipeFactory> GetDefaultFactory(); +}; + NActors::IActor* CreateKesusQuoterProxy(ui64 quoterId, const NSchemeCache::TSchemeCacheNavigate::TEntry& navEntry, const NActors::TActorId& quoterServiceId, THolder<ITabletPipeFactory> tabletPipeFactory = ITabletPipeFactory::GetDefaultFactory()); - -class TKesusResourceAllocationStatistics { -public: - explicit TKesusResourceAllocationStatistics(size_t windowSize = 100); - - void SetProps(const NKikimrKesus::TStreamingQuoterResource& props); - - void OnConnected(); - void OnResourceAllocated(TInstant now, double amount); - - std::pair<TDuration, double> GetAverageAllocationParams() const; - -private: - struct TStatItem { - TInstant Time; - double Amount; - }; - -private: - static std::pair<TDuration, double> GetAverageAllocationParams(const TSimpleRingBuffer<TStatItem>& stat); - -private: - TSimpleRingBuffer<TStatItem> BestPrevStat; // Full stat that was made before current connection - TSimpleRingBuffer<TStatItem> Stat; - TDuration DefaultAllocationDelta; - double DefaultAllocationAmount = 0; -}; - -} // namespace NQuoter -} // namespace NKikimr + +class TKesusResourceAllocationStatistics { +public: + explicit TKesusResourceAllocationStatistics(size_t windowSize = 100); + + void SetProps(const NKikimrKesus::TStreamingQuoterResource& props); + + void 
OnConnected(); + void OnResourceAllocated(TInstant now, double amount); + + std::pair<TDuration, double> GetAverageAllocationParams() const; + +private: + struct TStatItem { + TInstant Time; + double Amount; + }; + +private: + static std::pair<TDuration, double> GetAverageAllocationParams(const TSimpleRingBuffer<TStatItem>& stat); + +private: + TSimpleRingBuffer<TStatItem> BestPrevStat; // Full stat that was made before current connection + TSimpleRingBuffer<TStatItem> Stat; + TDuration DefaultAllocationDelta; + double DefaultAllocationAmount = 0; +}; + +} // namespace NQuoter +} // namespace NKikimr diff --git a/ydb/core/quoter/kesus_quoter_ut.cpp b/ydb/core/quoter/kesus_quoter_ut.cpp index 430db7db483..b5c40d38cff 100644 --- a/ydb/core/quoter/kesus_quoter_ut.cpp +++ b/ydb/core/quoter/kesus_quoter_ut.cpp @@ -1,75 +1,75 @@ -#include "quoter_service.h" -#include "kesus_quoter_proxy.h" -#include "ut_helpers.h" - -namespace NKikimr { - -Y_UNIT_TEST_SUITE(QuoterWithKesusTest) { - Y_UNIT_TEST(ForbidsNotCanonizedQuoterPath) { - TKesusQuoterTestSetup setup; - // Without timeout - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH + "/", TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 1, TEvQuota::TEvClearance::EResult::GenericError); - - // With timeout - setup.GetQuota("/" + TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 1, TDuration::Seconds(1), TEvQuota::TEvClearance::EResult::GenericError); - } - - Y_UNIT_TEST(ForbidsNotCanonizedResourcePath) { - TKesusQuoterTestSetup setup; - // Without timeout - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/", 1, TEvQuota::TEvClearance::EResult::GenericError); - - // With timeout - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "/" + TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/", 1, TDuration::Seconds(1), TEvQuota::TEvClearance::EResult::GenericError); - } - - Y_UNIT_TEST(HandlesNonExistentResource) { - TKesusQuoterTestSetup setup; - // Without timeout - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "UnknownResource", 1, TEvQuota::TEvClearance::EResult::UnknownResource); - - // With timeout - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "UnknownResource", 1, TDuration::Seconds(1), TEvQuota::TEvClearance::EResult::UnknownResource); - } - - Y_UNIT_TEST(HandlesAllRequestsForNonExistentResource) { - TKesusQuoterTestSetup setup; - constexpr size_t requestsCount = 5; - for (size_t i = 0; i < requestsCount; ++i) { - const TDuration deadline = (i & 1) ? 
TDuration::Max() : TDuration::Seconds(1); - setup.SendGetQuotaRequest(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "UnknownResource", 1, deadline); - } - - for (size_t i = 0; i < requestsCount; ++i) { - auto answer = setup.WaitGetQuotaAnswer(); - UNIT_ASSERT_VALUES_EQUAL(answer->Result, TEvQuota::TEvClearance::EResult::UnknownResource); - } - } - - Y_UNIT_TEST(GetsQuota) { - TKesusQuoterTestSetup setup; - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); - } - - Y_UNIT_TEST(GetsBigQuota) { - TKesusQuoterTestSetup setup; - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); // stabilization - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 30); // default rate is 10 - } - - Y_UNIT_TEST(GetsBigQuotaWithDeadline) { - TKesusQuoterTestSetup setup; - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); // stabilization - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 20, TDuration::Seconds(3)); // default rate is 10 - } - - Y_UNIT_TEST(FailsToGetBigQuota) { - TKesusQuoterTestSetup setup; - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); // stabilization - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 40, TDuration::MilliSeconds(500), TEvQuota::TEvClearance::EResult::Deadline); // default rate is 10 - } - +#include "quoter_service.h" +#include "kesus_quoter_proxy.h" +#include "ut_helpers.h" + +namespace NKikimr { + +Y_UNIT_TEST_SUITE(QuoterWithKesusTest) { + Y_UNIT_TEST(ForbidsNotCanonizedQuoterPath) { + TKesusQuoterTestSetup setup; + // Without timeout + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH + "/", TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 1, TEvQuota::TEvClearance::EResult::GenericError); + + // With timeout + setup.GetQuota("/" + TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 1, TDuration::Seconds(1), TEvQuota::TEvClearance::EResult::GenericError); + } + + Y_UNIT_TEST(ForbidsNotCanonizedResourcePath) { + TKesusQuoterTestSetup setup; + // Without timeout + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/", 1, TEvQuota::TEvClearance::EResult::GenericError); + + // With timeout + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "/" + TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/", 1, TDuration::Seconds(1), TEvQuota::TEvClearance::EResult::GenericError); + } + + Y_UNIT_TEST(HandlesNonExistentResource) { + TKesusQuoterTestSetup setup; + // Without timeout + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "UnknownResource", 1, TEvQuota::TEvClearance::EResult::UnknownResource); + + // With timeout + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "UnknownResource", 1, TDuration::Seconds(1), TEvQuota::TEvClearance::EResult::UnknownResource); + } + + Y_UNIT_TEST(HandlesAllRequestsForNonExistentResource) { + TKesusQuoterTestSetup setup; + constexpr size_t requestsCount = 5; + for (size_t i = 0; i < requestsCount; ++i) { + const TDuration deadline = (i & 1) ? 
TDuration::Max() : TDuration::Seconds(1); + setup.SendGetQuotaRequest(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "UnknownResource", 1, deadline); + } + + for (size_t i = 0; i < requestsCount; ++i) { + auto answer = setup.WaitGetQuotaAnswer(); + UNIT_ASSERT_VALUES_EQUAL(answer->Result, TEvQuota::TEvClearance::EResult::UnknownResource); + } + } + + Y_UNIT_TEST(GetsQuota) { + TKesusQuoterTestSetup setup; + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); + } + + Y_UNIT_TEST(GetsBigQuota) { + TKesusQuoterTestSetup setup; + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); // stabilization + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 30); // default rate is 10 + } + + Y_UNIT_TEST(GetsBigQuotaWithDeadline) { + TKesusQuoterTestSetup setup; + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); // stabilization + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 20, TDuration::Seconds(3)); // default rate is 10 + } + + Y_UNIT_TEST(FailsToGetBigQuota) { + TKesusQuoterTestSetup setup; + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); // stabilization + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 40, TDuration::MilliSeconds(500), TEvQuota::TEvClearance::EResult::Deadline); // default rate is 10 + } + Y_UNIT_TEST(PrefetchCoefficient) { TKesusQuoterTestSetup setup; NKikimrKesus::THierarchicalDRRResourceConfig cfg; @@ -89,605 +89,605 @@ Y_UNIT_TEST_SUITE(QuoterWithKesusTest) { setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "root/leaf", 1, TDuration::MilliSeconds(500), TEvQuota::TEvClearance::EResult::Deadline); } - Y_UNIT_TEST(GetsQuotaAfterPause) { - TKesusQuoterTestSetup setup; - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 12); - Sleep(TDuration::MilliSeconds(1000)); - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 12); - } - - Y_UNIT_TEST(GetsSeveralQuotas) { - TKesusQuoterTestSetup setup; - setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res1"); - setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res2"); - setup.GetQuota({{TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res1", 6}, {TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res2", 5}}); - setup.GetQuota({{TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res1", 10}, {TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res2", 1}}); - } - - Y_UNIT_TEST(KesusRecreation) { - TKesusQuoterTestSetup setup; - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); - - setup.GetClient().DeleteKesus(TKesusQuoterTestSetup::DEFAULT_KESUS_PARENT_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_NAME); - Sleep(TDuration::MilliSeconds(500)); // Wait for pipe disconnection, reconnection and passing info that old kesus was destroyed - setup.GetClient().RefreshPathCache(setup.GetServer().GetRuntime(), TKesusQuoterTestSetup::DEFAULT_KESUS_PATH); - - setup.CreateDefaultKesusAndResource(); - setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res"); - - 
setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res"); - } - - Y_UNIT_TEST(AllocationStatistics) { - TKesusQuoterTestSetup setup; - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - cfg.SetMaxUnitsPerSecond(100'000'000.0); - setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "root", cfg); - - auto GetResourceName = [](size_t resIndex) -> TString { - return TStringBuilder() << "root/" << resIndex; - }; - - constexpr size_t ResourceCount = 5; - - cfg.ClearMaxUnitsPerSecond(); - for (size_t resIndex = 0; resIndex < ResourceCount; ++resIndex) { - setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, GetResourceName(resIndex), cfg); - } - - auto UseQuota = [&]() { - for (size_t resIndex = 0; resIndex < ResourceCount; ++resIndex) { - for (size_t i = 0; i < 3; ++i) { - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, GetResourceName(resIndex), 10'000'000); - } - } - }; - - ui64 prevValue = 0; - auto CheckCountersIncreased = [&]() { - auto counters = setup.GetQuoterCounters(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH); - UNIT_ASSERT_C(counters.ResourceCountersSize() > 0, counters); - UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(1).GetResourcePath(), "root", counters); - const ui64 allocated = counters.GetResourceCounters(1).GetAllocated(); - UNIT_ASSERT_C(allocated > prevValue, counters); - prevValue = allocated; - }; - - for (size_t i = 0; i < 3; ++i) { - UseQuota(); - CheckCountersIncreased(); - } - } - - Y_UNIT_TEST(UpdatesCountersForParentResources) { - TKesusQuoterTestSetup setup; - setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/Child1"); - setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/Child1/Child2"); - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/Child1/Child2", 42); - auto quoterCounters = setup.GetServer().GetRuntime()->GetAppData().Counters - ->GetSubgroup("counters", "quoter_service") - ->GetSubgroup("quoter", TKesusQuoterTestSetup::DEFAULT_KESUS_PATH); - - auto CheckConsumedCounter = [&](const TString& resourcePath) { - size_t attempts = 30; // Counters are updated asynchronously, so make several attempts to get proper counter values. 
- do { - auto counter = quoterCounters->GetSubgroup("resource", resourcePath)->GetCounter("QuotaConsumed"); - if (counter->Val() != 42 && attempts > 1) { - Sleep(TDuration::MilliSeconds(50)); - } else { - UNIT_ASSERT_VALUES_EQUAL_C(counter->Val(), 42, "Resource path: " << resourcePath); - } - } while (--attempts); - }; - - CheckConsumedCounter(TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); - CheckConsumedCounter(TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/Child1"); - CheckConsumedCounter(TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/Child1/Child2"); - } - - Y_UNIT_TEST(CanDeleteResourceWhenUsingIt) { - TKesusQuoterTestSetup setup; - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 12); // success - setup.SendGetQuotaRequest(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 100500); // will wait for big amount of resource - setup.DeleteKesusResource(); - auto answer = setup.WaitGetQuotaAnswer(); - UNIT_ASSERT_VALUES_EQUAL(answer->Result, TEvQuota::TEvClearance::EResult::UnknownResource); - } - - Y_UNIT_TEST(CanKillKesusWhenUsingIt) { - TKesusQuoterTestSetup setup; - setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 12); - setup.SendGetQuotaRequest(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 5); - setup.KillKesusTablet(); - auto answer = setup.WaitGetQuotaAnswer(); - UNIT_ASSERT_VALUES_EQUAL(answer->Result, TEvQuota::TEvClearance::EResult::Success); - } -} - -Y_UNIT_TEST_SUITE(KesusProxyTest) { - void FillProps(NKikimrKesus::TStreamingQuoterResource* props, ui64 resId = 42, double speed = 100.0) { - props->SetResourceId(resId); - auto* cfg = props->MutableHierarhicalDRRResourceConfig(); - cfg->SetMaxUnitsPerSecond(speed); - } - - void FillResult(NKikimrKesus::TEvSubscribeOnResourcesResult::TResourceSubscribeResult* result, ui64 resId = 42, double speed = 100.0) { - result->SetResourceId(resId); - result->MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); - FillProps(result->MutableEffectiveProps(), resId, speed); - } - - void FillResultNotFound(NKikimrKesus::TEvSubscribeOnResourcesResult::TResourceSubscribeResult* result) { - result->MutableError()->SetStatus(Ydb::StatusIds::NOT_FOUND); - } - - Y_UNIT_TEST(ReconnectsWithKesusWhenNotConnected) { - TKesusProxyTestSetup setup; - auto* pipeMock = setup.GetPipeFactory().ExpectTabletPipeCreation(true); - - EXPECT_CALL(*pipeMock, OnPoisonPill()); - - // expect a new pipe after receiving NotConnected - setup.GetPipeFactory().ExpectTabletPipeCreation(); - - setup.SendNotConnected(pipeMock); - - TDispatchOptions reconnected; - reconnected.CustomFinalCondition = [&] { - return setup.GetPipeFactory().GetPipesCreatedCount() >= 2; - }; - setup.GetRuntime().DispatchEvents(reconnected); - } - - Y_UNIT_TEST(ReconnectsWithKesusWhenPipeDestroyed) { - TKesusProxyTestSetup setup; - auto* pipeMock = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipeMock, OnPoisonPill()); - - setup.WaitConnected(); - - setup.SendDestroyed(pipeMock); - - setup.WaitPipesCreated(2); - } - - Y_UNIT_TEST(RejectsNotCanonizedResourceName) { - TKesusProxyTestSetup setup; - - setup.ProxyRequest("/resource", TEvQuota::TEvProxySession::GenericError); - setup.ProxyRequest("resource//resource", TEvQuota::TEvProxySession::GenericError); - } - - Y_UNIT_TEST(SubscribesOnResource) { - TKesusProxyTestSetup setup; - auto* pipe = 
setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults()); - pipe->SendSubscribeOnResourceResult(ans, cookie); - })); - - auto session = setup.ProxyRequest("res"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); - } - - Y_UNIT_TEST(SubscribesOnResourcesWhenReconnected) { - TKesusProxyTestSetup setup; - auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); - size_t resCounter = 0; - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .Times(3) - .WillRepeatedly(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), TStringBuilder() << "res" << resCounter); - UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults(), 42 + resCounter, 5.0); - pipe->SendSubscribeOnResourceResult(ans, cookie); - ++resCounter; - })); - - auto session = setup.ProxyRequest("res0"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); - session = setup.ProxyRequest("res1"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 43); - session = setup.ProxyRequest("res2"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 44); - - EXPECT_CALL(*pipe, OnUpdateConsumptionState(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvUpdateConsumptionState& record, ui64) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesInfoSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResourcesInfo(0).GetResourceId(), 43); - UNIT_ASSERT(record.GetResourcesInfo(0).GetConsumeResource()); - })); - + Y_UNIT_TEST(GetsQuotaAfterPause) { + TKesusQuoterTestSetup setup; + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 12); + Sleep(TDuration::MilliSeconds(1000)); + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 12); + } + + Y_UNIT_TEST(GetsSeveralQuotas) { + TKesusQuoterTestSetup setup; + setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res1"); + setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res2"); + setup.GetQuota({{TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res1", 6}, {TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res2", 5}}); + setup.GetQuota({{TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res1", 10}, {TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res2", 1}}); + } + + Y_UNIT_TEST(KesusRecreation) { + TKesusQuoterTestSetup setup; + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); + + setup.GetClient().DeleteKesus(TKesusQuoterTestSetup::DEFAULT_KESUS_PARENT_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_NAME); + Sleep(TDuration::MilliSeconds(500)); // Wait for pipe disconnection, reconnection and passing info that old kesus was destroyed + setup.GetClient().RefreshPathCache(setup.GetServer().GetRuntime(), TKesusQuoterTestSetup::DEFAULT_KESUS_PATH); + + setup.CreateDefaultKesusAndResource(); + setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res"); + + 
setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "Res"); + } + + Y_UNIT_TEST(AllocationStatistics) { + TKesusQuoterTestSetup setup; + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + cfg.SetMaxUnitsPerSecond(100'000'000.0); + setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, "root", cfg); + + auto GetResourceName = [](size_t resIndex) -> TString { + return TStringBuilder() << "root/" << resIndex; + }; + + constexpr size_t ResourceCount = 5; + + cfg.ClearMaxUnitsPerSecond(); + for (size_t resIndex = 0; resIndex < ResourceCount; ++resIndex) { + setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, GetResourceName(resIndex), cfg); + } + + auto UseQuota = [&]() { + for (size_t resIndex = 0; resIndex < ResourceCount; ++resIndex) { + for (size_t i = 0; i < 3; ++i) { + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, GetResourceName(resIndex), 10'000'000); + } + } + }; + + ui64 prevValue = 0; + auto CheckCountersIncreased = [&]() { + auto counters = setup.GetQuoterCounters(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH); + UNIT_ASSERT_C(counters.ResourceCountersSize() > 0, counters); + UNIT_ASSERT_VALUES_EQUAL_C(counters.GetResourceCounters(1).GetResourcePath(), "root", counters); + const ui64 allocated = counters.GetResourceCounters(1).GetAllocated(); + UNIT_ASSERT_C(allocated > prevValue, counters); + prevValue = allocated; + }; + + for (size_t i = 0; i < 3; ++i) { + UseQuota(); + CheckCountersIncreased(); + } + } + + Y_UNIT_TEST(UpdatesCountersForParentResources) { + TKesusQuoterTestSetup setup; + setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/Child1"); + setup.CreateKesusResource(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/Child1/Child2"); + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/Child1/Child2", 42); + auto quoterCounters = setup.GetServer().GetRuntime()->GetAppData().Counters + ->GetSubgroup("counters", "quoter_service") + ->GetSubgroup("quoter", TKesusQuoterTestSetup::DEFAULT_KESUS_PATH); + + auto CheckConsumedCounter = [&](const TString& resourcePath) { + size_t attempts = 30; // Counters are updated asynchronously, so make several attempts to get proper counter values. 
+ do { + auto counter = quoterCounters->GetSubgroup("resource", resourcePath)->GetCounter("QuotaConsumed"); + if (counter->Val() != 42 && attempts > 1) { + Sleep(TDuration::MilliSeconds(50)); + } else { + UNIT_ASSERT_VALUES_EQUAL_C(counter->Val(), 42, "Resource path: " << resourcePath); + } + } while (--attempts); + }; + + CheckConsumedCounter(TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE); + CheckConsumedCounter(TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/Child1"); + CheckConsumedCounter(TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE + "/Child1/Child2"); + } + + Y_UNIT_TEST(CanDeleteResourceWhenUsingIt) { + TKesusQuoterTestSetup setup; + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 12); // success + setup.SendGetQuotaRequest(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 100500); // will wait for big amount of resource + setup.DeleteKesusResource(); + auto answer = setup.WaitGetQuotaAnswer(); + UNIT_ASSERT_VALUES_EQUAL(answer->Result, TEvQuota::TEvClearance::EResult::UnknownResource); + } + + Y_UNIT_TEST(CanKillKesusWhenUsingIt) { + TKesusQuoterTestSetup setup; + setup.GetQuota(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 12); + setup.SendGetQuotaRequest(TKesusQuoterTestSetup::DEFAULT_KESUS_PATH, TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE, 5); + setup.KillKesusTablet(); + auto answer = setup.WaitGetQuotaAnswer(); + UNIT_ASSERT_VALUES_EQUAL(answer->Result, TEvQuota::TEvClearance::EResult::Success); + } +} + +Y_UNIT_TEST_SUITE(KesusProxyTest) { + void FillProps(NKikimrKesus::TStreamingQuoterResource* props, ui64 resId = 42, double speed = 100.0) { + props->SetResourceId(resId); + auto* cfg = props->MutableHierarhicalDRRResourceConfig(); + cfg->SetMaxUnitsPerSecond(speed); + } + + void FillResult(NKikimrKesus::TEvSubscribeOnResourcesResult::TResourceSubscribeResult* result, ui64 resId = 42, double speed = 100.0) { + result->SetResourceId(resId); + result->MutableError()->SetStatus(Ydb::StatusIds::SUCCESS); + FillProps(result->MutableEffectiveProps(), resId, speed); + } + + void FillResultNotFound(NKikimrKesus::TEvSubscribeOnResourcesResult::TResourceSubscribeResult* result) { + result->MutableError()->SetStatus(Ydb::StatusIds::NOT_FOUND); + } + + Y_UNIT_TEST(ReconnectsWithKesusWhenNotConnected) { + TKesusProxyTestSetup setup; + auto* pipeMock = setup.GetPipeFactory().ExpectTabletPipeCreation(true); + + EXPECT_CALL(*pipeMock, OnPoisonPill()); + + // expect a new pipe after receiving NotConnected + setup.GetPipeFactory().ExpectTabletPipeCreation(); + + setup.SendNotConnected(pipeMock); + + TDispatchOptions reconnected; + reconnected.CustomFinalCondition = [&] { + return setup.GetPipeFactory().GetPipesCreatedCount() >= 2; + }; + setup.GetRuntime().DispatchEvents(reconnected); + } + + Y_UNIT_TEST(ReconnectsWithKesusWhenPipeDestroyed) { + TKesusProxyTestSetup setup; + auto* pipeMock = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipeMock, OnPoisonPill()); + + setup.WaitConnected(); + + setup.SendDestroyed(pipeMock); + + setup.WaitPipesCreated(2); + } + + Y_UNIT_TEST(RejectsNotCanonizedResourceName) { + TKesusProxyTestSetup setup; + + setup.ProxyRequest("/resource", TEvQuota::TEvProxySession::GenericError); + setup.ProxyRequest("resource//resource", TEvQuota::TEvProxySession::GenericError); + } + + Y_UNIT_TEST(SubscribesOnResource) { + TKesusProxyTestSetup setup; + auto* pipe = 
setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults()); + pipe->SendSubscribeOnResourceResult(ans, cookie); + })); + + auto session = setup.ProxyRequest("res"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); + } + + Y_UNIT_TEST(SubscribesOnResourcesWhenReconnected) { + TKesusProxyTestSetup setup; + auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); + size_t resCounter = 0; + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .Times(3) + .WillRepeatedly(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), TStringBuilder() << "res" << resCounter); + UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults(), 42 + resCounter, 5.0); + pipe->SendSubscribeOnResourceResult(ans, cookie); + ++resCounter; + })); + + auto session = setup.ProxyRequest("res0"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); + session = setup.ProxyRequest("res1"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 43); + session = setup.ProxyRequest("res2"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 44); + + EXPECT_CALL(*pipe, OnUpdateConsumptionState(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvUpdateConsumptionState& record, ui64) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesInfoSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResourcesInfo(0).GetResourceId(), 43); + UNIT_ASSERT(record.GetResourcesInfo(0).GetConsumeResource()); + })); + setup.SendProxyStats({TEvQuota::TProxyStat(43, 1, 0, {}, 3, 5.0, 0, 0)}); - setup.WaitEvent<NKesus::TEvKesus::TEvUpdateConsumptionState>(); - - // Disconnected - setup.SendDestroyed(pipe); - - // second pipe - auto* pipe2 = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe2, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 3); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res0"); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(1).GetResourcePath(), "res1"); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(2).GetResourcePath(), "res2"); - UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); - UNIT_ASSERT(record.GetResources(1).GetStartConsuming()); - UNIT_ASSERT(!record.GetResources(2).GetStartConsuming()); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - for (size_t res = 0; res < 3; ++res) { - FillResult(ans.AddResults(), 42 + res, 5.0); - } - pipe2->SendSubscribeOnResourceResult(ans, cookie); - })); - - setup.WaitEvent<NKesus::TEvKesus::TEvSubscribeOnResources>(); - } - - Y_UNIT_TEST(ProxyRequestDuringDisconnection) { - TKesusProxyTestSetup setup; - auto* pipe = setup.GetPipeFactory().ExpectTabletPipeCreation(); - - setup.SendProxyRequest("res"); - - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - 
UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults()); - pipe->SendSubscribeOnResourceResult(ans, cookie); - })); - - // Connected - setup.SendConnected(pipe); - - setup.WaitEvent<NKesus::TEvKesus::TEvSubscribeOnResources>(); - } - - Y_UNIT_TEST(DeactivateSessionWhenResourceClosed) { - TKesusProxyTestSetup setup; - auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults()); - pipe->SendSubscribeOnResourceResult(ans, cookie); - })); - - auto session = setup.ProxyRequest("res"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); - + setup.WaitEvent<NKesus::TEvKesus::TEvUpdateConsumptionState>(); + + // Disconnected + setup.SendDestroyed(pipe); + + // second pipe + auto* pipe2 = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe2, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 3); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res0"); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(1).GetResourcePath(), "res1"); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(2).GetResourcePath(), "res2"); + UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); + UNIT_ASSERT(record.GetResources(1).GetStartConsuming()); + UNIT_ASSERT(!record.GetResources(2).GetStartConsuming()); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + for (size_t res = 0; res < 3; ++res) { + FillResult(ans.AddResults(), 42 + res, 5.0); + } + pipe2->SendSubscribeOnResourceResult(ans, cookie); + })); + + setup.WaitEvent<NKesus::TEvKesus::TEvSubscribeOnResources>(); + } + + Y_UNIT_TEST(ProxyRequestDuringDisconnection) { + TKesusProxyTestSetup setup; + auto* pipe = setup.GetPipeFactory().ExpectTabletPipeCreation(); + + setup.SendProxyRequest("res"); + + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults()); + pipe->SendSubscribeOnResourceResult(ans, cookie); + })); + + // Connected + setup.SendConnected(pipe); + + setup.WaitEvent<NKesus::TEvKesus::TEvSubscribeOnResources>(); + } + + Y_UNIT_TEST(DeactivateSessionWhenResourceClosed) { + TKesusProxyTestSetup setup; + auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); + 
NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults()); + pipe->SendSubscribeOnResourceResult(ans, cookie); + })); + + auto session = setup.ProxyRequest("res"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); + setup.SendProxyStats({TEvQuota::TProxyStat(42, 1, 0, {}, 1, 25.0, 0, 0)}); - - auto& startSession = - EXPECT_CALL(*pipe, OnUpdateConsumptionState(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvUpdateConsumptionState& record, ui64) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesInfoSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResourcesInfo(0).GetResourceId(), 42); - UNIT_ASSERT(record.GetResourcesInfo(0).GetConsumeResource()); - })); - - EXPECT_CALL(*pipe, OnUpdateConsumptionState(_, _)) - .After(startSession) - .WillOnce(Invoke([&](const NKikimrKesus::TEvUpdateConsumptionState& record, ui64) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesInfoSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResourcesInfo(0).GetResourceId(), 42); - UNIT_ASSERT(!record.GetResourcesInfo(0).GetConsumeResource()); - })); - - setup.WaitEvent<NKesus::TEvKesus::TEvUpdateConsumptionState>(); - setup.SendCloseSession("res", 42); - setup.WaitEvent<TEvQuota::TEvProxyCloseSession>(); - } - - void SendsProxySessionOnce(bool onSuccess) { - TKesusProxyTestSetup setup; - auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .WillRepeatedly(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults(), 42, 5.0); - pipe->SendSubscribeOnResourceResult(ans, cookie); - })); - - auto session = setup.ProxyRequest("res"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); - - EXPECT_CALL(*pipe, OnUpdateConsumptionState(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvUpdateConsumptionState& record, ui64) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesInfoSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResourcesInfo(0).GetResourceId(), 42); - UNIT_ASSERT(record.GetResourcesInfo(0).GetConsumeResource()); - })); - + + auto& startSession = + EXPECT_CALL(*pipe, OnUpdateConsumptionState(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvUpdateConsumptionState& record, ui64) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesInfoSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResourcesInfo(0).GetResourceId(), 42); + UNIT_ASSERT(record.GetResourcesInfo(0).GetConsumeResource()); + })); + + EXPECT_CALL(*pipe, OnUpdateConsumptionState(_, _)) + .After(startSession) + .WillOnce(Invoke([&](const NKikimrKesus::TEvUpdateConsumptionState& record, ui64) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesInfoSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResourcesInfo(0).GetResourceId(), 42); + UNIT_ASSERT(!record.GetResourcesInfo(0).GetConsumeResource()); + })); + + setup.WaitEvent<NKesus::TEvKesus::TEvUpdateConsumptionState>(); + setup.SendCloseSession("res", 42); + setup.WaitEvent<TEvQuota::TEvProxyCloseSession>(); + } + + void SendsProxySessionOnce(bool onSuccess) { + TKesusProxyTestSetup setup; + auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .WillRepeatedly(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + 
UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults(), 42, 5.0); + pipe->SendSubscribeOnResourceResult(ans, cookie); + })); + + auto session = setup.ProxyRequest("res"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); + + EXPECT_CALL(*pipe, OnUpdateConsumptionState(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvUpdateConsumptionState& record, ui64) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesInfoSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResourcesInfo(0).GetResourceId(), 42); + UNIT_ASSERT(record.GetResourcesInfo(0).GetConsumeResource()); + })); + setup.SendProxyStats({TEvQuota::TProxyStat(42, 1, 0, {}, 3, 5.0, 0, 0)}); - setup.WaitEvent<NKesus::TEvKesus::TEvUpdateConsumptionState>(); - - // Disconnected - setup.SendDestroyed(pipe); - - // second pipe - auto* pipe2 = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe2, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - UNIT_ASSERT(record.GetResources(0).GetStartConsuming()); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - if (onSuccess) { - FillResult(ans.AddResults(), 42, 5.0); - } else { - FillResultNotFound(ans.AddResults()); - } - pipe2->SendSubscribeOnResourceResult(ans, cookie); - })); - - for (size_t i = 0; i < 5; ++i) { - // Error request. If second ProxySession was sent, it will arrive first. - setup.SendProxyRequest("//invalid res"); - - const auto sessionEvent = setup.GetRuntime().GrabEdgeEvent<TEvQuota::TEvProxySession>(TDuration::MilliSeconds(300)); - UNIT_ASSERT_VALUES_EQUAL(sessionEvent->Resource, "//invalid res"); - } - } - - Y_UNIT_TEST(SendsProxySessionOnceOnSuccess) { - SendsProxySessionOnce(true); - } - - Y_UNIT_TEST(SendsProxySessionOnceOnFailure) { - SendsProxySessionOnce(false); - } - - Y_UNIT_TEST(AnswersWithSessionWhenResourceIsAlreadyKnown) { - TKesusProxyTestSetup setup; - auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults()); - pipe->SendSubscribeOnResourceResult(ans, cookie); - })); - - auto session = setup.ProxyRequest("res"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); - - // Session with the same resource - auto session2 = setup.ProxyRequest("res"); - UNIT_ASSERT_VALUES_EQUAL(session2->Get()->ResourceId, 42); - } - - Y_UNIT_TEST(SendsBrokenUpdateWhenKesusPassesError) { - TKesusProxyTestSetup setup; - auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); - 
NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults()); - pipe->SendSubscribeOnResourceResult(ans, cookie); - })); - - auto session = setup.ProxyRequest("res"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); - - setup.SendResourcesAllocated(pipe, 42, 0, Ydb::StatusIds::NOT_FOUND); - - bool broken = false; - for (size_t i = 0; i < 3; ++i) { - auto update = setup.GetProxyUpdate(); - UNIT_ASSERT_VALUES_EQUAL(update->Get()->Resources.size(), 1); - if (update->Get()->Resources[0].ResourceState == TEvQuota::EUpdateState::Broken) { - broken = true; - break; - } - } - UNIT_ASSERT(broken); - } - - Y_UNIT_TEST(AllocatesResourceWithKesus) { - TKesusProxyTestSetup setup; - auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults()); - pipe->SendSubscribeOnResourceResult(ans, cookie); - })); - - auto session = setup.ProxyRequest("res"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); - - UNIT_ASSERT(setup.ConsumeResourceAllocateByKesus(pipe, 42, 30.0, session->Get()->TickSize)); - } - - Y_UNIT_TEST(DisconnectsDuringActiveSession) { - TKesusProxyTestSetup setup; - auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults()); - pipe->SendSubscribeOnResourceResult(ans, cookie); - })); - - setup.GetPipeFactory().ExpectTabletPipeCreation(); // Expect second pipe. Without connecting. - - auto session = setup.ProxyRequest("res"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); - - UNIT_ASSERT(!setup.ConsumeResourceAllocateByKesus(pipe, 42, 30.0, session->Get()->TickSize, 1)); - - // Disconnected - setup.SendDestroyed(pipe); - - UNIT_ASSERT(setup.ConsumeResourceAdvanceTime(42, 30.0, session->Get()->TickSize)); - } - - Y_UNIT_TEST(AllocatesResourceOffline) { - TKesusProxyTestSetup setup; - auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults()); - pipe->SendSubscribeOnResourceResult(ans, cookie); - })); - - setup.GetPipeFactory().ExpectTabletPipeCreation(); // Expect second pipe. Without connecting. - - // No statistics, so resource is allocated every 100 ms with default speed. 
- - auto session = setup.ProxyRequest("res"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); - - // Disconnected - setup.SendDestroyed(pipe); - - UNIT_ASSERT(setup.ConsumeResourceAdvanceTime(42, 30.0, session->Get()->TickSize)); - } - - Y_UNIT_TEST(ConnectsDuringOfflineAllocation) { - TKesusProxyTestSetup setup; - auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); - EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) - .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { - UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); - UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); - NKikimrKesus::TEvSubscribeOnResourcesResult ans; - FillResult(ans.AddResults()); - pipe->SendSubscribeOnResourceResult(ans, cookie); - })); - - auto* pipe2 = setup.GetPipeFactory().ExpectTabletPipeCreation(); // Expect second pipe. Without connecting. - - // No statistics, so resource is allocated every 100 ms with default speed. - - auto session = setup.ProxyRequest("res"); - UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); - - // Disconnected - setup.SendDestroyed(pipe); - - UNIT_ASSERT(!setup.ConsumeResourceAdvanceTime(42, 60.0, session->Get()->TickSize, 1)); - - setup.SendConnected(pipe2); - - UNIT_ASSERT(setup.ConsumeResourceAllocateByKesus(pipe2, 42, 60.0, session->Get()->TickSize, 2)); - } -} - -Y_UNIT_TEST_SUITE(KesusResourceAllocationStatisticsTest) { - using NQuoter::TKesusResourceAllocationStatistics; - - void CheckParams(TKesusResourceAllocationStatistics& stat, TDuration delta, double amount) { - auto params = stat.GetAverageAllocationParams(); - UNIT_ASSERT_VALUES_EQUAL(params.first, delta); - UNIT_ASSERT_DOUBLES_EQUAL(params.second, amount, 0.001); - } - - Y_UNIT_TEST(ReturnsDefaultValues) { - TKesusResourceAllocationStatistics stat; - NKikimrKesus::TStreamingQuoterResource props; - props.MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(42); - stat.SetProps(props); - - CheckParams(stat, TDuration::MilliSeconds(100), 4.2); - - // Add one allocation event: - // still can't calculate. - stat.OnResourceAllocated(TInstant::Seconds(100), 1000); - CheckParams(stat, TDuration::MilliSeconds(100), 4.2); - - // Now we can calculate. - stat.OnResourceAllocated(TInstant::Seconds(200), 2000); - CheckParams(stat, TDuration::Seconds(100), 1500); - } - - Y_UNIT_TEST(CalculatesAverage) { - TKesusResourceAllocationStatistics stat(5); - stat.OnResourceAllocated(TInstant::Seconds(100), 1000); - stat.OnResourceAllocated(TInstant::Seconds(200), 1000); - CheckParams(stat, TDuration::Seconds(100), 1000); - - stat.OnResourceAllocated(TInstant::Seconds(400), 4000); - CheckParams(stat, TDuration::Seconds(150), 2000); - - stat.OnResourceAllocated(TInstant::Seconds(1000), 4000); - CheckParams(stat, TDuration::Seconds(300), 2500); - - stat.OnResourceAllocated(TInstant::Seconds(1300), 5000); - CheckParams(stat, TDuration::Seconds(300), 3000); - - // Forgets first value. - stat.OnResourceAllocated(TInstant::Seconds(2000), 2000); - CheckParams(stat, TDuration::Seconds(450), 3200); - - // Forgets second value. 
- stat.OnResourceAllocated(TInstant::Seconds(4400), 5000); - CheckParams(stat, TDuration::Seconds(1000), 4000); - } - - Y_UNIT_TEST(TakesBestStat) { - TKesusResourceAllocationStatistics stat(4); - stat.OnResourceAllocated(TInstant::Seconds(100), 10); - stat.OnResourceAllocated(TInstant::Seconds(200), 10); - CheckParams(stat, TDuration::Seconds(100), 10); - - stat.OnConnected(); - CheckParams(stat, TDuration::Seconds(100), 10); - stat.OnResourceAllocated(TInstant::Seconds(300), 20); - CheckParams(stat, TDuration::Seconds(100), 10); - stat.OnResourceAllocated(TInstant::Seconds(400), 20); - CheckParams(stat, TDuration::Seconds(100), 20); - stat.OnResourceAllocated(TInstant::Seconds(500), 20); - CheckParams(stat, TDuration::Seconds(100), 20); - - stat.OnConnected(); - CheckParams(stat, TDuration::Seconds(100), 20); - stat.OnResourceAllocated(TInstant::Seconds(700), 30); - CheckParams(stat, TDuration::Seconds(100), 20); - stat.OnResourceAllocated(TInstant::Seconds(800), 30); - CheckParams(stat, TDuration::Seconds(100), 20); - - stat.OnConnected(); - CheckParams(stat, TDuration::Seconds(100), 20); - stat.OnResourceAllocated(TInstant::Seconds(900), 40); - CheckParams(stat, TDuration::Seconds(100), 20); - stat.OnResourceAllocated(TInstant::Seconds(1100), 40); - CheckParams(stat, TDuration::Seconds(100), 20); - stat.OnResourceAllocated(TInstant::Seconds(1300), 40); - CheckParams(stat, TDuration::Seconds(200), 40); - stat.OnResourceAllocated(TInstant::Seconds(1800), 80); - CheckParams(stat, TDuration::Seconds(300), 50); - } -} - -} // namespace NKikimr + setup.WaitEvent<NKesus::TEvKesus::TEvUpdateConsumptionState>(); + + // Disconnected + setup.SendDestroyed(pipe); + + // second pipe + auto* pipe2 = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe2, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + UNIT_ASSERT(record.GetResources(0).GetStartConsuming()); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + if (onSuccess) { + FillResult(ans.AddResults(), 42, 5.0); + } else { + FillResultNotFound(ans.AddResults()); + } + pipe2->SendSubscribeOnResourceResult(ans, cookie); + })); + + for (size_t i = 0; i < 5; ++i) { + // Error request. If second ProxySession was sent, it will arrive first. 
+ setup.SendProxyRequest("//invalid res"); + + const auto sessionEvent = setup.GetRuntime().GrabEdgeEvent<TEvQuota::TEvProxySession>(TDuration::MilliSeconds(300)); + UNIT_ASSERT_VALUES_EQUAL(sessionEvent->Resource, "//invalid res"); + } + } + + Y_UNIT_TEST(SendsProxySessionOnceOnSuccess) { + SendsProxySessionOnce(true); + } + + Y_UNIT_TEST(SendsProxySessionOnceOnFailure) { + SendsProxySessionOnce(false); + } + + Y_UNIT_TEST(AnswersWithSessionWhenResourceIsAlreadyKnown) { + TKesusProxyTestSetup setup; + auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults()); + pipe->SendSubscribeOnResourceResult(ans, cookie); + })); + + auto session = setup.ProxyRequest("res"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); + + // Session with the same resource + auto session2 = setup.ProxyRequest("res"); + UNIT_ASSERT_VALUES_EQUAL(session2->Get()->ResourceId, 42); + } + + Y_UNIT_TEST(SendsBrokenUpdateWhenKesusPassesError) { + TKesusProxyTestSetup setup; + auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + UNIT_ASSERT(!record.GetResources(0).GetStartConsuming()); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults()); + pipe->SendSubscribeOnResourceResult(ans, cookie); + })); + + auto session = setup.ProxyRequest("res"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); + + setup.SendResourcesAllocated(pipe, 42, 0, Ydb::StatusIds::NOT_FOUND); + + bool broken = false; + for (size_t i = 0; i < 3; ++i) { + auto update = setup.GetProxyUpdate(); + UNIT_ASSERT_VALUES_EQUAL(update->Get()->Resources.size(), 1); + if (update->Get()->Resources[0].ResourceState == TEvQuota::EUpdateState::Broken) { + broken = true; + break; + } + } + UNIT_ASSERT(broken); + } + + Y_UNIT_TEST(AllocatesResourceWithKesus) { + TKesusProxyTestSetup setup; + auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults()); + pipe->SendSubscribeOnResourceResult(ans, cookie); + })); + + auto session = setup.ProxyRequest("res"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); + + UNIT_ASSERT(setup.ConsumeResourceAllocateByKesus(pipe, 42, 30.0, session->Get()->TickSize)); + } + + Y_UNIT_TEST(DisconnectsDuringActiveSession) { + TKesusProxyTestSetup setup; + auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + 
UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults()); + pipe->SendSubscribeOnResourceResult(ans, cookie); + })); + + setup.GetPipeFactory().ExpectTabletPipeCreation(); // Expect second pipe. Without connecting. + + auto session = setup.ProxyRequest("res"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); + + UNIT_ASSERT(!setup.ConsumeResourceAllocateByKesus(pipe, 42, 30.0, session->Get()->TickSize, 1)); + + // Disconnected + setup.SendDestroyed(pipe); + + UNIT_ASSERT(setup.ConsumeResourceAdvanceTime(42, 30.0, session->Get()->TickSize)); + } + + Y_UNIT_TEST(AllocatesResourceOffline) { + TKesusProxyTestSetup setup; + auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults()); + pipe->SendSubscribeOnResourceResult(ans, cookie); + })); + + setup.GetPipeFactory().ExpectTabletPipeCreation(); // Expect second pipe. Without connecting. + + // No statistics, so resource is allocated every 100 ms with default speed. + + auto session = setup.ProxyRequest("res"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); + + // Disconnected + setup.SendDestroyed(pipe); + + UNIT_ASSERT(setup.ConsumeResourceAdvanceTime(42, 30.0, session->Get()->TickSize)); + } + + Y_UNIT_TEST(ConnectsDuringOfflineAllocation) { + TKesusProxyTestSetup setup; + auto* pipe = setup.GetPipeFactory().ExpectTabletPipeConnection(); + EXPECT_CALL(*pipe, OnSubscribeOnResources(_, _)) + .WillOnce(Invoke([&](const NKikimrKesus::TEvSubscribeOnResources& record, ui64 cookie) { + UNIT_ASSERT_VALUES_EQUAL(record.ResourcesSize(), 1); + UNIT_ASSERT_VALUES_EQUAL(record.GetResources(0).GetResourcePath(), "res"); + NKikimrKesus::TEvSubscribeOnResourcesResult ans; + FillResult(ans.AddResults()); + pipe->SendSubscribeOnResourceResult(ans, cookie); + })); + + auto* pipe2 = setup.GetPipeFactory().ExpectTabletPipeCreation(); // Expect second pipe. Without connecting. + + // No statistics, so resource is allocated every 100 ms with default speed. + + auto session = setup.ProxyRequest("res"); + UNIT_ASSERT_VALUES_EQUAL(session->Get()->ResourceId, 42); + + // Disconnected + setup.SendDestroyed(pipe); + + UNIT_ASSERT(!setup.ConsumeResourceAdvanceTime(42, 60.0, session->Get()->TickSize, 1)); + + setup.SendConnected(pipe2); + + UNIT_ASSERT(setup.ConsumeResourceAllocateByKesus(pipe2, 42, 60.0, session->Get()->TickSize, 2)); + } +} + +Y_UNIT_TEST_SUITE(KesusResourceAllocationStatisticsTest) { + using NQuoter::TKesusResourceAllocationStatistics; + + void CheckParams(TKesusResourceAllocationStatistics& stat, TDuration delta, double amount) { + auto params = stat.GetAverageAllocationParams(); + UNIT_ASSERT_VALUES_EQUAL(params.first, delta); + UNIT_ASSERT_DOUBLES_EQUAL(params.second, amount, 0.001); + } + + Y_UNIT_TEST(ReturnsDefaultValues) { + TKesusResourceAllocationStatistics stat; + NKikimrKesus::TStreamingQuoterResource props; + props.MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(42); + stat.SetProps(props); + + CheckParams(stat, TDuration::MilliSeconds(100), 4.2); + + // Add one allocation event: + // still can't calculate. 
+ stat.OnResourceAllocated(TInstant::Seconds(100), 1000); + CheckParams(stat, TDuration::MilliSeconds(100), 4.2); + + // Now we can calculate. + stat.OnResourceAllocated(TInstant::Seconds(200), 2000); + CheckParams(stat, TDuration::Seconds(100), 1500); + } + + Y_UNIT_TEST(CalculatesAverage) { + TKesusResourceAllocationStatistics stat(5); + stat.OnResourceAllocated(TInstant::Seconds(100), 1000); + stat.OnResourceAllocated(TInstant::Seconds(200), 1000); + CheckParams(stat, TDuration::Seconds(100), 1000); + + stat.OnResourceAllocated(TInstant::Seconds(400), 4000); + CheckParams(stat, TDuration::Seconds(150), 2000); + + stat.OnResourceAllocated(TInstant::Seconds(1000), 4000); + CheckParams(stat, TDuration::Seconds(300), 2500); + + stat.OnResourceAllocated(TInstant::Seconds(1300), 5000); + CheckParams(stat, TDuration::Seconds(300), 3000); + + // Forgets first value. + stat.OnResourceAllocated(TInstant::Seconds(2000), 2000); + CheckParams(stat, TDuration::Seconds(450), 3200); + + // Forgets second value. + stat.OnResourceAllocated(TInstant::Seconds(4400), 5000); + CheckParams(stat, TDuration::Seconds(1000), 4000); + } + + Y_UNIT_TEST(TakesBestStat) { + TKesusResourceAllocationStatistics stat(4); + stat.OnResourceAllocated(TInstant::Seconds(100), 10); + stat.OnResourceAllocated(TInstant::Seconds(200), 10); + CheckParams(stat, TDuration::Seconds(100), 10); + + stat.OnConnected(); + CheckParams(stat, TDuration::Seconds(100), 10); + stat.OnResourceAllocated(TInstant::Seconds(300), 20); + CheckParams(stat, TDuration::Seconds(100), 10); + stat.OnResourceAllocated(TInstant::Seconds(400), 20); + CheckParams(stat, TDuration::Seconds(100), 20); + stat.OnResourceAllocated(TInstant::Seconds(500), 20); + CheckParams(stat, TDuration::Seconds(100), 20); + + stat.OnConnected(); + CheckParams(stat, TDuration::Seconds(100), 20); + stat.OnResourceAllocated(TInstant::Seconds(700), 30); + CheckParams(stat, TDuration::Seconds(100), 20); + stat.OnResourceAllocated(TInstant::Seconds(800), 30); + CheckParams(stat, TDuration::Seconds(100), 20); + + stat.OnConnected(); + CheckParams(stat, TDuration::Seconds(100), 20); + stat.OnResourceAllocated(TInstant::Seconds(900), 40); + CheckParams(stat, TDuration::Seconds(100), 20); + stat.OnResourceAllocated(TInstant::Seconds(1100), 40); + CheckParams(stat, TDuration::Seconds(100), 20); + stat.OnResourceAllocated(TInstant::Seconds(1300), 40); + CheckParams(stat, TDuration::Seconds(200), 40); + stat.OnResourceAllocated(TInstant::Seconds(1800), 80); + CheckParams(stat, TDuration::Seconds(300), 50); + } +} + +} // namespace NKikimr diff --git a/ydb/core/quoter/probes.cpp b/ydb/core/quoter/probes.cpp index e7761073b8d..5cb129376fc 100644 --- a/ydb/core/quoter/probes.cpp +++ b/ydb/core/quoter/probes.cpp @@ -1,3 +1,3 @@ -#include "probes.h" - -LWTRACE_DEFINE_PROVIDER(QUOTER_SERVICE_PROVIDER); +#include "probes.h" + +LWTRACE_DEFINE_PROVIDER(QUOTER_SERVICE_PROVIDER); diff --git a/ydb/core/quoter/probes.h b/ydb/core/quoter/probes.h index c5ef9aca125..b0bacd4bc10 100644 --- a/ydb/core/quoter/probes.h +++ b/ydb/core/quoter/probes.h @@ -1,39 +1,39 @@ -#pragma once +#pragma once #include <ydb/core/base/quoter.h> - + #include <library/cpp/lwtrace/all.h> - -#include <util/string/builder.h> - -#include <type_traits> -#include <limits> - -#define QUOTER_SERVICE_PROVIDER(PROBE, EVENT, GROUPS, TYPES, NAMES) \ - PROBE(StartRequest, GROUPS("QuoterService", "ClientRequest", "Orbit"), \ - TYPES(NLWTrace::TEnumParamWithSerialization<NKikimr::TEvQuota::EResourceOperator>, TDuration, ui64), \ - 
NAMES("operator", "deadlineMs", "cookie")) \ - PROBE(RequestResource, GROUPS("QuoterService", "ClientRequest"), \ - TYPES(ui64, TString, TString, ui64, ui64), \ - NAMES("amount", "quoter", "resource", "quoterId", "resourceId")) \ - PROBE(RequestDone, GROUPS("QuoterService", "ClientRequest"), \ - TYPES(NLWTrace::TEnumParamWithSerialization<NKikimr::TEvQuota::TEvClearance::EResult>, ui64), \ - NAMES("result", "cookie")) \ - PROBE(ResourceQueueState, GROUPS("QuoterService", "ClientRequest"), \ - TYPES(TString, TString, ui64, ui64, ui64, double), \ - NAMES("quoter", "resource", "quoterId", "resourceId", "queueSize", "queueWeight")) \ - PROBE(StartCharging, GROUPS("QuoterService", "ClientRequest"), \ - TYPES(TString, TString, ui64, ui64), \ - NAMES("quoter", "resource", "quoterId", "resourceId")) \ + +#include <util/string/builder.h> + +#include <type_traits> +#include <limits> + +#define QUOTER_SERVICE_PROVIDER(PROBE, EVENT, GROUPS, TYPES, NAMES) \ + PROBE(StartRequest, GROUPS("QuoterService", "ClientRequest", "Orbit"), \ + TYPES(NLWTrace::TEnumParamWithSerialization<NKikimr::TEvQuota::EResourceOperator>, TDuration, ui64), \ + NAMES("operator", "deadlineMs", "cookie")) \ + PROBE(RequestResource, GROUPS("QuoterService", "ClientRequest"), \ + TYPES(ui64, TString, TString, ui64, ui64), \ + NAMES("amount", "quoter", "resource", "quoterId", "resourceId")) \ + PROBE(RequestDone, GROUPS("QuoterService", "ClientRequest"), \ + TYPES(NLWTrace::TEnumParamWithSerialization<NKikimr::TEvQuota::TEvClearance::EResult>, ui64), \ + NAMES("result", "cookie")) \ + PROBE(ResourceQueueState, GROUPS("QuoterService", "ClientRequest"), \ + TYPES(TString, TString, ui64, ui64, ui64, double), \ + NAMES("quoter", "resource", "quoterId", "resourceId", "queueSize", "queueWeight")) \ + PROBE(StartCharging, GROUPS("QuoterService", "ClientRequest"), \ + TYPES(TString, TString, ui64, ui64), \ + NAMES("quoter", "resource", "quoterId", "resourceId")) \ PROBE(Charge, GROUPS("QuoterService", "ClientRequest"), \ TYPES(TString, TString, ui64, ui64), \ NAMES("quoter", "resource", "quoterId", "resourceId")) \ - \ - PROBE(AllocateResource, GROUPS("QuoterService", "Resource"), \ - TYPES(TString, TString, ui64, ui64, ui64, double, ui64, double, double, double), \ - NAMES("quoter", "resource", "quoterId", "resourceId", "requestsProcessed", "amountConsumed", "queueSize", "queueWeight", "balance", "freeBalance")) \ - PROBE(FeedResource, GROUPS("QuoterService", "Resource"), \ - TYPES(TString, TString, ui64, ui64, double, double), \ - NAMES("quoter", "resource", "quoterId", "resourceId", "balance", "freeBalance")) \ - /**/ - -LWTRACE_DECLARE_PROVIDER(QUOTER_SERVICE_PROVIDER) + \ + PROBE(AllocateResource, GROUPS("QuoterService", "Resource"), \ + TYPES(TString, TString, ui64, ui64, ui64, double, ui64, double, double, double), \ + NAMES("quoter", "resource", "quoterId", "resourceId", "requestsProcessed", "amountConsumed", "queueSize", "queueWeight", "balance", "freeBalance")) \ + PROBE(FeedResource, GROUPS("QuoterService", "Resource"), \ + TYPES(TString, TString, ui64, ui64, double, double), \ + NAMES("quoter", "resource", "quoterId", "resourceId", "balance", "freeBalance")) \ + /**/ + +LWTRACE_DECLARE_PROVIDER(QUOTER_SERVICE_PROVIDER) diff --git a/ydb/core/quoter/quoter_service.cpp b/ydb/core/quoter/quoter_service.cpp index b9a1b629f51..3470b8e2392 100644 --- a/ydb/core/quoter/quoter_service.cpp +++ b/ydb/core/quoter/quoter_service.cpp @@ -1,52 +1,52 @@ #include "quoter_service_impl.h" -#include "debug_info.h" -#include "kesus_quoter_proxy.h" 
-#include "probes.h" - +#include "debug_info.h" +#include "kesus_quoter_proxy.h" +#include "probes.h" + #include <ydb/core/base/counters.h> #include <library/cpp/lwtrace/mon/mon_lwtrace.h> -#include <cmath> - +#include <cmath> + #if defined BLOG_D || defined BLOG_I || defined BLOG_ERROR #error log macro definition clash #endif -#define BLOG_T(stream) LOG_TRACE_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_SERVICE, stream) +#define BLOG_T(stream) LOG_TRACE_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_SERVICE, stream) #define BLOG_D(stream) LOG_DEBUG_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_SERVICE, stream) #define BLOG_I(stream) LOG_INFO_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_SERVICE, stream) -#define BLOG_WARN(stream) LOG_WARN_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_SERVICE, stream) +#define BLOG_WARN(stream) LOG_WARN_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_SERVICE, stream) #define BLOG_ERROR(stream) LOG_ERROR_S((TlsActivationContext->AsActorContext()), NKikimrServices::QUOTER_SERVICE, stream) -LWTRACE_USING(QUOTER_SERVICE_PROVIDER); - +LWTRACE_USING(QUOTER_SERVICE_PROVIDER); + namespace NKikimr { namespace NQuoter { -extern const TString CONSUMED_COUNTER_NAME = "QuotaConsumed"; -extern const TString REQUESTED_COUNTER_NAME = "QuotaRequested"; -extern const TString RESOURCE_COUNTER_SENSOR_NAME = "resource"; -extern const TString QUOTER_COUNTER_SENSOR_NAME = "quoter"; -extern const TString QUOTER_SERVICE_COUNTER_SENSOR_NAME = "quoter_service"; -extern const TString RESOURCE_QUEUE_SIZE_COUNTER_SENSOR_NAME = "QueueSize"; -extern const TString RESOURCE_QUEUE_WEIGHT_COUNTER_SENSOR_NAME = "QueueWeight"; -extern const TString RESOURCE_ALLOCATED_OFFLINE_COUNTER_SENSOR_NAME = "AllocatedOffline"; -extern const TString RESOURCE_DROPPED_COUNTER_SENSOR_NAME = "QuotaDropped"; -extern const TString RESOURCE_ACCUMULATED_COUNTER_SENSOR_NAME = "QuotaAccumulated"; -extern const TString RESOURCE_RECEIVED_FROM_KESUS_COUNTER_SENSOR_NAME = "QuotaReceivedFromKesus"; -extern const TString REQUEST_QUEUE_TIME_SENSOR_NAME = "RequestQueueTimeMs"; -extern const TString REQUESTS_COUNT_SENSOR_NAME = "RequestsCount"; -extern const TString ELAPSED_MICROSEC_IN_STARVATION_SENSOR_NAME = "ElapsedMicrosecInStarvation"; -extern const TString REQUEST_TIME_SENSOR_NAME = "RequestTimeMs"; -extern const TString DISCONNECTS_COUNTER_SENSOR_NAME = "Disconnects"; - -constexpr double TICK_RATE_EPSILON = 0.0000000001; - -NMonitoring::IHistogramCollectorPtr GetLatencyHistogramBuckets() { - return NMonitoring::ExplicitHistogram({0, 1, 2, 5, 10, 20, 50, 100, 500, 1000, 2000, 5000, 10000, 30000, 50000}); -} - +extern const TString CONSUMED_COUNTER_NAME = "QuotaConsumed"; +extern const TString REQUESTED_COUNTER_NAME = "QuotaRequested"; +extern const TString RESOURCE_COUNTER_SENSOR_NAME = "resource"; +extern const TString QUOTER_COUNTER_SENSOR_NAME = "quoter"; +extern const TString QUOTER_SERVICE_COUNTER_SENSOR_NAME = "quoter_service"; +extern const TString RESOURCE_QUEUE_SIZE_COUNTER_SENSOR_NAME = "QueueSize"; +extern const TString RESOURCE_QUEUE_WEIGHT_COUNTER_SENSOR_NAME = "QueueWeight"; +extern const TString RESOURCE_ALLOCATED_OFFLINE_COUNTER_SENSOR_NAME = "AllocatedOffline"; +extern const TString RESOURCE_DROPPED_COUNTER_SENSOR_NAME = "QuotaDropped"; +extern const TString RESOURCE_ACCUMULATED_COUNTER_SENSOR_NAME = "QuotaAccumulated"; +extern const TString 
RESOURCE_RECEIVED_FROM_KESUS_COUNTER_SENSOR_NAME = "QuotaReceivedFromKesus"; +extern const TString REQUEST_QUEUE_TIME_SENSOR_NAME = "RequestQueueTimeMs"; +extern const TString REQUESTS_COUNT_SENSOR_NAME = "RequestsCount"; +extern const TString ELAPSED_MICROSEC_IN_STARVATION_SENSOR_NAME = "ElapsedMicrosecInStarvation"; +extern const TString REQUEST_TIME_SENSOR_NAME = "RequestTimeMs"; +extern const TString DISCONNECTS_COUNTER_SENSOR_NAME = "Disconnects"; + +constexpr double TICK_RATE_EPSILON = 0.0000000001; + +NMonitoring::IHistogramCollectorPtr GetLatencyHistogramBuckets() { + return NMonitoring::ExplicitHistogram({0, 1, 2, 5, 10, 20, 50, 100, 500, 1000, 2000, 5000, 10000, 30000, 50000}); +} + TRequest& TReqState::Get(ui32 idx) { Y_VERIFY(idx < Requests.size()); auto &x = Requests[idx]; @@ -80,7 +80,7 @@ ui32 TReqState::Allocate(TActorId source, ui64 eventCookie) { auto &x = Requests[idx]; x.Source = source; x.EventCookie = eventCookie; - x.StartTime = TActivationContext::Now(); + x.StartTime = TActivationContext::Now(); Y_VERIFY_DEBUG(x.PrevByOwner == Max<ui32>()); Y_VERIFY_DEBUG(x.NextByOwner == Max<ui32>()); @@ -112,16 +112,16 @@ void TReqState::Free(ui32 idx) { Requests[x.PrevByOwner].NextByOwner = x.NextByOwner; } - if (lastEntry) { - ByOwner.erase(x.Source); - } else { - auto byOwnerIt = ByOwner.find(x.Source); - Y_VERIFY_DEBUG(byOwnerIt != ByOwner.end()); - if (byOwnerIt->second == idx) { - byOwnerIt->second = x.NextByOwner != Max<ui32>() ? x.NextByOwner : x.PrevByOwner; - } - } - + if (lastEntry) { + ByOwner.erase(x.Source); + } else { + auto byOwnerIt = ByOwner.find(x.Source); + Y_VERIFY_DEBUG(byOwnerIt != ByOwner.end()); + if (byOwnerIt->second == idx) { + byOwnerIt->second = x.NextByOwner != Max<ui32>() ? x.NextByOwner : x.PrevByOwner; + } + } + x.NextByOwner = Max<ui32>(); x.PrevByOwner = Max<ui32>(); @@ -137,8 +137,8 @@ void TReqState::Free(ui32 idx) { x.NextDeadlineRequest = Max<ui32>(); x.Source = TActorId(); - x.Orbit.Reset(); - + x.Orbit.Reset(); + Unused.push_back(idx); } @@ -194,8 +194,8 @@ void TResState::FreeChain(ui32 headIdx) { x.ResourceId = 0; TString().swap(x.ResourceName); TString().swap(x.QuoterName); - x.StartQueueing = TInstant::Zero(); - x.StartCharging = TInstant::Zero(); + x.StartQueueing = TInstant::Zero(); + x.StartCharging = TInstant::Zero(); } } @@ -205,46 +205,46 @@ void TResource::ApplyQuotaChannel(const TEvQuota::TUpdateTick &tick) { QuotaChannels[tick.Channel] = tick; } -void TResource::MarkStartedCharging(TRequest& request, TResourceLeaf& leaf, TInstant now) { - if (leaf.StartCharging == TInstant::Zero()) { - leaf.StartCharging = now; - LWTRACK(StartCharging, request.Orbit, leaf.QuoterName, leaf.ResourceName, leaf.QuoterId, leaf.ResourceId); - if (leaf.StartQueueing == TInstant::Zero()) { // was not in queue - Counters.RequestQueueTime->Collect(0); - } else { - Counters.RequestQueueTime->Collect((now - leaf.StartQueueing).MilliSeconds()); - } - } -} - -void TResource::StartStarvation(TInstant now) { - StopStarvation(now); - StartStarvationTime = now; -} - -void TResource::StopStarvation(TInstant now) { - if (StartStarvationTime != TInstant::Zero()) { - *Counters.ElapsedMicrosecInStarvation += (now - StartStarvationTime).MicroSeconds(); - StartStarvationTime = TInstant::Zero(); - } -} - -TDuration TResource::Charge(TRequest& request, TResourceLeaf& leaf, TInstant now) { - MarkStartedCharging(request, leaf, now); - +void TResource::MarkStartedCharging(TRequest& request, TResourceLeaf& leaf, TInstant now) { + if (leaf.StartCharging == TInstant::Zero()) 
{ + leaf.StartCharging = now; + LWTRACK(StartCharging, request.Orbit, leaf.QuoterName, leaf.ResourceName, leaf.QuoterId, leaf.ResourceId); + if (leaf.StartQueueing == TInstant::Zero()) { // was not in queue + Counters.RequestQueueTime->Collect(0); + } else { + Counters.RequestQueueTime->Collect((now - leaf.StartQueueing).MilliSeconds()); + } + } +} + +void TResource::StartStarvation(TInstant now) { + StopStarvation(now); + StartStarvationTime = now; +} + +void TResource::StopStarvation(TInstant now) { + if (StartStarvationTime != TInstant::Zero()) { + *Counters.ElapsedMicrosecInStarvation += (now - StartStarvationTime).MicroSeconds(); + StartStarvationTime = TInstant::Zero(); + } +} + +TDuration TResource::Charge(TRequest& request, TResourceLeaf& leaf, TInstant now) { + MarkStartedCharging(request, leaf, now); + if (leaf.IsUsedAmount) { ChargeUsedAmount(leaf.Amount, now); Counters.RequestTime->Collect((now - request.StartTime).MilliSeconds()); return TDuration::Zero(); } - const TDuration result = Charge(leaf.Amount, now); - if (result == TDuration::Zero()) { - Counters.RequestTime->Collect((now - request.StartTime).MilliSeconds()); - } - return result; -} - + const TDuration result = Charge(leaf.Amount, now); + if (result == TDuration::Zero()) { + Counters.RequestTime->Collect((now - request.StartTime).MilliSeconds()); + } + return result; +} + void TResource::ChargeUsedAmount(double amount, TInstant now) { BLOG_T("ChargeUsedAmount \"" << Resource << "\" for " << amount << ". Balance: " << Balance @@ -263,30 +263,30 @@ void TResource::ChargeUsedAmount(double amount, TInstant now) { StartStarvation(now); } -TDuration TResource::Charge(double amount, TInstant now) { +TDuration TResource::Charge(double amount, TInstant now) { // Zero - charged // Max - not in current tick (or resource already queued) // smth b/w - delayed by pace limit - if (TickRate < TICK_RATE_EPSILON) { // zero - return TDuration::Max(); - } + if (TickRate < TICK_RATE_EPSILON) { // zero + return TDuration::Max(); + } // could be fullfilled right now? - const double ticksToFullfill = amount / TickRate; - const double durationToFullfillInUs = ticksToFullfill * static_cast<double>(TickSize.MicroSeconds()); - // TODO: calculate time for many requests (not for one). Now errors can be accumulated when big rates are used. - const TInstant timeToFullfill = LastAllocated + TDuration::MicroSeconds(lround(durationToFullfillInUs)); - - BLOG_T("Charge \"" << Resource << "\" for " << amount - << ". Balance: " << Balance - << ". FreeBalance: " << FreeBalance - << ". TicksToFullfill: " << ticksToFullfill - << ". DurationToFullfillInUs: " << durationToFullfillInUs - << ". TimeToFullfill: " << timeToFullfill - << ". Now: " << now - << ". LastAllocated: " << LastAllocated); - - if (Balance >= 0.0) { + const double ticksToFullfill = amount / TickRate; + const double durationToFullfillInUs = ticksToFullfill * static_cast<double>(TickSize.MicroSeconds()); + // TODO: calculate time for many requests (not for one). Now errors can be accumulated when big rates are used. + const TInstant timeToFullfill = LastAllocated + TDuration::MicroSeconds(lround(durationToFullfillInUs)); + + BLOG_T("Charge \"" << Resource << "\" for " << amount + << ". Balance: " << Balance + << ". FreeBalance: " << FreeBalance + << ". TicksToFullfill: " << ticksToFullfill + << ". DurationToFullfillInUs: " << durationToFullfillInUs + << ". TimeToFullfill: " << timeToFullfill + << ". Now: " << now + << ". 
LastAllocated: " << LastAllocated); + + if (Balance >= 0.0) { if (timeToFullfill <= now) { LastAllocated = Max(now - QuoterServiceConfig.ScheduleTickSize * 2, timeToFullfill); Balance -= amount; @@ -296,8 +296,8 @@ TDuration TResource::Charge(double amount, TInstant now) { if (FreeBalance > Balance) FreeBalance = Balance; - Counters.Consumed->Add(static_cast<i64>(amount)); - StopStarvation(now); + Counters.Consumed->Add(static_cast<i64>(amount)); + StopStarvation(now); return TDuration::Zero(); } @@ -308,21 +308,21 @@ TDuration TResource::Charge(double amount, TInstant now) { AmountConsumed += amount; History.Add(now, amount); - Counters.Consumed->Add(static_cast<i64>(amount)); - StopStarvation(now); + Counters.Consumed->Add(static_cast<i64>(amount)); + StopStarvation(now); return TDuration::Zero(); } } - StartStarvation(now); + StartStarvation(now); const TDuration delay = timeToFullfill - now; return (delay > TDuration::Zero()) ? delay : TDuration::Max(); } -TResource& TQuoterState::GetOrCreate(ui64 quoterId, ui64 resId, const TString& quoter, const TString& resource, const TQuoterServiceConfig "erServiceConfig) { +TResource& TQuoterState::GetOrCreate(ui64 quoterId, ui64 resId, const TString& quoter, const TString& resource, const TQuoterServiceConfig "erServiceConfig) { auto xpair = Resources.emplace(resId, nullptr); if (xpair.second) - xpair.first->second.Reset(new TResource(quoterId, resId, quoter, resource, quoterServiceConfig, Counters.QuoterCounters)); + xpair.first->second.Reset(new TResource(quoterId, resId, quoter, resource, quoterServiceConfig, Counters.QuoterCounters)); return *xpair.first->second; } @@ -331,27 +331,27 @@ bool TQuoterState::Empty() { return Resources.empty() && WaitingResource.empty() && WaitingQueueResolve.empty(); } -TQuoterService::TQuoterService(const TQuoterServiceConfig &config) - : Config(config) - , LastProcessed(TInstant::Zero()) - , StaticRatedQuoter("__StaticRatedQuoter", nullptr) - , TickScheduled(false) -{ - QUOTER_SYSTEM_DEBUG(DebugInfo->QuoterService = this); -} - -TQuoterService::~TQuoterService() { - QUOTER_SYSTEM_DEBUG(DebugInfo->QuoterService = nullptr); -} - +TQuoterService::TQuoterService(const TQuoterServiceConfig &config) + : Config(config) + , LastProcessed(TInstant::Zero()) + , StaticRatedQuoter("__StaticRatedQuoter", nullptr) + , TickScheduled(false) +{ + QUOTER_SYSTEM_DEBUG(DebugInfo->QuoterService = this); +} + +TQuoterService::~TQuoterService() { + QUOTER_SYSTEM_DEBUG(DebugInfo->QuoterService = nullptr); +} + void TQuoterService::ScheduleNextTick(TInstant requested, TResource &quores) { - TryTickSchedule(); + TryTickSchedule(); const TInstant next = TimeToGranularity(requested); const TInstant last = TimeToGranularity(quores.LastTick + quores.TickSize); - const TInstant selected = Max(next, last, LastProcessed); + const TInstant selected = Max(next, last, LastProcessed); quores.NextTick = selected; quores.LastTick = selected; - BLOG_T("Schedule next tick for \"" << quores.Resource << "\". Tick size: " << quores.TickSize << ". Time: " << quores.NextTick); + BLOG_T("Schedule next tick for \"" << quores.Resource << "\". Tick size: " << quores.TickSize << ". 
Time: " << quores.NextTick); ScheduleFeed[quores.NextTick].emplace(&quores); } @@ -364,7 +364,7 @@ TInstant TQuoterService::TimeToGranularity(TInstant rawTime) { } void TQuoterService::Bootstrap() { - TIntrusivePtr<NMonitoring::TDynamicCounters> counters = GetServiceCounters(AppData()->Counters, QUOTER_SERVICE_COUNTER_SENSOR_NAME); + TIntrusivePtr<NMonitoring::TDynamicCounters> counters = GetServiceCounters(AppData()->Counters, QUOTER_SERVICE_COUNTER_SENSOR_NAME); Counters.ActiveQuoterProxies = counters->GetCounter("ActiveQuoterProxies", false); Counters.ActiveProxyResources = counters->GetCounter("ActiveProxyResources", false); @@ -374,27 +374,27 @@ void TQuoterService::Bootstrap() { Counters.ResultOk = counters->GetCounter("ResultOk", true); Counters.ResultDeadline = counters->GetCounter("ResultDeadline", true); Counters.ResultError = counters->GetCounter("ResultError", true); - Counters.RequestLatency = counters->GetHistogram("RequestLatencyMs", GetLatencyHistogramBuckets()); - - Counters.ServiceCounters = std::move(counters); - - StaticRatedQuoter.InitCounters(Counters.ServiceCounters); - - NLwTraceMonPage::ProbeRegistry().AddProbesList(LWTRACE_GET_PROBES(QUOTER_SERVICE_PROVIDER)); - + Counters.RequestLatency = counters->GetHistogram("RequestLatencyMs", GetLatencyHistogramBuckets()); + + Counters.ServiceCounters = std::move(counters); + + StaticRatedQuoter.InitCounters(Counters.ServiceCounters); + + NLwTraceMonPage::ProbeRegistry().AddProbesList(LWTRACE_GET_PROBES(QUOTER_SERVICE_PROVIDER)); + Become(&TThis::StateFunc); } -void TQuoterService::TryTickSchedule(TInstant now) { +void TQuoterService::TryTickSchedule(TInstant now) { if (!TickScheduled) { TickScheduled = true; - LastProcessed = TimeToGranularity(now != TInstant::Zero() ? now : TActivationContext::Now()); + LastProcessed = TimeToGranularity(now != TInstant::Zero() ? 
now : TActivationContext::Now()); Schedule(Config.ScheduleTickSize, new TEvents::TEvWakeup()); } } void TQuoterService::ReplyRequest(TRequest &request, ui32 reqIdx, TEvQuota::TEvClearance::EResult resultCode) { - LWTRACK(RequestDone, request.Orbit, resultCode, request.EventCookie); + LWTRACK(RequestDone, request.Orbit, resultCode, request.EventCookie); Send(request.Source, new TEvQuota::TEvClearance(resultCode), 0, request.EventCookie); ForgetRequest(request, reqIdx); @@ -403,7 +403,7 @@ void TQuoterService::ReplyRequest(TRequest &request, ui32 reqIdx, TEvQuota::TEvC void TQuoterService::ForgetRequest(TRequest &request, ui32 reqIdx) { // request must be replied // we must not stop track request while not replied or explicitly canceled - // so only correct entry points are from ReplyRequest or from CancelRequest + // so only correct entry points are from ReplyRequest or from CancelRequest // cleanup from resource wait queue for (ui32 leafIdx = request.ResourceLeaf; leafIdx != Max<ui32>(); ) { @@ -431,7 +431,7 @@ void TQuoterService::ForgetRequest(TRequest &request, ui32 reqIdx) { if (leaf.Resource->QueueHead == Max<ui32>()) { leaf.Resource->QueueSize = 0; - leaf.Resource->QueueWeight = 0.0; + leaf.Resource->QueueWeight = 0.0; } else { leaf.Resource->QueueSize -= 1; leaf.Resource->QueueWeight -= leaf.Amount; @@ -469,26 +469,26 @@ void TQuoterService::ForgetRequest(TRequest &request, ui32 reqIdx) { } void TQuoterService::DeclineRequest(TRequest &request, ui32 reqIdx) { - Counters.ResultError->Inc(); + Counters.ResultError->Inc(); return ReplyRequest(request, reqIdx, TEvQuota::TEvClearance::EResult::UnknownResource); } -void TQuoterService::FailRequest(TRequest &request, ui32 reqIdx) { - Counters.ResultError->Inc(); - - return ReplyRequest(request, reqIdx, TEvQuota::TEvClearance::EResult::GenericError); -} - +void TQuoterService::FailRequest(TRequest &request, ui32 reqIdx) { + Counters.ResultError->Inc(); + + return ReplyRequest(request, reqIdx, TEvQuota::TEvClearance::EResult::GenericError); +} + void TQuoterService::AllowRequest(TRequest &request, ui32 reqIdx) { Counters.ResultOk->Inc(); - Counters.RequestLatency->Collect((TActivationContext::Now() - request.StartTime).MilliSeconds()); + Counters.RequestLatency->Collect((TActivationContext::Now() - request.StartTime).MilliSeconds()); return ReplyRequest(request, reqIdx, TEvQuota::TEvClearance::EResult::Success); } void TQuoterService::DeadlineRequest(TRequest &request, ui32 reqIdx) { - Counters.ResultDeadline->Inc(); + Counters.ResultDeadline->Inc(); return ReplyRequest(request, reqIdx, TEvQuota::TEvClearance::EResult::Deadline); } @@ -506,7 +506,7 @@ TQuoterService::EInitLeafStatus TQuoterService::InitSystemLeaf(const TEvQuota::T if ((leaf.ResourceId & (0x3ULL << 62)) == (1ULL << 62)) { // static rated resource const ui32 rate = (leaf.ResourceId & 0x3FFFFFFF); - auto &quores = StaticRatedQuoter.GetOrCreate(leaf.QuoterId, leaf.ResourceId, TString(), TString(), Config); + auto &quores = StaticRatedQuoter.GetOrCreate(leaf.QuoterId, leaf.ResourceId, TString(), TString(), Config); if (quores.LastAllocated == TInstant::Max()) { Counters.KnownLocalResources->Inc(); @@ -517,11 +517,11 @@ TQuoterService::EInitLeafStatus TQuoterService::InitSystemLeaf(const TEvQuota::T quores.QueueTail = Max<ui32>(); quores.LastAllocated = TInstant::Zero(); - quores.AmountConsumed = 0.0; + quores.AmountConsumed = 0.0; // NOTE: do not change `History`: we dont need it for static rate - quores.FreeBalance = 0.0; - quores.TickRate = static_cast<double>(rate); + 
quores.FreeBalance = 0.0; + quores.TickRate = static_cast<double>(rate); quores.Balance = quores.TickRate; quores.TickSize = TDuration::Seconds(1); @@ -547,25 +547,25 @@ TQuoterService::EInitLeafStatus TQuoterService::InitResourceLeaf(const TEvQuota: TQuoterState *quoter = quoterId ? Quoters.FindPtr(quoterId) : nullptr; if (quoter == nullptr) { if (!leaf.Quoter) - return EInitLeafStatus::GenericError; + return EInitLeafStatus::GenericError; auto qIndxIt = QuotersIndex.find(leaf.Quoter); if (qIndxIt == QuotersIndex.end()) { TVector<TString> path = NKikimr::SplitPath(leaf.Quoter); - if (path.empty()) { - BLOG_WARN("Empty path to quoter is provided: \"" << leaf.Quoter << "\""); - return EInitLeafStatus::GenericError; - } + if (path.empty()) { + BLOG_WARN("Empty path to quoter is provided: \"" << leaf.Quoter << "\""); + return EInitLeafStatus::GenericError; + } - if (CanonizePath(path) != leaf.Quoter) { - BLOG_WARN("Not canonized path to quoter is provided. Provided: \"" << leaf.Quoter << "\", but canonized is \"" << CanonizePath(path) << "\""); - return EInitLeafStatus::GenericError; - } + if (CanonizePath(path) != leaf.Quoter) { + BLOG_WARN("Not canonized path to quoter is provided. Provided: \"" << leaf.Quoter << "\", but canonized is \"" << CanonizePath(path) << "\""); + return EInitLeafStatus::GenericError; + } quoterId = ++QuoterIdCounter; QuotersIndex.emplace(leaf.Quoter, quoterId); - quoter = &Quoters.emplace(quoterId, TQuoterState(leaf.Quoter, Counters.ServiceCounters)).first->second; + quoter = &Quoters.emplace(quoterId, TQuoterState(leaf.Quoter, Counters.ServiceCounters)).first->second; Counters.ActiveQuoterProxies->Inc(); THolder<NSchemeCache::TSchemeCacheNavigate> req(new NSchemeCache::TSchemeCacheNavigate()); @@ -607,7 +607,7 @@ TQuoterService::EInitLeafStatus TQuoterService::InitResourceLeaf(const TEvQuota: THolder<TResource> *resHolder = leaf.ResourceId ? 
quoter->Resources.FindPtr(resourceId) : nullptr; if (resHolder == nullptr) { if (!leaf.Resource) - return EInitLeafStatus::GenericError; + return EInitLeafStatus::GenericError; if (const ui64 *rsId = quoter->ResourcesIndex.FindPtr(leaf.Resource)) { resourceId = *rsId; @@ -640,16 +640,16 @@ TQuoterService::EInitLeafStatus TQuoterService::InitResourceLeaf(const TEvQuota: } } - if ((*resHolder)->NextTick == TInstant::Zero()) { - ScheduleNextTick(TActivationContext::Now(), **resHolder); - } - + if ((*resHolder)->NextTick == TInstant::Zero()) { + ScheduleNextTick(TActivationContext::Now(), **resHolder); + } + // ok, got resource const EInitLeafStatus chargeResult = TryCharge(**resHolder, quoterId, resourceId, leaf, request, reqIdx); switch (resHolder->Get()->StatUpdatePolicy) { - case EStatUpdatePolicy::EveryTick: - case EStatUpdatePolicy::EveryActiveTick: + case EStatUpdatePolicy::EveryTick: + case EStatUpdatePolicy::EveryActiveTick: case EStatUpdatePolicy::OnActivity: FillStats(**resHolder); break; @@ -661,7 +661,7 @@ TQuoterService::EInitLeafStatus TQuoterService::InitResourceLeaf(const TEvQuota: } void TQuoterService::MarkScheduleAllocation(TResource& quores, TDuration delay, TInstant now) { - TryTickSchedule(now); + TryTickSchedule(now); Y_VERIFY(quores.NextTick != TInstant::Zero() && quores.NextTick != TInstant::Max()); Y_VERIFY(delay > TDuration::Zero()); @@ -687,12 +687,12 @@ void TQuoterService::MarkScheduleAllocation(TResource& quores, TDuration delay, } TQuoterService::EInitLeafStatus TQuoterService::TryCharge(TResource& quores, ui64 quoterId, ui64 resourceId, const TEvQuota::TResourceLeaf &leaf, TRequest &request, ui32 reqIdx) { - *quores.Counters.Requested += leaf.Amount; - ++*quores.Counters.RequestsCount; - - const TInstant now = TActivationContext::Now(); - bool startedCharge = false; - LWTRACK(ResourceQueueState, request.Orbit, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId, quores.QueueSize, quores.QueueWeight); + *quores.Counters.Requested += leaf.Amount; + ++*quores.Counters.RequestsCount; + + const TInstant now = TActivationContext::Now(); + bool startedCharge = false; + LWTRACK(ResourceQueueState, request.Orbit, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId, quores.QueueSize, quores.QueueWeight); if (leaf.IsUsedAmount) { quores.ChargeUsedAmount(leaf.Amount, now); LWTRACK(Charge, request.Orbit, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); @@ -700,16 +700,16 @@ TQuoterService::EInitLeafStatus TQuoterService::TryCharge(TResource& quores, ui6 return EInitLeafStatus::Charged; } - if (quores.QueueSize == 0) { - startedCharge = true; - const TDuration delay = quores.Charge(leaf.Amount, now); - - if (delay == TDuration::Zero()) { - LWTRACK(StartCharging, request.Orbit, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); - quores.Counters.RequestTime->Collect((now - request.StartTime).MilliSeconds()); - return EInitLeafStatus::Charged; - } + if (quores.QueueSize == 0) { + startedCharge = true; + const TDuration delay = quores.Charge(leaf.Amount, now); + if (delay == TDuration::Zero()) { + LWTRACK(StartCharging, request.Orbit, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); + quores.Counters.RequestTime->Collect((now - request.StartTime).MilliSeconds()); + return EInitLeafStatus::Charged; + } + MarkScheduleAllocation(quores, delay, now); } @@ -718,11 +718,11 @@ TQuoterService::EInitLeafStatus TQuoterService::TryCharge(TResource& quores, ui6 TResourceLeaf& resLeaf = ResState.Get(resLeafIdx); resLeaf.State = 
EResourceState::Wait; - if (startedCharge) { - quores.MarkStartedCharging(request, resLeaf, now); - } else { - resLeaf.StartQueueing = now; - } + if (startedCharge) { + quores.MarkStartedCharging(request, resLeaf, now); + } else { + resLeaf.StartQueueing = now; + } quores.QueueSize += 1; quores.QueueWeight += leaf.Amount; @@ -736,10 +736,10 @@ TQuoterService::EInitLeafStatus TQuoterService::TryCharge(TResource& quores, ui6 quores.QueueTail = resLeafIdx; quores.QueueHead = resLeafIdx; } else { - Y_VERIFY_DEBUG(ResState.Get(quores.QueueTail).NextInWaitQueue == Max<ui32>()); + Y_VERIFY_DEBUG(ResState.Get(quores.QueueTail).NextInWaitQueue == Max<ui32>()); resLeaf.PrevInWaitQueue = quores.QueueTail; ResState.Get(quores.QueueTail).NextInWaitQueue = resLeafIdx; - quores.QueueTail = resLeafIdx; + quores.QueueTail = resLeafIdx; } resLeaf.NextResourceLeaf = request.ResourceLeaf; @@ -761,7 +761,7 @@ void TQuoterService::InitialRequestProcessing(TEvQuota::TEvRequest::TPtr &ev, co Y_VERIFY(msg->Reqs.size() >= 1); bool canAllow = true; for (const auto &leaf : msg->Reqs) { - LWTRACK(RequestResource, request.Orbit, leaf.Amount, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); + LWTRACK(RequestResource, request.Orbit, leaf.Amount, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); const EInitLeafStatus initLeafStatus = (leaf.QuoterId == TEvQuota::TResourceLeaf::QuoterSystem) ? InitSystemLeaf(leaf, request, reqIdx) : @@ -770,8 +770,8 @@ void TQuoterService::InitialRequestProcessing(TEvQuota::TEvRequest::TPtr &ev, co switch (initLeafStatus) { case EInitLeafStatus::Unknown: return DeclineRequest(request, reqIdx); - case EInitLeafStatus::GenericError: - return FailRequest(request, reqIdx); + case EInitLeafStatus::GenericError: + return FailRequest(request, reqIdx); case EInitLeafStatus::Forbid: return DeadlineRequest(request, reqIdx); case EInitLeafStatus::Charged: @@ -791,9 +791,9 @@ void TQuoterService::InitialRequestProcessing(TEvQuota::TEvRequest::TPtr &ev, co if (msg->Deadline != TDuration::Max()) { const TDuration delay = Min(TDuration::Days(1), msg->Deadline); - const TInstant now = TActivationContext::Now(); - TryTickSchedule(now); - request.Deadline = TimeToGranularity(now + delay); + const TInstant now = TActivationContext::Now(); + TryTickSchedule(now); + request.Deadline = TimeToGranularity(now + delay); auto deadlineIt = ScheduleDeadline.find(request.Deadline); if (deadlineIt == ScheduleDeadline.end()) { @@ -815,22 +815,22 @@ void TQuoterService::InitialRequestProcessing(TEvQuota::TEvRequest::TPtr &ev, co } void TQuoterService::Handle(TEvQuota::TEvRequest::TPtr &ev) { - BLOG_T("Request(" << PrintEvent(ev) << ")"); - + BLOG_T("Request(" << PrintEvent(ev) << ")"); + Counters.RequestsInFly->Inc(); Counters.Requests->Inc(); TEvQuota::TEvRequest *msg = ev->Get(); const ui32 reqIdx = ReqState.Allocate(ev->Sender, ev->Cookie); TRequest &request = ReqState.Get(reqIdx); - LWTRACK(StartRequest, request.Orbit, msg->Operator, msg->Deadline, ev->Cookie); + LWTRACK(StartRequest, request.Orbit, msg->Operator, msg->Deadline, ev->Cookie); if (msg->Reqs.empty()) // request nothing? 
most probably is error so decline return DeclineRequest(request, reqIdx); // dirty processing of simple embedded resources if (msg->Reqs.size() == 1) { - const TEvQuota::TResourceLeaf &leaf = msg->Reqs[0]; + const TEvQuota::TResourceLeaf &leaf = msg->Reqs[0]; switch (msg->Operator) { case EResourceOperator::And: // only one case supported right now @@ -838,10 +838,10 @@ void TQuoterService::Handle(TEvQuota::TEvRequest::TPtr &ev) { if (leaf.QuoterId == TEvQuota::TResourceLeaf::QuoterSystem) { switch (leaf.ResourceId) { case TEvQuota::TResourceLeaf::ResourceForbid: - LWTRACK(RequestResource, request.Orbit, leaf.Amount, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); + LWTRACK(RequestResource, request.Orbit, leaf.Amount, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); return DeadlineRequest(request, reqIdx); case TEvQuota::TResourceLeaf::ResourceNocheck: - LWTRACK(RequestResource, request.Orbit, leaf.Amount, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); + LWTRACK(RequestResource, request.Orbit, leaf.Amount, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); return AllowRequest(request, reqIdx); } } @@ -849,7 +849,7 @@ void TQuoterService::Handle(TEvQuota::TEvRequest::TPtr &ev) { break; // not supported yet modes default: - LWTRACK(RequestResource, request.Orbit, leaf.Amount, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); + LWTRACK(RequestResource, request.Orbit, leaf.Amount, leaf.Quoter, leaf.Resource, leaf.QuoterId, leaf.ResourceId); return DeclineRequest(request, reqIdx); } } @@ -904,13 +904,13 @@ void TQuoterService::Handle(TEvQuota::TEvProxySession::TPtr &ev) { if (isError) { BLOG_I("resource sesson failed: " << quoter.QuoterName << ":" << resourceName); - for (ui32 reqIdx : waitingRequests) { - if (msg->Result == TEvQuota::TEvProxySession::UnknownResource) { - DeclineRequest(ReqState.Get(reqIdx), reqIdx); - } else { - FailRequest(ReqState.Get(reqIdx), reqIdx); - } - } + for (ui32 reqIdx : waitingRequests) { + if (msg->Result == TEvQuota::TEvProxySession::UnknownResource) { + DeclineRequest(ReqState.Get(reqIdx), reqIdx); + } else { + FailRequest(ReqState.Get(reqIdx), reqIdx); + } + } return; } @@ -920,7 +920,7 @@ void TQuoterService::Handle(TEvQuota::TEvProxySession::TPtr &ev) { BLOG_I("resource session established: " << quoter.QuoterName << ":" << resourceName << " as " << resourceId); // success, create resource - auto resPairIt = quoter.Resources.emplace(resourceId, new TResource(quoterId, resourceId, quoter.QuoterName, resourceName, Config, quoter.Counters.QuoterCounters)); + auto resPairIt = quoter.Resources.emplace(resourceId, new TResource(quoterId, resourceId, quoter.QuoterName, resourceName, Config, quoter.Counters.QuoterCounters)); Y_VERIFY(resPairIt.second, "must be no duplicating resources"); quoter.ResourcesIndex.emplace(resourceName, resourceId); @@ -929,7 +929,7 @@ void TQuoterService::Handle(TEvQuota::TEvProxySession::TPtr &ev) { TResource &quores = *resPairIt.first->second; quores.TickSize = msg->TickSize; quores.StatUpdatePolicy = msg->StatUpdatePolicy; - quores.LastAllocated = TInstant::Zero(); + quores.LastAllocated = TInstant::Zero(); // move requests to 'wait resource' state for (ui32 reqId : waitingRequests) { @@ -948,16 +948,16 @@ void TQuoterService::Handle(TEvQuota::TEvProxySession::TPtr &ev) { quores.QueueSize += 1; quores.QueueWeight += leaf.Amount; - quores.Counters.Requested->Add(leaf.Amount); + quores.Counters.Requested->Add(leaf.Amount); if (quores.QueueTail == Max<ui32>()) { quores.QueueTail 
= resIdx; quores.QueueHead = resIdx; } else { - Y_VERIFY_DEBUG(ResState.Get(quores.QueueTail).NextInWaitQueue == Max<ui32>()); + Y_VERIFY_DEBUG(ResState.Get(quores.QueueTail).NextInWaitQueue == Max<ui32>()); leaf.PrevInWaitQueue = quores.QueueTail; ResState.Get(quores.QueueTail).NextInWaitQueue = resIdx; - quores.QueueTail = resIdx; + quores.QueueTail = resIdx; } } // initial charge would be in first session update @@ -1007,7 +1007,7 @@ void TQuoterService::Handle(TEvQuota::TEvProxyUpdate::TPtr &ev) { if (resUpdate.ResourceState == EUpdateState::Broken || (resUpdate.ResourceState == EUpdateState::Evict && quores.QueueHead == Max<ui32>())) { - BLOG_I("closing resource on ProxyUpdate " << quoter.QuoterName << ":" << quores.Resource); + BLOG_I("closing resource on ProxyUpdate " << quoter.QuoterName << ":" << quores.Resource); Send(quoter.ProxyId, new TEvQuota::TEvProxyCloseSession(quores.Resource, quores.ResourceId)); ForbidResource(quores); @@ -1017,22 +1017,22 @@ void TQuoterService::Handle(TEvQuota::TEvProxyUpdate::TPtr &ev) { } for (auto &update : resUpdate.Update) { - if (update.Ticks == 0) { + if (update.Ticks == 0) { quores.QuotaChannels.erase(update.Channel); - } else { - Y_VERIFY(update.Rate >= 0.0); + } else { + Y_VERIFY(update.Rate >= 0.0); quores.QuotaChannels[update.Channel] = update; - } + } } - if (quores.NextTick == TInstant::Zero()) { + if (quores.NextTick == TInstant::Zero()) { FeedResource(quores); - TryTickSchedule(); - } + TryTickSchedule(); + } } if (quoter.Empty()) { - BLOG_I("closing quoter on ProxyUpdate as no activity left " << quoter.QuoterName); + BLOG_I("closing quoter on ProxyUpdate as no activity left " << quoter.QuoterName); return BreakQuoter(quoterIt); } } @@ -1072,7 +1072,7 @@ void TQuoterService::CreateKesusQuoter(NSchemeCache::TSchemeCacheNavigate::TEntr return BreakQuoter(indexIt, quoterIt); } - quoter.ProxyId = Register(CreateKesusQuoterProxy(quoterId, navigate, SelfId()), TMailboxType::HTSwap, AppData()->UserPoolId); + quoter.ProxyId = Register(CreateKesusQuoterProxy(quoterId, navigate, SelfId()), TMailboxType::HTSwap, AppData()->UserPoolId); TSet<ui32> waitingQueueResolve(std::move(quoter.WaitingQueueResolve)); for (ui32 reqIdx : waitingQueueResolve) { @@ -1169,17 +1169,17 @@ void TQuoterService::CheckRequest(ui32 reqIdx) { void TQuoterService::FillStats(TResource &quores) { auto &dq = StatsToPublish[quores.QuoterId]; - const double expectedRate = -1.0; - const double cap = -1.0; + const double expectedRate = -1.0; + const double cap = -1.0; dq.emplace_back(quores.ResourceId, 0, quores.AmountConsumed, quores.History, quores.QueueSize, quores.QueueWeight, expectedRate, cap); - quores.AmountConsumed = 0.0; + quores.AmountConsumed = 0.0; quores.History.Clear(); } void TQuoterService::FeedResource(TResource &quores) { - quores.Balance = 0.0; - quores.FreeBalance = 0.0; - quores.TickRate = 0.0; + quores.Balance = 0.0; + quores.FreeBalance = 0.0; + quores.TickRate = 0.0; for (auto it = quores.QuotaChannels.begin(), end = quores.QuotaChannels.end(); it != end;) { auto &quota = it->second; @@ -1206,20 +1206,20 @@ void TQuoterService::FeedResource(TResource &quores) { } } - BLOG_T("Feed resource \"" << quores.Resource << "\". Balance: " << quores.Balance << ". FreeBalance: " << quores.FreeBalance); - LWPROBE(FeedResource, - quores.Quoter, - quores.Resource, - quores.QuoterId, - quores.ResourceId, - quores.Balance, - quores.FreeBalance); - + BLOG_T("Feed resource \"" << quores.Resource << "\". Balance: " << quores.Balance << ". 
FreeBalance: " << quores.FreeBalance); + LWPROBE(FeedResource, + quores.Quoter, + quores.Resource, + quores.QuoterId, + quores.ResourceId, + quores.Balance, + quores.FreeBalance); + if (quores.QueueTail == Max<ui32>()) { quores.NextTick = TInstant::Zero(); } else { // must recheck resource allocation - ScheduleNextTick(quores.NextTick ? quores.NextTick + quores.TickSize : TActivationContext::Now(), quores); + ScheduleNextTick(quores.NextTick ? quores.NextTick + quores.TickSize : TActivationContext::Now(), quores); AllocateResource(quores); } @@ -1241,13 +1241,13 @@ void TQuoterService::FeedResource(TResource &quores) { } void TQuoterService::AllocateResource(TResource &quores) { - BLOG_T("Allocate resource \"" << quores.Resource << "\""); + BLOG_T("Allocate resource \"" << quores.Resource << "\""); const TInstant now = TActivationContext::Now(); - ui64 requestsProcessed = 0; - const double prevAmountConsumed = quores.AmountConsumed; + ui64 requestsProcessed = 0; + const double prevAmountConsumed = quores.AmountConsumed; while (quores.QueueHead != Max<ui32>()) { TResourceLeaf &leaf = ResState.Get(quores.QueueHead); - TDuration delay = quores.Charge(ReqState.Get(leaf.RequestIdx), leaf, now); + TDuration delay = quores.Charge(ReqState.Get(leaf.RequestIdx), leaf, now); if (delay == TDuration::Zero()) { // resource available and charged @@ -1266,7 +1266,7 @@ void TQuoterService::AllocateResource(TResource &quores) { quores.QueueTail = Max<ui32>(); quores.QueueSize = 0; - quores.QueueWeight = 0.0; + quores.QueueWeight = 0.0; } leaf.NextInWaitQueue = Max<ui32>(); @@ -1275,28 +1275,28 @@ void TQuoterService::AllocateResource(TResource &quores) { leaf.State = EResourceState::Cleared; CheckRequest(leaf.RequestIdx); - ++requestsProcessed; + ++requestsProcessed; } else { MarkScheduleAllocation(quores, delay, now); - break; + break; } } - LWPROBE(AllocateResource, - quores.Quoter, - quores.Resource, - quores.QuoterId, - quores.ResourceId, - requestsProcessed, - quores.AmountConsumed - prevAmountConsumed, - quores.QueueSize, - quores.QueueWeight, - quores.Balance, - quores.FreeBalance); + LWPROBE(AllocateResource, + quores.Quoter, + quores.Resource, + quores.QuoterId, + quores.ResourceId, + requestsProcessed, + quores.AmountConsumed - prevAmountConsumed, + quores.QueueSize, + quores.QueueWeight, + quores.Balance, + quores.FreeBalance); } void TQuoterService::HandleTick() { - const TInstant until = TimeToGranularity(TActivationContext::Now()); - while (LastProcessed < until) { + const TInstant until = TimeToGranularity(TActivationContext::Now()); + while (LastProcessed < until) { // process resource allocation auto allocIt = ScheduleAllocation.find(LastProcessed); if (allocIt != ScheduleAllocation.end()) { @@ -1342,8 +1342,8 @@ void TQuoterService::HandleTick() { if (ScheduleAllocation || ScheduleFeed || ScheduleDeadline) { Schedule(Config.ScheduleTickSize, new TEvents::TEvWakeup()); - } else { - TickScheduled = false; + } else { + TickScheduled = false; } } @@ -1356,37 +1356,37 @@ void TQuoterService::PublishStats() { StatsToPublish.clear(); } -TString TQuoterService::PrintEvent(const TEvQuota::TEvRequest::TPtr& ev) { - const auto& req = *ev->Get(); - TStringBuilder ret; - ret << "{ Operator: " << req.Operator - << " Deadline: "; - if (req.Deadline == TDuration::Max()) { - ret << "no"; - } else if (req.Deadline == TDuration::Zero()) { - ret << "0"; - } else { - ret << req.Deadline; - } - ret << " Cookie: " << ev->Cookie; - ret << " ["; - for (size_t i = 0; i < req.Reqs.size(); ++i) { - const auto& leaf 
= req.Reqs[i]; - if (i > 0) { - ret << ","; - } - ret << " { " << leaf.Amount << ", "; - if (leaf.Quoter) { - ret << "\"" << leaf.Quoter << "\":\"" << leaf.Resource << "\""; - } else { - ret << leaf.QuoterId << ":" << leaf.ResourceId; - } - ret << " }"; - } - ret << " ] }"; - return std::move(ret); -} - +TString TQuoterService::PrintEvent(const TEvQuota::TEvRequest::TPtr& ev) { + const auto& req = *ev->Get(); + TStringBuilder ret; + ret << "{ Operator: " << req.Operator + << " Deadline: "; + if (req.Deadline == TDuration::Max()) { + ret << "no"; + } else if (req.Deadline == TDuration::Zero()) { + ret << "0"; + } else { + ret << req.Deadline; + } + ret << " Cookie: " << ev->Cookie; + ret << " ["; + for (size_t i = 0; i < req.Reqs.size(); ++i) { + const auto& leaf = req.Reqs[i]; + if (i > 0) { + ret << ","; + } + ret << " { " << leaf.Amount << ", "; + if (leaf.Quoter) { + ret << "\"" << leaf.Quoter << "\":\"" << leaf.Resource << "\""; + } else { + ret << leaf.QuoterId << ":" << leaf.ResourceId; + } + ret << " }"; + } + ret << " ] }"; + return std::move(ret); +} + } // namespace NQuoter IActor* CreateQuoterService(const TQuoterServiceConfig &config) { diff --git a/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp b/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp index a04106743e0..db8b8c14cff 100644 --- a/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp +++ b/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp @@ -1,40 +1,40 @@ -#include "options.h" -#include "server.h" - +#include "options.h" +#include "server.h" + #include <library/cpp/colorizer/colors.h> - -using namespace NKikimr; - -void PrintResults(const TOptions& opts, const TRequestStats& stats) { - opts.PrintOpts(); - - auto PrintValue = [](const char* name, const auto& value) { - Cout << NColorizer::StdOut().Blue() - << name << NColorizer::StdOut().Default() << ": " - << NColorizer::StdOut().Green() << value << NColorizer::StdOut().Default() << Endl; - }; - - const size_t reqs = stats.RequestsCount; - const size_t resps = stats.ResponsesCount; - PrintValue("Requests sent", reqs); - PrintValue("Responses received", resps); - PrintValue("OK responses", stats.OkResponses.load()); - PrintValue("Deadline responses", stats.DeadlineResponses.load()); - const double seconds = static_cast<double>(opts.TestTime.MicroSeconds()) / 1000000.0; - const size_t reqsPerSecond = static_cast<size_t>(static_cast<double>(resps) / seconds); - const size_t respsPerSecond = static_cast<size_t>(static_cast<double>(resps) / seconds); - PrintValue("Requests per second", reqsPerSecond); - PrintValue("Responses per second", respsPerSecond); - - const size_t lostRequests = reqs - resps; - PrintValue("Requests lost (no response)", lostRequests); - PrintValue("Percent of lost requests", static_cast<double>(lostRequests) / static_cast<double>(reqs) * 100.0); -} - -int main(int argc, const char* argv[]) { - TOptions opts(argc, argv); - TTestServer server(opts); - TRequestStats stats; - server.RunQuotaRequesters(stats); - PrintResults(opts, stats); -} + +using namespace NKikimr; + +void PrintResults(const TOptions& opts, const TRequestStats& stats) { + opts.PrintOpts(); + + auto PrintValue = [](const char* name, const auto& value) { + Cout << NColorizer::StdOut().Blue() + << name << NColorizer::StdOut().Default() << ": " + << NColorizer::StdOut().Green() << value << NColorizer::StdOut().Default() << Endl; + }; + + const size_t reqs = stats.RequestsCount; + const size_t resps = stats.ResponsesCount; + PrintValue("Requests sent", reqs); + 
PrintValue("Responses received", resps); + PrintValue("OK responses", stats.OkResponses.load()); + PrintValue("Deadline responses", stats.DeadlineResponses.load()); + const double seconds = static_cast<double>(opts.TestTime.MicroSeconds()) / 1000000.0; + const size_t reqsPerSecond = static_cast<size_t>(static_cast<double>(resps) / seconds); + const size_t respsPerSecond = static_cast<size_t>(static_cast<double>(resps) / seconds); + PrintValue("Requests per second", reqsPerSecond); + PrintValue("Responses per second", respsPerSecond); + + const size_t lostRequests = reqs - resps; + PrintValue("Requests lost (no response)", lostRequests); + PrintValue("Percent of lost requests", static_cast<double>(lostRequests) / static_cast<double>(reqs) * 100.0); +} + +int main(int argc, const char* argv[]) { + TOptions opts(argc, argv); + TTestServer server(opts); + TRequestStats stats; + server.RunQuotaRequesters(stats); + PrintResults(opts, stats); +} diff --git a/ydb/core/quoter/quoter_service_bandwidth_test/options.h b/ydb/core/quoter/quoter_service_bandwidth_test/options.h index 6dd98ebaea1..1186853512c 100644 --- a/ydb/core/quoter/quoter_service_bandwidth_test/options.h +++ b/ydb/core/quoter/quoter_service_bandwidth_test/options.h @@ -1,98 +1,98 @@ -#pragma once +#pragma once #include <library/cpp/getopt/opt.h> #include <library/cpp/colorizer/colors.h> - -#include <util/datetime/base.h> - -#include <random> - -namespace NKikimr { - -struct TOptions { - size_t KesusCount = 0; - size_t ResourcePerKesus = 0; - size_t RootResourceSpeedLimit = 0; - - size_t LocalResourceCount = 0; - ui32 LocalResourceQuotaRate = 0; - - TDuration TestTime; - size_t RequestRate = 0; - TDuration QuotaRequestDeadline; - - mutable std::seed_seq SeedSeq; - - TOptions(int argc, const char *argv[]) - : SeedSeq({std::random_device()(), std::random_device()(), std::random_device()()}) - { - ParseOptions(argc, argv); - } - - void ParseOptions(int argc, const char *argv[]) { - NLastGetopt::TOpts opts; - opts.SetTitle("Quoter service bandwidth test program"); - opts.SetFreeArgsNum(0); - opts.AddHelpOption('h'); - opts.AddVersionOption(); - - opts.AddLongOption('k', "kesus-count", "kesus count in test") - .DefaultValue(100) - .RequiredArgument("COUNT") - .StoreResult(&KesusCount); - opts.AddLongOption('p', "resource-per-kesus", "resource count per each kesus") - .DefaultValue(100) - .RequiredArgument("COUNT") - .StoreResult(&ResourcePerKesus); - opts.AddLongOption('s', "root-resource-speed-limit", "kesus root resource speed limit") - .DefaultValue(100000) - .RequiredArgument("RATE") - .StoreResult(&RootResourceSpeedLimit); - opts.AddLongOption('l', "local-resource-count", "local resource count") - .DefaultValue(100) - .RequiredArgument("COUNT") - .StoreResult(&LocalResourceCount); - opts.AddLongOption('q', "local-resource-speed-limit", "local resource speed limit") - .DefaultValue(100000) - .RequiredArgument("RATE") - .StoreResult(&LocalResourceQuotaRate); - opts.AddLongOption('t', "test-time", "test time") - .DefaultValue(TDuration::Seconds(10)) - .RequiredArgument("DURATION") - .StoreResult(&TestTime); - opts.AddLongOption('r', "request-rate", "request rate") - .DefaultValue(100) - .RequiredArgument("RATE") - .StoreResult(&RequestRate); - opts.AddLongOption('d', "quota-request-deadline", "quota request deadline") - .DefaultValue(TDuration::MilliSeconds(150)) - .RequiredArgument("DURATION") - .StoreResult(&QuotaRequestDeadline); - - NLastGetopt::TOptsParseResult res(&opts, argc, argv); - - CheckOptions(); - - //PrintOpts(); - } - - 
void CheckOptions() { - Y_VERIFY(RequestRate > 0, "Zero request rate"); - } - - void PrintOpts() const { - auto PrintOption = [](const char* name, const auto& value) { - Cerr << NColorizer::StdErr().Red() << name << NColorizer::StdErr().Default() << ": " - << NColorizer::StdErr().Green() << value << NColorizer::StdErr().Default() << Endl; - }; - PrintOption("Kesus count", KesusCount); - PrintOption("Resources per kesus", ResourcePerKesus); - PrintOption("Root kesus resource speed limit", RootResourceSpeedLimit); - PrintOption("Local resource count", LocalResourceCount); - PrintOption("Local resource quota rate", LocalResourceQuotaRate); - PrintOption("Quota request rate per resource", RequestRate); - PrintOption("Quota request deadline", QuotaRequestDeadline); - PrintOption("Test time", TestTime); - } -}; - -} // namespace NKikimr + +#include <util/datetime/base.h> + +#include <random> + +namespace NKikimr { + +struct TOptions { + size_t KesusCount = 0; + size_t ResourcePerKesus = 0; + size_t RootResourceSpeedLimit = 0; + + size_t LocalResourceCount = 0; + ui32 LocalResourceQuotaRate = 0; + + TDuration TestTime; + size_t RequestRate = 0; + TDuration QuotaRequestDeadline; + + mutable std::seed_seq SeedSeq; + + TOptions(int argc, const char *argv[]) + : SeedSeq({std::random_device()(), std::random_device()(), std::random_device()()}) + { + ParseOptions(argc, argv); + } + + void ParseOptions(int argc, const char *argv[]) { + NLastGetopt::TOpts opts; + opts.SetTitle("Quoter service bandwidth test program"); + opts.SetFreeArgsNum(0); + opts.AddHelpOption('h'); + opts.AddVersionOption(); + + opts.AddLongOption('k', "kesus-count", "kesus count in test") + .DefaultValue(100) + .RequiredArgument("COUNT") + .StoreResult(&KesusCount); + opts.AddLongOption('p', "resource-per-kesus", "resource count per each kesus") + .DefaultValue(100) + .RequiredArgument("COUNT") + .StoreResult(&ResourcePerKesus); + opts.AddLongOption('s', "root-resource-speed-limit", "kesus root resource speed limit") + .DefaultValue(100000) + .RequiredArgument("RATE") + .StoreResult(&RootResourceSpeedLimit); + opts.AddLongOption('l', "local-resource-count", "local resource count") + .DefaultValue(100) + .RequiredArgument("COUNT") + .StoreResult(&LocalResourceCount); + opts.AddLongOption('q', "local-resource-speed-limit", "local resource speed limit") + .DefaultValue(100000) + .RequiredArgument("RATE") + .StoreResult(&LocalResourceQuotaRate); + opts.AddLongOption('t', "test-time", "test time") + .DefaultValue(TDuration::Seconds(10)) + .RequiredArgument("DURATION") + .StoreResult(&TestTime); + opts.AddLongOption('r', "request-rate", "request rate") + .DefaultValue(100) + .RequiredArgument("RATE") + .StoreResult(&RequestRate); + opts.AddLongOption('d', "quota-request-deadline", "quota request deadline") + .DefaultValue(TDuration::MilliSeconds(150)) + .RequiredArgument("DURATION") + .StoreResult(&QuotaRequestDeadline); + + NLastGetopt::TOptsParseResult res(&opts, argc, argv); + + CheckOptions(); + + //PrintOpts(); + } + + void CheckOptions() { + Y_VERIFY(RequestRate > 0, "Zero request rate"); + } + + void PrintOpts() const { + auto PrintOption = [](const char* name, const auto& value) { + Cerr << NColorizer::StdErr().Red() << name << NColorizer::StdErr().Default() << ": " + << NColorizer::StdErr().Green() << value << NColorizer::StdErr().Default() << Endl; + }; + PrintOption("Kesus count", KesusCount); + PrintOption("Resources per kesus", ResourcePerKesus); + PrintOption("Root kesus resource speed limit", RootResourceSpeedLimit); + 
PrintOption("Local resource count", LocalResourceCount); + PrintOption("Local resource quota rate", LocalResourceQuotaRate); + PrintOption("Quota request rate per resource", RequestRate); + PrintOption("Quota request deadline", QuotaRequestDeadline); + PrintOption("Test time", TestTime); + } +}; + +} // namespace NKikimr diff --git a/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp b/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp index 1a48d8211a2..1199e0e2a21 100644 --- a/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp +++ b/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp @@ -1,118 +1,118 @@ -#include "quota_requester.h" +#include "quota_requester.h" #include <ydb/core/base/quoter.h> - -#include <cmath> - -namespace NKikimr { - -TRequestDistribution::TRequestDistribution(std::seed_seq& seedSeq, size_t requestRate) - : RandomEngine(seedSeq) - , Distrib(0.0, 1.0) - , LambdaCoefficient(-1000000.0 / static_cast<double>(requestRate)) -{ -} - -TDuration TRequestDistribution::GetNextRequestTimeDelta() { - double rand = Distrib(RandomEngine); - while (!rand) { // 0.0 is not OK - rand = Distrib(RandomEngine); - } - return TDuration::MicroSeconds(static_cast<ui64>(LambdaCoefficient * std::log(rand))); -} - + +#include <cmath> + +namespace NKikimr { + +TRequestDistribution::TRequestDistribution(std::seed_seq& seedSeq, size_t requestRate) + : RandomEngine(seedSeq) + , Distrib(0.0, 1.0) + , LambdaCoefficient(-1000000.0 / static_cast<double>(requestRate)) +{ +} + +TDuration TRequestDistribution::GetNextRequestTimeDelta() { + double rand = Distrib(RandomEngine); + while (!rand) { // 0.0 is not OK + rand = Distrib(RandomEngine); + } + return TDuration::MicroSeconds(static_cast<ui64>(LambdaCoefficient * std::log(rand))); +} + TBaseQuotaRequester::TBaseQuotaRequester(const TOptions& opts, TRequestStats& stats, TActorId parent) - : Opts(opts) - , Stats(stats) - , Parent(parent) - , Distribution(Opts.SeedSeq, Opts.RequestRate) -{ -} - -void TBaseQuotaRequester::Bootstrap(const NActors::TActorContext&) { - Become(&TBaseQuotaRequester::StateFunc); - - QuotaRequestTime = StartTime = TActivationContext::Now(); - SleepUntilNextRequest(QuotaRequestDelta = Distribution.GetNextRequestTimeDelta()); -} - -void TBaseQuotaRequester::PassAway() { - Send(Parent, new TEvents::TEvWakeup()); - TActorBootstrapped::PassAway(); -} - -void TBaseQuotaRequester::Handle(TEvQuota::TEvClearance::TPtr& ev) { - ++Stats.ResponsesCount; - switch (ev->Get()->Result) { - case TEvQuota::TEvClearance::EResult::Deadline: - ++Stats.DeadlineResponses; - break; - case TEvQuota::TEvClearance::EResult::Success: - ++Stats.OkResponses; - break; - default: - Y_FAIL("Error result"); - } -} - -void TBaseQuotaRequester::Handle(TEvents::TEvWakeup::TPtr&) { - if (TActivationContext::Now() - StartTime < Opts.TestTime) { - RequestQuota(); - while (true) { - QuotaRequestDelta = Distribution.GetNextRequestTimeDelta(); - const TInstant now = TActivationContext::Now(); - const TDuration passed = now - QuotaRequestTime; - if (passed >= QuotaRequestDelta) { - RequestQuota(); - } else { - SleepUntilNextRequest(QuotaRequestDelta - passed); - break; - } - } - } else { - if (WaitingForDeadlinedRequests || !Opts.QuotaRequestDeadline) { - PassAway(); - } else { - WaitingForDeadlinedRequests = true; - Schedule(Opts.QuotaRequestDeadline, new TEvents::TEvWakeup()); - } - } -} - -void TBaseQuotaRequester::RequestQuota() { - Send(MakeQuoterServiceID(), MakeQuoterRequest()); - ++Stats.RequestsCount; - 
QuotaRequestTime += QuotaRequestDelta; -} - -void TBaseQuotaRequester::SleepUntilNextRequest(TDuration duration) { - Schedule(duration, new TEvents::TEvWakeup()); -} - + : Opts(opts) + , Stats(stats) + , Parent(parent) + , Distribution(Opts.SeedSeq, Opts.RequestRate) +{ +} + +void TBaseQuotaRequester::Bootstrap(const NActors::TActorContext&) { + Become(&TBaseQuotaRequester::StateFunc); + + QuotaRequestTime = StartTime = TActivationContext::Now(); + SleepUntilNextRequest(QuotaRequestDelta = Distribution.GetNextRequestTimeDelta()); +} + +void TBaseQuotaRequester::PassAway() { + Send(Parent, new TEvents::TEvWakeup()); + TActorBootstrapped::PassAway(); +} + +void TBaseQuotaRequester::Handle(TEvQuota::TEvClearance::TPtr& ev) { + ++Stats.ResponsesCount; + switch (ev->Get()->Result) { + case TEvQuota::TEvClearance::EResult::Deadline: + ++Stats.DeadlineResponses; + break; + case TEvQuota::TEvClearance::EResult::Success: + ++Stats.OkResponses; + break; + default: + Y_FAIL("Error result"); + } +} + +void TBaseQuotaRequester::Handle(TEvents::TEvWakeup::TPtr&) { + if (TActivationContext::Now() - StartTime < Opts.TestTime) { + RequestQuota(); + while (true) { + QuotaRequestDelta = Distribution.GetNextRequestTimeDelta(); + const TInstant now = TActivationContext::Now(); + const TDuration passed = now - QuotaRequestTime; + if (passed >= QuotaRequestDelta) { + RequestQuota(); + } else { + SleepUntilNextRequest(QuotaRequestDelta - passed); + break; + } + } + } else { + if (WaitingForDeadlinedRequests || !Opts.QuotaRequestDeadline) { + PassAway(); + } else { + WaitingForDeadlinedRequests = true; + Schedule(Opts.QuotaRequestDeadline, new TEvents::TEvWakeup()); + } + } +} + +void TBaseQuotaRequester::RequestQuota() { + Send(MakeQuoterServiceID(), MakeQuoterRequest()); + ++Stats.RequestsCount; + QuotaRequestTime += QuotaRequestDelta; +} + +void TBaseQuotaRequester::SleepUntilNextRequest(TDuration duration) { + Schedule(duration, new TEvents::TEvWakeup()); +} + TKesusQuotaRequester::TKesusQuotaRequester(const NKikimr::TOptions& opts, NKikimr::TRequestStats& stats, TActorId parent, size_t kesusIndex, size_t resourceIndex) - : TBaseQuotaRequester(opts, stats, parent) - , KesusPath(TTestServer::GetKesusPath(kesusIndex)) - , ResourcePath(TTestServer::GetKesusResource(resourceIndex)) -{ -} - -THolder<TEvQuota::TEvRequest> TKesusQuotaRequester::MakeQuoterRequest() { - TVector<TEvQuota::TResourceLeaf> reqs = { - TEvQuota::TResourceLeaf(KesusPath, ResourcePath, 1.0) - }; - return MakeHolder<TEvQuota::TEvRequest>(TEvQuota::EResourceOperator::And, std::move(reqs), Opts.QuotaRequestDeadline); -} - + : TBaseQuotaRequester(opts, stats, parent) + , KesusPath(TTestServer::GetKesusPath(kesusIndex)) + , ResourcePath(TTestServer::GetKesusResource(resourceIndex)) +{ +} + +THolder<TEvQuota::TEvRequest> TKesusQuotaRequester::MakeQuoterRequest() { + TVector<TEvQuota::TResourceLeaf> reqs = { + TEvQuota::TResourceLeaf(KesusPath, ResourcePath, 1.0) + }; + return MakeHolder<TEvQuota::TEvRequest>(TEvQuota::EResourceOperator::And, std::move(reqs), Opts.QuotaRequestDeadline); +} + TLocalResourceQuotaRequester::TLocalResourceQuotaRequester(const NKikimr::TOptions& opts, NKikimr::TRequestStats& stats, TActorId parent, size_t resourceIndex) - : TBaseQuotaRequester(opts, stats, parent) - , ResourceId(TEvQuota::TResourceLeaf::MakeTaggedRateRes(resourceIndex, Opts.LocalResourceQuotaRate)) -{ -} - -THolder<TEvQuota::TEvRequest> TLocalResourceQuotaRequester::MakeQuoterRequest() { - TVector<TEvQuota::TResourceLeaf> reqs = { - 
TEvQuota::TResourceLeaf(TEvQuota::TResourceLeaf::QuoterSystem, ResourceId, 1.0) - }; - return MakeHolder<TEvQuota::TEvRequest>(TEvQuota::EResourceOperator::And, std::move(reqs), Opts.QuotaRequestDeadline); -} - -} // namespace NKikimr + : TBaseQuotaRequester(opts, stats, parent) + , ResourceId(TEvQuota::TResourceLeaf::MakeTaggedRateRes(resourceIndex, Opts.LocalResourceQuotaRate)) +{ +} + +THolder<TEvQuota::TEvRequest> TLocalResourceQuotaRequester::MakeQuoterRequest() { + TVector<TEvQuota::TResourceLeaf> reqs = { + TEvQuota::TResourceLeaf(TEvQuota::TResourceLeaf::QuoterSystem, ResourceId, 1.0) + }; + return MakeHolder<TEvQuota::TEvRequest>(TEvQuota::EResourceOperator::And, std::move(reqs), Opts.QuotaRequestDeadline); +} + +} // namespace NKikimr diff --git a/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.h b/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.h index 6f21fa652ad..6a0341dd827 100644 --- a/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.h +++ b/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.h @@ -1,82 +1,82 @@ -#pragma once -#include "options.h" -#include "server.h" - +#pragma once +#include "options.h" +#include "server.h" + #include <ydb/core/base/quoter.h> - + #include <library/cpp/actors/core/actor_bootstrapped.h> - -#include <random> - -namespace NKikimr { - -class TRequestDistribution { -public: - TRequestDistribution(std::seed_seq& seedSeq, size_t requestRate); - - TDuration GetNextRequestTimeDelta(); - -private: - std::mt19937_64 RandomEngine; - std::uniform_real_distribution<> Distrib; - double LambdaCoefficient; -}; - -class TBaseQuotaRequester : public NActors::TActorBootstrapped<TBaseQuotaRequester> { -public: + +#include <random> + +namespace NKikimr { + +class TRequestDistribution { +public: + TRequestDistribution(std::seed_seq& seedSeq, size_t requestRate); + + TDuration GetNextRequestTimeDelta(); + +private: + std::mt19937_64 RandomEngine; + std::uniform_real_distribution<> Distrib; + double LambdaCoefficient; +}; + +class TBaseQuotaRequester : public NActors::TActorBootstrapped<TBaseQuotaRequester> { +public: TBaseQuotaRequester(const TOptions& opts, TRequestStats& stats, TActorId parent); - - void Bootstrap(const NActors::TActorContext& ctx); - - STFUNC(StateFunc) { - Y_UNUSED(ctx); - switch (ev->GetTypeRewrite()) { - hFunc(TEvQuota::TEvClearance, Handle); - hFunc(TEvents::TEvWakeup, Handle); - } - } - -protected: - virtual THolder<TEvQuota::TEvRequest> MakeQuoterRequest() = 0; - void RequestQuota(); - void SleepUntilNextRequest(TDuration duration); - - void Handle(TEvQuota::TEvClearance::TPtr&); - void Handle(TEvents::TEvWakeup::TPtr&); - - void PassAway() override; - -protected: - const TOptions& Opts; - TRequestStats& Stats; + + void Bootstrap(const NActors::TActorContext& ctx); + + STFUNC(StateFunc) { + Y_UNUSED(ctx); + switch (ev->GetTypeRewrite()) { + hFunc(TEvQuota::TEvClearance, Handle); + hFunc(TEvents::TEvWakeup, Handle); + } + } + +protected: + virtual THolder<TEvQuota::TEvRequest> MakeQuoterRequest() = 0; + void RequestQuota(); + void SleepUntilNextRequest(TDuration duration); + + void Handle(TEvQuota::TEvClearance::TPtr&); + void Handle(TEvents::TEvWakeup::TPtr&); + + void PassAway() override; + +protected: + const TOptions& Opts; + TRequestStats& Stats; const TActorId Parent; - - TInstant StartTime; - TRequestDistribution Distribution; - TInstant QuotaRequestTime; - TDuration QuotaRequestDelta; - bool WaitingForDeadlinedRequests = false; -}; - -class TKesusQuotaRequester : public 
TBaseQuotaRequester { -public: + + TInstant StartTime; + TRequestDistribution Distribution; + TInstant QuotaRequestTime; + TDuration QuotaRequestDelta; + bool WaitingForDeadlinedRequests = false; +}; + +class TKesusQuotaRequester : public TBaseQuotaRequester { +public: TKesusQuotaRequester(const TOptions& opts, TRequestStats& stats, TActorId parent, size_t kesusIndex, size_t resourceIndex); - - THolder<TEvQuota::TEvRequest> MakeQuoterRequest() override; - -private: - TString KesusPath; - TString ResourcePath; -}; - -class TLocalResourceQuotaRequester : public TBaseQuotaRequester { -public: + + THolder<TEvQuota::TEvRequest> MakeQuoterRequest() override; + +private: + TString KesusPath; + TString ResourcePath; +}; + +class TLocalResourceQuotaRequester : public TBaseQuotaRequester { +public: TLocalResourceQuotaRequester(const TOptions& opts, TRequestStats& stats, TActorId parent, size_t resourceIndex); - - THolder<TEvQuota::TEvRequest> MakeQuoterRequest() override; - -private: - ui64 ResourceId; -}; - -} // namespace NKikimr + + THolder<TEvQuota::TEvRequest> MakeQuoterRequest() override; + +private: + ui64 ResourceId; +}; + +} // namespace NKikimr diff --git a/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp b/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp index 4b301ea3edf..c5544e91055 100644 --- a/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp +++ b/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp @@ -1,140 +1,140 @@ -#include "server.h" -#include "quota_requester.h" +#include "server.h" +#include "quota_requester.h" #include <ydb/core/quoter/quoter_service.h> #include <ydb/core/quoter/quoter_service_impl.h> - + #include <ydb/core/kesus/tablet/events.h> - -namespace NKikimr { - -TTestServer::TTestServer(const TOptions &opts) - : Opts(opts) - , MsgBusPort(PortManager.GetPort()) - , ServerSettings(MakeIntrusive<Tests::TServerSettings>(MsgBusPort)) -{ - SetupSettings(); - RunServer(); -} - -std::pair<TString, TString> TTestServer::GetKesusPathAndName(size_t i) { - return {Tests::TestDomainName, TStringBuilder() << "Kesus_" << i}; -} - -TString TTestServer::GetKesusPath(size_t i) { - return TStringBuilder() << "/" << Tests::TestDomainName << "/Kesus_" << i; -} - -TString TTestServer::GetKesusRootResource() { - return "Root"; -} - -TString TTestServer::GetKesusResource(size_t i) { - return TStringBuilder() << GetKesusRootResource() << "/Resource_" << i; -} - -void TTestServer::SetupSettings() { - (*ServerSettings) - .SetUseRealThreads(true); -} - -void TTestServer::RunServer() { - Server = MakeIntrusive<Tests::TServer>(ServerSettings, true); - Client = MakeHolder<Tests::TClient>(*ServerSettings); - - Server->GetRuntime()->SetDispatchTimeout(TDuration::Minutes(10)); - - Client->InitRootScheme(); - - RegisterQuoterService(); - CreateKesusesAndResources(); -} - -void TTestServer::RunQuotaRequesters(TRequestStats& stats) { - TTestActorRuntime* const runtime = Server->GetRuntime(); - const ui32 userPoolId = runtime->GetAppData().UserPoolId; - //Cerr << "User pool: " << userPoolId << Endl; - const ui32 nodeIndex = 0; - size_t requesters = 0; - for (size_t kesusIndex = 0; kesusIndex < Opts.KesusCount; ++kesusIndex) { - for (size_t resIndex = 0; resIndex < Opts.ResourcePerKesus; ++resIndex) { - runtime->Register(new TKesusQuotaRequester(Opts, stats, GetEdgeActor(), kesusIndex, resIndex), nodeIndex, userPoolId); - ++requesters; - } - } - for (size_t localResIndex = 0; localResIndex < Opts.LocalResourceCount; ++localResIndex) { - runtime->Register(new 
TLocalResourceQuotaRequester(Opts, stats, GetEdgeActor(), localResIndex), nodeIndex, userPoolId); - ++requesters; - } - - while (requesters) { - runtime->GrabEdgeEvent<TEvents::TEvWakeup>(GetEdgeActor())->Release(); - --requesters; - } -} - -void TTestServer::RegisterQuoterService() { - TTestActorRuntime* const runtime = Server->GetRuntime(); - const ui32 systemPoolId = runtime->GetAppData().SystemPoolId; - //Cerr << "System pool: " << systemPoolId << Endl; - const ui32 nodeIndex = 0; + +namespace NKikimr { + +TTestServer::TTestServer(const TOptions &opts) + : Opts(opts) + , MsgBusPort(PortManager.GetPort()) + , ServerSettings(MakeIntrusive<Tests::TServerSettings>(MsgBusPort)) +{ + SetupSettings(); + RunServer(); +} + +std::pair<TString, TString> TTestServer::GetKesusPathAndName(size_t i) { + return {Tests::TestDomainName, TStringBuilder() << "Kesus_" << i}; +} + +TString TTestServer::GetKesusPath(size_t i) { + return TStringBuilder() << "/" << Tests::TestDomainName << "/Kesus_" << i; +} + +TString TTestServer::GetKesusRootResource() { + return "Root"; +} + +TString TTestServer::GetKesusResource(size_t i) { + return TStringBuilder() << GetKesusRootResource() << "/Resource_" << i; +} + +void TTestServer::SetupSettings() { + (*ServerSettings) + .SetUseRealThreads(true); +} + +void TTestServer::RunServer() { + Server = MakeIntrusive<Tests::TServer>(ServerSettings, true); + Client = MakeHolder<Tests::TClient>(*ServerSettings); + + Server->GetRuntime()->SetDispatchTimeout(TDuration::Minutes(10)); + + Client->InitRootScheme(); + + RegisterQuoterService(); + CreateKesusesAndResources(); +} + +void TTestServer::RunQuotaRequesters(TRequestStats& stats) { + TTestActorRuntime* const runtime = Server->GetRuntime(); + const ui32 userPoolId = runtime->GetAppData().UserPoolId; + //Cerr << "User pool: " << userPoolId << Endl; + const ui32 nodeIndex = 0; + size_t requesters = 0; + for (size_t kesusIndex = 0; kesusIndex < Opts.KesusCount; ++kesusIndex) { + for (size_t resIndex = 0; resIndex < Opts.ResourcePerKesus; ++resIndex) { + runtime->Register(new TKesusQuotaRequester(Opts, stats, GetEdgeActor(), kesusIndex, resIndex), nodeIndex, userPoolId); + ++requesters; + } + } + for (size_t localResIndex = 0; localResIndex < Opts.LocalResourceCount; ++localResIndex) { + runtime->Register(new TLocalResourceQuotaRequester(Opts, stats, GetEdgeActor(), localResIndex), nodeIndex, userPoolId); + ++requesters; + } + + while (requesters) { + runtime->GrabEdgeEvent<TEvents::TEvWakeup>(GetEdgeActor())->Release(); + --requesters; + } +} + +void TTestServer::RegisterQuoterService() { + TTestActorRuntime* const runtime = Server->GetRuntime(); + const ui32 systemPoolId = runtime->GetAppData().SystemPoolId; + //Cerr << "System pool: " << systemPoolId << Endl; + const ui32 nodeIndex = 0; const TActorId quoterServiceActorId = runtime->Register(CreateQuoterService(), nodeIndex, systemPoolId); - runtime->RegisterService(MakeQuoterServiceID(), quoterServiceActorId); -} - -void TTestServer::CreateKesusesAndResources() { - for (size_t i = 0; i < Opts.KesusCount; ++i) { - auto [parent, name] = GetKesusPathAndName(i); - const NMsgBusProxy::EResponseStatus status = Client->CreateKesus(parent, name); - Y_VERIFY(status == NMsgBusProxy::MSTATUS_OK); - - // Create resources - const ui64 tabletId = GetKesusTabletId(GetKesusPath(i)); - CreateKesusResource(tabletId, GetKesusRootResource(), static_cast<double>(Opts.RootResourceSpeedLimit)); - for (size_t r = 0; r < Opts.ResourcePerKesus; ++r) { - CreateKesusResource(tabletId, 
GetKesusResource(r)); - } - } -} - -ui64 TTestServer::GetKesusTabletId(const TString& path) { - TAutoPtr<NMsgBusProxy::TBusResponse> resp = Client->Ls(path); - Y_VERIFY(resp->Record.GetStatusCode() == NKikimrIssues::TStatusIds::SUCCESS); - const auto& pathDesc = resp->Record.GetPathDescription(); - Y_VERIFY(pathDesc.HasKesus()); - const ui64 tabletId = pathDesc.GetKesus().GetKesusTabletId(); - Y_VERIFY(tabletId); - return tabletId; -} - -void TTestServer::CreateKesusResource(ui64 kesusTabletId, const TString& resourcePath, TMaybe<double> maxUnitsPerSecond) { - TTestActorRuntime* const runtime = Server->GetRuntime(); - - TAutoPtr<NKesus::TEvKesus::TEvAddQuoterResource> request(new NKesus::TEvKesus::TEvAddQuoterResource()); - request->Record.MutableResource()->SetResourcePath(resourcePath); - auto* hdrrConfig = request->Record.MutableResource()->MutableHierarhicalDRRResourceConfig(); // Create HDRR config - if (maxUnitsPerSecond) { - hdrrConfig->SetMaxUnitsPerSecond(*maxUnitsPerSecond); - } - + runtime->RegisterService(MakeQuoterServiceID(), quoterServiceActorId); +} + +void TTestServer::CreateKesusesAndResources() { + for (size_t i = 0; i < Opts.KesusCount; ++i) { + auto [parent, name] = GetKesusPathAndName(i); + const NMsgBusProxy::EResponseStatus status = Client->CreateKesus(parent, name); + Y_VERIFY(status == NMsgBusProxy::MSTATUS_OK); + + // Create resources + const ui64 tabletId = GetKesusTabletId(GetKesusPath(i)); + CreateKesusResource(tabletId, GetKesusRootResource(), static_cast<double>(Opts.RootResourceSpeedLimit)); + for (size_t r = 0; r < Opts.ResourcePerKesus; ++r) { + CreateKesusResource(tabletId, GetKesusResource(r)); + } + } +} + +ui64 TTestServer::GetKesusTabletId(const TString& path) { + TAutoPtr<NMsgBusProxy::TBusResponse> resp = Client->Ls(path); + Y_VERIFY(resp->Record.GetStatusCode() == NKikimrIssues::TStatusIds::SUCCESS); + const auto& pathDesc = resp->Record.GetPathDescription(); + Y_VERIFY(pathDesc.HasKesus()); + const ui64 tabletId = pathDesc.GetKesus().GetKesusTabletId(); + Y_VERIFY(tabletId); + return tabletId; +} + +void TTestServer::CreateKesusResource(ui64 kesusTabletId, const TString& resourcePath, TMaybe<double> maxUnitsPerSecond) { + TTestActorRuntime* const runtime = Server->GetRuntime(); + + TAutoPtr<NKesus::TEvKesus::TEvAddQuoterResource> request(new NKesus::TEvKesus::TEvAddQuoterResource()); + request->Record.MutableResource()->SetResourcePath(resourcePath); + auto* hdrrConfig = request->Record.MutableResource()->MutableHierarhicalDRRResourceConfig(); // Create HDRR config + if (maxUnitsPerSecond) { + hdrrConfig->SetMaxUnitsPerSecond(*maxUnitsPerSecond); + } + TActorId sender = GetEdgeActor(); ForwardToTablet(*runtime, kesusTabletId, sender, request.Release(), 0); - - TAutoPtr<IEventHandle> handle; - runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvAddQuoterResourceResult>(handle); - const NKikimrKesus::TEvAddQuoterResourceResult& record = handle->Get<NKesus::TEvKesus::TEvAddQuoterResourceResult>()->Record; - Y_VERIFY(record.GetError().GetStatus() == Ydb::StatusIds::SUCCESS); -} - -void TTestServer::CreateKesusResource(const TString& kesusPath, const TString& resourcePath, TMaybe<double> maxUnitsPerSecond) { - CreateKesusResource(GetKesusTabletId(kesusPath), resourcePath, maxUnitsPerSecond); -} - + + TAutoPtr<IEventHandle> handle; + runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvAddQuoterResourceResult>(handle); + const NKikimrKesus::TEvAddQuoterResourceResult& record = handle->Get<NKesus::TEvKesus::TEvAddQuoterResourceResult>()->Record; + 
Y_VERIFY(record.GetError().GetStatus() == Ydb::StatusIds::SUCCESS); +} + +void TTestServer::CreateKesusResource(const TString& kesusPath, const TString& resourcePath, TMaybe<double> maxUnitsPerSecond) { + CreateKesusResource(GetKesusTabletId(kesusPath), resourcePath, maxUnitsPerSecond); +} + TActorId TTestServer::GetEdgeActor() { - if (!EdgeActor) { - EdgeActor = Server->GetRuntime()->AllocateEdgeActor(0); - } - return EdgeActor; -} - - -} // namespace NKikimr + if (!EdgeActor) { + EdgeActor = Server->GetRuntime()->AllocateEdgeActor(0); + } + return EdgeActor; +} + + +} // namespace NKikimr diff --git a/ydb/core/quoter/quoter_service_bandwidth_test/server.h b/ydb/core/quoter/quoter_service_bandwidth_test/server.h index 59522eb5bfa..46724a71aa5 100644 --- a/ydb/core/quoter/quoter_service_bandwidth_test/server.h +++ b/ydb/core/quoter/quoter_service_bandwidth_test/server.h @@ -1,55 +1,55 @@ -#pragma once -#include "options.h" - +#pragma once +#include "options.h" + #include <ydb/core/testlib/actors/test_runtime.h> #include <ydb/core/testlib/test_client.h> - -#include <util/generic/maybe.h> - -#include <atomic> - -namespace NKikimr { - -struct TRequestStats { - std::atomic<size_t> RequestsCount = 0; - std::atomic<size_t> ResponsesCount = 0; - std::atomic<size_t> OkResponses = 0; - std::atomic<size_t> DeadlineResponses = 0; -}; - -class TTestServer { -public: - TTestServer(const TOptions& opts); - - void RunQuotaRequesters(TRequestStats& stats); - - static std::pair<TString, TString> GetKesusPathAndName(size_t i); - static TString GetKesusPath(size_t i); - static TString GetKesusResource(size_t i); - static TString GetKesusRootResource(); - -private: - void RunServer(); - - void RegisterQuoterService(); - void CreateKesusesAndResources(); - ui64 GetKesusTabletId(const TString& path); - void CreateKesusResource(const TString& kesusPath, const TString& resourcePath, TMaybe<double> maxUnitsPerSecond = Nothing()); - void CreateKesusResource(ui64 kesusTabletId, const TString& resourcePath, TMaybe<double> maxUnitsPerSecond = Nothing()); - + +#include <util/generic/maybe.h> + +#include <atomic> + +namespace NKikimr { + +struct TRequestStats { + std::atomic<size_t> RequestsCount = 0; + std::atomic<size_t> ResponsesCount = 0; + std::atomic<size_t> OkResponses = 0; + std::atomic<size_t> DeadlineResponses = 0; +}; + +class TTestServer { +public: + TTestServer(const TOptions& opts); + + void RunQuotaRequesters(TRequestStats& stats); + + static std::pair<TString, TString> GetKesusPathAndName(size_t i); + static TString GetKesusPath(size_t i); + static TString GetKesusResource(size_t i); + static TString GetKesusRootResource(); + +private: + void RunServer(); + + void RegisterQuoterService(); + void CreateKesusesAndResources(); + ui64 GetKesusTabletId(const TString& path); + void CreateKesusResource(const TString& kesusPath, const TString& resourcePath, TMaybe<double> maxUnitsPerSecond = Nothing()); + void CreateKesusResource(ui64 kesusTabletId, const TString& resourcePath, TMaybe<double> maxUnitsPerSecond = Nothing()); + TActorId GetEdgeActor(); - - void SetupSettings(); - -private: - const TOptions &Opts; - TPortManager PortManager; - const ui16 MsgBusPort; - Tests::TServerSettings::TPtr ServerSettings; - - Tests::TServer::TPtr Server; - THolder <Tests::TClient> Client; + + void SetupSettings(); + +private: + const TOptions &Opts; + TPortManager PortManager; + const ui16 MsgBusPort; + Tests::TServerSettings::TPtr ServerSettings; + + Tests::TServer::TPtr Server; + THolder <Tests::TClient> Client; TActorId 
EdgeActor; -}; - -} // namespace NKikimr +}; + +} // namespace NKikimr diff --git a/ydb/core/quoter/quoter_service_bandwidth_test/ya.make b/ydb/core/quoter/quoter_service_bandwidth_test/ya.make index ddb00e5873a..07bba9c8fdc 100644 --- a/ydb/core/quoter/quoter_service_bandwidth_test/ya.make +++ b/ydb/core/quoter/quoter_service_bandwidth_test/ya.make @@ -1,25 +1,25 @@ -PROGRAM() - +PROGRAM() + OWNER( galaxycrab g:kikimr ) - -PEERDIR( + +PEERDIR( library/cpp/colorizer library/cpp/getopt ydb/core/base ydb/core/kesus/tablet ydb/core/quoter ydb/core/testlib -) - +) + YQL_LAST_ABI_VERSION() -SRCS( - main.cpp - quota_requester.cpp - server.cpp -) - -END() +SRCS( + main.cpp + quota_requester.cpp + server.cpp +) + +END() diff --git a/ydb/core/quoter/quoter_service_impl.h b/ydb/core/quoter/quoter_service_impl.h index d27ea134e19..29a692e43f1 100644 --- a/ydb/core/quoter/quoter_service_impl.h +++ b/ydb/core/quoter/quoter_service_impl.h @@ -1,53 +1,53 @@ #pragma once #include "defs.h" #include "quoter_service.h" - + #include <ydb/core/base/tablet_pipe.h> #include <ydb/core/base/appdata.h> #include <ydb/core/base/path.h> #include <ydb/core/tx/scheme_cache/scheme_cache.h> - + #include <library/cpp/actors/core/hfunc.h> #include <library/cpp/actors/core/log.h> #include <library/cpp/actors/core/actor_bootstrapped.h> #include <library/cpp/lwtrace/shuttle.h> - + #include <util/generic/set.h> #include <util/generic/deque.h> namespace NKikimr { namespace NQuoter { -extern const TString CONSUMED_COUNTER_NAME; -extern const TString REQUESTED_COUNTER_NAME; -extern const TString RESOURCE_COUNTER_SENSOR_NAME; -extern const TString QUOTER_COUNTER_SENSOR_NAME; -extern const TString QUOTER_SERVICE_COUNTER_SENSOR_NAME; -extern const TString RESOURCE_QUEUE_SIZE_COUNTER_SENSOR_NAME; -extern const TString RESOURCE_QUEUE_WEIGHT_COUNTER_SENSOR_NAME; -extern const TString RESOURCE_ALLOCATED_OFFLINE_COUNTER_SENSOR_NAME; -extern const TString RESOURCE_DROPPED_COUNTER_SENSOR_NAME; -extern const TString RESOURCE_ACCUMULATED_COUNTER_SENSOR_NAME; -extern const TString RESOURCE_RECEIVED_FROM_KESUS_COUNTER_SENSOR_NAME; -extern const TString REQUEST_QUEUE_TIME_SENSOR_NAME; -extern const TString REQUEST_TIME_SENSOR_NAME; -extern const TString REQUESTS_COUNT_SENSOR_NAME; -extern const TString ELAPSED_MICROSEC_IN_STARVATION_SENSOR_NAME; -extern const TString DISCONNECTS_COUNTER_SENSOR_NAME; - +extern const TString CONSUMED_COUNTER_NAME; +extern const TString REQUESTED_COUNTER_NAME; +extern const TString RESOURCE_COUNTER_SENSOR_NAME; +extern const TString QUOTER_COUNTER_SENSOR_NAME; +extern const TString QUOTER_SERVICE_COUNTER_SENSOR_NAME; +extern const TString RESOURCE_QUEUE_SIZE_COUNTER_SENSOR_NAME; +extern const TString RESOURCE_QUEUE_WEIGHT_COUNTER_SENSOR_NAME; +extern const TString RESOURCE_ALLOCATED_OFFLINE_COUNTER_SENSOR_NAME; +extern const TString RESOURCE_DROPPED_COUNTER_SENSOR_NAME; +extern const TString RESOURCE_ACCUMULATED_COUNTER_SENSOR_NAME; +extern const TString RESOURCE_RECEIVED_FROM_KESUS_COUNTER_SENSOR_NAME; +extern const TString REQUEST_QUEUE_TIME_SENSOR_NAME; +extern const TString REQUEST_TIME_SENSOR_NAME; +extern const TString REQUESTS_COUNT_SENSOR_NAME; +extern const TString ELAPSED_MICROSEC_IN_STARVATION_SENSOR_NAME; +extern const TString DISCONNECTS_COUNTER_SENSOR_NAME; + using EResourceOperator = TEvQuota::EResourceOperator; using EStatUpdatePolicy = TEvQuota::EStatUpdatePolicy; using EUpdateState = TEvQuota::EUpdateState; struct TResource; -NMonitoring::IHistogramCollectorPtr GetLatencyHistogramBuckets(); - 
+NMonitoring::IHistogramCollectorPtr GetLatencyHistogramBuckets(); + struct TRequest { TActorId Source = TActorId(); ui64 EventCookie = 0; - TInstant StartTime; + TInstant StartTime; EResourceOperator Operator = EResourceOperator::Unknown; TInstant Deadline = TInstant::Max(); ui32 ResourceLeaf = Max<ui32>(); @@ -57,9 +57,9 @@ struct TRequest { ui32 PrevByOwner = Max<ui32>(); ui32 NextByOwner = Max<ui32>(); - - // tracing - mutable NLWTrace::TOrbit Orbit; + + // tracing + mutable NLWTrace::TOrbit Orbit; }; class TReqState { @@ -102,9 +102,9 @@ struct TResourceLeaf { TString QuoterName; // optional TString ResourceName; - - TInstant StartQueueing = TInstant::Zero(); // optional phase - TInstant StartCharging = TInstant::Zero(); // when resource is processed + + TInstant StartQueueing = TInstant::Zero(); // optional phase + TInstant StartCharging = TInstant::Zero(); // when resource is processed }; class TResState { @@ -120,9 +120,9 @@ public: struct TResource { const ui64 QuoterId; const ui64 ResourceId; - const TString Quoter; + const TString Quoter; const TString Resource; - const TQuoterServiceConfig& QuoterServiceConfig; + const TQuoterServiceConfig& QuoterServiceConfig; TInstant Activation = TInstant::Zero(); TInstant NextTick = TInstant::Zero(); @@ -132,56 +132,56 @@ struct TResource { ui32 QueueTail = Max<ui32>(); ui32 QueueSize = 0; - double QueueWeight = 0; + double QueueWeight = 0; TInstant LastAllocated = TInstant::Max(); - double FreeBalance = 0.0; // could be used w/o pace limit - double Balance = 0.0; // total balance, but under pace limit - double TickRate = 0.0; + double FreeBalance = 0.0; // could be used w/o pace limit + double Balance = 0.0; // total balance, but under pace limit + double TickRate = 0.0; TDuration TickSize = TDuration::Seconds(1); TMap<ui32, TEvQuota::TUpdateTick> QuotaChannels; // stats block TEvQuota::EStatUpdatePolicy StatUpdatePolicy = TEvQuota::EStatUpdatePolicy::Never; - double AmountConsumed = 0.0; // consumed from last stats notification + double AmountConsumed = 0.0; // consumed from last stats notification TTimeSeriesMap<double> History; // consumption history from last stats notification - TInstant StartStarvationTime = TInstant::Zero(); - - struct { - NMonitoring::TDynamicCounters::TCounterPtr Consumed; - NMonitoring::TDynamicCounters::TCounterPtr Requested; - NMonitoring::TDynamicCounters::TCounterPtr RequestsCount; - NMonitoring::TDynamicCounters::TCounterPtr ElapsedMicrosecInStarvation; - NMonitoring::THistogramPtr RequestQueueTime; - NMonitoring::THistogramPtr RequestTime; - } Counters; - - TResource(ui64 quoterId, ui64 resourceId, const TString& quoter, const TString& resource, const TQuoterServiceConfig "erServiceConfig, const NMonitoring::TDynamicCounterPtr& quoterCounters) + TInstant StartStarvationTime = TInstant::Zero(); + + struct { + NMonitoring::TDynamicCounters::TCounterPtr Consumed; + NMonitoring::TDynamicCounters::TCounterPtr Requested; + NMonitoring::TDynamicCounters::TCounterPtr RequestsCount; + NMonitoring::TDynamicCounters::TCounterPtr ElapsedMicrosecInStarvation; + NMonitoring::THistogramPtr RequestQueueTime; + NMonitoring::THistogramPtr RequestTime; + } Counters; + + TResource(ui64 quoterId, ui64 resourceId, const TString& quoter, const TString& resource, const TQuoterServiceConfig "erServiceConfig, const NMonitoring::TDynamicCounterPtr& quoterCounters) : QuoterId(quoterId) , ResourceId(resourceId) - , Quoter(quoter) + , Quoter(quoter) , Resource(resource) - , QuoterServiceConfig(quoterServiceConfig) - { - auto counters = 
quoterCounters->GetSubgroup(RESOURCE_COUNTER_SENSOR_NAME, resource ? resource : "__StaticRatedResource"); - Counters.Consumed = counters->GetCounter(CONSUMED_COUNTER_NAME, true); - Counters.Requested = counters->GetCounter(REQUESTED_COUNTER_NAME, true); - Counters.RequestQueueTime = counters->GetHistogram(REQUEST_QUEUE_TIME_SENSOR_NAME, GetLatencyHistogramBuckets()); - Counters.RequestTime = counters->GetHistogram(REQUEST_TIME_SENSOR_NAME, GetLatencyHistogramBuckets()); - Counters.RequestsCount = counters->GetCounter(REQUESTS_COUNT_SENSOR_NAME, true); - Counters.ElapsedMicrosecInStarvation = counters->GetCounter(ELAPSED_MICROSEC_IN_STARVATION_SENSOR_NAME, true); - } + , QuoterServiceConfig(quoterServiceConfig) + { + auto counters = quoterCounters->GetSubgroup(RESOURCE_COUNTER_SENSOR_NAME, resource ? resource : "__StaticRatedResource"); + Counters.Consumed = counters->GetCounter(CONSUMED_COUNTER_NAME, true); + Counters.Requested = counters->GetCounter(REQUESTED_COUNTER_NAME, true); + Counters.RequestQueueTime = counters->GetHistogram(REQUEST_QUEUE_TIME_SENSOR_NAME, GetLatencyHistogramBuckets()); + Counters.RequestTime = counters->GetHistogram(REQUEST_TIME_SENSOR_NAME, GetLatencyHistogramBuckets()); + Counters.RequestsCount = counters->GetCounter(REQUESTS_COUNT_SENSOR_NAME, true); + Counters.ElapsedMicrosecInStarvation = counters->GetCounter(ELAPSED_MICROSEC_IN_STARVATION_SENSOR_NAME, true); + } void ApplyQuotaChannel(const TEvQuota::TUpdateTick &tick); TDuration Charge(double amount, TInstant now); // Zero - fullfiled, Max - not in current tick, Duration - in current tick, but not right now due to pace limit - TDuration Charge(TRequest& request, TResourceLeaf& leaf, TInstant now); + TDuration Charge(TRequest& request, TResourceLeaf& leaf, TInstant now); void ChargeUsedAmount(double amount, TInstant now); - - void MarkStartedCharging(TRequest& request, TResourceLeaf& leaf, TInstant now); - void StartStarvation(TInstant now); - void StopStarvation(TInstant now); + + void MarkStartedCharging(TRequest& request, TResourceLeaf& leaf, TInstant now); + void StartStarvation(TInstant now); + void StopStarvation(TInstant now); }; struct TScheduleTick { @@ -198,24 +198,24 @@ struct TQuoterState { TSet<ui32> WaitingQueueResolve; // => requests TMap<TString, TSet<ui32>> WaitingResource; // => requests - struct { - NMonitoring::TDynamicCounterPtr QuoterCounters; - } Counters; - - TResource& GetOrCreate(ui64 quoterId, ui64 resId, const TString& quoter, const TString& resource, const TQuoterServiceConfig "erServiceConfig); + struct { + NMonitoring::TDynamicCounterPtr QuoterCounters; + } Counters; + + TResource& GetOrCreate(ui64 quoterId, ui64 resId, const TString& quoter, const TString& resource, const TQuoterServiceConfig "erServiceConfig); bool Empty(); - void InitCounters(const NMonitoring::TDynamicCounterPtr& serviceCounters) { - Counters.QuoterCounters = serviceCounters->GetSubgroup(QUOTER_COUNTER_SENSOR_NAME, QuoterName); - } - - TQuoterState(const TString& quoterName, const NMonitoring::TDynamicCounterPtr& serviceCounters) + void InitCounters(const NMonitoring::TDynamicCounterPtr& serviceCounters) { + Counters.QuoterCounters = serviceCounters->GetSubgroup(QUOTER_COUNTER_SENSOR_NAME, QuoterName); + } + + TQuoterState(const TString& quoterName, const NMonitoring::TDynamicCounterPtr& serviceCounters) : QuoterName(quoterName) - { - if (serviceCounters) { - InitCounters(serviceCounters); - } - } + { + if (serviceCounters) { + InitCounters(serviceCounters); + } + } }; class TQuoterService : public 
TActorBootstrapped<TQuoterService> { @@ -241,7 +241,7 @@ class TQuoterService : public TActorBootstrapped<TQuoterService> { TMap<ui64, TDeque<TEvQuota::TProxyStat>> StatsToPublish; // quoterId -> stats struct { - NMonitoring::TDynamicCounterPtr ServiceCounters; + NMonitoring::TDynamicCounterPtr ServiceCounters; NMonitoring::TDynamicCounters::TCounterPtr ActiveQuoterProxies; NMonitoring::TDynamicCounters::TCounterPtr ActiveProxyResources; NMonitoring::TDynamicCounters::TCounterPtr KnownLocalResources; @@ -250,7 +250,7 @@ class TQuoterService : public TActorBootstrapped<TQuoterService> { NMonitoring::TDynamicCounters::TCounterPtr ResultOk; NMonitoring::TDynamicCounters::TCounterPtr ResultDeadline; NMonitoring::TDynamicCounters::TCounterPtr ResultError; - NMonitoring::THistogramPtr RequestLatency; + NMonitoring::THistogramPtr RequestLatency; } Counters; enum class EInitLeafStatus { @@ -258,17 +258,17 @@ class TQuoterService : public TActorBootstrapped<TQuoterService> { Forbid, Charged, Wait, - GenericError, + GenericError, }; void ScheduleNextTick(TInstant requested, TResource &quores); TInstant TimeToGranularity(TInstant rawTime); - void TryTickSchedule(TInstant now = TInstant::Zero()); + void TryTickSchedule(TInstant now = TInstant::Zero()); void ReplyRequest(TRequest &request, ui32 reqIdx, TEvQuota::TEvClearance::EResult resultCode); void ForgetRequest(TRequest &request, ui32 reqIdx); void DeclineRequest(TRequest &request, ui32 reqIdx); - void FailRequest(TRequest &request, ui32 reqIdx); + void FailRequest(TRequest &request, ui32 reqIdx); void AllowRequest(TRequest &request, ui32 reqIdx); void DeadlineRequest(TRequest &request, ui32 reqIdx); @@ -297,18 +297,18 @@ class TQuoterService : public TActorBootstrapped<TQuoterService> { void CreateKesusQuoter(NSchemeCache::TSchemeCacheNavigate::TEntry &navigate, decltype(QuotersIndex)::iterator indexIt, decltype(Quoters)::iterator quoterIt); void BreakQuoter(decltype(QuotersIndex)::iterator indexIt, decltype(Quoters)::iterator quoterIt); void BreakQuoter(decltype(Quoters)::iterator quoterIt); - - TString PrintEvent(const TEvQuota::TEvRequest::TPtr& ev); + + TString PrintEvent(const TEvQuota::TEvRequest::TPtr& ev); public: static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::QUOTER_SERVICE_ACTOR; } - TQuoterService(const TQuoterServiceConfig &config); - ~TQuoterService(); - - void Bootstrap(); + TQuoterService(const TQuoterServiceConfig &config); + ~TQuoterService(); + void Bootstrap(); + STFUNC(StateFunc) { Y_UNUSED(ctx); switch (ev->GetTypeRewrite()) { diff --git a/ydb/core/quoter/quoter_service_ut.cpp b/ydb/core/quoter/quoter_service_ut.cpp index 88c74ca3fb1..da40aeece72 100644 --- a/ydb/core/quoter/quoter_service_ut.cpp +++ b/ydb/core/quoter/quoter_service_ut.cpp @@ -8,9 +8,9 @@ #include <library/cpp/testing/unittest/registar.h> -#include <util/system/compiler.h> -#include <util/system/valgrind.h> - +#include <util/system/compiler.h> +#include <util/system/valgrind.h> + namespace NKikimr { using namespace Tests; @@ -53,24 +53,24 @@ Y_UNIT_TEST_SUITE(TQuoterServiceTest) { THolder<TEvQuota::TEvClearance> reply = runtime->GrabEdgeEvent<TEvQuota::TEvClearance>(); UNIT_ASSERT(reply->Result == TEvQuota::TEvClearance::EResult::Success); } - - { - // test static quter queues processing - size_t cnt = 100; - for (size_t i = 0; i < cnt; ++i) { - runtime->Send(new IEventHandle(MakeQuoterServiceID(), sender, - new TEvQuota::TEvRequest(TEvQuota::EResourceOperator::And, { - 
TEvQuota::TResourceLeaf(TEvQuota::TResourceLeaf::QuoterSystem, TEvQuota::TResourceLeaf::MakeTaggedRateRes(2, 50), 1) - }, TDuration::Max()), 0, 300 + i)); - } - - TAutoPtr<IEventHandle> ev; - for (size_t i = 0; i < cnt; ++i) { - TEvQuota::TEvClearance* reply = runtime->GrabEdgeEvent<TEvQuota::TEvClearance>(ev); - UNIT_ASSERT(reply->Result == TEvQuota::TEvClearance::EResult::Success); - UNIT_ASSERT_VALUES_EQUAL(ev->Cookie, 300 + i); - } - } + + { + // test static quter queues processing + size_t cnt = 100; + for (size_t i = 0; i < cnt; ++i) { + runtime->Send(new IEventHandle(MakeQuoterServiceID(), sender, + new TEvQuota::TEvRequest(TEvQuota::EResourceOperator::And, { + TEvQuota::TResourceLeaf(TEvQuota::TResourceLeaf::QuoterSystem, TEvQuota::TResourceLeaf::MakeTaggedRateRes(2, 50), 1) + }, TDuration::Max()), 0, 300 + i)); + } + + TAutoPtr<IEventHandle> ev; + for (size_t i = 0; i < cnt; ++i) { + TEvQuota::TEvClearance* reply = runtime->GrabEdgeEvent<TEvQuota::TEvClearance>(ev); + UNIT_ASSERT(reply->Result == TEvQuota::TEvClearance::EResult::Success); + UNIT_ASSERT_VALUES_EQUAL(ev->Cookie, 300 + i); + } + } { auto resId = TEvQuota::TResourceLeaf::MakeTaggedRateRes(1, 1); @@ -109,153 +109,153 @@ Y_UNIT_TEST_SUITE(TQuoterServiceTest) { } } -#if defined(OPTIMIZED) -#error "Macro conflict." -#endif - -#if defined(_MSC_VER) - -#if defined(NDEBUG) -#define OPTIMIZED // release builds -#endif - -#else // non msvc compiler: use __OPTIMIZE__ flag to include relwithdebinfo builds - -#if defined(__OPTIMIZE__) -#define OPTIMIZED // release builds and relwithdebinfo builds -#endif - -#endif - -#if defined(OPTIMIZED) && !defined(_san_enabled_) && !defined(WITH_VALGRIND) - enum class ESpeedTestResourceType { - StaticTaggedRateResource, - KesusResource, - }; - - void CreateKesus(TServer& server) { - Tests::TClient client(server.GetSettings()); - client.InitRootScheme(); - const NMsgBusProxy::EResponseStatus status = client.CreateKesus(Tests::TestDomainName, "KesusQuoter"); - UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK); - } - - void CreateKesusResource(TServer& server, double rate) { - Tests::TClient client(server.GetSettings()); - TTestActorRuntime* const runtime = server.GetRuntime(); - - // request - TAutoPtr<NKesus::TEvKesus::TEvAddQuoterResource> request(new NKesus::TEvKesus::TEvAddQuoterResource()); - request->Record.MutableResource()->SetResourcePath("/Res"); - request->Record.MutableResource()->MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(rate); - - // Get tablet id - TAutoPtr<NMsgBusProxy::TBusResponse> resp = client.Ls(TStringBuilder() << Tests::TestDomainName << "/KesusQuoter"); - UNIT_ASSERT_EQUAL(resp->Record.GetStatusCode(), NKikimrIssues::TStatusIds::SUCCESS); - const auto& pathDesc = resp->Record.GetPathDescription(); - UNIT_ASSERT(pathDesc.HasKesus()); - const ui64 tabletId = pathDesc.GetKesus().GetKesusTabletId(); - +#if defined(OPTIMIZED) +#error "Macro conflict." 
+#endif + +#if defined(_MSC_VER) + +#if defined(NDEBUG) +#define OPTIMIZED // release builds +#endif + +#else // non msvc compiler: use __OPTIMIZE__ flag to include relwithdebinfo builds + +#if defined(__OPTIMIZE__) +#define OPTIMIZED // release builds and relwithdebinfo builds +#endif + +#endif + +#if defined(OPTIMIZED) && !defined(_san_enabled_) && !defined(WITH_VALGRIND) + enum class ESpeedTestResourceType { + StaticTaggedRateResource, + KesusResource, + }; + + void CreateKesus(TServer& server) { + Tests::TClient client(server.GetSettings()); + client.InitRootScheme(); + const NMsgBusProxy::EResponseStatus status = client.CreateKesus(Tests::TestDomainName, "KesusQuoter"); + UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK); + } + + void CreateKesusResource(TServer& server, double rate) { + Tests::TClient client(server.GetSettings()); + TTestActorRuntime* const runtime = server.GetRuntime(); + + // request + TAutoPtr<NKesus::TEvKesus::TEvAddQuoterResource> request(new NKesus::TEvKesus::TEvAddQuoterResource()); + request->Record.MutableResource()->SetResourcePath("/Res"); + request->Record.MutableResource()->MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(rate); + + // Get tablet id + TAutoPtr<NMsgBusProxy::TBusResponse> resp = client.Ls(TStringBuilder() << Tests::TestDomainName << "/KesusQuoter"); + UNIT_ASSERT_EQUAL(resp->Record.GetStatusCode(), NKikimrIssues::TStatusIds::SUCCESS); + const auto& pathDesc = resp->Record.GetPathDescription(); + UNIT_ASSERT(pathDesc.HasKesus()); + const ui64 tabletId = pathDesc.GetKesus().GetKesusTabletId(); + TActorId sender = runtime->AllocateEdgeActor(); ForwardToTablet(*runtime, tabletId, sender, request.Release(), 0); - - TAutoPtr<IEventHandle> handle; - runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvAddQuoterResourceResult>(handle); - const NKikimrKesus::TEvAddQuoterResourceResult& record = handle->Get<NKesus::TEvKesus::TEvAddQuoterResourceResult>()->Record; - UNIT_ASSERT_VALUES_EQUAL(record.GetError().GetStatus(), Ydb::StatusIds::SUCCESS); - } - - // Tests that quoter service can serve resource allocation requests at high rates. - void SpeedTest(ESpeedTestResourceType resType) { - TPortManager portManager; - TServerSettings serverSettings(portManager.GetPort()); - TServer server = TServer(serverSettings, true); - - TTestActorRuntime* runtime = server.GetRuntime(); - + + TAutoPtr<IEventHandle> handle; + runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvAddQuoterResourceResult>(handle); + const NKikimrKesus::TEvAddQuoterResourceResult& record = handle->Get<NKesus::TEvKesus::TEvAddQuoterResourceResult>()->Record; + UNIT_ASSERT_VALUES_EQUAL(record.GetError().GetStatus(), Ydb::StatusIds::SUCCESS); + } + + // Tests that quoter service can serve resource allocation requests at high rates. 
+ void SpeedTest(ESpeedTestResourceType resType) { + TPortManager portManager; + TServerSettings serverSettings(portManager.GetPort()); + TServer server = TServer(serverSettings, true); + + TTestActorRuntime* runtime = server.GetRuntime(); + const TActorId serviceId = MakeQuoterServiceID(); const TActorId serviceActorId = runtime->Register(CreateQuoterService()); - runtime->RegisterService(serviceId, serviceActorId); - + runtime->RegisterService(serviceId, serviceActorId); + const TActorId sender = runtime->AllocateEdgeActor(); - - constexpr TDuration testDuration = TDuration::Seconds(2); - constexpr TDuration waitDuration = TDuration::MilliSeconds(150); - constexpr ui32 rate = 2000; - - constexpr double secondsForTest = static_cast<double>(testDuration.MicroSeconds()) / 1000000.0; - constexpr double secondsForWait = static_cast<double>(waitDuration.MicroSeconds()) / 1000000.0; - constexpr double doubleRate = static_cast<double>(rate); - - TString quoter; - TString resource; - if (resType == ESpeedTestResourceType::KesusResource) { - CreateKesus(server); - CreateKesusResource(server, doubleRate); - quoter = TStringBuilder() << "/" << Tests::TestDomainName << "/KesusQuoter"; - resource = "Res"; - } - - const TEvQuota::TResourceLeaf resLeaf = resType == ESpeedTestResourceType::StaticTaggedRateResource ? - TEvQuota::TResourceLeaf(TEvQuota::TResourceLeaf::QuoterSystem, TEvQuota::TResourceLeaf::MakeTaggedRateRes(42, rate), 1) : - TEvQuota::TResourceLeaf(quoter, resource, 1); - - for (size_t iteration = 0; iteration < 2; ++iteration) { - const TInstant start = TInstant::Now(); - size_t sent = 0; - while (TInstant::Now() - start < testDuration) { - runtime->Send(new IEventHandle(MakeQuoterServiceID(), sender, - new TEvQuota::TEvRequest(TEvQuota::EResourceOperator::And, { resLeaf }, waitDuration), 0, 0)); - ++sent; - if ((sent & 3) != 0) { - Sleep(TDuration::MicroSeconds(1)); - } - } - Cerr << "Requests sent: " << sent << Endl; - - if (static_cast<double>(sent) > secondsForTest * doubleRate * 7.0) { // check if we have slow machine - TAutoPtr<IEventHandle> ev; - int ok = 0; - int deadline = 0; - for (size_t i = 0; i < sent; ++i) { - TEvQuota::TEvClearance* reply = runtime->GrabEdgeEvent<TEvQuota::TEvClearance>(ev); - if (reply->Result == TEvQuota::TEvClearance::EResult::Success) { - ++ok; - } else if (reply->Result == TEvQuota::TEvClearance::EResult::Deadline) { - ++deadline; - } else { - UNIT_ASSERT(false); - } - } - - Cerr << "OK: " << ok << Endl; - Cerr << "Deadline: " << deadline << Endl; - const double expectedSuccesses = (secondsForTest + secondsForWait) * doubleRate; - Cerr << "Expected OK's: " << expectedSuccesses << Endl; - const double maxDeviation = expectedSuccesses * 0.2; - UNIT_ASSERT_DOUBLES_EQUAL_C(static_cast<double>(ok), expectedSuccesses, maxDeviation, - "ok: " << ok << ", deadline: " << deadline << ", sent: " << sent << ", expectedSuccesses: " << expectedSuccesses - << ", secondsForTest: " << secondsForTest << ", secondsForWait: " << secondsForWait); - } else { - Cerr << "Too few requests sent" << Endl; - break; // Else we would receive TEvClearance from previous test iteration. - } - - if (iteration == 0) { - Sleep(TDuration::MilliSeconds(300)); // Make a pause to check that algorithm will consider it. 
- } - } - } - - Y_UNIT_TEST(StaticRateLimiterSpeed) { - SpeedTest(ESpeedTestResourceType::StaticTaggedRateResource); - } - - Y_UNIT_TEST(KesusResourceSpeed) { - SpeedTest(ESpeedTestResourceType::KesusResource); - } -#endif - + + constexpr TDuration testDuration = TDuration::Seconds(2); + constexpr TDuration waitDuration = TDuration::MilliSeconds(150); + constexpr ui32 rate = 2000; + + constexpr double secondsForTest = static_cast<double>(testDuration.MicroSeconds()) / 1000000.0; + constexpr double secondsForWait = static_cast<double>(waitDuration.MicroSeconds()) / 1000000.0; + constexpr double doubleRate = static_cast<double>(rate); + + TString quoter; + TString resource; + if (resType == ESpeedTestResourceType::KesusResource) { + CreateKesus(server); + CreateKesusResource(server, doubleRate); + quoter = TStringBuilder() << "/" << Tests::TestDomainName << "/KesusQuoter"; + resource = "Res"; + } + + const TEvQuota::TResourceLeaf resLeaf = resType == ESpeedTestResourceType::StaticTaggedRateResource ? + TEvQuota::TResourceLeaf(TEvQuota::TResourceLeaf::QuoterSystem, TEvQuota::TResourceLeaf::MakeTaggedRateRes(42, rate), 1) : + TEvQuota::TResourceLeaf(quoter, resource, 1); + + for (size_t iteration = 0; iteration < 2; ++iteration) { + const TInstant start = TInstant::Now(); + size_t sent = 0; + while (TInstant::Now() - start < testDuration) { + runtime->Send(new IEventHandle(MakeQuoterServiceID(), sender, + new TEvQuota::TEvRequest(TEvQuota::EResourceOperator::And, { resLeaf }, waitDuration), 0, 0)); + ++sent; + if ((sent & 3) != 0) { + Sleep(TDuration::MicroSeconds(1)); + } + } + Cerr << "Requests sent: " << sent << Endl; + + if (static_cast<double>(sent) > secondsForTest * doubleRate * 7.0) { // check if we have slow machine + TAutoPtr<IEventHandle> ev; + int ok = 0; + int deadline = 0; + for (size_t i = 0; i < sent; ++i) { + TEvQuota::TEvClearance* reply = runtime->GrabEdgeEvent<TEvQuota::TEvClearance>(ev); + if (reply->Result == TEvQuota::TEvClearance::EResult::Success) { + ++ok; + } else if (reply->Result == TEvQuota::TEvClearance::EResult::Deadline) { + ++deadline; + } else { + UNIT_ASSERT(false); + } + } + + Cerr << "OK: " << ok << Endl; + Cerr << "Deadline: " << deadline << Endl; + const double expectedSuccesses = (secondsForTest + secondsForWait) * doubleRate; + Cerr << "Expected OK's: " << expectedSuccesses << Endl; + const double maxDeviation = expectedSuccesses * 0.2; + UNIT_ASSERT_DOUBLES_EQUAL_C(static_cast<double>(ok), expectedSuccesses, maxDeviation, + "ok: " << ok << ", deadline: " << deadline << ", sent: " << sent << ", expectedSuccesses: " << expectedSuccesses + << ", secondsForTest: " << secondsForTest << ", secondsForWait: " << secondsForWait); + } else { + Cerr << "Too few requests sent" << Endl; + break; // Else we would receive TEvClearance from previous test iteration. + } + + if (iteration == 0) { + Sleep(TDuration::MilliSeconds(300)); // Make a pause to check that algorithm will consider it. 
+ } + } + } + + Y_UNIT_TEST(StaticRateLimiterSpeed) { + SpeedTest(ESpeedTestResourceType::StaticTaggedRateResource); + } + + Y_UNIT_TEST(KesusResourceSpeed) { + SpeedTest(ESpeedTestResourceType::KesusResource); + } +#endif + Y_UNIT_TEST(StaticMultipleAndResources) { TServerSettings serverSettings(0); TServer server = TServer(serverSettings, true); @@ -326,13 +326,13 @@ Y_UNIT_TEST_SUITE(TQuoterServiceTest) { }, TDuration::Seconds(3)))); THolder<TEvQuota::TEvClearance> reply1 = runtime->GrabEdgeEvent<TEvQuota::TEvClearance>(); - UNIT_ASSERT_C(reply1->Result == TEvQuota::TEvClearance::EResult::Success, "Result: " << static_cast<int>(reply1->Result)); + UNIT_ASSERT_C(reply1->Result == TEvQuota::TEvClearance::EResult::Success, "Result: " << static_cast<int>(reply1->Result)); THolder<TEvQuota::TEvClearance> reply2 = runtime->GrabEdgeEvent<TEvQuota::TEvClearance>(); - UNIT_ASSERT_C(reply2->Result == TEvQuota::TEvClearance::EResult::Success, "Result: " << static_cast<int>(reply2->Result)); + UNIT_ASSERT_C(reply2->Result == TEvQuota::TEvClearance::EResult::Success, "Result: " << static_cast<int>(reply2->Result)); THolder<TEvQuota::TEvClearance> reply3 = runtime->GrabEdgeEvent<TEvQuota::TEvClearance>(); - UNIT_ASSERT_C(reply3->Result == TEvQuota::TEvClearance::EResult::Deadline, "Result: " << static_cast<int>(reply3->Result)); + UNIT_ASSERT_C(reply3->Result == TEvQuota::TEvClearance::EResult::Deadline, "Result: " << static_cast<int>(reply3->Result)); } } diff --git a/ydb/core/quoter/ut/ya.make b/ydb/core/quoter/ut/ya.make index eca5ac0e40b..3990bec2f9c 100644 --- a/ydb/core/quoter/ut/ya.make +++ b/ydb/core/quoter/ut/ya.make @@ -1,7 +1,7 @@ -# Disable test on windows until DEVTOOLS-5591 and DEVTOOLS-5388 will be fixed. -IF (NOT OS_WINDOWS) +# Disable test on windows until DEVTOOLS-5591 and DEVTOOLS-5388 will be fixed. 
+IF (NOT OS_WINDOWS) UNITTEST_FOR(ydb/core/quoter) - + OWNER(g:kikimr) PEERDIR( diff --git a/ydb/core/quoter/ut_helpers.cpp b/ydb/core/quoter/ut_helpers.cpp index e57a073d74a..137aa4465a9 100644 --- a/ydb/core/quoter/ut_helpers.cpp +++ b/ydb/core/quoter/ut_helpers.cpp @@ -1,505 +1,505 @@ -#include "ut_helpers.h" - +#include "ut_helpers.h" + #include <ydb/core/testlib/tablet_helpers.h> #include <ydb/core/tx/schemeshard/schemeshard.h> - + #include <ydb/core/kesus/tablet/events.h> - -namespace NKikimr { - -const TString TKesusQuoterTestSetup::DEFAULT_KESUS_PARENT_PATH = Tests::TestDomainName; -const TString TKesusQuoterTestSetup::DEFAULT_KESUS_NAME = "KesusQuoter"; -const TString TKesusQuoterTestSetup::DEFAULT_KESUS_PATH = TString::Join("/", DEFAULT_KESUS_PARENT_PATH, "/", DEFAULT_KESUS_NAME); -const TString TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE = "Resource"; - -TKesusQuoterTestSetup::TKesusQuoterTestSetup(bool runServer) - : MsgBusPort(PortManager.GetPort()) - , ServerSettings(MakeIntrusive<Tests::TServerSettings>(MsgBusPort)) -{ - // Settings - GetServerSettings() - .SetNodeCount(2); - - if (runServer) { - RunServer(); - } -} - -void TKesusQuoterTestSetup::RunServer() { - Server = MakeIntrusive<Tests::TServer>(ServerSettings, true); - Client = MakeHolder<Tests::TClient>(*ServerSettings); - - SetupLogging(); - - Client->InitRootScheme(); - - RegisterQuoterService(); - CreateDefaultKesusAndResource(); -} - -void TKesusQuoterTestSetup::SetupLogging() { - Server->GetRuntime()->SetLogPriority(NKikimrServices::KESUS_TABLET, NActors::NLog::PRI_TRACE); - Server->GetRuntime()->SetLogPriority(NKikimrServices::QUOTER_SERVICE, NActors::NLog::PRI_TRACE); - Server->GetRuntime()->SetLogPriority(NKikimrServices::QUOTER_PROXY, NActors::NLog::PRI_TRACE); -} - -void TKesusQuoterTestSetup::RegisterQuoterService() { - TTestActorRuntime* const runtime = GetServer().GetRuntime(); + +namespace NKikimr { + +const TString TKesusQuoterTestSetup::DEFAULT_KESUS_PARENT_PATH = Tests::TestDomainName; +const TString TKesusQuoterTestSetup::DEFAULT_KESUS_NAME = "KesusQuoter"; +const TString TKesusQuoterTestSetup::DEFAULT_KESUS_PATH = TString::Join("/", DEFAULT_KESUS_PARENT_PATH, "/", DEFAULT_KESUS_NAME); +const TString TKesusQuoterTestSetup::DEFAULT_KESUS_RESOURCE = "Resource"; + +TKesusQuoterTestSetup::TKesusQuoterTestSetup(bool runServer) + : MsgBusPort(PortManager.GetPort()) + , ServerSettings(MakeIntrusive<Tests::TServerSettings>(MsgBusPort)) +{ + // Settings + GetServerSettings() + .SetNodeCount(2); + + if (runServer) { + RunServer(); + } +} + +void TKesusQuoterTestSetup::RunServer() { + Server = MakeIntrusive<Tests::TServer>(ServerSettings, true); + Client = MakeHolder<Tests::TClient>(*ServerSettings); + + SetupLogging(); + + Client->InitRootScheme(); + + RegisterQuoterService(); + CreateDefaultKesusAndResource(); +} + +void TKesusQuoterTestSetup::SetupLogging() { + Server->GetRuntime()->SetLogPriority(NKikimrServices::KESUS_TABLET, NActors::NLog::PRI_TRACE); + Server->GetRuntime()->SetLogPriority(NKikimrServices::QUOTER_SERVICE, NActors::NLog::PRI_TRACE); + Server->GetRuntime()->SetLogPriority(NKikimrServices::QUOTER_PROXY, NActors::NLog::PRI_TRACE); +} + +void TKesusQuoterTestSetup::RegisterQuoterService() { + TTestActorRuntime* const runtime = GetServer().GetRuntime(); const TActorId quoterServiceActorId = runtime->Register(CreateQuoterService()); - runtime->RegisterService(MakeQuoterServiceID(), quoterServiceActorId); -} - -void TKesusQuoterTestSetup::CreateKesus(const TString& parent, const TString& name, 
NMsgBusProxy::EResponseStatus expectedStatus) { - const NMsgBusProxy::EResponseStatus status = GetClient().CreateKesus(parent, name); - UNIT_ASSERT_VALUES_EQUAL_C(status, expectedStatus, "Expected status: " << expectedStatus); -} - -ui64 TKesusQuoterTestSetup::GetKesusTabletId(const TString& path) { - TAutoPtr<NMsgBusProxy::TBusResponse> resp = Client->Ls(path); - UNIT_ASSERT_EQUAL(resp->Record.GetStatusCode(), NKikimrIssues::TStatusIds::SUCCESS); - const auto& pathDesc = resp->Record.GetPathDescription(); - UNIT_ASSERT(pathDesc.HasKesus()); - const ui64 tabletId = pathDesc.GetKesus().GetKesusTabletId(); - UNIT_ASSERT(tabletId); - return tabletId; -} - -NKikimrKesus::THierarchicalDRRResourceConfig TKesusQuoterTestSetup::MakeDefaultResourceProps() { - NKikimrKesus::THierarchicalDRRResourceConfig ret; - ret.SetMaxUnitsPerSecond(10); - return ret; -} - -void TKesusQuoterTestSetup::CreateKesusResource(const TString& kesusPath, const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& cfg) { - TTestActorRuntime* const runtime = Server->GetRuntime(); - - TAutoPtr<NKesus::TEvKesus::TEvAddQuoterResource> request(new NKesus::TEvKesus::TEvAddQuoterResource()); - request->Record.MutableResource()->SetResourcePath(resourcePath); - *request->Record.MutableResource()->MutableHierarhicalDRRResourceConfig() = cfg; - + runtime->RegisterService(MakeQuoterServiceID(), quoterServiceActorId); +} + +void TKesusQuoterTestSetup::CreateKesus(const TString& parent, const TString& name, NMsgBusProxy::EResponseStatus expectedStatus) { + const NMsgBusProxy::EResponseStatus status = GetClient().CreateKesus(parent, name); + UNIT_ASSERT_VALUES_EQUAL_C(status, expectedStatus, "Expected status: " << expectedStatus); +} + +ui64 TKesusQuoterTestSetup::GetKesusTabletId(const TString& path) { + TAutoPtr<NMsgBusProxy::TBusResponse> resp = Client->Ls(path); + UNIT_ASSERT_EQUAL(resp->Record.GetStatusCode(), NKikimrIssues::TStatusIds::SUCCESS); + const auto& pathDesc = resp->Record.GetPathDescription(); + UNIT_ASSERT(pathDesc.HasKesus()); + const ui64 tabletId = pathDesc.GetKesus().GetKesusTabletId(); + UNIT_ASSERT(tabletId); + return tabletId; +} + +NKikimrKesus::THierarchicalDRRResourceConfig TKesusQuoterTestSetup::MakeDefaultResourceProps() { + NKikimrKesus::THierarchicalDRRResourceConfig ret; + ret.SetMaxUnitsPerSecond(10); + return ret; +} + +void TKesusQuoterTestSetup::CreateKesusResource(const TString& kesusPath, const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& cfg) { + TTestActorRuntime* const runtime = Server->GetRuntime(); + + TAutoPtr<NKesus::TEvKesus::TEvAddQuoterResource> request(new NKesus::TEvKesus::TEvAddQuoterResource()); + request->Record.MutableResource()->SetResourcePath(resourcePath); + *request->Record.MutableResource()->MutableHierarhicalDRRResourceConfig() = cfg; + TActorId sender = GetEdgeActor(); - Cerr << "AddQuoterResource: " << request->Record << Endl; + Cerr << "AddQuoterResource: " << request->Record << Endl; ForwardToTablet(*runtime, GetKesusTabletId(kesusPath), sender, request.Release(), 0); - - TAutoPtr<IEventHandle> handle; - runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvAddQuoterResourceResult>(handle); - const NKikimrKesus::TEvAddQuoterResourceResult& record = handle->Get<NKesus::TEvKesus::TEvAddQuoterResourceResult>()->Record; - UNIT_ASSERT_VALUES_EQUAL(record.GetError().GetStatus(), Ydb::StatusIds::SUCCESS); -} - -void TKesusQuoterTestSetup::CreateDefaultKesusAndResource() { - CreateKesus(DEFAULT_KESUS_PARENT_PATH, DEFAULT_KESUS_NAME); - 
CreateKesusResource(DEFAULT_KESUS_PATH, DEFAULT_KESUS_RESOURCE); -} - -void TKesusQuoterTestSetup::DeleteKesusResource(const TString& kesusPath, const TString& resourcePath) { - TTestActorRuntime* const runtime = Server->GetRuntime(); - - TAutoPtr<NKesus::TEvKesus::TEvDeleteQuoterResource> request(new NKesus::TEvKesus::TEvDeleteQuoterResource()); - request->Record.SetResourcePath(resourcePath); - + + TAutoPtr<IEventHandle> handle; + runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvAddQuoterResourceResult>(handle); + const NKikimrKesus::TEvAddQuoterResourceResult& record = handle->Get<NKesus::TEvKesus::TEvAddQuoterResourceResult>()->Record; + UNIT_ASSERT_VALUES_EQUAL(record.GetError().GetStatus(), Ydb::StatusIds::SUCCESS); +} + +void TKesusQuoterTestSetup::CreateDefaultKesusAndResource() { + CreateKesus(DEFAULT_KESUS_PARENT_PATH, DEFAULT_KESUS_NAME); + CreateKesusResource(DEFAULT_KESUS_PATH, DEFAULT_KESUS_RESOURCE); +} + +void TKesusQuoterTestSetup::DeleteKesusResource(const TString& kesusPath, const TString& resourcePath) { + TTestActorRuntime* const runtime = Server->GetRuntime(); + + TAutoPtr<NKesus::TEvKesus::TEvDeleteQuoterResource> request(new NKesus::TEvKesus::TEvDeleteQuoterResource()); + request->Record.SetResourcePath(resourcePath); + TActorId sender = GetEdgeActor(); - Cerr << "DeleteQuoterResource: " << request->Record << Endl; + Cerr << "DeleteQuoterResource: " << request->Record << Endl; ForwardToTablet(*runtime, GetKesusTabletId(kesusPath), sender, request.Release(), 0); - - TAutoPtr<IEventHandle> handle; - runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvDeleteQuoterResourceResult>(handle); - const NKikimrKesus::TEvDeleteQuoterResourceResult& record = handle->Get<NKesus::TEvKesus::TEvDeleteQuoterResourceResult>()->Record; - UNIT_ASSERT_VALUES_EQUAL(record.GetError().GetStatus(), Ydb::StatusIds::SUCCESS); -} - + + TAutoPtr<IEventHandle> handle; + runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvDeleteQuoterResourceResult>(handle); + const NKikimrKesus::TEvDeleteQuoterResourceResult& record = handle->Get<NKesus::TEvKesus::TEvDeleteQuoterResourceResult>()->Record; + UNIT_ASSERT_VALUES_EQUAL(record.GetError().GetStatus(), Ydb::StatusIds::SUCCESS); +} + TActorId TKesusQuoterTestSetup::GetEdgeActor() { - if (!EdgeActor) { - EdgeActor = GetServer().GetRuntime()->AllocateEdgeActor(0); - } - return EdgeActor; -} - -void TKesusQuoterTestSetup::GetQuota(const std::vector<std::tuple<TString, TString, ui64>>& resources, TEvQuota::EResourceOperator operation, TDuration deadline, TEvQuota::TEvClearance::EResult expectedResult) { - SendGetQuotaRequest(resources, operation, deadline); - auto answer = WaitGetQuotaAnswer(); - UNIT_ASSERT_VALUES_EQUAL(answer->Result, expectedResult); -} - -void TKesusQuoterTestSetup::GetQuota(const TString& kesusPath, const TString& resourcePath, ui64 amount, TDuration deadline, TEvQuota::TEvClearance::EResult expectedResult) { - GetQuota({{kesusPath, resourcePath, amount}}, TEvQuota::EResourceOperator::And, deadline, expectedResult); -} - -void TKesusQuoterTestSetup::GetQuota(const TString& kesusPath, const TString& resourcePath, ui64 amount, TEvQuota::TEvClearance::EResult expectedResult) { - GetQuota(kesusPath, resourcePath, amount, TDuration::Max(), expectedResult); -} - -void TKesusQuoterTestSetup::SendGetQuotaRequest(const std::vector<std::tuple<TString, TString, ui64>>& resources, TEvQuota::EResourceOperator operation, TDuration deadline) { - TVector<TEvQuota::TResourceLeaf> res; - res.reserve(resources.size()); - for (auto&& [kesusPath, resourcePath, amount] : 
resources) { - res.emplace_back(kesusPath, resourcePath, amount); - } - GetServer().GetRuntime()->Send(new IEventHandle(MakeQuoterServiceID(), GetEdgeActor(), new TEvQuota::TEvRequest(operation, std::move(res), deadline))); -} - -void TKesusQuoterTestSetup::SendGetQuotaRequest(const TString& kesusPath, const TString& resourcePath, ui64 amount) { - SendGetQuotaRequest(kesusPath, resourcePath, amount, TDuration::Max()); -} - -void TKesusQuoterTestSetup::SendGetQuotaRequest(const TString& kesusPath, const TString& resourcePath, ui64 amount, TDuration deadline) { - SendGetQuotaRequest({{kesusPath, resourcePath, amount}}, TEvQuota::EResourceOperator::And, deadline); -} - -THolder<TEvQuota::TEvClearance> TKesusQuoterTestSetup::WaitGetQuotaAnswer() { - return GetServer().GetRuntime()->GrabEdgeEvent<TEvQuota::TEvClearance>(); -} - -void TKesusQuoterTestSetup::KillKesusTablet(const TString& kesusPath) { - TTestActorRuntime* const runtime = Server->GetRuntime(); - + if (!EdgeActor) { + EdgeActor = GetServer().GetRuntime()->AllocateEdgeActor(0); + } + return EdgeActor; +} + +void TKesusQuoterTestSetup::GetQuota(const std::vector<std::tuple<TString, TString, ui64>>& resources, TEvQuota::EResourceOperator operation, TDuration deadline, TEvQuota::TEvClearance::EResult expectedResult) { + SendGetQuotaRequest(resources, operation, deadline); + auto answer = WaitGetQuotaAnswer(); + UNIT_ASSERT_VALUES_EQUAL(answer->Result, expectedResult); +} + +void TKesusQuoterTestSetup::GetQuota(const TString& kesusPath, const TString& resourcePath, ui64 amount, TDuration deadline, TEvQuota::TEvClearance::EResult expectedResult) { + GetQuota({{kesusPath, resourcePath, amount}}, TEvQuota::EResourceOperator::And, deadline, expectedResult); +} + +void TKesusQuoterTestSetup::GetQuota(const TString& kesusPath, const TString& resourcePath, ui64 amount, TEvQuota::TEvClearance::EResult expectedResult) { + GetQuota(kesusPath, resourcePath, amount, TDuration::Max(), expectedResult); +} + +void TKesusQuoterTestSetup::SendGetQuotaRequest(const std::vector<std::tuple<TString, TString, ui64>>& resources, TEvQuota::EResourceOperator operation, TDuration deadline) { + TVector<TEvQuota::TResourceLeaf> res; + res.reserve(resources.size()); + for (auto&& [kesusPath, resourcePath, amount] : resources) { + res.emplace_back(kesusPath, resourcePath, amount); + } + GetServer().GetRuntime()->Send(new IEventHandle(MakeQuoterServiceID(), GetEdgeActor(), new TEvQuota::TEvRequest(operation, std::move(res), deadline))); +} + +void TKesusQuoterTestSetup::SendGetQuotaRequest(const TString& kesusPath, const TString& resourcePath, ui64 amount) { + SendGetQuotaRequest(kesusPath, resourcePath, amount, TDuration::Max()); +} + +void TKesusQuoterTestSetup::SendGetQuotaRequest(const TString& kesusPath, const TString& resourcePath, ui64 amount, TDuration deadline) { + SendGetQuotaRequest({{kesusPath, resourcePath, amount}}, TEvQuota::EResourceOperator::And, deadline); +} + +THolder<TEvQuota::TEvClearance> TKesusQuoterTestSetup::WaitGetQuotaAnswer() { + return GetServer().GetRuntime()->GrabEdgeEvent<TEvQuota::TEvClearance>(); +} + +void TKesusQuoterTestSetup::KillKesusTablet(const TString& kesusPath) { + TTestActorRuntime* const runtime = Server->GetRuntime(); + TActorId sender = GetEdgeActor(); - Cerr << "Kill kesus tablet: " << kesusPath << Endl; + Cerr << "Kill kesus tablet: " << kesusPath << Endl; ForwardToTablet(*runtime, GetKesusTabletId(kesusPath), sender, new TEvents::TEvPoisonPill(), 0); -} - -NKikimrKesus::TEvGetQuoterResourceCountersResult 
TKesusQuoterTestSetup::GetQuoterCounters(const TString& kesusPath) { - TTestActorRuntime* const runtime = Server->GetRuntime(); - +} + +NKikimrKesus::TEvGetQuoterResourceCountersResult TKesusQuoterTestSetup::GetQuoterCounters(const TString& kesusPath) { + TTestActorRuntime* const runtime = Server->GetRuntime(); + ForwardToTablet(*runtime, GetKesusTabletId(kesusPath), GetEdgeActor(), new NKesus::TEvKesus::TEvGetQuoterResourceCounters(), 0); - - TAutoPtr<IEventHandle> handle; - runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvGetQuoterResourceCountersResult>(handle); - NKikimrKesus::TEvGetQuoterResourceCountersResult record = handle->Get<NKesus::TEvKesus::TEvGetQuoterResourceCountersResult>()->Record; - std::sort(record.MutableResourceCounters()->begin(), record.MutableResourceCounters()->end(), - [] (const auto& r1, const auto& r2) { - return r1.GetResourcePath() < r2.GetResourcePath(); - } - ); - Cerr << (TStringBuilder() << "Kesus quoter counters: " << record << Endl); - return record; -} - -TKesusProxyTestSetup::TKesusProxyTestSetup() { - Start(); -} - -TTestActorRuntime::TEgg MakeEgg() { - return { new TAppData(0, 0, 0, 0, { }, nullptr, nullptr, nullptr, nullptr), nullptr, nullptr }; -} - -void TKesusProxyTestSetup::Start() { - Runtime = MakeHolder<TTestActorRuntime>(); - Runtime->Initialize(MakeEgg()); - SetupLogging(); - - Runtime->UpdateCurrentTime(TInstant::Now()); - - StartKesusProxy(); -} - -void TKesusProxyTestSetup::StartKesusProxy() { - NSchemeCache::TSchemeCacheNavigate::TEntry entry; - entry.Path.push_back("Path"); - entry.Path.push_back("KesusName"); - auto kesusInfo = MakeIntrusive<NSchemeCache::TSchemeCacheNavigate::TKesusInfo>(); - entry.KesusInfo = kesusInfo; - kesusInfo->Kind = NSchemeCache::TSchemeCacheNavigate::KindKesus; - kesusInfo->Description.SetName("KesusName"); - kesusInfo->Description.SetKesusTabletId(KESUS_TABLET_ID); - - auto pipeFactory = MakeHolder<TTestTabletPipeFactory>(this); - PipeFactory = pipeFactory.Get(); - KesusProxyId = Runtime->Register(CreateKesusQuoterProxy(QUOTER_ID, entry, GetEdgeActor(), std::move(pipeFactory))); - Runtime->EnableScheduleForActor(KesusProxyId); -} - -void TKesusProxyTestSetup::SetupLogging() { - Runtime->SetLogPriority(NKikimrServices::KESUS_TABLET, NActors::NLog::PRI_TRACE); - Runtime->SetLogPriority(NKikimrServices::QUOTER_SERVICE, NActors::NLog::PRI_TRACE); - Runtime->SetLogPriority(NKikimrServices::QUOTER_PROXY, NActors::NLog::PRI_TRACE); -} - + + TAutoPtr<IEventHandle> handle; + runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvGetQuoterResourceCountersResult>(handle); + NKikimrKesus::TEvGetQuoterResourceCountersResult record = handle->Get<NKesus::TEvKesus::TEvGetQuoterResourceCountersResult>()->Record; + std::sort(record.MutableResourceCounters()->begin(), record.MutableResourceCounters()->end(), + [] (const auto& r1, const auto& r2) { + return r1.GetResourcePath() < r2.GetResourcePath(); + } + ); + Cerr << (TStringBuilder() << "Kesus quoter counters: " << record << Endl); + return record; +} + +TKesusProxyTestSetup::TKesusProxyTestSetup() { + Start(); +} + +TTestActorRuntime::TEgg MakeEgg() { + return { new TAppData(0, 0, 0, 0, { }, nullptr, nullptr, nullptr, nullptr), nullptr, nullptr }; +} + +void TKesusProxyTestSetup::Start() { + Runtime = MakeHolder<TTestActorRuntime>(); + Runtime->Initialize(MakeEgg()); + SetupLogging(); + + Runtime->UpdateCurrentTime(TInstant::Now()); + + StartKesusProxy(); +} + +void TKesusProxyTestSetup::StartKesusProxy() { + NSchemeCache::TSchemeCacheNavigate::TEntry entry; + 
entry.Path.push_back("Path"); + entry.Path.push_back("KesusName"); + auto kesusInfo = MakeIntrusive<NSchemeCache::TSchemeCacheNavigate::TKesusInfo>(); + entry.KesusInfo = kesusInfo; + kesusInfo->Kind = NSchemeCache::TSchemeCacheNavigate::KindKesus; + kesusInfo->Description.SetName("KesusName"); + kesusInfo->Description.SetKesusTabletId(KESUS_TABLET_ID); + + auto pipeFactory = MakeHolder<TTestTabletPipeFactory>(this); + PipeFactory = pipeFactory.Get(); + KesusProxyId = Runtime->Register(CreateKesusQuoterProxy(QUOTER_ID, entry, GetEdgeActor(), std::move(pipeFactory))); + Runtime->EnableScheduleForActor(KesusProxyId); +} + +void TKesusProxyTestSetup::SetupLogging() { + Runtime->SetLogPriority(NKikimrServices::KESUS_TABLET, NActors::NLog::PRI_TRACE); + Runtime->SetLogPriority(NKikimrServices::QUOTER_SERVICE, NActors::NLog::PRI_TRACE); + Runtime->SetLogPriority(NKikimrServices::QUOTER_PROXY, NActors::NLog::PRI_TRACE); +} + TActorId TKesusProxyTestSetup::GetEdgeActor() { - if (!EdgeActor) { - EdgeActor = Runtime->AllocateEdgeActor(0); - } - return EdgeActor; -} - + if (!EdgeActor) { + EdgeActor = Runtime->AllocateEdgeActor(0); + } + return EdgeActor; +} + TActorId TKesusProxyTestSetup::GetPipeEdgeActor() { - if (!PipeEdgeActor) { - PipeEdgeActor = Runtime->AllocateEdgeActor(0); - } - return PipeEdgeActor; -} - -void TKesusProxyTestSetup::WaitProxyStart() { - if (PipeFactory->GetPipesCreatedCount() == 0) { - WaitPipesCreated(1); - } -} - -void TKesusProxyTestSetup::SendNotConnected(TTestTabletPipeFactory::TTestTabletPipe* pipe) { - WaitProxyStart(); + if (!PipeEdgeActor) { + PipeEdgeActor = Runtime->AllocateEdgeActor(0); + } + return PipeEdgeActor; +} + +void TKesusProxyTestSetup::WaitProxyStart() { + if (PipeFactory->GetPipesCreatedCount() == 0) { + WaitPipesCreated(1); + } +} + +void TKesusProxyTestSetup::SendNotConnected(TTestTabletPipeFactory::TTestTabletPipe* pipe) { + WaitProxyStart(); Runtime->Send(new IEventHandle(KesusProxyId, pipe->GetSelfID(), new TEvTabletPipe::TEvClientConnected(KESUS_TABLET_ID, NKikimrProto::ERROR, pipe->GetSelfID(), TActorId(), true, false)), 0, true); -} - -void TKesusProxyTestSetup::SendConnected(TTestTabletPipeFactory::TTestTabletPipe* pipe) { - WaitProxyStart(); - Runtime->Send(new IEventHandle(KesusProxyId, pipe->GetSelfID(), new TEvTabletPipe::TEvClientConnected(KESUS_TABLET_ID, NKikimrProto::OK, pipe->GetSelfID(), pipe->GetSelfID(), true, false)), 0, true); -} - -void TKesusProxyTestSetup::SendDestroyed(TTestTabletPipeFactory::TTestTabletPipe* pipe) { - WaitProxyStart(); - Runtime->Send(new IEventHandle(KesusProxyId, pipe->GetSelfID(), new TEvTabletPipe::TEvClientDestroyed(KESUS_TABLET_ID, pipe->GetSelfID(), pipe->GetSelfID())), 0, true); -} - -void TKesusProxyTestSetup::WaitPipesCreated(size_t count) { - UNIT_ASSERT(PipeFactory); - TDispatchOptions pipesCreated; - pipesCreated.CustomFinalCondition = [this, count] { - return PipeFactory->GetPipesCreatedCount() >= count; - }; - Runtime->DispatchEvents(pipesCreated); -} - -void TKesusProxyTestSetup::WaitEvent(ui32 eventType, ui32 requiredCount) { - TDispatchOptions waitOpt; - waitOpt.FinalEvents.push_back( - TDispatchOptions::TFinalEventCondition( - [eventType](IEventHandle& ev) { - return - ev.GetTypeRewrite() == eventType - || ev.Type == eventType; // Events forwarded to pipe - }, - requiredCount)); - GetRuntime().DispatchEvents(waitOpt); -} - -ui32 TKesusProxyTestSetup::WaitEvent(const THashSet<ui32>& eventTypes, ui32 requiredCount) { - TDispatchOptions waitOpt; - ui32 firedEvent = 0; - 
waitOpt.FinalEvents.push_back( - TDispatchOptions::TFinalEventCondition( - [&eventTypes, &firedEvent](IEventHandle& ev) { - if (IsIn(eventTypes, ev.GetTypeRewrite())) { - firedEvent = ev.GetTypeRewrite(); - return true; - } - if (ev.Type != ev.GetTypeRewrite() && IsIn(eventTypes, ev.Type)) { // Events forwarded to pipe - firedEvent = ev.Type; - return true; - } - return false; - }, - requiredCount)); - GetRuntime().DispatchEvents(waitOpt); - return firedEvent; -} - -void TKesusProxyTestSetup::WaitConnected() { - WaitEvent<TEvTabletPipe::TEvClientConnected>(); -} - -void TKesusProxyTestSetup::SendProxyRequest(const TString& resourceName) { - WaitProxyStart(); - Runtime->Send(new IEventHandle(KesusProxyId, GetEdgeActor(), new TEvQuota::TEvProxyRequest(resourceName)), 0, true); -} - -THolder<TEventHandle<TEvQuota::TEvProxySession>> TKesusProxyTestSetup::ProxyRequest(const TString& resourceName, TEvQuota::TEvProxySession::EResult expectedResult) { - SendProxyRequest(resourceName); - - TAutoPtr<IEventHandle> handle; - TEvQuota::TEvProxySession* ret = Runtime->GrabEdgeEvent<TEvQuota::TEvProxySession>(handle); - UNIT_ASSERT_EQUAL_C(ret->Result, expectedResult, "Actual result: " << static_cast<int>(ret->Result) << ", but expected: " << static_cast<int>(expectedResult)); +} + +void TKesusProxyTestSetup::SendConnected(TTestTabletPipeFactory::TTestTabletPipe* pipe) { + WaitProxyStart(); + Runtime->Send(new IEventHandle(KesusProxyId, pipe->GetSelfID(), new TEvTabletPipe::TEvClientConnected(KESUS_TABLET_ID, NKikimrProto::OK, pipe->GetSelfID(), pipe->GetSelfID(), true, false)), 0, true); +} + +void TKesusProxyTestSetup::SendDestroyed(TTestTabletPipeFactory::TTestTabletPipe* pipe) { + WaitProxyStart(); + Runtime->Send(new IEventHandle(KesusProxyId, pipe->GetSelfID(), new TEvTabletPipe::TEvClientDestroyed(KESUS_TABLET_ID, pipe->GetSelfID(), pipe->GetSelfID())), 0, true); +} + +void TKesusProxyTestSetup::WaitPipesCreated(size_t count) { + UNIT_ASSERT(PipeFactory); + TDispatchOptions pipesCreated; + pipesCreated.CustomFinalCondition = [this, count] { + return PipeFactory->GetPipesCreatedCount() >= count; + }; + Runtime->DispatchEvents(pipesCreated); +} + +void TKesusProxyTestSetup::WaitEvent(ui32 eventType, ui32 requiredCount) { + TDispatchOptions waitOpt; + waitOpt.FinalEvents.push_back( + TDispatchOptions::TFinalEventCondition( + [eventType](IEventHandle& ev) { + return + ev.GetTypeRewrite() == eventType + || ev.Type == eventType; // Events forwarded to pipe + }, + requiredCount)); + GetRuntime().DispatchEvents(waitOpt); +} + +ui32 TKesusProxyTestSetup::WaitEvent(const THashSet<ui32>& eventTypes, ui32 requiredCount) { + TDispatchOptions waitOpt; + ui32 firedEvent = 0; + waitOpt.FinalEvents.push_back( + TDispatchOptions::TFinalEventCondition( + [&eventTypes, &firedEvent](IEventHandle& ev) { + if (IsIn(eventTypes, ev.GetTypeRewrite())) { + firedEvent = ev.GetTypeRewrite(); + return true; + } + if (ev.Type != ev.GetTypeRewrite() && IsIn(eventTypes, ev.Type)) { // Events forwarded to pipe + firedEvent = ev.Type; + return true; + } + return false; + }, + requiredCount)); + GetRuntime().DispatchEvents(waitOpt); + return firedEvent; +} + +void TKesusProxyTestSetup::WaitConnected() { + WaitEvent<TEvTabletPipe::TEvClientConnected>(); +} + +void TKesusProxyTestSetup::SendProxyRequest(const TString& resourceName) { + WaitProxyStart(); + Runtime->Send(new IEventHandle(KesusProxyId, GetEdgeActor(), new TEvQuota::TEvProxyRequest(resourceName)), 0, true); +} + +THolder<TEventHandle<TEvQuota::TEvProxySession>> 
TKesusProxyTestSetup::ProxyRequest(const TString& resourceName, TEvQuota::TEvProxySession::EResult expectedResult) { + SendProxyRequest(resourceName); + + TAutoPtr<IEventHandle> handle; + TEvQuota::TEvProxySession* ret = Runtime->GrabEdgeEvent<TEvQuota::TEvProxySession>(handle); + UNIT_ASSERT_EQUAL_C(ret->Result, expectedResult, "Actual result: " << static_cast<int>(ret->Result) << ", but expected: " << static_cast<int>(expectedResult)); return THolder<TEventHandle<TEvQuota::TEvProxySession>>{static_cast<TEventHandle<TEvQuota::TEvProxySession>*>(handle.Release())}; -} - -void TKesusProxyTestSetup::SendProxyStats(TDeque<TEvQuota::TProxyStat> stats) { - WaitProxyStart(); - Runtime->Send(new IEventHandle(KesusProxyId, GetEdgeActor(), new TEvQuota::TEvProxyStats(std::move(stats))), 0, true); -} - -THolder<TEventHandle<TEvQuota::TEvProxyUpdate>> TKesusProxyTestSetup::GetProxyUpdate() { - TAutoPtr<IEventHandle> handle; - Runtime->GrabEdgeEvent<TEvQuota::TEvProxyUpdate>(handle); +} + +void TKesusProxyTestSetup::SendProxyStats(TDeque<TEvQuota::TProxyStat> stats) { + WaitProxyStart(); + Runtime->Send(new IEventHandle(KesusProxyId, GetEdgeActor(), new TEvQuota::TEvProxyStats(std::move(stats))), 0, true); +} + +THolder<TEventHandle<TEvQuota::TEvProxyUpdate>> TKesusProxyTestSetup::GetProxyUpdate() { + TAutoPtr<IEventHandle> handle; + Runtime->GrabEdgeEvent<TEvQuota::TEvProxyUpdate>(handle); return THolder<TEventHandle<TEvQuota::TEvProxyUpdate>>{static_cast<TEventHandle<TEvQuota::TEvProxyUpdate>*>(handle.Release())}; -} - -void TKesusProxyTestSetup::SendCloseSession(const TString& resource, ui64 resourceId) { - WaitProxyStart(); - Runtime->Send(new IEventHandle(KesusProxyId, GetEdgeActor(), new TEvQuota::TEvProxyCloseSession(resource, resourceId)), 0, true); -} - -void TKesusProxyTestSetup::SendResourcesAllocated(TTestTabletPipeFactory::TTestTabletPipe* pipe, ui64 resId, double amount, Ydb::StatusIds::StatusCode status) { - NKikimrKesus::TEvResourcesAllocated ev; - auto* resInfo = ev.AddResourcesInfo(); - resInfo->SetResourceId(resId); - resInfo->SetAmount(amount); - resInfo->MutableStateNotification()->SetStatus(status); - - Runtime->Send(new IEventHandle(KesusProxyId, pipe->GetSelfID(), new NKesus::TEvKesus::TEvResourcesAllocated(std::move(ev))), 0, true); -} - -bool TKesusProxyTestSetup::ConsumeResource(ui64 resId, double amount, TDuration tickSize, std::function<void()> afterStat, const size_t maxUpdates) { - UNIT_ASSERT(maxUpdates > 0); - UNIT_ASSERT(resId > 0); - size_t updatesGot = 0; - const TInstant start = Runtime->GetCurrentTime(); - while (updatesGot < maxUpdates) { - ++updatesGot; +} + +void TKesusProxyTestSetup::SendCloseSession(const TString& resource, ui64 resourceId) { + WaitProxyStart(); + Runtime->Send(new IEventHandle(KesusProxyId, GetEdgeActor(), new TEvQuota::TEvProxyCloseSession(resource, resourceId)), 0, true); +} + +void TKesusProxyTestSetup::SendResourcesAllocated(TTestTabletPipeFactory::TTestTabletPipe* pipe, ui64 resId, double amount, Ydb::StatusIds::StatusCode status) { + NKikimrKesus::TEvResourcesAllocated ev; + auto* resInfo = ev.AddResourcesInfo(); + resInfo->SetResourceId(resId); + resInfo->SetAmount(amount); + resInfo->MutableStateNotification()->SetStatus(status); + + Runtime->Send(new IEventHandle(KesusProxyId, pipe->GetSelfID(), new NKesus::TEvKesus::TEvResourcesAllocated(std::move(ev))), 0, true); +} + +bool TKesusProxyTestSetup::ConsumeResource(ui64 resId, double amount, TDuration tickSize, std::function<void()> afterStat, const size_t maxUpdates) { + 
UNIT_ASSERT(maxUpdates > 0); + UNIT_ASSERT(resId > 0); + size_t updatesGot = 0; + const TInstant start = Runtime->GetCurrentTime(); + while (updatesGot < maxUpdates) { + ++updatesGot; SendProxyStats({TEvQuota::TProxyStat(resId, 1, 0, {}, 1, amount, 0, 0)}); - WaitEvent<TEvQuota::TEvProxyStats>(); // wait event to be processed - - afterStat(); - - for (size_t i = 0; i < 2; ++i) { // The first is for TEvProxyStats answer, the second is for real answer - auto update = GetProxyUpdate(); - UNIT_ASSERT_VALUES_EQUAL(update->Get()->QuoterId, resId); - UNIT_ASSERT_GT_C(update->Get()->Resources.size(), 0, "Resources count: " << update->Get()->Resources.size()); - bool found = false; - for (const auto& res : update->Get()->Resources) { - UNIT_ASSERT(res.ResourceId); - if (res.ResourceId == resId) { - found = true; - UNIT_ASSERT_VALUES_EQUAL(res.ResourceState, TEvQuota::EUpdateState::Normal); - - UNIT_ASSERT_VALUES_EQUAL(res.Update.size(), 1); - const auto& updateTick = res.Update.front(); - UNIT_ASSERT_VALUES_EQUAL(updateTick.Channel, 0); - UNIT_ASSERT_VALUES_EQUAL(updateTick.Policy, TEvQuota::ETickPolicy::Front); - const bool noAmount = updateTick.Rate == 0 && updateTick.Ticks == 0; - if (!noAmount) { - UNIT_ASSERT(res.SustainedRate > 0); - UNIT_ASSERT_VALUES_EQUAL(updateTick.Ticks, 2); - UNIT_ASSERT(updateTick.Rate > 0); - } - - const TDuration timeToCharge = amount / updateTick.Rate * tickSize; - if (Runtime->GetCurrentTime() - start >= timeToCharge || amount <= updateTick.Rate) { - // spend and exit + WaitEvent<TEvQuota::TEvProxyStats>(); // wait event to be processed + + afterStat(); + + for (size_t i = 0; i < 2; ++i) { // The first is for TEvProxyStats answer, the second is for real answer + auto update = GetProxyUpdate(); + UNIT_ASSERT_VALUES_EQUAL(update->Get()->QuoterId, resId); + UNIT_ASSERT_GT_C(update->Get()->Resources.size(), 0, "Resources count: " << update->Get()->Resources.size()); + bool found = false; + for (const auto& res : update->Get()->Resources) { + UNIT_ASSERT(res.ResourceId); + if (res.ResourceId == resId) { + found = true; + UNIT_ASSERT_VALUES_EQUAL(res.ResourceState, TEvQuota::EUpdateState::Normal); + + UNIT_ASSERT_VALUES_EQUAL(res.Update.size(), 1); + const auto& updateTick = res.Update.front(); + UNIT_ASSERT_VALUES_EQUAL(updateTick.Channel, 0); + UNIT_ASSERT_VALUES_EQUAL(updateTick.Policy, TEvQuota::ETickPolicy::Front); + const bool noAmount = updateTick.Rate == 0 && updateTick.Ticks == 0; + if (!noAmount) { + UNIT_ASSERT(res.SustainedRate > 0); + UNIT_ASSERT_VALUES_EQUAL(updateTick.Ticks, 2); + UNIT_ASSERT(updateTick.Rate > 0); + } + + const TDuration timeToCharge = amount / updateTick.Rate * tickSize; + if (Runtime->GetCurrentTime() - start >= timeToCharge || amount <= updateTick.Rate) { + // spend and exit SendProxyStats({TEvQuota::TProxyStat(resId, 1, amount, {}, 0, 0, 0, 0)}); - return true; - } - } - } - UNIT_ASSERT(found); - } - } - return false; -} - -TKesusProxyTestSetup::TTestTabletPipeFactory::~TTestTabletPipeFactory() { - if (NextPipe < PipesExpectedToCreate.size()) { - UNIT_FAIL_NONFATAL("Expected " << PipesExpectedToCreate.size() << " kesus tablet pipe creations, but actually were only " << NextPipe << " ones"); - } - for (size_t i = NextPipe; i < PipesExpectedToCreate.size(); ++i) { - delete PipesExpectedToCreate[i]; - } - PipesExpectedToCreate.clear(); -} - + return true; + } + } + } + UNIT_ASSERT(found); + } + } + return false; +} + +TKesusProxyTestSetup::TTestTabletPipeFactory::~TTestTabletPipeFactory() { + if (NextPipe < PipesExpectedToCreate.size()) { 
+ UNIT_FAIL_NONFATAL("Expected " << PipesExpectedToCreate.size() << " kesus tablet pipe creations, but actually were only " << NextPipe << " ones"); + } + for (size_t i = NextPipe; i < PipesExpectedToCreate.size(); ++i) { + delete PipesExpectedToCreate[i]; + } + PipesExpectedToCreate.clear(); +} + IActor* TKesusProxyTestSetup::TTestTabletPipeFactory::CreateTabletPipe(const NActors::TActorId& owner, ui64 tabletId, const NKikimr::NTabletPipe::TClientConfig&) { - UNIT_ASSERT(owner); - UNIT_ASSERT_VALUES_EQUAL(owner, Parent->KesusProxyId); - UNIT_ASSERT(tabletId); - UNIT_ASSERT_VALUES_EQUAL(tabletId, KESUS_TABLET_ID); - if (NextPipe >= PipesExpectedToCreate.size()) { - ExpectTabletPipeConnection(); - UNIT_ASSERT(NextPipe < PipesExpectedToCreate.size()); - } - return PipesExpectedToCreate[NextPipe++]; -} - -TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe* TKesusProxyTestSetup::TTestTabletPipeFactory::ExpectTabletPipeCreation(bool wait) { - THolder<TTestTabletPipe> pipe = MakeHolder<TTestTabletPipe>(this); - TTestTabletPipe* ret = pipe.Get(); - PipesExpectedToCreate.push_back(pipe.Release()); - if (wait) { - Parent->WaitPipesCreated(PipesExpectedToCreate.size()); - } - return ret; -} - -TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe* TKesusProxyTestSetup::TTestTabletPipeFactory::ExpectTabletPipeConnection() { - TTestTabletPipe* pipe = ExpectTabletPipeCreation(); - EXPECT_CALL(*pipe, OnStart()) - .WillOnce(Invoke(pipe, &TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendConnected)); - return pipe; -} - -TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::TTestTabletPipe(TTestTabletPipeFactory* parent) - : Parent(parent) -{ -} - -STFUNC(TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::StateFunc) { - Y_UNUSED(ctx); - switch (ev->GetTypeRewrite()) { - FFunc(TEvTabletPipe::EvSend, HandleSend); - cFunc(TEvents::TEvPoisonPill::EventType, HandlePoisonPill); - default: - UNIT_ASSERT_C(false, "Unexpected event got in tablet pipe: " << ev->GetTypeRewrite()); - } -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::Bootstrap(const TActorContext& ctx) { - SelfID = ctx.SelfID; - Become(&TTestTabletPipe::StateFunc); - OnStart(); -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::HandlePoisonPill() { - IsDead = true; - OnPoisonPill(); -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::HandleSend(TAutoPtr<IEventHandle>& ev, const TActorContext& ctx) { - Y_UNUSED(ctx); - ev->DropRewrite(); - switch (ev->GetTypeRewrite()) { - hFunc(NKesus::TEvKesus::TEvSubscribeOnResources, HandleSubscribeOnResources); - hFunc(NKesus::TEvKesus::TEvUpdateConsumptionState, HandleUpdateConsumptionState); - hFunc(NKesus::TEvKesus::TEvResourcesAllocatedAck, HandleResourcesAllocatedAck); - default: - UNIT_ASSERT_C(false, "Unexpected send event got in tablet pipe: " << ev->GetTypeRewrite()); - } -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::HandleSubscribeOnResources(NKesus::TEvKesus::TEvSubscribeOnResources::TPtr& ev) { + UNIT_ASSERT(owner); + UNIT_ASSERT_VALUES_EQUAL(owner, Parent->KesusProxyId); + UNIT_ASSERT(tabletId); + UNIT_ASSERT_VALUES_EQUAL(tabletId, KESUS_TABLET_ID); + if (NextPipe >= PipesExpectedToCreate.size()) { + ExpectTabletPipeConnection(); + UNIT_ASSERT(NextPipe < PipesExpectedToCreate.size()); + } + return PipesExpectedToCreate[NextPipe++]; +} + +TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe* 
TKesusProxyTestSetup::TTestTabletPipeFactory::ExpectTabletPipeCreation(bool wait) { + THolder<TTestTabletPipe> pipe = MakeHolder<TTestTabletPipe>(this); + TTestTabletPipe* ret = pipe.Get(); + PipesExpectedToCreate.push_back(pipe.Release()); + if (wait) { + Parent->WaitPipesCreated(PipesExpectedToCreate.size()); + } + return ret; +} + +TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe* TKesusProxyTestSetup::TTestTabletPipeFactory::ExpectTabletPipeConnection() { + TTestTabletPipe* pipe = ExpectTabletPipeCreation(); + EXPECT_CALL(*pipe, OnStart()) + .WillOnce(Invoke(pipe, &TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendConnected)); + return pipe; +} + +TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::TTestTabletPipe(TTestTabletPipeFactory* parent) + : Parent(parent) +{ +} + +STFUNC(TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::StateFunc) { + Y_UNUSED(ctx); + switch (ev->GetTypeRewrite()) { + FFunc(TEvTabletPipe::EvSend, HandleSend); + cFunc(TEvents::TEvPoisonPill::EventType, HandlePoisonPill); + default: + UNIT_ASSERT_C(false, "Unexpected event got in tablet pipe: " << ev->GetTypeRewrite()); + } +} + +void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::Bootstrap(const TActorContext& ctx) { + SelfID = ctx.SelfID; + Become(&TTestTabletPipe::StateFunc); + OnStart(); +} + +void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::HandlePoisonPill() { + IsDead = true; + OnPoisonPill(); +} + +void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::HandleSend(TAutoPtr<IEventHandle>& ev, const TActorContext& ctx) { + Y_UNUSED(ctx); + ev->DropRewrite(); + switch (ev->GetTypeRewrite()) { + hFunc(NKesus::TEvKesus::TEvSubscribeOnResources, HandleSubscribeOnResources); + hFunc(NKesus::TEvKesus::TEvUpdateConsumptionState, HandleUpdateConsumptionState); + hFunc(NKesus::TEvKesus::TEvResourcesAllocatedAck, HandleResourcesAllocatedAck); + default: + UNIT_ASSERT_C(false, "Unexpected send event got in tablet pipe: " << ev->GetTypeRewrite()); + } +} + +void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::HandleSubscribeOnResources(NKesus::TEvKesus::TEvSubscribeOnResources::TPtr& ev) { const TActorId proxy = ActorIdFromProto(ev->Get()->Record.GetActorID()); - UNIT_ASSERT_VALUES_EQUAL(proxy, Parent->Parent->KesusProxyId); - - OnSubscribeOnResources(ev->Get()->Record, ev->Cookie); -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::HandleUpdateConsumptionState(NKesus::TEvKesus::TEvUpdateConsumptionState::TPtr& ev) { - OnUpdateConsumptionState(ev->Get()->Record, ev->Cookie); -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::HandleResourcesAllocatedAck(NKesus::TEvKesus::TEvResourcesAllocatedAck::TPtr& ev) { - OnResourcesAllocatedAck(ev->Get()->Record, ev->Cookie); -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendNotConnected() { + UNIT_ASSERT_VALUES_EQUAL(proxy, Parent->Parent->KesusProxyId); + + OnSubscribeOnResources(ev->Get()->Record, ev->Cookie); +} + +void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::HandleUpdateConsumptionState(NKesus::TEvKesus::TEvUpdateConsumptionState::TPtr& ev) { + OnUpdateConsumptionState(ev->Get()->Record, ev->Cookie); +} + +void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::HandleResourcesAllocatedAck(NKesus::TEvKesus::TEvResourcesAllocatedAck::TPtr& ev) { + OnResourcesAllocatedAck(ev->Get()->Record, ev->Cookie); +} + +void 
TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendNotConnected() { Send(Parent->Parent->KesusProxyId, new TEvTabletPipe::TEvClientConnected(KESUS_TABLET_ID, NKikimrProto::ERROR, SelfID, TActorId(), true, false)); -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendConnected() { - Send(Parent->Parent->KesusProxyId, new TEvTabletPipe::TEvClientConnected(KESUS_TABLET_ID, NKikimrProto::OK, SelfID, SelfID, true, false)); -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendDestroyed() { - Send(Parent->Parent->KesusProxyId, new TEvTabletPipe::TEvClientDestroyed(KESUS_TABLET_ID, SelfID, SelfID)); -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendSubscribeOnResourceResult(const NKikimrKesus::TEvSubscribeOnResourcesResult& record, ui64 cookie) { - Send(Parent->Parent->KesusProxyId, new NKesus::TEvKesus::TEvSubscribeOnResourcesResult(record), 0, cookie); -} - -void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendUpdateConsumptionStateAck() { - NKikimrKesus::TEvUpdateConsumptionStateAck ack; - Send(Parent->Parent->KesusProxyId, new NKesus::TEvKesus::TEvUpdateConsumptionStateAck(ack)); -} - -THolder<IEventHandle> TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::GetDestroyedEventHandle() { - return MakeHolder<IEventHandle>(Parent->Parent->KesusProxyId, SelfID, new TEvTabletPipe::TEvClientDestroyed(KESUS_TABLET_ID, SelfID, SelfID)); -} - -} // namespace NKikimr +} + +void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendConnected() { + Send(Parent->Parent->KesusProxyId, new TEvTabletPipe::TEvClientConnected(KESUS_TABLET_ID, NKikimrProto::OK, SelfID, SelfID, true, false)); +} + +void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendDestroyed() { + Send(Parent->Parent->KesusProxyId, new TEvTabletPipe::TEvClientDestroyed(KESUS_TABLET_ID, SelfID, SelfID)); +} + +void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendSubscribeOnResourceResult(const NKikimrKesus::TEvSubscribeOnResourcesResult& record, ui64 cookie) { + Send(Parent->Parent->KesusProxyId, new NKesus::TEvKesus::TEvSubscribeOnResourcesResult(record), 0, cookie); +} + +void TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::SendUpdateConsumptionStateAck() { + NKikimrKesus::TEvUpdateConsumptionStateAck ack; + Send(Parent->Parent->KesusProxyId, new NKesus::TEvKesus::TEvUpdateConsumptionStateAck(ack)); +} + +THolder<IEventHandle> TKesusProxyTestSetup::TTestTabletPipeFactory::TTestTabletPipe::GetDestroyedEventHandle() { + return MakeHolder<IEventHandle>(Parent->Parent->KesusProxyId, SelfID, new TEvTabletPipe::TEvClientDestroyed(KESUS_TABLET_ID, SelfID, SelfID)); +} + +} // namespace NKikimr diff --git a/ydb/core/quoter/ut_helpers.h b/ydb/core/quoter/ut_helpers.h index ea76fdc0ae1..94e7be2235b 100644 --- a/ydb/core/quoter/ut_helpers.h +++ b/ydb/core/quoter/ut_helpers.h @@ -1,245 +1,245 @@ -#pragma once -#include "quoter_service.h" -#include "quoter_service_impl.h" -#include "kesus_quoter_proxy.h" - +#pragma once +#include "quoter_service.h" +#include "quoter_service_impl.h" +#include "kesus_quoter_proxy.h" + #include <ydb/core/kesus/tablet/events.h> #include <ydb/core/testlib/actors/test_runtime.h> #include <ydb/core/testlib/basics/appdata.h> #include <ydb/core/testlib/basics/helpers.h> #include <ydb/core/testlib/tablet_helpers.h> #include <ydb/core/testlib/test_client.h> - + #include <ydb/core/protos/flat_tx_scheme.pb.h> #include <ydb/core/protos/kesus.pb.h> - + 
#include <library/cpp/testing/gmock_in_unittest/gmock.h> #include <library/cpp/testing/unittest/registar.h> - -#include <util/generic/hash_set.h> - -namespace NKikimr { - -using namespace testing; // gtest - -class TKesusQuoterTestSetup { -public: - static const TString DEFAULT_KESUS_PARENT_PATH; - static const TString DEFAULT_KESUS_NAME; - - static const TString DEFAULT_KESUS_PATH; - static const TString DEFAULT_KESUS_RESOURCE; - - TKesusQuoterTestSetup(bool runServer = true); - - void RunServer(); - - Tests::TServerSettings& GetServerSettings() { - return *ServerSettings; - } - - Tests::TServer& GetServer() { - UNIT_ASSERT(Server); - return *Server; - } - - Tests::TClient& GetClient() { - UNIT_ASSERT(Client); - return *Client; - } - - static NKikimrKesus::THierarchicalDRRResourceConfig MakeDefaultResourceProps(); - - void CreateKesus(const TString& parent, const TString& name, NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK); - void KillKesusTablet(const TString& kesusPath = DEFAULT_KESUS_PATH); - - void CreateKesusResource(const TString& kesusPath = DEFAULT_KESUS_PATH, const TString& resourcePath = DEFAULT_KESUS_RESOURCE, const NKikimrKesus::THierarchicalDRRResourceConfig& cfg = MakeDefaultResourceProps()); - void CreateDefaultKesusAndResource(); - - void DeleteKesusResource(const TString& kesusPath = DEFAULT_KESUS_PATH, const TString& resourcePath = DEFAULT_KESUS_RESOURCE); - - NKikimrKesus::TEvGetQuoterResourceCountersResult GetQuoterCounters(const TString& kesusPath); - - void GetQuota(const std::vector<std::tuple<TString, TString, ui64>>& resources, TEvQuota::EResourceOperator operation = TEvQuota::EResourceOperator::And, TDuration deadline = TDuration::Max(), TEvQuota::TEvClearance::EResult expectedResult = TEvQuota::TEvClearance::EResult::Success); - void GetQuota(const TString& kesusPath, const TString& resourcePath, ui64 amount = 1, TEvQuota::TEvClearance::EResult expectedResult = TEvQuota::TEvClearance::EResult::Success); - void GetQuota(const TString& kesusPath, const TString& resourcePath, ui64 amount, TDuration deadline, TEvQuota::TEvClearance::EResult expectedResult = TEvQuota::TEvClearance::EResult::Success); - - void SendGetQuotaRequest(const std::vector<std::tuple<TString, TString, ui64>>& resources, TEvQuota::EResourceOperator operation = TEvQuota::EResourceOperator::And, TDuration deadline = TDuration::Max()); - void SendGetQuotaRequest(const TString& kesusPath, const TString& resourcePath, ui64 amount = 1); - void SendGetQuotaRequest(const TString& kesusPath, const TString& resourcePath, ui64 amount, TDuration deadline); - - THolder<TEvQuota::TEvClearance> WaitGetQuotaAnswer(); - + +#include <util/generic/hash_set.h> + +namespace NKikimr { + +using namespace testing; // gtest + +class TKesusQuoterTestSetup { +public: + static const TString DEFAULT_KESUS_PARENT_PATH; + static const TString DEFAULT_KESUS_NAME; + + static const TString DEFAULT_KESUS_PATH; + static const TString DEFAULT_KESUS_RESOURCE; + + TKesusQuoterTestSetup(bool runServer = true); + + void RunServer(); + + Tests::TServerSettings& GetServerSettings() { + return *ServerSettings; + } + + Tests::TServer& GetServer() { + UNIT_ASSERT(Server); + return *Server; + } + + Tests::TClient& GetClient() { + UNIT_ASSERT(Client); + return *Client; + } + + static NKikimrKesus::THierarchicalDRRResourceConfig MakeDefaultResourceProps(); + + void CreateKesus(const TString& parent, const TString& name, NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK); + void 
KillKesusTablet(const TString& kesusPath = DEFAULT_KESUS_PATH); + + void CreateKesusResource(const TString& kesusPath = DEFAULT_KESUS_PATH, const TString& resourcePath = DEFAULT_KESUS_RESOURCE, const NKikimrKesus::THierarchicalDRRResourceConfig& cfg = MakeDefaultResourceProps()); + void CreateDefaultKesusAndResource(); + + void DeleteKesusResource(const TString& kesusPath = DEFAULT_KESUS_PATH, const TString& resourcePath = DEFAULT_KESUS_RESOURCE); + + NKikimrKesus::TEvGetQuoterResourceCountersResult GetQuoterCounters(const TString& kesusPath); + + void GetQuota(const std::vector<std::tuple<TString, TString, ui64>>& resources, TEvQuota::EResourceOperator operation = TEvQuota::EResourceOperator::And, TDuration deadline = TDuration::Max(), TEvQuota::TEvClearance::EResult expectedResult = TEvQuota::TEvClearance::EResult::Success); + void GetQuota(const TString& kesusPath, const TString& resourcePath, ui64 amount = 1, TEvQuota::TEvClearance::EResult expectedResult = TEvQuota::TEvClearance::EResult::Success); + void GetQuota(const TString& kesusPath, const TString& resourcePath, ui64 amount, TDuration deadline, TEvQuota::TEvClearance::EResult expectedResult = TEvQuota::TEvClearance::EResult::Success); + + void SendGetQuotaRequest(const std::vector<std::tuple<TString, TString, ui64>>& resources, TEvQuota::EResourceOperator operation = TEvQuota::EResourceOperator::And, TDuration deadline = TDuration::Max()); + void SendGetQuotaRequest(const TString& kesusPath, const TString& resourcePath, ui64 amount = 1); + void SendGetQuotaRequest(const TString& kesusPath, const TString& resourcePath, ui64 amount, TDuration deadline); + + THolder<TEvQuota::TEvClearance> WaitGetQuotaAnswer(); + TActorId GetEdgeActor(); - -private: - void SetupLogging(); - void RegisterQuoterService(); - - ui64 GetKesusTabletId(const TString& path); - -private: - TPortManager PortManager; - const ui16 MsgBusPort; - Tests::TServerSettings::TPtr ServerSettings; - Tests::TServer::TPtr Server; - THolder<Tests::TClient> Client; + +private: + void SetupLogging(); + void RegisterQuoterService(); + + ui64 GetKesusTabletId(const TString& path); + +private: + TPortManager PortManager; + const ui16 MsgBusPort; + Tests::TServerSettings::TPtr ServerSettings; + Tests::TServer::TPtr Server; + THolder<Tests::TClient> Client; TActorId EdgeActor; -}; - -class TKesusProxyTestSetup { -public: - static constexpr ui64 QUOTER_ID = 42; - static constexpr ui64 KESUS_TABLET_ID = 100500; - - class TTestTabletPipeFactory : public NQuoter::ITabletPipeFactory { - public: - class TTestTabletPipe; - - public: - explicit TTestTabletPipeFactory(TKesusProxyTestSetup* parent) - : Parent(parent) - { - } - - ~TTestTabletPipeFactory(); - +}; + +class TKesusProxyTestSetup { +public: + static constexpr ui64 QUOTER_ID = 42; + static constexpr ui64 KESUS_TABLET_ID = 100500; + + class TTestTabletPipeFactory : public NQuoter::ITabletPipeFactory { + public: + class TTestTabletPipe; + + public: + explicit TTestTabletPipeFactory(TKesusProxyTestSetup* parent) + : Parent(parent) + { + } + + ~TTestTabletPipeFactory(); + IActor* CreateTabletPipe(const NActors::TActorId& owner, ui64 tabletId, const NKikimr::NTabletPipe::TClientConfig& config) override; - - TTestTabletPipe* ExpectTabletPipeCreation(bool wait = false); // Set expectation to creation of a new pipe. - TTestTabletPipe* ExpectTabletPipeConnection(); // Set expectation of creation and connecting to new pipe. If no expectation for a pipe is set this is set by default. 
- - const std::vector<TTestTabletPipe*>& GetPipes() const { - return PipesExpectedToCreate; - } - - size_t GetPipesCreatedCount() const { - return NextPipe; - } - - class TTestTabletPipe : public TActorBootstrapped<TTestTabletPipe> { - public: - TTestTabletPipe(TTestTabletPipeFactory* parent); - + + TTestTabletPipe* ExpectTabletPipeCreation(bool wait = false); // Set expectation to creation of a new pipe. + TTestTabletPipe* ExpectTabletPipeConnection(); // Set expectation of creation and connecting to new pipe. If no expectation for a pipe is set this is set by default. + + const std::vector<TTestTabletPipe*>& GetPipes() const { + return PipesExpectedToCreate; + } + + size_t GetPipesCreatedCount() const { + return NextPipe; + } + + class TTestTabletPipe : public TActorBootstrapped<TTestTabletPipe> { + public: + TTestTabletPipe(TTestTabletPipeFactory* parent); + MOCK_METHOD(void, OnStart, (), ()); MOCK_METHOD(void, OnPoisonPill, (), ()); MOCK_METHOD(void, OnSubscribeOnResources, (const NKikimrKesus::TEvSubscribeOnResources&, ui64 cookie), ()); MOCK_METHOD(void, OnUpdateConsumptionState, (const NKikimrKesus::TEvUpdateConsumptionState&, ui64 cookie), ()); MOCK_METHOD(void, OnResourcesAllocatedAck, (const NKikimrKesus::TEvResourcesAllocatedAck&, ui64 cookie), ()); - - void SendNotConnected(); - void SendConnected(); - void SendDestroyed(); - - void SendSubscribeOnResourceResult(const NKikimrKesus::TEvSubscribeOnResourcesResult& record, ui64 cookie); - void SendUpdateConsumptionStateAck(); - - THolder<IEventHandle> GetDestroyedEventHandle(); - + + void SendNotConnected(); + void SendConnected(); + void SendDestroyed(); + + void SendSubscribeOnResourceResult(const NKikimrKesus::TEvSubscribeOnResourcesResult& record, ui64 cookie); + void SendUpdateConsumptionStateAck(); + + THolder<IEventHandle> GetDestroyedEventHandle(); + const TActorId& GetSelfID() const { - return SelfID; - } - - public: - void Bootstrap(const TActorContext& ctx); - - private: - void HandlePoisonPill(); - void HandleSend(TAutoPtr<IEventHandle>& ev, const TActorContext& ctx); - - void HandleSubscribeOnResources(NKesus::TEvKesus::TEvSubscribeOnResources::TPtr& ev); - void HandleUpdateConsumptionState(NKesus::TEvKesus::TEvUpdateConsumptionState::TPtr& ev); - void HandleResourcesAllocatedAck(NKesus::TEvKesus::TEvResourcesAllocatedAck::TPtr& ev); - - STFUNC(StateFunc); - - private: - TTestTabletPipeFactory* Parent; + return SelfID; + } + + public: + void Bootstrap(const TActorContext& ctx); + + private: + void HandlePoisonPill(); + void HandleSend(TAutoPtr<IEventHandle>& ev, const TActorContext& ctx); + + void HandleSubscribeOnResources(NKesus::TEvKesus::TEvSubscribeOnResources::TPtr& ev); + void HandleUpdateConsumptionState(NKesus::TEvKesus::TEvUpdateConsumptionState::TPtr& ev); + void HandleResourcesAllocatedAck(NKesus::TEvKesus::TEvResourcesAllocatedAck::TPtr& ev); + + STFUNC(StateFunc); + + private: + TTestTabletPipeFactory* Parent; TActorId SelfID; - bool IsDead = false; - }; - - private: - TKesusProxyTestSetup* Parent; - size_t NextPipe = 0; - std::vector<TTestTabletPipe*> PipesExpectedToCreate; - }; - -public: - TKesusProxyTestSetup(); - - TTestTabletPipeFactory& GetPipeFactory() { - UNIT_ASSERT(PipeFactory); - return *PipeFactory; - } - - TTestActorRuntime& GetRuntime() { - return *Runtime; - } - + bool IsDead = false; + }; + + private: + TKesusProxyTestSetup* Parent; + size_t NextPipe = 0; + std::vector<TTestTabletPipe*> PipesExpectedToCreate; + }; + +public: + TKesusProxyTestSetup(); + + TTestTabletPipeFactory& 
GetPipeFactory() { + UNIT_ASSERT(PipeFactory); + return *PipeFactory; + } + + TTestActorRuntime& GetRuntime() { + return *Runtime; + } + const TActorId& GetKesusProxyId() const { - return KesusProxyId; - } - - void WaitProxyStart(); - + return KesusProxyId; + } + + void WaitProxyStart(); + TActorId GetEdgeActor(); TActorId GetPipeEdgeActor(); - - void SendProxyRequest(const TString& resourceName); - THolder<TEventHandle<TEvQuota::TEvProxySession>> ProxyRequest(const TString& resourceName, TEvQuota::TEvProxySession::EResult = TEvQuota::TEvProxySession::Success); - - void SendProxyStats(TDeque<TEvQuota::TProxyStat> stats); - THolder<TEventHandle<TEvQuota::TEvProxyUpdate>> GetProxyUpdate(); - - void SendCloseSession(const TString& resource, ui64 resourceId); - - void WaitEvent(ui32 eventType, ui32 requiredCount = 1); - - ui32 WaitEvent(const THashSet<ui32>& eventTypes, ui32 requiredCount = 1); // returns fired event type - - template <class TEvent> - void WaitEvent(ui32 requiredCount = 1) { - WaitEvent(TEvent::EventType, requiredCount); - } - - void WaitPipesCreated(size_t count); - void WaitConnected(); - void SendNotConnected(TTestTabletPipeFactory::TTestTabletPipe* pipe); - void SendConnected(TTestTabletPipeFactory::TTestTabletPipe* pipe); - void SendDestroyed(TTestTabletPipeFactory::TTestTabletPipe* pipe); - void SendResourcesAllocated(TTestTabletPipeFactory::TTestTabletPipe* pipe, ui64 resId, double amount, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); - - // Update/Stat exchange - Y_WARN_UNUSED_RESULT bool ConsumeResource(ui64 resId, double amount, TDuration tickSize, std::function<void()> afterStat, size_t maxUpdates = 15); - - // Consume with Kesus allocation - Y_WARN_UNUSED_RESULT bool ConsumeResourceAllocateByKesus(TTestTabletPipeFactory::TTestTabletPipe* pipe, ui64 resId, double amount, TDuration tickSize, size_t maxUpdates = 15) { - return ConsumeResource(resId, amount, tickSize, [=] { - Runtime->AdvanceCurrentTime(TDuration::MilliSeconds(100)); - SendResourcesAllocated(pipe, resId, 10); - }, maxUpdates); - } - - // Consume with offline allocation - Y_WARN_UNUSED_RESULT bool ConsumeResourceAdvanceTime(ui64 resId, double amount, TDuration tickSize, size_t maxUpdates = 15) { - return ConsumeResource(resId, amount, tickSize, [this] { - Runtime->AdvanceCurrentTime(TDuration::MilliSeconds(100)); - }, maxUpdates); - } - -private: - void Start(); - - void StartKesusProxy(); - void SetupLogging(); - -private: - THolder<TTestActorRuntime> Runtime; + + void SendProxyRequest(const TString& resourceName); + THolder<TEventHandle<TEvQuota::TEvProxySession>> ProxyRequest(const TString& resourceName, TEvQuota::TEvProxySession::EResult = TEvQuota::TEvProxySession::Success); + + void SendProxyStats(TDeque<TEvQuota::TProxyStat> stats); + THolder<TEventHandle<TEvQuota::TEvProxyUpdate>> GetProxyUpdate(); + + void SendCloseSession(const TString& resource, ui64 resourceId); + + void WaitEvent(ui32 eventType, ui32 requiredCount = 1); + + ui32 WaitEvent(const THashSet<ui32>& eventTypes, ui32 requiredCount = 1); // returns fired event type + + template <class TEvent> + void WaitEvent(ui32 requiredCount = 1) { + WaitEvent(TEvent::EventType, requiredCount); + } + + void WaitPipesCreated(size_t count); + void WaitConnected(); + void SendNotConnected(TTestTabletPipeFactory::TTestTabletPipe* pipe); + void SendConnected(TTestTabletPipeFactory::TTestTabletPipe* pipe); + void SendDestroyed(TTestTabletPipeFactory::TTestTabletPipe* pipe); + void 
SendResourcesAllocated(TTestTabletPipeFactory::TTestTabletPipe* pipe, ui64 resId, double amount, Ydb::StatusIds::StatusCode status = Ydb::StatusIds::SUCCESS); + + // Update/Stat exchange + Y_WARN_UNUSED_RESULT bool ConsumeResource(ui64 resId, double amount, TDuration tickSize, std::function<void()> afterStat, size_t maxUpdates = 15); + + // Consume with Kesus allocation + Y_WARN_UNUSED_RESULT bool ConsumeResourceAllocateByKesus(TTestTabletPipeFactory::TTestTabletPipe* pipe, ui64 resId, double amount, TDuration tickSize, size_t maxUpdates = 15) { + return ConsumeResource(resId, amount, tickSize, [=] { + Runtime->AdvanceCurrentTime(TDuration::MilliSeconds(100)); + SendResourcesAllocated(pipe, resId, 10); + }, maxUpdates); + } + + // Consume with offline allocation + Y_WARN_UNUSED_RESULT bool ConsumeResourceAdvanceTime(ui64 resId, double amount, TDuration tickSize, size_t maxUpdates = 15) { + return ConsumeResource(resId, amount, tickSize, [this] { + Runtime->AdvanceCurrentTime(TDuration::MilliSeconds(100)); + }, maxUpdates); + } + +private: + void Start(); + + void StartKesusProxy(); + void SetupLogging(); + +private: + THolder<TTestActorRuntime> Runtime; TActorId KesusProxyId; - TTestTabletPipeFactory* PipeFactory = nullptr; + TTestTabletPipeFactory* PipeFactory = nullptr; TActorId EdgeActor; TActorId PipeEdgeActor; -}; - -} // namespace NKikimr +}; + +} // namespace NKikimr diff --git a/ydb/core/quoter/ya.make b/ydb/core/quoter/ya.make index 937aae960db..a2d526b7363 100644 --- a/ydb/core/quoter/ya.make +++ b/ydb/core/quoter/ya.make @@ -6,10 +6,10 @@ OWNER( ) SRCS( - debug_info.cpp + debug_info.cpp defs.h kesus_quoter_proxy.cpp - probes.cpp + probes.cpp quoter_service.cpp quoter_service.h quoter_service_impl.h diff --git a/ydb/core/testlib/basics/helpers.h b/ydb/core/testlib/basics/helpers.h index 7a70063f7a3..6232e6ee85d 100644 --- a/ydb/core/testlib/basics/helpers.h +++ b/ydb/core/testlib/basics/helpers.h @@ -50,7 +50,7 @@ namespace NFake { void SetupGRpcProxyStatus(TTestActorRuntime& runtime, ui32 nodeIndex); void SetupNodeTabletMonitor(TTestActorRuntime& runtime, ui32 nodeIndex); void SetupSchemeCache(TTestActorRuntime& runtime, ui32 nodeIndex, const TString& root); - void SetupQuoterService(TTestActorRuntime& runtime, ui32 nodeIndex); + void SetupQuoterService(TTestActorRuntime& runtime, ui32 nodeIndex); void SetupSysViewService(TTestActorRuntime& runtime, ui32 nodeIndex); // StateStorage, NodeWarden, TabletResolver, ResourceBroker, SharedPageCache diff --git a/ydb/core/testlib/basics/services.cpp b/ydb/core/testlib/basics/services.cpp index b4585ad96c6..2e0b1b2fc24 100644 --- a/ydb/core/testlib/basics/services.cpp +++ b/ydb/core/testlib/basics/services.cpp @@ -229,13 +229,13 @@ namespace NPDisk { } } - void SetupQuoterService(TTestActorRuntime& runtime, ui32 nodeIndex) - { - runtime.AddLocalService(MakeQuoterServiceID(), - TActorSetupCmd(CreateQuoterService(), TMailboxType::HTSwap, 0), - nodeIndex); - } - + void SetupQuoterService(TTestActorRuntime& runtime, ui32 nodeIndex) + { + runtime.AddLocalService(MakeQuoterServiceID(), + TActorSetupCmd(CreateQuoterService(), TMailboxType::HTSwap, 0), + nodeIndex); + } + void SetupBasicServices(TTestActorRuntime& runtime, TAppPrepare& app, bool mock, NFake::INode* factory, NFake::TStorage storage, NFake::TCaches caches) { @@ -267,7 +267,7 @@ namespace NPDisk { SetupResourceBroker(runtime, nodeIndex); SetupSharedPageCache(runtime, nodeIndex, caches); SetupBlobCache(runtime, nodeIndex); - SetupQuoterService(runtime, nodeIndex); + 
SetupQuoterService(runtime, nodeIndex); if (factory) factory->Birth(nodeIndex); diff --git a/ydb/core/testlib/fake_scheme_shard.h b/ydb/core/testlib/fake_scheme_shard.h index 8c388d49dd0..105b1065c5f 100644 --- a/ydb/core/testlib/fake_scheme_shard.h +++ b/ydb/core/testlib/fake_scheme_shard.h @@ -1,4 +1,4 @@ -#pragma once +#pragma once #include <ydb/core/base/tablet.h> #include <ydb/core/engine/minikql/flat_local_tx_factory.h> #include <ydb/core/tablet_flat/tablet_flat_executed.h> @@ -6,117 +6,117 @@ #include <ydb/core/testlib/basics/helpers.h> #include <ydb/core/tx/schemeshard/schemeshard.h> #include <ydb/library/aclib/aclib.h> - + #include <library/cpp/actors/core/actor.h> #include <library/cpp/actors/core/events.h> #include <library/cpp/testing/unittest/registar.h> - -#include <util/generic/ptr.h> -#include <util/stream/output.h> - -namespace NKikimr { - -struct TFakeSchemeShardState : public TThrRefBase { - typedef TIntrusivePtr<TFakeSchemeShardState> TPtr; - - TFakeSchemeShardState() - {} - NACLib::TSecurityObject ACL; -}; - -// The functionality of this class is not full. -// So anyone is welcome to improve it. -class TFakeSchemeShard : public TActor<TFakeSchemeShard>, public NTabletFlatExecutor::TTabletExecutedFlat { -public: - using TState = TFakeSchemeShardState; - + +#include <util/generic/ptr.h> +#include <util/stream/output.h> + +namespace NKikimr { + +struct TFakeSchemeShardState : public TThrRefBase { + typedef TIntrusivePtr<TFakeSchemeShardState> TPtr; + + TFakeSchemeShardState() + {} + NACLib::TSecurityObject ACL; +}; + +// The functionality of this class is not full. +// So anyone is welcome to improve it. +class TFakeSchemeShard : public TActor<TFakeSchemeShard>, public NTabletFlatExecutor::TTabletExecutedFlat { +public: + using TState = TFakeSchemeShardState; + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::FLAT_SCHEMESHARD_ACTOR; } TFakeSchemeShard(const TActorId &tablet, TTabletStorageInfo *info, TState::TPtr state) - : TActor<TFakeSchemeShard>(&TFakeSchemeShard::StateInit) - , NTabletFlatExecutor::TTabletExecutedFlat(info, tablet, new NMiniKQL::TMiniKQLFactory) - , State(state) - { - } - - void OnActivateExecutor(const TActorContext &ctx) final { - Become(&TFakeSchemeShard::StateWork); - - while (!InitialEventsQueue.empty()) { - TAutoPtr<IEventHandle> &ev = InitialEventsQueue.front(); - ctx.ExecutorThread.Send(ev.Release()); - InitialEventsQueue.pop_front(); - } - } - - void OnDetach(const TActorContext &ctx) override { - Die(ctx); - } - - void OnTabletDead(TEvTablet::TEvTabletDead::TPtr &ev, const TActorContext &ctx) override { - Y_UNUSED(ev); - Die(ctx); - } - - void Enqueue(STFUNC_SIG) override { - Y_UNUSED(ctx); - InitialEventsQueue.push_back(ev); - } - - void StateInit(STFUNC_SIG) { - StateInitImpl(ev, ctx); - } - - void StateWork(STFUNC_SIG) { - switch (ev->GetTypeRewrite()) { - HFunc(TEvTablet::TEvTabletDead, HandleTabletDead); + : TActor<TFakeSchemeShard>(&TFakeSchemeShard::StateInit) + , NTabletFlatExecutor::TTabletExecutedFlat(info, tablet, new NMiniKQL::TMiniKQLFactory) + , State(state) + { + } + + void OnActivateExecutor(const TActorContext &ctx) final { + Become(&TFakeSchemeShard::StateWork); + + while (!InitialEventsQueue.empty()) { + TAutoPtr<IEventHandle> &ev = InitialEventsQueue.front(); + ctx.ExecutorThread.Send(ev.Release()); + InitialEventsQueue.pop_front(); + } + } + + void OnDetach(const TActorContext &ctx) override { + Die(ctx); + } + + void OnTabletDead(TEvTablet::TEvTabletDead::TPtr 
&ev, const TActorContext &ctx) override { + Y_UNUSED(ev); + Die(ctx); + } + + void Enqueue(STFUNC_SIG) override { + Y_UNUSED(ctx); + InitialEventsQueue.push_back(ev); + } + + void StateInit(STFUNC_SIG) { + StateInitImpl(ev, ctx); + } + + void StateWork(STFUNC_SIG) { + switch (ev->GetTypeRewrite()) { + HFunc(TEvTablet::TEvTabletDead, HandleTabletDead); HFunc(NSchemeShard::TEvSchemeShard::TEvDescribeScheme, Handle); - HFunc(TEvents::TEvPoisonPill, Handle); - } - } - - void BrokenState(STFUNC_SIG) { - switch (ev->GetTypeRewrite()) { - HFunc(TEvTablet::TEvTabletDead, HandleTabletDead); - } - } - + HFunc(TEvents::TEvPoisonPill, Handle); + } + } + + void BrokenState(STFUNC_SIG) { + switch (ev->GetTypeRewrite()) { + HFunc(TEvTablet::TEvTabletDead, HandleTabletDead); + } + } + void Handle(NSchemeShard::TEvSchemeShard::TEvDescribeScheme::TPtr &ev, const TActorContext &ctx) { - const auto& record = ev->Get()->Record; - UNIT_ASSERT(record.GetPathId() == 1); + const auto& record = ev->Get()->Record; + UNIT_ASSERT(record.GetPathId() == 1); TAutoPtr<NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResultBuilder> response = new NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResultBuilder(); - TString out; + TString out; Y_PROTOBUF_SUPPRESS_NODISCARD State->ACL.GetACL().SerializeToString(&out); - response->Record.MutablePathDescription()->MutableSelf()->SetACL(out); + response->Record.MutablePathDescription()->MutableSelf()->SetACL(out); response->Record.MutablePathDescription()->MutableSelf()->SetEffectiveACL(out); - //Fill response from State - ctx.Send(ev->Sender, response.Release()); - } - - void Handle(TEvents::TEvPoisonPill::TPtr &ev, const TActorContext &ctx) { - Y_UNUSED(ev); - Become(&TThis::BrokenState); - ctx.Send(Tablet(), new TEvents::TEvPoisonPill); - } - -private: - TState::TPtr State; - TDeque<TAutoPtr<IEventHandle>> InitialEventsQueue; -}; - -void BootFakeSchemeShard(TTestActorRuntime& runtime, ui64 tabletId, TFakeSchemeShardState::TPtr state) { + //Fill response from State + ctx.Send(ev->Sender, response.Release()); + } + + void Handle(TEvents::TEvPoisonPill::TPtr &ev, const TActorContext &ctx) { + Y_UNUSED(ev); + Become(&TThis::BrokenState); + ctx.Send(Tablet(), new TEvents::TEvPoisonPill); + } + +private: + TState::TPtr State; + TDeque<TAutoPtr<IEventHandle>> InitialEventsQueue; +}; + +void BootFakeSchemeShard(TTestActorRuntime& runtime, ui64 tabletId, TFakeSchemeShardState::TPtr state) { CreateTestBootstrapper(runtime, CreateTestTabletInfo(tabletId, TTabletTypes::SchemeShard), [=](const TActorId & tablet, TTabletStorageInfo* info) { - return new TFakeSchemeShard(tablet, info, state); - }); - - { - TDispatchOptions options; - options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot, 1)); - runtime.DispatchEvents(options); - } -} - -} // namespace NKikimr + return new TFakeSchemeShard(tablet, info, state); + }); + + { + TDispatchOptions options; + options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot, 1)); + runtime.DispatchEvents(options); + } +} + +} // namespace NKikimr diff --git a/ydb/core/testlib/mock_pq_metacache.h b/ydb/core/testlib/mock_pq_metacache.h index 1280f877792..a567ad57feb 100644 --- a/ydb/core/testlib/mock_pq_metacache.h +++ b/ydb/core/testlib/mock_pq_metacache.h @@ -1,46 +1,46 @@ -#pragma once +#pragma once #include <ydb/core/client/server/msgbus_server_pq_metacache.h> - + #include <library/cpp/actors/core/actor.h> #include <library/cpp/testing/gmock_in_unittest/gmock.h> #include 
<library/cpp/testing/unittest/registar.h> - -#include <util/generic/ptr.h> - -namespace NKikimr { - + +#include <util/generic/ptr.h> + +namespace NKikimr { + using TEvPqMetaCache = NMsgBusProxy::NPqMetaCacheV2::TEvPqNewMetaCache; using TPQGroupInfoPtr = TIntrusiveConstPtr<NSchemeCache::TSchemeCacheNavigate::TPQGroupInfo>; using ESchemeStatus = NSchemeCache::TSchemeCacheNavigate::EStatus; -// The functionality of this class is not full. -// So anyone is welcome to improve it. -class TMockPQMetaCache: public TActor<TMockPQMetaCache> { -public: - TMockPQMetaCache() - : TActor<TMockPQMetaCache>(&TMockPQMetaCache::StateFunc) - { - } - - STFUNC(StateFunc) { - switch (ev->GetTypeRewrite()) { +// The functionality of this class is not full. +// So anyone is welcome to improve it. +class TMockPQMetaCache: public TActor<TMockPQMetaCache> { +public: + TMockPQMetaCache() + : TActor<TMockPQMetaCache>(&TMockPQMetaCache::StateFunc) + { + } + + STFUNC(StateFunc) { + switch (ev->GetTypeRewrite()) { HFunc(TEvPqMetaCache::TEvDescribeTopicsRequest, HandleDescribeTopics); HFunc(TEvPqMetaCache::TEvDescribeAllTopicsRequest, HandleDescribeAllTopics); - default: - UNIT_FAIL_NONFATAL("Unexpected event to PQ metacache: " << ev->GetTypeRewrite()); - } - } - + default: + UNIT_FAIL_NONFATAL("Unexpected event to PQ metacache: " << ev->GetTypeRewrite()); + } + } + MOCK_METHOD(void, HandleDescribeTopics, (TEvPqMetaCache::TEvDescribeTopicsRequest::TPtr& ev, const TActorContext& ctx), ()); MOCK_METHOD(void, HandleDescribeAllTopics, (TEvPqMetaCache::TEvDescribeAllTopicsRequest::TPtr& ev, const TActorContext& ctx), ()); //MOCK_METHOD4(HandleDescribeAllTopics, void(const TString& topic, ui64 balancerTabletId, NMsgBusProxy::TEvPqMetaCache::TEvGetBalancerDescribe::TPtr& ev, const TActorContext& ctx)); - - // - // Helpers - // - + + // + // Helpers + // + void SetDescribeCustomTopicsAnswer(const NSchemeCache::TSchemeCacheNavigate::TResultSet& resultSet = {}) { // ToDo - !!! 
- using namespace testing; + using namespace testing; // auto handle = [success, description](NMsgBusProxy::TEvPqMetaCache::TEvGetNode::TPtr& ev, const TActorContext& ctx) { // auto& req = ev->Get()->Request; // req->Description = description; @@ -54,16 +54,16 @@ public: result->ResultSet = resultSet; auto* response = new TEvPqMetaCache::TEvDescribeTopicsResponse(ev->Get()->Topics, result); ctx.Send(ev->Sender, response); - }; - + }; + EXPECT_CALL(*this, HandleDescribeTopics(_, _)) - .WillOnce(Invoke(handle)); - } - + .WillOnce(Invoke(handle)); + } + void SetAllTopicsAnswer( bool success = true, const NSchemeCache::TSchemeCacheNavigate::TResultSet& resultSet = {} ) { - using namespace testing; + using namespace testing; auto handle = [=](TEvPqMetaCache::TEvDescribeAllTopicsRequest::TPtr& ev, const TActorContext& ctx) { auto* response = new TEvPqMetaCache::TEvDescribeAllTopicsResponse("/Root/PQ/"); response->Success = success; @@ -71,11 +71,11 @@ public: result->ResultSet = resultSet; response->Result.Reset(result); ctx.Send(ev->Sender, std::move(response)); - }; - + }; + EXPECT_CALL(*this, HandleDescribeAllTopics(_, _)) - .WillOnce(Invoke(handle)); - } -}; - -} // namespace NKikimr + .WillOnce(Invoke(handle)); + } +}; + +} // namespace NKikimr diff --git a/ydb/core/testlib/service_mocks/access_service_mock.h b/ydb/core/testlib/service_mocks/access_service_mock.h index f18ac92f0a8..457d6766e4f 100644 --- a/ydb/core/testlib/service_mocks/access_service_mock.h +++ b/ydb/core/testlib/service_mocks/access_service_mock.h @@ -3,33 +3,33 @@ #include <yandex/cloud/priv/servicecontrol/v1/access_service.grpc.pb.h> #include <library/cpp/testing/unittest/registar.h> - -#include <iterator> - + +#include <iterator> + class TAccessServiceMock : public yandex::cloud::priv::servicecontrol::v1::AccessService::Service { public: - template <class TResonseProto> - struct TResponse { - TResonseProto Response; - grpc::Status Status = grpc::Status::OK; - bool RequireRequestId = false; - }; - - THashMap<TString, TResponse<yandex::cloud::priv::servicecontrol::v1::AuthenticateResponse>> AuthenticateData; - THashMap<TString, TResponse<yandex::cloud::priv::servicecontrol::v1::AuthorizeResponse>> AuthorizeData; - - template <class TResonseProto> - void CheckRequestId(grpc::ServerContext* ctx, const TResponse<TResonseProto>& resp, const TString& token) { - if (resp.RequireRequestId) { - auto [reqIdBegin, reqIdEnd] = ctx->client_metadata().equal_range("x-request-id"); - UNIT_ASSERT_C(reqIdBegin != reqIdEnd, "RequestId is expected. Token: " << token); - UNIT_ASSERT_VALUES_EQUAL_C(std::distance(reqIdBegin, reqIdEnd), 1, "Only one RequestId is expected. Token: " << token); - UNIT_ASSERT_C(!reqIdBegin->second.empty(), "RequestId is expected to be not empty. Token: " << token); - } - } + template <class TResonseProto> + struct TResponse { + TResonseProto Response; + grpc::Status Status = grpc::Status::OK; + bool RequireRequestId = false; + }; + THashMap<TString, TResponse<yandex::cloud::priv::servicecontrol::v1::AuthenticateResponse>> AuthenticateData; + THashMap<TString, TResponse<yandex::cloud::priv::servicecontrol::v1::AuthorizeResponse>> AuthorizeData; + + template <class TResonseProto> + void CheckRequestId(grpc::ServerContext* ctx, const TResponse<TResonseProto>& resp, const TString& token) { + if (resp.RequireRequestId) { + auto [reqIdBegin, reqIdEnd] = ctx->client_metadata().equal_range("x-request-id"); + UNIT_ASSERT_C(reqIdBegin != reqIdEnd, "RequestId is expected. 
Token: " << token); + UNIT_ASSERT_VALUES_EQUAL_C(std::distance(reqIdBegin, reqIdEnd), 1, "Only one RequestId is expected. Token: " << token); + UNIT_ASSERT_C(!reqIdBegin->second.empty(), "RequestId is expected to be not empty. Token: " << token); + } + } + virtual grpc::Status Authenticate( - grpc::ServerContext* ctx, + grpc::ServerContext* ctx, const yandex::cloud::priv::servicecontrol::v1::AuthenticateRequest* request, yandex::cloud::priv::servicecontrol::v1::AuthenticateResponse* response) override { @@ -41,24 +41,24 @@ public: } auto it = AuthenticateData.find(key); if (it != AuthenticateData.end()) { - response->CopyFrom(it->second.Response); + response->CopyFrom(it->second.Response); CheckRequestId(ctx, it->second, key); - return it->second.Status; + return it->second.Status; } else { return grpc::Status(grpc::StatusCode::PERMISSION_DENIED, "Permission Denied"); } } virtual grpc::Status Authorize( - grpc::ServerContext* ctx, + grpc::ServerContext* ctx, const yandex::cloud::priv::servicecontrol::v1::AuthorizeRequest* request, yandex::cloud::priv::servicecontrol::v1::AuthorizeResponse* response) override { - const TString& token = request->subject().user_account().id() + "-" + request->permission() + "-" + request->resource_path(0).id(); + const TString& token = request->subject().user_account().id() + "-" + request->permission() + "-" + request->resource_path(0).id(); auto it = AuthorizeData.find(token); if (it != AuthorizeData.end()) { - response->CopyFrom(it->second.Response); - CheckRequestId(ctx, it->second, token); - return it->second.Status; + response->CopyFrom(it->second.Response); + CheckRequestId(ctx, it->second, token); + return it->second.Status; } else { return grpc::Status(grpc::StatusCode::PERMISSION_DENIED, "Permission Denied"); } diff --git a/ydb/core/testlib/test_client.cpp b/ydb/core/testlib/test_client.cpp index d4907c26f1e..1bdf46fdbcd 100644 --- a/ydb/core/testlib/test_client.cpp +++ b/ydb/core/testlib/test_client.cpp @@ -209,17 +209,17 @@ namespace Tests { CreateBootstrapTablets(); SetupStorage(); - for (ui32 nodeIdx = 0; nodeIdx < StaticNodes() + DynamicNodes(); ++nodeIdx) { - SetupDomainLocalService(nodeIdx); - Runtime->GetAppData(nodeIdx).AuthConfig.MergeFrom(Settings->AuthConfig); - Runtime->GetAppData(nodeIdx).PQConfig.MergeFrom(Settings->PQConfig); - Runtime->GetAppData(nodeIdx).PQClusterDiscoveryConfig.MergeFrom(Settings->PQClusterDiscoveryConfig); - Runtime->GetAppData(nodeIdx).NetClassifierConfig.MergeFrom(Settings->NetClassifierConfig); - Runtime->GetAppData(nodeIdx).StreamingConfig.MergeFrom(Settings->AppConfig.GetGRpcConfig().GetStreamingConfig()); - Runtime->GetAppData(nodeIdx).EnforceUserTokenRequirement = Settings->AppConfig.GetDomainsConfig().GetSecurityConfig().GetEnforceUserTokenRequirement(); - SetupConfigurators(nodeIdx); + for (ui32 nodeIdx = 0; nodeIdx < StaticNodes() + DynamicNodes(); ++nodeIdx) { + SetupDomainLocalService(nodeIdx); + Runtime->GetAppData(nodeIdx).AuthConfig.MergeFrom(Settings->AuthConfig); + Runtime->GetAppData(nodeIdx).PQConfig.MergeFrom(Settings->PQConfig); + Runtime->GetAppData(nodeIdx).PQClusterDiscoveryConfig.MergeFrom(Settings->PQClusterDiscoveryConfig); + Runtime->GetAppData(nodeIdx).NetClassifierConfig.MergeFrom(Settings->NetClassifierConfig); + Runtime->GetAppData(nodeIdx).StreamingConfig.MergeFrom(Settings->AppConfig.GetGRpcConfig().GetStreamingConfig()); + Runtime->GetAppData(nodeIdx).EnforceUserTokenRequirement = Settings->AppConfig.GetDomainsConfig().GetSecurityConfig().GetEnforceUserTokenRequirement(); + 
SetupConfigurators(nodeIdx); SetupProxies(nodeIdx); - } + } } void TServer::SetupMessageBus(ui16 port, const TString &tracePath) { @@ -314,7 +314,7 @@ namespace Tests { GRpcServer->AddService(new NGRpcService::TGRpcYdbExperimentalService(system, counters, grpcRequestProxyId)); GRpcServer->AddService(new NGRpcService::TGRpcYdbClickhouseInternalService(system, counters, appData.InFlightLimiterRegistry, grpcRequestProxyId)); GRpcServer->AddService(new NGRpcService::TGRpcYdbS3InternalService(system, counters, grpcRequestProxyId)); - GRpcServer->AddService(new NQuoter::TRateLimiterGRpcService(system, counters, grpcRequestProxyId)); + GRpcServer->AddService(new NQuoter::TRateLimiterGRpcService(system, counters, grpcRequestProxyId)); GRpcServer->AddService(new NGRpcService::TGRpcYdbLongTxService(system, counters, grpcRequestProxyId)); GRpcServer->AddService(new NGRpcService::TGRpcDataStreamsService(system, counters, grpcRequestProxyId)); if (Settings->EnableYq) { @@ -585,11 +585,11 @@ namespace Tests { void TServer::SetupProxies(ui32 nodeIdx) { Runtime->SetTxAllocatorTabletIds({ChangeStateStorage(TxAllocator, Settings->Domain)}); - { + { IActor* ticketParser = Settings->CreateTicketParser(Settings->AuthConfig); TActorId ticketParserId = Runtime->Register(ticketParser, nodeIdx); Runtime->RegisterService(MakeTicketParserID(), ticketParserId, nodeIdx); - } + } { IActor* healthCheck = NHealthCheck::CreateHealthCheckService(); @@ -609,20 +609,20 @@ namespace Tests { TVector<NKikimrKqp::TKqpSetting>(Settings->KqpSettings), nullptr); TActorId kqpProxyServiceId = Runtime->Register(kqpProxyService, nodeIdx); - Runtime->RegisterService(NKqp::MakeKqpProxyID(Runtime->GetNodeId(nodeIdx)), kqpProxyServiceId, nodeIdx); + Runtime->RegisterService(NKqp::MakeKqpProxyID(Runtime->GetNodeId(nodeIdx)), kqpProxyServiceId, nodeIdx); } - { - IActor* txProxy = CreateTxProxy(Runtime->GetTxAllocatorTabletIds()); + { + IActor* txProxy = CreateTxProxy(Runtime->GetTxAllocatorTabletIds()); TActorId txProxyId = Runtime->Register(txProxy, nodeIdx); - Runtime->RegisterService(MakeTxProxyID(), txProxyId, nodeIdx); - } + Runtime->RegisterService(MakeTxProxyID(), txProxyId, nodeIdx); + } - { - IActor* compileService = CreateMiniKQLCompileService(100000); + { + IActor* compileService = CreateMiniKQLCompileService(100000); TActorId compileServiceId = Runtime->Register(compileService, nodeIdx, Runtime->GetAppData(nodeIdx).SystemPoolId, TMailboxType::Revolving, 0); - Runtime->RegisterService(MakeMiniKQLCompileServiceID(), compileServiceId, nodeIdx); - } + Runtime->RegisterService(MakeMiniKQLCompileServiceID(), compileServiceId, nodeIdx); + } { IActor* longTxService = NLongTxService::CreateLongTxService(); @@ -636,23 +636,23 @@ namespace Tests { Runtime->RegisterService(NSequenceProxy::MakeSequenceProxyServiceID(), sequenceProxyId, nodeIdx); } - if (BusServer && nodeIdx == 0) { // MsgBus and GRPC are run now only on first node - { - IActor* proxy = BusServer->CreateProxy(); + if (BusServer && nodeIdx == 0) { // MsgBus and GRPC are run now only on first node + { + IActor* proxy = BusServer->CreateProxy(); TActorId proxyId = Runtime->Register(proxy, nodeIdx, Runtime->GetAppData(nodeIdx).SystemPoolId, TMailboxType::Revolving, 0); - Runtime->RegisterService(NMsgBusProxy::CreateMsgBusProxyId(), proxyId, nodeIdx); - } + Runtime->RegisterService(NMsgBusProxy::CreateMsgBusProxyId(), proxyId, nodeIdx); + } - { - IActor* traceService = BusServer->CreateMessageBusTraceService(); - if (traceService) { + { + IActor* traceService = 
BusServer->CreateMessageBusTraceService(); + if (traceService) { TActorId traceServiceId = Runtime->Register(traceService, nodeIdx, Runtime->GetAppData(nodeIdx).IOPoolId, TMailboxType::Simple, 0); - Runtime->RegisterService(NMessageBusTracer::MakeMessageBusTraceServiceID(), traceServiceId, nodeIdx); - } + Runtime->RegisterService(NMessageBusTracer::MakeMessageBusTraceServiceID(), traceServiceId, nodeIdx); + } } } - { + { auto driverConfig = NYdb::TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << Settings->GrpcPort); if (!Driver) { Driver.Reset(new NYdb::TDriver(driverConfig)); @@ -669,9 +669,9 @@ namespace Tests { TActorId pqMetaCacheId = Runtime->Register(pqMetaCache, nodeIdx); Runtime->RegisterService(NMsgBusProxy::CreatePersQueueMetaCacheV2Id(), pqMetaCacheId, nodeIdx); } - } + } - { + { if (Settings->EnableMetering) { THolder<TFileLogBackend> fileBackend; try { @@ -688,29 +688,29 @@ namespace Tests { } { - IActor* kesusService = NKesus::CreateKesusProxyService(); + IActor* kesusService = NKesus::CreateKesusProxyService(); TActorId kesusServiceId = Runtime->Register(kesusService, nodeIdx); - Runtime->RegisterService(NKesus::MakeKesusProxyServiceId(), kesusServiceId, nodeIdx); - } + Runtime->RegisterService(NKesus::MakeKesusProxyServiceId(), kesusServiceId, nodeIdx); + } - { - IActor* pqClusterTracker = NPQ::NClusterTracker::CreateClusterTracker(); + { + IActor* pqClusterTracker = NPQ::NClusterTracker::CreateClusterTracker(); TActorId pqClusterTrackerId = Runtime->Register(pqClusterTracker, nodeIdx); - Runtime->RegisterService(NPQ::NClusterTracker::MakeClusterTrackerID(), pqClusterTrackerId, nodeIdx); - } + Runtime->RegisterService(NPQ::NClusterTracker::MakeClusterTrackerID(), pqClusterTrackerId, nodeIdx); + } - { - IActor* netClassifier = NNetClassifier::CreateNetClassifier(); + { + IActor* netClassifier = NNetClassifier::CreateNetClassifier(); TActorId netClassifierId = Runtime->Register(netClassifier, nodeIdx); - Runtime->RegisterService(NNetClassifier::MakeNetClassifierID(), netClassifierId, nodeIdx); - } + Runtime->RegisterService(NNetClassifier::MakeNetClassifierID(), netClassifierId, nodeIdx); + } if (Settings->EnableYq) { NYq::NConfig::TConfig protoConfig; protoConfig.SetEnabled(true); - protoConfig.MutableCommon()->SetIdsPrefix("id"); - + protoConfig.MutableCommon()->SetIdsPrefix("id"); + TString endpoint = TStringBuilder() << "localhost:" << Settings->GrpcPort; TString prefix = "Root/yq"; auto port = Runtime->GetPortManager().GetPort(); @@ -803,20 +803,20 @@ namespace Tests { const auto ydbCredFactory = NKikimr::CreateYdbCredentialsProviderFactory; auto counters = MakeIntrusive<NMonitoring::TDynamicCounters>(); auto yqSharedResources = NYq::CreateYqSharedResources(protoConfig, ydbCredFactory, counters); - NYq::Init( + NYq::Init( protoConfig, Runtime->GetNodeId(nodeIdx), actorRegistrator, &appData, "TestTenant", - nullptr, // MakeIntrusive<NPq::NConfigurationManager::TConnections>(), + nullptr, // MakeIntrusive<NPq::NConfigurationManager::TConnections>(), yqSharedResources, NKikimr::NFolderService::CreateMockFolderServiceActor, NYq::CreateMockYqAuditServiceActor, ydbCredFactory, /*IcPort = */0 ); - NYq::InitTest(Runtime.Get(), port, Settings->GrpcPort, yqSharedResources); + NYq::InitTest(Runtime.Get(), port, Settings->GrpcPort, yqSharedResources); } } @@ -1425,19 +1425,19 @@ namespace Tests { return (NMsgBusProxy::EResponseStatus)response.GetStatus(); } - NMsgBusProxy::EResponseStatus TClient::DeleteKesus(const TString& parent, const TString& name) { - auto* 
request = new NMsgBusProxy::TBusSchemeOperation(); - auto* tx = request->Record.MutableTransaction()->MutableModifyScheme(); + NMsgBusProxy::EResponseStatus TClient::DeleteKesus(const TString& parent, const TString& name) { + auto* request = new NMsgBusProxy::TBusSchemeOperation(); + auto* tx = request->Record.MutableTransaction()->MutableModifyScheme(); tx->SetOperationType(NKikimrSchemeOp::ESchemeOpDropKesus); - tx->SetWorkingDir(parent); - tx->MutableDrop()->SetName(name); - TAutoPtr<NBus::TBusMessage> reply; - NBus::EMessageStatus msgStatus = SendAndWaitCompletion(request, reply); - UNIT_ASSERT_VALUES_EQUAL(msgStatus, NBus::MESSAGE_OK); - const NKikimrClient::TResponse &response = dynamic_cast<NMsgBusProxy::TBusResponse *>(reply.Get())->Record; - return (NMsgBusProxy::EResponseStatus)response.GetStatus(); - } - + tx->SetWorkingDir(parent); + tx->MutableDrop()->SetName(name); + TAutoPtr<NBus::TBusMessage> reply; + NBus::EMessageStatus msgStatus = SendAndWaitCompletion(request, reply); + UNIT_ASSERT_VALUES_EQUAL(msgStatus, NBus::MESSAGE_OK); + const NKikimrClient::TResponse &response = dynamic_cast<NMsgBusProxy::TBusResponse *>(reply.Get())->Record; + return (NMsgBusProxy::EResponseStatus)response.GetStatus(); + } + NMsgBusProxy::EResponseStatus TClient::CreateOlapStore(const TString& parent, const TString& scheme) { NKikimrSchemeOp::TColumnStoreDescription store; bool parseOk = ::google::protobuf::TextFormat::ParseFromString(scheme, &store); @@ -2223,45 +2223,45 @@ namespace Tests { res.Swap(&response); } - ui64 TClient::GetKesusTabletId(const TString& kesusPath) { - auto describeResult = Ls(kesusPath); - UNIT_ASSERT_C(describeResult->Record.GetPathDescription().HasKesus(), describeResult->Record); - return describeResult->Record.GetPathDescription().GetKesus().GetKesusTabletId(); - } - - Ydb::StatusIds::StatusCode TClient::AddQuoterResource(TTestActorRuntime* runtime, const TString& kesusPath, const TString& resourcePath, const TMaybe<double> maxUnitsPerSecond) { - NKikimrKesus::THierarchicalDRRResourceConfig cfg; - if (maxUnitsPerSecond) { - cfg.SetMaxUnitsPerSecond(*maxUnitsPerSecond); - } - return AddQuoterResource(runtime, kesusPath, resourcePath, cfg); - } - - Ydb::StatusIds::StatusCode TClient::AddQuoterResource(TTestActorRuntime* runtime, const TString& kesusPath, const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& props) { - THolder<NKesus::TEvKesus::TEvAddQuoterResource> request = MakeHolder<NKesus::TEvKesus::TEvAddQuoterResource>(); - request->Record.MutableResource()->SetResourcePath(resourcePath); - *request->Record.MutableResource()->MutableHierarhicalDRRResourceConfig() = props; - + ui64 TClient::GetKesusTabletId(const TString& kesusPath) { + auto describeResult = Ls(kesusPath); + UNIT_ASSERT_C(describeResult->Record.GetPathDescription().HasKesus(), describeResult->Record); + return describeResult->Record.GetPathDescription().GetKesus().GetKesusTabletId(); + } + + Ydb::StatusIds::StatusCode TClient::AddQuoterResource(TTestActorRuntime* runtime, const TString& kesusPath, const TString& resourcePath, const TMaybe<double> maxUnitsPerSecond) { + NKikimrKesus::THierarchicalDRRResourceConfig cfg; + if (maxUnitsPerSecond) { + cfg.SetMaxUnitsPerSecond(*maxUnitsPerSecond); + } + return AddQuoterResource(runtime, kesusPath, resourcePath, cfg); + } + + Ydb::StatusIds::StatusCode TClient::AddQuoterResource(TTestActorRuntime* runtime, const TString& kesusPath, const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& props) { + 
THolder<NKesus::TEvKesus::TEvAddQuoterResource> request = MakeHolder<NKesus::TEvKesus::TEvAddQuoterResource>(); + request->Record.MutableResource()->SetResourcePath(resourcePath); + *request->Record.MutableResource()->MutableHierarhicalDRRResourceConfig() = props; + TActorId sender = runtime->AllocateEdgeActor(0); ForwardToTablet(*runtime, GetKesusTabletId(kesusPath), sender, request.Release(), 0); - - TAutoPtr<IEventHandle> handle; - runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvAddQuoterResourceResult>(handle); - auto& record = handle->Get<NKesus::TEvKesus::TEvAddQuoterResourceResult>()->Record; - return record.GetError().GetStatus(); - } - - THolder<NKesus::TEvKesus::TEvGetConfigResult> TClient::GetKesusConfig(TTestActorRuntime* runtime, const TString& kesusPath) { - THolder<NKesus::TEvKesus::TEvGetConfig> request = MakeHolder<NKesus::TEvKesus::TEvGetConfig>(); - + + TAutoPtr<IEventHandle> handle; + runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvAddQuoterResourceResult>(handle); + auto& record = handle->Get<NKesus::TEvKesus::TEvAddQuoterResourceResult>()->Record; + return record.GetError().GetStatus(); + } + + THolder<NKesus::TEvKesus::TEvGetConfigResult> TClient::GetKesusConfig(TTestActorRuntime* runtime, const TString& kesusPath) { + THolder<NKesus::TEvKesus::TEvGetConfig> request = MakeHolder<NKesus::TEvKesus::TEvGetConfig>(); + TActorId sender = runtime->AllocateEdgeActor(0); ForwardToTablet(*runtime, GetKesusTabletId(kesusPath), sender, request.Release(), 0); - - TAutoPtr<IEventHandle> handle; - runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvGetConfigResult>(handle); - return handle->Release<NKesus::TEvKesus::TEvGetConfigResult>(); - } - + + TAutoPtr<IEventHandle> handle; + runtime->GrabEdgeEvent<NKesus::TEvKesus::TEvGetConfigResult>(handle); + return handle->Release<NKesus::TEvKesus::TEvGetConfigResult>(); + } + bool IsServerRedirected() { return !!GetEnv(ServerRedirectEnvVar); } diff --git a/ydb/core/testlib/test_client.h b/ydb/core/testlib/test_client.h index 2064752ab45..65b21cd8bee 100644 --- a/ydb/core/testlib/test_client.h +++ b/ydb/core/testlib/test_client.h @@ -219,19 +219,19 @@ namespace Tests { void EnableGRpc(const NGrpc::TServerOptions& options); void EnableGRpc(ui16 port); - + void SetupDefaultProfiles(); - + TIntrusivePtr<NMonitoring::TDynamicCounters> GetGRpcServerRootCounters() const { return GRpcServerRootCounters; } - void ShutdownGRpc() { - if (GRpcServer) { - GRpcServer->Stop(); - GRpcServer = nullptr; - } - } + void ShutdownGRpc() { + if (GRpcServer) { + GRpcServer->Stop(); + GRpcServer = nullptr; + } + } void StartDummyTablets(); TTestActorRuntime* GetRuntime() const; const TServerSettings& GetSettings() const; @@ -367,7 +367,7 @@ namespace Tests { NMsgBusProxy::EResponseStatus SplitTable(const TString& table, ui64 datashardId, ui64 border, TDuration timeout = TDuration::Seconds(5000)); NMsgBusProxy::EResponseStatus CopyTable(const TString& parent, const TString& name, const TString& src); NMsgBusProxy::EResponseStatus CreateKesus(const TString& parent, const TString& name); - NMsgBusProxy::EResponseStatus DeleteKesus(const TString& parent, const TString& name); + NMsgBusProxy::EResponseStatus DeleteKesus(const TString& parent, const TString& name); NMsgBusProxy::EResponseStatus ConsistentCopyTables(TVector<std::pair<TString, TString>> desc, TDuration timeout = TDuration::Seconds(5000)); NMsgBusProxy::EResponseStatus DeleteTable(const TString& parent, const TString& name); NMsgBusProxy::EResponseStatus AlterTable(const TString& parent, const 
NKikimrSchemeOp::TTableDescription& update); @@ -433,12 +433,12 @@ namespace Tests { static void RefreshPathCache(TTestActorRuntime* runtime, const TString& path, ui32 nodeIdx = 0); - ui64 GetKesusTabletId(const TString& kesusPath); - Ydb::StatusIds::StatusCode AddQuoterResource(TTestActorRuntime* runtime, const TString& kesusPath, const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& props); - Ydb::StatusIds::StatusCode AddQuoterResource(TTestActorRuntime* runtime, const TString& kesusPath, const TString& resourcePath, const TMaybe<double> maxUnitsPerSecond = Nothing()); - - THolder<NKesus::TEvKesus::TEvGetConfigResult> GetKesusConfig(TTestActorRuntime* runtime, const TString& kesusPath); - + ui64 GetKesusTabletId(const TString& kesusPath); + Ydb::StatusIds::StatusCode AddQuoterResource(TTestActorRuntime* runtime, const TString& kesusPath, const TString& resourcePath, const NKikimrKesus::THierarchicalDRRResourceConfig& props); + Ydb::StatusIds::StatusCode AddQuoterResource(TTestActorRuntime* runtime, const TString& kesusPath, const TString& resourcePath, const TMaybe<double> maxUnitsPerSecond = Nothing()); + + THolder<NKesus::TEvKesus::TEvGetConfigResult> GetKesusConfig(TTestActorRuntime* runtime, const TString& kesusPath); + protected: template <class TMsg> TString PrintResult(NBus::TBusMessage* msg, size_t maxSz = 1000) { diff --git a/ydb/core/testlib/test_pq_client.h b/ydb/core/testlib/test_pq_client.h index f0599232508..2d0058afe25 100644 --- a/ydb/core/testlib/test_pq_client.h +++ b/ydb/core/testlib/test_pq_client.h @@ -1,6 +1,6 @@ -#pragma once -#include "test_client.h" - +#pragma once +#include "test_client.h" + #include <ydb/core/client/flat_ut_client.h> #include <ydb/core/protos/flat_tx_scheme.pb.h> #include <ydb/core/mind/address_classification/net_classifier.h> @@ -10,22 +10,22 @@ #include <ydb/public/sdk/cpp/client/ydb_persqueue_public/persqueue.h> #include <ydb/library/aclib/aclib.h> #include <ydb/library/persqueue/topic_parser/topic_parser.h> - + #include <library/cpp/tvmauth/unittest.h> #include <library/cpp/testing/unittest/registar.h> - + #include <util/string/printf.h> #include <util/system/tempfile.h> -namespace NKikimr { -namespace NPersQueueTests { - +namespace NKikimr { +namespace NPersQueueTests { + using namespace NNetClassifier; using namespace NKikimr::Tests; inline Tests::TServerSettings PQSettings(ui16 port, ui32 nodesCount = 2, bool roundrobin = true, const TString& yql_timeout = "10", const THolder<TTempFileHandle>& netDataFile = nullptr) { - NKikimrPQ::TPQConfig pqConfig; + NKikimrPQ::TPQConfig pqConfig; NKikimrProto::TAuthConfig authConfig; authConfig.SetUseBlackBox(false); authConfig.SetUseAccessService(false); @@ -44,8 +44,8 @@ inline Tests::TServerSettings PQSettings(ui16 port, ui32 nodesCount = 2, bool ro authConfig.MutableUserRegistryConfig()->SetQuery(query); - pqConfig.SetEnabled(true); - pqConfig.SetMaxReadCookies(10); + pqConfig.SetEnabled(true); + pqConfig.SetMaxReadCookies(10); for (int i = 0; i < 12; ++i) { auto profile = pqConfig.AddChannelProfiles(); Y_UNUSED(profile); @@ -68,14 +68,14 @@ inline Tests::TServerSettings PQSettings(ui16 port, ui32 nodesCount = 2, bool ro if (netDataFile) settings.NetClassifierConfig.SetNetDataFilePath(netDataFile->Name()); - return settings; -} - -const TString TopicPrefix = "/Root/PQ/"; + return settings; +} + +const TString TopicPrefix = "/Root/PQ/"; const static TString DEFAULT_SRC_IDS_PATH = "/Root/PQ/SourceIdMeta2"; + - -struct TRequestCreatePQ { +struct TRequestCreatePQ { 
TRequestCreatePQ( const TString& topic, ui32 numParts, @@ -106,17 +106,17 @@ struct TRequestCreatePQ { , SourceIdLifetime(sourceIdLifetime) {} - TString Topic; - ui32 NumParts; - ui32 CacheSize; + TString Topic; + ui32 NumParts; + ui32 CacheSize; ui64 LifetimeS; ui32 LowWatermark; - + ui64 WriteSpeed; - - TString User; + + TString User; ui64 ReadSpeed; - + TVector<TString> ReadRules; TVector<TString> Important; @@ -125,21 +125,21 @@ struct TRequestCreatePQ { ui64 SourceIdMaxCount; ui64 SourceIdLifetime; - THolder<NMsgBusProxy::TBusPersQueue> GetRequest() const { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutableMetaRequest()->MutableCmdCreateTopic(); - req->SetTopic(Topic); - req->SetNumPartitions(NumParts); - auto config = req->MutableConfig(); - if (CacheSize) - config->SetCacheSize(CacheSize); - config->MutablePartitionConfig()->SetLifetimeSeconds(LifetimeS); + THolder<NMsgBusProxy::TBusPersQueue> GetRequest() const { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableMetaRequest()->MutableCmdCreateTopic(); + req->SetTopic(Topic); + req->SetNumPartitions(NumParts); + auto config = req->MutableConfig(); + if (CacheSize) + config->SetCacheSize(CacheSize); + config->MutablePartitionConfig()->SetLifetimeSeconds(LifetimeS); config->MutablePartitionConfig()->SetSourceIdLifetimeSeconds(SourceIdLifetime); config->MutablePartitionConfig()->SetSourceIdMaxCounts(SourceIdMaxCount); - config->MutablePartitionConfig()->SetLowWatermark(LowWatermark); + config->MutablePartitionConfig()->SetLowWatermark(LowWatermark); config->SetLocalDC(true); - + auto codec = config->MutableCodecs(); codec->AddIds(0); codec->AddCodecs("raw"); @@ -152,8 +152,8 @@ struct TRequestCreatePQ { config->MutablePartitionConfig()->AddImportantClientId(i); } - config->MutablePartitionConfig()->SetWriteSpeedInBytesPerSecond(WriteSpeed); - config->MutablePartitionConfig()->SetBurstSize(WriteSpeed); + config->MutablePartitionConfig()->SetWriteSpeedInBytesPerSecond(WriteSpeed); + config->MutablePartitionConfig()->SetBurstSize(WriteSpeed); for (auto& rr : ReadRules) { config->AddReadRules(rr); config->AddReadFromTimestampsMs(0); @@ -164,23 +164,23 @@ struct TRequestCreatePQ { if (!ReadRules.empty()) { config->SetRequireAuthRead(true); } - if (!User.empty()) { - auto rq = config->MutablePartitionConfig()->AddReadQuota(); - rq->SetSpeedInBytesPerSecond(ReadSpeed); - rq->SetBurstSize(ReadSpeed); - rq->SetClientId(User); - } - + if (!User.empty()) { + auto rq = config->MutablePartitionConfig()->AddReadQuota(); + rq->SetSpeedInBytesPerSecond(ReadSpeed); + rq->SetBurstSize(ReadSpeed); + rq->SetClientId(User); + } + if (MirrorFrom) { auto mirrorFromConfig = config->MutablePartitionConfig()->MutableMirrorFrom(); mirrorFromConfig->CopyFrom(MirrorFrom.value()); } - return request; - } -}; - - -struct TRequestAlterPQ { + return request; + } +}; + + +struct TRequestAlterPQ { TRequestAlterPQ( const TString& topic, ui32 numParts, @@ -197,68 +197,68 @@ struct TRequestAlterPQ { , MirrorFrom(mirrorFrom) {} - TString Topic; - ui32 NumParts; - ui64 CacheSize; - ui64 LifetimeS; + TString Topic; + ui32 NumParts; + ui64 CacheSize; + ui64 LifetimeS; bool FillPartitionConfig; std::optional<NKikimrPQ::TMirrorPartitionConfig> MirrorFrom; - - THolder<NMsgBusProxy::TBusPersQueue> GetRequest() { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = 
request->Record.MutableMetaRequest()->MutableCmdChangeTopic(); - req->SetTopic(Topic); - req->SetNumPartitions(NumParts); - if (CacheSize) { - auto config = req->MutableConfig(); - config->SetCacheSize(CacheSize); - } + + THolder<NMsgBusProxy::TBusPersQueue> GetRequest() { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableMetaRequest()->MutableCmdChangeTopic(); + req->SetTopic(Topic); + req->SetNumPartitions(NumParts); + if (CacheSize) { + auto config = req->MutableConfig(); + config->SetCacheSize(CacheSize); + } if (FillPartitionConfig) { req->MutableConfig()->MutablePartitionConfig()->SetLifetimeSeconds(LifetimeS); if (MirrorFrom) { req->MutableConfig()->MutablePartitionConfig()->MutableMirrorFrom()->CopyFrom(MirrorFrom.value()); } } - return request; - } -}; - -struct TRequestDeletePQ { + return request; + } +}; + +struct TRequestDeletePQ { TRequestDeletePQ(const TString& topic) : Topic(topic) {} - TString Topic; - - THolder<NMsgBusProxy::TBusPersQueue> GetRequest() { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutableMetaRequest()->MutableCmdDeleteTopic(); - req->SetTopic(Topic); - return request; - } -}; - -struct TRequestGetOwnership { + TString Topic; + + THolder<NMsgBusProxy::TBusPersQueue> GetRequest() { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableMetaRequest()->MutableCmdDeleteTopic(); + req->SetTopic(Topic); + return request; + } +}; + +struct TRequestGetOwnership { TRequestGetOwnership(const TString& topic, ui32 partition) : Topic(topic) , Partition(partition) {} - TString Topic; - ui32 Partition; - - THolder<NMsgBusProxy::TBusPersQueue> GetRequest() const { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutablePartitionRequest(); - req->SetTopic(Topic); - req->SetPartition(Partition); - req->MutableCmdGetOwnership(); - return request; - } -}; - - -struct TRequestWritePQ { + TString Topic; + ui32 Partition; + + THolder<NMsgBusProxy::TBusPersQueue> GetRequest() const { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutablePartitionRequest(); + req->SetTopic(Topic); + req->SetPartition(Partition); + req->MutableCmdGetOwnership(); + return request; + } +}; + + +struct TRequestWritePQ { TRequestWritePQ(const TString& topic, ui32 partition, const TString& sourceId, ui64 seqNo) : Topic(topic) , Partition(partition) @@ -266,27 +266,27 @@ struct TRequestWritePQ { , SeqNo(seqNo) {} - TString Topic; - ui32 Partition; - TString SourceId; - ui64 SeqNo; - - THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TString& data, const TString& cookie) const { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutablePartitionRequest(); - req->SetTopic(Topic); - req->SetPartition(Partition); - req->SetMessageNo(0); - req->SetOwnerCookie(cookie); - auto write = req->AddCmdWrite(); - write->SetSourceId(SourceId); - write->SetSeqNo(SeqNo); - write->SetData(data); - return request; - } -}; - -struct TRequestReadPQ { + TString Topic; + ui32 Partition; + TString SourceId; + ui64 SeqNo; + + THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TString& data, const TString& cookie) const { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = 
request->Record.MutablePartitionRequest(); + req->SetTopic(Topic); + req->SetPartition(Partition); + req->SetMessageNo(0); + req->SetOwnerCookie(cookie); + auto write = req->AddCmdWrite(); + write->SetSourceId(SourceId); + write->SetSeqNo(SeqNo); + write->SetData(data); + return request; + } +}; + +struct TRequestReadPQ { TRequestReadPQ( const TString& topic, ui32 partition, @@ -301,26 +301,26 @@ struct TRequestReadPQ { , User(user) {} - TString Topic; - ui32 Partition; - ui64 StartOffset; - ui32 Count; - TString User; - - THolder<NMsgBusProxy::TBusPersQueue> GetRequest() const { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutablePartitionRequest(); - req->SetTopic(Topic); - req->SetPartition(Partition); - auto read = req->MutableCmdRead(); - read->SetOffset(StartOffset); - read->SetCount(Count); - read->SetClientId(User); - return request; - } -}; - -struct TRequestSetClientOffsetPQ { + TString Topic; + ui32 Partition; + ui64 StartOffset; + ui32 Count; + TString User; + + THolder<NMsgBusProxy::TBusPersQueue> GetRequest() const { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutablePartitionRequest(); + req->SetTopic(Topic); + req->SetPartition(Partition); + auto read = req->MutableCmdRead(); + read->SetOffset(StartOffset); + read->SetCount(Count); + read->SetClientId(User); + return request; + } +}; + +struct TRequestSetClientOffsetPQ { TRequestSetClientOffsetPQ( const TString& topic, ui32 partition, @@ -333,129 +333,129 @@ struct TRequestSetClientOffsetPQ { , User(user) {} - TString Topic; - ui32 Partition; - ui64 Offset; - TString User; - - THolder<NMsgBusProxy::TBusPersQueue> GetRequest() const { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutablePartitionRequest(); - req->SetTopic(Topic); - req->SetPartition(Partition); - auto cmd = req->MutableCmdSetClientOffset(); - cmd->SetOffset(Offset); - cmd->SetClientId(User); - return request; - } -}; - -struct FetchPartInfo { - TString Topic; - i32 Partition; - ui64 Offset; - ui32 MaxBytes; -}; - -struct TFetchRequestPQ { - THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<FetchPartInfo>& fetchParts, ui32 maxBytes, ui32 waitMs) { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutableFetchRequest(); - req->SetWaitMs(waitMs); - req->SetTotalMaxBytes(maxBytes); - req->SetClientId("user"); - for (const auto& t : fetchParts) { - auto part = req->AddPartition(); - part->SetTopic(t.Topic); - part->SetPartition(t.Partition); - part->SetOffset(t.Offset); - part->SetMaxBytes(t.MaxBytes); - } - return request; - } -}; - -struct TRequestGetPartOffsets { - THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts) { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutableMetaRequest(); - auto partOff = req->MutableCmdGetPartitionOffsets(); - partOff->SetClientId("user"); - for (const auto& t : topicsAndParts) { - auto req = partOff->AddTopicRequest(); - req->SetTopic(t.first); - for (const auto& p : t.second) { - req->AddPartition(p); - } - } - return request; - } -}; - -struct TRequestGetClientInfo { - THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<TString>& topics, const TString& user) { - THolder<NMsgBusProxy::TBusPersQueue> request(new 
NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutableMetaRequest(); - auto partOff = req->MutableCmdGetReadSessionsInfo(); - partOff->SetClientId(user); - for (const auto& t : topics) { - partOff->AddTopic(t); - } - return request; - } -}; - - -struct TRequestGetPartStatus { - THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts) { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutableMetaRequest(); - auto partOff = req->MutableCmdGetPartitionStatus(); - partOff->SetClientId("user1"); - for (const auto& t : topicsAndParts) { - auto req = partOff->AddTopicRequest(); - req->SetTopic(t.first); - for (const auto& p : t.second) { - req->AddPartition(p); - } - } - return request; - } -}; - -struct TRequestGetPartLocations { - THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts) { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutableMetaRequest(); - auto partOff = req->MutableCmdGetPartitionLocations(); - for (const auto& t : topicsAndParts) { - auto req = partOff->AddTopicRequest(); - req->SetTopic(t.first); - for (const auto& p : t.second) { - req->AddPartition(p); - } - } - return request; - } -}; - -struct TRequestDescribePQ { - THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<TString>& topics) const { - THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutableMetaRequest(); - auto partOff = req->MutableCmdGetTopicMetadata(); - for (const auto& t : topics) { - partOff->AddTopic(t); - } - return request; - } -}; - -enum class ETransport { - MsgBus, - GRpc -}; - + TString Topic; + ui32 Partition; + ui64 Offset; + TString User; + + THolder<NMsgBusProxy::TBusPersQueue> GetRequest() const { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutablePartitionRequest(); + req->SetTopic(Topic); + req->SetPartition(Partition); + auto cmd = req->MutableCmdSetClientOffset(); + cmd->SetOffset(Offset); + cmd->SetClientId(User); + return request; + } +}; + +struct FetchPartInfo { + TString Topic; + i32 Partition; + ui64 Offset; + ui32 MaxBytes; +}; + +struct TFetchRequestPQ { + THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<FetchPartInfo>& fetchParts, ui32 maxBytes, ui32 waitMs) { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableFetchRequest(); + req->SetWaitMs(waitMs); + req->SetTotalMaxBytes(maxBytes); + req->SetClientId("user"); + for (const auto& t : fetchParts) { + auto part = req->AddPartition(); + part->SetTopic(t.Topic); + part->SetPartition(t.Partition); + part->SetOffset(t.Offset); + part->SetMaxBytes(t.MaxBytes); + } + return request; + } +}; + +struct TRequestGetPartOffsets { + THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts) { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableMetaRequest(); + auto partOff = req->MutableCmdGetPartitionOffsets(); + partOff->SetClientId("user"); + for (const auto& t : topicsAndParts) { + auto req = partOff->AddTopicRequest(); + req->SetTopic(t.first); + for (const auto& p : t.second) { + req->AddPartition(p); + } + } + return request; + } +}; + +struct 
TRequestGetClientInfo { + THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<TString>& topics, const TString& user) { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableMetaRequest(); + auto partOff = req->MutableCmdGetReadSessionsInfo(); + partOff->SetClientId(user); + for (const auto& t : topics) { + partOff->AddTopic(t); + } + return request; + } +}; + + +struct TRequestGetPartStatus { + THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts) { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableMetaRequest(); + auto partOff = req->MutableCmdGetPartitionStatus(); + partOff->SetClientId("user1"); + for (const auto& t : topicsAndParts) { + auto req = partOff->AddTopicRequest(); + req->SetTopic(t.first); + for (const auto& p : t.second) { + req->AddPartition(p); + } + } + return request; + } +}; + +struct TRequestGetPartLocations { + THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts) { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableMetaRequest(); + auto partOff = req->MutableCmdGetPartitionLocations(); + for (const auto& t : topicsAndParts) { + auto req = partOff->AddTopicRequest(); + req->SetTopic(t.first); + for (const auto& p : t.second) { + req->AddPartition(p); + } + } + return request; + } +}; + +struct TRequestDescribePQ { + THolder<NMsgBusProxy::TBusPersQueue> GetRequest(const TVector<TString>& topics) const { + THolder<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableMetaRequest(); + auto partOff = req->MutableCmdGetTopicMetadata(); + for (const auto& t : topics) { + partOff->AddTopic(t); + } + return request; + } +}; + +enum class ETransport { + MsgBus, + GRpc +}; + struct TPQTestClusterInfo { TString Balancer; bool Enabled; @@ -471,17 +471,17 @@ static THashMap<TString, TPQTestClusterInfo> CLUSTERS_LIST_ONE_DC = { {"dc1", {"localhost", true}} }; -class TFlatMsgBusPQClient : public NFlatTests::TFlatMsgBusClient { -private: - static constexpr ui32 FlatDomain = 0; - static constexpr bool FlatSupportsRedirect = true; - const Tests::TServerSettings Settings; - const ui16 GRpcPort; - NClient::TKikimr Kikimr; +class TFlatMsgBusPQClient : public NFlatTests::TFlatMsgBusClient { +private: + static constexpr ui32 FlatDomain = 0; + static constexpr bool FlatSupportsRedirect = true; + const Tests::TServerSettings Settings; + const ui16 GRpcPort; + NClient::TKikimr Kikimr; THolder<NYdb::TDriver> Driver; ui64 TopicsVersion = 0; bool UseConfigTables = true; - + void RunYqlSchemeQuery(TString query) { auto tableClient = NYdb::NTable::TTableClient(*Driver); auto result = tableClient.RetryOperationSync([&](NYdb::NTable::TSession session) { @@ -514,14 +514,14 @@ private: } -public: +public: TFlatMsgBusPQClient( const Tests::TServerSettings& settings, ui16 grpc, TMaybe<TString> databaseName = Nothing() ) - : TFlatMsgBusClient(settings) - , Settings(settings) - , GRpcPort(grpc) - , Kikimr(GetClientConfig()) + : TFlatMsgBusClient(settings) + , Settings(settings) + , GRpcPort(grpc) + , Kikimr(GetClientConfig()) { auto driverConfig = NYdb::TDriverConfig() .SetEndpoint(TStringBuilder() << "localhost:" << GRpcPort) @@ -531,7 +531,7 @@ public: } Driver.Reset(MakeHolder<NYdb::TDriver>(driverConfig)); } - + 
~TFlatMsgBusPQClient() { Driver->Stop(true); } @@ -553,11 +553,11 @@ public: InitSourceIds({}); } } - void InitRoot() { - InitRootScheme(); - MkDir("/Root", "PQ"); - } - + void InitRoot() { + InitRootScheme(); + MkDir("/Root", "PQ"); + } + NYdb::TDriver* GetDriver() { return Driver.Get(); } @@ -567,15 +567,15 @@ public: CreateTable(fsPath.Dirname(), "Name: \"" + fsPath.Basename() + "\"" "Columns { Name: \"Hash\" Type: \"Uint32\"}" - "Columns { Name: \"SourceId\" Type: \"Utf8\"}" - "Columns { Name: \"Topic\" Type: \"Utf8\"}" - "Columns { Name: \"Partition\" Type: \"Uint32\"}" - "Columns { Name: \"CreateTime\" Type: \"Uint64\"}" - "Columns { Name: \"AccessTime\" Type: \"Uint64\"}" - "KeyColumnNames: [\"Hash\", \"SourceId\", \"Topic\"]" - ); - } - + "Columns { Name: \"SourceId\" Type: \"Utf8\"}" + "Columns { Name: \"Topic\" Type: \"Utf8\"}" + "Columns { Name: \"Partition\" Type: \"Uint32\"}" + "Columns { Name: \"CreateTime\" Type: \"Uint64\"}" + "Columns { Name: \"AccessTime\" Type: \"Uint64\"}" + "KeyColumnNames: [\"Hash\", \"SourceId\", \"Topic\"]" + ); + } + void InsertSourceId(ui32 hash, TString sourceId, ui64 accessTime, const TString& path = "/Root/PQ/SourceIdMeta2") { TString query = "DECLARE $Hash AS Uint32; " @@ -607,24 +607,24 @@ public: } void InitDCs(THashMap<TString, TPQTestClusterInfo> clusters = DEFAULT_CLUSTERS_LIST, const TString& localCluster = TString()) { - MkDir("/Root/PQ", "Config"); + MkDir("/Root/PQ", "Config"); MkDir("/Root/PQ/Config", "V2"); RunYqlSchemeQuery(R"___( CREATE TABLE [/Root/PQ/Config/V2/Cluster] ( - name Utf8, + name Utf8, balancer Utf8, - local Bool, - enabled Bool, + local Bool, + enabled Bool, weight Uint64, - PRIMARY KEY (name) - ); + PRIMARY KEY (name) + ); CREATE TABLE [/Root/PQ/Config/V2/Topics] ( path Utf8, dc Utf8, PRIMARY KEY (path, dc) ); )___"); - + RunYqlSchemeQuery(R"___( CREATE TABLE [/Root/PQ/Config/V2/Versions] ( name Utf8, @@ -651,8 +651,8 @@ public: UPSERT INTO [/Root/PQ/Config/V2/Versions] (name, version) VALUES ("Cluster", 1); UPSERT INTO [/Root/PQ/Config/V2/Versions] (name, version) VALUES ("Topics", 0); )___"); - } - + } + void UpdateDcEnabled(const TString& name, bool enabled) { TStringBuilder query; query << "UPDATE [/Root/PQ/Config/V2/Cluster] SET enabled = " << (enabled ? 
"true" : "false") @@ -717,70 +717,70 @@ public: RunYqlDataQuery(query); } - void DisableDC() { + void DisableDC() { UpdateDC("dc1", true, false); - } - - void RestartSchemeshard(TTestActorRuntime* runtime) { + } + + void RestartSchemeshard(TTestActorRuntime* runtime) { TActorId sender = runtime->AllocateEdgeActor(); - const ui64 schemeRoot = GetPatchedSchemeRoot(Tests::SchemeRoot, Settings.Domain, Settings.SupportsRedirect); + const ui64 schemeRoot = GetPatchedSchemeRoot(Tests::SchemeRoot, Settings.Domain, Settings.SupportsRedirect); ForwardToTablet(*runtime, schemeRoot, sender, new TEvents::TEvPoisonPill(), 0); - TDispatchOptions options; - runtime->DispatchEvents(options); - } - - ui32 TopicCreated(const TString& name, ui64 cacheSize = 0) { - TAutoPtr<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutableMetaRequest()->MutableCmdGetTopicMetadata(); - req->AddTopic(name); - - TAutoPtr<NBus::TBusMessage> reply; - NBus::EMessageStatus status = SyncCall(request, reply); + TDispatchOptions options; + runtime->DispatchEvents(options); + } + + ui32 TopicCreated(const TString& name, ui64 cacheSize = 0) { + TAutoPtr<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableMetaRequest()->MutableCmdGetTopicMetadata(); + req->AddTopic(name); + + TAutoPtr<NBus::TBusMessage> reply; + NBus::EMessageStatus status = SyncCall(request, reply); Cerr << "Topic created - response: " << PrintResult<NMsgBusProxy::TBusResponse>(reply.Get()) << Endl; - if (status != NBus::MESSAGE_OK) - return 0; - UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK); - const NMsgBusProxy::TBusResponse* response = dynamic_cast<NMsgBusProxy::TBusResponse*>(reply.Get()); - UNIT_ASSERT(response); - UNIT_ASSERT(response->Record.HasErrorCode()); - - if (response->Record.GetErrorCode() != (ui32)NPersQueue::NErrorCode::OK) + if (status != NBus::MESSAGE_OK) return 0; - UNIT_ASSERT(response->Record.HasMetaResponse()); - const auto& metaResp = response->Record.GetMetaResponse(); - UNIT_ASSERT(metaResp.HasCmdGetTopicMetadataResult()); - const auto& resp = metaResp.GetCmdGetTopicMetadataResult(); - UNIT_ASSERT(resp.TopicInfoSize() == 1); - const auto& topicInfo = resp.GetTopicInfo(0); - UNIT_ASSERT(topicInfo.GetTopic() == name); - if (cacheSize) { - UNIT_ASSERT(topicInfo.GetConfig().HasCacheSize()); - ui64 actualSize = topicInfo.GetConfig().GetCacheSize(); - if (actualSize != cacheSize) - return 0; - } + UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK); + const NMsgBusProxy::TBusResponse* response = dynamic_cast<NMsgBusProxy::TBusResponse*>(reply.Get()); + UNIT_ASSERT(response); + UNIT_ASSERT(response->Record.HasErrorCode()); + + if (response->Record.GetErrorCode() != (ui32)NPersQueue::NErrorCode::OK) + return 0; + UNIT_ASSERT(response->Record.HasMetaResponse()); + const auto& metaResp = response->Record.GetMetaResponse(); + UNIT_ASSERT(metaResp.HasCmdGetTopicMetadataResult()); + const auto& resp = metaResp.GetCmdGetTopicMetadataResult(); + UNIT_ASSERT(resp.TopicInfoSize() == 1); + const auto& topicInfo = resp.GetTopicInfo(0); + UNIT_ASSERT(topicInfo.GetTopic() == name); + if (cacheSize) { + UNIT_ASSERT(topicInfo.GetConfig().HasCacheSize()); + ui64 actualSize = topicInfo.GetConfig().GetCacheSize(); + if (actualSize != cacheSize) + return 0; + } Cerr << "=== Topic created, have version: " << topicInfo.GetConfig().GetVersion() << Endl; - return topicInfo.GetConfig().GetVersion(); - } - - ui32 TopicRealCreated(const TString& name) { - 
TAutoPtr<NMsgBusProxy::TBusResponse> res = Ls("/Root/PQ/" + name); - Cerr << res->Record << "\n"; - return res->Record.GetPathDescription().GetPersQueueGroup().GetAlterVersion(); - } - - - void RestartBalancerTablet(TTestActorRuntime* runtime, const TString& topic) { - TAutoPtr<NMsgBusProxy::TBusResponse> res = Ls("/Root/PQ/" + topic); - Cerr << res->Record << "\n"; - const ui64 tablet = res->Record.GetPathDescription().GetPersQueueGroup().GetBalancerTabletID(); + return topicInfo.GetConfig().GetVersion(); + } + + ui32 TopicRealCreated(const TString& name) { + TAutoPtr<NMsgBusProxy::TBusResponse> res = Ls("/Root/PQ/" + name); + Cerr << res->Record << "\n"; + return res->Record.GetPathDescription().GetPersQueueGroup().GetAlterVersion(); + } + + + void RestartBalancerTablet(TTestActorRuntime* runtime, const TString& topic) { + TAutoPtr<NMsgBusProxy::TBusResponse> res = Ls("/Root/PQ/" + topic); + Cerr << res->Record << "\n"; + const ui64 tablet = res->Record.GetPathDescription().GetPersQueueGroup().GetBalancerTabletID(); TActorId sender = runtime->AllocateEdgeActor(); ForwardToTablet(*runtime, tablet, sender, new TEvents::TEvPoisonPill(), 0); - TDispatchOptions options; - runtime->DispatchEvents(options); - } - - + TDispatchOptions options; + runtime->DispatchEvents(options); + } + + void RestartPartitionTablets(TTestActorRuntime* runtime, const TString& topic) { TAutoPtr<NMsgBusProxy::TBusResponse> res = Ls("/Root/PQ/" + topic); Cerr << res->Record << "\n"; @@ -800,42 +800,42 @@ public: } } - bool TopicDeleted(const TString& name) { - TAutoPtr<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); - auto req = request->Record.MutableMetaRequest()->MutableCmdGetTopicMetadata(); - req->AddTopic(name); - - TAutoPtr<NBus::TBusMessage> reply; - NBus::EMessageStatus status = SyncCall(request, reply); + bool TopicDeleted(const TString& name) { + TAutoPtr<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); + auto req = request->Record.MutableMetaRequest()->MutableCmdGetTopicMetadata(); + req->AddTopic(name); + + TAutoPtr<NBus::TBusMessage> reply; + NBus::EMessageStatus status = SyncCall(request, reply); Cerr << "Topic deleted got reply with status: " << status << Endl; - if (status != NBus::MESSAGE_OK) - return false; - const NMsgBusProxy::TBusResponse* response = dynamic_cast<NMsgBusProxy::TBusResponse*>(reply.Get()); + if (status != NBus::MESSAGE_OK) + return false; + const NMsgBusProxy::TBusResponse* response = dynamic_cast<NMsgBusProxy::TBusResponse*>(reply.Get()); if (response == nullptr) return false; - Cerr << "TopicDeleted response " << response->Record << "\n"; - UNIT_ASSERT(response); - UNIT_ASSERT(response->Record.HasErrorCode()); - if (response->Record.GetErrorCode() != (ui32)NPersQueue::NErrorCode::UNKNOWN_TOPIC) - return false; - return true; - } - - - const NMsgBusProxy::TBusResponse* SendAndGetReply(TAutoPtr<NMsgBusProxy::TBusPersQueue> request, - TAutoPtr<NBus::TBusMessage>& reply, ui64 maxPrintSize = 0) { - NBus::EMessageStatus status = SyncCall(request, reply); - TString msgStr; + Cerr << "TopicDeleted response " << response->Record << "\n"; + UNIT_ASSERT(response); + UNIT_ASSERT(response->Record.HasErrorCode()); + if (response->Record.GetErrorCode() != (ui32)NPersQueue::NErrorCode::UNKNOWN_TOPIC) + return false; + return true; + } + + + const NMsgBusProxy::TBusResponse* SendAndGetReply(TAutoPtr<NMsgBusProxy::TBusPersQueue> request, + TAutoPtr<NBus::TBusMessage>& reply, ui64 maxPrintSize = 0) { + NBus::EMessageStatus status = SyncCall(request, 
reply); + TString msgStr; UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK); - if (maxPrintSize) { - msgStr = PrintResult<NMsgBusProxy::TBusResponse>(reply.Get(), maxPrintSize); - } else { - msgStr = PrintResult<NMsgBusProxy::TBusResponse>(reply.Get()); - } - Cerr << msgStr << Endl; - return dynamic_cast<NMsgBusProxy::TBusResponse*>(reply.Get()); - } - + if (maxPrintSize) { + msgStr = PrintResult<NMsgBusProxy::TBusResponse>(reply.Get(), maxPrintSize); + } else { + msgStr = PrintResult<NMsgBusProxy::TBusResponse>(reply.Get()); + } + Cerr << msgStr << Endl; + return dynamic_cast<NMsgBusProxy::TBusResponse*>(reply.Get()); + } + void CreateConsumer(const TString& oldName) { auto name = NPersQueue::ConvertOldConsumerName(oldName); RunYqlSchemeQuery("CREATE TABLE [/Root/PQ/" + name + "] (" + "Topic Utf8, Partition Uint32, Offset Uint64, PRIMARY KEY (Topic,Partition) );"); @@ -938,7 +938,7 @@ public: ); return CreateTopic(request); } - + void AlterTopicNoLegacy(const TString& name, ui32 nParts, ui64 lifetimeS = 86400) { TString path = name; if (!UseConfigTables) { @@ -953,8 +953,8 @@ public: } res.Wait(); Cerr << "Alter topic (" << path << ") response: " << res.GetValue().IsSuccess() << " " << res.GetValue().GetIssues().ToString() << Endl; - } - + } + void AlterTopic( const TString& name, ui32 nParts, @@ -966,28 +966,28 @@ public: Y_VERIFY(name.StartsWith("rt3.")); TRequestAlterPQ requestDescr(name, nParts, cacheSize, lifetimeS, fillPartitionConfig, mirrorFrom); THolder<NMsgBusProxy::TBusPersQueue> request = requestDescr.GetRequest(); - - ui32 prevVersion = TopicCreated(name); - - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); - UNIT_ASSERT_VALUES_EQUAL_C((ui32)response->Record.GetErrorCode(), (ui32)NPersQueue::NErrorCode::OK, - response->Record.DebugString().c_str()); - - const TInstant start = TInstant::Now(); + + ui32 prevVersion = TopicCreated(name); + + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); + UNIT_ASSERT_VALUES_EQUAL_C((ui32)response->Record.GetErrorCode(), (ui32)NPersQueue::NErrorCode::OK, + response->Record.DebugString().c_str()); + + const TInstant start = TInstant::Now(); AlterTopic(); - while (TopicCreated(name, cacheSize) != prevVersion + 1) { - Sleep(TDuration::MilliSeconds(500)); - UNIT_ASSERT(TInstant::Now() - start < ::DEFAULT_DISPATCH_TIMEOUT); - } - while (TopicRealCreated(name) != prevVersion + 1) { - Sleep(TDuration::MilliSeconds(500)); - UNIT_ASSERT(TInstant::Now() - start < ::DEFAULT_DISPATCH_TIMEOUT); - } - - } - + while (TopicCreated(name, cacheSize) != prevVersion + 1) { + Sleep(TDuration::MilliSeconds(500)); + UNIT_ASSERT(TInstant::Now() - start < ::DEFAULT_DISPATCH_TIMEOUT); + } + while (TopicRealCreated(name) != prevVersion + 1) { + Sleep(TDuration::MilliSeconds(500)); + UNIT_ASSERT(TInstant::Now() - start < ::DEFAULT_DISPATCH_TIMEOUT); + } + + } + void DeleteTopicNoLegacy (const TString& name) { TString path = name; if (!UseConfigTables) { @@ -1005,77 +1005,77 @@ public: void DeleteTopic2(const TString& name, NPersQueue::NErrorCode::EErrorCode expectedStatus = NPersQueue::NErrorCode::OK, bool waitForTopicDeletion = true) { Y_VERIFY(name.StartsWith("rt3.")); - THolder<NMsgBusProxy::TBusPersQueue> request = TRequestDeletePQ{name}.GetRequest(); - - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = 
SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); - UNIT_ASSERT_VALUES_EQUAL_C((ui32)response->Record.GetErrorCode(), (ui32)expectedStatus, - "proxy failure"); - - // wait for drop completion - if (expectedStatus == NPersQueue::NErrorCode::OK) { - ui32 i = 0; - for (; i < 500; ++i) { - TAutoPtr<NMsgBusProxy::TBusResponse> r = TryDropPersQueueGroup("/Root/PQ", name); - UNIT_ASSERT(r); + THolder<NMsgBusProxy::TBusPersQueue> request = TRequestDeletePQ{name}.GetRequest(); + + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); + UNIT_ASSERT_VALUES_EQUAL_C((ui32)response->Record.GetErrorCode(), (ui32)expectedStatus, + "proxy failure"); + + // wait for drop completion + if (expectedStatus == NPersQueue::NErrorCode::OK) { + ui32 i = 0; + for (; i < 500; ++i) { + TAutoPtr<NMsgBusProxy::TBusResponse> r = TryDropPersQueueGroup("/Root/PQ", name); + UNIT_ASSERT(r); if (r->Record.GetSchemeStatus() == NKikimrScheme::StatusPathDoesNotExist) { - break; - } - Sleep(TDuration::MilliSeconds(50)); - } - UNIT_ASSERT_C(i < 500, "Drop is taking too long"); //25 seconds - } + break; + } + Sleep(TDuration::MilliSeconds(50)); + } + UNIT_ASSERT_C(i < 500, "Drop is taking too long"); //25 seconds + } RemoveTopic(name); - const TInstant start = TInstant::Now(); + const TInstant start = TInstant::Now(); while (waitForTopicDeletion && !TopicDeleted(name)) { - Sleep(TDuration::MilliSeconds(50)); - UNIT_ASSERT(TInstant::Now() - start < ::DEFAULT_DISPATCH_TIMEOUT); - } - } - - TString GetOwnership(const TRequestGetOwnership& getOwnership, - NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK) { - - THolder<NMsgBusProxy::TBusPersQueue> request = getOwnership.GetRequest(); - - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); - - UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), expectedStatus, - "proxy failure"); - - if (expectedStatus == NMsgBusProxy::MSTATUS_OK) { - UNIT_ASSERT_VALUES_EQUAL_C((ui32)response->Record.GetErrorCode(), (ui32)NPersQueue::NErrorCode::OK, - "write failure"); - return response->Record.GetPartitionResponse().GetCmdGetOwnershipResult().GetOwnerCookie(); - } - return ""; - } - - void ChooseProxy(ETransport transport = ETransport::MsgBus) { - THolder<NMsgBusProxy::TBusChooseProxy> request = MakeHolder<NMsgBusProxy::TBusChooseProxy>(); - NKikimrClient::TResponse response; - - if (transport == ETransport::GRpc) { - auto channel = grpc::CreateChannel("localhost:"+ToString(GRpcPort), grpc::InsecureChannelCredentials()); - auto stub(NKikimrClient::TGRpcServer::NewStub(channel)); - grpc::ClientContext context; - auto status = stub->ChooseProxy(&context, request->Record, &response); - - UNIT_ASSERT(status.ok()); - } else { - Y_FAIL("not allowed"); - } - - Cerr << response << "\n"; - - UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response.GetStatus(), NMsgBusProxy::MSTATUS_OK, - "proxy failure"); - } - - + Sleep(TDuration::MilliSeconds(50)); + UNIT_ASSERT(TInstant::Now() - start < ::DEFAULT_DISPATCH_TIMEOUT); + } + } + + TString GetOwnership(const TRequestGetOwnership& getOwnership, + NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK) { + + THolder<NMsgBusProxy::TBusPersQueue> request = getOwnership.GetRequest(); + + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = 
SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); + + UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), expectedStatus, + "proxy failure"); + + if (expectedStatus == NMsgBusProxy::MSTATUS_OK) { + UNIT_ASSERT_VALUES_EQUAL_C((ui32)response->Record.GetErrorCode(), (ui32)NPersQueue::NErrorCode::OK, + "write failure"); + return response->Record.GetPartitionResponse().GetCmdGetOwnershipResult().GetOwnerCookie(); + } + return ""; + } + + void ChooseProxy(ETransport transport = ETransport::MsgBus) { + THolder<NMsgBusProxy::TBusChooseProxy> request = MakeHolder<NMsgBusProxy::TBusChooseProxy>(); + NKikimrClient::TResponse response; + + if (transport == ETransport::GRpc) { + auto channel = grpc::CreateChannel("localhost:"+ToString(GRpcPort), grpc::InsecureChannelCredentials()); + auto stub(NKikimrClient::TGRpcServer::NewStub(channel)); + grpc::ClientContext context; + auto status = stub->ChooseProxy(&context, request->Record, &response); + + UNIT_ASSERT(status.ok()); + } else { + Y_FAIL("not allowed"); + } + + Cerr << response << "\n"; + + UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response.GetStatus(), NMsgBusProxy::MSTATUS_OK, + "proxy failure"); + } + + void WriteToPQ( const TRequestWritePQ& writeRequest, const TString& data, const TString& ticket = "", @@ -1083,168 +1083,168 @@ public: NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK, NMsgBusProxy::EResponseStatus expectedOwnerStatus = NMsgBusProxy::MSTATUS_OK ) { - - TString cookie = GetOwnership({writeRequest.Topic, writeRequest.Partition}, expectedOwnerStatus); - - THolder<NMsgBusProxy::TBusPersQueue> request = writeRequest.GetRequest(data, cookie); - if (!ticket.empty()) { - request.Get()->Record.SetTicket(ticket); - } - NKikimrClient::TResponse response; - - if (transport == ETransport::GRpc) { - auto channel = grpc::CreateChannel("localhost:"+ToString(GRpcPort), grpc::InsecureChannelCredentials()); - auto stub(NKikimrClient::TGRpcServer::NewStub(channel)); - grpc::ClientContext context; - auto status = stub->PersQueueRequest(&context, request->Record, &response); - - UNIT_ASSERT(status.ok()); - } else { - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* busResponse = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(busResponse); - - response.CopyFrom(busResponse->Record); - } - + + TString cookie = GetOwnership({writeRequest.Topic, writeRequest.Partition}, expectedOwnerStatus); + + THolder<NMsgBusProxy::TBusPersQueue> request = writeRequest.GetRequest(data, cookie); + if (!ticket.empty()) { + request.Get()->Record.SetTicket(ticket); + } + NKikimrClient::TResponse response; + + if (transport == ETransport::GRpc) { + auto channel = grpc::CreateChannel("localhost:"+ToString(GRpcPort), grpc::InsecureChannelCredentials()); + auto stub(NKikimrClient::TGRpcServer::NewStub(channel)); + grpc::ClientContext context; + auto status = stub->PersQueueRequest(&context, request->Record, &response); + + UNIT_ASSERT(status.ok()); + } else { + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* busResponse = SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(busResponse); + + response.CopyFrom(busResponse->Record); + } + Cerr << response << "\n"; - UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response.GetStatus(), expectedStatus, - "proxy failure"); - if (expectedStatus == NMsgBusProxy::MSTATUS_OK) { - UNIT_ASSERT_VALUES_EQUAL_C((ui32)response.GetErrorCode(), (ui32)NPersQueue::NErrorCode::OK, - 
"write failure"); - } - } - - void WriteToPQ(const TString& topic, ui32 partition, const TString& sourceId, const ui64 seqNo, const TString& data, - const TString& ticket = "", - ETransport transport = ETransport::MsgBus, - NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK, - NMsgBusProxy::EResponseStatus expectedOwnerStatus = NMsgBusProxy::MSTATUS_OK) { - WriteToPQ({topic, partition, sourceId, seqNo}, data, ticket, transport, expectedStatus, expectedOwnerStatus); - } - - struct TReadDebugInfo { - ui32 BlobsFromDisk = 0; - ui32 BlobsFromCache = 0; - TVector<TString> Values; - }; - + UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response.GetStatus(), expectedStatus, + "proxy failure"); + if (expectedStatus == NMsgBusProxy::MSTATUS_OK) { + UNIT_ASSERT_VALUES_EQUAL_C((ui32)response.GetErrorCode(), (ui32)NPersQueue::NErrorCode::OK, + "write failure"); + } + } + + void WriteToPQ(const TString& topic, ui32 partition, const TString& sourceId, const ui64 seqNo, const TString& data, + const TString& ticket = "", + ETransport transport = ETransport::MsgBus, + NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK, + NMsgBusProxy::EResponseStatus expectedOwnerStatus = NMsgBusProxy::MSTATUS_OK) { + WriteToPQ({topic, partition, sourceId, seqNo}, data, ticket, transport, expectedStatus, expectedOwnerStatus); + } + + struct TReadDebugInfo { + ui32 BlobsFromDisk = 0; + ui32 BlobsFromCache = 0; + TVector<TString> Values; + }; + TReadDebugInfo ReadFromPQ( const TRequestReadPQ& readRequest, ui32 readCount, const TString& ticket = "", NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK, NPersQueue::NErrorCode::EErrorCode expectedError = NPersQueue::NErrorCode::OK ) { - THolder<NMsgBusProxy::TBusPersQueue> request = readRequest.GetRequest(); - if (!ticket.empty()) { - request.Get()->Record.SetTicket(ticket); - } - - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); - - auto status = response->Record.GetStatus(); - auto errorCode = response->Record.GetErrorCode(); - UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)status, expectedStatus, response->Record.GetErrorReason()); - UNIT_ASSERT_VALUES_EQUAL_C((ui32)errorCode, (ui32)expectedError, response->Record.GetErrorReason()); - - if (expectedStatus == NMsgBusProxy::MSTATUS_OK) { - UNIT_ASSERT(response->Record.GetPartitionResponse().HasCmdReadResult()); - UNIT_ASSERT_VALUES_EQUAL(response->Record.GetPartitionResponse().GetCmdReadResult().ResultSize(), readCount); - } - - TReadDebugInfo info; - auto result = response->Record.GetPartitionResponse().GetCmdReadResult(); - if (result.HasBlobsFromDisk()) - info.BlobsFromDisk = result.GetBlobsFromDisk(); - if (result.HasBlobsFromCache()) - info.BlobsFromCache = result.GetBlobsFromCache(); - - for (ui32 i = 0; i < result.ResultSize(); ++i) { - auto r = result.GetResult(i); - if (r.HasData()) - info.Values.push_back(r.GetData()); - } - return info; - } - - - TReadDebugInfo ReadFromPQ(const TString& topic, ui32 partition, ui64 startOffset, ui32 count, ui32 readCount, const TString& ticket = "") { - return ReadFromPQ({topic, partition, startOffset, count, "user"}, readCount, ticket); - } - + THolder<NMsgBusProxy::TBusPersQueue> request = readRequest.GetRequest(); + if (!ticket.empty()) { + request.Get()->Record.SetTicket(ticket); + } + + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = 
SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); + + auto status = response->Record.GetStatus(); + auto errorCode = response->Record.GetErrorCode(); + UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)status, expectedStatus, response->Record.GetErrorReason()); + UNIT_ASSERT_VALUES_EQUAL_C((ui32)errorCode, (ui32)expectedError, response->Record.GetErrorReason()); + + if (expectedStatus == NMsgBusProxy::MSTATUS_OK) { + UNIT_ASSERT(response->Record.GetPartitionResponse().HasCmdReadResult()); + UNIT_ASSERT_VALUES_EQUAL(response->Record.GetPartitionResponse().GetCmdReadResult().ResultSize(), readCount); + } + + TReadDebugInfo info; + auto result = response->Record.GetPartitionResponse().GetCmdReadResult(); + if (result.HasBlobsFromDisk()) + info.BlobsFromDisk = result.GetBlobsFromDisk(); + if (result.HasBlobsFromCache()) + info.BlobsFromCache = result.GetBlobsFromCache(); + + for (ui32 i = 0; i < result.ResultSize(); ++i) { + auto r = result.GetResult(i); + if (r.HasData()) + info.Values.push_back(r.GetData()); + } + return info; + } + + + TReadDebugInfo ReadFromPQ(const TString& topic, ui32 partition, ui64 startOffset, ui32 count, ui32 readCount, const TString& ticket = "") { + return ReadFromPQ({topic, partition, startOffset, count, "user"}, readCount, ticket); + } + void SetClientOffsetPQ( const TRequestSetClientOffsetPQ& cmdRequest, const TString& ticket = "", NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK, NPersQueue::NErrorCode::EErrorCode expectedError = NPersQueue::NErrorCode::OK ) { - THolder<NMsgBusProxy::TBusPersQueue> request = cmdRequest.GetRequest(); - if (!ticket.empty()) { - request.Get()->Record.SetTicket(ticket); - } - - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); - - auto status = response->Record.GetStatus(); - auto errorCode = response->Record.GetErrorCode(); - UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)status, expectedStatus, response->Record.GetErrorReason()); - UNIT_ASSERT_VALUES_EQUAL_C((ui32)errorCode, (ui32)expectedError, response->Record.GetErrorReason()); - } - - void SetClientOffsetPQ(const TString& topic, ui32 partition, ui64 offset, const TString& ticket = "", - NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK, - NPersQueue::NErrorCode::EErrorCode expectedError = NPersQueue::NErrorCode::OK) { - return SetClientOffsetPQ({topic, partition, offset, "user"}, ticket, expectedStatus, expectedError); - } - - void FetchRequestPQ(const TVector<FetchPartInfo>& fetchParts, ui32 maxBytes, ui32 waitMs) { - THolder<NMsgBusProxy::TBusPersQueue> request = TFetchRequestPQ().GetRequest(fetchParts, maxBytes, waitMs); - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); - } - - void GetPartOffset(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts, ui32 resCount, ui32 hasClientOffset, bool ok) { - THolder<NMsgBusProxy::TBusPersQueue> request = TRequestGetPartOffsets().GetRequest(topicsAndParts); - - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); - UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), ok ? 
NMsgBusProxy::MSTATUS_OK : NMsgBusProxy::MSTATUS_ERROR, - "proxy failure"); - - if (!ok) - return; - - auto res = response->Record.GetMetaResponse().GetCmdGetPartitionOffsetsResult(); - ui32 count = 0; - ui32 clientOffsetCount = 0; - for (ui32 i = 0; i < res.TopicResultSize(); ++i) { - auto t = res.GetTopicResult(i); - count += t.PartitionResultSize(); - for (ui32 j = 0; j < t.PartitionResultSize(); ++j) { - if (t.GetPartitionResult(j).HasClientOffset()) - ++clientOffsetCount; - } - } - UNIT_ASSERT_VALUES_EQUAL(count, resCount); - UNIT_ASSERT_VALUES_EQUAL(clientOffsetCount, hasClientOffset); - } - + THolder<NMsgBusProxy::TBusPersQueue> request = cmdRequest.GetRequest(); + if (!ticket.empty()) { + request.Get()->Record.SetTicket(ticket); + } + + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); + + auto status = response->Record.GetStatus(); + auto errorCode = response->Record.GetErrorCode(); + UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)status, expectedStatus, response->Record.GetErrorReason()); + UNIT_ASSERT_VALUES_EQUAL_C((ui32)errorCode, (ui32)expectedError, response->Record.GetErrorReason()); + } + + void SetClientOffsetPQ(const TString& topic, ui32 partition, ui64 offset, const TString& ticket = "", + NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK, + NPersQueue::NErrorCode::EErrorCode expectedError = NPersQueue::NErrorCode::OK) { + return SetClientOffsetPQ({topic, partition, offset, "user"}, ticket, expectedStatus, expectedError); + } + + void FetchRequestPQ(const TVector<FetchPartInfo>& fetchParts, ui32 maxBytes, ui32 waitMs) { + THolder<NMsgBusProxy::TBusPersQueue> request = TFetchRequestPQ().GetRequest(fetchParts, maxBytes, waitMs); + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); + } + + void GetPartOffset(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts, ui32 resCount, ui32 hasClientOffset, bool ok) { + THolder<NMsgBusProxy::TBusPersQueue> request = TRequestGetPartOffsets().GetRequest(topicsAndParts); + + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); + UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), ok ? 
NMsgBusProxy::MSTATUS_OK : NMsgBusProxy::MSTATUS_ERROR, + "proxy failure"); + + if (!ok) + return; + + auto res = response->Record.GetMetaResponse().GetCmdGetPartitionOffsetsResult(); + ui32 count = 0; + ui32 clientOffsetCount = 0; + for (ui32 i = 0; i < res.TopicResultSize(); ++i) { + auto t = res.GetTopicResult(i); + count += t.PartitionResultSize(); + for (ui32 j = 0; j < t.PartitionResultSize(); ++j) { + if (t.GetPartitionResult(j).HasClientOffset()) + ++clientOffsetCount; + } + } + UNIT_ASSERT_VALUES_EQUAL(count, resCount); + UNIT_ASSERT_VALUES_EQUAL(clientOffsetCount, hasClientOffset); + } + NKikimrClient::TResponse GetClientInfo(const TVector<TString>& topics, const TString& user, bool ok, const TVector<TString>& badTopics = {}) { - THolder<NMsgBusProxy::TBusPersQueue> request = TRequestGetClientInfo().GetRequest(topics, user); - Cerr << "Request: " << request->Record << Endl; - - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); - Cerr << "Response: " << response->Record << "\n"; - UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), ok ? NMsgBusProxy::MSTATUS_OK : NMsgBusProxy::MSTATUS_ERROR, - "proxy failure"); + THolder<NMsgBusProxy::TBusPersQueue> request = TRequestGetClientInfo().GetRequest(topics, user); + Cerr << "Request: " << request->Record << Endl; + + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); + Cerr << "Response: " << response->Record << "\n"; + UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), ok ? NMsgBusProxy::MSTATUS_OK : NMsgBusProxy::MSTATUS_ERROR, + "proxy failure"); THashSet<TString> good; THashSet<TString> bad; for (auto& t : badTopics) { @@ -1264,92 +1264,92 @@ public: } } return response->Record; - } - - - void GetPartStatus(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts, ui32 resCount, bool ok) { - THolder<NMsgBusProxy::TBusPersQueue> request = TRequestGetPartStatus().GetRequest(topicsAndParts); - - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); - UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), ok ? NMsgBusProxy::MSTATUS_OK : NMsgBusProxy::MSTATUS_ERROR, - "proxy failure"); - if (!ok) - return; - - auto res = response->Record.GetMetaResponse().GetCmdGetPartitionStatusResult(); - ui32 count = 0; - for (ui32 i = 0; i < res.TopicResultSize(); ++i) { - auto t = res.GetTopicResult(i); - count += t.PartitionResultSize(); - } - UNIT_ASSERT_VALUES_EQUAL(count, resCount); - } - - TVector<ui32> GetPartLocation(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts, ui32 resCount, bool ok) { - bool doRetry = true; - - TVector<ui32> nodeIds; - const TInstant start = TInstant::Now(); - while (doRetry) { - doRetry = false; - nodeIds.clear(); - - THolder<NMsgBusProxy::TBusPersQueue> request = TRequestGetPartLocations().GetRequest(topicsAndParts); - - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); - if (response->Record.GetErrorCode() == NPersQueue::NErrorCode::INITIALIZING) { - doRetry = true; - continue; - } - UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), ok ? 
NMsgBusProxy::MSTATUS_OK : NMsgBusProxy::MSTATUS_ERROR, - "proxy failure"); - - if (!ok) - return {}; - - auto res = response->Record.GetMetaResponse().GetCmdGetPartitionLocationsResult(); - - for (ui32 i = 0; i < res.TopicResultSize(); ++i) { - auto t = res.GetTopicResult(i); + } + + + void GetPartStatus(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts, ui32 resCount, bool ok) { + THolder<NMsgBusProxy::TBusPersQueue> request = TRequestGetPartStatus().GetRequest(topicsAndParts); + + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); + UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), ok ? NMsgBusProxy::MSTATUS_OK : NMsgBusProxy::MSTATUS_ERROR, + "proxy failure"); + if (!ok) + return; + + auto res = response->Record.GetMetaResponse().GetCmdGetPartitionStatusResult(); + ui32 count = 0; + for (ui32 i = 0; i < res.TopicResultSize(); ++i) { + auto t = res.GetTopicResult(i); + count += t.PartitionResultSize(); + } + UNIT_ASSERT_VALUES_EQUAL(count, resCount); + } + + TVector<ui32> GetPartLocation(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts, ui32 resCount, bool ok) { + bool doRetry = true; + + TVector<ui32> nodeIds; + const TInstant start = TInstant::Now(); + while (doRetry) { + doRetry = false; + nodeIds.clear(); + + THolder<NMsgBusProxy::TBusPersQueue> request = TRequestGetPartLocations().GetRequest(topicsAndParts); + + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); + if (response->Record.GetErrorCode() == NPersQueue::NErrorCode::INITIALIZING) { + doRetry = true; + continue; + } + UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), ok ? 
NMsgBusProxy::MSTATUS_OK : NMsgBusProxy::MSTATUS_ERROR, + "proxy failure"); + + if (!ok) + return {}; + + auto res = response->Record.GetMetaResponse().GetCmdGetPartitionLocationsResult(); + + for (ui32 i = 0; i < res.TopicResultSize(); ++i) { + auto t = res.GetTopicResult(i); if (t.GetErrorCode() == NPersQueue::NErrorCode::INITIALIZING) doRetry = true; - for (ui32 pi = 0; pi < t.PartitionLocationSize(); ++pi) { - if (!t.GetPartitionLocation(pi).HasHostId()) { - // Retry until the requested partiotions are successfully resolved - doRetry = true; - } else { - nodeIds.push_back(t.GetPartitionLocation(pi).GetHostId()); - } - } - } - UNIT_ASSERT(TInstant::Now() - start < ::DEFAULT_DISPATCH_TIMEOUT); - if (doRetry) { - Sleep(TDuration::MilliSeconds(50)); - } - } - UNIT_ASSERT_VALUES_EQUAL(nodeIds.size(), resCount); - return nodeIds; - } - + for (ui32 pi = 0; pi < t.PartitionLocationSize(); ++pi) { + if (!t.GetPartitionLocation(pi).HasHostId()) { + // Retry until the requested partiotions are successfully resolved + doRetry = true; + } else { + nodeIds.push_back(t.GetPartitionLocation(pi).GetHostId()); + } + } + } + UNIT_ASSERT(TInstant::Now() - start < ::DEFAULT_DISPATCH_TIMEOUT); + if (doRetry) { + Sleep(TDuration::MilliSeconds(50)); + } + } + UNIT_ASSERT_VALUES_EQUAL(nodeIds.size(), resCount); + return nodeIds; + } + NKikimrClient::TPersQueueMetaResponse::TCmdGetTopicMetadataResult DescribeTopic(const TVector<TString>& topics, bool error = false) { - THolder<NMsgBusProxy::TBusPersQueue> request = TRequestDescribePQ().GetRequest(topics); - - TAutoPtr<NBus::TBusMessage> reply; - const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); - UNIT_ASSERT(response); + THolder<NMsgBusProxy::TBusPersQueue> request = TRequestDescribePQ().GetRequest(topics); + + TAutoPtr<NBus::TBusMessage> reply; + const NMsgBusProxy::TBusResponse* response = SendAndGetReply(request.Release(), reply); + UNIT_ASSERT(response); if ((NMsgBusProxy::EResponseStatus)response->Record.GetStatus() != NMsgBusProxy::MSTATUS_OK) { UNIT_ASSERT(error); return {}; } UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)response->Record.GetStatus(), NMsgBusProxy::MSTATUS_OK, - "proxy failure"); - - auto res = response->Record.GetMetaResponse().GetCmdGetTopicMetadataResult(); + "proxy failure"); + + auto res = response->Record.GetMetaResponse().GetCmdGetTopicMetadataResult(); UNIT_ASSERT(topics.size() <= res.TopicInfoSize()); for (ui32 i = 0; i < res.TopicInfoSize(); ++i) { @@ -1357,21 +1357,21 @@ public: if (error) { UNIT_ASSERT(topicInfo.GetErrorCode() == NPersQueue::NErrorCode::INITIALIZING); } else { - UNIT_ASSERT(topicInfo.GetNumPartitions() > 0 || topicInfo.GetErrorCode() != (ui32)NPersQueue::NErrorCode::OK); - UNIT_ASSERT(topicInfo.GetConfig().HasPartitionConfig() || topicInfo.GetErrorCode() != (ui32)NPersQueue::NErrorCode::OK); - } + UNIT_ASSERT(topicInfo.GetNumPartitions() > 0 || topicInfo.GetErrorCode() != (ui32)NPersQueue::NErrorCode::OK); + UNIT_ASSERT(topicInfo.GetConfig().HasPartitionConfig() || topicInfo.GetErrorCode() != (ui32)NPersQueue::NErrorCode::OK); + } ui32 j = 0; for (; j < topics.size() && topics[j] != topicInfo.GetTopic(); ++j); UNIT_ASSERT(j == 0 || j != topics.size()); - } + } return res; - } - - void TestCase(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts, ui32 resCount, ui32 hasClientOffset, bool ok) { - GetPartOffset(topicsAndParts, resCount, hasClientOffset, ok); - GetPartLocation(topicsAndParts, resCount, ok); - GetPartStatus(topicsAndParts, resCount, 
ok); - } + } + + void TestCase(const TVector<std::pair<TString, TVector<ui32>>>& topicsAndParts, ui32 resCount, ui32 hasClientOffset, bool ok) { + GetPartOffset(topicsAndParts, resCount, hasClientOffset, ok); + GetPartLocation(topicsAndParts, resCount, ok); + GetPartStatus(topicsAndParts, resCount, ok); + } private: static TString GetAlterTopicsVersionQuery() { @@ -1417,7 +1417,7 @@ public: query << "DECLARE $version as Int64; " << GetAlterTopicsVersionQuery(); RunYqlDataQueryWithParams(query, params); } -}; - -} // namespace NPersQueueTests -} // namespace NKikimr +}; + +} // namespace NPersQueueTests +} // namespace NKikimr diff --git a/ydb/core/testlib/ya.make b/ydb/core/testlib/ya.make index 137fa829f3a..2e1ce7ee675 100644 --- a/ydb/core/testlib/ya.make +++ b/ydb/core/testlib/ya.make @@ -12,9 +12,9 @@ SRCS( actor_helpers.h fake_coordinator.cpp fake_coordinator.h - fake_scheme_shard.h + fake_scheme_shard.h minikql_compile.h - mock_pq_metacache.h + mock_pq_metacache.h tablet_flat_dummy.cpp tablet_helpers.cpp tablet_helpers.h diff --git a/ydb/core/tx/datashard/datashard_kqp.cpp b/ydb/core/tx/datashard/datashard_kqp.cpp index a262c98de5a..90d107f1822 100644 --- a/ydb/core/tx/datashard/datashard_kqp.cpp +++ b/ydb/core/tx/datashard/datashard_kqp.cpp @@ -764,9 +764,9 @@ class TKqpTaskRunnerExecutionContext : public NDq::IDqTaskRunnerExecutionContext public: NDq::IDqOutputConsumer::TPtr CreateOutputConsumer(const NDqProto::TTaskOutput& outputDesc, const NMiniKQL::TType* type, NUdf::IApplyContext* applyCtx, const NMiniKQL::TTypeEnvironment& typeEnv, - TVector<NDq::IDqOutput::TPtr>&& outputs) const override + TVector<NDq::IDqOutput::TPtr>&& outputs) const override { - return NKqp::KqpBuildOutputConsumer(outputDesc, type, applyCtx, typeEnv, std::move(outputs)); + return NKqp::KqpBuildOutputConsumer(outputDesc, type, applyCtx, typeEnv, std::move(outputs)); } NDq::IDqChannelStorage::TPtr CreateChannelStorage(ui64 /* channelId */) const override { diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp index 38e5f747774..b7262cb5339 100644 --- a/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp +++ b/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp @@ -131,7 +131,7 @@ public: auto tabletId = context.SS->ShardInfos[shardIdx].TabletID; Y_VERIFY(shard.TabletType == ETabletType::Kesus); - auto event = MakeHolder<NKesus::TEvKesus::TEvSetConfig>(ui64(OperationId.GetTxId()), *kesus->AlterConfig, kesus->AlterVersion); + auto event = MakeHolder<NKesus::TEvKesus::TEvSetConfig>(ui64(OperationId.GetTxId()), *kesus->AlterConfig, kesus->AlterVersion); event->Record.MutableConfig()->set_path(kesusPath.PathString()); // TODO: remove legacy field eventually event->Record.SetPath(kesusPath.PathString()); diff --git a/ydb/core/tx/tx_proxy/describe.cpp b/ydb/core/tx/tx_proxy/describe.cpp index deb38ca2b7d..a23315ed82a 100644 --- a/ydb/core/tx/tx_proxy/describe.cpp +++ b/ydb/core/tx/tx_proxy/describe.cpp @@ -381,7 +381,7 @@ void TDescribeReq::Handle(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult: "Actor# " << ctx.SelfID.ToString() << " Handle TEvDescribeSchemeResult" << " Forward to# " << Source.ToString() << - " Cookie: " << ev->Cookie << + " Cookie: " << ev->Cookie << " TEvDescribeSchemeResult: " << ev->Get()->ToString()); TxProxyMon->NavigateLatency->Collect((ctx.Now() - WallClockStarted).MilliSeconds()); diff --git a/ydb/core/viewer/content/viewer.js b/ydb/core/viewer/content/viewer.js index 
1108f42278c..28beed4af40 100644 --- a/ydb/core/viewer/content/viewer.js +++ b/ydb/core/viewer/content/viewer.js @@ -1496,8 +1496,8 @@ function tabletTypeToSymbol(type) { return "CN"; case "TenantSlotBroker": return "TB"; - case "Kesus": - return "K"; + case "Kesus": + return "K"; case "OlapShard": return "OS"; case "ColumnShard": @@ -2588,8 +2588,8 @@ function schemaPathTypeToString(pathType) { return "Table"; case 3: return "PersQueueGroup"; - case 7: - return "Kesus"; + case 7: + return "Kesus"; case 10: return "Tenant"; case 15: @@ -2843,53 +2843,53 @@ function onTreeNodeComplete(result, obj) { } } - if (result.PathDescription.Self.PathType === 7) { - tablet = String(result.PathDescription.Kesus.KesusTabletId); - SchemaTabletElements[tablet] = null; - - row = tab.insertRow(); - name = row.insertCell(-1); - value = row.insertCell(-1); - name.innerHTML = "PathId"; - value.innerHTML = String(result.PathDescription.Kesus.PathId); - - row = tab.insertRow(); - name = row.insertCell(-1); - value = row.insertCell(-1); - name.innerHTML = "KesusTabletId"; - value.innerHTML = String(result.PathDescription.Kesus.KesusTabletId); - - row = tab.insertRow(); - name = row.insertCell(-1); - value = row.insertCell(-1); - name.innerHTML = "Version"; - value.innerHTML = String(result.PathDescription.Kesus.Version); - - row = tab.insertRow(); - name = row.insertCell(-1); - value = row.insertCell(-1); - name.innerHTML = "Self check period (ms)"; - value.innerHTML = String(result.PathDescription.Kesus.Config.self_check_period_millis); - - row = tab.insertRow(); - name = row.insertCell(-1); - value = row.insertCell(-1); - name.innerHTML = "Session grace period (ms)"; - value.innerHTML = String(result.PathDescription.Kesus.Config.session_grace_period_millis); - - row = tab.insertRow(); - name = row.insertCell(-1); - value = row.insertCell(-1); - name.innerHTML = "Read consistency mode"; - value.innerHTML = String(result.PathDescription.Kesus.Config.read_consistency_mode); - - row = tab.insertRow(); - name = row.insertCell(-1); - value = row.insertCell(-1); - name.innerHTML = "Attach consistency mode"; - value.innerHTML = String(result.PathDescription.Kesus.Config.attach_consistency_mode); - } - + if (result.PathDescription.Self.PathType === 7) { + tablet = String(result.PathDescription.Kesus.KesusTabletId); + SchemaTabletElements[tablet] = null; + + row = tab.insertRow(); + name = row.insertCell(-1); + value = row.insertCell(-1); + name.innerHTML = "PathId"; + value.innerHTML = String(result.PathDescription.Kesus.PathId); + + row = tab.insertRow(); + name = row.insertCell(-1); + value = row.insertCell(-1); + name.innerHTML = "KesusTabletId"; + value.innerHTML = String(result.PathDescription.Kesus.KesusTabletId); + + row = tab.insertRow(); + name = row.insertCell(-1); + value = row.insertCell(-1); + name.innerHTML = "Version"; + value.innerHTML = String(result.PathDescription.Kesus.Version); + + row = tab.insertRow(); + name = row.insertCell(-1); + value = row.insertCell(-1); + name.innerHTML = "Self check period (ms)"; + value.innerHTML = String(result.PathDescription.Kesus.Config.self_check_period_millis); + + row = tab.insertRow(); + name = row.insertCell(-1); + value = row.insertCell(-1); + name.innerHTML = "Session grace period (ms)"; + value.innerHTML = String(result.PathDescription.Kesus.Config.session_grace_period_millis); + + row = tab.insertRow(); + name = row.insertCell(-1); + value = row.insertCell(-1); + name.innerHTML = "Read consistency mode"; + value.innerHTML = 
String(result.PathDescription.Kesus.Config.read_consistency_mode); + + row = tab.insertRow(); + name = row.insertCell(-1); + value = row.insertCell(-1); + name.innerHTML = "Attach consistency mode"; + value.innerHTML = String(result.PathDescription.Kesus.Config.attach_consistency_mode); + } + if (Object.keys(SchemaTabletElements).length !== 0) { panelTablets.innerHTML = "<img src='throbber.gif'></img>"; row = tab.insertRow(); diff --git a/ydb/core/ymq/actor/action.h b/ydb/core/ymq/actor/action.h index 1801c98d68f..dc5b3d7f8c6 100644 --- a/ydb/core/ymq/actor/action.h +++ b/ydb/core/ymq/actor/action.h @@ -1,13 +1,13 @@ #pragma once -#include "defs.h" +#include "defs.h" #include "actor.h" -#include "cfg.h" -#include "error.h" +#include "cfg.h" +#include "error.h" #include "events.h" #include "limits.h" -#include "log.h" -#include "proxy_actor.h" +#include "log.h" +#include "proxy_actor.h" #include "serviceid.h" #include "schema.h" @@ -31,54 +31,54 @@ #include <util/string/ascii.h> #include <util/string/join.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { template <typename TDerived> class TActionActor - : public TActorBootstrapped<TDerived> + : public TActorBootstrapped<TDerived> { public: - TActionActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, EAction action, THolder<IReplyCallback> cb) - : Action_(action) - , RequestId_(sourceSqsRequest.GetRequestId()) + TActionActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, EAction action, THolder<IReplyCallback> cb) + : Action_(action) + , RequestId_(sourceSqsRequest.GetRequestId()) , Cb_(std::move(cb)) , Shards_(1) - , SourceSqsRequest_(sourceSqsRequest) + , SourceSqsRequest_(sourceSqsRequest) { - Y_VERIFY(RequestId_); - DebugInfo->ActionActors.emplace(RequestId_, this); - } - - ~TActionActor() { - DebugInfo->ActionActors.EraseKeyValue(RequestId_, this); + Y_VERIFY(RequestId_); + DebugInfo->ActionActors.emplace(RequestId_, this); } + ~TActionActor() { + DebugInfo->ActionActors.EraseKeyValue(RequestId_, this); + } + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_ACTOR; - } - - static constexpr bool NeedQueueAttributes() { // override it in TDerived if needed - return false; - } - - // For queue requests - static constexpr bool NeedExistingQueue() { - return true; - } - + } + + static constexpr bool NeedQueueAttributes() { // override it in TDerived if needed + return false; + } + + // For queue requests + static constexpr bool NeedExistingQueue() { + return true; + } + static constexpr bool CreateMissingAccount() { return false; } - - static constexpr bool NeedUserSpecified() { - return true; - } - - void DoCloudBootstrap() { + + static constexpr bool NeedUserSpecified() { + return true; + } + + void DoCloudBootstrap() { if (!SecurityToken_) { // TODO: use access service - MakeError(MutableErrorDesc(), NErrors::INVALID_CLIENT_TOKEN_ID, "Failed to parse cloud id."); - SendReplyAndDie(); + MakeError(MutableErrorDesc(), NErrors::INVALID_CLIENT_TOKEN_ID, "Failed to parse cloud id."); + SendReplyAndDie(); return; } @@ -87,69 +87,69 @@ public: FolderId_ = TString(tokenBuf); } - void DoBootstrap() { - ui64 configurationFlags = 0; - if (TDerived::NeedQueueAttributes()) { - configurationFlags |= TSqsEvents::TEvGetConfiguration::EFlags::NeedQueueAttributes; - } - if (TProxyActor::NeedCreateProxyActor(Action_)) { + void DoBootstrap() { + ui64 configurationFlags = 0; + if (TDerived::NeedQueueAttributes()) { + configurationFlags |= 
TSqsEvents::TEvGetConfiguration::EFlags::NeedQueueAttributes; + } + if (TProxyActor::NeedCreateProxyActor(Action_)) { configurationFlags |= TSqsEvents::TEvGetConfiguration::EFlags::NeedQueueLeader; - } - this->Send(MakeSqsServiceID(this->SelfId().NodeId()), + } + this->Send(MakeSqsServiceID(this->SelfId().NodeId()), MakeHolder<TSqsEvents::TEvGetConfiguration>( - RequestId_, + RequestId_, UserName_, - GetQueueName(), - configurationFlags) + GetQueueName(), + configurationFlags) ); } - void CreateAccountOnTheFly() const { + void CreateAccountOnTheFly() const { // TODO: move to separate actor - this->Register( - new TCreateUserSchemaActor(Cfg().GetRoot(), UserName_, this->SelfId(), RequestId_, UserCounters_) + this->Register( + new TCreateUserSchemaActor(Cfg().GetRoot(), UserName_, this->SelfId(), RequestId_, UserCounters_) ); } - void HandleAccountCreated(TSqsEvents::TEvUserCreated::TPtr& ev) { - auto* detailedCounters = UserCounters_ ? UserCounters_->GetDetailedCounters() : nullptr; + void HandleAccountCreated(TSqsEvents::TEvUserCreated::TPtr& ev) { + auto* detailedCounters = UserCounters_ ? UserCounters_->GetDetailedCounters() : nullptr; if (ev->Get()->Success) { - INC_COUNTER(detailedCounters, CreateAccountOnTheFly_Success); + INC_COUNTER(detailedCounters, CreateAccountOnTheFly_Success); } else { - RLOG_SQS_ERROR("Failed to create cloud account on the fly. Account name: " << UserName_); + RLOG_SQS_ERROR("Failed to create cloud account on the fly. Account name: " << UserName_); MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE); - INC_COUNTER(detailedCounters, CreateAccountOnTheFly_Errors); - SendReplyAndDie(); + INC_COUNTER(detailedCounters, CreateAccountOnTheFly_Errors); + SendReplyAndDie(); return; } - DoBootstrap(); + DoBootstrap(); } - void Bootstrap(const NActors::TActorContext&) { - RLOG_SQS_DEBUG("Request started. Actor: " << this->SelfId()); // log new request id - StartTs_ = TActivationContext::Now(); + void Bootstrap(const NActors::TActorContext&) { + RLOG_SQS_DEBUG("Request started. 
Actor: " << this->SelfId()); // log new request id + StartTs_ = TActivationContext::Now(); - const auto& cfg = Cfg(); + const auto& cfg = Cfg(); this->Become(&TActionActor::InitialState); - // Set timeout - if (cfg.GetRequestTimeoutMs()) { - this->Schedule(TDuration::MilliSeconds(cfg.GetRequestTimeoutMs()), new TEvWakeup(REQUEST_TIMEOUT_WAKEUP_TAG), TimeoutCookie_.Get()); - } - + // Set timeout + if (cfg.GetRequestTimeoutMs()) { + this->Schedule(TDuration::MilliSeconds(cfg.GetRequestTimeoutMs()), new TEvWakeup(REQUEST_TIMEOUT_WAKEUP_TAG), TimeoutCookie_.Get()); + } + if (IsCloud()) { - DoCloudBootstrap(); + DoCloudBootstrap(); if (TDerived::CreateMissingAccount()) { - CreateAccountOnTheFly(); + CreateAccountOnTheFly(); return; } } - DoBootstrap(); + DoBootstrap(); } protected: @@ -163,19 +163,19 @@ protected: UserName_ = request.GetAuth().GetUserName(); } - virtual void DoAction() = 0; - - virtual TError* MutableErrorDesc() = 0; + virtual void DoAction() = 0; + virtual TError* MutableErrorDesc() = 0; + virtual TString DoGetQueueName() const = 0; - virtual bool Validate() { - if (TDerived::NeedUserSpecified() && !UserName_) { - MakeError(MutableErrorDesc(), NErrors::ACCESS_DENIED, "No account name."); - } - return DoValidate(); - } - + virtual bool Validate() { + if (TDerived::NeedUserSpecified() && !UserName_) { + MakeError(MutableErrorDesc(), NErrors::ACCESS_DENIED, "No account name."); + } + return DoValidate(); + } + virtual bool DoValidate() { return true; } @@ -189,38 +189,38 @@ protected: virtual void DoFinish() { } - virtual TString DumpState() { - TStringBuilder ret; - ret << "SecurityCheckRequestsToWaitFor: " << SecurityCheckRequestsToWaitFor_ - << " Shards: " << Shards_ - << " SchemeCache: " << SchemeCache_; - if (QueueAttributes_) { - ret << " QueueAttributes: " << *QueueAttributes_; - } - ret << "Response: " << Response_; - return std::move(ret); - } - - virtual bool HandleWakeup(TEvWakeup::TPtr& ev) { - if (ev->Get()->Tag == REQUEST_TIMEOUT_WAKEUP_TAG) { - HandleRequestTimeout(); - return true; - } - return false; - } - - virtual void OnRequestTimeout() { - } - - void HandleRequestTimeout() { - OnRequestTimeout(); - - RLOG_SQS_ERROR("Request timeout. User [" << UserName_ << "] Queue [" << GetQueueName() << "] Action [" << Action_ << "]. State: { " << DumpState() << " }"); - if (QueueCounters_) { + virtual TString DumpState() { + TStringBuilder ret; + ret << "SecurityCheckRequestsToWaitFor: " << SecurityCheckRequestsToWaitFor_ + << " Shards: " << Shards_ + << " SchemeCache: " << SchemeCache_; + if (QueueAttributes_) { + ret << " QueueAttributes: " << *QueueAttributes_; + } + ret << "Response: " << Response_; + return std::move(ret); + } + + virtual bool HandleWakeup(TEvWakeup::TPtr& ev) { + if (ev->Get()->Tag == REQUEST_TIMEOUT_WAKEUP_TAG) { + HandleRequestTimeout(); + return true; + } + return false; + } + + virtual void OnRequestTimeout() { + } + + void HandleRequestTimeout() { + OnRequestTimeout(); + + RLOG_SQS_ERROR("Request timeout. User [" << UserName_ << "] Queue [" << GetQueueName() << "] Action [" << Action_ << "]. State: { " << DumpState() << " }"); + if (QueueCounters_) { INC_COUNTER_COUPLE(QueueCounters_, RequestTimeouts, request_timeouts_count_per_second); - } else if (UserCounters_) { - INC_COUNTER(UserCounters_, RequestTimeouts); - } else { + } else if (UserCounters_) { + INC_COUNTER(UserCounters_, RequestTimeouts); + } else { TIntrusivePtrCntrCouple rootCounters { SqsCoreCounters_ ? 
SqsCoreCounters_ : GetSqsServiceCounters(AppData()->Counters, "core"), GetYmqPublicCounters(AppData()->Counters) @@ -230,46 +230,46 @@ protected: queueCounters.SqsCounters->GetCounter("RequestTimeouts", true)->Inc(); } else if (userCounters.SqsCounters) { userCounters.SqsCounters->GetCounter("RequestTimeouts", true)->Inc(); - } + } if (queueCounters.YmqCounters) { queueCounters.YmqCounters->GetCounter("RequestTimeouts", true)->Inc(); } - } - - MakeError(MutableErrorDesc(), NErrors::TIMEOUT); - SendReplyAndDie(); - } - - TString GetQueueName() const { + } + + MakeError(MutableErrorDesc(), NErrors::TIMEOUT); + SendReplyAndDie(); + } + + TString GetQueueName() const { return DoGetQueueName(); } - TQueuePath GetQueuePath() const { - const TString root = Cfg().GetRoot(); - return TQueuePath(root, UserName_, DoGetQueueName()); + TQueuePath GetQueuePath() const { + const TString root = Cfg().GetRoot(); + return TQueuePath(root, UserName_, DoGetQueueName()); } - TQueuePath GetUserPath() const { - const TString root = Cfg().GetRoot(); - return TQueuePath(root, UserName_, TString()); + TQueuePath GetUserPath() const { + const TString root = Cfg().GetRoot(); + return TQueuePath(root, UserName_, TString()); } - TString MakeQueueUrl(const TString& name) const { + TString MakeQueueUrl(const TString& name) const { return Join("/", RootUrl_, UserName_, name); } - void SendReplyAndDie() { - RLOG_SQS_TRACE("SendReplyAndDie from action actor " << Response_); + void SendReplyAndDie() { + RLOG_SQS_TRACE("SendReplyAndDie from action actor " << Response_); auto actionCountersCouple = GetActionCounters(); - auto* detailedCounters = UserCounters_ ? UserCounters_->GetDetailedCounters() : nullptr; - const size_t errors = ErrorsCount(Response_, detailedCounters ? &detailedCounters->APIStatuses : nullptr); + auto* detailedCounters = UserCounters_ ? UserCounters_->GetDetailedCounters() : nullptr; + const size_t errors = ErrorsCount(Response_, detailedCounters ? &detailedCounters->APIStatuses : nullptr); if (actionCountersCouple.SqsCounters) { - if (errors) { + if (errors) { ADD_COUNTER(actionCountersCouple.SqsCounters, Errors, errors); - } else { + } else { INC_COUNTER(actionCountersCouple.SqsCounters, Success); - } - } + } + } if (actionCountersCouple.YmqCounters) { if (errors) { ADD_COUNTER(actionCountersCouple.YmqCounters, Errors, errors); @@ -277,82 +277,82 @@ protected: INC_COUNTER(actionCountersCouple.YmqCounters, Success); } } - FinishTs_ = TActivationContext::Now(); - const TDuration workingDuration = GetRequestWorkingDuration(); - RLOG_SQS_DEBUG("Request " << Action_ << " working duration: " << workingDuration.MilliSeconds() << "ms"); + FinishTs_ = TActivationContext::Now(); + const TDuration workingDuration = GetRequestWorkingDuration(); + RLOG_SQS_DEBUG("Request " << Action_ << " working duration: " << workingDuration.MilliSeconds() << "ms"); if (actionCountersCouple.Defined()) { - const TDuration duration = GetRequestDuration(); + const TDuration duration = GetRequestDuration(); COLLECT_HISTOGRAM_COUNTER_COUPLE(actionCountersCouple, Duration, duration.MilliSeconds()); COLLECT_HISTOGRAM_COUNTER_COUPLE(actionCountersCouple, WorkingDuration, workingDuration.MilliSeconds()); - } - if (IsRequestSlow()) { - PrintSlowRequestWarning(); - } - Finish(); + } + if (IsRequestSlow()) { + PrintSlowRequestWarning(); + } + Finish(); - if (Cfg().GetYandexCloudMode()) { + if (Cfg().GetYandexCloudMode()) { Response_.SetFolderId(FolderId_); Response_.SetIsFifo(IsFifo_ ? 
*IsFifo_ : false); Response_.SetResourceId(GetQueueName()); } - Cb_->DoSendReply(Response_); - PassAway(); + Cb_->DoSendReply(Response_); + PassAway(); } - void PassAway() { - if (TProxyActor::NeedCreateProxyActor(Action_)) { - if (TString queueName = GetQueueName()) { + void PassAway() { + if (TProxyActor::NeedCreateProxyActor(Action_)) { + if (TString queueName = GetQueueName()) { this->Send(MakeSqsServiceID(this->SelfId().NodeId()), new TSqsEvents::TEvQueueLeaderDecRef()); - } - } - if (StartRequestWasCalled_ != FinishRequestWasCalled_) { - RLOG_SQS_WARN("Start/Finish calls inconsistency. Start: " << StartRequestWasCalled_ << ", Finish: " << FinishRequestWasCalled_); - } - TActorBootstrapped<TDerived>::PassAway(); - } - - void DoRoutine() { - RLOG_SQS_TRACE("DoRoutine"); - Start(); - if (Validate()) { - DoAction(); + } + } + if (StartRequestWasCalled_ != FinishRequestWasCalled_) { + RLOG_SQS_WARN("Start/Finish calls inconsistency. Start: " << StartRequestWasCalled_ << ", Finish: " << FinishRequestWasCalled_); + } + TActorBootstrapped<TDerived>::PassAway(); + } + + void DoRoutine() { + RLOG_SQS_TRACE("DoRoutine"); + Start(); + if (Validate()) { + DoAction(); } else { - SendReplyAndDie(); - } - } - - // Duration of request - virtual TDuration GetRequestDuration() const { - return FinishTs_ - StartTs_; - } - - // Duration of sleeps (waits) in request - virtual TDuration GetRequestWaitDuration() const { - return TDuration::Zero(); - } - - // Duration of request without sleeps - virtual TDuration GetRequestWorkingDuration() const { - return GetRequestDuration() - GetRequestWaitDuration(); - } - + SendReplyAndDie(); + } + } + + // Duration of request + virtual TDuration GetRequestDuration() const { + return FinishTs_ - StartTs_; + } + + // Duration of sleeps (waits) in request + virtual TDuration GetRequestWaitDuration() const { + return TDuration::Zero(); + } + + // Duration of request without sleeps + virtual TDuration GetRequestWorkingDuration() const { + return GetRequestDuration() - GetRequestWaitDuration(); + } + virtual TString GetCustomACLPath() const { return GetQueuePath().GetQueuePath(); } - virtual bool IsRequestSlow() const { - return GetRequestWorkingDuration() >= TDuration::MilliSeconds(Cfg().GetSlowRequestTimeMs()); - } - - void PrintSlowRequestWarning() { - RLOG_SQS_INFO("Request [" << UserName_ << "] [" << GetQueueName() << "] [" << Action_ << "] is slow. Working duration: " << GetRequestWorkingDuration().MilliSeconds() << "ms"); - } + virtual bool IsRequestSlow() const { + return GetRequestWorkingDuration() >= TDuration::MilliSeconds(Cfg().GetSlowRequestTimeMs()); + } + + void PrintSlowRequestWarning() { + RLOG_SQS_INFO("Request [" << UserName_ << "] [" << GetQueueName() << "] [" << Action_ << "] is slow. 
Working duration: " << GetRequestWorkingDuration().MilliSeconds() << "ms"); + } TString SanitizeNodePath(const TString& path) const { TStringBuf sanitizedPath(path); // just skip SQS root path if there's such a prefix - if (sanitizedPath.SkipPrefix(TStringBuf(Cfg().GetRoot()))) { // always skip SQS root prefix + if (sanitizedPath.SkipPrefix(TStringBuf(Cfg().GetRoot()))) { // always skip SQS root prefix return TString(sanitizedPath); } else { Y_VERIFY(false); // should never be applied in any other way @@ -363,7 +363,7 @@ protected: TString MakeAbsolutePath(const TString& relativePath) const { TStringBuilder fullPath; - fullPath << Cfg().GetRoot(); + fullPath << Cfg().GetRoot(); if (!relativePath.StartsWith("/")) { fullPath << "/"; } @@ -385,7 +385,7 @@ protected: } bool IsCloud() const { - return Cfg().GetYandexCloudMode(); + return Cfg().GetYandexCloudMode(); } bool IsInternalResource(const TString& path) const { @@ -402,14 +402,14 @@ protected: TCountersCouple<TActionCounters*> GetActionCounters() const { TCountersCouple<TActionCounters*> result{nullptr, nullptr}; - if (IsActionForQueue(Action_) && QueueCounters_) { - if (IsActionForMessage(Action_) || QueueCounters_->NeedToShowDetailedCounters()) { + if (IsActionForQueue(Action_) && QueueCounters_) { + if (IsActionForMessage(Action_) || QueueCounters_->NeedToShowDetailedCounters()) { result.SqsCounters = &QueueCounters_->SqsActionCounters[Action_]; - } - } else if (IsActionForUser(Action_) && UserCounters_) { - if (UserCounters_->NeedToShowDetailedCounters()) { + } + } else if (IsActionForUser(Action_) && UserCounters_) { + if (UserCounters_->NeedToShowDetailedCounters()) { result.SqsCounters = &UserCounters_->SqsActionCounters[Action_]; - } + } } if (IsActionForQueueYMQ(Action_) && QueueCounters_) { if (IsActionForMessage(Action_) || QueueCounters_->NeedToShowDetailedCounters()) { @@ -423,7 +423,7 @@ protected: return result; } - void RequestSchemeCache(const TString& path) { + void RequestSchemeCache(const TString& path) { auto schemeCacheRequest = MakeHolder<NSchemeCache::TSchemeCacheNavigate>(); NSchemeCache::TSchemeCacheNavigate::TEntry entry; @@ -431,35 +431,35 @@ protected: entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpPath; schemeCacheRequest->ResultSet.emplace_back(entry); - this->Send(SchemeCache_, new TEvTxProxySchemeCache::TEvNavigateKeySet(schemeCacheRequest.Release())); + this->Send(SchemeCache_, new TEvTxProxySchemeCache::TEvNavigateKeySet(schemeCacheRequest.Release())); } private: - STATEFN(InitialState) { + STATEFN(InitialState) { switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvConfiguration, HandleConfiguration); - hFunc(TSqsEvents::TEvUserCreated, HandleAccountCreated); - hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvConfiguration, HandleConfiguration); + hFunc(TSqsEvents::TEvUserCreated, HandleAccountCreated); + hFunc(TEvWakeup, HandleWakeup); } } - STATEFN(WaitAuthCheckMessages) { + STATEFN(WaitAuthCheckMessages) { switch (ev->GetTypeRewrite()) { - hFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, HandleSchemeCacheResponse); + hFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, HandleSchemeCacheResponse); hFunc(TEvTicketParser::TEvAuthorizeTicketResult, HandleTicketParserResponse); - hFunc(TEvWakeup, HandleWakeup); - } - } - - STATEFN(WaitQuotaState) { - switch (ev->GetTypeRewrite()) { - hFunc(TEvQuota::TEvClearance, HandleQuota); - hFunc(TEvWakeup, HandleWakeup); + hFunc(TEvWakeup, HandleWakeup); } } + STATEFN(WaitQuotaState) { + switch (ev->GetTypeRewrite()) { + 
hFunc(TEvQuota::TEvClearance, HandleQuota); + hFunc(TEvWakeup, HandleWakeup); + } + } + TString GetActionACLSourcePath() const { - const EACLSourceType aclSourceType = GetActionACLSourceType(ToString(Action_)); + const EACLSourceType aclSourceType = GetActionACLSourceType(ToString(Action_)); switch (aclSourceType) { case EACLSourceType::Unknown: { return {}; @@ -481,100 +481,100 @@ private: return {}; } - void RequestTicketParser() { + void RequestTicketParser() { this->Send(MakeTicketParserID(), new TEvTicketParser::TEvAuthorizeTicket(SecurityToken_)); } bool IsACLProtectedAccount(const TString& accountName) const { if (accountName) { // temporary O(N) solution since the list contains up to 100 items - return !IsIn(Cfg().GetAccountsWithoutMandatoryAuth(), accountName); + return !IsIn(Cfg().GetAccountsWithoutMandatoryAuth(), accountName); } return true; } - void HandleConfiguration(TSqsEvents::TEvConfiguration::TPtr& ev) { - const TDuration confDuration = TActivationContext::Now() - StartTs_; - RLOG_SQS_DEBUG("Get configuration duration: " << confDuration.MilliSeconds() << "ms"); - - RootUrl_ = std::move(ev->Get()->RootUrl); - UserExists_ = ev->Get()->UserExists; - QueueExists_ = ev->Get()->QueueExists; + void HandleConfiguration(TSqsEvents::TEvConfiguration::TPtr& ev) { + const TDuration confDuration = TActivationContext::Now() - StartTs_; + RLOG_SQS_DEBUG("Get configuration duration: " << confDuration.MilliSeconds() << "ms"); + + RootUrl_ = std::move(ev->Get()->RootUrl); + UserExists_ = ev->Get()->UserExists; + QueueExists_ = ev->Get()->QueueExists; Shards_ = ev->Get()->Shards; IsFifo_ = ev->Get()->Fifo; - QueueAttributes_ = std::move(ev->Get()->QueueAttributes); + QueueAttributes_ = std::move(ev->Get()->QueueAttributes); SchemeCache_ = ev->Get()->SchemeCache; - SqsCoreCounters_ = std::move(ev->Get()->SqsCoreCounters); - QueueCounters_ = std::move(ev->Get()->QueueCounters); - UserCounters_ = std::move(ev->Get()->UserCounters); + SqsCoreCounters_ = std::move(ev->Get()->SqsCoreCounters); + QueueCounters_ = std::move(ev->Get()->QueueCounters); + UserCounters_ = std::move(ev->Get()->UserCounters); QueueLeader_ = ev->Get()->QueueLeader; - QuoterResources_ = std::move(ev->Get()->QuoterResources); + QuoterResources_ = std::move(ev->Get()->QuoterResources); Y_VERIFY(SchemeCache_); - RLOG_SQS_TRACE("Got configuration. Root url: " << RootUrl_ - << ", Shards: " << Shards_ - << ", Fail: " << ev->Get()->Fail); - - if (QueueCounters_) { - auto* detailedCounters = QueueCounters_ ? QueueCounters_->GetDetailedCounters() : nullptr; - COLLECT_HISTOGRAM_COUNTER(detailedCounters, GetConfiguration_Duration, confDuration.MilliSeconds()); - } else if (UserCounters_) { - auto* detailedCounters = UserCounters_ ? UserCounters_->GetDetailedCounters() : nullptr; - COLLECT_HISTOGRAM_COUNTER(detailedCounters, GetConfiguration_Duration, confDuration.MilliSeconds()); - } - - const bool needQueueAttributes = TDerived::NeedQueueAttributes(); - if (needQueueAttributes) { - Y_VERIFY(ev->Get()->Fail || !ev->Get()->QueueExists || QueueAttributes_.Defined()); - - if (QueueAttributes_.Defined()) { - RLOG_SQS_TRACE("Got configuration. 
Attributes: " << *QueueAttributes_); - } - } - - if (ev->Get()->Fail) { - MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE, "Failed to get configuration."); - SendReplyAndDie(); - return; - } - - if (TDerived::NeedExistingQueue() && !ev->Get()->QueueExists) { - MakeError(MutableErrorDesc(), NErrors::NON_EXISTENT_QUEUE); - SendReplyAndDie(); - return; - } - - bool isACLProtectedAccount = Cfg().GetForceAccessControl(); - if (!IsCloud() && (SecurityToken_ || (Cfg().GetForceAccessControl() && (isACLProtectedAccount = IsACLProtectedAccount(UserName_))))) { + RLOG_SQS_TRACE("Got configuration. Root url: " << RootUrl_ + << ", Shards: " << Shards_ + << ", Fail: " << ev->Get()->Fail); + + if (QueueCounters_) { + auto* detailedCounters = QueueCounters_ ? QueueCounters_->GetDetailedCounters() : nullptr; + COLLECT_HISTOGRAM_COUNTER(detailedCounters, GetConfiguration_Duration, confDuration.MilliSeconds()); + } else if (UserCounters_) { + auto* detailedCounters = UserCounters_ ? UserCounters_->GetDetailedCounters() : nullptr; + COLLECT_HISTOGRAM_COUNTER(detailedCounters, GetConfiguration_Duration, confDuration.MilliSeconds()); + } + + const bool needQueueAttributes = TDerived::NeedQueueAttributes(); + if (needQueueAttributes) { + Y_VERIFY(ev->Get()->Fail || !ev->Get()->QueueExists || QueueAttributes_.Defined()); + + if (QueueAttributes_.Defined()) { + RLOG_SQS_TRACE("Got configuration. Attributes: " << *QueueAttributes_); + } + } + + if (ev->Get()->Fail) { + MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE, "Failed to get configuration."); + SendReplyAndDie(); + return; + } + + if (TDerived::NeedExistingQueue() && !ev->Get()->QueueExists) { + MakeError(MutableErrorDesc(), NErrors::NON_EXISTENT_QUEUE); + SendReplyAndDie(); + return; + } + + bool isACLProtectedAccount = Cfg().GetForceAccessControl(); + if (!IsCloud() && (SecurityToken_ || (Cfg().GetForceAccessControl() && (isACLProtectedAccount = IsACLProtectedAccount(UserName_))))) { this->Become(&TActionActor::WaitAuthCheckMessages); const auto& actionACLSourcePath = GetActionACLSourcePath(); if (!actionACLSourcePath || IsForbiddenPath(actionACLSourcePath)) { - RLOG_SQS_ERROR("Bad ACL source path " << actionACLSourcePath << " for " << Action_ << " action"); + RLOG_SQS_ERROR("Bad ACL source path " << actionACLSourcePath << " for " << Action_ << " action"); MakeError(MutableErrorDesc(), NErrors::ACCESS_DENIED); - SendReplyAndDie(); + SendReplyAndDie(); return; } - if (!SecurityToken_) { - MakeError(MutableErrorDesc(), NErrors::INVALID_CLIENT_TOKEN_ID, "No security token was provided."); - SendReplyAndDie(); - return; - } - - RequestSchemeCache(GetActionACLSourcePath()); // this also checks that requested queue (if any) does exist - RequestTicketParser(); + if (!SecurityToken_) { + MakeError(MutableErrorDesc(), NErrors::INVALID_CLIENT_TOKEN_ID, "No security token was provided."); + SendReplyAndDie(); + return; + } + + RequestSchemeCache(GetActionACLSourcePath()); // this also checks that requested queue (if any) does exist + RequestTicketParser(); } else { - if (!isACLProtectedAccount) { // !IsCloud && !SecurityToken_ && account is in AccountsWithoutMandatoryAuth setting. - INC_COUNTER(UserCounters_, UnauthenticatedAccess); // if !ForceAccessControl, this counter is not initialized. - } + if (!isACLProtectedAccount) { // !IsCloud && !SecurityToken_ && account is in AccountsWithoutMandatoryAuth setting. + INC_COUNTER(UserCounters_, UnauthenticatedAccess); // if !ForceAccessControl, this counter is not initialized. 
+ } // old habits - DoGetQuotaAndProcess(); + DoGetQuotaAndProcess(); } } - void HandleSchemeCacheResponse(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev) { + void HandleSchemeCacheResponse(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev) { TEvTxProxySchemeCache::TEvNavigateKeySetResult* msg = ev->Get(); const NSchemeCache::TSchemeCacheNavigate* navigate = msg->Request.Get(); @@ -590,123 +590,123 @@ private: MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE); } - SendReplyAndDie(); + SendReplyAndDie(); return; } SecurityObject_ = navigate->ResultSet.front().SecurityObject; - OnAuthCheckMessage(); + OnAuthCheckMessage(); } void HandleTicketParserResponse(TEvTicketParser::TEvAuthorizeTicketResult::TPtr& ev) { const TEvTicketParser::TEvAuthorizeTicketResult& result(*ev->Get()); if (!result.Error.empty()) { - RLOG_SQS_ERROR("Got ticket parser error: " << result.Error << ". " << Action_ << " was rejected"); + RLOG_SQS_ERROR("Got ticket parser error: " << result.Error << ". " << Action_ << " was rejected"); MakeError(MutableErrorDesc(), NErrors::ACCESS_DENIED); - SendReplyAndDie(); + SendReplyAndDie(); return; } else { UserToken_ = ev->Get()->Token; Y_VERIFY(UserToken_); } - OnAuthCheckMessage(); + OnAuthCheckMessage(); } - void OnAuthCheckMessage() { + void OnAuthCheckMessage() { --SecurityCheckRequestsToWaitFor_; if (SecurityCheckRequestsToWaitFor_ == 0) { - const TString& actionName = ToString(Action_); - const ui32 requiredAccess = GetActionRequiredAccess(actionName); + const TString& actionName = ToString(Action_); + const ui32 requiredAccess = GetActionRequiredAccess(actionName); UserSID_ = UserToken_->GetUserSID(); if (requiredAccess != 0 && SecurityObject_ && !SecurityObject_->CheckAccess(requiredAccess, *UserToken_)) { - if (Action_ == EAction::ModifyPermissions) { + if (Action_ == EAction::ModifyPermissions) { // do not spam for other actions - RLOG_SQS_WARN("User " << UserSID_ << " tried to modify ACL for " << GetActionACLSourcePath() << ". Access denied"); + RLOG_SQS_WARN("User " << UserSID_ << " tried to modify ACL for " << GetActionACLSourcePath() << ". 
Access denied"); } - MakeError(MutableErrorDesc(), NErrors::ACCESS_DENIED, Sprintf("%s on %s was denied for %s due to missing permission %s.", - actionName.c_str(), SanitizeNodePath(GetActionACLSourcePath()).c_str(), UserSID_.c_str(), GetActionMatchingACE(actionName).c_str())); - SendReplyAndDie(); + MakeError(MutableErrorDesc(), NErrors::ACCESS_DENIED, Sprintf("%s on %s was denied for %s due to missing permission %s.", + actionName.c_str(), SanitizeNodePath(GetActionACLSourcePath()).c_str(), UserSID_.c_str(), GetActionMatchingACE(actionName).c_str())); + SendReplyAndDie(); return; } - DoGetQuotaAndProcess(); - } - } - - void DoGetQuotaAndProcess() { - if (SourceSqsRequest_.GetRequestRateLimit() && Cfg().GetQuotingConfig().GetEnableQuoting() && QuoterResources_) { - this->Become(&TActionActor::WaitQuotaState); - RLOG_SQS_DEBUG("Requesting quota"); - QuotaRequestTs_ = TActivationContext::Now(); - ui64 quoterId = 0; - ui64 resourceId = 0; - TDuration deadline = TDuration::Max(); // defaut deadline is infinity - auto resourceForAction = QuoterResources_->ActionsResources.find(Action_); - if (resourceForAction != QuoterResources_->ActionsResources.end()) { - quoterId = resourceForAction->second.QuoterId; - resourceId = resourceForAction->second.ResourceId; - } else { - quoterId = QuoterResources_->OtherActions.QuoterId; - resourceId = QuoterResources_->OtherActions.ResourceId; - } - if (Cfg().GetQuotingConfig().HasQuotaDeadlineMs()) { - deadline = TDuration::MilliSeconds(Cfg().GetQuotingConfig().GetQuotaDeadlineMs()); - } - this->Send(MakeQuoterServiceID(), - new TEvQuota::TEvRequest( - TEvQuota::EResourceOperator::And, - { TEvQuota::TResourceLeaf(quoterId, resourceId, 1) }, - deadline)); - } else { - DoRoutine(); - } - } - - void HandleQuota(TEvQuota::TEvClearance::TPtr& ev) { - const TDuration quotaWaitDuration = TActivationContext::Now() - QuotaRequestTs_; - switch (ev->Get()->Result) { - case TEvQuota::TEvClearance::EResult::GenericError: - case TEvQuota::TEvClearance::EResult::UnknownResource: { - RLOG_SQS_ERROR("Failed to get quota: " << ev->Get()->Result); - MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE); - SendReplyAndDie(); - break; - } - case TEvQuota::TEvClearance::EResult::Deadline: { - RLOG_SQS_WARN("Failed to get quota: deadline expired. Quota wait duration: " << quotaWaitDuration << ". Action: " << Action_); - INC_COUNTER(QueueCounters_, RequestsThrottled); - MakeError(MutableErrorDesc(), NErrors::THROTTLING_EXCEPTION); - SendReplyAndDie(); - break; - } - case TEvQuota::TEvClearance::EResult::Success: { - RLOG_SQS_DEBUG("Successfully got quota for request. Quota wait duration: " << quotaWaitDuration << ". Action: " << Action_); - if (UserCounters_) { - auto* detailedCounters = UserCounters_ ? 
UserCounters_->GetDetailedCounters() : nullptr; - COLLECT_HISTOGRAM_COUNTER(detailedCounters, GetQuota_Duration, quotaWaitDuration.MilliSeconds()); - } - DoRoutine(); - break; - } - } - } - -private: - void Start() { + DoGetQuotaAndProcess(); + } + } + + void DoGetQuotaAndProcess() { + if (SourceSqsRequest_.GetRequestRateLimit() && Cfg().GetQuotingConfig().GetEnableQuoting() && QuoterResources_) { + this->Become(&TActionActor::WaitQuotaState); + RLOG_SQS_DEBUG("Requesting quota"); + QuotaRequestTs_ = TActivationContext::Now(); + ui64 quoterId = 0; + ui64 resourceId = 0; + TDuration deadline = TDuration::Max(); // defaut deadline is infinity + auto resourceForAction = QuoterResources_->ActionsResources.find(Action_); + if (resourceForAction != QuoterResources_->ActionsResources.end()) { + quoterId = resourceForAction->second.QuoterId; + resourceId = resourceForAction->second.ResourceId; + } else { + quoterId = QuoterResources_->OtherActions.QuoterId; + resourceId = QuoterResources_->OtherActions.ResourceId; + } + if (Cfg().GetQuotingConfig().HasQuotaDeadlineMs()) { + deadline = TDuration::MilliSeconds(Cfg().GetQuotingConfig().GetQuotaDeadlineMs()); + } + this->Send(MakeQuoterServiceID(), + new TEvQuota::TEvRequest( + TEvQuota::EResourceOperator::And, + { TEvQuota::TResourceLeaf(quoterId, resourceId, 1) }, + deadline)); + } else { + DoRoutine(); + } + } + + void HandleQuota(TEvQuota::TEvClearance::TPtr& ev) { + const TDuration quotaWaitDuration = TActivationContext::Now() - QuotaRequestTs_; + switch (ev->Get()->Result) { + case TEvQuota::TEvClearance::EResult::GenericError: + case TEvQuota::TEvClearance::EResult::UnknownResource: { + RLOG_SQS_ERROR("Failed to get quota: " << ev->Get()->Result); + MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE); + SendReplyAndDie(); + break; + } + case TEvQuota::TEvClearance::EResult::Deadline: { + RLOG_SQS_WARN("Failed to get quota: deadline expired. Quota wait duration: " << quotaWaitDuration << ". Action: " << Action_); + INC_COUNTER(QueueCounters_, RequestsThrottled); + MakeError(MutableErrorDesc(), NErrors::THROTTLING_EXCEPTION); + SendReplyAndDie(); + break; + } + case TEvQuota::TEvClearance::EResult::Success: { + RLOG_SQS_DEBUG("Successfully got quota for request. Quota wait duration: " << quotaWaitDuration << ". Action: " << Action_); + if (UserCounters_) { + auto* detailedCounters = UserCounters_ ? 
UserCounters_->GetDetailedCounters() : nullptr; + COLLECT_HISTOGRAM_COUNTER(detailedCounters, GetQuota_Duration, quotaWaitDuration.MilliSeconds()); + } + DoRoutine(); + break; + } + } + } + +private: + void Start() { auto actionCountersCouple = GetActionCounters(); if (actionCountersCouple.SqsCounters) { - if (IsActionForQueue(Action_) && QueueCounters_) { + if (IsActionForQueue(Action_) && QueueCounters_) { NeedReportSqsActionInflyCounter = QueueCounters_->NeedToShowDetailedCounters(); - } else if (IsActionForUser(Action_) && UserCounters_) { + } else if (IsActionForUser(Action_) && UserCounters_) { NeedReportSqsActionInflyCounter = UserCounters_->NeedToShowDetailedCounters(); - } + } if (NeedReportSqsActionInflyCounter) { INC_COUNTER(actionCountersCouple.SqsCounters, Infly); - } - } + } + } if (actionCountersCouple.YmqCounters) { if (IsActionForQueueYMQ(Action_) && QueueCounters_) { NeedReportYmqActionInflyCounter = QueueCounters_->NeedToShowDetailedCounters(); @@ -717,60 +717,60 @@ private: INC_COUNTER(actionCountersCouple.YmqCounters, Infly); } } - DoStart(); - StartRequestWasCalled_ = true; - } - - void Finish() { + DoStart(); + StartRequestWasCalled_ = true; + } + + void Finish() { auto actionCounters = GetActionCounters(); if (NeedReportSqsActionInflyCounter) { DEC_COUNTER(actionCounters.SqsCounters, Infly); - } + } if (NeedReportYmqActionInflyCounter && actionCounters.YmqCounters) { DEC_COUNTER(actionCounters.YmqCounters, Infly); } - if (StartRequestWasCalled_) { - DoFinish(); - FinishRequestWasCalled_ = true; - } - } - + if (StartRequestWasCalled_) { + DoFinish(); + FinishRequestWasCalled_ = true; + } + } + protected: - static constexpr ui64 REQUEST_TIMEOUT_WAKEUP_TAG = 100; - - const EAction Action_; + static constexpr ui64 REQUEST_TIMEOUT_WAKEUP_TAG = 100; + + const EAction Action_; const TString RequestId_; THolder<IReplyCallback> Cb_; - TString RootUrl_; - TString UserName_; + TString RootUrl_; + TString UserName_; TString SecurityToken_; TString FolderId_; size_t SecurityCheckRequestsToWaitFor_ = 2; TIntrusivePtr<TSecurityObject> SecurityObject_; TIntrusivePtr<NACLib::TUserToken> UserToken_; TString UserSID_; // identifies the client who sent this request - bool UserExists_ = false; - bool QueueExists_ = false; - ui64 Shards_; + bool UserExists_ = false; + bool QueueExists_ = false; + ui64 Shards_; TMaybe<bool> IsFifo_; - TInstant StartTs_; - TInstant FinishTs_; - TIntrusivePtr<NMonitoring::TDynamicCounters> SqsCoreCounters_; // Raw counters interface. Is is not prefered to use them + TInstant StartTs_; + TInstant FinishTs_; + TIntrusivePtr<NMonitoring::TDynamicCounters> SqsCoreCounters_; // Raw counters interface. Is is not prefered to use them TIntrusivePtr<NMonitoring::TDynamicCounters> YmqRootCounters_; // Raw counters interface. 
Is is not prefered to use them - TIntrusivePtr<TUserCounters> UserCounters_; - TIntrusivePtr<TQueueCounters> QueueCounters_; - TMaybe<TSqsEvents::TQueueAttributes> QueueAttributes_; + TIntrusivePtr<TUserCounters> UserCounters_; + TIntrusivePtr<TQueueCounters> QueueCounters_; + TMaybe<TSqsEvents::TQueueAttributes> QueueAttributes_; NKikimrClient::TSqsResponse Response_; TActorId SchemeCache_; TActorId QueueLeader_; - bool StartRequestWasCalled_ = false; - bool FinishRequestWasCalled_ = false; - TInstant QuotaRequestTs_; - TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> QuoterResources_; + bool StartRequestWasCalled_ = false; + bool FinishRequestWasCalled_ = false; + TInstant QuotaRequestTs_; + TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> QuoterResources_; bool NeedReportSqsActionInflyCounter = false; bool NeedReportYmqActionInflyCounter = false; - TSchedulerCookieHolder TimeoutCookie_ = ISchedulerCookie::Make2Way(); - NKikimrClient::TSqsRequest SourceSqsRequest_; + TSchedulerCookieHolder TimeoutCookie_ = ISchedulerCookie::Make2Way(); + NKikimrClient::TSqsRequest SourceSqsRequest_; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/actor.cpp b/ydb/core/ymq/actor/actor.cpp index d2ebd7cf50f..dd0e003e3f9 100644 --- a/ydb/core/ymq/actor/actor.cpp +++ b/ydb/core/ymq/actor/actor.cpp @@ -1,45 +1,45 @@ #include "actor.h" #include "action.h" -#include "ping.h" -#include "proxy_actor.h" - -#include <util/system/defaults.h> +#include "ping.h" +#include "proxy_actor.h" +#include <util/system/defaults.h> + using namespace NKikimrTxUserProxy; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TUnimplementedRequestActor : public TActionActor<TUnimplementedRequestActor> { public: - TUnimplementedRequestActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyCallback> cb) - : TActionActor(req, EAction::Unknown, std::move(cb)) + TUnimplementedRequestActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyCallback> cb) + : TActionActor(req, EAction::Unknown, std::move(cb)) { Response_.MutableGetQueueUrl()->SetRequestId(RequestId_); } private: - void DoAction() override { - SendReplyAndDie(); - } - - TError* MutableErrorDesc() override { - return Response_.MutableGetQueueUrl()->MutableError(); + void DoAction() override { + SendReplyAndDie(); } + TError* MutableErrorDesc() override { + return Response_.MutableGetQueueUrl()->MutableError(); + } + TString DoGetQueueName() const override { return TString(); } }; -IActor* CreateActionActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyCallback> cb) { - Y_VERIFY(req.GetRequestId()); +IActor* CreateActionActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyCallback> cb) { + Y_VERIFY(req.GetRequestId()); #define REQUEST_CASE(action) \ - case NKikimrClient::TSqsRequest::Y_CAT(k, action): { \ - extern IActor* Y_CAT(Y_CAT(Create, action), Actor)(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb); \ - return Y_CAT(Y_CAT(Create, action), Actor)(req, std::move(cb)); \ + case NKikimrClient::TSqsRequest::Y_CAT(k, action): { \ + extern IActor* Y_CAT(Y_CAT(Create, action), Actor)(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb); \ + return Y_CAT(Y_CAT(Create, action), Actor)(req, std::move(cb)); \ } switch (req.GetRequestCase()) { @@ -50,17 +50,17 @@ IActor* CreateActionActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyC REQUEST_CASE(DeleteMessage) REQUEST_CASE(DeleteMessageBatch) REQUEST_CASE(DeleteQueue) - 
REQUEST_CASE(DeleteQueueBatch) + REQUEST_CASE(DeleteQueueBatch) REQUEST_CASE(DeleteUser) REQUEST_CASE(ListPermissions) REQUEST_CASE(GetQueueAttributes) - REQUEST_CASE(GetQueueAttributesBatch) + REQUEST_CASE(GetQueueAttributesBatch) REQUEST_CASE(GetQueueUrl) REQUEST_CASE(ListQueues) REQUEST_CASE(ListUsers) REQUEST_CASE(ModifyPermissions) REQUEST_CASE(PurgeQueue) - REQUEST_CASE(PurgeQueueBatch) + REQUEST_CASE(PurgeQueueBatch) REQUEST_CASE(ReceiveMessage) REQUEST_CASE(SendMessage) REQUEST_CASE(SendMessageBatch) @@ -71,7 +71,7 @@ IActor* CreateActionActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyC #undef REQUEST_CASE case NKikimrClient::TSqsRequest::REQUEST_NOT_SET: - return new TUnimplementedRequestActor(req, std::move(cb)); + return new TUnimplementedRequestActor(req, std::move(cb)); } Y_FAIL(); @@ -79,14 +79,14 @@ IActor* CreateActionActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyC IActor* CreateProxyActionActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyCallback> cb, bool enableQueueLeader) { if (enableQueueLeader && TProxyActor::NeedCreateProxyActor(req)) { - return new TProxyActor(req, std::move(cb)); - } else { - return CreateActionActor(req, std::move(cb)); - } -} - -IActor* CreatePingActor(THolder<IPingReplyCallback> cb, const TString& requestId) { - return new TPingActor(std::move(cb), requestId); -} - -} // namespace NKikimr::NSQS + return new TProxyActor(req, std::move(cb)); + } else { + return CreateActionActor(req, std::move(cb)); + } +} + +IActor* CreatePingActor(THolder<IPingReplyCallback> cb, const TString& requestId) { + return new TPingActor(std::move(cb), requestId); +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/actor.h b/ydb/core/ymq/actor/actor.h index d6a825d2f82..0ccd1e4261d 100644 --- a/ydb/core/ymq/actor/actor.h +++ b/ydb/core/ymq/actor/actor.h @@ -1,33 +1,33 @@ #pragma once -#include "defs.h" +#include "defs.h" #include <ydb/core/protos/msgbus.pb.h> #include <library/cpp/actors/core/actor.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class IReplyCallback { public: virtual ~IReplyCallback() = default; - virtual void DoSendReply(const NKikimrClient::TSqsResponse& resp) = 0; + virtual void DoSendReply(const NKikimrClient::TSqsResponse& resp) = 0; }; -class IPingReplyCallback { -public: - virtual ~IPingReplyCallback() = default; - - virtual void DoSendReply() = 0; -}; - -// Create actor that would process request. +class IPingReplyCallback { +public: + virtual ~IPingReplyCallback() = default; + + virtual void DoSendReply() = 0; +}; + +// Create actor that would process request. // Called from leader node. -IActor* CreateActionActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyCallback> cb); +IActor* CreateActionActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyCallback> cb); // Create actor that would proxy request to leader // or process it if leader is not required for given operation type. 
IActor* CreateProxyActionActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyCallback> cb, bool enableQueueLeader); - -IActor* CreatePingActor(THolder<IPingReplyCallback> cb, const TString& requestId); - -} // namespace NKikimr::NSQS + +IActor* CreatePingActor(THolder<IPingReplyCallback> cb, const TString& requestId); + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/attributes_md5.cpp b/ydb/core/ymq/actor/attributes_md5.cpp index 411c3b0861f..6f57fda70a0 100644 --- a/ydb/core/ymq/actor/attributes_md5.cpp +++ b/ydb/core/ymq/actor/attributes_md5.cpp @@ -1,65 +1,65 @@ -#include "attributes_md5.h" - +#include "attributes_md5.h" + #include <library/cpp/digest/md5/md5.h> - -#include <util/network/init.h> - -#include <cstdint> -#include <vector> - -// -// About attributes MD5 calculation. -// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html#sqs-attributes-md5-message-digest-calculation -// - -namespace NKikimr::NSQS { - -static const std::uint8_t STRING_TRANSPORT_TYPE_CODE = 1; -static const std::uint8_t BINARY_TRANSPORT_TYPE_CODE = 2; - -static std::uint32_t ToBigEndian(std::uint32_t x) { - return htonl(x); -} - -static void Update(MD5& md5, const TMessageAttribute& attr) { - const std::uint32_t nameLen = ToBigEndian(static_cast<std::uint32_t>(attr.GetName().size())); - const std::uint32_t dataTypeLen = ToBigEndian(static_cast<std::uint32_t>(attr.GetDataType().size())); - md5 - .Update(&nameLen, sizeof(nameLen)).Update(attr.GetName()) - .Update(&dataTypeLen, sizeof(dataTypeLen)).Update(attr.GetDataType()); - - if (attr.HasStringValue()) { - const std::uint32_t valueLen = ToBigEndian(static_cast<std::uint32_t>(attr.GetStringValue().size())); - md5 - .Update(&STRING_TRANSPORT_TYPE_CODE, sizeof(std::uint8_t)) - .Update(&valueLen, sizeof(valueLen)).Update(attr.GetStringValue()); - } else { - const std::uint32_t valueLen = ToBigEndian(static_cast<std::uint32_t>(attr.GetBinaryValue().size())); - md5 - .Update(&BINARY_TRANSPORT_TYPE_CODE, sizeof(std::uint8_t)) - .Update(&valueLen, sizeof(valueLen)).Update(attr.GetBinaryValue()); - } -} - -TString CalcMD5OfMessageAttributes(const google::protobuf::RepeatedPtrField<TMessageAttribute>& attributes) { - std::vector<const TMessageAttribute*> sortedAttrs(attributes.size()); - for (size_t i = 0; i < sortedAttrs.size(); ++i) { - sortedAttrs[i] = &attributes.Get(i); - } - std::sort( - sortedAttrs.begin(), - sortedAttrs.end(), - [](const TMessageAttribute* a1, const TMessageAttribute* a2) { - return a1->GetName() < a2->GetName(); - } - ); - MD5 md5; - for (const auto* attr : sortedAttrs) { - Update(md5, *attr); - } - char res[33] = {'\0'}; - md5.End(res); - return res; -} - -} // namespace NKikimr::NSQS + +#include <util/network/init.h> + +#include <cstdint> +#include <vector> + +// +// About attributes MD5 calculation. 
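The digest layout implemented below (and documented at the AWS link that follows) is: attributes are processed in ascending order of their names; each attribute contributes a 4-byte big-endian length before its name, its data type and its value, and a single transport-type byte (1 for string values, 2 for binary) is written just before the value length. Here is a standalone sketch of that layout, assuming OpenSSL's one-shot MD5() is available; the names are hypothetical and nothing in the sketch is part of this commit.

#include <openssl/md5.h>

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct TAttr {
    std::string Name;
    std::string DataType;  // e.g. "String" or "Binary"
    std::string Value;
    bool IsBinary = false;
};

// 4-byte big-endian length prefix, as the digest layout requires.
static void PutLenBE(std::string& buf, std::uint32_t len) {
    buf.push_back(static_cast<char>((len >> 24) & 0xFF));
    buf.push_back(static_cast<char>((len >> 16) & 0xFF));
    buf.push_back(static_cast<char>((len >> 8) & 0xFF));
    buf.push_back(static_cast<char>(len & 0xFF));
}

std::string AttributesMd5(std::vector<TAttr> attrs) {
    // Attributes contribute to the digest in ascending order of their names.
    std::sort(attrs.begin(), attrs.end(),
              [](const TAttr& a, const TAttr& b) { return a.Name < b.Name; });
    std::string buf;
    for (const auto& a : attrs) {
        PutLenBE(buf, static_cast<std::uint32_t>(a.Name.size()));
        buf += a.Name;
        PutLenBE(buf, static_cast<std::uint32_t>(a.DataType.size()));
        buf += a.DataType;
        buf.push_back(static_cast<char>(a.IsBinary ? 2 : 1));  // transport type code
        PutLenBE(buf, static_cast<std::uint32_t>(a.Value.size()));
        buf += a.Value;
    }
    // Hashing the concatenated buffer in one call gives the same digest as the
    // incremental Update() calls used in the implementation below.
    unsigned char digest[MD5_DIGEST_LENGTH];
    MD5(reinterpret_cast<const unsigned char*>(buf.data()), buf.size(), digest);
    char hex[2 * MD5_DIGEST_LENGTH + 1] = {};
    for (int i = 0; i < MD5_DIGEST_LENGTH; ++i) {
        std::snprintf(hex + 2 * i, 3, "%02x", digest[i]);
    }
    return hex;
}

int main() {
    std::vector<TAttr> attrs;
    attrs.push_back({"Author", "String", "me", false});
    std::printf("%s\n", AttributesMd5(attrs).c_str());
}

Build with -lcrypto; on OpenSSL 3.x the one-shot MD5() is deprecated but still usable for an illustration like this.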
+// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html#sqs-attributes-md5-message-digest-calculation +// + +namespace NKikimr::NSQS { + +static const std::uint8_t STRING_TRANSPORT_TYPE_CODE = 1; +static const std::uint8_t BINARY_TRANSPORT_TYPE_CODE = 2; + +static std::uint32_t ToBigEndian(std::uint32_t x) { + return htonl(x); +} + +static void Update(MD5& md5, const TMessageAttribute& attr) { + const std::uint32_t nameLen = ToBigEndian(static_cast<std::uint32_t>(attr.GetName().size())); + const std::uint32_t dataTypeLen = ToBigEndian(static_cast<std::uint32_t>(attr.GetDataType().size())); + md5 + .Update(&nameLen, sizeof(nameLen)).Update(attr.GetName()) + .Update(&dataTypeLen, sizeof(dataTypeLen)).Update(attr.GetDataType()); + + if (attr.HasStringValue()) { + const std::uint32_t valueLen = ToBigEndian(static_cast<std::uint32_t>(attr.GetStringValue().size())); + md5 + .Update(&STRING_TRANSPORT_TYPE_CODE, sizeof(std::uint8_t)) + .Update(&valueLen, sizeof(valueLen)).Update(attr.GetStringValue()); + } else { + const std::uint32_t valueLen = ToBigEndian(static_cast<std::uint32_t>(attr.GetBinaryValue().size())); + md5 + .Update(&BINARY_TRANSPORT_TYPE_CODE, sizeof(std::uint8_t)) + .Update(&valueLen, sizeof(valueLen)).Update(attr.GetBinaryValue()); + } +} + +TString CalcMD5OfMessageAttributes(const google::protobuf::RepeatedPtrField<TMessageAttribute>& attributes) { + std::vector<const TMessageAttribute*> sortedAttrs(attributes.size()); + for (size_t i = 0; i < sortedAttrs.size(); ++i) { + sortedAttrs[i] = &attributes.Get(i); + } + std::sort( + sortedAttrs.begin(), + sortedAttrs.end(), + [](const TMessageAttribute* a1, const TMessageAttribute* a2) { + return a1->GetName() < a2->GetName(); + } + ); + MD5 md5; + for (const auto* attr : sortedAttrs) { + Update(md5, *attr); + } + char res[33] = {'\0'}; + md5.End(res); + return res; +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/attributes_md5.h b/ydb/core/ymq/actor/attributes_md5.h index 0f92ce0200b..63edbf67e15 100644 --- a/ydb/core/ymq/actor/attributes_md5.h +++ b/ydb/core/ymq/actor/attributes_md5.h @@ -1,9 +1,9 @@ -#pragma once -#include "defs.h" +#pragma once +#include "defs.h" #include <ydb/core/protos/sqs.pb.h> - -namespace NKikimr::NSQS { - -TString CalcMD5OfMessageAttributes(const google::protobuf::RepeatedPtrField<TMessageAttribute>& attributes); - -} // namespace NKikimr::NSQS + +namespace NKikimr::NSQS { + +TString CalcMD5OfMessageAttributes(const google::protobuf::RepeatedPtrField<TMessageAttribute>& attributes); + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/cfg.cpp b/ydb/core/ymq/actor/cfg.cpp index 10361b92b2b..55dd6cc4bce 100644 --- a/ydb/core/ymq/actor/cfg.cpp +++ b/ydb/core/ymq/actor/cfg.cpp @@ -1,13 +1,13 @@ -#include "cfg.h" - +#include "cfg.h" + #include <ydb/core/base/appdata.h> - -namespace NKikimr::NSQS { - -const NKikimrConfig::TSqsConfig& Cfg() { - return AppData()->SqsConfig; -} - + +namespace NKikimr::NSQS { + +const NKikimrConfig::TSqsConfig& Cfg() { + return AppData()->SqsConfig; +} + ui32 GetLeadersDescriberUpdateTimeMs() { const auto& config = AppData()->SqsConfig; if (config.HasMastersDescriberUpdateTimeMs()) { @@ -16,4 +16,4 @@ ui32 GetLeadersDescriberUpdateTimeMs() { return config.GetLeadersDescriberUpdateTimeMs(); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/cfg.h b/ydb/core/ymq/actor/cfg.h index 8d7fcac91b6..56629ff3e5c 100644 --- a/ydb/core/ymq/actor/cfg.h +++ 
b/ydb/core/ymq/actor/cfg.h @@ -1,11 +1,11 @@ -#pragma once -#include "defs.h" +#pragma once +#include "defs.h" #include <ydb/core/base/defs.h> #include <ydb/core/protos/config.pb.h> - -namespace NKikimr::NSQS { - -const NKikimrConfig::TSqsConfig& Cfg(); + +namespace NKikimr::NSQS { + +const NKikimrConfig::TSqsConfig& Cfg(); ui32 GetLeadersDescriberUpdateTimeMs(); - -} // namespace NKikimr::NSQS + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/change_visibility.cpp b/ydb/core/ymq/actor/change_visibility.cpp index 5c3140bcc8b..a6b6ddbbcf1 100644 --- a/ydb/core/ymq/actor/change_visibility.cpp +++ b/ydb/core/ymq/actor/change_visibility.cpp @@ -1,7 +1,7 @@ #include "action.h" -#include "error.h" -#include "executor.h" -#include "log.h" +#include "error.h" +#include "executor.h" +#include "log.h" #include "params.h" #include <ydb/core/ymq/base/limits.h> @@ -12,209 +12,209 @@ using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TChangeMessageVisibilityActor : public TActionActor<TChangeMessageVisibilityActor> { public: - TChangeMessageVisibilityActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, bool isBatch, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, isBatch ? EAction::ChangeMessageVisibilityBatch : EAction::ChangeMessageVisibility, std::move(cb)) - , IsBatch_(isBatch) + TChangeMessageVisibilityActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, bool isBatch, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, isBatch ? EAction::ChangeMessageVisibilityBatch : EAction::ChangeMessageVisibility, std::move(cb)) + , IsBatch_(isBatch) { - if (IsBatch_) { - CopyAccountName(BatchRequest()); - Response_.MutableChangeMessageVisibilityBatch()->SetRequestId(RequestId_); - CopySecurityToken(BatchRequest()); - } else { - CopyAccountName(Request()); - Response_.MutableChangeMessageVisibility()->SetRequestId(RequestId_); - CopySecurityToken(Request()); - } + if (IsBatch_) { + CopyAccountName(BatchRequest()); + Response_.MutableChangeMessageVisibilityBatch()->SetRequestId(RequestId_); + CopySecurityToken(BatchRequest()); + } else { + CopyAccountName(Request()); + Response_.MutableChangeMessageVisibility()->SetRequestId(RequestId_); + CopySecurityToken(Request()); + } } protected: - void AppendEntry(const TChangeMessageVisibilityRequest& entry, TChangeMessageVisibilityResponse* resp, size_t requestIndexInBatch) { - try { - // Validate - if (!entry.HasVisibilityTimeout()) { - MakeError(resp, NErrors::MISSING_PARAMETER, "VisibilityTimeout was not provided."); - return; - } - - const TDuration newVisibilityTimeout = TDuration::Seconds(entry.GetVisibilityTimeout()); - if (newVisibilityTimeout > TLimits::MaxVisibilityTimeout) { - MakeError(resp, NErrors::INVALID_PARAMETER_VALUE, "VisibilityTimeout parameter must be less than or equal to 12 hours."); - return; - } - - const TReceipt receipt = DecodeReceiptHandle(entry.GetReceiptHandle()); // can throw - RLOG_SQS_DEBUG("Decoded receipt handle: " << receipt); - if (receipt.GetShard() >= Shards_) { - throw yexception() << "Invalid shard: " << receipt.GetShard(); - } - - const bool isFifo = IsFifoQueue(); - if (isFifo && !receipt.GetMessageGroupId()) { - throw yexception() << "No message group id"; - } - - auto& shardInfo = ShardInfo_[receipt.GetShard()]; - // Create request - if (!shardInfo.Request_) { + void AppendEntry(const TChangeMessageVisibilityRequest& entry, TChangeMessageVisibilityResponse* resp, size_t requestIndexInBatch) { + try { + // Validate + if 
(!entry.HasVisibilityTimeout()) { + MakeError(resp, NErrors::MISSING_PARAMETER, "VisibilityTimeout was not provided."); + return; + } + + const TDuration newVisibilityTimeout = TDuration::Seconds(entry.GetVisibilityTimeout()); + if (newVisibilityTimeout > TLimits::MaxVisibilityTimeout) { + MakeError(resp, NErrors::INVALID_PARAMETER_VALUE, "VisibilityTimeout parameter must be less than or equal to 12 hours."); + return; + } + + const TReceipt receipt = DecodeReceiptHandle(entry.GetReceiptHandle()); // can throw + RLOG_SQS_DEBUG("Decoded receipt handle: " << receipt); + if (receipt.GetShard() >= Shards_) { + throw yexception() << "Invalid shard: " << receipt.GetShard(); + } + + const bool isFifo = IsFifoQueue(); + if (isFifo && !receipt.GetMessageGroupId()) { + throw yexception() << "No message group id"; + } + + auto& shardInfo = ShardInfo_[receipt.GetShard()]; + // Create request + if (!shardInfo.Request_) { ++RequestsToLeader_; - shardInfo.Request_ = MakeHolder<TSqsEvents::TEvChangeMessageVisibilityBatch>(); - shardInfo.Request_->Shard = receipt.GetShard(); - shardInfo.Request_->RequestId = RequestId_; - shardInfo.Request_->NowTimestamp = NowTimestamp_; - } - - // Add new message to shard request - if (IsBatch_) { - shardInfo.RequestToReplyIndexMapping_.push_back(requestIndexInBatch); - } - shardInfo.Request_->Messages.emplace_back(); - auto& msgReq = shardInfo.Request_->Messages.back(); - msgReq.Offset = receipt.GetOffset(); - msgReq.LockTimestamp = TInstant::MilliSeconds(receipt.GetLockTimestamp()); - if (isFifo) { - msgReq.MessageGroupId = receipt.GetMessageGroupId(); - msgReq.ReceiveAttemptId = receipt.GetReceiveRequestAttemptId(); - } - - msgReq.VisibilityDeadline = NowTimestamp_ + newVisibilityTimeout; - } catch (...) { - RLOG_SQS_WARN("Failed to process receipt handle " << entry.GetReceiptHandle() << ": " << CurrentExceptionMessage()); - MakeError(resp, NErrors::RECEIPT_HANDLE_IS_INVALID); - } - } - - void ProcessAnswer(TChangeMessageVisibilityResponse* resp, const TSqsEvents::TEvChangeMessageVisibilityBatchResponse::TMessageResult& answer) { - switch (answer.Status) { - case TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::OK: { - break; - } - case TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::NotFound: { - MakeError(resp, NErrors::INVALID_PARAMETER_VALUE, "No such message."); - break; - } - case TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::NotInFly: { - MakeError(resp, NErrors::MESSAGE_NOT_INFLIGHT); - break; - } - case TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::Failed: { - MakeError(resp, NErrors::INTERNAL_FAILURE); - break; - } + shardInfo.Request_ = MakeHolder<TSqsEvents::TEvChangeMessageVisibilityBatch>(); + shardInfo.Request_->Shard = receipt.GetShard(); + shardInfo.Request_->RequestId = RequestId_; + shardInfo.Request_->NowTimestamp = NowTimestamp_; + } + + // Add new message to shard request + if (IsBatch_) { + shardInfo.RequestToReplyIndexMapping_.push_back(requestIndexInBatch); + } + shardInfo.Request_->Messages.emplace_back(); + auto& msgReq = shardInfo.Request_->Messages.back(); + msgReq.Offset = receipt.GetOffset(); + msgReq.LockTimestamp = TInstant::MilliSeconds(receipt.GetLockTimestamp()); + if (isFifo) { + msgReq.MessageGroupId = receipt.GetMessageGroupId(); + msgReq.ReceiveAttemptId = receipt.GetReceiveRequestAttemptId(); + } + + msgReq.VisibilityDeadline = NowTimestamp_ + newVisibilityTimeout; + } catch (...) 
{ + RLOG_SQS_WARN("Failed to process receipt handle " << entry.GetReceiptHandle() << ": " << CurrentExceptionMessage()); + MakeError(resp, NErrors::RECEIPT_HANDLE_IS_INVALID); } + } + + void ProcessAnswer(TChangeMessageVisibilityResponse* resp, const TSqsEvents::TEvChangeMessageVisibilityBatchResponse::TMessageResult& answer) { + switch (answer.Status) { + case TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::OK: { + break; + } + case TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::NotFound: { + MakeError(resp, NErrors::INVALID_PARAMETER_VALUE, "No such message."); + break; + } + case TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::NotInFly: { + MakeError(resp, NErrors::MESSAGE_NOT_INFLIGHT); + break; + } + case TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::Failed: { + MakeError(resp, NErrors::INTERNAL_FAILURE); + break; + } + } } bool DoValidate() override { if (IsBatch_) { - if (BatchRequest().EntriesSize() == 0) { - MakeError(Response_.MutableChangeMessageVisibilityBatch(), NErrors::EMPTY_BATCH_REQUEST); - return false; - } else if (BatchRequest().EntriesSize() > TLimits::MaxBatchSize) { - MakeError(Response_.MutableChangeMessageVisibilityBatch(), NErrors::TOO_MANY_ENTRIES_IN_BATCH_REQUEST); + if (BatchRequest().EntriesSize() == 0) { + MakeError(Response_.MutableChangeMessageVisibilityBatch(), NErrors::EMPTY_BATCH_REQUEST); return false; + } else if (BatchRequest().EntriesSize() > TLimits::MaxBatchSize) { + MakeError(Response_.MutableChangeMessageVisibilityBatch(), NErrors::TOO_MANY_ENTRIES_IN_BATCH_REQUEST); + return false; } } return true; } - TError* MutableErrorDesc() override { - return IsBatch_ ? Response_.MutableChangeMessageVisibilityBatch()->MutableError() : Response_.MutableChangeMessageVisibility()->MutableError(); - } - - void DoAction() override { + TError* MutableErrorDesc() override { + return IsBatch_ ? Response_.MutableChangeMessageVisibilityBatch()->MutableError() : Response_.MutableChangeMessageVisibility()->MutableError(); + } + + void DoAction() override { Become(&TThis::StateFunc); - ShardInfo_.resize(Shards_); - NowTimestamp_ = TActivationContext::Now(); - - if (IsBatch_) { - for (size_t i = 0, size = BatchRequest().EntriesSize(); i < size; ++i) { - const auto& entry = BatchRequest().GetEntries(i); - auto* response = Response_.MutableChangeMessageVisibilityBatch()->AddEntries(); - response->SetId(entry.GetId()); - AppendEntry(entry, response, i); - } - } else { - AppendEntry(Request(), Response_.MutableChangeMessageVisibility(), 0); + ShardInfo_.resize(Shards_); + NowTimestamp_ = TActivationContext::Now(); + + if (IsBatch_) { + for (size_t i = 0, size = BatchRequest().EntriesSize(); i < size; ++i) { + const auto& entry = BatchRequest().GetEntries(i); + auto* response = Response_.MutableChangeMessageVisibilityBatch()->AddEntries(); + response->SetId(entry.GetId()); + AppendEntry(entry, response, i); + } + } else { + AppendEntry(Request(), Response_.MutableChangeMessageVisibility(), 0); } if (RequestsToLeader_) { Y_VERIFY(RequestsToLeader_ <= Shards_); - for (auto& shardInfo : ShardInfo_) { - if (shardInfo.Request_) { + for (auto& shardInfo : ShardInfo_) { + if (shardInfo.Request_) { Send(QueueLeader_, shardInfo.Request_.Release()); } } } else { - SendReplyAndDie(); + SendReplyAndDie(); } } TString DoGetQueueName() const override { - return IsBatch_ ? BatchRequest().GetQueueName() : Request().GetQueueName(); + return IsBatch_ ? 
BatchRequest().GetQueueName() : Request().GetQueueName(); } private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvChangeMessageVisibilityBatchResponse, HandleChangeMessageVisibilityBatchResponse); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvChangeMessageVisibilityBatchResponse, HandleChangeMessageVisibilityBatchResponse); } } - void HandleChangeMessageVisibilityBatchResponse(TSqsEvents::TEvChangeMessageVisibilityBatchResponse::TPtr& ev) { - if (IsBatch_) { - Y_VERIFY(ev->Get()->Shard < Shards_); - const auto& shardInfo = ShardInfo_[ev->Get()->Shard]; - Y_VERIFY(ev->Get()->Statuses.size() == shardInfo.RequestToReplyIndexMapping_.size()); - for (size_t i = 0, size = ev->Get()->Statuses.size(); i < size; ++i) { - const size_t entryIndex = shardInfo.RequestToReplyIndexMapping_[i]; - Y_VERIFY(entryIndex < Response_.GetChangeMessageVisibilityBatch().EntriesSize()); - ProcessAnswer(Response_.MutableChangeMessageVisibilityBatch()->MutableEntries(entryIndex), ev->Get()->Statuses[i]); + void HandleChangeMessageVisibilityBatchResponse(TSqsEvents::TEvChangeMessageVisibilityBatchResponse::TPtr& ev) { + if (IsBatch_) { + Y_VERIFY(ev->Get()->Shard < Shards_); + const auto& shardInfo = ShardInfo_[ev->Get()->Shard]; + Y_VERIFY(ev->Get()->Statuses.size() == shardInfo.RequestToReplyIndexMapping_.size()); + for (size_t i = 0, size = ev->Get()->Statuses.size(); i < size; ++i) { + const size_t entryIndex = shardInfo.RequestToReplyIndexMapping_[i]; + Y_VERIFY(entryIndex < Response_.GetChangeMessageVisibilityBatch().EntriesSize()); + ProcessAnswer(Response_.MutableChangeMessageVisibilityBatch()->MutableEntries(entryIndex), ev->Get()->Statuses[i]); } } else { Y_VERIFY(RequestsToLeader_ == 1); - Y_VERIFY(ev->Get()->Statuses.size() == 1); - ProcessAnswer(Response_.MutableChangeMessageVisibility(), ev->Get()->Statuses[0]); + Y_VERIFY(ev->Get()->Statuses.size() == 1); + ProcessAnswer(Response_.MutableChangeMessageVisibility(), ev->Get()->Statuses[0]); } --RequestsToLeader_; if (RequestsToLeader_ == 0) { - SendReplyAndDie(); + SendReplyAndDie(); } } - const TChangeMessageVisibilityRequest& Request() const { - return SourceSqsRequest_.GetChangeMessageVisibility(); - } - - const TChangeMessageVisibilityBatchRequest& BatchRequest() const { - return SourceSqsRequest_.GetChangeMessageVisibilityBatch(); - } - + const TChangeMessageVisibilityRequest& Request() const { + return SourceSqsRequest_.GetChangeMessageVisibility(); + } + + const TChangeMessageVisibilityBatchRequest& BatchRequest() const { + return SourceSqsRequest_.GetChangeMessageVisibilityBatch(); + } + private: - const bool IsBatch_; + const bool IsBatch_; - struct TShardInfo { - std::vector<size_t> RequestToReplyIndexMapping_; - THolder<TSqsEvents::TEvChangeMessageVisibilityBatch> Request_; // actual when processing initial request, then nullptr - }; + struct TShardInfo { + std::vector<size_t> RequestToReplyIndexMapping_; + THolder<TSqsEvents::TEvChangeMessageVisibilityBatch> Request_; // actual when processing initial request, then nullptr + }; size_t RequestsToLeader_ = 0; - std::vector<TShardInfo> ShardInfo_; - TInstant NowTimestamp_; + std::vector<TShardInfo> ShardInfo_; + TInstant NowTimestamp_; }; -IActor* CreateChangeMessageVisibilityActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TChangeMessageVisibilityActor(sourceSqsRequest, false, std::move(cb)); +IActor* CreateChangeMessageVisibilityActor(const 
NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TChangeMessageVisibilityActor(sourceSqsRequest, false, std::move(cb)); } -IActor* CreateChangeMessageVisibilityBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TChangeMessageVisibilityActor(sourceSqsRequest, true, std::move(cb)); +IActor* CreateChangeMessageVisibilityBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TChangeMessageVisibilityActor(sourceSqsRequest, true, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/common_batch_actor.h b/ydb/core/ymq/actor/common_batch_actor.h index 1d9cc6916d9..49b3e421e8f 100644 --- a/ydb/core/ymq/actor/common_batch_actor.h +++ b/ydb/core/ymq/actor/common_batch_actor.h @@ -1,93 +1,93 @@ -#pragma once -#include "defs.h" -#include "action.h" -#include "actor.h" -#include "error.h" -#include "proxy_actor.h" - -namespace NKikimr::NSQS { - -class TBatchRequestReplyCallback : public IReplyCallback { -public: +#pragma once +#include "defs.h" +#include "action.h" +#include "actor.h" +#include "error.h" +#include "proxy_actor.h" + +namespace NKikimr::NSQS { + +class TBatchRequestReplyCallback : public IReplyCallback { +public: TBatchRequestReplyCallback(const TActorId& bacthActor, ui64 cookie) - : BatchActor_(bacthActor) - , Cookie_(cookie) - { - } - -private: - void DoSendReply(const NKikimrClient::TSqsResponse& resp) override { + : BatchActor_(bacthActor) + , Cookie_(cookie) + { + } + +private: + void DoSendReply(const NKikimrClient::TSqsResponse& resp) override { const TActorId sender = TActivationContext::AsActorContext().SelfID; - TActivationContext::Send(new IEventHandle(BatchActor_, sender, new TSqsEvents::TEvSqsResponse(resp), 0, Cookie_)); - } - -private: + TActivationContext::Send(new IEventHandle(BatchActor_, sender, new TSqsEvents::TEvSqsResponse(resp), 0, Cookie_)); + } + +private: const TActorId BatchActor_; - const ui64 Cookie_; -}; - -template <class TDerived> -class TCommonBatchActor - : public TActionActor<TDerived> -{ -public: - static constexpr bool NeedExistingQueue() { - return false; - } - - TCommonBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, const EAction action, THolder<IReplyCallback> cb) - : TActionActor<TDerived>(sourceSqsRequest, action, std::move(cb)) - { - } - -private: - virtual std::vector<NKikimrClient::TSqsRequest> GenerateRequestsFromBatch() const = 0; - virtual void OnResponses(std::vector<NKikimrClient::TSqsResponse>&& responses) = 0; // Fills Response_ with merged response - - void DoAction() override { - RLOG_SQS_TRACE("TCommonBatchActor::DoAction"); - this->Become(&TCommonBatchActor::StateFunc); - - std::vector<NKikimrClient::TSqsRequest> requests = GenerateRequestsFromBatch(); - if (requests.empty()) { - MakeError(this->MutableErrorDesc(), NErrors::EMPTY_BATCH_REQUEST); - this->SendReplyAndDie(); - return; - } - Responses.resize(requests.size()); - for (ui64 i = 0; i < requests.size(); ++i) { - TStringBuilder reqId; - reqId << RequestId_ << "-" << i; - RLOG_SQS_DEBUG("Create proxy subactor[" << i << "]. 
Req id: " << reqId); - requests[i].SetRequestId(reqId); - requests[i].SetRequestRateLimit(false); // already requested + const ui64 Cookie_; +}; + +template <class TDerived> +class TCommonBatchActor + : public TActionActor<TDerived> +{ +public: + static constexpr bool NeedExistingQueue() { + return false; + } + + TCommonBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, const EAction action, THolder<IReplyCallback> cb) + : TActionActor<TDerived>(sourceSqsRequest, action, std::move(cb)) + { + } + +private: + virtual std::vector<NKikimrClient::TSqsRequest> GenerateRequestsFromBatch() const = 0; + virtual void OnResponses(std::vector<NKikimrClient::TSqsResponse>&& responses) = 0; // Fills Response_ with merged response + + void DoAction() override { + RLOG_SQS_TRACE("TCommonBatchActor::DoAction"); + this->Become(&TCommonBatchActor::StateFunc); + + std::vector<NKikimrClient::TSqsRequest> requests = GenerateRequestsFromBatch(); + if (requests.empty()) { + MakeError(this->MutableErrorDesc(), NErrors::EMPTY_BATCH_REQUEST); + this->SendReplyAndDie(); + return; + } + Responses.resize(requests.size()); + for (ui64 i = 0; i < requests.size(); ++i) { + TStringBuilder reqId; + reqId << RequestId_ << "-" << i; + RLOG_SQS_DEBUG("Create proxy subactor[" << i << "]. Req id: " << reqId); + requests[i].SetRequestId(reqId); + requests[i].SetRequestRateLimit(false); // already requested this->Register(new TProxyActor(requests[i], MakeHolder<TBatchRequestReplyCallback>(this->SelfId(), i))); - } - } - - STATEFN(StateFunc) { - switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvSqsResponse, HandleResponse); - } - } - - void HandleResponse(TSqsEvents::TEvSqsResponse::TPtr& ev) { - const ui64 cookie = ev->Cookie; - Y_VERIFY(cookie < Responses.size()); - RLOG_SQS_TRACE("Batch actor got reply from proxy actor[" << cookie << "]: " << ev->Get()->Record); - Responses[cookie] = std::move(ev->Get()->Record); - if (++ResponsesReceived == Responses.size()) { - OnResponses(std::move(Responses)); - this->SendReplyAndDie(); - } - } - -protected: - using TActionActor<TDerived>::RequestId_; - -private: - std::vector<NKikimrClient::TSqsResponse> Responses; - size_t ResponsesReceived = 0; -}; - -} // namespace NKikimr::NSQS + } + } + + STATEFN(StateFunc) { + switch (ev->GetTypeRewrite()) { + hFunc(TSqsEvents::TEvSqsResponse, HandleResponse); + } + } + + void HandleResponse(TSqsEvents::TEvSqsResponse::TPtr& ev) { + const ui64 cookie = ev->Cookie; + Y_VERIFY(cookie < Responses.size()); + RLOG_SQS_TRACE("Batch actor got reply from proxy actor[" << cookie << "]: " << ev->Get()->Record); + Responses[cookie] = std::move(ev->Get()->Record); + if (++ResponsesReceived == Responses.size()) { + OnResponses(std::move(Responses)); + this->SendReplyAndDie(); + } + } + +protected: + using TActionActor<TDerived>::RequestId_; + +private: + std::vector<NKikimrClient::TSqsResponse> Responses; + size_t ResponsesReceived = 0; +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/count_queues.cpp b/ydb/core/ymq/actor/count_queues.cpp index 8f217a192ac..80f5abc2320 100644 --- a/ydb/core/ymq/actor/count_queues.cpp +++ b/ydb/core/ymq/actor/count_queues.cpp @@ -23,13 +23,13 @@ public: return false; } - TCountQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::CountQueues, std::move(cb)) + TCountQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::CountQueues, 
std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutableCountQueues()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } private: @@ -37,42 +37,42 @@ private: return Response_.MutableCountQueues()->MutableError(); } - void DoAction() override { + void DoAction() override { Become(&TThis::StateFunc); - Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvCountQueues(RequestId_, UserName_, FolderId_)); + Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvCountQueues(RequestId_, UserName_, FolderId_)); } TString DoGetQueueName() const override { return TString(); } - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvCountQueuesResponse, HandleCountQueuesResponse); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvCountQueuesResponse, HandleCountQueuesResponse); } } - void HandleCountQueuesResponse(TSqsEvents::TEvCountQueuesResponse::TPtr& ev) { + void HandleCountQueuesResponse(TSqsEvents::TEvCountQueuesResponse::TPtr& ev) { if (ev->Get()->Failed) { - RLOG_SQS_WARN("Count queues failed"); + RLOG_SQS_WARN("Count queues failed"); MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE); } else { auto* result = Response_.MutableCountQueues(); result->SetCount(ev->Get()->Count); } - SendReplyAndDie(); + SendReplyAndDie(); } - const TCountQueuesRequest& Request() const { - return SourceSqsRequest_.GetCountQueues(); - } + const TCountQueuesRequest& Request() const { + return SourceSqsRequest_.GetCountQueues(); + } }; -IActor* CreateCountQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TCountQueuesActor(sourceSqsRequest, std::move(cb)); +IActor* CreateCountQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TCountQueuesActor(sourceSqsRequest, std::move(cb)); } } // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/create_queue.cpp b/ydb/core/ymq/actor/create_queue.cpp index 2d4b3af214e..039aa114bd1 100644 --- a/ydb/core/ymq/actor/create_queue.cpp +++ b/ydb/core/ymq/actor/create_queue.cpp @@ -1,6 +1,6 @@ #include "action.h" -#include "error.h" -#include "log.h" +#include "error.h" +#include "log.h" #include "queue_schema.h" #include <ydb/core/ymq/base/constants.h> @@ -8,38 +8,38 @@ #include <ydb/core/ymq/base/queue_id.h> #include <util/string/join.h> -#include <util/string/type.h> +#include <util/string/type.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TCreateQueueActor : public TActionActor<TCreateQueueActor> { public: - static constexpr bool NeedExistingQueue() { - return false; - } - + static constexpr bool NeedExistingQueue() { + return false; + } + static constexpr bool CreateMissingAccount() { return true; } - TCreateQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::CreateQueue, std::move(cb)) + TCreateQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::CreateQueue, std::move(cb)) { - CopyAccountName(Request()); // will be replaced during bootstrap for cloud mode + CopyAccountName(Request()); // will be replaced during bootstrap for cloud mode Response_.MutableCreateQueue()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } protected: bool IsFifoQueue() const override { 
- return AsciiHasSuffixIgnoreCase(Request().GetQueueName(), ".fifo"); // works for cloud too, since the custom name should end with '.fifo' + return AsciiHasSuffixIgnoreCase(Request().GetQueueName(), ".fifo"); // works for cloud too, since the custom name should end with '.fifo' } private: bool DoValidate() override { - auto* result = Response_.MutableCreateQueue(); + auto* result = Response_.MutableCreateQueue(); if (!IsCloud() && !UserExists_) { MakeError(result, NErrors::OPT_IN_REQUIRED, "The specified account does not exist."); @@ -47,7 +47,7 @@ private: } TAttribute fifo; - for (const auto& attr : Request().attributes()) { + for (const auto& attr : Request().attributes()) { if (attr.GetName() == "FifoQueue") { fifo = attr; break; @@ -55,119 +55,119 @@ private: } if (IsFifoQueue()) { - if (!fifo.HasName() || !IsTrue(fifo.GetValue())) { - MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "The FifoQueue attribute should be set to true for FIFO queue."); + if (!fifo.HasName() || !IsTrue(fifo.GetValue())) { + MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "The FifoQueue attribute should be set to true for FIFO queue."); return false; } } else { - if (fifo.HasName() && IsTrue(fifo.GetValue())) { - MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "Name of FIFO queue should end with \".fifo\"."); + if (fifo.HasName() && IsTrue(fifo.GetValue())) { + MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "Name of FIFO queue should end with \".fifo\"."); return false; } } - if (!Request().GetQueueName()) { - MakeError(result, NErrors::MISSING_PARAMETER, "No QueueName parameter."); - return false; - } - - if (!ValidateQueueNameOrUserName(Request().GetQueueName())) { - MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "Invalid queue name."); + if (!Request().GetQueueName()) { + MakeError(result, NErrors::MISSING_PARAMETER, "No QueueName parameter."); return false; } - if (Request().GetShards() > MAX_SHARDS_COUNT) { - MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "Too many shards."); - return false; - } - - if (Request().GetPartitions() > MAX_PARTITIONS_COUNT) { - MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "Too many partitions."); - return false; - } - - if (Request().GetEnableAutosplit() && Request().GetSizeToSplit() == 0) { - MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "Zero SizeToSplit."); + if (!ValidateQueueNameOrUserName(Request().GetQueueName())) { + MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "Invalid queue name."); + return false; + } + + if (Request().GetShards() > MAX_SHARDS_COUNT) { + MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "Too many shards."); return false; } + if (Request().GetPartitions() > MAX_PARTITIONS_COUNT) { + MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "Too many partitions."); + return false; + } + + if (Request().GetEnableAutosplit() && Request().GetSizeToSplit() == 0) { + MakeError(result, NErrors::INVALID_PARAMETER_VALUE, "Zero SizeToSplit."); + return false; + } + return true; } - TError* MutableErrorDesc() override { - return Response_.MutableCreateQueue()->MutableError(); - } - - void StartQueueCreation(const TString& queueName, const TString& accountName, const TString& customQueueName) { - const auto& cfg = Cfg(); - SchemaActor_ = Register( - new TCreateQueueSchemaActorV2(TQueuePath(cfg.GetRoot(), accountName, queueName), - Request(), SelfId(), RequestId_, customQueueName, FolderId_, IsCloud(), - cfg.GetEnableQueueAttributesValidation(), UserCounters_, QuoterResources_) + TError* MutableErrorDesc() override { 
+ return Response_.MutableCreateQueue()->MutableError(); + } + + void StartQueueCreation(const TString& queueName, const TString& accountName, const TString& customQueueName) { + const auto& cfg = Cfg(); + SchemaActor_ = Register( + new TCreateQueueSchemaActorV2(TQueuePath(cfg.GetRoot(), accountName, queueName), + Request(), SelfId(), RequestId_, customQueueName, FolderId_, IsCloud(), + cfg.GetEnableQueueAttributesValidation(), UserCounters_, QuoterResources_) ); } - void DoAction() override { + void DoAction() override { Become(&TThis::StateFunc); if (IsCloud()) { - Register(new TAtomicCounterActor(SelfId(), Cfg().GetRoot(), RequestId_)); + Register(new TAtomicCounterActor(SelfId(), Cfg().GetRoot(), RequestId_)); } else { static const TString emptyCustomQueueName = ""; - StartQueueCreation(Request().GetQueueName(), UserName_, emptyCustomQueueName); + StartQueueCreation(Request().GetQueueName(), UserName_, emptyCustomQueueName); } } TString DoGetQueueName() const override { - return TString(); + return TString(); } - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvAtomicCounterIncrementResult, HandleAtomicCounterIncrement); - hFunc(TSqsEvents::TEvQueueCreated, HandleQueueCreated); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvAtomicCounterIncrementResult, HandleAtomicCounterIncrement); + hFunc(TSqsEvents::TEvQueueCreated, HandleQueueCreated); } } - void HandleAtomicCounterIncrement(TSqsEvents::TEvAtomicCounterIncrementResult::TPtr& ev) { + void HandleAtomicCounterIncrement(TSqsEvents::TEvAtomicCounterIncrementResult::TPtr& ev) { auto event = ev->Get(); - auto* result = Response_.MutableCreateQueue(); + auto* result = Response_.MutableCreateQueue(); if (event->Success) { - const ui16 serviceId = Cfg().GetYandexCloudServiceId(); + const ui16 serviceId = Cfg().GetYandexCloudServiceId(); const TString cloudId = UserName_; // should decode from creds ResourceId_ = MakeQueueId(serviceId, event->NewValue, UserName_); - RLOG_SQS_DEBUG("Created resource id: " << MakeQueueId(serviceId, event->NewValue, UserName_) + RLOG_SQS_DEBUG("Created resource id: " << MakeQueueId(serviceId, event->NewValue, UserName_) << " for service id: " << serviceId << " unique num: " << event->NewValue << " account name: " << UserName_); - StartQueueCreation(ResourceId_, cloudId, Request().GetQueueName()); + StartQueueCreation(ResourceId_, cloudId, Request().GetQueueName()); } else { MakeError(result, NErrors::INTERNAL_FAILURE); - SendReplyAndDie(); + SendReplyAndDie(); } } - void HandleQueueCreated(TSqsEvents::TEvQueueCreated::TPtr& ev) { + void HandleQueueCreated(TSqsEvents::TEvQueueCreated::TPtr& ev) { SchemaActor_ = TActorId(); auto event = ev->Get(); - auto* result = Response_.MutableCreateQueue(); + auto* result = Response_.MutableCreateQueue(); - TStringBuilder errMsg; - if (!event->Success && ev->Get()->Error) { - errMsg << "Cannot create queue: " << ev->Get()->Error; - } + TStringBuilder errMsg; + if (!event->Success && ev->Get()->Error) { + errMsg << "Cannot create queue: " << ev->Get()->Error; + } switch (event->State) { case EQueueState::Creating: - MakeError(result, *ev->Get()->ErrorClass, errMsg); + MakeError(result, *ev->Get()->ErrorClass, errMsg); break; case EQueueState::Active: if (event->Success) { - const TString& name = Request().GetQueueName(); + const TString& name = Request().GetQueueName(); if (IsCloud()) { const auto finalResourceId = event->AlreadyExists ? 
event->ExistingQueueResourceId : ResourceId_; result->SetQueueName(finalResourceId); @@ -177,37 +177,37 @@ private: result->SetQueueUrl(MakeQueueUrl(name)); } } else { - MakeError(result, *ev->Get()->ErrorClass, errMsg); + MakeError(result, *ev->Get()->ErrorClass, errMsg); } break; case EQueueState::Deleting: - MakeError(result, NErrors::QUEUE_DELETED_RECENTLY, errMsg); + MakeError(result, NErrors::QUEUE_DELETED_RECENTLY, errMsg); break; } - SendReplyAndDie(); + SendReplyAndDie(); } - void PassAway() override { - if (SchemaActor_) { - Send(SchemaActor_, new TEvPoisonPill()); + void PassAway() override { + if (SchemaActor_) { + Send(SchemaActor_, new TEvPoisonPill()); SchemaActor_ = TActorId(); - } - TActionActor<TCreateQueueActor>::PassAway(); - } - - const TCreateQueueRequest& Request() const { - return SourceSqsRequest_.GetCreateQueue(); - } - + } + TActionActor<TCreateQueueActor>::PassAway(); + } + + const TCreateQueueRequest& Request() const { + return SourceSqsRequest_.GetCreateQueue(); + } + private: TString ResourceId_; TActorId SchemaActor_; }; -IActor* CreateCreateQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TCreateQueueActor(sourceSqsRequest, std::move(cb)); +IActor* CreateCreateQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TCreateQueueActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/create_user.cpp b/ydb/core/ymq/actor/create_user.cpp index deebfc54e8a..66564d58816 100644 --- a/ydb/core/ymq/actor/create_user.cpp +++ b/ydb/core/ymq/actor/create_user.cpp @@ -1,52 +1,52 @@ #include "action.h" -#include "error.h" +#include "error.h" #include "schema.h" #include <ydb/core/ymq/base/helpers.h> - -namespace NKikimr::NSQS { + +namespace NKikimr::NSQS { class TCreateUserActor : public TActionActor<TCreateUserActor> { public: - static constexpr bool NeedExistingQueue() { - return false; - } - - TCreateUserActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::CreateUser, std::move(cb)) + static constexpr bool NeedExistingQueue() { + return false; + } + + TCreateUserActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::CreateUser, std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutableCreateUser()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } private: bool DoValidate() override { - if (!Request().GetUserName()) { - MakeError(Response_.MutableCreateUser(), NErrors::MISSING_PARAMETER, "No user name parameter."); - return false; - } - - if (!ValidateQueueNameOrUserName(Request().GetUserName())) { - MakeError(Response_.MutableCreateUser(), NErrors::INVALID_PARAMETER_VALUE, "Invalid user name."); + if (!Request().GetUserName()) { + MakeError(Response_.MutableCreateUser(), NErrors::MISSING_PARAMETER, "No user name parameter."); return false; } + if (!ValidateQueueNameOrUserName(Request().GetUserName())) { + MakeError(Response_.MutableCreateUser(), NErrors::INVALID_PARAMETER_VALUE, "Invalid user name."); + return false; + } + return true; } - TError* MutableErrorDesc() override { - return Response_.MutableCreateUser()->MutableError(); - } - - void DoAction() override { + TError* MutableErrorDesc() override { + return Response_.MutableCreateUser()->MutableError(); 
+ } + + void DoAction() override { Become(&TThis::StateFunc); - SchemaActor = Register( - new TCreateUserSchemaActor(Cfg().GetRoot(), Request().GetUserName(), SelfId(), RequestId_, UserCounters_) + SchemaActor = Register( + new TCreateUserSchemaActor(Cfg().GetRoot(), Request().GetUserName(), SelfId(), RequestId_, UserCounters_) ); } @@ -55,41 +55,41 @@ private: } private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvUserCreated, HandleUserCreated); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvUserCreated, HandleUserCreated); } } - void HandleUserCreated(TSqsEvents::TEvUserCreated::TPtr& ev) { + void HandleUserCreated(TSqsEvents::TEvUserCreated::TPtr& ev) { SchemaActor = TActorId(); if (ev->Get()->Success) { } else { - MakeError(Response_.MutableCreateUser(), NErrors::INTERNAL_FAILURE); + MakeError(Response_.MutableCreateUser(), NErrors::INTERNAL_FAILURE); } - SendReplyAndDie(); + SendReplyAndDie(); } - void PassAway() override { - if (SchemaActor) { - Send(SchemaActor, new TEvPoisonPill()); + void PassAway() override { + if (SchemaActor) { + Send(SchemaActor, new TEvPoisonPill()); SchemaActor = TActorId(); - } - TActionActor<TCreateUserActor>::PassAway(); - } - - const TCreateUserRequest& Request() const { - return SourceSqsRequest_.GetCreateUser(); - } - + } + TActionActor<TCreateUserActor>::PassAway(); + } + + const TCreateUserRequest& Request() const { + return SourceSqsRequest_.GetCreateUser(); + } + private: TActorId SchemaActor; }; -IActor* CreateCreateUserActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TCreateUserActor(sourceSqsRequest, std::move(cb)); +IActor* CreateCreateUserActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TCreateUserActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/defs.h b/ydb/core/ymq/actor/defs.h index 2ca60599036..192107a5ada 100644 --- a/ydb/core/ymq/actor/defs.h +++ b/ydb/core/ymq/actor/defs.h @@ -1,19 +1,19 @@ -#pragma once +#pragma once #include <library/cpp/actors/core/actor.h> #include <library/cpp/actors/core/actor_bootstrapped.h> - -namespace NKikimr::NSQS { - + +namespace NKikimr::NSQS { + using NActors::TActorId; -using NActors::IActor; - -template <typename TDerived> -using TActor = NActors::TActor<TDerived>; - -template <typename TDerived> -using TActorBootstrapped = NActors::TActorBootstrapped<TDerived>; - -using TEvWakeup = NActors::TEvents::TEvWakeup; -using TEvPoisonPill = NActors::TEvents::TEvPoisonPill; - -} // namespace NKikimr::NSQS +using NActors::IActor; + +template <typename TDerived> +using TActor = NActors::TActor<TDerived>; + +template <typename TDerived> +using TActorBootstrapped = NActors::TActorBootstrapped<TDerived>; + +using TEvWakeup = NActors::TEvents::TEvWakeup; +using TEvPoisonPill = NActors::TEvents::TEvPoisonPill; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/delete_message.cpp b/ydb/core/ymq/actor/delete_message.cpp index 50b59ddcfb0..fd305707ac8 100644 --- a/ydb/core/ymq/actor/delete_message.cpp +++ b/ydb/core/ymq/actor/delete_message.cpp @@ -1,7 +1,7 @@ #include "action.h" -#include "error.h" -#include "executor.h" -#include "log.h" +#include "error.h" +#include "executor.h" +#include "log.h" #include "params.h" #include <ydb/core/ymq/base/helpers.h> @@ -13,40 +13,40 @@ using NKikimr::NClient::TValue; -namespace 
NKikimr::NSQS { +namespace NKikimr::NSQS { class TDeleteMessageActor : public TActionActor<TDeleteMessageActor> { public: - TDeleteMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, bool isBatch, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, isBatch ? EAction::DeleteMessageBatch : EAction::DeleteMessage, std::move(cb)) - , IsBatch_(isBatch) + TDeleteMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, bool isBatch, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, isBatch ? EAction::DeleteMessageBatch : EAction::DeleteMessage, std::move(cb)) + , IsBatch_(isBatch) { - if (IsBatch_) { - CopyAccountName(BatchRequest()); - Response_.MutableDeleteMessageBatch()->SetRequestId(RequestId_); - CopySecurityToken(BatchRequest()); - } else { - CopyAccountName(Request()); - Response_.MutableDeleteMessage()->SetRequestId(RequestId_); - CopySecurityToken(Request()); - } + if (IsBatch_) { + CopyAccountName(BatchRequest()); + Response_.MutableDeleteMessageBatch()->SetRequestId(RequestId_); + CopySecurityToken(BatchRequest()); + } else { + CopyAccountName(Request()); + Response_.MutableDeleteMessage()->SetRequestId(RequestId_); + CopySecurityToken(Request()); + } } bool DoValidate() override { if (!GetQueueName()) { - MakeError(Response_.MutableDeleteMessage(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); + MakeError(Response_.MutableDeleteMessage(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); return false; } if (IsBatch_) { - if (BatchRequest().EntriesSize() == 0) { - MakeError(Response_.MutableDeleteMessageBatch(), NErrors::EMPTY_BATCH_REQUEST); - return false; - } else if (BatchRequest().EntriesSize() > TLimits::MaxBatchSize) { - MakeError(Response_.MutableDeleteMessageBatch(), NErrors::TOO_MANY_ENTRIES_IN_BATCH_REQUEST); + if (BatchRequest().EntriesSize() == 0) { + MakeError(Response_.MutableDeleteMessageBatch(), NErrors::EMPTY_BATCH_REQUEST); return false; + } else if (BatchRequest().EntriesSize() > TLimits::MaxBatchSize) { + MakeError(Response_.MutableDeleteMessageBatch(), NErrors::TOO_MANY_ENTRIES_IN_BATCH_REQUEST); + return false; } } @@ -54,55 +54,55 @@ public: } private: - void AppendEntry(const TDeleteMessageRequest& entry, TDeleteMessageResponse* resp, size_t requestIndexInBatch) { + void AppendEntry(const TDeleteMessageRequest& entry, TDeleteMessageResponse* resp, size_t requestIndexInBatch) { try { - // Validate - const TReceipt receipt = DecodeReceiptHandle(entry.GetReceiptHandle()); // can throw - RLOG_SQS_DEBUG("Decoded receipt handle: " << receipt); - if (receipt.GetShard() >= Shards_) { - throw yexception() << "Invalid shard: " << receipt.GetShard(); - } - - const bool isFifo = IsFifoQueue(); - if (isFifo && !receipt.GetMessageGroupId()) { - throw yexception() << "No message group id"; - } - - auto& shardInfo = ShardInfo_[receipt.GetShard()]; - // Create request - if (!shardInfo.Request_) { + // Validate + const TReceipt receipt = DecodeReceiptHandle(entry.GetReceiptHandle()); // can throw + RLOG_SQS_DEBUG("Decoded receipt handle: " << receipt); + if (receipt.GetShard() >= Shards_) { + throw yexception() << "Invalid shard: " << receipt.GetShard(); + } + + const bool isFifo = IsFifoQueue(); + if (isFifo && !receipt.GetMessageGroupId()) { + throw yexception() << "No message group id"; + } + + auto& shardInfo = ShardInfo_[receipt.GetShard()]; + // Create request + if (!shardInfo.Request_) { ++RequestsToLeader_; - shardInfo.Request_ = MakeHolder<TSqsEvents::TEvDeleteMessageBatch>(); - shardInfo.Request_->Shard = 
receipt.GetShard(); - shardInfo.Request_->RequestId = RequestId_; - } - - // Add new message to shard request - if (IsBatch_) { - shardInfo.RequestToReplyIndexMapping_.push_back(requestIndexInBatch); - } - shardInfo.Request_->Messages.emplace_back(); - auto& msgReq = shardInfo.Request_->Messages.back(); - msgReq.Offset = receipt.GetOffset(); - const TInstant lockTimestamp = TInstant::MilliSeconds(receipt.GetLockTimestamp()); - msgReq.LockTimestamp = lockTimestamp; - if (isFifo) { - msgReq.MessageGroupId = receipt.GetMessageGroupId(); - msgReq.ReceiveAttemptId = receipt.GetReceiveRequestAttemptId(); + shardInfo.Request_ = MakeHolder<TSqsEvents::TEvDeleteMessageBatch>(); + shardInfo.Request_->Shard = receipt.GetShard(); + shardInfo.Request_->RequestId = RequestId_; } - - // Calc metrics - const TDuration processingDuration = TActivationContext::Now() - lockTimestamp; - COLLECT_HISTOGRAM_COUNTER(QueueCounters_, ClientMessageProcessing_Duration, processingDuration.MilliSeconds()); + + // Add new message to shard request + if (IsBatch_) { + shardInfo.RequestToReplyIndexMapping_.push_back(requestIndexInBatch); + } + shardInfo.Request_->Messages.emplace_back(); + auto& msgReq = shardInfo.Request_->Messages.back(); + msgReq.Offset = receipt.GetOffset(); + const TInstant lockTimestamp = TInstant::MilliSeconds(receipt.GetLockTimestamp()); + msgReq.LockTimestamp = lockTimestamp; + if (isFifo) { + msgReq.MessageGroupId = receipt.GetMessageGroupId(); + msgReq.ReceiveAttemptId = receipt.GetReceiveRequestAttemptId(); + } + + // Calc metrics + const TDuration processingDuration = TActivationContext::Now() - lockTimestamp; + COLLECT_HISTOGRAM_COUNTER(QueueCounters_, ClientMessageProcessing_Duration, processingDuration.MilliSeconds()); COLLECT_HISTOGRAM_COUNTER(QueueCounters_, client_processing_duration_milliseconds, processingDuration.MilliSeconds()); } catch (...) { - RLOG_SQS_WARN("Failed to process receipt handle " << entry.GetReceiptHandle() << ": " << CurrentExceptionMessage()); - MakeError(resp, NErrors::RECEIPT_HANDLE_IS_INVALID); + RLOG_SQS_WARN("Failed to process receipt handle " << entry.GetReceiptHandle() << ": " << CurrentExceptionMessage()); + MakeError(resp, NErrors::RECEIPT_HANDLE_IS_INVALID); } } - void ProcessAnswer(TDeleteMessageResponse* resp, const TSqsEvents::TEvDeleteMessageBatchResponse::TMessageResult& answer) { - switch (answer.Status) { + void ProcessAnswer(TDeleteMessageResponse* resp, const TSqsEvents::TEvDeleteMessageBatchResponse::TMessageResult& answer) { + switch (answer.Status) { case TSqsEvents::TEvDeleteMessageBatchResponse::EDeleteMessageStatus::OK: { INC_COUNTER_COUPLE(QueueCounters_, DeleteMessage_Count, deleted_count_per_second); break; @@ -118,95 +118,95 @@ private: } } - TError* MutableErrorDesc() override { - return IsBatch_ ? Response_.MutableDeleteMessageBatch()->MutableError() : Response_.MutableDeleteMessage()->MutableError(); - } - - void DoAction() override { + TError* MutableErrorDesc() override { + return IsBatch_ ? 
Response_.MutableDeleteMessageBatch()->MutableError() : Response_.MutableDeleteMessage()->MutableError(); + } + + void DoAction() override { Become(&TThis::StateFunc); - ShardInfo_.resize(Shards_); - + ShardInfo_.resize(Shards_); + if (IsBatch_) { - for (size_t i = 0, size = BatchRequest().EntriesSize(); i < size; ++i) { - const auto& entry = BatchRequest().GetEntries(i); - auto* response = Response_.MutableDeleteMessageBatch()->AddEntries(); - response->SetId(entry.GetId()); - AppendEntry(entry, response, i); - } - } else { - AppendEntry(Request(), Response_.MutableDeleteMessage(), 0); - } + for (size_t i = 0, size = BatchRequest().EntriesSize(); i < size; ++i) { + const auto& entry = BatchRequest().GetEntries(i); + auto* response = Response_.MutableDeleteMessageBatch()->AddEntries(); + response->SetId(entry.GetId()); + AppendEntry(entry, response, i); + } + } else { + AppendEntry(Request(), Response_.MutableDeleteMessage(), 0); + } if (RequestsToLeader_) { Y_VERIFY(RequestsToLeader_ <= Shards_); - for (auto& shardInfo : ShardInfo_) { - if (shardInfo.Request_) { + for (auto& shardInfo : ShardInfo_) { + if (shardInfo.Request_) { Send(QueueLeader_, shardInfo.Request_.Release()); } } } else { - SendReplyAndDie(); + SendReplyAndDie(); } } TString DoGetQueueName() const override { - return IsBatch_ ? BatchRequest().GetQueueName() : Request().GetQueueName(); + return IsBatch_ ? BatchRequest().GetQueueName() : Request().GetQueueName(); } - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvDeleteMessageBatchResponse, HandleDeleteMessageBatchResponse); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvDeleteMessageBatchResponse, HandleDeleteMessageBatchResponse); } } - void HandleDeleteMessageBatchResponse(TSqsEvents::TEvDeleteMessageBatchResponse::TPtr& ev) { - if (IsBatch_) { - Y_VERIFY(ev->Get()->Shard < Shards_); - const auto& shardInfo = ShardInfo_[ev->Get()->Shard]; - Y_VERIFY(ev->Get()->Statuses.size() == shardInfo.RequestToReplyIndexMapping_.size()); - for (size_t i = 0, size = ev->Get()->Statuses.size(); i < size; ++i) { - const size_t entryIndex = shardInfo.RequestToReplyIndexMapping_[i]; - Y_VERIFY(entryIndex < Response_.GetDeleteMessageBatch().EntriesSize()); - ProcessAnswer(Response_.MutableDeleteMessageBatch()->MutableEntries(entryIndex), ev->Get()->Statuses[i]); + void HandleDeleteMessageBatchResponse(TSqsEvents::TEvDeleteMessageBatchResponse::TPtr& ev) { + if (IsBatch_) { + Y_VERIFY(ev->Get()->Shard < Shards_); + const auto& shardInfo = ShardInfo_[ev->Get()->Shard]; + Y_VERIFY(ev->Get()->Statuses.size() == shardInfo.RequestToReplyIndexMapping_.size()); + for (size_t i = 0, size = ev->Get()->Statuses.size(); i < size; ++i) { + const size_t entryIndex = shardInfo.RequestToReplyIndexMapping_[i]; + Y_VERIFY(entryIndex < Response_.GetDeleteMessageBatch().EntriesSize()); + ProcessAnswer(Response_.MutableDeleteMessageBatch()->MutableEntries(entryIndex), ev->Get()->Statuses[i]); } } else { Y_VERIFY(RequestsToLeader_ == 1); - Y_VERIFY(ev->Get()->Statuses.size() == 1); - ProcessAnswer(Response_.MutableDeleteMessage(), ev->Get()->Statuses[0]); + Y_VERIFY(ev->Get()->Statuses.size() == 1); + ProcessAnswer(Response_.MutableDeleteMessage(), ev->Get()->Statuses[0]); } --RequestsToLeader_; if (RequestsToLeader_ == 0) { - SendReplyAndDie(); + SendReplyAndDie(); } } - const TDeleteMessageRequest& Request() const { - return SourceSqsRequest_.GetDeleteMessage(); - } - - const TDeleteMessageBatchRequest& 
BatchRequest() const { - return SourceSqsRequest_.GetDeleteMessageBatch(); - } - + const TDeleteMessageRequest& Request() const { + return SourceSqsRequest_.GetDeleteMessage(); + } + + const TDeleteMessageBatchRequest& BatchRequest() const { + return SourceSqsRequest_.GetDeleteMessageBatch(); + } + private: const bool IsBatch_; - struct TShardInfo { - std::vector<size_t> RequestToReplyIndexMapping_; - THolder<TSqsEvents::TEvDeleteMessageBatch> Request_; // actual when processing initial request, then nullptr - }; + struct TShardInfo { + std::vector<size_t> RequestToReplyIndexMapping_; + THolder<TSqsEvents::TEvDeleteMessageBatch> Request_; // actual when processing initial request, then nullptr + }; size_t RequestsToLeader_ = 0; - std::vector<TShardInfo> ShardInfo_; + std::vector<TShardInfo> ShardInfo_; }; -IActor* CreateDeleteMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TDeleteMessageActor(sourceSqsRequest, false, std::move(cb)); +IActor* CreateDeleteMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TDeleteMessageActor(sourceSqsRequest, false, std::move(cb)); } -IActor* CreateDeleteMessageBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TDeleteMessageActor(sourceSqsRequest, true, std::move(cb)); +IActor* CreateDeleteMessageBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TDeleteMessageActor(sourceSqsRequest, true, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/delete_queue.cpp b/ydb/core/ymq/actor/delete_queue.cpp index 9711b917340..6498ba54c1c 100644 --- a/ydb/core/ymq/actor/delete_queue.cpp +++ b/ydb/core/ymq/actor/delete_queue.cpp @@ -1,158 +1,158 @@ #include "action.h" -#include "common_batch_actor.h" -#include "error.h" +#include "common_batch_actor.h" +#include "error.h" #include "queue_schema.h" #include <util/string/join.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TDeleteQueueActor : public TActionActor<TDeleteQueueActor> { public: - TDeleteQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::DeleteQueue, std::move(cb)) + TDeleteQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::DeleteQueue, std::move(cb)) { - UserName_ = Request().GetAuth().GetUserName(); + UserName_ = Request().GetAuth().GetUserName(); Response_.MutableDeleteQueue()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } private: bool DoValidate() override { if (!GetQueueName()) { - MakeError(Response_.MutableDeleteQueue(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); + MakeError(Response_.MutableDeleteQueue(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); return false; } return true; } - TError* MutableErrorDesc() override { - return Response_.MutableDeleteQueue()->MutableError(); - } - - void DoAction() override { + TError* MutableErrorDesc() override { + return Response_.MutableDeleteQueue()->MutableError(); + } + + void DoAction() override { Become(&TThis::StateFunc); - SchemaActor_ = Register( + SchemaActor_ = Register( new TDeleteQueueSchemaActorV2( - TQueuePath(Cfg().GetRoot(), UserName_, GetQueueName()), SelfId(), RequestId_, UserCounters_) + TQueuePath(Cfg().GetRoot(), 
UserName_, GetQueueName()), SelfId(), RequestId_, UserCounters_) ); } TString DoGetQueueName() const override { - return Request().GetQueueName(); + return Request().GetQueueName(); } private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvQueueDeleted, HandleQueueDeleted); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvQueueDeleted, HandleQueueDeleted); } } - void HandleQueueDeleted(TSqsEvents::TEvQueueDeleted::TPtr& ev) { + void HandleQueueDeleted(TSqsEvents::TEvQueueDeleted::TPtr& ev) { SchemaActor_ = TActorId(); - if (!ev->Get()->Success) { - MakeError(Response_.MutableDeleteQueue(), NErrors::INTERNAL_FAILURE, ev->Get()->Message); + if (!ev->Get()->Success) { + MakeError(Response_.MutableDeleteQueue(), NErrors::INTERNAL_FAILURE, ev->Get()->Message); } - SendReplyAndDie(); + SendReplyAndDie(); } - void PassAway() override { - if (SchemaActor_) { - Send(SchemaActor_, new TEvPoisonPill()); + void PassAway() override { + if (SchemaActor_) { + Send(SchemaActor_, new TEvPoisonPill()); SchemaActor_ = TActorId(); - } - TActionActor<TDeleteQueueActor>::PassAway(); - } - - const TDeleteQueueRequest& Request() const { - return SourceSqsRequest_.GetDeleteQueue(); - } - + } + TActionActor<TDeleteQueueActor>::PassAway(); + } + + const TDeleteQueueRequest& Request() const { + return SourceSqsRequest_.GetDeleteQueue(); + } + private: TActorId SchemaActor_; }; -class TDeleteQueueBatchActor - : public TCommonBatchActor<TDeleteQueueBatchActor> -{ -public: - TDeleteQueueBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TCommonBatchActor(sourceSqsRequest, EAction::DeleteQueueBatch, std::move(cb)) - { - UserName_ = Request().GetAuth().GetUserName(); - Response_.MutableDeleteQueueBatch()->SetRequestId(RequestId_); - - CopySecurityToken(Request()); - } - -private: - std::vector<NKikimrClient::TSqsRequest> GenerateRequestsFromBatch() const override { - std::vector<NKikimrClient::TSqsRequest> ret; - ret.resize(Request().EntriesSize()); - for (size_t i = 0; i < Request().EntriesSize(); ++i) { - const auto& entry = Request().GetEntries(i); - auto& req = *ret[i].MutableDeleteQueue(); - req.MutableAuth()->SetUserName(UserName_); - - if (Request().HasCredentials()) { - *req.MutableCredentials() = Request().GetCredentials(); - } - - req.SetQueueName(entry.GetQueueName()); - req.SetId(entry.GetId()); - } - return ret; - } - - void OnResponses(std::vector<NKikimrClient::TSqsResponse>&& responses) override { - Y_VERIFY(Request().EntriesSize() == responses.size()); - auto& resp = *Response_.MutableDeleteQueueBatch(); - for (size_t i = 0; i < Request().EntriesSize(); ++i) { - const auto& reqEntry = Request().GetEntries(i); - auto& respEntry = *resp.AddEntries(); - Y_VERIFY(responses[i].HasDeleteQueue()); - respEntry = std::move(*responses[i].MutableDeleteQueue()); - respEntry.SetId(reqEntry.GetId()); - } - } - - bool DoValidate() override { - for (const auto& entry : Request().GetEntries()) { - if (entry.GetQueueName().empty()) { - MakeError(MutableErrorDesc(), NErrors::MISSING_PARAMETER, TStringBuilder() << "No QueueName parameter in entry " << entry.GetId() << "."); - return false; +class TDeleteQueueBatchActor + : public TCommonBatchActor<TDeleteQueueBatchActor> +{ +public: + TDeleteQueueBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TCommonBatchActor(sourceSqsRequest, EAction::DeleteQueueBatch, std::move(cb)) + { + UserName_ = 
Request().GetAuth().GetUserName(); + Response_.MutableDeleteQueueBatch()->SetRequestId(RequestId_); + + CopySecurityToken(Request()); + } + +private: + std::vector<NKikimrClient::TSqsRequest> GenerateRequestsFromBatch() const override { + std::vector<NKikimrClient::TSqsRequest> ret; + ret.resize(Request().EntriesSize()); + for (size_t i = 0; i < Request().EntriesSize(); ++i) { + const auto& entry = Request().GetEntries(i); + auto& req = *ret[i].MutableDeleteQueue(); + req.MutableAuth()->SetUserName(UserName_); + + if (Request().HasCredentials()) { + *req.MutableCredentials() = Request().GetCredentials(); } - } - return true; - } - - TError* MutableErrorDesc() override { - return Response_.MutableDeleteQueueBatch()->MutableError(); - } - - TString DoGetQueueName() const override { - return {}; - } - - const TDeleteQueueBatchRequest& Request() const { - return SourceSqsRequest_.GetDeleteQueueBatch(); - } -}; - -IActor* CreateDeleteQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TDeleteQueueActor(sourceSqsRequest, std::move(cb)); -} -IActor* CreateDeleteQueueBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TDeleteQueueBatchActor(sourceSqsRequest, std::move(cb)); + req.SetQueueName(entry.GetQueueName()); + req.SetId(entry.GetId()); + } + return ret; + } + + void OnResponses(std::vector<NKikimrClient::TSqsResponse>&& responses) override { + Y_VERIFY(Request().EntriesSize() == responses.size()); + auto& resp = *Response_.MutableDeleteQueueBatch(); + for (size_t i = 0; i < Request().EntriesSize(); ++i) { + const auto& reqEntry = Request().GetEntries(i); + auto& respEntry = *resp.AddEntries(); + Y_VERIFY(responses[i].HasDeleteQueue()); + respEntry = std::move(*responses[i].MutableDeleteQueue()); + respEntry.SetId(reqEntry.GetId()); + } + } + + bool DoValidate() override { + for (const auto& entry : Request().GetEntries()) { + if (entry.GetQueueName().empty()) { + MakeError(MutableErrorDesc(), NErrors::MISSING_PARAMETER, TStringBuilder() << "No QueueName parameter in entry " << entry.GetId() << "."); + return false; + } + } + return true; + } + + TError* MutableErrorDesc() override { + return Response_.MutableDeleteQueueBatch()->MutableError(); + } + + TString DoGetQueueName() const override { + return {}; + } + + const TDeleteQueueBatchRequest& Request() const { + return SourceSqsRequest_.GetDeleteQueueBatch(); + } +}; + +IActor* CreateDeleteQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TDeleteQueueActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +IActor* CreateDeleteQueueBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TDeleteQueueBatchActor(sourceSqsRequest, std::move(cb)); +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/delete_user.cpp b/ydb/core/ymq/actor/delete_user.cpp index bea58c5a7a2..682a55b0cd4 100644 --- a/ydb/core/ymq/actor/delete_user.cpp +++ b/ydb/core/ymq/actor/delete_user.cpp @@ -1,7 +1,7 @@ #include "action.h" -#include "error.h" -#include "executor.h" -#include "log.h" +#include "error.h" +#include "executor.h" +#include "log.h" #include "queue_schema.h" #include <ydb/public/lib/value/value.h> @@ -10,51 +10,51 @@ using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TDeleteUserActor : public TActionActor<TDeleteUserActor> { public: - static constexpr bool 
NeedExistingQueue() { - return false; - } - - TDeleteUserActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::DeleteUser, std::move(cb)) + static constexpr bool NeedExistingQueue() { + return false; + } + + TDeleteUserActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::DeleteUser, std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutableDeleteUser()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } private: bool DoValidate() override { - if (!Request().GetUserName()) { - MakeError(Response_.MutableDeleteUser(), NErrors::MISSING_PARAMETER, "No user name parameter."); + if (!Request().GetUserName()) { + MakeError(Response_.MutableDeleteUser(), NErrors::MISSING_PARAMETER, "No user name parameter."); return false; } return true; } - TError* MutableErrorDesc() override { - return Response_.MutableDeleteUser()->MutableError(); - } - - void DoAction() override { + TError* MutableErrorDesc() override { + return Response_.MutableDeleteUser()->MutableError(); + } + + void DoAction() override { Become(&TThis::StateFunc); - TExecutorBuilder(SelfId(), RequestId_) - .User(Request().GetUserName()) - .QueryId(LIST_QUEUES_ID) - .Counters(QueueCounters_) - .RetryOnTimeout() + TExecutorBuilder(SelfId(), RequestId_) + .User(Request().GetUserName()) + .QueryId(LIST_QUEUES_ID) + .Counters(QueueCounters_) + .RetryOnTimeout() .Params() .Utf8("FOLDERID", "") - .Utf8("USER_NAME", UserName_) - .ParentBuilder().Start(); + .Utf8("USER_NAME", UserName_) + .ParentBuilder().Start(); } TString DoGetQueueName() const override { @@ -62,28 +62,28 @@ private: } private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - hFunc(TSqsEvents::TEvQueueDeleted, HandleQueueDeleted); - hFunc(TSqsEvents::TEvUserDeleted, HandleUserDeleted); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TSqsEvents::TEvQueueDeleted, HandleQueueDeleted); + hFunc(TSqsEvents::TEvUserDeleted, HandleUserDeleted); } } - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - const auto& record = ev->Get()->Record; + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + const auto& record = ev->Get()->Record; ui32 status = record.GetStatus(); - auto* result = Response_.MutableDeleteUser(); + auto* result = Response_.MutableDeleteUser(); if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); const TValue queues(val["queues"]); if (queues.Size() == 0) { - Register( + Register( new TDeleteUserSchemaActor( - Cfg().GetRoot(), Request().GetUserName(), SelfId(), RequestId_, UserCounters_) + Cfg().GetRoot(), Request().GetUserName(), SelfId(), RequestId_, UserCounters_) ); return; } @@ -93,59 +93,59 @@ private: Queues_.insert(name); - Register( + Register( new TDeleteQueueSchemaActorV2( - TQueuePath(Cfg().GetRoot(), Request().GetUserName(), name), SelfId(), RequestId_, UserCounters_) + TQueuePath(Cfg().GetRoot(), Request().GetUserName(), name), SelfId(), RequestId_, UserCounters_) ); } return; } else { - RLOG_SQS_WARN("Request failed: " << record); - MakeError(result, NErrors::INTERNAL_FAILURE); + RLOG_SQS_WARN("Request failed: " << record); + MakeError(result, 
NErrors::INTERNAL_FAILURE); } - SendReplyAndDie(); + SendReplyAndDie(); } - void HandleQueueDeleted(TSqsEvents::TEvQueueDeleted::TPtr& ev) { + void HandleQueueDeleted(TSqsEvents::TEvQueueDeleted::TPtr& ev) { if (ev->Get()->Success) { Queues_.erase(ev->Get()->QueuePath.QueueName); if (Queues_.empty()) { - Register( + Register( new TDeleteUserSchemaActor( - Cfg().GetRoot(), Request().GetUserName(), SelfId(), RequestId_, UserCounters_) + Cfg().GetRoot(), Request().GetUserName(), SelfId(), RequestId_, UserCounters_) ); } return; } else { - MakeError(Response_.MutableDeleteUser(), NErrors::INTERNAL_FAILURE, ev->Get()->Message); + MakeError(Response_.MutableDeleteUser(), NErrors::INTERNAL_FAILURE, ev->Get()->Message); } - SendReplyAndDie(); + SendReplyAndDie(); } - void HandleUserDeleted(TSqsEvents::TEvUserDeleted::TPtr& ev) { + void HandleUserDeleted(TSqsEvents::TEvUserDeleted::TPtr& ev) { if (ev->Get()->Success) { } else { - MakeError(Response_.MutableDeleteUser(), NErrors::INTERNAL_FAILURE, "Can't delete user: " + ev->Get()->Error); + MakeError(Response_.MutableDeleteUser(), NErrors::INTERNAL_FAILURE, "Can't delete user: " + ev->Get()->Error); } - SendReplyAndDie(); - } - - const TDeleteUserRequest& Request() const { - return SourceSqsRequest_.GetDeleteUser(); + SendReplyAndDie(); } + const TDeleteUserRequest& Request() const { + return SourceSqsRequest_.GetDeleteUser(); + } + private: TSet<TString> Queues_; }; -IActor* CreateDeleteUserActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TDeleteUserActor(sourceSqsRequest, std::move(cb)); +IActor* CreateDeleteUserActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TDeleteUserActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/error.cpp b/ydb/core/ymq/actor/error.cpp index 23a65c56f63..6fcbf6d2d5e 100644 --- a/ydb/core/ymq/actor/error.cpp +++ b/ydb/core/ymq/actor/error.cpp @@ -1,89 +1,89 @@ -#include "error.h" - -namespace NKikimr::NSQS { - -void MakeError(NSQS::TError* error, const TErrorClass& errorClass, const TString& message) { - error->SetErrorCode(errorClass.ErrorCode); - error->SetStatus(errorClass.HttpStatusCode); - if (!message.empty()) { - error->SetMessage(message); - } else { - error->SetMessage(errorClass.DefaultMessage); - } -} - -size_t ErrorsCount(const NKikimrClient::TSqsResponse& response, TAPIStatusesCounters* counters) { -#define RESPONSE_CASE(action) \ - case NKikimrClient::TSqsResponse::Y_CAT(k, action): { \ - const auto& actionResponse = response.Y_CAT(Get, action)(); \ - const size_t hasError = actionResponse.HasError(); \ - if (counters) { \ - if (hasError) { \ - counters->AddError( \ - actionResponse.GetError().GetErrorCode()); \ - } else { \ - counters->AddOk(); \ - } \ - } \ - return hasError; \ - } - -#define RESPONSE_BATCH_CASE(action) \ - case NKikimrClient::TSqsResponse::Y_CAT(k, action): { \ - const auto& r = response.Y_CAT(Get, action)(); \ - size_t errors = r.HasError(); \ - if (errors && counters) { \ - counters->AddError(r.GetError().GetErrorCode()); \ - } \ - for (const auto& entry : r.GetEntries()) { \ - const bool hasError = entry.HasError(); \ - if (hasError) { \ - ++errors; \ - } \ - if (counters) { \ - if (hasError) { \ - counters->AddError( \ - entry.GetError().GetErrorCode()); \ - } else { \ - counters->AddOk(); \ - } \ - } \ - } \ - return errors; \ - } - - switch (response.GetResponseCase()) { - 
RESPONSE_CASE(ChangeMessageVisibility) - RESPONSE_BATCH_CASE(ChangeMessageVisibilityBatch) - RESPONSE_CASE(CreateQueue) - RESPONSE_CASE(CreateUser) - RESPONSE_CASE(DeleteMessage) - RESPONSE_BATCH_CASE(DeleteMessageBatch) - RESPONSE_CASE(DeleteQueue) - RESPONSE_BATCH_CASE(DeleteQueueBatch) - RESPONSE_CASE(DeleteUser) +#include "error.h" + +namespace NKikimr::NSQS { + +void MakeError(NSQS::TError* error, const TErrorClass& errorClass, const TString& message) { + error->SetErrorCode(errorClass.ErrorCode); + error->SetStatus(errorClass.HttpStatusCode); + if (!message.empty()) { + error->SetMessage(message); + } else { + error->SetMessage(errorClass.DefaultMessage); + } +} + +size_t ErrorsCount(const NKikimrClient::TSqsResponse& response, TAPIStatusesCounters* counters) { +#define RESPONSE_CASE(action) \ + case NKikimrClient::TSqsResponse::Y_CAT(k, action): { \ + const auto& actionResponse = response.Y_CAT(Get, action)(); \ + const size_t hasError = actionResponse.HasError(); \ + if (counters) { \ + if (hasError) { \ + counters->AddError( \ + actionResponse.GetError().GetErrorCode()); \ + } else { \ + counters->AddOk(); \ + } \ + } \ + return hasError; \ + } + +#define RESPONSE_BATCH_CASE(action) \ + case NKikimrClient::TSqsResponse::Y_CAT(k, action): { \ + const auto& r = response.Y_CAT(Get, action)(); \ + size_t errors = r.HasError(); \ + if (errors && counters) { \ + counters->AddError(r.GetError().GetErrorCode()); \ + } \ + for (const auto& entry : r.GetEntries()) { \ + const bool hasError = entry.HasError(); \ + if (hasError) { \ + ++errors; \ + } \ + if (counters) { \ + if (hasError) { \ + counters->AddError( \ + entry.GetError().GetErrorCode()); \ + } else { \ + counters->AddOk(); \ + } \ + } \ + } \ + return errors; \ + } + + switch (response.GetResponseCase()) { + RESPONSE_CASE(ChangeMessageVisibility) + RESPONSE_BATCH_CASE(ChangeMessageVisibilityBatch) + RESPONSE_CASE(CreateQueue) + RESPONSE_CASE(CreateUser) + RESPONSE_CASE(DeleteMessage) + RESPONSE_BATCH_CASE(DeleteMessageBatch) + RESPONSE_CASE(DeleteQueue) + RESPONSE_BATCH_CASE(DeleteQueueBatch) + RESPONSE_CASE(DeleteUser) RESPONSE_CASE(ListPermissions) - RESPONSE_CASE(GetQueueAttributes) - RESPONSE_BATCH_CASE(GetQueueAttributesBatch) - RESPONSE_CASE(GetQueueUrl) - RESPONSE_CASE(ListQueues) - RESPONSE_CASE(ListUsers) - RESPONSE_CASE(ModifyPermissions) - RESPONSE_CASE(PurgeQueue) - RESPONSE_BATCH_CASE(PurgeQueueBatch) - RESPONSE_CASE(ReceiveMessage) - RESPONSE_CASE(SendMessage) - RESPONSE_BATCH_CASE(SendMessageBatch) - RESPONSE_CASE(SetQueueAttributes) + RESPONSE_CASE(GetQueueAttributes) + RESPONSE_BATCH_CASE(GetQueueAttributesBatch) + RESPONSE_CASE(GetQueueUrl) + RESPONSE_CASE(ListQueues) + RESPONSE_CASE(ListUsers) + RESPONSE_CASE(ModifyPermissions) + RESPONSE_CASE(PurgeQueue) + RESPONSE_BATCH_CASE(PurgeQueueBatch) + RESPONSE_CASE(ReceiveMessage) + RESPONSE_CASE(SendMessage) + RESPONSE_BATCH_CASE(SendMessageBatch) + RESPONSE_CASE(SetQueueAttributes) RESPONSE_CASE(ListDeadLetterSourceQueues) RESPONSE_CASE(CountQueues) - - case NKikimrClient::TSqsResponse::RESPONSE_NOT_SET: - return 0; - } - -#undef RESPONSE_BATCH_CASE -#undef RESPONSE_CASE -} - -} // namespace NKikimr::NSQS + + case NKikimrClient::TSqsResponse::RESPONSE_NOT_SET: + return 0; + } + +#undef RESPONSE_BATCH_CASE +#undef RESPONSE_CASE +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/error.h b/ydb/core/ymq/actor/error.h index 90d6a70e98d..c0ed6507923 100644 --- a/ydb/core/ymq/actor/error.h +++ b/ydb/core/ymq/actor/error.h @@ -1,30 +1,30 @@ -#pragma once 
-#include "defs.h" +#pragma once +#include "defs.h" #include <ydb/core/protos/sqs.pb.h> #include <ydb/core/protos/msgbus.pb.h> #include <ydb/library/http_proxy/error/error.h> #include <ydb/core/ymq/base/counters.h> - -namespace NKikimr::NSQS { - + +namespace NKikimr::NSQS { + /// The function creates an error message for a user. /// There must not be implementation details in this message! /// Examples of implementations details are: /// - internal database query errors; /// - paths to sources /// - exception messages (if they contain source paths like those ones from ythrow macro) -void MakeError(NSQS::TError* error, const TErrorClass& errorClass, const TString& message = TString()); - -template <class TProtoMessage> -void MakeError(TProtoMessage& proto, const TErrorClass& errorClass, const TString& message = TString()) { - MakeError(proto.MutableError(), errorClass, message); -} - -template <class TProtoMessage> -void MakeError(TProtoMessage* proto, const TErrorClass& errorClass, const TString& message = TString()) { - MakeError(proto->MutableError(), errorClass, message); -} - -size_t ErrorsCount(const NKikimrClient::TSqsResponse& response, TAPIStatusesCounters* counters); - -} // namespace NKikimr::NSQS +void MakeError(NSQS::TError* error, const TErrorClass& errorClass, const TString& message = TString()); + +template <class TProtoMessage> +void MakeError(TProtoMessage& proto, const TErrorClass& errorClass, const TString& message = TString()) { + MakeError(proto.MutableError(), errorClass, message); +} + +template <class TProtoMessage> +void MakeError(TProtoMessage* proto, const TErrorClass& errorClass, const TString& message = TString()) { + MakeError(proto->MutableError(), errorClass, message); +} + +size_t ErrorsCount(const NKikimrClient::TSqsResponse& response, TAPIStatusesCounters* counters); + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/events.h b/ydb/core/ymq/actor/events.h index 9fff62c7cf8..a29447e14f5 100644 --- a/ydb/core/ymq/actor/events.h +++ b/ydb/core/ymq/actor/events.h @@ -1,5 +1,5 @@ #pragma once -#include "defs.h" +#include "defs.h" #include <ydb/core/base/defs.h> #include <ydb/core/tx/scheme_cache/scheme_cache.h> @@ -15,19 +15,19 @@ #include <ydb/core/ymq/base/query_id.h> #include <ydb/core/ymq/base/queue_path.h> #include <ydb/core/ymq/proto/records.pb.h> - + #include <library/cpp/actors/core/event_pb.h> #include <library/cpp/actors/core/event_local.h> #include <library/cpp/monlib/dynamic_counters/counters.h> #include <util/generic/hash.h> -#include <util/generic/maybe.h> -#include <util/generic/ptr.h> - -#include <map> -#include <set> +#include <util/generic/maybe.h> +#include <util/generic/ptr.h> -namespace NKikimr::NSQS { +#include <map> +#include <set> + +namespace NKikimr::NSQS { enum class EQueueState { Creating = 0, @@ -45,8 +45,8 @@ struct TSqsEvents { /// Notification about completed transaction EvExecuted, /// Request to select the requested number of visible messages - EvLockRequest, // not used - EvLockResponse, // not used + EvLockRequest, // not used + EvLockResponse, // not used /// Notification about queue creation EvQueueCreated, /// Notification about queue deletion @@ -57,65 +57,65 @@ struct TSqsEvents { /// Clear the queue EvPurgeQueue, /// Go to the next request from a packet - EvNextRequest, // not used + EvNextRequest, // not used /// Request from a proxy to sqs service - EvSqsRequest, + EvSqsRequest, /// Response from sqs service to the proxy - EvSqsResponse, + EvSqsResponse, /// Request for proxying request to another node - 
EvProxySqsRequest, + EvProxySqsRequest, /// Response to proxy from the leader - EvProxySqsResponse, + EvProxySqsResponse, /// Update queue attributes cache - EvClearQueueAttributesCache, + EvClearQueueAttributesCache, /// Incrementing of atomic counter EvAtomicCounterIncrementResult, - + /// Request for finding leader node for the given queue EvGetLeaderNodeForQueueRequest, EvGetLeaderNodeForQueueResponse, - + EvQueueLeaderDecRef, - - EvGetQueueId, - EvQueueId, - - // Cloud specific + + EvGetQueueId, + EvQueueId, + + // Cloud specific EvGetQueueFolderIdAndCustomName, EvQueueFolderIdAndCustomName, EvCountQueues, EvCountQueuesResponse, - + // Send/Receive/Delete requests. Action actor sends these requests to queue leader - EvSendMessageBatch, - EvSendMessageBatchResponse, - - EvReceiveMessageBatch, - EvReceiveMessageBatchResponse, - - EvDeleteMessageBatch, - EvDeleteMessageBatchResponse, - - EvChangeMessageVisibilityBatch, - EvChangeMessageVisibilityBatchResponse, - - EvGetRuntimeQueueAttributes, - EvGetRuntimeQueueAttributesResponse, - - EvInflyIsPurgingNotification, - EvQueuePurgedNotification, - - EvMigrationDone, - + EvSendMessageBatch, + EvSendMessageBatchResponse, + + EvReceiveMessageBatch, + EvReceiveMessageBatchResponse, + + EvDeleteMessageBatch, + EvDeleteMessageBatchResponse, + + EvChangeMessageVisibilityBatch, + EvChangeMessageVisibilityBatchResponse, + + EvGetRuntimeQueueAttributes, + EvGetRuntimeQueueAttributesResponse, + + EvInflyIsPurgingNotification, + EvQueuePurgedNotification, + + EvMigrationDone, + EvReportProcessedRequestAttributes, - EvInsertQueueCounters, - - EvUserSettingsChanged, - - EvReadQueuesList, - EvQueuesList, - + EvInsertQueueCounters, + + EvUserSettingsChanged, + + EvReadQueuesList, + EvQueuesList, + EvDeadLetterQueueNotification, EvSchemeTraversalResult, @@ -129,93 +129,93 @@ struct TSqsEvents { EvEnd, }; - using TExecutedCallback = std::function<void (const NKikimrTxUserProxy::TEvProposeTransactionStatus&)>; - - struct TQueueAttributes { - bool ContentBasedDeduplication = false; - TDuration DelaySeconds = TDuration::Zero(); - bool FifoQueue = false; - size_t MaximumMessageSize = 0; - TDuration MessageRetentionPeriod = TDuration::Zero(); - TDuration ReceiveMessageWaitTime = TDuration::Zero(); - TDuration VisibilityTimeout = TDuration::Zero(); - - // has operator<< - }; - + using TExecutedCallback = std::function<void (const NKikimrTxUserProxy::TEvProposeTransactionStatus&)>; + + struct TQueueAttributes { + bool ContentBasedDeduplication = false; + TDuration DelaySeconds = TDuration::Zero(); + bool FifoQueue = false; + size_t MaximumMessageSize = 0; + TDuration MessageRetentionPeriod = TDuration::Zero(); + TDuration ReceiveMessageWaitTime = TDuration::Zero(); + TDuration VisibilityTimeout = TDuration::Zero(); + + // has operator<< + }; + struct TEvGetConfiguration : public NActors::TEventLocal<TEvGetConfiguration, EvGetConfiguration> { - TString RequestId; + TString RequestId; TString UserName; TString QueueName; - ui64 Flags = 0; + ui64 Flags = 0; - enum EFlags { + enum EFlags { NeedQueueLeader = 1, NeedQueueAttributes = NeedQueueLeader | 2, // attributes are stored in leader actor, so, when you need attributes, you need leader - }; + }; - TEvGetConfiguration() = default; - TEvGetConfiguration(const TEvGetConfiguration& other) = default; - TEvGetConfiguration(TString requestId, const TString& user, const TString& name, ui64 flags = 0) - : RequestId(std::move(requestId)) + TEvGetConfiguration() = default; + TEvGetConfiguration(const TEvGetConfiguration& 
other) = default; + TEvGetConfiguration(TString requestId, const TString& user, const TString& name, ui64 flags = 0) + : RequestId(std::move(requestId)) , UserName(user) , QueueName(name) - , Flags(flags) + , Flags(flags) { } }; - struct TQuoterResourcesForActions : public TAtomicRefCount<TQuoterResourcesForActions> { - virtual ~TQuoterResourcesForActions() = default; - - struct TResourceDescription { - ui64 QuoterId; - ui64 ResourceId; - }; - - THashMap<EAction, TResourceDescription> ActionsResources; - TResourceDescription OtherActions; - TResourceDescription CreateQueueAction; // Separate action for create queue. Quota is requested only when actor knows that there is no such queue. https://st.yandex-team.ru/SQS-620 - }; - + struct TQuoterResourcesForActions : public TAtomicRefCount<TQuoterResourcesForActions> { + virtual ~TQuoterResourcesForActions() = default; + + struct TResourceDescription { + ui64 QuoterId; + ui64 ResourceId; + }; + + THashMap<EAction, TResourceDescription> ActionsResources; + TResourceDescription OtherActions; + TResourceDescription CreateQueueAction; // Separate action for create queue. Quota is requested only when actor knows that there is no such queue. https://st.yandex-team.ru/SQS-620 + }; + struct TEvConfiguration : public NActors::TEventLocal<TEvConfiguration, EvConfiguration> { - // Success status - bool Fail = false; - - // Existence - bool UserExists = false; - bool QueueExists = false; - - // Queue info - ui64 Shards = 1; - bool Fifo = false; - TMaybe<TQueueAttributes> QueueAttributes; - TIntrusivePtr<TQuoterResourcesForActions> QuoterResources; - - // Counters - TIntrusivePtr<NMonitoring::TDynamicCounters> SqsCoreCounters; // Raw counters interface. Is is not prefered to use them - TIntrusivePtr<TUserCounters> UserCounters; - TIntrusivePtr<TQueueCounters> QueueCounters; - - // Common info - TString RootUrl; + // Success status + bool Fail = false; + + // Existence + bool UserExists = false; + bool QueueExists = false; + + // Queue info + ui64 Shards = 1; + bool Fifo = false; + TMaybe<TQueueAttributes> QueueAttributes; + TIntrusivePtr<TQuoterResourcesForActions> QuoterResources; + + // Counters + TIntrusivePtr<NMonitoring::TDynamicCounters> SqsCoreCounters; // Raw counters interface. Is is not prefered to use them + TIntrusivePtr<TUserCounters> UserCounters; + TIntrusivePtr<TQueueCounters> QueueCounters; + + // Common info + TString RootUrl; TActorId SchemeCache; TActorId QueueLeader; }; - struct TEvClearQueueAttributesCache : public NActors::TEventLocal<TEvClearQueueAttributesCache, EvClearQueueAttributesCache> { + struct TEvClearQueueAttributesCache : public NActors::TEventLocal<TEvClearQueueAttributesCache, EvClearQueueAttributesCache> { }; struct TEvExecute : public NActors::TEventLocal<TEvExecute, EvExecute> { /// Query sender TActorId Sender; // User request id this query belongs to - TString RequestId; + TString RequestId; /// Queue path in the catalog TQueuePath QueuePath; /// Shard id we address by this query ui64 Shard; /// Query index to execute - EQueryId QueryIdx; + EQueryId QueryIdx; /// Query params NKikimrMiniKQL::TParams Params; /// Callback that is called on response receiving @@ -223,21 +223,21 @@ struct TSqsEvents { /// This option specifies if it safe to retry transaction in case of undertermined transaction status, /// for example timeout. If transaction is idempotent, this options makes the whole system less sensitive to /// tablets restarts. 
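[Editor's sketch, not part of this commit's diff] The comment above describes TEvExecute::RetryOnTimeout. Based on the TExecutorBuilder usage visible elsewhere in this same commit (delete_user.cpp and executor.cpp), the flag is normally set from an action actor by calling .RetryOnTimeout() on the builder; the query id and parameter names below are placeholders for illustration only.

    // Illustrative only: assumes the usual TActionActor members
    // (SelfId(), RequestId_, UserName_, QueueCounters_) are in scope.
    void StartIdempotentQuery() {
        TExecutorBuilder(SelfId(), RequestId_)
            .User(UserName_)
            .QueryId(LIST_QUEUES_ID)      // placeholder: any idempotent query id
            .Counters(QueueCounters_)
            .RetryOnTimeout()             // ends up in TEvExecute::RetryOnTimeout
            .Params()
                .Utf8("USER_NAME", UserName_)
            .ParentBuilder().Start();
    }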
- bool RetryOnTimeout = false; + bool RetryOnTimeout = false; - TEvExecute() = default; + TEvExecute() = default; - TEvExecute(const TEvExecute& other) = default; + TEvExecute(const TEvExecute& other) = default; TEvExecute(const TActorId& sender, TString requestId, const TQueuePath& path, const EQueryId idx, const ui64 shard = 0) : Sender(sender) - , RequestId(std::move(requestId)) + , RequestId(std::move(requestId)) , QueuePath(path) , Shard(shard) - , QueryIdx(idx) - { - Y_VERIFY(QueryIdx < EQueryId::QUERY_VECTOR_SIZE); - } + , QueryIdx(idx) + { + Y_VERIFY(QueryIdx < EQueryId::QUERY_VECTOR_SIZE); + } }; struct TEvExecuted : public NActors::TEventPB<TEvExecuted, NKikimrTxUserProxy::TEvProposeTransactionStatus, EvExecuted> { @@ -246,64 +246,64 @@ struct TSqsEvents { TExecutedCallback Cb; ui64 Shard; - TEvExecuted() + TEvExecuted() : Shard(0) { } - explicit TEvExecuted(TExecutedCallback cb, ui64 shard) + explicit TEvExecuted(TExecutedCallback cb, ui64 shard) : Cb(cb) , Shard(shard) { } - TEvExecuted(const TRecord& rec, TExecutedCallback cb, ui64 shard) + TEvExecuted(const TRecord& rec, TExecutedCallback cb, ui64 shard) : TEventPB(rec) , Cb(cb) , Shard(shard) { } - void Call() { + void Call() { if (Cb) { - Cb(Record); - } - } - - static bool IsOk(const NKikimrTxUserProxy::TEvProposeTransactionStatus& record) { - const ui32 status = record.GetStatus(); - return status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete; - } - - bool IsOk() const { - return IsOk(Record); - } - - // Error that occurs when user has deleted & created queue and then requested other node to do some prepared query. - // In these cases we should clear prepared query with old tables ids and recompile them. - static bool IsResolvingError(const NKikimrTxUserProxy::TEvProposeTransactionStatus& record) { - if (record.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ResolveError) { - return true; - } - if (record.GetStatusCode() == NKikimrIssues::TStatusIds::PATH_NOT_EXIST || record.GetStatusCode() == NKikimrIssues::TStatusIds::SCHEME_ERROR) { - return true; + Cb(Record); } - for (const Ydb::Issue::IssueMessage& issue : record.GetIssues()) { - const ui32 issueCode = issue.issue_code(); - if (issueCode == NKikimrIssues::TIssuesIds::GENERIC_RESOLVE_ERROR - || issueCode == NKikimrIssues::TIssuesIds::PATH_NOT_EXIST) { - return true; - } - } - return false; - } - - bool IsResolvingError() const { - return IsResolvingError(Record); } + + static bool IsOk(const NKikimrTxUserProxy::TEvProposeTransactionStatus& record) { + const ui32 status = record.GetStatus(); + return status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete; + } + + bool IsOk() const { + return IsOk(Record); + } + + // Error that occurs when user has deleted & created queue and then requested other node to do some prepared query. + // In these cases we should clear prepared query with old tables ids and recompile them. 
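[Editor's sketch, not part of this commit's diff] The comment above explains when IsResolvingError() fires. A handler for TSqsEvents::TEvExecuted would presumably use IsOk()/IsResolvingError() as sketched below, modelled on the handlers elsewhere in this commit (e.g. delete_user.cpp); InvalidatePreparedQuery() and RetryQuery() are hypothetical helpers standing in for the cache clearing and recompilation the comment refers to.

    // Illustrative only; InvalidatePreparedQuery()/RetryQuery() are hypothetical.
    void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) {
        const auto& record = ev->Get()->Record;
        if (TSqsEvents::TEvExecuted::IsOk(record)) {
            // Process record.GetExecutionEngineEvaluatedResponse() here.
        } else if (TSqsEvents::TEvExecuted::IsResolvingError(record)) {
            // Queue was deleted and recreated: drop the cached compiled query
            // with the old table ids and recompile before retrying.
            InvalidatePreparedQuery();
            RetryQuery();
        } else {
            MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE);
            SendReplyAndDie();
        }
    }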
+ static bool IsResolvingError(const NKikimrTxUserProxy::TEvProposeTransactionStatus& record) { + if (record.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ResolveError) { + return true; + } + if (record.GetStatusCode() == NKikimrIssues::TStatusIds::PATH_NOT_EXIST || record.GetStatusCode() == NKikimrIssues::TStatusIds::SCHEME_ERROR) { + return true; + } + for (const Ydb::Issue::IssueMessage& issue : record.GetIssues()) { + const ui32 issueCode = issue.issue_code(); + if (issueCode == NKikimrIssues::TIssuesIds::GENERIC_RESOLVE_ERROR + || issueCode == NKikimrIssues::TIssuesIds::PATH_NOT_EXIST) { + return true; + } + } + return false; + } + + bool IsResolvingError() const { + return IsResolvingError(Record); + } }; struct TEvUserCreated : public NActors::TEventLocal<TEvUserCreated, EvUserCreated> { bool Success; - TEvUserCreated(bool success) + TEvUserCreated(bool success) : Success(success) { } @@ -321,13 +321,13 @@ struct TSqsEvents { }; struct TEvQueueCreated : public NActors::TEventLocal<TEvQueueCreated, EvQueueCreated> { - TString QueueId; - TString ExistingQueueResourceId; - TString Error; - EQueueState State; - bool AlreadyExists; - bool Success; - const TErrorClass* ErrorClass = nullptr; + TString QueueId; + TString ExistingQueueResourceId; + TString Error; + EQueueState State; + bool AlreadyExists; + bool Success; + const TErrorClass* ErrorClass = nullptr; }; struct TEvQueueDeleted : public NActors::TEventLocal<TEvQueueDeleted, EvQueueDeleted> { @@ -372,10 +372,10 @@ struct TSqsEvents { TInstant Boundary; ui64 Shard; - TEvPurgeQueue() = default; + TEvPurgeQueue() = default; TEvPurgeQueue(const TEvPurgeQueue& other) - : QueuePath(other.QueuePath) + : QueuePath(other.QueuePath) , Boundary(other.Boundary) , Shard(other.Shard) { @@ -383,188 +383,188 @@ struct TSqsEvents { }; // Request that is sent from proxy to sqs service actor on other (leader) node - struct TEvSqsRequest : public NActors::TEventPB<TEvSqsRequest, NKikimrClient::TSqsRequest, EvSqsRequest> { - using TEventPB::TEventPB; - }; - - // Response to TEvSqsRequest - struct TEvSqsResponse : public NActors::TEventPB<TEvSqsResponse, NKikimrClient::TSqsResponse, EvSqsResponse> { - using TEventPB::TEventPB; - }; - + struct TEvSqsRequest : public NActors::TEventPB<TEvSqsRequest, NKikimrClient::TSqsRequest, EvSqsRequest> { + using TEventPB::TEventPB; + }; + + // Response to TEvSqsRequest + struct TEvSqsResponse : public NActors::TEventPB<TEvSqsResponse, NKikimrClient::TSqsResponse, EvSqsResponse> { + using TEventPB::TEventPB; + }; + // Request for proxying request to sqs service actor on other (leader) node - struct TEvProxySqsRequest : public NActors::TEventLocal<TEvProxySqsRequest, EvProxySqsRequest> { - NKikimrClient::TSqsRequest Record; - + struct TEvProxySqsRequest : public NActors::TEventLocal<TEvProxySqsRequest, EvProxySqsRequest> { + NKikimrClient::TSqsRequest Record; + // Information to identify leader node - TString UserName; - TString QueueName; - - TString RequestId; - - TEvProxySqsRequest() = default; - - TEvProxySqsRequest(const NKikimrClient::TSqsRequest& record, TString userName, TString queueName) - : Record(record) - , UserName(std::move(userName)) - , QueueName(std::move(queueName)) - , RequestId(Record.GetRequestId()) - { - } - - TEvProxySqsRequest(NKikimrClient::TSqsRequest&& record, TString userName, TString queueName) - : UserName(std::move(userName)) - , QueueName(std::move(queueName)) - , RequestId(record.GetRequestId()) - { - Record.Swap(&record); - } - }; - - // Response to 
TEvProxySqsRequest - struct TEvProxySqsResponse : public NActors::TEventLocal<TEvProxySqsResponse, EvProxySqsResponse> { - enum class EProxyStatus { // can be written to text stream - OK, + TString UserName; + TString QueueName; + + TString RequestId; + + TEvProxySqsRequest() = default; + + TEvProxySqsRequest(const NKikimrClient::TSqsRequest& record, TString userName, TString queueName) + : Record(record) + , UserName(std::move(userName)) + , QueueName(std::move(queueName)) + , RequestId(Record.GetRequestId()) + { + } + + TEvProxySqsRequest(NKikimrClient::TSqsRequest&& record, TString userName, TString queueName) + : UserName(std::move(userName)) + , QueueName(std::move(queueName)) + , RequestId(record.GetRequestId()) + { + Record.Swap(&record); + } + }; + + // Response to TEvProxySqsRequest + struct TEvProxySqsResponse : public NActors::TEventLocal<TEvProxySqsResponse, EvProxySqsResponse> { + enum class EProxyStatus { // can be written to text stream + OK, LeaderResolvingError, - SessionError, - QueueDoesNotExist, - UserDoesNotExist, - }; - - NKikimrClient::TSqsResponse Record; - EProxyStatus ProxyStatus = EProxyStatus::OK; - - TEvProxySqsResponse() = default; - - TEvProxySqsResponse(const NKikimrClient::TSqsResponse& record, EProxyStatus status = EProxyStatus::OK) - : Record(record) - , ProxyStatus(status) - { - } - - TEvProxySqsResponse(NKikimrClient::TSqsResponse&& record, EProxyStatus status = EProxyStatus::OK) - : ProxyStatus(status) - { - Record.Swap(&record); - } - }; - + SessionError, + QueueDoesNotExist, + UserDoesNotExist, + }; + + NKikimrClient::TSqsResponse Record; + EProxyStatus ProxyStatus = EProxyStatus::OK; + + TEvProxySqsResponse() = default; + + TEvProxySqsResponse(const NKikimrClient::TSqsResponse& record, EProxyStatus status = EProxyStatus::OK) + : Record(record) + , ProxyStatus(status) + { + } + + TEvProxySqsResponse(NKikimrClient::TSqsResponse&& record, EProxyStatus status = EProxyStatus::OK) + : ProxyStatus(status) + { + Record.Swap(&record); + } + }; + struct TEvGetLeaderNodeForQueueRequest : public NActors::TEventLocal<TEvGetLeaderNodeForQueueRequest, EvGetLeaderNodeForQueueRequest> { - TString RequestId; - TString UserName; - TString QueueName; - + TString RequestId; + TString UserName; + TString QueueName; + TEvGetLeaderNodeForQueueRequest(TString requestId, TString user, TString queue) - : RequestId(std::move(requestId)) - , UserName(std::move(user)) - , QueueName(std::move(queue)) - { - } - }; - + : RequestId(std::move(requestId)) + , UserName(std::move(user)) + , QueueName(std::move(queue)) + { + } + }; + struct TEvGetLeaderNodeForQueueResponse : public NActors::TEventLocal<TEvGetLeaderNodeForQueueResponse, EvGetLeaderNodeForQueueResponse> { - enum class EStatus { - OK, - NoUser, - NoQueue, + enum class EStatus { + OK, + NoUser, + NoQueue, FailedToConnectToLeader, - Error, - }; - - TString RequestId; - TString UserName; - TString QueueName; - ui64 NodeId = 0; - EStatus Status = EStatus::OK; - + Error, + }; + + TString RequestId; + TString UserName; + TString QueueName; + ui64 NodeId = 0; + EStatus Status = EStatus::OK; + TEvGetLeaderNodeForQueueResponse(TString requestId, TString user, TString queue, ui64 nodeId) - : RequestId(std::move(requestId)) - , UserName(std::move(user)) - , QueueName(std::move(queue)) - , NodeId(nodeId) - { - } - + : RequestId(std::move(requestId)) + , UserName(std::move(user)) + , QueueName(std::move(queue)) + , NodeId(nodeId) + { + } + TEvGetLeaderNodeForQueueResponse(TString requestId, TString user, TString queue, EStatus error) - 
: RequestId(std::move(requestId)) - , UserName(std::move(user)) - , QueueName(std::move(queue)) - , Status(error) - { - } - }; - + : RequestId(std::move(requestId)) + , UserName(std::move(user)) + , QueueName(std::move(queue)) + , Status(error) + { + } + }; + struct TEvQueueLeaderDecRef : public NActors::TEventLocal<TEvQueueLeaderDecRef, EvQueueLeaderDecRef> { - }; - - struct TEvGetQueueId : public NActors::TEventLocal<TEvGetQueueId, EvGetQueueId> { - TString RequestId; - TString UserName; - TString CustomQueueName; // custom name in case of Yandex.Cloud mode and queue name in case of Yandex - TString FolderId; // empty in case of Yandex mode - - TEvGetQueueId(TString requestId, TString userName, TString customQueueName, TString folderId) - : RequestId(std::move(requestId)) - , UserName(std::move(userName)) - , CustomQueueName(std::move(customQueueName)) - , FolderId(std::move(folderId)) - { - } - }; - - struct TEvQueueId : public NActors::TEventLocal<TEvQueueId, EvQueueId> { - bool Exists = false; - bool Failed = false; - TString QueueId; // resource id in case of Yandex.Cloud mode and queue name in case of Yandex + }; + + struct TEvGetQueueId : public NActors::TEventLocal<TEvGetQueueId, EvGetQueueId> { + TString RequestId; + TString UserName; + TString CustomQueueName; // custom name in case of Yandex.Cloud mode and queue name in case of Yandex + TString FolderId; // empty in case of Yandex mode + + TEvGetQueueId(TString requestId, TString userName, TString customQueueName, TString folderId) + : RequestId(std::move(requestId)) + , UserName(std::move(userName)) + , CustomQueueName(std::move(customQueueName)) + , FolderId(std::move(folderId)) + { + } + }; + + struct TEvQueueId : public NActors::TEventLocal<TEvQueueId, EvQueueId> { + bool Exists = false; + bool Failed = false; + TString QueueId; // resource id in case of Yandex.Cloud mode and queue name in case of Yandex ui64 Version = 0; // last queue version registered in service actor ui64 ShardsCount = 0; // number of queue shards - + TEvQueueId(const bool failed = false) - : Failed(failed) - { - } - + : Failed(failed) + { + } + explicit TEvQueueId(const TString queueId, const ui64 version, const ui64 shardsCount) - : Exists(true) - , QueueId(std::move(queueId)) + : Exists(true) + , QueueId(std::move(queueId)) , Version(version) , ShardsCount(shardsCount) - { - } - }; - + { + } + }; + struct TEvGetQueueFolderIdAndCustomName : public NActors::TEventLocal<TEvGetQueueFolderIdAndCustomName, EvGetQueueFolderIdAndCustomName> { - TString RequestId; - TString UserName; - TString QueueName; - + TString RequestId; + TString UserName; + TString QueueName; + TEvGetQueueFolderIdAndCustomName(TString requestId, TString userName, TString queueName) - : RequestId(std::move(requestId)) - , UserName(std::move(userName)) - , QueueName(std::move(queueName)) - { - } - }; - + : RequestId(std::move(requestId)) + , UserName(std::move(userName)) + , QueueName(std::move(queueName)) + { + } + }; + struct TEvQueueFolderIdAndCustomName : public NActors::TEventLocal<TEvQueueFolderIdAndCustomName, EvQueueFolderIdAndCustomName> { - bool Exists = false; - bool Failed = false; - TString QueueFolderId; + bool Exists = false; + bool Failed = false; + TString QueueFolderId; TString QueueCustomName; - + TEvQueueFolderIdAndCustomName(bool failed = false) - : Failed(failed) - { - } - + : Failed(failed) + { + } + explicit TEvQueueFolderIdAndCustomName(TString queueFolderId, TString queueCustomName) - : Exists(true) - , QueueFolderId(std::move(queueFolderId)) + : Exists(true) + 
, QueueFolderId(std::move(queueFolderId)) , QueueCustomName(std::move(queueCustomName)) - { - } - }; - + { + } + }; + struct TEvCountQueues : public NActors::TEventLocal<TEvCountQueues, EvCountQueues> { TString RequestId; TString UserName; @@ -591,225 +591,225 @@ struct TSqsEvents { } }; - struct TEvSendMessageBatch : public NActors::TEventLocal<TEvSendMessageBatch, EvSendMessageBatch> { - struct TMessageEntry { - TString MessageId; - - TString Body; - TString Attributes; // serialized attributes - TDuration Delay; - - // for fifo - TString DeduplicationId; - TString MessageGroupId; - }; - - TString RequestId; - TString SenderId; - std::vector<TMessageEntry> Messages; - }; - - struct TEvSendMessageBatchResponse : public NActors::TEventLocal<TEvSendMessageBatchResponse, EvSendMessageBatchResponse> { - enum class ESendMessageStatus { - OK, - AlreadySent, // deduplicated - Failed, - }; - struct TMessageResult { - ESendMessageStatus Status; - TString MessageId; // In case of deduplication reasons message id can be different from one that was provided by action actor - ui64 SequenceNumber = 0; - }; - std::vector<TMessageResult> Statuses; - }; - - // Request to try to receive message batch. + struct TEvSendMessageBatch : public NActors::TEventLocal<TEvSendMessageBatch, EvSendMessageBatch> { + struct TMessageEntry { + TString MessageId; + + TString Body; + TString Attributes; // serialized attributes + TDuration Delay; + + // for fifo + TString DeduplicationId; + TString MessageGroupId; + }; + + TString RequestId; + TString SenderId; + std::vector<TMessageEntry> Messages; + }; + + struct TEvSendMessageBatchResponse : public NActors::TEventLocal<TEvSendMessageBatchResponse, EvSendMessageBatchResponse> { + enum class ESendMessageStatus { + OK, + AlreadySent, // deduplicated + Failed, + }; + struct TMessageResult { + ESendMessageStatus Status; + TString MessageId; // In case of deduplication reasons message id can be different from one that was provided by action actor + ui64 SequenceNumber = 0; + }; + std::vector<TMessageResult> Statuses; + }; + + // Request to try to receive message batch. // While processing this request leader doesn't perform long polling. 
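[Editor's sketch, not part of this commit's diff] The comment above introduces TEvReceiveMessageBatch. A minimal example of how an action actor would presumably fill it and send it to the queue leader, using only the fields declared just below; QueueLeader_ and RequestId_ are the usual action-actor members seen in other actors in this commit, and the numeric values are arbitrary.

    // Illustrative only: values are examples, not taken from this commit.
    auto req = MakeHolder<TSqsEvents::TEvReceiveMessageBatch>();
    req->RequestId = RequestId_;
    req->MaxMessagesCount = 10;
    req->VisibilityTimeout = TDuration::Seconds(30);
    req->WaitDeadline = TInstant::Zero();   // no long polling, as the comment above notes
    Send(QueueLeader_, req.Release());
    // The leader replies with TSqsEvents::TEvReceiveMessageBatchResponse,
    // whose Messages vector is declared below.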
- struct TEvReceiveMessageBatch : public NActors::TEventLocal<TEvReceiveMessageBatch, EvReceiveMessageBatch> { - TString RequestId; - size_t MaxMessagesCount = 0; - TString ReceiveAttemptId; - TDuration VisibilityTimeout = TDuration::Zero(); - TInstant WaitDeadline = TInstant::Zero(); - }; - - struct TEvReceiveMessageBatchResponse : public NActors::TEventLocal<TEvReceiveMessageBatchResponse, EvReceiveMessageBatchResponse> { - bool Failed = false; - bool OverLimit = false; - bool Retried = false; - - struct TMessageResult { - TInstant FirstReceiveTimestamp = TInstant::Zero(); - ui32 ReceiveCount = 0; - TString MessageId; - TString MessageDeduplicationId; - TString MessageGroupId; - TString MessageAttributes; - TString Data; - TReceipt ReceiptHandle; - TInstant SentTimestamp = TInstant::Zero(); - ui64 SequenceNumber = 0; - TString SenderId; - }; - std::vector<TMessageResult> Messages; - }; - - struct TEvDeleteMessageBatch : public NActors::TEventLocal<TEvDeleteMessageBatch, EvDeleteMessageBatch> { - struct TMessageEntry { - ui64 Offset = 0; - TInstant LockTimestamp = TInstant::Zero(); - - // for fifo - TString MessageGroupId; - TString ReceiveAttemptId; - }; - - TString RequestId; - ui64 Shard = 0; - std::vector<TMessageEntry> Messages; - }; - - struct TEvDeleteMessageBatchResponse : public NActors::TEventLocal<TEvDeleteMessageBatchResponse, EvDeleteMessageBatchResponse> { - enum class EDeleteMessageStatus { - OK, - NotFound, - Failed, - }; - struct TMessageResult { - EDeleteMessageStatus Status = EDeleteMessageStatus::NotFound; - }; - ui64 Shard = 0; - std::vector<TMessageResult> Statuses; - }; - - struct TEvChangeMessageVisibilityBatch : public NActors::TEventLocal<TEvChangeMessageVisibilityBatch, EvChangeMessageVisibilityBatch> { - struct TMessageEntry { - ui64 Offset = 0; - TInstant LockTimestamp; - - // for fifo - TString MessageGroupId; - TString ReceiveAttemptId; - - TInstant VisibilityDeadline; - }; - - TString RequestId; - ui64 Shard = 0; - TInstant NowTimestamp; - std::vector<TMessageEntry> Messages; - }; - - struct TEvChangeMessageVisibilityBatchResponse : public NActors::TEventLocal<TEvChangeMessageVisibilityBatchResponse, EvChangeMessageVisibilityBatchResponse> { - enum class EMessageStatus { - OK, - NotFound, - NotInFly, - Failed, - }; - struct TMessageResult { - EMessageStatus Status = EMessageStatus::NotFound; - }; - ui64 Shard = 0; - std::vector<TMessageResult> Statuses; - }; - - struct TEvInflyIsPurgingNotification : public NActors::TEventLocal<TEvInflyIsPurgingNotification, EvInflyIsPurgingNotification> { - ui64 Shard = 0; - std::vector<ui64> Offsets; - }; - - struct TEvQueuePurgedNotification : public NActors::TEventLocal<TEvQueuePurgedNotification, EvQueuePurgedNotification> { - ui64 Shard = 0; - ui64 NewMessagesCount = 0; - }; - - struct TEvGetRuntimeQueueAttributes : public NActors::TEventLocal<TEvGetRuntimeQueueAttributes, EvGetRuntimeQueueAttributes> { - TString RequestId; - - TEvGetRuntimeQueueAttributes(const TString& requestId) - :RequestId(requestId) - { - } - }; - - struct TEvGetRuntimeQueueAttributesResponse : public NActors::TEventLocal<TEvGetRuntimeQueueAttributesResponse, EvGetRuntimeQueueAttributesResponse> { - bool Failed = false; - - size_t MessagesCount = 0; - size_t InflyMessagesCount = 0; - size_t MessagesDelayed = 0; - TInstant CreatedTimestamp; - }; - - struct TEvMigrationDone : public NActors::TEventLocal<TEvMigrationDone, EvMigrationDone> { - bool Success = true; - - TEvMigrationDone(bool ok = true) - : Success(ok) - { - } - }; + struct 
TEvReceiveMessageBatch : public NActors::TEventLocal<TEvReceiveMessageBatch, EvReceiveMessageBatch> { + TString RequestId; + size_t MaxMessagesCount = 0; + TString ReceiveAttemptId; + TDuration VisibilityTimeout = TDuration::Zero(); + TInstant WaitDeadline = TInstant::Zero(); + }; + + struct TEvReceiveMessageBatchResponse : public NActors::TEventLocal<TEvReceiveMessageBatchResponse, EvReceiveMessageBatchResponse> { + bool Failed = false; + bool OverLimit = false; + bool Retried = false; + + struct TMessageResult { + TInstant FirstReceiveTimestamp = TInstant::Zero(); + ui32 ReceiveCount = 0; + TString MessageId; + TString MessageDeduplicationId; + TString MessageGroupId; + TString MessageAttributes; + TString Data; + TReceipt ReceiptHandle; + TInstant SentTimestamp = TInstant::Zero(); + ui64 SequenceNumber = 0; + TString SenderId; + }; + std::vector<TMessageResult> Messages; + }; + + struct TEvDeleteMessageBatch : public NActors::TEventLocal<TEvDeleteMessageBatch, EvDeleteMessageBatch> { + struct TMessageEntry { + ui64 Offset = 0; + TInstant LockTimestamp = TInstant::Zero(); + + // for fifo + TString MessageGroupId; + TString ReceiveAttemptId; + }; + + TString RequestId; + ui64 Shard = 0; + std::vector<TMessageEntry> Messages; + }; + + struct TEvDeleteMessageBatchResponse : public NActors::TEventLocal<TEvDeleteMessageBatchResponse, EvDeleteMessageBatchResponse> { + enum class EDeleteMessageStatus { + OK, + NotFound, + Failed, + }; + struct TMessageResult { + EDeleteMessageStatus Status = EDeleteMessageStatus::NotFound; + }; + ui64 Shard = 0; + std::vector<TMessageResult> Statuses; + }; + + struct TEvChangeMessageVisibilityBatch : public NActors::TEventLocal<TEvChangeMessageVisibilityBatch, EvChangeMessageVisibilityBatch> { + struct TMessageEntry { + ui64 Offset = 0; + TInstant LockTimestamp; + + // for fifo + TString MessageGroupId; + TString ReceiveAttemptId; + + TInstant VisibilityDeadline; + }; + + TString RequestId; + ui64 Shard = 0; + TInstant NowTimestamp; + std::vector<TMessageEntry> Messages; + }; + + struct TEvChangeMessageVisibilityBatchResponse : public NActors::TEventLocal<TEvChangeMessageVisibilityBatchResponse, EvChangeMessageVisibilityBatchResponse> { + enum class EMessageStatus { + OK, + NotFound, + NotInFly, + Failed, + }; + struct TMessageResult { + EMessageStatus Status = EMessageStatus::NotFound; + }; + ui64 Shard = 0; + std::vector<TMessageResult> Statuses; + }; + + struct TEvInflyIsPurgingNotification : public NActors::TEventLocal<TEvInflyIsPurgingNotification, EvInflyIsPurgingNotification> { + ui64 Shard = 0; + std::vector<ui64> Offsets; + }; + + struct TEvQueuePurgedNotification : public NActors::TEventLocal<TEvQueuePurgedNotification, EvQueuePurgedNotification> { + ui64 Shard = 0; + ui64 NewMessagesCount = 0; + }; + + struct TEvGetRuntimeQueueAttributes : public NActors::TEventLocal<TEvGetRuntimeQueueAttributes, EvGetRuntimeQueueAttributes> { + TString RequestId; + + TEvGetRuntimeQueueAttributes(const TString& requestId) + :RequestId(requestId) + { + } + }; + + struct TEvGetRuntimeQueueAttributesResponse : public NActors::TEventLocal<TEvGetRuntimeQueueAttributesResponse, EvGetRuntimeQueueAttributesResponse> { + bool Failed = false; + + size_t MessagesCount = 0; + size_t InflyMessagesCount = 0; + size_t MessagesDelayed = 0; + TInstant CreatedTimestamp; + }; + + struct TEvMigrationDone : public NActors::TEventLocal<TEvMigrationDone, EvMigrationDone> { + bool Success = true; + + TEvMigrationDone(bool ok = true) + : Success(ok) + { + } + }; struct 
TEvReportProcessedRequestAttributes : public NActors::TEventLocal<TEvReportProcessedRequestAttributes, EvReportProcessedRequestAttributes> { TProcessedRequestAttributes Data; }; - - struct TEvInsertQueueCounters : public NActors::TEventLocal<TEvInsertQueueCounters, EvInsertQueueCounters> { + + struct TEvInsertQueueCounters : public NActors::TEventLocal<TEvInsertQueueCounters, EvInsertQueueCounters> { TEvInsertQueueCounters(const TString& user, const TString& queue, ui64 leaderTabletId) - : User(user) - , Queue(queue) + : User(user) + , Queue(queue) , LeaderTabletId(leaderTabletId) - { - } - - TString User; - TString Queue; + { + } + + TString User; + TString Queue; ui64 LeaderTabletId; - }; - - struct TEvUserSettingsChanged : public NActors::TEventLocal<TEvUserSettingsChanged, EvUserSettingsChanged> { + }; + + struct TEvUserSettingsChanged : public NActors::TEventLocal<TEvUserSettingsChanged, EvUserSettingsChanged> { TEvUserSettingsChanged(const TString& userName, std::shared_ptr<const std::map<TString, TString>> settings, std::shared_ptr<const std::set<TString>> diff) - : UserName(userName) - , Settings(std::move(settings)) - , Diff(std::move(diff)) - { - } - - TString UserName; + : UserName(userName) + , Settings(std::move(settings)) + , Diff(std::move(diff)) + { + } + + TString UserName; std::shared_ptr<const std::map<TString, TString>> Settings; std::shared_ptr<const std::set<TString>> Diff; - }; - - struct TEvReadQueuesList : public NActors::TEventLocal<TEvReadQueuesList, EvReadQueuesList> { - }; - - struct TEvQueuesList : public NActors::TEventLocal<TEvQueuesList, EvQueuesList> { - struct TQueueRecord { - TString UserName; - TString QueueName; + }; + + struct TEvReadQueuesList : public NActors::TEventLocal<TEvReadQueuesList, EvReadQueuesList> { + }; + + struct TEvQueuesList : public NActors::TEventLocal<TEvQueuesList, EvQueuesList> { + struct TQueueRecord { + TString UserName; + TString QueueName; ui64 LeaderTabletId = 0; - TString CustomName; - TString FolderId; + TString CustomName; + TString FolderId; TString DlqName; - ui64 Version = 0; - ui64 ShardsCount = 0; - TInstant CreatedTimestamp; - - bool operator<(const TQueueRecord& r) const { - return std::tie(UserName, QueueName) < std::tie(r.UserName, r.QueueName); - } - }; - - bool Success = true; - std::vector<TQueueRecord> SortedQueues; - - explicit TEvQueuesList(bool success = true) - : Success(success) - { - } - }; + ui64 Version = 0; + ui64 ShardsCount = 0; + TInstant CreatedTimestamp; + + bool operator<(const TQueueRecord& r) const { + return std::tie(UserName, QueueName) < std::tie(r.UserName, r.QueueName); + } + }; + + bool Success = true; + std::vector<TQueueRecord> SortedQueues; + + explicit TEvQueuesList(bool success = true) + : Success(success) + { + } + }; // Used by service to notify dead letter queue leader struct TEvDeadLetterQueueNotification : public NActors::TEventLocal<TEvDeadLetterQueueNotification, EvDeadLetterQueueNotification> { @@ -887,4 +887,4 @@ struct TSqsEvents { }; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/executor.cpp b/ydb/core/ymq/actor/executor.cpp index e17dc68fe95..06707254b52 100644 --- a/ydb/core/ymq/actor/executor.cpp +++ b/ydb/core/ymq/actor/executor.cpp @@ -1,6 +1,6 @@ #include "executor.h" -#include "log.h" -#include "cfg.h" +#include "log.h" +#include "cfg.h" #include <ydb/core/protos/tx_proxy.pb.h> #include <ydb/core/protos/flat_tx_scheme.pb.h> @@ -10,257 +10,257 @@ #include <ydb/core/ymq/base/debug_info.h> #include 
<ydb/core/ymq/queues/fifo/queries.h> #include <ydb/core/ymq/queues/std/queries.h> - + #include <ydb/library/yql/minikql/mkql_node_serialization.h> #include <ydb/library/yql/public/issue/yql_issue_message.h> -#include <util/generic/ptr.h> -#include <util/generic/utility.h> - -namespace NKikimr::NSQS { - -constexpr ui64 EXECUTE_RETRY_WAKEUP_TAG = 1; -constexpr ui64 COMPILE_RETRY_WAKEUP_TAG = 2; - -static TString MiniKQLDataResponseToString(const TSqsEvents::TEvExecuted::TRecord& record) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); - return val.GetValueText<NClient::TFormatJSON>(); -} - -static TString MiniKQLParamsToString(const NKikimrMiniKQL::TParams& params) { - if (!params.HasValue() && !params.HasType()) { - return "{}"; - } - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(params.GetValue(), params.GetType())); - return val.GetValueText<NClient::TFormatJSON>(); -} - +#include <util/generic/ptr.h> +#include <util/generic/utility.h> + +namespace NKikimr::NSQS { + +constexpr ui64 EXECUTE_RETRY_WAKEUP_TAG = 1; +constexpr ui64 COMPILE_RETRY_WAKEUP_TAG = 2; + +static TString MiniKQLDataResponseToString(const TSqsEvents::TEvExecuted::TRecord& record) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); + return val.GetValueText<NClient::TFormatJSON>(); +} + +static TString MiniKQLParamsToString(const NKikimrMiniKQL::TParams& params) { + if (!params.HasValue() && !params.HasType()) { + return "{}"; + } + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(params.GetValue(), params.GetType())); + return val.GetValueText<NClient::TFormatJSON>(); +} + TExecutorBuilder::TExecutorBuilder(TActorId parent, const TString& requestId) - : Parent_(parent) - , RequestId_(requestId) - , ProposeTransactionRequest_(MakeHolder<TEvTxUserProxy::TEvProposeTransaction>()) -{ -} - -void TExecutorBuilder::Start() { - if (HasQueryId() && QueueName_) { + : Parent_(parent) + , RequestId_(requestId) + , ProposeTransactionRequest_(MakeHolder<TEvTxUserProxy::TEvProposeTransaction>()) +{ +} + +void TExecutorBuilder::Start() { + if (HasQueryId() && QueueName_) { SendToQueueLeader(); - } else { - StartExecutorActor(); - } -} - -void TExecutorBuilder::StartExecutorActor() { - TQueuePath path(Cfg().GetRoot(), UserName_, QueueName_, QueueVersion_); - if (Request().Record.GetTransaction().HasMiniKQLTransaction()) { - if (!Request().Record.GetTransaction().GetMiniKQLTransaction().HasMode()) { - Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->SetMode(NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE_AND_EXEC); - } - } - - if (HasQueryId()) { - auto* trans = Request().Record.MutableTransaction()->MutableMiniKQLTransaction(); - if (!trans->MutableProgram()->HasText() && !trans->MutableProgram()->HasBin()) { - Text(Sprintf(GetQueryById(QueryId_), - TString(path.GetVersionedQueuePath()).c_str(), - Shard_, - path.GetUserPath().c_str(), - QueueName_.c_str(), - Cfg().GetRoot().c_str())); - } - } - - if (HasQueryId()) { - RLOG_SQS_DEBUG("Starting executor actor for query(idx=" << QueryId_ << "). Mode: " << NKikimrTxUserProxy::TMiniKQLTransaction::EMode_Name(Request().Record.GetTransaction().GetMiniKQLTransaction().GetMode())); - } else { - RLOG_SQS_DEBUG("Starting executor actor for text query. 
Mode: " << NKikimrTxUserProxy::TMiniKQLTransaction::EMode_Name(Request().Record.GetTransaction().GetMiniKQLTransaction().GetMode())); - } - - THolder<TMiniKqlExecutionActor> actor = - MakeHolder<TMiniKqlExecutionActor>( - Parent_, - RequestId_, - std::move(ProposeTransactionRequest_), - RetryOnTimeout_, - path, - TransactionCounters_, - Callback_); - - if (HasQueryId()) { // query with id - actor->SetQueryIdForLogging(QueryId_); - } - - TActivationContext::Register(actor.Release()); -} - + } else { + StartExecutorActor(); + } +} + +void TExecutorBuilder::StartExecutorActor() { + TQueuePath path(Cfg().GetRoot(), UserName_, QueueName_, QueueVersion_); + if (Request().Record.GetTransaction().HasMiniKQLTransaction()) { + if (!Request().Record.GetTransaction().GetMiniKQLTransaction().HasMode()) { + Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->SetMode(NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE_AND_EXEC); + } + } + + if (HasQueryId()) { + auto* trans = Request().Record.MutableTransaction()->MutableMiniKQLTransaction(); + if (!trans->MutableProgram()->HasText() && !trans->MutableProgram()->HasBin()) { + Text(Sprintf(GetQueryById(QueryId_), + TString(path.GetVersionedQueuePath()).c_str(), + Shard_, + path.GetUserPath().c_str(), + QueueName_.c_str(), + Cfg().GetRoot().c_str())); + } + } + + if (HasQueryId()) { + RLOG_SQS_DEBUG("Starting executor actor for query(idx=" << QueryId_ << "). Mode: " << NKikimrTxUserProxy::TMiniKQLTransaction::EMode_Name(Request().Record.GetTransaction().GetMiniKQLTransaction().GetMode())); + } else { + RLOG_SQS_DEBUG("Starting executor actor for text query. Mode: " << NKikimrTxUserProxy::TMiniKQLTransaction::EMode_Name(Request().Record.GetTransaction().GetMiniKQLTransaction().GetMode())); + } + + THolder<TMiniKqlExecutionActor> actor = + MakeHolder<TMiniKqlExecutionActor>( + Parent_, + RequestId_, + std::move(ProposeTransactionRequest_), + RetryOnTimeout_, + path, + TransactionCounters_, + Callback_); + + if (HasQueryId()) { // query with id + actor->SetQueryIdForLogging(QueryId_); + } + + TActivationContext::Register(actor.Release()); +} + void TExecutorBuilder::SendToQueueLeader() { Y_VERIFY(QueueLeaderActor_); - - auto ev = MakeHolder<TSqsEvents::TEvExecute>(Parent_, RequestId_, TQueuePath(Cfg().GetRoot(), UserName_, QueueName_, QueueVersion_), QueryId_, Shard_); - ev->RetryOnTimeout = RetryOnTimeout_; - ev->Cb = std::move(Callback_); - Params(); // create params if not yet exist - ev->Params = std::move(*Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->MutableParams()->MutableProto()); - + + auto ev = MakeHolder<TSqsEvents::TEvExecute>(Parent_, RequestId_, TQueuePath(Cfg().GetRoot(), UserName_, QueueName_, QueueVersion_), QueryId_, Shard_); + ev->RetryOnTimeout = RetryOnTimeout_; + ev->Cb = std::move(Callback_); + Params(); // create params if not yet exist + ev->Params = std::move(*Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->MutableParams()->MutableProto()); + RLOG_SQS_DEBUG("Sending execute request for query(idx=" << QueryId_ << ") to queue leader"); - + TActivationContext::Send(new IEventHandle(QueueLeaderActor_, Parent_, ev.Release())); -} - +} + const char* TExecutorBuilder::GetQueryById(size_t idx) { const char* query = IsFifoQueue_ ? 
GetFifoQueryById(idx) : GetStdQueryById(idx); Y_VERIFY(query); return query; -} - +} + TMiniKqlExecutionActor::TMiniKqlExecutionActor( const TActorId sender, - TString requestId, + TString requestId, THolder<TRequest> req, - bool retryOnTimeout, - const TQueuePath& path, // queue or user - const TIntrusivePtr<TTransactionCounters>& counters, - TSqsEvents::TExecutedCallback cb) + bool retryOnTimeout, + const TQueuePath& path, // queue or user + const TIntrusivePtr<TTransactionCounters>& counters, + TSqsEvents::TExecutedCallback cb) : Sender_(sender) - , RequestId_(std::move(requestId)) + , RequestId_(std::move(requestId)) , Cb_(cb) , Request_(std::move(req)) - , Counters_(counters) - , QueuePath_(path) - , RetryOnTimeout_(retryOnTimeout) + , Counters_(counters) + , QueuePath_(path) + , RetryOnTimeout_(retryOnTimeout) { - DebugInfo->ExecutorActors.emplace(RequestId_, this); + DebugInfo->ExecutorActors.emplace(RequestId_, this); } -TMiniKqlExecutionActor::~TMiniKqlExecutionActor() { - DebugInfo->ExecutorActors.EraseKeyValue(RequestId_, this); -} - -void TMiniKqlExecutionActor::Bootstrap() { - StartTs_ = TActivationContext::Now(); - - auto& transaction = *Request_->Record.MutableTransaction(); - if (RequestId_) { - transaction.SetUserRequestId(RequestId_); - } - - // Set timeout - if (transaction.HasMiniKQLTransaction()) { - const auto& cfg = Cfg(); - Request_->Record.SetExecTimeoutPeriod(3 * cfg.GetTransactionTimeoutMs()); - } - - auto& mkqlTx = *transaction.MutableMiniKQLTransaction(); - - if (mkqlTx.HasParams() && mkqlTx.GetParams().HasProto()) { - try { - RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Serializing params: " << MiniKQLParamsToString(mkqlTx.GetParams().GetProto())); - NMiniKQL::TScopedAlloc alloc(Counters_ && Counters_->AllocPoolCounters ? *Counters_->AllocPoolCounters : TAlignedPagePoolCounters(), AppData()->FunctionRegistry->SupportsSizedAllocators()); - NMiniKQL::TTypeEnvironment env(alloc); - NMiniKQL::TRuntimeNode node = NMiniKQL::ImportValueFromProto(mkqlTx.GetParams().GetProto(), env); - TString bin = NMiniKQL::SerializeRuntimeNode(node, env); - ProtoParamsForDebug.Swap(mkqlTx.MutableParams()->MutableProto()); - mkqlTx.MutableParams()->ClearProto(); - mkqlTx.MutableParams()->SetBin(bin); - } catch (const yexception& e) { - RLOG_SQS_ERROR(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Error while making mkql execution request params: " << CurrentExceptionMessage()); - // TODO Set error - Send(Sender_, MakeHolder<TSqsEvents::TEvExecuted>(Cb_, ui64(0))); - LogRequestDuration(); - PassAway(); - return; +TMiniKqlExecutionActor::~TMiniKqlExecutionActor() { + DebugInfo->ExecutorActors.EraseKeyValue(RequestId_, this); +} + +void TMiniKqlExecutionActor::Bootstrap() { + StartTs_ = TActivationContext::Now(); + + auto& transaction = *Request_->Record.MutableTransaction(); + if (RequestId_) { + transaction.SetUserRequestId(RequestId_); + } + + // Set timeout + if (transaction.HasMiniKQLTransaction()) { + const auto& cfg = Cfg(); + Request_->Record.SetExecTimeoutPeriod(3 * cfg.GetTransactionTimeoutMs()); + } + + auto& mkqlTx = *transaction.MutableMiniKQLTransaction(); + + if (mkqlTx.HasParams() && mkqlTx.GetParams().HasProto()) { + try { + RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Serializing params: " << MiniKQLParamsToString(mkqlTx.GetParams().GetProto())); + NMiniKQL::TScopedAlloc alloc(Counters_ && Counters_->AllocPoolCounters ? 
*Counters_->AllocPoolCounters : TAlignedPagePoolCounters(), AppData()->FunctionRegistry->SupportsSizedAllocators()); + NMiniKQL::TTypeEnvironment env(alloc); + NMiniKQL::TRuntimeNode node = NMiniKQL::ImportValueFromProto(mkqlTx.GetParams().GetProto(), env); + TString bin = NMiniKQL::SerializeRuntimeNode(node, env); + ProtoParamsForDebug.Swap(mkqlTx.MutableParams()->MutableProto()); + mkqlTx.MutableParams()->ClearProto(); + mkqlTx.MutableParams()->SetBin(bin); + } catch (const yexception& e) { + RLOG_SQS_ERROR(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Error while making mkql execution request params: " << CurrentExceptionMessage()); + // TODO Set error + Send(Sender_, MakeHolder<TSqsEvents::TEvExecuted>(Cb_, ui64(0))); + LogRequestDuration(); + PassAway(); + return; } } if (mkqlTx.HasProgram() && mkqlTx.GetProgram().HasText()) { MkqlProgramText_ = mkqlTx.GetProgram().GetText(); - const bool compileMode = mkqlTx.GetMode() == NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE; - Mode_ = compileMode ? EMode::Compile : EMode::CompileAndExec; - CompileProgram(compileMode); + const bool compileMode = mkqlTx.GetMode() == NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE; + Mode_ = compileMode ? EMode::Compile : EMode::CompileAndExec; + CompileProgram(compileMode); } else { - Mode_ = EMode::Exec; - ProceedWithExecution(); + Mode_ = EMode::Exec; + ProceedWithExecution(); } Become(&TMiniKqlExecutionActor::AwaitState); } -void TMiniKqlExecutionActor::CompileProgram(bool forceRefresh) { +void TMiniKqlExecutionActor::CompileProgram(bool forceRefresh) { auto compileEv = MakeHolder<TMiniKQLCompileServiceEvents::TEvCompile>(MkqlProgramText_); compileEv->ForceRefresh = forceRefresh; if (!CompileResolveCookies_.empty()) { compileEv->CompileResolveCookies = std::move(CompileResolveCookies_); } - RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Compile program: " << MkqlProgramText_); - Send(MakeMiniKQLCompileServiceID(), compileEv.Release()); + RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Compile program: " << MkqlProgramText_); + Send(MakeMiniKQLCompileServiceID(), compileEv.Release()); CompilationPending_ = true; - INC_COUNTER(Counters_, CompileQueryCount); + INC_COUNTER(Counters_, CompileQueryCount); } -void TMiniKqlExecutionActor::ProceedWithExecution() { +void TMiniKqlExecutionActor::ProceedWithExecution() { if (!CompilationPending_) { THolder<TRequest> ev = MakeHolder<TRequest>(); ev->Record.CopyFrom(Request_->Record); - RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Execute program: " << ev->Record << ". Params: " << MiniKQLParamsToString(ProtoParamsForDebug)); - StartExecutionTs_ = TActivationContext::Now(); - Send(MakeTxProxyID(), std::move(ev)); - ++AttemptNumber_; - - INC_COUNTER(Counters_, TransactionsInfly); + RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Execute program: " << ev->Record << ". 
Params: " << MiniKQLParamsToString(ProtoParamsForDebug)); + StartExecutionTs_ = TActivationContext::Now(); + Send(MakeTxProxyID(), std::move(ev)); + ++AttemptNumber_; + + INC_COUNTER(Counters_, TransactionsInfly); } } -void TMiniKqlExecutionActor::HandleCompile(TMiniKQLCompileServiceEvents::TEvCompileStatus::TPtr& ev) { - if (Mode_ == EMode::CompileAndExec) { - const TDuration duration = TActivationContext::Now() - StartTs_; - RLOG_SQS_DEBUG(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Compilation duration: " << duration.MilliSeconds() << "ms"); - } +void TMiniKqlExecutionActor::HandleCompile(TMiniKQLCompileServiceEvents::TEvCompileStatus::TPtr& ev) { + if (Mode_ == EMode::CompileAndExec) { + const TDuration duration = TActivationContext::Now() - StartTs_; + RLOG_SQS_DEBUG(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Compilation duration: " << duration.MilliSeconds() << "ms"); + } const auto& result = ev->Get()->Result; auto& mkqlTx = *Request_->Record.MutableTransaction()->MutableMiniKQLTransaction(); if (!result.Errors.Empty()) { - const TString errors = result.Errors.ToString(); - RLOG_SQS_ERROR(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Errors while compiling program: " << errors << ", program text: " << MkqlProgramText_); - - bool retriableResolveError = false; - if (CompilationRetries_ > 0) { - for (const NYql::TIssue& issue : result.Errors) { - if (issue.GetCode() == NKikimrIssues::TIssuesIds::GENERIC_RESOLVE_ERROR) { - retriableResolveError = true; - break; - } - } - } - - if (retriableResolveError) { - --CompilationRetries_; - ScheduleRetry(true); - } else { - NKikimrTxUserProxy::TEvProposeTransactionStatus resp; - resp.SetStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecError); - IssuesToMessage(result.Errors, resp.MutableIssues()); - resp.SetMiniKQLErrors(errors); - THolder<TSqsEvents::TEvExecuted> e(new TSqsEvents::TEvExecuted(resp, Cb_, ui64(0))); - Send(Sender_, std::move(e)); - LogRequestDuration(); - PassAway(); - } + const TString errors = result.Errors.ToString(); + RLOG_SQS_ERROR(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Errors while compiling program: " << errors << ", program text: " << MkqlProgramText_); + + bool retriableResolveError = false; + if (CompilationRetries_ > 0) { + for (const NYql::TIssue& issue : result.Errors) { + if (issue.GetCode() == NKikimrIssues::TIssuesIds::GENERIC_RESOLVE_ERROR) { + retriableResolveError = true; + break; + } + } + } + + if (retriableResolveError) { + --CompilationRetries_; + ScheduleRetry(true); + } else { + NKikimrTxUserProxy::TEvProposeTransactionStatus resp; + resp.SetStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecError); + IssuesToMessage(result.Errors, resp.MutableIssues()); + resp.SetMiniKQLErrors(errors); + THolder<TSqsEvents::TEvExecuted> e(new TSqsEvents::TEvExecuted(resp, Cb_, ui64(0))); + Send(Sender_, std::move(e)); + LogRequestDuration(); + PassAway(); + } return; - } else { - PrevAttemptWaitTime_ = TDuration::Zero(); + } else { + PrevAttemptWaitTime_ = TDuration::Zero(); } if (mkqlTx.GetMode() == NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE) { NKikimrTxUserProxy::TEvProposeTransactionStatus resp; resp.SetStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete); resp.MutableMiniKQLCompileResults()->SetCompiledProgram(result.CompiledProgram); - RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Compile program response: " << resp); - Send(Sender_, 
MakeHolder<TSqsEvents::TEvExecuted>(resp, Cb_, ui64(0))); - LogRequestDuration(); - PassAway(); + RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Compile program response: " << resp); + Send(Sender_, MakeHolder<TSqsEvents::TEvExecuted>(resp, Cb_, ui64(0))); + LogRequestDuration(); + PassAway(); return; } @@ -271,7 +271,7 @@ void TMiniKqlExecutionActor::HandleCompile(TMiniKQLCompileServiceEvents::TEvComp CompileResolveCookies_ = std::move(ev->Get()->CompileResolveCookies); CompilationPending_ = false; - ProceedWithExecution(); + ProceedWithExecution(); } template<typename TKikimrResultRecord> @@ -282,197 +282,197 @@ bool TMiniKqlExecutionActor::ShouldRetryOnFail(const TKikimrResultRecord& record (record.HasSchemeShardStatus() && record.GetSchemeShardStatus() == NKikimrScheme::EStatus::StatusMultipleModifications); // very rare case in queue creation } -void TMiniKqlExecutionActor::HandleResponse(TResponse::TPtr& ev) { - const TDuration executionDuration = TActivationContext::Now() - StartExecutionTs_; +void TMiniKqlExecutionActor::HandleResponse(TResponse::TPtr& ev) { + const TDuration executionDuration = TActivationContext::Now() - StartExecutionTs_; auto& response = *ev->Get(); auto& record = response.Record; const auto status = NKikimr::NTxProxy::TResultStatus::EStatus(record.GetStatus()); - RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " HandleResponse " << record); - RLOG_SQS_DEBUG(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Attempt " << AttemptNumber_ << " execution duration: " << executionDuration.MilliSeconds() << "ms"); - if (Counters_) { - DEC_COUNTER(Counters_, TransactionsInfly); - INC_COUNTER(Counters_, TransactionsCount); - if (QueryId_.Defined()) { - Counters_->QueryTypeCounters[*QueryId_].TransactionsCount->Inc(); - } - } + RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " HandleResponse " << record); + RLOG_SQS_DEBUG(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Attempt " << AttemptNumber_ << " execution duration: " << executionDuration.MilliSeconds() << "ms"); + if (Counters_) { + DEC_COUNTER(Counters_, TransactionsInfly); + INC_COUNTER(Counters_, TransactionsCount); + if (QueryId_.Defined()) { + Counters_->QueryTypeCounters[*QueryId_].TransactionsCount->Inc(); + } + } const bool resolveError = status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ResolveError || status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ProxyShardNotAvailable; - if (resolveError && CompilationRetries_ > 0 && !MkqlProgramText_.empty()) { - RLOG_SQS_INFO(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Resolve error. Retrying mkql request"); - --CompilationRetries_; - ScheduleRetry(true); + if (resolveError && CompilationRetries_ > 0 && !MkqlProgramText_.empty()) { + RLOG_SQS_INFO(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Resolve error. 
Retrying mkql request"); + --CompilationRetries_; + ScheduleRetry(true); return; } - bool retryableError = false; - bool failed = false; - if (status != TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete - && status != TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecInProgress - && status != TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecAlready) { - failed = true; + bool retryableError = false; + bool failed = false; + if (status != TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete + && status != TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecInProgress + && status != TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecAlready) { + failed = true; retryableError = ShouldRetryOnFail(record); - if (retryableError) { - RLOG_SQS_WARN(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Retryable error in mkql execution result: " << response.Record); - } else { - RLOG_SQS_ERROR(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Error in mkql execution result: " << response.Record); - } - } else { - PrevAttemptWaitTime_ = TDuration::Zero(); - if (status != TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecInProgress) { - if (QueryId_ && Counters_) { - Counters_->QueryTypeCounters[*QueryId_].TransactionDuration->Collect(executionDuration.MilliSeconds()); - } - RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Sending mkql execution result: " << response.Record); - if (response.Record.HasExecutionEngineEvaluatedResponse()) { - RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Minikql data response: " << MiniKQLDataResponseToString(response.Record)); - } - } - } - - const bool timeout = failed && RetryTimeoutExpired(); - if (retryableError && timeout) { - INC_COUNTER(Counters_, TransactionRetryTimeouts); - RLOG_SQS_ERROR(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Can't retry. 
Timeout."); - } - if (retryableError && !timeout) { - ScheduleRetry(false); - } else { - if (failed && Counters_) { - INC_COUNTER(Counters_, TransactionsFailed); - if (QueryId_.Defined()) { - Counters_->QueryTypeCounters[*QueryId_].TransactionsFailed->Inc(); - } - } - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecInProgress) { - ResponseEvent_ = std::move(ev); - WaitForCompletion(); - } else { - Send(Sender_, MakeHolder<TSqsEvents::TEvExecuted>(response.Record, Cb_, ui64(0))); - LogRequestDuration(); - PassAway(); - } - } -} - -void TMiniKqlExecutionActor::HandleWakeup(TEvWakeup::TPtr& ev) { - Y_VERIFY(ev->Get()->Tag != 0); - switch (ev->Get()->Tag) { - case EXECUTE_RETRY_WAKEUP_TAG: { - ProceedWithExecution(); - break; - } - case COMPILE_RETRY_WAKEUP_TAG: { - CompileProgram(Mode_ == EMode::Compile); - break; - } - default: { - Y_FAIL(); - } - } -} - -TDuration TMiniKqlExecutionActor::NextAttemptWaitDuration() const { - const auto& cfg = Cfg(); - const TDuration attemptWaitTime = ClampVal(PrevAttemptWaitTime_ * 2, - TDuration::MilliSeconds(cfg.GetTransactionRetryWaitDurationMs()), - TDuration::MilliSeconds(cfg.GetTransactionMaxRetryWaitDurationMs())); - return attemptWaitTime; -} - -bool TMiniKqlExecutionActor::RetryTimeoutExpired() { - const auto& cfg = Cfg(); - return TActivationContext::Now() + NextAttemptWaitDuration() >= StartTs_ + TDuration::MilliSeconds(cfg.GetTransactionTimeoutMs()); -} - -void TMiniKqlExecutionActor::ScheduleRetry(bool compilation) { - INC_COUNTER(Counters_, TransactionRetries); - const auto& cfg = Cfg(); - const TDuration randomComponent = TDuration::MilliSeconds(RandomNumber<ui64>(cfg.GetTransactionRetryWaitDurationMs() / 2)); - const TDuration attemptWaitTime = NextAttemptWaitDuration(); - PrevAttemptWaitTime_ = attemptWaitTime; - const TDuration waitTime = attemptWaitTime + randomComponent; - RLOG_SQS_WARN(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Retry" << (compilation ? " compilation" : " transaction") << " in " << waitTime.MilliSeconds() << "ms"); - this->Schedule(waitTime, new TEvWakeup(compilation ? 
COMPILE_RETRY_WAKEUP_TAG : EXECUTE_RETRY_WAKEUP_TAG)); -} - -void TMiniKqlExecutionActor::LogRequestDuration() { - const TInstant endTime = TActivationContext::Now(); - const TDuration duration = endTime - StartTs_; - RLOG_SQS_DEBUG(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " " << GetActionType() << " duration: " << duration.MilliSeconds() << "ms"); + if (retryableError) { + RLOG_SQS_WARN(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Retryable error in mkql execution result: " << response.Record); + } else { + RLOG_SQS_ERROR(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Error in mkql execution result: " << response.Record); + } + } else { + PrevAttemptWaitTime_ = TDuration::Zero(); + if (status != TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecInProgress) { + if (QueryId_ && Counters_) { + Counters_->QueryTypeCounters[*QueryId_].TransactionDuration->Collect(executionDuration.MilliSeconds()); + } + RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Sending mkql execution result: " << response.Record); + if (response.Record.HasExecutionEngineEvaluatedResponse()) { + RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Minikql data response: " << MiniKQLDataResponseToString(response.Record)); + } + } + } + + const bool timeout = failed && RetryTimeoutExpired(); + if (retryableError && timeout) { + INC_COUNTER(Counters_, TransactionRetryTimeouts); + RLOG_SQS_ERROR(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Can't retry. Timeout."); + } + if (retryableError && !timeout) { + ScheduleRetry(false); + } else { + if (failed && Counters_) { + INC_COUNTER(Counters_, TransactionsFailed); + if (QueryId_.Defined()) { + Counters_->QueryTypeCounters[*QueryId_].TransactionsFailed->Inc(); + } + } + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecInProgress) { + ResponseEvent_ = std::move(ev); + WaitForCompletion(); + } else { + Send(Sender_, MakeHolder<TSqsEvents::TEvExecuted>(response.Record, Cb_, ui64(0))); + LogRequestDuration(); + PassAway(); + } + } } -TString TMiniKqlExecutionActor::GetActionType() const { - switch (Mode_) { - case EMode::Compile: - return "compilation"; - case EMode::Exec: - return "execution"; - case EMode::CompileAndExec: - return "compile & exec"; - } - return TString(); -} - -TString TMiniKqlExecutionActor::GetRequestType() const { - TStringBuilder ret; - const auto& trans = Request_->Record.GetTransaction(); - if (QueryId_.Defined()) { - ret << "Query(idx=" << *QueryId_ << ")"; - } else if (trans.HasModifyScheme() && trans.GetModifyScheme().HasOperationType()) { - ret << "ModifyScheme(op=" << EOperationType_Name(trans.GetModifyScheme().GetOperationType()) << ")"; - } else if (trans.HasMiniKQLTransaction()) { - ret << "Text query"; - } else { - ret << "Query"; - } - return std::move(ret); -} - -void TMiniKqlExecutionActor::PassAway() { - if (TabletPipeClient_) { - NTabletPipe::CloseClient(SelfId(), TabletPipeClient_); +void TMiniKqlExecutionActor::HandleWakeup(TEvWakeup::TPtr& ev) { + Y_VERIFY(ev->Get()->Tag != 0); + switch (ev->Get()->Tag) { + case EXECUTE_RETRY_WAKEUP_TAG: { + ProceedWithExecution(); + break; + } + case COMPILE_RETRY_WAKEUP_TAG: { + CompileProgram(Mode_ == EMode::Compile); + break; + } + default: { + Y_FAIL(); + } + } +} + +TDuration TMiniKqlExecutionActor::NextAttemptWaitDuration() const { + const auto& cfg = Cfg(); + const TDuration attemptWaitTime = ClampVal(PrevAttemptWaitTime_ * 2, + 
TDuration::MilliSeconds(cfg.GetTransactionRetryWaitDurationMs()), + TDuration::MilliSeconds(cfg.GetTransactionMaxRetryWaitDurationMs())); + return attemptWaitTime; +} + +bool TMiniKqlExecutionActor::RetryTimeoutExpired() { + const auto& cfg = Cfg(); + return TActivationContext::Now() + NextAttemptWaitDuration() >= StartTs_ + TDuration::MilliSeconds(cfg.GetTransactionTimeoutMs()); +} + +void TMiniKqlExecutionActor::ScheduleRetry(bool compilation) { + INC_COUNTER(Counters_, TransactionRetries); + const auto& cfg = Cfg(); + const TDuration randomComponent = TDuration::MilliSeconds(RandomNumber<ui64>(cfg.GetTransactionRetryWaitDurationMs() / 2)); + const TDuration attemptWaitTime = NextAttemptWaitDuration(); + PrevAttemptWaitTime_ = attemptWaitTime; + const TDuration waitTime = attemptWaitTime + randomComponent; + RLOG_SQS_WARN(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Retry" << (compilation ? " compilation" : " transaction") << " in " << waitTime.MilliSeconds() << "ms"); + this->Schedule(waitTime, new TEvWakeup(compilation ? COMPILE_RETRY_WAKEUP_TAG : EXECUTE_RETRY_WAKEUP_TAG)); +} + +void TMiniKqlExecutionActor::LogRequestDuration() { + const TInstant endTime = TActivationContext::Now(); + const TDuration duration = endTime - StartTs_; + RLOG_SQS_DEBUG(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " " << GetActionType() << " duration: " << duration.MilliSeconds() << "ms"); +} + +TString TMiniKqlExecutionActor::GetActionType() const { + switch (Mode_) { + case EMode::Compile: + return "compilation"; + case EMode::Exec: + return "execution"; + case EMode::CompileAndExec: + return "compile & exec"; + } + return TString(); +} + +TString TMiniKqlExecutionActor::GetRequestType() const { + TStringBuilder ret; + const auto& trans = Request_->Record.GetTransaction(); + if (QueryId_.Defined()) { + ret << "Query(idx=" << *QueryId_ << ")"; + } else if (trans.HasModifyScheme() && trans.GetModifyScheme().HasOperationType()) { + ret << "ModifyScheme(op=" << EOperationType_Name(trans.GetModifyScheme().GetOperationType()) << ")"; + } else if (trans.HasMiniKQLTransaction()) { + ret << "Text query"; + } else { + ret << "Query"; + } + return std::move(ret); +} + +void TMiniKqlExecutionActor::PassAway() { + if (TabletPipeClient_) { + NTabletPipe::CloseClient(SelfId(), TabletPipeClient_); TabletPipeClient_ = TActorId(); - } - TActorBootstrapped<TMiniKqlExecutionActor>::PassAway(); -} - -void TMiniKqlExecutionActor::WaitForCompletion(bool retry) { - const ui64 schemeShardId = ResponseEvent_->Get()->Record.GetSchemeShardTabletId(); - NTabletPipe::TClientConfig clientConfig; + } + TActorBootstrapped<TMiniKqlExecutionActor>::PassAway(); +} + +void TMiniKqlExecutionActor::WaitForCompletion(bool retry) { + const ui64 schemeShardId = ResponseEvent_->Get()->Record.GetSchemeShardTabletId(); + NTabletPipe::TClientConfig clientConfig; clientConfig.RetryPolicy = {.RetryLimitCount = 5, .MinRetryTime = TDuration::MilliSeconds(100), .DoFirstRetryInstantly = !retry}; TabletPipeClient_ = RegisterWithSameMailbox(NTabletPipe::CreateClient(SelfId(), schemeShardId, clientConfig)); - + TAutoPtr<NSchemeShard::TEvSchemeShard::TEvNotifyTxCompletion> request(new NSchemeShard::TEvSchemeShard::TEvNotifyTxCompletion()); - request->Record.SetTxId(ResponseEvent_->Get()->Record.GetTxId()); - NTabletPipe::SendData(SelfId(), TabletPipeClient_, request.Release()); - - RLOG_SQS_DEBUG(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Waiting for transaction to complete. 
TxId: " << ResponseEvent_->Get()->Record.GetTxId() << ". Scheme shard id: " << schemeShardId); -} - + request->Record.SetTxId(ResponseEvent_->Get()->Record.GetTxId()); + NTabletPipe::SendData(SelfId(), TabletPipeClient_, request.Release()); + + RLOG_SQS_DEBUG(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Waiting for transaction to complete. TxId: " << ResponseEvent_->Get()->Record.GetTxId() << ". Scheme shard id: " << schemeShardId); +} + void TMiniKqlExecutionActor::HandleResult(NSchemeShard::TEvSchemeShard::TEvNotifyTxCompletionResult::TPtr& ev) { - Y_VERIFY(ev->Get()->Record.GetTxId() == ResponseEvent_->Get()->Record.GetTxId()); - ResponseEvent_->Get()->Record.SetStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete); - RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Sending mkql execution result: " << ResponseEvent_->Get()->Record); - - Send(Sender_, MakeHolder<TSqsEvents::TEvExecuted>(ResponseEvent_->Get()->Record, Cb_, ui64(0))); - LogRequestDuration(); - PassAway(); -} - -void TMiniKqlExecutionActor::HandlePipeClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev) { - if (ev->Get()->Status != NKikimrProto::OK) { - RLOG_SQS_WARN(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Failed to connect to pipe: " << ev->Get()->Status << ". Reconnecting"); - WaitForCompletion(true); - } -} - -void TMiniKqlExecutionActor::HandlePipeClientDisconnected(TEvTabletPipe::TEvClientDestroyed::TPtr&) { - RLOG_SQS_WARN(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Pipe disconnected. Reconnecting"); - WaitForCompletion(true); -} - -} // namespace NKikimr::NSQS + Y_VERIFY(ev->Get()->Record.GetTxId() == ResponseEvent_->Get()->Record.GetTxId()); + ResponseEvent_->Get()->Record.SetStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete); + RLOG_SQS_TRACE(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Sending mkql execution result: " << ResponseEvent_->Get()->Record); + + Send(Sender_, MakeHolder<TSqsEvents::TEvExecuted>(ResponseEvent_->Get()->Record, Cb_, ui64(0))); + LogRequestDuration(); + PassAway(); +} + +void TMiniKqlExecutionActor::HandlePipeClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev) { + if (ev->Get()->Status != NKikimrProto::OK) { + RLOG_SQS_WARN(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Failed to connect to pipe: " << ev->Get()->Status << ". Reconnecting"); + WaitForCompletion(true); + } +} + +void TMiniKqlExecutionActor::HandlePipeClientDisconnected(TEvTabletPipe::TEvClientDestroyed::TPtr&) { + RLOG_SQS_WARN(GetRequestType() << " Queue " << TLogQueueName(QueuePath_) << " Pipe disconnected. Reconnecting"); + WaitForCompletion(true); +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/executor.h b/ydb/core/ymq/actor/executor.h index 5dd51c6da23..d8068ec3b90 100644 --- a/ydb/core/ymq/actor/executor.h +++ b/ydb/core/ymq/actor/executor.h @@ -1,5 +1,5 @@ #pragma once -#include "defs.h" +#include "defs.h" #include "events.h" #include <ydb/core/base/tablet_pipe.h> @@ -14,139 +14,139 @@ #include <library/cpp/monlib/dynamic_counters/counters.h> #include <util/generic/hash.h> -#include <util/generic/maybe.h> +#include <util/generic/maybe.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { -// Builds transaction request and properly executes it +// Builds transaction request and properly executes it // Can either send TEvExecute to queue leader or create execution actor. 
-class TExecutorBuilder { -public: +class TExecutorBuilder { +public: explicit TExecutorBuilder(TActorId parent, const TString& requestId); - - TExecutorBuilder& User(const TString& user) { - UserName_ = user; - return *this; - } - TExecutorBuilder& Queue(const TString& queue) { - QueueName_ = queue; - return *this; - } - TExecutorBuilder& Shard(ui64 shard) { - Shard_ = shard; - return *this; - } + + TExecutorBuilder& User(const TString& user) { + UserName_ = user; + return *this; + } + TExecutorBuilder& Queue(const TString& queue) { + QueueName_ = queue; + return *this; + } + TExecutorBuilder& Shard(ui64 shard) { + Shard_ = shard; + return *this; + } TExecutorBuilder& QueueVersion(ui64 version) { QueueVersion_ = version; return *this; } TExecutorBuilder& QueueLeader(const TActorId& queueLeaderActor) { QueueLeaderActor_ = queueLeaderActor; - return *this; - } - TExecutorBuilder& Text(const TString& text, bool miniKql = true) { - auto* trans = Request().Record.MutableTransaction()->MutableMiniKQLTransaction(); - trans->MutableProgram()->SetText(text); - trans->SetFlatMKQL(miniKql); - return *this; - } - TExecutorBuilder& Bin(const TString& program, bool miniKql = true) { - auto* trans = Request().Record.MutableTransaction()->MutableMiniKQLTransaction(); - trans->MutableProgram()->SetBin(program); - trans->SetFlatMKQL(miniKql); - return *this; - } - TExecutorBuilder& QueryId(EQueryId id) { - QueryId_ = id; - return *this; - } + return *this; + } + TExecutorBuilder& Text(const TString& text, bool miniKql = true) { + auto* trans = Request().Record.MutableTransaction()->MutableMiniKQLTransaction(); + trans->MutableProgram()->SetText(text); + trans->SetFlatMKQL(miniKql); + return *this; + } + TExecutorBuilder& Bin(const TString& program, bool miniKql = true) { + auto* trans = Request().Record.MutableTransaction()->MutableMiniKQLTransaction(); + trans->MutableProgram()->SetBin(program); + trans->SetFlatMKQL(miniKql); + return *this; + } + TExecutorBuilder& QueryId(EQueryId id) { + QueryId_ = id; + return *this; + } TExecutorBuilder& Fifo(bool isFifo) { IsFifoQueue_ = isFifo; return *this; } - TExecutorBuilder& Mode(NKikimrTxUserProxy::TMiniKQLTransaction::EMode mode) { - Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->SetMode(mode); - return *this; - } - TExecutorBuilder& RetryOnTimeout(bool retry = true) { - RetryOnTimeout_ = retry; - return *this; - } - TExecutorBuilder& OnExecuted(TSqsEvents::TExecutedCallback cb) { - Callback_ = std::move(cb); - return *this; - } - TExecutorBuilder& Counters(TIntrusivePtr<TTransactionCounters> counters) { - if (counters) { - TransactionCounters_ = std::move(counters); - } - return *this; - } - TExecutorBuilder& Counters(const TIntrusivePtr<TQueueCounters>& queueCounters) { - if (queueCounters) { - TransactionCounters_ = queueCounters->GetTransactionCounters(); - } - return *this; - } - TExecutorBuilder& Counters(const TIntrusivePtr<TUserCounters>& userCounters) { - if (userCounters) { - TransactionCounters_ = userCounters->GetTransactionCounters(); - } - return *this; - } - - TParameters& Params() { - if (!Parameters_) { - Parameters_.ConstructInPlace( - Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->MutableParams()->MutableProto(), - this - ); - } - return *Parameters_; - } - - NClient::TWriteValue ParamsValue() { - auto* params = Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->MutableParams()->MutableProto(); - return NClient::TWriteValue::Create(*params->MutableValue(), *params->MutableType()); - 
} - - TEvTxUserProxy::TEvProposeTransaction& Request() { - return *ProposeTransactionRequest_; - } - - // Start transaction - // Invalidates all internal data - void Start(); // choose execution way automatically // prefered - void StartExecutorActor(); // explicilty choose a way to start actor - -private: + TExecutorBuilder& Mode(NKikimrTxUserProxy::TMiniKQLTransaction::EMode mode) { + Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->SetMode(mode); + return *this; + } + TExecutorBuilder& RetryOnTimeout(bool retry = true) { + RetryOnTimeout_ = retry; + return *this; + } + TExecutorBuilder& OnExecuted(TSqsEvents::TExecutedCallback cb) { + Callback_ = std::move(cb); + return *this; + } + TExecutorBuilder& Counters(TIntrusivePtr<TTransactionCounters> counters) { + if (counters) { + TransactionCounters_ = std::move(counters); + } + return *this; + } + TExecutorBuilder& Counters(const TIntrusivePtr<TQueueCounters>& queueCounters) { + if (queueCounters) { + TransactionCounters_ = queueCounters->GetTransactionCounters(); + } + return *this; + } + TExecutorBuilder& Counters(const TIntrusivePtr<TUserCounters>& userCounters) { + if (userCounters) { + TransactionCounters_ = userCounters->GetTransactionCounters(); + } + return *this; + } + + TParameters& Params() { + if (!Parameters_) { + Parameters_.ConstructInPlace( + Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->MutableParams()->MutableProto(), + this + ); + } + return *Parameters_; + } + + NClient::TWriteValue ParamsValue() { + auto* params = Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->MutableParams()->MutableProto(); + return NClient::TWriteValue::Create(*params->MutableValue(), *params->MutableType()); + } + + TEvTxUserProxy::TEvProposeTransaction& Request() { + return *ProposeTransactionRequest_; + } + + // Start transaction + // Invalidates all internal data + void Start(); // choose execution way automatically // prefered + void StartExecutorActor(); // explicilty choose a way to start actor + +private: void SendToQueueLeader(); // make transaction throught leader to use cached compiled query - - bool HasQueryId() const { - return QueryId_ != EQueryId::QUERY_VECTOR_SIZE; - } - + + bool HasQueryId() const { + return QueryId_ != EQueryId::QUERY_VECTOR_SIZE; + } + const char* GetQueryById(size_t idx); - -private: + +private: const TActorId Parent_; - TString RequestId_; - THolder<TEvTxUserProxy::TEvProposeTransaction> ProposeTransactionRequest_; - TMaybe<TParameters> Parameters_; - bool RetryOnTimeout_ = false; + TString RequestId_; + THolder<TEvTxUserProxy::TEvProposeTransaction> ProposeTransactionRequest_; + TMaybe<TParameters> Parameters_; + bool RetryOnTimeout_ = false; bool IsFifoQueue_ = false; - TString UserName_; - TString QueueName_; - ui64 Shard_ = 0; + TString UserName_; + TString QueueName_; + ui64 Shard_ = 0; ui64 QueueVersion_ = 0; TActorId QueueLeaderActor_; - TSqsEvents::TExecutedCallback Callback_; - EQueryId QueryId_ = EQueryId::QUERY_VECTOR_SIZE; - TIntrusivePtr<TTransactionCounters> TransactionCounters_; -}; - + TSqsEvents::TExecutedCallback Callback_; + EQueryId QueryId_ = EQueryId::QUERY_VECTOR_SIZE; + TIntrusivePtr<TTransactionCounters> TransactionCounters_; +}; + class TMiniKqlExecutionActor - : public TActorBootstrapped<TMiniKqlExecutionActor> + : public TActorBootstrapped<TMiniKqlExecutionActor> { using TRequest = TEvTxUserProxy::TEvProposeTransaction; using TResponse = TEvTxUserProxy::TEvProposeTransactionStatus; @@ -154,95 +154,95 @@ class 
TMiniKqlExecutionActor public: TMiniKqlExecutionActor( const TActorId sender, - TString requestId, + TString requestId, THolder<TRequest> req, - bool retryOnTimeout, - const TQueuePath& path, // queue or user - const TIntrusivePtr<TTransactionCounters>& counters, - TSqsEvents::TExecutedCallback cb = TSqsEvents::TExecutedCallback()); + bool retryOnTimeout, + const TQueuePath& path, // queue or user + const TIntrusivePtr<TTransactionCounters>& counters, + TSqsEvents::TExecutedCallback cb = TSqsEvents::TExecutedCallback()); - ~TMiniKqlExecutionActor(); - - void Bootstrap(); + ~TMiniKqlExecutionActor(); + + void Bootstrap(); static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_EXECUTOR_ACTOR; - } - - void SetQueryIdForLogging(EQueryId queryId) { - QueryId_ = queryId; - } - + } + + void SetQueryIdForLogging(EQueryId queryId) { + QueryId_ = queryId; + } + private: - void CompileProgram(bool forceRefresh); - - void ProceedWithExecution(); + void CompileProgram(bool forceRefresh); - TString GetRequestType() const; - TString GetActionType() const; - void LogRequestDuration(); + void ProceedWithExecution(); + TString GetRequestType() const; + TString GetActionType() const; + void LogRequestDuration(); + private: - STATEFN(AwaitState) { + STATEFN(AwaitState) { switch (ev->GetTypeRewrite()) { - hFunc(TEvTxUserProxy::TEvProposeTransactionStatus, HandleResponse); - hFunc(TMiniKQLCompileServiceEvents::TEvCompileStatus, HandleCompile); + hFunc(TEvTxUserProxy::TEvProposeTransactionStatus, HandleResponse); + hFunc(TMiniKQLCompileServiceEvents::TEvCompileStatus, HandleCompile); hFunc(NSchemeShard::TEvSchemeShard::TEvNotifyTxCompletionResult, HandleResult); - hFunc(TEvTabletPipe::TEvClientDestroyed, HandlePipeClientDisconnected); - hFunc(TEvTabletPipe::TEvClientConnected, HandlePipeClientConnected); - hFunc(TEvWakeup, HandleWakeup); + hFunc(TEvTabletPipe::TEvClientDestroyed, HandlePipeClientDisconnected); + hFunc(TEvTabletPipe::TEvClientConnected, HandlePipeClientConnected); + hFunc(TEvWakeup, HandleWakeup); } } - void PassAway(); - - void HandleCompile(TMiniKQLCompileServiceEvents::TEvCompileStatus::TPtr& ev); + void PassAway(); + + void HandleCompile(TMiniKQLCompileServiceEvents::TEvCompileStatus::TPtr& ev); template<typename TKikimrResultRecord> - bool ShouldRetryOnFail(const TKikimrResultRecord& record) const; + bool ShouldRetryOnFail(const TKikimrResultRecord& record) const; - void HandleResponse(TResponse::TPtr& ev); - void HandleWakeup(TEvWakeup::TPtr& ev); + void HandleResponse(TResponse::TPtr& ev); + void HandleWakeup(TEvWakeup::TPtr& ev); void HandleResult(NSchemeShard::TEvSchemeShard::TEvNotifyTxCompletionResult::TPtr& ev); - void HandlePipeClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev); - void HandlePipeClientDisconnected(TEvTabletPipe::TEvClientDestroyed::TPtr& ev); - - TDuration NextAttemptWaitDuration() const; - void ScheduleRetry(bool compilation); - bool RetryTimeoutExpired(); - - void WaitForCompletion(bool retry = false); - -private: - enum class EMode { - Compile, - Exec, - CompileAndExec, - }; - + void HandlePipeClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev); + void HandlePipeClientDisconnected(TEvTabletPipe::TEvClientDestroyed::TPtr& ev); + + TDuration NextAttemptWaitDuration() const; + void ScheduleRetry(bool compilation); + bool RetryTimeoutExpired(); + + void WaitForCompletion(bool retry = false); + private: + enum class EMode { + Compile, + Exec, + CompileAndExec, + }; + +private: const TActorId 
Sender_; - const TString RequestId_; + const TString RequestId_; const TSqsEvents::TExecutedCallback Cb_; THolder<TRequest> Request_; TString MkqlProgramText_; THashMap<TString, ui64> CompileResolveCookies_; bool CompilationPending_ = false; - size_t CompilationRetries_ = 3; - TMaybe<EQueryId> QueryId_; // information for logging - TInstant StartTs_; - EMode Mode_ = EMode::CompileAndExec; - TInstant StartExecutionTs_ = TInstant::Zero(); - size_t AttemptNumber_ = 0; - TDuration PrevAttemptWaitTime_ = TDuration::Zero(); - TIntrusivePtr<TTransactionCounters> Counters_; - TQueuePath QueuePath_; - bool RetryOnTimeout_; - NKikimrMiniKQL::TParams ProtoParamsForDebug; - - // Waiting for transaction to complete - TResponse::TPtr ResponseEvent_; + size_t CompilationRetries_ = 3; + TMaybe<EQueryId> QueryId_; // information for logging + TInstant StartTs_; + EMode Mode_ = EMode::CompileAndExec; + TInstant StartExecutionTs_ = TInstant::Zero(); + size_t AttemptNumber_ = 0; + TDuration PrevAttemptWaitTime_ = TDuration::Zero(); + TIntrusivePtr<TTransactionCounters> Counters_; + TQueuePath QueuePath_; + bool RetryOnTimeout_; + NKikimrMiniKQL::TParams ProtoParamsForDebug; + + // Waiting for transaction to complete + TResponse::TPtr ResponseEvent_; TActorId TabletPipeClient_; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/fifo_cleanup.cpp b/ydb/core/ymq/actor/fifo_cleanup.cpp index 218deb0fcc0..ef74d279caa 100644 --- a/ydb/core/ymq/actor/fifo_cleanup.cpp +++ b/ydb/core/ymq/actor/fifo_cleanup.cpp @@ -1,117 +1,117 @@ -#include "fifo_cleanup.h" -#include "cfg.h" -#include "log.h" -#include "executor.h" - +#include "fifo_cleanup.h" +#include "cfg.h" +#include "log.h" +#include "executor.h" + #include <ydb/public/lib/value/value.h> #include <ydb/core/base/appdata.h> #include <ydb/core/ymq/base/debug_info.h> - + #include <library/cpp/actors/core/hfunc.h> - -#include <util/random/random.h> - -namespace NKikimr::NSQS { - + +#include <util/random/random.h> + +namespace NKikimr::NSQS { + TCleanupActor::TCleanupActor(const TQueuePath& queuePath, const TActorId& queueLeader, ECleanupType cleanupType) - : QueuePath_(queuePath) - , RequestId_(CreateGuidAsString()) + : QueuePath_(queuePath) + , RequestId_(CreateGuidAsString()) , QueueLeader_(queueLeader) - , CleanupType(cleanupType) -{ - DebugInfo->QueueCleanupActors.emplace(TStringBuilder() << TLogQueueName(QueuePath_), this); -} - -TCleanupActor::~TCleanupActor() { - DebugInfo->QueueCleanupActors.EraseKeyValue(TStringBuilder() << TLogQueueName(QueuePath_), this); -} - -void TCleanupActor::Bootstrap() { - RLOG_SQS_INFO("Bootstrap cleanup actor for queue " << TLogQueueName(QueuePath_)); - Become(&TThis::StateFunc); - Schedule(RandomCleanupPeriod(), new TEvWakeup()); -} - -TDuration TCleanupActor::RandomCleanupPeriod() { - const ui64 cleanupPeriodMs = Cfg().GetCleanupPeriodMs(); - Y_VERIFY(cleanupPeriodMs > 0); - return TDuration::MilliSeconds(cleanupPeriodMs) + - TDuration::MilliSeconds(RandomNumber<ui64>(cleanupPeriodMs / 4)); -} - -void TCleanupActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - const auto& record = ev->Get()->Record; - const ui32 status = record.GetStatus(); - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); - const bool shouldContinue = val["moreData"]; - const TValue lastProcessedKey = val["lastProcessedKey"]; - if (lastProcessedKey.HaveValue()) { 
- KeyRangeStart = lastProcessedKey; - } - - if (shouldContinue) { - RunCleanupQuery(); - } else { - Schedule(RandomCleanupPeriod(), new TEvWakeup()); - } - } else { - RLOG_SQS_ERROR("Cleanup query failed. Queue: " << TLogQueueName(QueuePath_)); - Schedule(RandomCleanupPeriod(), new TEvWakeup()); - } -} - -void TCleanupActor::HandlePoisonPill(TEvPoisonPill::TPtr&) { - PassAway(); -} - -void TCleanupActor::HandleWakeup() { - KeyRangeStart = ""; - RunCleanupQuery(); -} - -void TCleanupActor::RunCleanupQuery() { - TExecutorBuilder builder(SelfId(), RequestId_); - builder - .User(QueuePath_.UserName) - .Queue(QueuePath_.QueueName) + , CleanupType(cleanupType) +{ + DebugInfo->QueueCleanupActors.emplace(TStringBuilder() << TLogQueueName(QueuePath_), this); +} + +TCleanupActor::~TCleanupActor() { + DebugInfo->QueueCleanupActors.EraseKeyValue(TStringBuilder() << TLogQueueName(QueuePath_), this); +} + +void TCleanupActor::Bootstrap() { + RLOG_SQS_INFO("Bootstrap cleanup actor for queue " << TLogQueueName(QueuePath_)); + Become(&TThis::StateFunc); + Schedule(RandomCleanupPeriod(), new TEvWakeup()); +} + +TDuration TCleanupActor::RandomCleanupPeriod() { + const ui64 cleanupPeriodMs = Cfg().GetCleanupPeriodMs(); + Y_VERIFY(cleanupPeriodMs > 0); + return TDuration::MilliSeconds(cleanupPeriodMs) + + TDuration::MilliSeconds(RandomNumber<ui64>(cleanupPeriodMs / 4)); +} + +void TCleanupActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + const auto& record = ev->Get()->Record; + const ui32 status = record.GetStatus(); + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); + const bool shouldContinue = val["moreData"]; + const TValue lastProcessedKey = val["lastProcessedKey"]; + if (lastProcessedKey.HaveValue()) { + KeyRangeStart = lastProcessedKey; + } + + if (shouldContinue) { + RunCleanupQuery(); + } else { + Schedule(RandomCleanupPeriod(), new TEvWakeup()); + } + } else { + RLOG_SQS_ERROR("Cleanup query failed. 
Queue: " << TLogQueueName(QueuePath_)); + Schedule(RandomCleanupPeriod(), new TEvWakeup()); + } +} + +void TCleanupActor::HandlePoisonPill(TEvPoisonPill::TPtr&) { + PassAway(); +} + +void TCleanupActor::HandleWakeup() { + KeyRangeStart = ""; + RunCleanupQuery(); +} + +void TCleanupActor::RunCleanupQuery() { + TExecutorBuilder builder(SelfId(), RequestId_); + builder + .User(QueuePath_.UserName) + .Queue(QueuePath_.QueueName) .QueueLeader(QueueLeader_) - .QueryId(GetCleanupQueryId()) - .RetryOnTimeout() - .Params() - .Uint64("NOW", Now().MilliSeconds()) - .Uint64("BATCH_SIZE", Cfg().GetCleanupBatchSize()); - - switch (CleanupType) { - case ECleanupType::Deduplication: - builder.Params().String("KEY_RANGE_START", KeyRangeStart); - break; - case ECleanupType::Reads: - builder.Params().Utf8("KEY_RANGE_START", KeyRangeStart); - break; - } - - builder.Start(); - - RLOG_SQS_DEBUG("Executing cleanup request for queue " << TLogQueueName(QueuePath_)); -} - -STATEFN(TCleanupActor::StateFunc) { - switch (ev->GetTypeRewrite()) { - cFunc(TEvWakeup::EventType, HandleWakeup); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - hFunc(TEvPoisonPill, HandlePoisonPill); - } -} - -EQueryId TCleanupActor::GetCleanupQueryId() const { - switch (CleanupType) { - case ECleanupType::Deduplication: - return CLEANUP_DEDUPLICATION_ID; - case ECleanupType::Reads: - return CLEANUP_READS_ID; - } -} - -} // namespace NKikimr::NSQS + .QueryId(GetCleanupQueryId()) + .RetryOnTimeout() + .Params() + .Uint64("NOW", Now().MilliSeconds()) + .Uint64("BATCH_SIZE", Cfg().GetCleanupBatchSize()); + + switch (CleanupType) { + case ECleanupType::Deduplication: + builder.Params().String("KEY_RANGE_START", KeyRangeStart); + break; + case ECleanupType::Reads: + builder.Params().Utf8("KEY_RANGE_START", KeyRangeStart); + break; + } + + builder.Start(); + + RLOG_SQS_DEBUG("Executing cleanup request for queue " << TLogQueueName(QueuePath_)); +} + +STATEFN(TCleanupActor::StateFunc) { + switch (ev->GetTypeRewrite()) { + cFunc(TEvWakeup::EventType, HandleWakeup); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TEvPoisonPill, HandlePoisonPill); + } +} + +EQueryId TCleanupActor::GetCleanupQueryId() const { + switch (CleanupType) { + case ECleanupType::Deduplication: + return CLEANUP_DEDUPLICATION_ID; + case ECleanupType::Reads: + return CLEANUP_READS_ID; + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/fifo_cleanup.h b/ydb/core/ymq/actor/fifo_cleanup.h index edf6040eec5..4afde0041fe 100644 --- a/ydb/core/ymq/actor/fifo_cleanup.h +++ b/ydb/core/ymq/actor/fifo_cleanup.h @@ -1,48 +1,48 @@ -#pragma once -#include "defs.h" +#pragma once +#include "defs.h" #include <ydb/core/ymq/actor/events.h> #include <ydb/core/protos/services.pb.h> - + #include <library/cpp/actors/core/actor.h> - -namespace NKikimr::NSQS { - -class TCleanupActor : public TActorBootstrapped<TCleanupActor> { -public: - enum class ECleanupType { - Deduplication, - Reads, - }; - + +namespace NKikimr::NSQS { + +class TCleanupActor : public TActorBootstrapped<TCleanupActor> { +public: + enum class ECleanupType { + Deduplication, + Reads, + }; + TCleanupActor(const TQueuePath& queuePath, const TActorId& queueLeader, ECleanupType cleanupType); - ~TCleanupActor(); - - void Bootstrap(); - + ~TCleanupActor(); + + void Bootstrap(); + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_CLEANUP_BACKGROUND_ACTOR; - } - -private: - TDuration RandomCleanupPeriod(); - - void 
HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - void HandlePoisonPill(TEvPoisonPill::TPtr&); - void HandleWakeup(); - - void RunCleanupQuery(); - - EQueryId GetCleanupQueryId() const; - -private: - STATEFN(StateFunc); - -private: - const TQueuePath QueuePath_; - const TString RequestId_; + } + +private: + TDuration RandomCleanupPeriod(); + + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void HandlePoisonPill(TEvPoisonPill::TPtr&); + void HandleWakeup(); + + void RunCleanupQuery(); + + EQueryId GetCleanupQueryId() const; + +private: + STATEFN(StateFunc); + +private: + const TQueuePath QueuePath_; + const TString RequestId_; const TActorId QueueLeader_; - const ECleanupType CleanupType; - TString KeyRangeStart; -}; - -} // namespace NKikimr::NSQS + const ECleanupType CleanupType; + TString KeyRangeStart; +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/garbage_collector.h b/ydb/core/ymq/actor/garbage_collector.h index 713b31cc1de..9a0c79b3db7 100644 --- a/ydb/core/ymq/actor/garbage_collector.h +++ b/ydb/core/ymq/actor/garbage_collector.h @@ -1,5 +1,5 @@ #pragma once -#include "defs.h" +#include "defs.h" #include "actor.h" diff --git a/ydb/core/ymq/actor/get_queue_attributes.cpp b/ydb/core/ymq/actor/get_queue_attributes.cpp index 28dc67b14f5..c081a90cea2 100644 --- a/ydb/core/ymq/actor/get_queue_attributes.cpp +++ b/ydb/core/ymq/actor/get_queue_attributes.cpp @@ -1,8 +1,8 @@ #include "action.h" -#include "common_batch_actor.h" -#include "error.h" -#include "executor.h" -#include "log.h" +#include "common_batch_actor.h" +#include "error.h" +#include "executor.h" +#include "log.h" #include "params.h" #include "serviceid.h" @@ -15,16 +15,16 @@ using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { -struct TAttributeInfo { - bool NeedRuntimeAttributes = false; - bool NeedAttributesTable = false; +struct TAttributeInfo { + bool NeedRuntimeAttributes = false; + bool NeedAttributesTable = false; bool NeedArn = false; - bool FifoOnly = false; -}; - -static const std::map<TString, TAttributeInfo> AttributesInfo = { + bool FifoOnly = false; +}; + +static const std::map<TString, TAttributeInfo> AttributesInfo = { { "ApproximateNumberOfMessages", { true, false, false, false } }, { "ApproximateNumberOfMessagesDelayed", { true, false, false, false } }, { "ApproximateNumberOfMessagesNotVisible", { true, false, false, false } }, @@ -38,68 +38,68 @@ static const std::map<TString, TAttributeInfo> AttributesInfo = { { "FifoQueue", { false, true, false, true } }, { "ContentBasedDeduplication", { false, true, false, true } }, { "QueueArn", { false, false, true, false } }, -}; - +}; + class TGetQueueAttributesActor : public TActionActor<TGetQueueAttributesActor> { public: - TGetQueueAttributesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::GetQueueAttributes, std::move(cb)) + TGetQueueAttributesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::GetQueueAttributes, std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutableGetQueueAttributes()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } private: - bool ExpandNames() { - if (!Request().NamesSize()) { + bool ExpandNames() { + if (!Request().NamesSize()) { return false; } - bool all = false; - for (const auto& name : Request().names()) { + bool all = false; + 
for (const auto& name : Request().names()) { if (name == "All") { - all = true; - } else { - const auto info = AttributesInfo.find(name); - if (info == AttributesInfo.end()) { - MakeError(MutableErrorDesc(), NErrors::INVALID_ATTRIBUTE_NAME); - return false; - } - if (info->second.NeedAttributesTable) { - NeedAttributesTable_ = true; - } - if (info->second.NeedRuntimeAttributes) { - NeedRuntimeAttributes_ = true; - } + all = true; + } else { + const auto info = AttributesInfo.find(name); + if (info == AttributesInfo.end()) { + MakeError(MutableErrorDesc(), NErrors::INVALID_ATTRIBUTE_NAME); + return false; + } + if (info->second.NeedAttributesTable) { + NeedAttributesTable_ = true; + } + if (info->second.NeedRuntimeAttributes) { + NeedRuntimeAttributes_ = true; + } if (info->second.NeedArn) { NeedArn_ = true; } - AttributesSet_.insert(name); - } - } + AttributesSet_.insert(name); + } + } - if (all) { - const bool isFifo = IsFifoQueue(); - for (const auto& [name, props] : AttributesInfo) { - if (!props.FifoOnly || isFifo) { - AttributesSet_.insert(name); - } + if (all) { + const bool isFifo = IsFifoQueue(); + for (const auto& [name, props] : AttributesInfo) { + if (!props.FifoOnly || isFifo) { + AttributesSet_.insert(name); + } } - NeedRuntimeAttributes_ = true; - NeedAttributesTable_ = true; + NeedRuntimeAttributes_ = true; + NeedAttributesTable_ = true; NeedArn_ = true; } - return true; + return true; } private: bool HasAttributeName(const TStringBuf name) const { - return IsIn(AttributesSet_, name); + return IsIn(AttributesSet_, name); } TString MakeQueueArn(const TString& prefix, const TString& region, const TString& account, const TString& queueName) const { @@ -108,105 +108,105 @@ private: bool DoValidate() override { if (!GetQueueName()) { - MakeError(Response_.MutableGetQueueAttributes(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); + MakeError(Response_.MutableGetQueueAttributes(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); return false; } return true; } - TError* MutableErrorDesc() override { - return Response_.MutableGetQueueAttributes()->MutableError(); - } - - void ReplyIfReady() { + TError* MutableErrorDesc() override { + return Response_.MutableGetQueueAttributes()->MutableError(); + } + + void ReplyIfReady() { if (WaitCount_ == 0) { - SendReplyAndDie(); + SendReplyAndDie(); } } - void DoAction() override { + void DoAction() override { Become(&TThis::StateFunc); - if (!ExpandNames()) { - SendReplyAndDie(); - return; - } + if (!ExpandNames()) { + SendReplyAndDie(); + return; + } - if (NeedAttributesTable_) { - TExecutorBuilder builder(SelfId(), RequestId_); - builder - .User(UserName_) - .Queue(GetQueueName()) + if (NeedAttributesTable_) { + TExecutorBuilder builder(SelfId(), RequestId_); + builder + .User(UserName_) + .Queue(GetQueueName()) .QueueLeader(QueueLeader_) - .QueryId(INTERNAL_GET_QUEUE_ATTRIBUTES_ID) - .Counters(QueueCounters_) - .RetryOnTimeout() - .Start(); - ++WaitCount_; - } - - if (NeedRuntimeAttributes_) { + .QueryId(INTERNAL_GET_QUEUE_ATTRIBUTES_ID) + .Counters(QueueCounters_) + .RetryOnTimeout() + .Start(); + ++WaitCount_; + } + + if (NeedRuntimeAttributes_) { Send(QueueLeader_, MakeHolder<TSqsEvents::TEvGetRuntimeQueueAttributes>(RequestId_)); - ++WaitCount_; + ++WaitCount_; } if (NeedArn_) { if (IsCloud()) { - Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvGetQueueFolderIdAndCustomName(RequestId_, UserName_, GetQueueName())); + Send(MakeSqsServiceID(SelfId().NodeId()), new 
TSqsEvents::TEvGetQueueFolderIdAndCustomName(RequestId_, UserName_, GetQueueName())); ++WaitCount_; } else { auto* result = Response_.MutableGetQueueAttributes(); - result->SetQueueArn(MakeQueueArn(yaSqsArnPrefix, Cfg().GetYandexCloudServiceRegion(), UserName_, GetQueueName())); + result->SetQueueArn(MakeQueueArn(yaSqsArnPrefix, Cfg().GetYandexCloudServiceRegion(), UserName_, GetQueueName())); } } - ReplyIfReady(); + ReplyIfReady(); } TString DoGetQueueName() const override { - return Request().GetQueueName(); + return Request().GetQueueName(); } - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - hFunc(TSqsEvents::TEvGetRuntimeQueueAttributesResponse, HandleRuntimeAttributes); - hFunc(TSqsEvents::TEvQueueFolderIdAndCustomName, HandleQueueFolderIdAndCustomName); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TSqsEvents::TEvGetRuntimeQueueAttributesResponse, HandleRuntimeAttributes); + hFunc(TSqsEvents::TEvQueueFolderIdAndCustomName, HandleQueueFolderIdAndCustomName); } } - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - const auto& record = ev->Get()->Record; - const ui32 status = record.GetStatus(); - auto* result = Response_.MutableGetQueueAttributes(); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + const auto& record = ev->Get()->Record; + const ui32 status = record.GetStatus(); + auto* result = Response_.MutableGetQueueAttributes(); if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); - const TValue& attrs(val["attrs"]); - - if (HasAttributeName("ContentBasedDeduplication")) { - result->SetContentBasedDeduplication(bool(attrs["ContentBasedDeduplication"])); - } - if (HasAttributeName("DelaySeconds")) { - result->SetDelaySeconds(TDuration::MilliSeconds(ui64(attrs["DelaySeconds"])).Seconds()); - } - if (HasAttributeName("FifoQueue")) { - result->SetFifoQueue(bool(attrs["FifoQueue"])); - } - if (HasAttributeName("MaximumMessageSize")) { - result->SetMaximumMessageSize(ui64(attrs["MaximumMessageSize"])); - } - if (HasAttributeName("MessageRetentionPeriod")) { - result->SetMessageRetentionPeriod(TDuration::MilliSeconds(ui64(attrs["MessageRetentionPeriod"])).Seconds()); - } - if (HasAttributeName("ReceiveMessageWaitTimeSeconds")) { - result->SetReceiveMessageWaitTimeSeconds(TDuration::MilliSeconds(ui64(attrs["ReceiveMessageWaitTime"])).Seconds()); - } - if (HasAttributeName("VisibilityTimeout")) { - result->SetVisibilityTimeout(TDuration::MilliSeconds(ui64(attrs["VisibilityTimeout"])).Seconds()); - } + const TValue& attrs(val["attrs"]); + + if (HasAttributeName("ContentBasedDeduplication")) { + result->SetContentBasedDeduplication(bool(attrs["ContentBasedDeduplication"])); + } + if (HasAttributeName("DelaySeconds")) { + result->SetDelaySeconds(TDuration::MilliSeconds(ui64(attrs["DelaySeconds"])).Seconds()); + } + if (HasAttributeName("FifoQueue")) { + result->SetFifoQueue(bool(attrs["FifoQueue"])); + } + if (HasAttributeName("MaximumMessageSize")) { + result->SetMaximumMessageSize(ui64(attrs["MaximumMessageSize"])); + } + if (HasAttributeName("MessageRetentionPeriod")) { + result->SetMessageRetentionPeriod(TDuration::MilliSeconds(ui64(attrs["MessageRetentionPeriod"])).Seconds()); + } + if (HasAttributeName("ReceiveMessageWaitTimeSeconds")) { + 
result->SetReceiveMessageWaitTimeSeconds(TDuration::MilliSeconds(ui64(attrs["ReceiveMessageWaitTime"])).Seconds()); + } + if (HasAttributeName("VisibilityTimeout")) { + result->SetVisibilityTimeout(TDuration::MilliSeconds(ui64(attrs["VisibilityTimeout"])).Seconds()); + } if (HasAttributeName("RedrivePolicy")) { const TValue& dlqArn(attrs["DlqArn"]); if (dlqArn.HaveValue() && !TString(dlqArn).empty()) { @@ -217,148 +217,148 @@ private: result->SetRedrivePolicy(redrivePolicy.ToJson()); } } - } else { - RLOG_SQS_ERROR("Get queue attributes query failed"); - MakeError(result, NErrors::INTERNAL_FAILURE); - SendReplyAndDie(); - return; + } else { + RLOG_SQS_ERROR("Get queue attributes query failed"); + MakeError(result, NErrors::INTERNAL_FAILURE); + SendReplyAndDie(); + return; + } + + --WaitCount_; + ReplyIfReady(); + } + + void HandleRuntimeAttributes(TSqsEvents::TEvGetRuntimeQueueAttributesResponse::TPtr& ev) { + auto* result = Response_.MutableGetQueueAttributes(); + + if (ev->Get()->Failed) { + RLOG_SQS_ERROR("Get runtime queue attributes failed"); + MakeError(result, NErrors::INTERNAL_FAILURE); + SendReplyAndDie(); + return; } - --WaitCount_; - ReplyIfReady(); + if (HasAttributeName("CreatedTimestamp")) { + result->SetCreatedTimestamp(ev->Get()->CreatedTimestamp.Seconds()); + } + if (HasAttributeName("ApproximateNumberOfMessages")) { + result->SetApproximateNumberOfMessages(ev->Get()->MessagesCount); + } + if (HasAttributeName("ApproximateNumberOfMessagesNotVisible")) { + result->SetApproximateNumberOfMessagesNotVisible(ev->Get()->InflyMessagesCount); + } + if (HasAttributeName("ApproximateNumberOfMessagesDelayed")) { + result->SetApproximateNumberOfMessagesDelayed(ev->Get()->MessagesDelayed); + } + + --WaitCount_; + ReplyIfReady(); } - void HandleRuntimeAttributes(TSqsEvents::TEvGetRuntimeQueueAttributesResponse::TPtr& ev) { - auto* result = Response_.MutableGetQueueAttributes(); - - if (ev->Get()->Failed) { - RLOG_SQS_ERROR("Get runtime queue attributes failed"); - MakeError(result, NErrors::INTERNAL_FAILURE); - SendReplyAndDie(); - return; - } - - if (HasAttributeName("CreatedTimestamp")) { - result->SetCreatedTimestamp(ev->Get()->CreatedTimestamp.Seconds()); - } - if (HasAttributeName("ApproximateNumberOfMessages")) { - result->SetApproximateNumberOfMessages(ev->Get()->MessagesCount); - } - if (HasAttributeName("ApproximateNumberOfMessagesNotVisible")) { - result->SetApproximateNumberOfMessagesNotVisible(ev->Get()->InflyMessagesCount); - } - if (HasAttributeName("ApproximateNumberOfMessagesDelayed")) { - result->SetApproximateNumberOfMessagesDelayed(ev->Get()->MessagesDelayed); - } - - --WaitCount_; - ReplyIfReady(); - } - - void HandleQueueFolderIdAndCustomName(TSqsEvents::TEvQueueFolderIdAndCustomName::TPtr& ev) { + void HandleQueueFolderIdAndCustomName(TSqsEvents::TEvQueueFolderIdAndCustomName::TPtr& ev) { auto* result = Response_.MutableGetQueueAttributes(); if (ev->Get()->Failed || !ev->Get()->Exists) { - RLOG_SQS_DEBUG("Get queue folder id and custom name failed. Failed: " << ev->Get()->Failed << ". Exists: " << ev->Get()->Exists); + RLOG_SQS_DEBUG("Get queue folder id and custom name failed. Failed: " << ev->Get()->Failed << ". 
Exists: " << ev->Get()->Exists); MakeError(result, NErrors::INTERNAL_FAILURE); - SendReplyAndDie(); + SendReplyAndDie(); return; } if (NeedArn_) { - result->SetQueueArn(MakeQueueArn(cloudArnPrefix, Cfg().GetYandexCloudServiceRegion(), ev->Get()->QueueFolderId, ev->Get()->QueueCustomName)); + result->SetQueueArn(MakeQueueArn(cloudArnPrefix, Cfg().GetYandexCloudServiceRegion(), ev->Get()->QueueFolderId, ev->Get()->QueueCustomName)); } --WaitCount_; - ReplyIfReady(); - } - - const TGetQueueAttributesRequest& Request() const { - return SourceSqsRequest_.GetGetQueueAttributes(); + ReplyIfReady(); } + const TGetQueueAttributesRequest& Request() const { + return SourceSqsRequest_.GetGetQueueAttributes(); + } + private: - THashSet<TString> AttributesSet_; - bool NeedRuntimeAttributes_ = false; - bool NeedAttributesTable_ = false; + THashSet<TString> AttributesSet_; + bool NeedRuntimeAttributes_ = false; + bool NeedAttributesTable_ = false; bool NeedArn_ = false; - size_t WaitCount_ = 0; + size_t WaitCount_ = 0; }; -class TGetQueueAttributesBatchActor - : public TCommonBatchActor<TGetQueueAttributesBatchActor> -{ -public: - TGetQueueAttributesBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TCommonBatchActor(sourceSqsRequest, EAction::GetQueueAttributesBatch, std::move(cb)) - { - CopyAccountName(Request()); - Response_.MutableGetQueueAttributesBatch()->SetRequestId(RequestId_); - - CopySecurityToken(Request()); - } - -private: - std::vector<NKikimrClient::TSqsRequest> GenerateRequestsFromBatch() const override { - std::vector<NKikimrClient::TSqsRequest> ret; - ret.resize(Request().EntriesSize()); - for (size_t i = 0; i < Request().EntriesSize(); ++i) { - const auto& entry = Request().GetEntries(i); - auto& req = *ret[i].MutableGetQueueAttributes(); - req.MutableAuth()->SetUserName(UserName_); - - if (Request().HasCredentials()) { - *req.MutableCredentials() = Request().GetCredentials(); +class TGetQueueAttributesBatchActor + : public TCommonBatchActor<TGetQueueAttributesBatchActor> +{ +public: + TGetQueueAttributesBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TCommonBatchActor(sourceSqsRequest, EAction::GetQueueAttributesBatch, std::move(cb)) + { + CopyAccountName(Request()); + Response_.MutableGetQueueAttributesBatch()->SetRequestId(RequestId_); + + CopySecurityToken(Request()); + } + +private: + std::vector<NKikimrClient::TSqsRequest> GenerateRequestsFromBatch() const override { + std::vector<NKikimrClient::TSqsRequest> ret; + ret.resize(Request().EntriesSize()); + for (size_t i = 0; i < Request().EntriesSize(); ++i) { + const auto& entry = Request().GetEntries(i); + auto& req = *ret[i].MutableGetQueueAttributes(); + req.MutableAuth()->SetUserName(UserName_); + + if (Request().HasCredentials()) { + *req.MutableCredentials() = Request().GetCredentials(); } - req.SetQueueName(entry.GetQueueName()); - req.SetId(entry.GetId()); - *req.MutableNames() = Request().GetNames(); - } - return ret; - } - - void OnResponses(std::vector<NKikimrClient::TSqsResponse>&& responses) override { - Y_VERIFY(Request().EntriesSize() == responses.size()); - auto& resp = *Response_.MutableGetQueueAttributesBatch(); - for (size_t i = 0; i < Request().EntriesSize(); ++i) { - const auto& reqEntry = Request().GetEntries(i); - auto& respEntry = *resp.AddEntries(); - Y_VERIFY(responses[i].HasGetQueueAttributes()); - respEntry = std::move(*responses[i].MutableGetQueueAttributes()); - respEntry.SetId(reqEntry.GetId()); - } - } - - bool 
DoValidate() override { - for (const auto& entry : Request().GetEntries()) { - if (entry.GetQueueName().empty()) { - MakeError(MutableErrorDesc(), NErrors::MISSING_PARAMETER, TStringBuilder() << "No QueueName parameter in entry " << entry.GetId() << "."); - return false; - } - } - return true; - } - - TError* MutableErrorDesc() override { - return Response_.MutableGetQueueAttributesBatch()->MutableError(); - } - - TString DoGetQueueName() const override { - return {}; - } - - const TGetQueueAttributesBatchRequest& Request() const { - return SourceSqsRequest_.GetGetQueueAttributesBatch(); - } -}; - -IActor* CreateGetQueueAttributesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TGetQueueAttributesActor(sourceSqsRequest, std::move(cb)); -} - -IActor* CreateGetQueueAttributesBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TGetQueueAttributesBatchActor(sourceSqsRequest, std::move(cb)); + req.SetQueueName(entry.GetQueueName()); + req.SetId(entry.GetId()); + *req.MutableNames() = Request().GetNames(); + } + return ret; + } + + void OnResponses(std::vector<NKikimrClient::TSqsResponse>&& responses) override { + Y_VERIFY(Request().EntriesSize() == responses.size()); + auto& resp = *Response_.MutableGetQueueAttributesBatch(); + for (size_t i = 0; i < Request().EntriesSize(); ++i) { + const auto& reqEntry = Request().GetEntries(i); + auto& respEntry = *resp.AddEntries(); + Y_VERIFY(responses[i].HasGetQueueAttributes()); + respEntry = std::move(*responses[i].MutableGetQueueAttributes()); + respEntry.SetId(reqEntry.GetId()); + } + } + + bool DoValidate() override { + for (const auto& entry : Request().GetEntries()) { + if (entry.GetQueueName().empty()) { + MakeError(MutableErrorDesc(), NErrors::MISSING_PARAMETER, TStringBuilder() << "No QueueName parameter in entry " << entry.GetId() << "."); + return false; + } + } + return true; + } + + TError* MutableErrorDesc() override { + return Response_.MutableGetQueueAttributesBatch()->MutableError(); + } + + TString DoGetQueueName() const override { + return {}; + } + + const TGetQueueAttributesBatchRequest& Request() const { + return SourceSqsRequest_.GetGetQueueAttributesBatch(); + } +}; + +IActor* CreateGetQueueAttributesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TGetQueueAttributesActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +IActor* CreateGetQueueAttributesBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TGetQueueAttributesBatchActor(sourceSqsRequest, std::move(cb)); +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/get_queue_url.cpp b/ydb/core/ymq/actor/get_queue_url.cpp index 95554d7bb0f..96d8b52b31a 100644 --- a/ydb/core/ymq/actor/get_queue_url.cpp +++ b/ydb/core/ymq/actor/get_queue_url.cpp @@ -1,6 +1,6 @@ #include "action.h" -#include "error.h" -#include "log.h" +#include "error.h" +#include "log.h" #include "params.h" #include "serviceid.h" @@ -12,19 +12,19 @@ using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TGetQueueUrlActor : public TActionActor<TGetQueueUrlActor> { public: - TGetQueueUrlActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::GetQueueUrl, std::move(cb)) + TGetQueueUrlActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) 
+ : TActionActor(sourceSqsRequest, EAction::GetQueueUrl, std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutableGetQueueUrl()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } static constexpr bool NeedExistingQueue() { @@ -34,60 +34,60 @@ public: private: bool DoValidate() override { if (!GetQueueName()) { - MakeError(MutableErrorDesc(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); + MakeError(MutableErrorDesc(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); return false; } return true; } - TError* MutableErrorDesc() override { - return Response_.MutableGetQueueUrl()->MutableError(); - } - - void DoAction() override { + TError* MutableErrorDesc() override { + return Response_.MutableGetQueueUrl()->MutableError(); + } + + void DoAction() override { Become(&TThis::StateFunc); - Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvGetQueueId(RequestId_, UserName_, Request().GetQueueName(), FolderId_)); + Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvGetQueueId(RequestId_, UserName_, Request().GetQueueName(), FolderId_)); } TString DoGetQueueName() const override { - return Request().GetQueueName(); + return Request().GetQueueName(); } - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvQueueId, HandleQueueId); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvQueueId, HandleQueueId); } } - void HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev) { - if (ev->Get()->Failed) { - RLOG_SQS_WARN("Get queue id failed"); - MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE); - } else { - if (ev->Get()->Exists) { - auto* result = Response_.MutableGetQueueUrl(); + void HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev) { + if (ev->Get()->Failed) { + RLOG_SQS_WARN("Get queue id failed"); + MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE); + } else { + if (ev->Get()->Exists) { + auto* result = Response_.MutableGetQueueUrl(); if (IsCloud()) { - result->SetQueueUrl(MakeQueueUrl(TString::Join(ev->Get()->QueueId, "/", GetQueueName()))); + result->SetQueueUrl(MakeQueueUrl(TString::Join(ev->Get()->QueueId, "/", GetQueueName()))); } else { - result->SetQueueUrl(MakeQueueUrl(ev->Get()->QueueId)); + result->SetQueueUrl(MakeQueueUrl(ev->Get()->QueueId)); } } else { - MakeError(MutableErrorDesc(), NErrors::NON_EXISTENT_QUEUE); + MakeError(MutableErrorDesc(), NErrors::NON_EXISTENT_QUEUE); } } - SendReplyAndDie(); + SendReplyAndDie(); } - const TGetQueueUrlRequest& Request() const { - return SourceSqsRequest_.GetGetQueueUrl(); - } + const TGetQueueUrlRequest& Request() const { + return SourceSqsRequest_.GetGetQueueUrl(); + } }; -IActor* CreateGetQueueUrlActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TGetQueueUrlActor(sourceSqsRequest, std::move(cb)); +IActor* CreateGetQueueUrlActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TGetQueueUrlActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/infly.cpp b/ydb/core/ymq/actor/infly.cpp index a08cb653872..94b96fb9ff5 100644 --- a/ydb/core/ymq/actor/infly.cpp +++ b/ydb/core/ymq/actor/infly.cpp @@ -1,231 +1,231 @@ -#include "infly.h" - -#include <vector> - -namespace NKikimr::NSQS { - -bool TCmpByVisibilityDeadline::Compare(const TInflyMessageWithVisibilityDeadlineKey& l, 
const TInflyMessageWithVisibilityDeadlineKey& r) { - return l.Message().GetVisibilityDeadline() < r.Message().GetVisibilityDeadline(); -} - -bool TCmpByVisibilityDeadline::Compare(const TInflyMessageWithVisibilityDeadlineKey& l, TInstant r) { - return l.Message().GetVisibilityDeadline() < r; -} - -bool TCmpByVisibilityDeadline::Compare(TInstant l, const TInflyMessageWithVisibilityDeadlineKey& r) { - return l < r.Message().GetVisibilityDeadline(); -} - -bool TCmpByOffset::Compare(const TInflyMessageWithOffsetKey& l, const TInflyMessageWithOffsetKey& r) { - return l.Message().GetOffset() < r.Message().GetOffset(); -} - -bool TCmpByOffset::Compare(const TInflyMessageWithOffsetKey& l, ui64 r) { - return l.Message().GetOffset() < r; -} - -bool TCmpByOffset::Compare(ui64 l, const TInflyMessageWithOffsetKey& r) { - return l < r.Message().GetOffset(); -} - -TInflyMessage& TInflyMessageWithVisibilityDeadlineKey::Message() { - return *static_cast<TInflyMessage*>(this); -} - -const TInflyMessage& TInflyMessageWithVisibilityDeadlineKey::Message() const { - return *static_cast<const TInflyMessage*>(this); -} - -TInflyMessage& TInflyMessageWithOffsetKey::Message() { - return *static_cast<TInflyMessage*>(this); -} - -const TInflyMessage& TInflyMessageWithOffsetKey::Message() const { - return *static_cast<const TInflyMessage*>(this); -} - -struct TInflyMessages::TDestroyInflyMessages : public TInflyMessages::TOffsetTree::TDestroy { - void operator()(TInflyMessageWithOffsetKey& v) const noexcept { - TDestroy::operator()(v); // remove from tree - delete static_cast<TInflyMessage*>(&v); - } -}; - -TInflyMessages::~TInflyMessages() { - Y_ASSERT(SetVisibilityDeadlineCandidates.Empty()); - MessagesByVisibilityDeadline.Clear(); - - // MessagesByOffset clear - MessagesByOffset.ForEachNoOrder(TDestroyInflyMessages()); - MessagesByOffset.Init(); -} - -void TInflyMessages::Add(THolder<TInflyMessage> msg) { - ++Size; - MessagesByVisibilityDeadline.Insert(msg.Get()); - MessagesByOffset.Insert(msg.Release()); -} - -THolder<TInflyMessage> TInflyMessages::Delete(ui64 offset) { - auto* msg = MessagesByOffset.Find(offset); - if (msg) { - Y_ASSERT(Size > 0); - --Size; - TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = &msg->Message(); - byVisibilityDeadline->UnLink(); - msg->UnLink(); +#include "infly.h" + +#include <vector> + +namespace NKikimr::NSQS { + +bool TCmpByVisibilityDeadline::Compare(const TInflyMessageWithVisibilityDeadlineKey& l, const TInflyMessageWithVisibilityDeadlineKey& r) { + return l.Message().GetVisibilityDeadline() < r.Message().GetVisibilityDeadline(); +} + +bool TCmpByVisibilityDeadline::Compare(const TInflyMessageWithVisibilityDeadlineKey& l, TInstant r) { + return l.Message().GetVisibilityDeadline() < r; +} + +bool TCmpByVisibilityDeadline::Compare(TInstant l, const TInflyMessageWithVisibilityDeadlineKey& r) { + return l < r.Message().GetVisibilityDeadline(); +} + +bool TCmpByOffset::Compare(const TInflyMessageWithOffsetKey& l, const TInflyMessageWithOffsetKey& r) { + return l.Message().GetOffset() < r.Message().GetOffset(); +} + +bool TCmpByOffset::Compare(const TInflyMessageWithOffsetKey& l, ui64 r) { + return l.Message().GetOffset() < r; +} + +bool TCmpByOffset::Compare(ui64 l, const TInflyMessageWithOffsetKey& r) { + return l < r.Message().GetOffset(); +} + +TInflyMessage& TInflyMessageWithVisibilityDeadlineKey::Message() { + return *static_cast<TInflyMessage*>(this); +} + +const TInflyMessage& TInflyMessageWithVisibilityDeadlineKey::Message() const { + return *static_cast<const 
TInflyMessage*>(this); +} + +TInflyMessage& TInflyMessageWithOffsetKey::Message() { + return *static_cast<TInflyMessage*>(this); +} + +const TInflyMessage& TInflyMessageWithOffsetKey::Message() const { + return *static_cast<const TInflyMessage*>(this); +} + +struct TInflyMessages::TDestroyInflyMessages : public TInflyMessages::TOffsetTree::TDestroy { + void operator()(TInflyMessageWithOffsetKey& v) const noexcept { + TDestroy::operator()(v); // remove from tree + delete static_cast<TInflyMessage*>(&v); + } +}; + +TInflyMessages::~TInflyMessages() { + Y_ASSERT(SetVisibilityDeadlineCandidates.Empty()); + MessagesByVisibilityDeadline.Clear(); + + // MessagesByOffset clear + MessagesByOffset.ForEachNoOrder(TDestroyInflyMessages()); + MessagesByOffset.Init(); +} + +void TInflyMessages::Add(THolder<TInflyMessage> msg) { + ++Size; + MessagesByVisibilityDeadline.Insert(msg.Get()); + MessagesByOffset.Insert(msg.Release()); +} + +THolder<TInflyMessage> TInflyMessages::Delete(ui64 offset) { + auto* msg = MessagesByOffset.Find(offset); + if (msg) { + Y_ASSERT(Size > 0); + --Size; + TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = &msg->Message(); + byVisibilityDeadline->UnLink(); + msg->UnLink(); return THolder<TInflyMessage>(&msg->Message()); - } - return nullptr; -} - -TInflyMessages::TReceiveCandidates TInflyMessages::Receive(size_t maxCount, TInstant now) { - maxCount = Min(maxCount, Size); - size_t added = 0; - THolder<TOffsetTree> tree; - while (added < maxCount && MessagesByVisibilityDeadline.Begin()->Message().GetVisibilityDeadline() < now) { - if (!tree) { - tree = MakeHolder<TOffsetTree>(); - } - ++added; - TInflyMessage* msg = &MessagesByVisibilityDeadline.Begin()->Message(); - TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = msg; - byVisibilityDeadline->UnLink(); - TInflyMessageWithOffsetKey* byOffset = msg; - byOffset->UnLink(); - tree->Insert(msg); - } - if (!added) { - return {}; - } - HoldCount += added; - Size -= added; - return TReceiveCandidates(this, std::move(tree)); -} - -TInflyMessages::TReceiveCandidates::TReceiveCandidates(TIntrusivePtr<TInflyMessages> parent, THolder<TOffsetTree> messages) - : Parent(std::move(parent)) - , ReceivedMessages(std::move(messages)) -{ -} - + } + return nullptr; +} + +TInflyMessages::TReceiveCandidates TInflyMessages::Receive(size_t maxCount, TInstant now) { + maxCount = Min(maxCount, Size); + size_t added = 0; + THolder<TOffsetTree> tree; + while (added < maxCount && MessagesByVisibilityDeadline.Begin()->Message().GetVisibilityDeadline() < now) { + if (!tree) { + tree = MakeHolder<TOffsetTree>(); + } + ++added; + TInflyMessage* msg = &MessagesByVisibilityDeadline.Begin()->Message(); + TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = msg; + byVisibilityDeadline->UnLink(); + TInflyMessageWithOffsetKey* byOffset = msg; + byOffset->UnLink(); + tree->Insert(msg); + } + if (!added) { + return {}; + } + HoldCount += added; + Size -= added; + return TReceiveCandidates(this, std::move(tree)); +} + +TInflyMessages::TReceiveCandidates::TReceiveCandidates(TIntrusivePtr<TInflyMessages> parent, THolder<TOffsetTree> messages) + : Parent(std::move(parent)) + , ReceivedMessages(std::move(messages)) +{ +} + void TInflyMessages::TReceiveCandidates::SetVisibilityDeadlineAndReceiveCount(ui64 offset, TInstant visibilityDeadline, const ui32 receiveCount) { - Y_ASSERT(Parent && ReceivedMessages); - if (auto* msg = ReceivedMessages->Find(offset)) { - msg->Message().SetVisibilityDeadline(visibilityDeadline); + Y_ASSERT(Parent && 
ReceivedMessages); + if (auto* msg = ReceivedMessages->Find(offset)) { + msg->Message().SetVisibilityDeadline(visibilityDeadline); msg->Message().SetReceiveCount(receiveCount); - } -} - -THolder<TInflyMessage> TInflyMessages::TReceiveCandidates::Delete(ui64 offset) { - Y_ASSERT(Parent && ReceivedMessages); - if (auto* msg = ReceivedMessages->Find(offset)) { - Y_ASSERT(Parent->HoldCount > 0); - --Parent->HoldCount; - msg->UnLink(); + } +} + +THolder<TInflyMessage> TInflyMessages::TReceiveCandidates::Delete(ui64 offset) { + Y_ASSERT(Parent && ReceivedMessages); + if (auto* msg = ReceivedMessages->Find(offset)) { + Y_ASSERT(Parent->HoldCount > 0); + --Parent->HoldCount; + msg->UnLink(); return THolder<TInflyMessage>(&msg->Message()); - } - return nullptr; -} - -bool TInflyMessages::TReceiveCandidates::Has(ui64 offset) const { - Y_ASSERT(Parent && ReceivedMessages); - return ReceivedMessages->Find(offset) != nullptr; -} - -struct TInflyMessages::TReturnToParent : public TInflyMessages::TOffsetTree::TDestroy { - explicit TReturnToParent(TIntrusivePtr<TInflyMessages> parent) - : Parent(std::move(parent)) - { - } - - void operator()(TInflyMessageWithOffsetKey& v) const noexcept { - TDestroy::operator()(v); // remove from tree - TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = &v.Message(); - byVisibilityDeadline->UnLink(); - Parent->Add(THolder<TInflyMessage>(static_cast<TInflyMessage*>(&v))); - --Parent->HoldCount; - } - - TIntrusivePtr<TInflyMessages> Parent; -}; - -TInflyMessages::TReceiveCandidates::~TReceiveCandidates() { - if (ReceivedMessages) { - Y_ASSERT(Parent); - ReceivedMessages->ForEachNoOrder(TReturnToParent(std::move(Parent))); - ReceivedMessages->Init(); - } -} - -TInflyMessages::TChangeVisibilityCandidates::TChangeVisibilityCandidates(TIntrusivePtr<TInflyMessages> parent) - : Parent(std::move(parent)) -{ -} - -TInflyMessages::TChangeVisibilityCandidates::~TChangeVisibilityCandidates() { - if (Messages) { - Y_ASSERT(Parent); - Messages->ForEachNoOrder(TReturnToParent(std::move(Parent))); - Messages->Init(); - } -} - -bool TInflyMessages::TChangeVisibilityCandidates::Add(ui64 offset) { - Y_ASSERT(Parent); - auto* byOffset = Parent->MessagesByOffset.Find(offset); - if (byOffset) { - if (!Messages) { - Messages = MakeHolder<TOffsetTree>(); - } - - TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = &byOffset->Message(); - byOffset->UnLink(); - byVisibilityDeadline->UnLink(); - Parent->SetVisibilityDeadlineCandidates.Insert(byVisibilityDeadline); - - ++Parent->HoldCount; - --Parent->Size; - - Messages->Insert(byOffset); - return true; - } - return false; -} - -void TInflyMessages::TChangeVisibilityCandidates::SetVisibilityDeadline(ui64 offset, TInstant visibilityDeadline) { - Y_ASSERT(Parent); - if (!Messages) { - return; - } - if (auto* msg = Messages->Find(offset)) { - TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = &msg->Message(); - // reinsert in SetVisibilityDeadlineCandidates tree by different key - byVisibilityDeadline->UnLink(); - msg->Message().SetVisibilityDeadline(visibilityDeadline); - Parent->SetVisibilityDeadlineCandidates.Insert(byVisibilityDeadline); - } -} - -THolder<TInflyMessage> TInflyMessages::TChangeVisibilityCandidates::Delete(ui64 offset) { - Y_ASSERT(Parent); - if (!Messages) { - return nullptr; - } - if (auto* msg = Messages->Find(offset)) { - Y_ASSERT(Parent->HoldCount > 0); - --Parent->HoldCount; - msg->UnLink(); - TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = &msg->Message(); - 
byVisibilityDeadline->UnLink(); + } + return nullptr; +} + +bool TInflyMessages::TReceiveCandidates::Has(ui64 offset) const { + Y_ASSERT(Parent && ReceivedMessages); + return ReceivedMessages->Find(offset) != nullptr; +} + +struct TInflyMessages::TReturnToParent : public TInflyMessages::TOffsetTree::TDestroy { + explicit TReturnToParent(TIntrusivePtr<TInflyMessages> parent) + : Parent(std::move(parent)) + { + } + + void operator()(TInflyMessageWithOffsetKey& v) const noexcept { + TDestroy::operator()(v); // remove from tree + TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = &v.Message(); + byVisibilityDeadline->UnLink(); + Parent->Add(THolder<TInflyMessage>(static_cast<TInflyMessage*>(&v))); + --Parent->HoldCount; + } + + TIntrusivePtr<TInflyMessages> Parent; +}; + +TInflyMessages::TReceiveCandidates::~TReceiveCandidates() { + if (ReceivedMessages) { + Y_ASSERT(Parent); + ReceivedMessages->ForEachNoOrder(TReturnToParent(std::move(Parent))); + ReceivedMessages->Init(); + } +} + +TInflyMessages::TChangeVisibilityCandidates::TChangeVisibilityCandidates(TIntrusivePtr<TInflyMessages> parent) + : Parent(std::move(parent)) +{ +} + +TInflyMessages::TChangeVisibilityCandidates::~TChangeVisibilityCandidates() { + if (Messages) { + Y_ASSERT(Parent); + Messages->ForEachNoOrder(TReturnToParent(std::move(Parent))); + Messages->Init(); + } +} + +bool TInflyMessages::TChangeVisibilityCandidates::Add(ui64 offset) { + Y_ASSERT(Parent); + auto* byOffset = Parent->MessagesByOffset.Find(offset); + if (byOffset) { + if (!Messages) { + Messages = MakeHolder<TOffsetTree>(); + } + + TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = &byOffset->Message(); + byOffset->UnLink(); + byVisibilityDeadline->UnLink(); + Parent->SetVisibilityDeadlineCandidates.Insert(byVisibilityDeadline); + + ++Parent->HoldCount; + --Parent->Size; + + Messages->Insert(byOffset); + return true; + } + return false; +} + +void TInflyMessages::TChangeVisibilityCandidates::SetVisibilityDeadline(ui64 offset, TInstant visibilityDeadline) { + Y_ASSERT(Parent); + if (!Messages) { + return; + } + if (auto* msg = Messages->Find(offset)) { + TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = &msg->Message(); + // reinsert in SetVisibilityDeadlineCandidates tree by different key + byVisibilityDeadline->UnLink(); + msg->Message().SetVisibilityDeadline(visibilityDeadline); + Parent->SetVisibilityDeadlineCandidates.Insert(byVisibilityDeadline); + } +} + +THolder<TInflyMessage> TInflyMessages::TChangeVisibilityCandidates::Delete(ui64 offset) { + Y_ASSERT(Parent); + if (!Messages) { + return nullptr; + } + if (auto* msg = Messages->Find(offset)) { + Y_ASSERT(Parent->HoldCount > 0); + --Parent->HoldCount; + msg->UnLink(); + TInflyMessageWithVisibilityDeadlineKey* byVisibilityDeadline = &msg->Message(); + byVisibilityDeadline->UnLink(); return THolder<TInflyMessage>(&msg->Message()); - } - return nullptr; -} - -bool TInflyMessages::TChangeVisibilityCandidates::Has(ui64 offset) const { - Y_ASSERT(Parent); - return Messages && Messages->Find(offset) != nullptr; -} - -} // namespace NKikimr::NSQS + } + return nullptr; +} + +bool TInflyMessages::TChangeVisibilityCandidates::Has(ui64 offset) const { + Y_ASSERT(Parent); + return Messages && Messages->Find(offset) != nullptr; +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/infly.h b/ydb/core/ymq/actor/infly.h index 1dea6baa6cb..7c6ae2c9fe8 100644 --- a/ydb/core/ymq/actor/infly.h +++ b/ydb/core/ymq/actor/infly.h @@ -1,185 +1,185 @@ -#pragma once -#include 
"defs.h" +#pragma once +#include "defs.h" #include <library/cpp/containers/intrusive_rb_tree/rb_tree.h> - -#include <util/datetime/base.h> -#include <util/generic/ptr.h> -#include <util/system/types.h> - -#include <functional> -#include <tuple> - -namespace NKikimr::NSQS { - -class TInflyMessage; -struct TInflyMessageWithVisibilityDeadlineKey; -struct TInflyMessageWithOffsetKey; - -struct TCmpByVisibilityDeadline { - static bool Compare(const TInflyMessageWithVisibilityDeadlineKey& l, const TInflyMessageWithVisibilityDeadlineKey& r); - static bool Compare(const TInflyMessageWithVisibilityDeadlineKey& l, TInstant r); - static bool Compare(TInstant l, const TInflyMessageWithVisibilityDeadlineKey& r); -}; - -struct TCmpByOffset { - static bool Compare(const TInflyMessageWithOffsetKey& l, const TInflyMessageWithOffsetKey& r); - static bool Compare(const TInflyMessageWithOffsetKey& l, ui64 r); - static bool Compare(ui64 l, const TInflyMessageWithOffsetKey& r); -}; - -struct TInflyMessageWithVisibilityDeadlineKey : public TRbTreeItem<TInflyMessageWithVisibilityDeadlineKey, TCmpByVisibilityDeadline> { - TInflyMessage& Message(); - const TInflyMessage& Message() const; -}; - -struct TInflyMessageWithOffsetKey : public TRbTreeItem<TInflyMessageWithOffsetKey, TCmpByOffset> { - TInflyMessage& Message(); - const TInflyMessage& Message() const; -}; - -class TInflyMessage : public TInflyMessageWithVisibilityDeadlineKey, public TInflyMessageWithOffsetKey { -public: - TInflyMessage() = default; - + +#include <util/datetime/base.h> +#include <util/generic/ptr.h> +#include <util/system/types.h> + +#include <functional> +#include <tuple> + +namespace NKikimr::NSQS { + +class TInflyMessage; +struct TInflyMessageWithVisibilityDeadlineKey; +struct TInflyMessageWithOffsetKey; + +struct TCmpByVisibilityDeadline { + static bool Compare(const TInflyMessageWithVisibilityDeadlineKey& l, const TInflyMessageWithVisibilityDeadlineKey& r); + static bool Compare(const TInflyMessageWithVisibilityDeadlineKey& l, TInstant r); + static bool Compare(TInstant l, const TInflyMessageWithVisibilityDeadlineKey& r); +}; + +struct TCmpByOffset { + static bool Compare(const TInflyMessageWithOffsetKey& l, const TInflyMessageWithOffsetKey& r); + static bool Compare(const TInflyMessageWithOffsetKey& l, ui64 r); + static bool Compare(ui64 l, const TInflyMessageWithOffsetKey& r); +}; + +struct TInflyMessageWithVisibilityDeadlineKey : public TRbTreeItem<TInflyMessageWithVisibilityDeadlineKey, TCmpByVisibilityDeadline> { + TInflyMessage& Message(); + const TInflyMessage& Message() const; +}; + +struct TInflyMessageWithOffsetKey : public TRbTreeItem<TInflyMessageWithOffsetKey, TCmpByOffset> { + TInflyMessage& Message(); + const TInflyMessage& Message() const; +}; + +class TInflyMessage : public TInflyMessageWithVisibilityDeadlineKey, public TInflyMessageWithOffsetKey { +public: + TInflyMessage() = default; + TInflyMessage(const ui64 offset, const ui64 randomId, const TInstant visibilityDeadline, const ui32 receiveCount) - : Offset(offset) - , RandomId(randomId) - , VisibilityDeadline(visibilityDeadline) + : Offset(offset) + , RandomId(randomId) + , VisibilityDeadline(visibilityDeadline) , ReceiveCount(receiveCount) - { - } - - ui64 GetOffset() const { - return Offset; - } - - ui64 GetRandomId() const { - return RandomId; - } - - TInstant GetVisibilityDeadline() const { - return VisibilityDeadline; - } - + { + } + + ui64 GetOffset() const { + return Offset; + } + + ui64 GetRandomId() const { + return RandomId; + } + + TInstant 
GetVisibilityDeadline() const { + return VisibilityDeadline; + } + ui32 GetReceiveCount() const { return ReceiveCount; } void SetVisibilityDeadline(const TInstant visibilityDeadline) { - VisibilityDeadline = visibilityDeadline; - } - + VisibilityDeadline = visibilityDeadline; + } + void SetReceiveCount(const ui32 receiveCount) { ReceiveCount = receiveCount; } -private: - ui64 Offset = 0; - ui64 RandomId = 0; - TInstant VisibilityDeadline; +private: + ui64 Offset = 0; + ui64 RandomId = 0; + TInstant VisibilityDeadline; ui32 ReceiveCount = 0; -}; - -class TInflyMessages : public TAtomicRefCount<TInflyMessages> { - struct TReturnToParent; -public: - using TVisibilityDeadlineTree = TRbTree<TInflyMessageWithVisibilityDeadlineKey, TCmpByVisibilityDeadline>; - using TOffsetTree = TRbTree<TInflyMessageWithOffsetKey, TCmpByOffset>; - struct TDestroyInflyMessages; - - // Struct to temporarily hold messages that are about to receive - class TReceiveCandidates { - friend class TInflyMessages; - TReceiveCandidates(TIntrusivePtr<TInflyMessages> parent, THolder<TOffsetTree> messages); - - public: - TReceiveCandidates() = default; - TReceiveCandidates(TReceiveCandidates&& msgs) = default; - TReceiveCandidates(const TReceiveCandidates&) = delete; - ~TReceiveCandidates(); - - TReceiveCandidates& operator=(TReceiveCandidates&& msgs) = default; - TReceiveCandidates& operator=(const TReceiveCandidates&) = delete; - - bool operator!() const { - return !ReceivedMessages; - } - - operator bool() const { - return !operator!(); - } - +}; + +class TInflyMessages : public TAtomicRefCount<TInflyMessages> { + struct TReturnToParent; +public: + using TVisibilityDeadlineTree = TRbTree<TInflyMessageWithVisibilityDeadlineKey, TCmpByVisibilityDeadline>; + using TOffsetTree = TRbTree<TInflyMessageWithOffsetKey, TCmpByOffset>; + struct TDestroyInflyMessages; + + // Struct to temporarily hold messages that are about to receive + class TReceiveCandidates { + friend class TInflyMessages; + TReceiveCandidates(TIntrusivePtr<TInflyMessages> parent, THolder<TOffsetTree> messages); + + public: + TReceiveCandidates() = default; + TReceiveCandidates(TReceiveCandidates&& msgs) = default; + TReceiveCandidates(const TReceiveCandidates&) = delete; + ~TReceiveCandidates(); + + TReceiveCandidates& operator=(TReceiveCandidates&& msgs) = default; + TReceiveCandidates& operator=(const TReceiveCandidates&) = delete; + + bool operator!() const { + return !ReceivedMessages; + } + + operator bool() const { + return !operator!(); + } + void SetVisibilityDeadlineAndReceiveCount(ui64 offset, TInstant visibilityDeadline, const ui32 receiveCount); - THolder<TInflyMessage> Delete(ui64 offset); - bool Has(ui64 offset) const; - - TOffsetTree::TIterator Begin() const { - return ReceivedMessages->Begin(); - } - - TOffsetTree::TIterator End() const { - return ReceivedMessages->End(); - } - - private: - TIntrusivePtr<TInflyMessages> Parent; - THolder<TOffsetTree> ReceivedMessages; - }; - - class TChangeVisibilityCandidates { - public: - explicit TChangeVisibilityCandidates(TIntrusivePtr<TInflyMessages> parent); - TChangeVisibilityCandidates() = default; - TChangeVisibilityCandidates(TChangeVisibilityCandidates&& msgs) = default; - TChangeVisibilityCandidates(const TChangeVisibilityCandidates&) = delete; - ~TChangeVisibilityCandidates(); - - TChangeVisibilityCandidates& operator=(TChangeVisibilityCandidates&& msgs) = default; - TChangeVisibilityCandidates& operator=(const TChangeVisibilityCandidates&) = delete; - - bool operator!() const { - return !Messages; - 
} - - operator bool() const { - return !operator!(); - } - - bool Add(ui64 offset); - void SetVisibilityDeadline(ui64 offset, TInstant visibilityDeadline); - THolder<TInflyMessage> Delete(ui64 offset); - bool Has(ui64 offset) const; - - private: - TIntrusivePtr<TInflyMessages> Parent; - THolder<TOffsetTree> Messages; - }; - -public: - TInflyMessages() = default; - ~TInflyMessages(); - - void Add(THolder<TInflyMessage> msg); - THolder<TInflyMessage> Delete(ui64 offset); - TReceiveCandidates Receive(size_t maxCount, TInstant now); - - size_t GetInflyCount(TInstant now) const { - const size_t infly = Size ? MessagesByVisibilityDeadline.NotLessCount(now) : 0; - const size_t setVisibilityDeadlineCandidates = SetVisibilityDeadlineCandidates.Empty() ? 0 : SetVisibilityDeadlineCandidates.NotLessCount(now); - return infly + setVisibilityDeadlineCandidates; - } - - size_t GetCapacity() const { - return Size + HoldCount; - } - -private: - TVisibilityDeadlineTree MessagesByVisibilityDeadline; - TOffsetTree MessagesByOffset; - TVisibilityDeadlineTree SetVisibilityDeadlineCandidates; // to properly calculate infly cout - size_t Size = 0; - size_t HoldCount = 0; -}; - -} // namespace NKikimr::NSQS + THolder<TInflyMessage> Delete(ui64 offset); + bool Has(ui64 offset) const; + + TOffsetTree::TIterator Begin() const { + return ReceivedMessages->Begin(); + } + + TOffsetTree::TIterator End() const { + return ReceivedMessages->End(); + } + + private: + TIntrusivePtr<TInflyMessages> Parent; + THolder<TOffsetTree> ReceivedMessages; + }; + + class TChangeVisibilityCandidates { + public: + explicit TChangeVisibilityCandidates(TIntrusivePtr<TInflyMessages> parent); + TChangeVisibilityCandidates() = default; + TChangeVisibilityCandidates(TChangeVisibilityCandidates&& msgs) = default; + TChangeVisibilityCandidates(const TChangeVisibilityCandidates&) = delete; + ~TChangeVisibilityCandidates(); + + TChangeVisibilityCandidates& operator=(TChangeVisibilityCandidates&& msgs) = default; + TChangeVisibilityCandidates& operator=(const TChangeVisibilityCandidates&) = delete; + + bool operator!() const { + return !Messages; + } + + operator bool() const { + return !operator!(); + } + + bool Add(ui64 offset); + void SetVisibilityDeadline(ui64 offset, TInstant visibilityDeadline); + THolder<TInflyMessage> Delete(ui64 offset); + bool Has(ui64 offset) const; + + private: + TIntrusivePtr<TInflyMessages> Parent; + THolder<TOffsetTree> Messages; + }; + +public: + TInflyMessages() = default; + ~TInflyMessages(); + + void Add(THolder<TInflyMessage> msg); + THolder<TInflyMessage> Delete(ui64 offset); + TReceiveCandidates Receive(size_t maxCount, TInstant now); + + size_t GetInflyCount(TInstant now) const { + const size_t infly = Size ? MessagesByVisibilityDeadline.NotLessCount(now) : 0; + const size_t setVisibilityDeadlineCandidates = SetVisibilityDeadlineCandidates.Empty() ? 
0 : SetVisibilityDeadlineCandidates.NotLessCount(now); + return infly + setVisibilityDeadlineCandidates; + } + + size_t GetCapacity() const { + return Size + HoldCount; + } + +private: + TVisibilityDeadlineTree MessagesByVisibilityDeadline; + TOffsetTree MessagesByOffset; + TVisibilityDeadlineTree SetVisibilityDeadlineCandidates; // to properly calculate infly cout + size_t Size = 0; + size_t HoldCount = 0; +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp b/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp index fd4866fd49f..0146e481223 100644 --- a/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp +++ b/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp @@ -18,13 +18,13 @@ class TListDeadLetterSourceQueuesActor : public TActionActor<TListDeadLetterSourceQueuesActor> { public: - TListDeadLetterSourceQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::ListDeadLetterSourceQueues, std::move(cb)) + TListDeadLetterSourceQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::ListDeadLetterSourceQueues, std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutableListDeadLetterSourceQueues()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } static constexpr bool NeedExistingQueue() { @@ -32,14 +32,14 @@ public: } TString DoGetQueueName() const override { - return Request().GetQueueName(); + return Request().GetQueueName(); } private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); } } @@ -56,10 +56,10 @@ private: return Response_.MutableListDeadLetterSourceQueues()->MutableError(); } - void DoAction() override { + void DoAction() override { Become(&TThis::StateFunc); - TExecutorBuilder builder(SelfId(), RequestId_); + TExecutorBuilder builder(SelfId(), RequestId_); builder .User(UserName_) .Queue(GetQueueName()) @@ -68,13 +68,13 @@ private: .Counters(QueueCounters_) .RetryOnTimeout() .Params() - .Utf8("USER_NAME", UserName_) + .Utf8("USER_NAME", UserName_) .Utf8("FOLDERID", FolderId_); builder.Start(); } - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { const auto& record = ev->Get()->Record; auto* result = Response_.MutableListDeadLetterSourceQueues(); @@ -95,20 +95,20 @@ private: } } } else { - RLOG_SQS_WARN("Request failed: " << record); - MakeError(result, NErrors::INTERNAL_FAILURE); + RLOG_SQS_WARN("Request failed: " << record); + MakeError(result, NErrors::INTERNAL_FAILURE); } - SendReplyAndDie(); + SendReplyAndDie(); } - const TListDeadLetterSourceQueuesRequest& Request() const { - return SourceSqsRequest_.GetListDeadLetterSourceQueues(); - } + const TListDeadLetterSourceQueuesRequest& Request() const { + return SourceSqsRequest_.GetListDeadLetterSourceQueues(); + } }; -IActor* CreateListDeadLetterSourceQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TListDeadLetterSourceQueuesActor(sourceSqsRequest, std::move(cb)); +IActor* CreateListDeadLetterSourceQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new 
TListDeadLetterSourceQueuesActor(sourceSqsRequest, std::move(cb)); } } // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/list_permissions.cpp b/ydb/core/ymq/actor/list_permissions.cpp index 93a15e1dbf2..60b83ab3855 100644 --- a/ydb/core/ymq/actor/list_permissions.cpp +++ b/ydb/core/ymq/actor/list_permissions.cpp @@ -10,37 +10,37 @@ #include <util/string/cast.h> #include <util/string/join.h> -namespace NKikimr::NSQS { - +namespace NKikimr::NSQS { + class TListPermissionsActor : public TActionActor<TListPermissionsActor> { public: - TListPermissionsActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::ListPermissions, std::move(cb)) + TListPermissionsActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::ListPermissions, std::move(cb)) { Response_.MutableListPermissions()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } static constexpr bool NeedExistingQueue() { return false; } - static constexpr bool NeedUserSpecified() { - return false; - } - + static constexpr bool NeedUserSpecified() { + return false; + } + private: bool DoValidate() override { - if (!Request().GetPath()) { + if (!Request().GetPath()) { MakeError(Response_.MutableListPermissions(), NErrors::MISSING_PARAMETER, "No Path parameter."); return false; } - Path_ = MakeAbsolutePath(Request().GetPath()); + Path_ = MakeAbsolutePath(Request().GetPath()); if (IsForbiddenPath(Path_)) { - MakeError(MutableErrorDesc(), NErrors::INVALID_PARAMETER_VALUE, Sprintf("Path does not exist: %s.", SanitizeNodePath(Path_).c_str())); + MakeError(MutableErrorDesc(), NErrors::INVALID_PARAMETER_VALUE, Sprintf("Path does not exist: %s.", SanitizeNodePath(Path_).c_str())); return false; } @@ -52,29 +52,29 @@ private: } virtual TString GetCustomACLPath() const override { - return MakeAbsolutePath(Request().GetPath()); + return MakeAbsolutePath(Request().GetPath()); } TString DoGetQueueName() const override { return {}; } - void RequestSchemeShard(const TString& path) { + void RequestSchemeShard(const TString& path) { std::unique_ptr<TEvTxUserProxy::TEvNavigate> navigateRequest(new TEvTxUserProxy::TEvNavigate()); NKikimrSchemeOp::TDescribePath* record = navigateRequest->Record.MutableDescribePath(); record->SetPath(path); - Send(MakeTxProxyID(), navigateRequest.release()); + Send(MakeTxProxyID(), navigateRequest.release()); } - void DoAction() override { + void DoAction() override { Become(&TThis::WaitSchemeShardResponse); - RequestSchemeShard(Path_); + RequestSchemeShard(Path_); } - STATEFN(WaitSchemeShardResponse) { + STATEFN(WaitSchemeShardResponse) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); + hFunc(TEvWakeup, HandleWakeup); hFunc(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult, HandleSchemeShardResponse); } } @@ -140,7 +140,7 @@ private: break; } case NKikimrScheme::StatusPathDoesNotExist: { - MakeError(MutableErrorDesc(), NErrors::INVALID_PARAMETER_VALUE, Sprintf("Path does not exist: %s.", SanitizeNodePath(Path_).c_str())); + MakeError(MutableErrorDesc(), NErrors::INVALID_PARAMETER_VALUE, Sprintf("Path does not exist: %s.", SanitizeNodePath(Path_).c_str())); break; } default: { @@ -149,19 +149,19 @@ private: } } - SendReplyAndDie(); - } - - const TListPermissionsRequest& Request() const { - return SourceSqsRequest_.GetListPermissions(); + SendReplyAndDie(); } + const TListPermissionsRequest& Request() const { + 
return SourceSqsRequest_.GetListPermissions(); + } + private: TString Path_; }; -IActor* CreateListPermissionsActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TListPermissionsActor(sourceSqsRequest, std::move(cb)); +IActor* CreateListPermissionsActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TListPermissionsActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/list_queues.cpp b/ydb/core/ymq/actor/list_queues.cpp index 25ae1083764..50b5469dd57 100644 --- a/ydb/core/ymq/actor/list_queues.cpp +++ b/ydb/core/ymq/actor/list_queues.cpp @@ -1,9 +1,9 @@ #include "action.h" -#include "error.h" -#include "log.h" +#include "error.h" +#include "log.h" #include "params.h" #include "serviceid.h" -#include "executor.h" +#include "executor.h" #include <ydb/public/lib/value/value.h> @@ -13,54 +13,54 @@ using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TListQueuesActor : public TActionActor<TListQueuesActor> { public: - static constexpr bool NeedExistingQueue() { - return false; - } - - TListQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::ListQueues, std::move(cb)) + static constexpr bool NeedExistingQueue() { + return false; + } + + TListQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::ListQueues, std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutableListQueues()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } private: - TError* MutableErrorDesc() override { - return Response_.MutableListQueues()->MutableError(); + TError* MutableErrorDesc() override { + return Response_.MutableListQueues()->MutableError(); + } + + void DiscoverQueues() { + TExecutorBuilder(SelfId(), RequestId_) + .User(UserName_) + .QueryId(LIST_QUEUES_ID) + .RetryOnTimeout() + .Counters(UserCounters_) + .Params() + .Utf8("FOLDERID", FolderId_) + .Utf8("USER_NAME", UserName_) + .ParentBuilder().Start(); } - void DiscoverQueues() { - TExecutorBuilder(SelfId(), RequestId_) - .User(UserName_) - .QueryId(LIST_QUEUES_ID) - .RetryOnTimeout() - .Counters(UserCounters_) - .Params() - .Utf8("FOLDERID", FolderId_) - .Utf8("USER_NAME", UserName_) - .ParentBuilder().Start(); - } - - void DoAction() override { + void DoAction() override { Become(&TThis::StateFunc); - if (!UserExists_) { - if (!IsCloud()) { - MakeError(Response_.MutableListQueues(), NErrors::OPT_IN_REQUIRED, "The specified account does not exist."); - } // else respond with an empty list for inexistent account - SendReplyAndDie(); + if (!UserExists_) { + if (!IsCloud()) { + MakeError(Response_.MutableListQueues(), NErrors::OPT_IN_REQUIRED, "The specified account does not exist."); + } // else respond with an empty list for inexistent account + SendReplyAndDie(); return; } - DiscoverQueues(); + DiscoverQueues(); } TString DoGetQueueName() const override { @@ -68,28 +68,28 @@ private: } private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); } } - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - const 
auto& record = ev->Get()->Record; - auto* result = Response_.MutableListQueues(); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + const auto& record = ev->Get()->Record; + auto* result = Response_.MutableListQueues(); - if (ev->Get()->IsOk()) { + if (ev->Get()->IsOk()) { const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); const TValue queues(val["queues"]); - const TString prefix = Request().GetQueueNamePrefix(); + const TString prefix = Request().GetQueueNamePrefix(); for (size_t i = 0; i < queues.Size(); ++i) { const TString name((TString(queues[i]["QueueName"]))); const TString customQueueName((TString(queues[i]["CustomQueueName"]))); if (prefix.empty() || AsciiHasPrefix((IsCloud() ? customQueueName : name), prefix)) { - auto* item = result->AddQueues(); + auto* item = result->AddQueues(); item->SetQueueName(name); if (IsCloud()) { item->SetQueueUrl(MakeQueueUrl(TString::Join(name, '/', customQueueName))); @@ -99,20 +99,20 @@ private: } } } else { - RLOG_SQS_WARN("Request failed: " << record); - MakeError(result, NErrors::INTERNAL_FAILURE); + RLOG_SQS_WARN("Request failed: " << record); + MakeError(result, NErrors::INTERNAL_FAILURE); } - SendReplyAndDie(); + SendReplyAndDie(); } - const TListQueuesRequest& Request() const { - return SourceSqsRequest_.GetListQueues(); - } + const TListQueuesRequest& Request() const { + return SourceSqsRequest_.GetListQueues(); + } }; -IActor* CreateListQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TListQueuesActor(sourceSqsRequest, std::move(cb)); +IActor* CreateListQueuesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TListQueuesActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/list_users.cpp b/ydb/core/ymq/actor/list_users.cpp index d653fc20a38..d0a2fcebaca 100644 --- a/ydb/core/ymq/actor/list_users.cpp +++ b/ydb/core/ymq/actor/list_users.cpp @@ -9,45 +9,45 @@ #include <util/string/cast.h> #include <util/string/join.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TListUsersActor : public TActionActor<TListUsersActor> { public: - static constexpr bool NeedExistingQueue() { - return false; - } - - TListUsersActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::ListUsers, std::move(cb)) + static constexpr bool NeedExistingQueue() { + return false; + } + + TListUsersActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::ListUsers, std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutableListUsers()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } private: - void PassAway() override { - TActionActor<TListUsersActor>::PassAway(); + void PassAway() override { + TActionActor<TListUsersActor>::PassAway(); } - TError* MutableErrorDesc() override { - return Response_.MutableListUsers()->MutableError(); - } - - void DoAction() override { + TError* MutableErrorDesc() override { + return Response_.MutableListUsers()->MutableError(); + } + + void DoAction() override { Become(&TThis::StateFunc); auto proxy = MakeTxProxyID(); auto ev = MakeHolder<TEvTxUserProxy::TEvNavigate>(); - ev->Record.MutableDescribePath()->SetPath(Cfg().GetRoot()); + 
ev->Record.MutableDescribePath()->SetPath(Cfg().GetRoot()); - RLOG_SQS_TRACE("TListUsersActor generate request." - << ". Proxy actor: " << proxy - << ". TEvNavigate: " << ev->Record.ShortDebugString()); + RLOG_SQS_TRACE("TListUsersActor generate request." + << ". Proxy actor: " << proxy + << ". TEvNavigate: " << ev->Record.ShortDebugString()); Send(proxy, ev.Release()); } @@ -57,9 +57,9 @@ private: } private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); + hFunc(TEvWakeup, HandleWakeup); hFunc(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult, HandleDescribeSchemeResult); } } @@ -69,13 +69,13 @@ private: const auto& desc = record.GetPathDescription(); if (record.GetStatus() != NKikimrScheme::StatusSuccess) { - RLOG_SQS_CRIT("No error handler at TListUsersActor in HandleDescribeSchemeResult" + RLOG_SQS_CRIT("No error handler at TListUsersActor in HandleDescribeSchemeResult" << ", got msg: " << record.ShortDebugString()); // status, reason might be useful. // The request doesn't have to be seccessfull all the time. StatusNotAvailable for example could occur } - const TString prefix = Request().GetUserNamePrefix(); + const TString prefix = Request().GetUserNamePrefix(); for (const auto& child : desc.children()) { if (child.GetPathType() == NKikimrSchemeOp::EPathTypeDir) { @@ -85,16 +85,16 @@ private: } } - SendReplyAndDie(); + SendReplyAndDie(); } - const TListUsersRequest& Request() const { - return SourceSqsRequest_.GetListUsers(); - } + const TListUsersRequest& Request() const { + return SourceSqsRequest_.GetListUsers(); + } }; -IActor* CreateListUsersActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TListUsersActor(sourceSqsRequest, std::move(cb)); +IActor* CreateListUsersActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TListUsersActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/local_rate_limiter_allocator.cpp b/ydb/core/ymq/actor/local_rate_limiter_allocator.cpp index 470665b7657..92e45a38d84 100644 --- a/ydb/core/ymq/actor/local_rate_limiter_allocator.cpp +++ b/ydb/core/ymq/actor/local_rate_limiter_allocator.cpp @@ -1,84 +1,84 @@ -#include "local_rate_limiter_allocator.h" - +#include "local_rate_limiter_allocator.h" + #include <ydb/core/base/quoter.h> - -#include <util/generic/hash.h> -#include <util/system/guard.h> -#include <util/system/spinlock.h> - -#include <limits> - -namespace NKikimr::NSQS { -namespace { - -constexpr ui32 SQS_TAG_START = 1 << 31; - -ui32 NextTag = SQS_TAG_START; -THashMultiMap<ui32, ui32> FreeRateToTag; -TAdaptiveLock RatesLock; - -ui32 AllocateLocalRateLimiterTag(ui32 rate) { - Y_VERIFY(rate != std::numeric_limits<ui32>::max()); - auto lock = Guard(RatesLock); - auto freeTagIt = FreeRateToTag.find(rate); - if (freeTagIt != FreeRateToTag.end()) { - const ui32 tag = freeTagIt->second; - FreeRateToTag.erase(freeTagIt); - return tag; - } - return NextTag++; -} - -void FreeLocalRateLimiterTag(ui32 rate, ui32 tag) { - auto lock = Guard(RatesLock); - FreeRateToTag.emplace(rate, tag); -} - -} - -TLocalRateLimiterResource::TLocalRateLimiterResource() - : Rate(std::numeric_limits<ui32>::max()) - , Tag(std::numeric_limits<ui32>::max()) - , ResourceId(std::numeric_limits<ui64>::max()) -{ -} - -TLocalRateLimiterResource::TLocalRateLimiterResource(ui32 rate) - : Rate(rate) - , Tag(AllocateLocalRateLimiterTag(Rate)) - , 
ResourceId(TEvQuota::TResourceLeaf::MakeTaggedRateRes(Tag, Rate)) -{ -} - -TLocalRateLimiterResource::TLocalRateLimiterResource(TLocalRateLimiterResource&& res) - : Rate(res.Rate) - , Tag(res.Tag) - , ResourceId(res.ResourceId) -{ - res.Rate = std::numeric_limits<ui32>::max(); - res.Tag = std::numeric_limits<ui32>::max(); - res.ResourceId = std::numeric_limits<ui64>::max(); -} - -TLocalRateLimiterResource& TLocalRateLimiterResource::operator=(TLocalRateLimiterResource&& res) { - if (this != &res) { - if (Rate != std::numeric_limits<ui32>::max()) { - FreeLocalRateLimiterTag(Rate, Tag); - } - Rate = res.Rate; - Tag = res.Tag; - ResourceId = res.ResourceId; - res.Rate = std::numeric_limits<ui32>::max(); - res.Tag = std::numeric_limits<ui32>::max(); - res.ResourceId = std::numeric_limits<ui64>::max(); - } - return *this; -} - -TLocalRateLimiterResource::~TLocalRateLimiterResource() { - if (Rate != std::numeric_limits<ui32>::max()) { - FreeLocalRateLimiterTag(Rate, Tag); - } -} - -} // namespace NKikimr::NSQS + +#include <util/generic/hash.h> +#include <util/system/guard.h> +#include <util/system/spinlock.h> + +#include <limits> + +namespace NKikimr::NSQS { +namespace { + +constexpr ui32 SQS_TAG_START = 1 << 31; + +ui32 NextTag = SQS_TAG_START; +THashMultiMap<ui32, ui32> FreeRateToTag; +TAdaptiveLock RatesLock; + +ui32 AllocateLocalRateLimiterTag(ui32 rate) { + Y_VERIFY(rate != std::numeric_limits<ui32>::max()); + auto lock = Guard(RatesLock); + auto freeTagIt = FreeRateToTag.find(rate); + if (freeTagIt != FreeRateToTag.end()) { + const ui32 tag = freeTagIt->second; + FreeRateToTag.erase(freeTagIt); + return tag; + } + return NextTag++; +} + +void FreeLocalRateLimiterTag(ui32 rate, ui32 tag) { + auto lock = Guard(RatesLock); + FreeRateToTag.emplace(rate, tag); +} + +} + +TLocalRateLimiterResource::TLocalRateLimiterResource() + : Rate(std::numeric_limits<ui32>::max()) + , Tag(std::numeric_limits<ui32>::max()) + , ResourceId(std::numeric_limits<ui64>::max()) +{ +} + +TLocalRateLimiterResource::TLocalRateLimiterResource(ui32 rate) + : Rate(rate) + , Tag(AllocateLocalRateLimiterTag(Rate)) + , ResourceId(TEvQuota::TResourceLeaf::MakeTaggedRateRes(Tag, Rate)) +{ +} + +TLocalRateLimiterResource::TLocalRateLimiterResource(TLocalRateLimiterResource&& res) + : Rate(res.Rate) + , Tag(res.Tag) + , ResourceId(res.ResourceId) +{ + res.Rate = std::numeric_limits<ui32>::max(); + res.Tag = std::numeric_limits<ui32>::max(); + res.ResourceId = std::numeric_limits<ui64>::max(); +} + +TLocalRateLimiterResource& TLocalRateLimiterResource::operator=(TLocalRateLimiterResource&& res) { + if (this != &res) { + if (Rate != std::numeric_limits<ui32>::max()) { + FreeLocalRateLimiterTag(Rate, Tag); + } + Rate = res.Rate; + Tag = res.Tag; + ResourceId = res.ResourceId; + res.Rate = std::numeric_limits<ui32>::max(); + res.Tag = std::numeric_limits<ui32>::max(); + res.ResourceId = std::numeric_limits<ui64>::max(); + } + return *this; +} + +TLocalRateLimiterResource::~TLocalRateLimiterResource() { + if (Rate != std::numeric_limits<ui32>::max()) { + FreeLocalRateLimiterTag(Rate, Tag); + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/local_rate_limiter_allocator.h b/ydb/core/ymq/actor/local_rate_limiter_allocator.h index 316b5fabd0f..880834a6fb8 100644 --- a/ydb/core/ymq/actor/local_rate_limiter_allocator.h +++ b/ydb/core/ymq/actor/local_rate_limiter_allocator.h @@ -1,29 +1,29 @@ -#pragma once -#include "defs.h" -#include <util/system/types.h> - -namespace NKikimr::NSQS { - -// Properly allocates resource id 
for local rate limiter -class TLocalRateLimiterResource { -public: - TLocalRateLimiterResource(); - explicit TLocalRateLimiterResource(ui32 rate); - TLocalRateLimiterResource(const TLocalRateLimiterResource&) = delete; - TLocalRateLimiterResource(TLocalRateLimiterResource&&); - ~TLocalRateLimiterResource(); - - TLocalRateLimiterResource& operator=(const TLocalRateLimiterResource&) = delete; - TLocalRateLimiterResource& operator=(TLocalRateLimiterResource&&); - - operator ui64() const { - return ResourceId; - } - -private: - ui32 Rate; - ui32 Tag; - ui64 ResourceId; -}; - -} // namespace NKikimr::NSQS +#pragma once +#include "defs.h" +#include <util/system/types.h> + +namespace NKikimr::NSQS { + +// Properly allocates resource id for local rate limiter +class TLocalRateLimiterResource { +public: + TLocalRateLimiterResource(); + explicit TLocalRateLimiterResource(ui32 rate); + TLocalRateLimiterResource(const TLocalRateLimiterResource&) = delete; + TLocalRateLimiterResource(TLocalRateLimiterResource&&); + ~TLocalRateLimiterResource(); + + TLocalRateLimiterResource& operator=(const TLocalRateLimiterResource&) = delete; + TLocalRateLimiterResource& operator=(TLocalRateLimiterResource&&); + + operator ui64() const { + return ResourceId; + } + +private: + ui32 Rate; + ui32 Tag; + ui64 ResourceId; +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/log.cpp b/ydb/core/ymq/actor/log.cpp index b182d89fe16..53d90e2ed60 100644 --- a/ydb/core/ymq/actor/log.cpp +++ b/ydb/core/ymq/actor/log.cpp @@ -1,46 +1,46 @@ -#include "log.h" - +#include "log.h" + #include <ydb/core/ymq/base/queue_path.h> - -namespace NKikimr::NSQS { - -ui64 RequestIdSample(const TStringBuf& requestId) { - if (Y_UNLIKELY(requestId.size() < sizeof(ui64))) { - return 0; - } + +namespace NKikimr::NSQS { + +ui64 RequestIdSample(const TStringBuf& requestId) { + if (Y_UNLIKELY(requestId.size() < sizeof(ui64))) { + return 0; + } const ui64 result = *reinterpret_cast<const ui64*>(requestId.data()); // We don't need real hashing, - // because logging system will take - // murmur hash from returned ui64 - return result; -} - -TLogQueueName::TLogQueueName(const TString& userName, const TString& queueName, ui64 shard) - : UserName(userName) - , QueueName(queueName) - , Shard(shard) -{ -} - -TLogQueueName::TLogQueueName(const TQueuePath& queuePath, ui64 shard) - : TLogQueueName(queuePath.UserName, queuePath.QueueName, shard) -{ -} - -void TLogQueueName::OutTo(IOutputStream& out) const { + // because logging system will take + // murmur hash from returned ui64 + return result; +} + +TLogQueueName::TLogQueueName(const TString& userName, const TString& queueName, ui64 shard) + : UserName(userName) + , QueueName(queueName) + , Shard(shard) +{ +} + +TLogQueueName::TLogQueueName(const TQueuePath& queuePath, ui64 shard) + : TLogQueueName(queuePath.UserName, queuePath.QueueName, shard) +{ +} + +void TLogQueueName::OutTo(IOutputStream& out) const { out << "["sv << UserName; - if (QueueName) { + if (QueueName) { out << "/"sv << QueueName; - } - if (Shard != std::numeric_limits<ui64>::max()) { + } + if (Shard != std::numeric_limits<ui64>::max()) { out << "/"sv << Shard; - } + } out << "]"sv; -} - -} // namespace NKikimr::NSQS - -template<> -void Out<NKikimr::NSQS::TLogQueueName>(IOutputStream& out, - typename TTypeTraits<NKikimr::NSQS::TLogQueueName>::TFuncParam nameForLogging) { - nameForLogging.OutTo(out); -} +} + +} // namespace NKikimr::NSQS + +template<> +void Out<NKikimr::NSQS::TLogQueueName>(IOutputStream& out, + typename 
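Editor's note on the local_rate_limiter_allocator diff above: it pairs a per-rate free list (a THashMultiMap guarded by a TAdaptiveLock) with an RAII wrapper that hands its tag back on destruction, so tags minted from SQS_TAG_START are recycled for equal rates instead of growing without bound. Below is a minimal standalone sketch of that idea using only standard C++; TagPool and RateLimiterTag are illustrative names, not YDB classes, and the real resource id construction (TEvQuota::TResourceLeaf::MakeTaggedRateRes) is deliberately omitted.

// Illustrative sketch only: mirrors AllocateLocalRateLimiterTag / FreeLocalRateLimiterTag
// and the RAII behaviour of TLocalRateLimiterResource with std types.
#include <cstdint>
#include <limits>
#include <mutex>
#include <unordered_map>

class TagPool {
public:
    // Reuse a previously freed tag for the same rate if one exists, otherwise mint a new one.
    uint32_t Allocate(uint32_t rate) {
        std::lock_guard<std::mutex> lock(Mutex_);
        auto it = FreeRateToTag_.find(rate);
        if (it != FreeRateToTag_.end()) {
            const uint32_t tag = it->second;
            FreeRateToTag_.erase(it);
            return tag;
        }
        return NextTag_++;
    }

    // Return the tag to the per-rate free list so a later resource with the same rate can reuse it.
    void Free(uint32_t rate, uint32_t tag) {
        std::lock_guard<std::mutex> lock(Mutex_);
        FreeRateToTag_.emplace(rate, tag);
    }

private:
    std::mutex Mutex_;
    std::unordered_multimap<uint32_t, uint32_t> FreeRateToTag_;
    uint32_t NextTag_ = 1u << 31;  // analogue of SQS_TAG_START
};

// RAII holder: frees the tag in its destructor, invalidates the source on move.
class RateLimiterTag {
public:
    RateLimiterTag(TagPool& pool, uint32_t rate)
        : Pool_(&pool), Rate_(rate), Tag_(pool.Allocate(rate)) {}

    RateLimiterTag(RateLimiterTag&& other) noexcept
        : Pool_(other.Pool_), Rate_(other.Rate_), Tag_(other.Tag_) {
        other.Rate_ = std::numeric_limits<uint32_t>::max();  // sentinel: moved-from, do not free
    }

    ~RateLimiterTag() {
        if (Rate_ != std::numeric_limits<uint32_t>::max()) {
            Pool_->Free(Rate_, Tag_);
        }
    }

    uint32_t Tag() const { return Tag_; }

private:
    TagPool* Pool_;
    uint32_t Rate_;
    uint32_t Tag_;
};

The sentinel rate plays the same role as in TLocalRateLimiterResource: a moved-from or default-constructed holder is marked invalid so its destructor never double-frees a tag.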
TTypeTraits<NKikimr::NSQS::TLogQueueName>::TFuncParam nameForLogging) { + nameForLogging.OutTo(out); +} diff --git a/ydb/core/ymq/actor/log.h b/ydb/core/ymq/actor/log.h index 6acb8cebaf2..52d0e3a16c1 100644 --- a/ydb/core/ymq/actor/log.h +++ b/ydb/core/ymq/actor/log.h @@ -1,126 +1,126 @@ -#pragma once -#include "defs.h" +#pragma once +#include "defs.h" #include <library/cpp/actors/core/log.h> - -#include <util/stream/output.h> - -#include <limits> - -namespace NKikimr::NSQS { - -struct TQueuePath; - -ui64 RequestIdSample(const TStringBuf& requestId); - -// Lightweight class for simple logging queue name -class TLogQueueName { -public: - TLogQueueName(const TString& userName, const TString& queueName, ui64 shard = std::numeric_limits<ui64>::max()); - TLogQueueName(const TQueuePath& queuePath, ui64 shard = std::numeric_limits<ui64>::max()); - - void OutTo(IOutputStream& out) const; - -private: - const TString& UserName; - const TString& QueueName; - const ui64 Shard; -}; - -} // namespace NKikimr::NSQS - - -// -// Outside actor system -// - -#define LOG_SQS_BASE(actorCtxOrSystem, priority, stream) \ - LOG_LOG_S(actorCtxOrSystem, priority, NKikimrServices::SQS, stream) - -#define RLOG_SQS_REQ_BASE(actorCtxOrSystem, priority, requestId, stream) \ - LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, priority, NKikimrServices::SQS, NKikimr::NSQS::RequestIdSample(requestId), "Request [" << requestId << "] " << stream) - -#define RLOG_SQS_BASE(actorCtxOrSystem, priority, stream) \ - RLOG_SQS_REQ_BASE(actorCtxOrSystem, priority, RequestId_, stream) - - -// Log under SQS service component -#define LOG_SQS_BASE_EMERG(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_EMERG, stream) -#define LOG_SQS_BASE_ALERT(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_ALERT, stream) -#define LOG_SQS_BASE_CRIT(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_CRIT, stream) -#define LOG_SQS_BASE_ERROR(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_ERROR, stream) -#define LOG_SQS_BASE_WARN(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_WARN, stream) -#define LOG_SQS_BASE_NOTICE(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_NOTICE, stream) -#define LOG_SQS_BASE_INFO(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_INFO, stream) -#define LOG_SQS_BASE_DEBUG(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, stream) -#define LOG_SQS_BASE_TRACE(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_TRACE, stream) - -// Log with explicitly specified request id -#define RLOG_SQS_REQ_BASE_EMERG(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_EMERG, requestId, stream) -#define RLOG_SQS_REQ_BASE_ALERT(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_ALERT, requestId, stream) -#define RLOG_SQS_REQ_BASE_CRIT(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_CRIT, requestId, stream) -#define RLOG_SQS_REQ_BASE_ERROR(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_ERROR, requestId, stream) -#define RLOG_SQS_REQ_BASE_WARN(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_WARN, requestId, stream) -#define RLOG_SQS_REQ_BASE_NOTICE(actorCtxOrSystem, requestId, stream) 
RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_NOTICE, requestId, stream) -#define RLOG_SQS_REQ_BASE_INFO(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_INFO, requestId, stream) -#define RLOG_SQS_REQ_BASE_DEBUG(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, requestId, stream) -#define RLOG_SQS_REQ_BASE_TRACE(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_TRACE, requestId, stream) - -// Log with imlicitly specified request id (RequestId_ member) -#define RLOG_SQS_BASE_EMERG(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_EMERG, stream) -#define RLOG_SQS_BASE_ALERT(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_ALERT, stream) -#define RLOG_SQS_BASE_CRIT(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_CRIT, stream) -#define RLOG_SQS_BASE_ERROR(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_ERROR, stream) -#define RLOG_SQS_BASE_WARN(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_WARN, stream) -#define RLOG_SQS_BASE_NOTICE(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_NOTICE, stream) -#define RLOG_SQS_BASE_INFO(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_INFO, stream) -#define RLOG_SQS_BASE_DEBUG(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, stream) -#define RLOG_SQS_BASE_TRACE(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_TRACE, stream) - - - -// -// Inside actor system -// - -#define LOG_SQS(priority, stream) \ - LOG_SQS_BASE(TActivationContext::AsActorContext(), priority, stream) - -#define RLOG_SQS_REQ(priority, requestId, stream) \ - RLOG_SQS_REQ_BASE(TActivationContext::AsActorContext(), priority, requestId, stream) - -#define RLOG_SQS(priority, stream) \ - RLOG_SQS_BASE(TActivationContext::AsActorContext(), priority, stream) - - -// Log under SQS service component -#define LOG_SQS_EMERG(stream) LOG_SQS_BASE_EMERG(TActivationContext::AsActorContext(), stream) -#define LOG_SQS_ALERT(stream) LOG_SQS_BASE_ALERT(TActivationContext::AsActorContext(), stream) -#define LOG_SQS_CRIT(stream) LOG_SQS_BASE_CRIT(TActivationContext::AsActorContext(), stream) -#define LOG_SQS_ERROR(stream) LOG_SQS_BASE_ERROR(TActivationContext::AsActorContext(), stream) -#define LOG_SQS_WARN(stream) LOG_SQS_BASE_WARN(TActivationContext::AsActorContext(), stream) -#define LOG_SQS_NOTICE(stream) LOG_SQS_BASE_NOTICE(TActivationContext::AsActorContext(), stream) -#define LOG_SQS_INFO(stream) LOG_SQS_BASE_INFO(TActivationContext::AsActorContext(), stream) -#define LOG_SQS_DEBUG(stream) LOG_SQS_BASE_DEBUG(TActivationContext::AsActorContext(), stream) -#define LOG_SQS_TRACE(stream) LOG_SQS_BASE_TRACE(TActivationContext::AsActorContext(), stream) - -// Log with explicitly specified request id -#define RLOG_SQS_REQ_EMERG(requestId, stream) RLOG_SQS_REQ_BASE_EMERG(TActivationContext::AsActorContext(), requestId, stream) -#define RLOG_SQS_REQ_ALERT(requestId, stream) RLOG_SQS_REQ_BASE_ALERT(TActivationContext::AsActorContext(), requestId, stream) -#define RLOG_SQS_REQ_CRIT(requestId, stream) RLOG_SQS_REQ_BASE_CRIT(TActivationContext::AsActorContext(), requestId, stream) -#define RLOG_SQS_REQ_ERROR(requestId, stream) RLOG_SQS_REQ_BASE_ERROR(TActivationContext::AsActorContext(), requestId, stream) -#define 
RLOG_SQS_REQ_WARN(requestId, stream) RLOG_SQS_REQ_BASE_WARN(TActivationContext::AsActorContext(), requestId, stream) -#define RLOG_SQS_REQ_NOTICE(requestId, stream) RLOG_SQS_REQ_BASE_NOTICE(TActivationContext::AsActorContext(), requestId, stream) -#define RLOG_SQS_REQ_INFO(requestId, stream) RLOG_SQS_REQ_BASE_INFO(TActivationContext::AsActorContext(), requestId, stream) -#define RLOG_SQS_REQ_DEBUG(requestId, stream) RLOG_SQS_REQ_BASE_DEBUG(TActivationContext::AsActorContext(), requestId, stream) -#define RLOG_SQS_REQ_TRACE(requestId, stream) RLOG_SQS_REQ_BASE_TRACE(TActivationContext::AsActorContext(), requestId, stream) - -// Log with imlicitly specified request id (RequestId_ member) -#define RLOG_SQS_EMERG(stream) RLOG_SQS_BASE_EMERG(TActivationContext::AsActorContext(), stream) -#define RLOG_SQS_ALERT(stream) RLOG_SQS_BASE_ALERT(TActivationContext::AsActorContext(), stream) -#define RLOG_SQS_CRIT(stream) RLOG_SQS_BASE_CRIT(TActivationContext::AsActorContext(), stream) -#define RLOG_SQS_ERROR(stream) RLOG_SQS_BASE_ERROR(TActivationContext::AsActorContext(), stream) -#define RLOG_SQS_WARN(stream) RLOG_SQS_BASE_WARN(TActivationContext::AsActorContext(), stream) -#define RLOG_SQS_NOTICE(stream) RLOG_SQS_BASE_NOTICE(TActivationContext::AsActorContext(), stream) -#define RLOG_SQS_INFO(stream) RLOG_SQS_BASE_INFO(TActivationContext::AsActorContext(), stream) -#define RLOG_SQS_DEBUG(stream) RLOG_SQS_BASE_DEBUG(TActivationContext::AsActorContext(), stream) -#define RLOG_SQS_TRACE(stream) RLOG_SQS_BASE_TRACE(TActivationContext::AsActorContext(), stream) + +#include <util/stream/output.h> + +#include <limits> + +namespace NKikimr::NSQS { + +struct TQueuePath; + +ui64 RequestIdSample(const TStringBuf& requestId); + +// Lightweight class for simple logging queue name +class TLogQueueName { +public: + TLogQueueName(const TString& userName, const TString& queueName, ui64 shard = std::numeric_limits<ui64>::max()); + TLogQueueName(const TQueuePath& queuePath, ui64 shard = std::numeric_limits<ui64>::max()); + + void OutTo(IOutputStream& out) const; + +private: + const TString& UserName; + const TString& QueueName; + const ui64 Shard; +}; + +} // namespace NKikimr::NSQS + + +// +// Outside actor system +// + +#define LOG_SQS_BASE(actorCtxOrSystem, priority, stream) \ + LOG_LOG_S(actorCtxOrSystem, priority, NKikimrServices::SQS, stream) + +#define RLOG_SQS_REQ_BASE(actorCtxOrSystem, priority, requestId, stream) \ + LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, priority, NKikimrServices::SQS, NKikimr::NSQS::RequestIdSample(requestId), "Request [" << requestId << "] " << stream) + +#define RLOG_SQS_BASE(actorCtxOrSystem, priority, stream) \ + RLOG_SQS_REQ_BASE(actorCtxOrSystem, priority, RequestId_, stream) + + +// Log under SQS service component +#define LOG_SQS_BASE_EMERG(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_EMERG, stream) +#define LOG_SQS_BASE_ALERT(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_ALERT, stream) +#define LOG_SQS_BASE_CRIT(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_CRIT, stream) +#define LOG_SQS_BASE_ERROR(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_ERROR, stream) +#define LOG_SQS_BASE_WARN(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_WARN, stream) +#define LOG_SQS_BASE_NOTICE(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_NOTICE, stream) +#define LOG_SQS_BASE_INFO(actorCtxOrSystem, stream) 
LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_INFO, stream) +#define LOG_SQS_BASE_DEBUG(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, stream) +#define LOG_SQS_BASE_TRACE(actorCtxOrSystem, stream) LOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_TRACE, stream) + +// Log with explicitly specified request id +#define RLOG_SQS_REQ_BASE_EMERG(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_EMERG, requestId, stream) +#define RLOG_SQS_REQ_BASE_ALERT(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_ALERT, requestId, stream) +#define RLOG_SQS_REQ_BASE_CRIT(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_CRIT, requestId, stream) +#define RLOG_SQS_REQ_BASE_ERROR(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_ERROR, requestId, stream) +#define RLOG_SQS_REQ_BASE_WARN(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_WARN, requestId, stream) +#define RLOG_SQS_REQ_BASE_NOTICE(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_NOTICE, requestId, stream) +#define RLOG_SQS_REQ_BASE_INFO(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_INFO, requestId, stream) +#define RLOG_SQS_REQ_BASE_DEBUG(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, requestId, stream) +#define RLOG_SQS_REQ_BASE_TRACE(actorCtxOrSystem, requestId, stream) RLOG_SQS_REQ_BASE(actorCtxOrSystem, NActors::NLog::PRI_TRACE, requestId, stream) + +// Log with imlicitly specified request id (RequestId_ member) +#define RLOG_SQS_BASE_EMERG(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_EMERG, stream) +#define RLOG_SQS_BASE_ALERT(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_ALERT, stream) +#define RLOG_SQS_BASE_CRIT(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_CRIT, stream) +#define RLOG_SQS_BASE_ERROR(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_ERROR, stream) +#define RLOG_SQS_BASE_WARN(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_WARN, stream) +#define RLOG_SQS_BASE_NOTICE(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_NOTICE, stream) +#define RLOG_SQS_BASE_INFO(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_INFO, stream) +#define RLOG_SQS_BASE_DEBUG(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, stream) +#define RLOG_SQS_BASE_TRACE(actorCtxOrSystem, stream) RLOG_SQS_BASE(actorCtxOrSystem, NActors::NLog::PRI_TRACE, stream) + + + +// +// Inside actor system +// + +#define LOG_SQS(priority, stream) \ + LOG_SQS_BASE(TActivationContext::AsActorContext(), priority, stream) + +#define RLOG_SQS_REQ(priority, requestId, stream) \ + RLOG_SQS_REQ_BASE(TActivationContext::AsActorContext(), priority, requestId, stream) + +#define RLOG_SQS(priority, stream) \ + RLOG_SQS_BASE(TActivationContext::AsActorContext(), priority, stream) + + +// Log under SQS service component +#define LOG_SQS_EMERG(stream) LOG_SQS_BASE_EMERG(TActivationContext::AsActorContext(), stream) +#define LOG_SQS_ALERT(stream) LOG_SQS_BASE_ALERT(TActivationContext::AsActorContext(), stream) +#define LOG_SQS_CRIT(stream) 
LOG_SQS_BASE_CRIT(TActivationContext::AsActorContext(), stream) +#define LOG_SQS_ERROR(stream) LOG_SQS_BASE_ERROR(TActivationContext::AsActorContext(), stream) +#define LOG_SQS_WARN(stream) LOG_SQS_BASE_WARN(TActivationContext::AsActorContext(), stream) +#define LOG_SQS_NOTICE(stream) LOG_SQS_BASE_NOTICE(TActivationContext::AsActorContext(), stream) +#define LOG_SQS_INFO(stream) LOG_SQS_BASE_INFO(TActivationContext::AsActorContext(), stream) +#define LOG_SQS_DEBUG(stream) LOG_SQS_BASE_DEBUG(TActivationContext::AsActorContext(), stream) +#define LOG_SQS_TRACE(stream) LOG_SQS_BASE_TRACE(TActivationContext::AsActorContext(), stream) + +// Log with explicitly specified request id +#define RLOG_SQS_REQ_EMERG(requestId, stream) RLOG_SQS_REQ_BASE_EMERG(TActivationContext::AsActorContext(), requestId, stream) +#define RLOG_SQS_REQ_ALERT(requestId, stream) RLOG_SQS_REQ_BASE_ALERT(TActivationContext::AsActorContext(), requestId, stream) +#define RLOG_SQS_REQ_CRIT(requestId, stream) RLOG_SQS_REQ_BASE_CRIT(TActivationContext::AsActorContext(), requestId, stream) +#define RLOG_SQS_REQ_ERROR(requestId, stream) RLOG_SQS_REQ_BASE_ERROR(TActivationContext::AsActorContext(), requestId, stream) +#define RLOG_SQS_REQ_WARN(requestId, stream) RLOG_SQS_REQ_BASE_WARN(TActivationContext::AsActorContext(), requestId, stream) +#define RLOG_SQS_REQ_NOTICE(requestId, stream) RLOG_SQS_REQ_BASE_NOTICE(TActivationContext::AsActorContext(), requestId, stream) +#define RLOG_SQS_REQ_INFO(requestId, stream) RLOG_SQS_REQ_BASE_INFO(TActivationContext::AsActorContext(), requestId, stream) +#define RLOG_SQS_REQ_DEBUG(requestId, stream) RLOG_SQS_REQ_BASE_DEBUG(TActivationContext::AsActorContext(), requestId, stream) +#define RLOG_SQS_REQ_TRACE(requestId, stream) RLOG_SQS_REQ_BASE_TRACE(TActivationContext::AsActorContext(), requestId, stream) + +// Log with imlicitly specified request id (RequestId_ member) +#define RLOG_SQS_EMERG(stream) RLOG_SQS_BASE_EMERG(TActivationContext::AsActorContext(), stream) +#define RLOG_SQS_ALERT(stream) RLOG_SQS_BASE_ALERT(TActivationContext::AsActorContext(), stream) +#define RLOG_SQS_CRIT(stream) RLOG_SQS_BASE_CRIT(TActivationContext::AsActorContext(), stream) +#define RLOG_SQS_ERROR(stream) RLOG_SQS_BASE_ERROR(TActivationContext::AsActorContext(), stream) +#define RLOG_SQS_WARN(stream) RLOG_SQS_BASE_WARN(TActivationContext::AsActorContext(), stream) +#define RLOG_SQS_NOTICE(stream) RLOG_SQS_BASE_NOTICE(TActivationContext::AsActorContext(), stream) +#define RLOG_SQS_INFO(stream) RLOG_SQS_BASE_INFO(TActivationContext::AsActorContext(), stream) +#define RLOG_SQS_DEBUG(stream) RLOG_SQS_BASE_DEBUG(TActivationContext::AsActorContext(), stream) +#define RLOG_SQS_TRACE(stream) RLOG_SQS_BASE_TRACE(TActivationContext::AsActorContext(), stream) diff --git a/ydb/core/ymq/actor/message_delay_stats.cpp b/ydb/core/ymq/actor/message_delay_stats.cpp index debe63f8ea0..d33a5ada656 100644 --- a/ydb/core/ymq/actor/message_delay_stats.cpp +++ b/ydb/core/ymq/actor/message_delay_stats.cpp @@ -1,60 +1,60 @@ -#include "message_delay_stats.h" - -#include <algorithm> -#include <numeric> - -namespace NKikimr::NSQS { - -constexpr size_t BucketsCount = 901; -constexpr TDuration BucketLength = TDuration::Seconds(1); -const TDuration WindowLength = BucketLength * BucketsCount; - -void TMessageDelayStatistics::AdvanceTime(TInstant now) { - if (now < Start + BucketLength) { - return; - } - - const size_t bucketsDiff = (now - Start).GetValue() / BucketLength.GetValue(); - const size_t bucketsToClear = Min(bucketsDiff, 
BucketsCount); - - // clear buckets - for (size_t i = FirstBucket; i < FirstBucket + bucketsToClear; ++i) { - const size_t index = i < BucketsCount ? i : i - BucketsCount; - Buckets[index] = 0; - } - FirstBucket += bucketsToClear; - if (FirstBucket >= BucketsCount) { - FirstBucket -= BucketsCount; - } - Start += BucketLength * bucketsDiff; -} - -size_t TMessageDelayStatistics::UpdateAndGetMessagesDelayed(TInstant now) { - if (Buckets.empty()) { - return 0; - } - - AdvanceTime(now); - return std::accumulate(Buckets.begin(), Buckets.end(), 0); -} - -void TMessageDelayStatistics::AddDelayedMessage(TInstant delayDeadline, TInstant now) { - Y_ASSERT(delayDeadline > now); - - // allocate memory with first delayed message - if (Buckets.empty()) { - Buckets.resize(BucketsCount); - Start = now; - } else { - AdvanceTime(now); - } - - if (delayDeadline > Start + WindowLength) { - return; - } - - const size_t bucket = (delayDeadline - Start).GetValue() / BucketLength.GetValue(); - ++Buckets[(FirstBucket + bucket) % BucketsCount]; -} - -} // namespace NKikimr::NSQS +#include "message_delay_stats.h" + +#include <algorithm> +#include <numeric> + +namespace NKikimr::NSQS { + +constexpr size_t BucketsCount = 901; +constexpr TDuration BucketLength = TDuration::Seconds(1); +const TDuration WindowLength = BucketLength * BucketsCount; + +void TMessageDelayStatistics::AdvanceTime(TInstant now) { + if (now < Start + BucketLength) { + return; + } + + const size_t bucketsDiff = (now - Start).GetValue() / BucketLength.GetValue(); + const size_t bucketsToClear = Min(bucketsDiff, BucketsCount); + + // clear buckets + for (size_t i = FirstBucket; i < FirstBucket + bucketsToClear; ++i) { + const size_t index = i < BucketsCount ? i : i - BucketsCount; + Buckets[index] = 0; + } + FirstBucket += bucketsToClear; + if (FirstBucket >= BucketsCount) { + FirstBucket -= BucketsCount; + } + Start += BucketLength * bucketsDiff; +} + +size_t TMessageDelayStatistics::UpdateAndGetMessagesDelayed(TInstant now) { + if (Buckets.empty()) { + return 0; + } + + AdvanceTime(now); + return std::accumulate(Buckets.begin(), Buckets.end(), 0); +} + +void TMessageDelayStatistics::AddDelayedMessage(TInstant delayDeadline, TInstant now) { + Y_ASSERT(delayDeadline > now); + + // allocate memory with first delayed message + if (Buckets.empty()) { + Buckets.resize(BucketsCount); + Start = now; + } else { + AdvanceTime(now); + } + + if (delayDeadline > Start + WindowLength) { + return; + } + + const size_t bucket = (delayDeadline - Start).GetValue() / BucketLength.GetValue(); + ++Buckets[(FirstBucket + bucket) % BucketsCount]; +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/message_delay_stats.h b/ydb/core/ymq/actor/message_delay_stats.h index 98e303c1f76..e8e121adfbd 100644 --- a/ydb/core/ymq/actor/message_delay_stats.h +++ b/ydb/core/ymq/actor/message_delay_stats.h @@ -1,27 +1,27 @@ -#pragma once -#include "defs.h" - -#include <util/datetime/base.h> - -#include <vector> - -namespace NKikimr::NSQS { - -class TMessageDelayStatistics { -public: - TMessageDelayStatistics() = default; - - size_t UpdateAndGetMessagesDelayed(TInstant now); - - void AddDelayedMessage(TInstant delayDeadline, TInstant now); - -private: - void AdvanceTime(TInstant now); - -private: - std::vector<size_t> Buckets; - size_t FirstBucket = 0; - TInstant Start = {}; -}; - -} // namespace NKikimr::NSQS +#pragma once +#include "defs.h" + +#include <util/datetime/base.h> + +#include <vector> + +namespace NKikimr::NSQS { + +class TMessageDelayStatistics { +public: + 
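Editor's note on message_delay_stats.cpp above: it answers "how many messages are still delayed right now" with a fixed ring of 901 one-second buckets. AddDelayedMessage increments the bucket that holds the deadline, and AdvanceTime zeroes the buckets the clock has already passed before any read. The following is a hedged standalone restatement of that bucket arithmetic with std::chrono; DelayedMessageWindow is an illustrative name and the Y_ASSERT on the deadline is omitted.

// Illustrative sketch (standard C++ only) of the sliding-window counter in
// message_delay_stats.cpp: a ring of 901 one-second buckets, lazily allocated,
// swept forward on every access.
#include <algorithm>
#include <chrono>
#include <cstddef>
#include <numeric>
#include <vector>

class DelayedMessageWindow {
    using Clock = std::chrono::steady_clock;
    static constexpr std::size_t BucketsCount = 901;
    static constexpr std::chrono::seconds BucketLength{1};

public:
    // Count a message whose delay deadline lies in the future (analogue of AddDelayedMessage).
    void AddDelayedMessage(Clock::time_point delayDeadline, Clock::time_point now) {
        if (Buckets_.empty()) {            // allocate memory with the first delayed message
            Buckets_.resize(BucketsCount);
            Start_ = now;
        } else {
            AdvanceTime(now);
        }
        const auto windowEnd = Start_ + BucketLength * static_cast<long long>(BucketsCount);
        if (delayDeadline > windowEnd) {
            return;                        // deadline beyond the window is simply not tracked
        }
        const auto bucket = static_cast<std::size_t>((delayDeadline - Start_) / BucketLength);
        ++Buckets_[(FirstBucket_ + bucket) % BucketsCount];
    }

    // Analogue of UpdateAndGetMessagesDelayed: sweep, then sum all live buckets.
    std::size_t MessagesDelayed(Clock::time_point now) {
        if (Buckets_.empty()) {
            return 0;
        }
        AdvanceTime(now);
        return std::accumulate(Buckets_.begin(), Buckets_.end(), std::size_t{0});
    }

private:
    // Zero the buckets the clock has passed and rotate the ring start forward.
    void AdvanceTime(Clock::time_point now) {
        if (now < Start_ + BucketLength) {
            return;
        }
        const auto bucketsDiff = static_cast<std::size_t>((now - Start_) / BucketLength);
        const std::size_t bucketsToClear = std::min(bucketsDiff, BucketsCount);
        for (std::size_t i = FirstBucket_; i < FirstBucket_ + bucketsToClear; ++i) {
            Buckets_[i % BucketsCount] = 0;
        }
        FirstBucket_ = (FirstBucket_ + bucketsToClear) % BucketsCount;
        Start_ += BucketLength * static_cast<long long>(bucketsDiff);
    }

    std::vector<std::size_t> Buckets_;
    std::size_t FirstBucket_ = 0;
    Clock::time_point Start_{};
};

The window trades exactness for O(1) memory: deadlines beyond Start + 901 seconds are dropped, matching the early return in the original.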
TMessageDelayStatistics() = default; + + size_t UpdateAndGetMessagesDelayed(TInstant now); + + void AddDelayedMessage(TInstant delayDeadline, TInstant now); + +private: + void AdvanceTime(TInstant now); + +private: + std::vector<size_t> Buckets; + size_t FirstBucket = 0; + TInstant Start = {}; +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/metering.cpp b/ydb/core/ymq/actor/metering.cpp index 94ccef7eb01..e5639579813 100644 --- a/ydb/core/ymq/actor/metering.cpp +++ b/ydb/core/ymq/actor/metering.cpp @@ -221,7 +221,7 @@ void TProcessedRequestsAggregator::InitAddressClassifier(const NKikimrConfig::TS } class TMeteringActor - : public TActorBootstrapped<TMeteringActor> + : public TActorBootstrapped<TMeteringActor> { public: TMeteringActor() @@ -234,9 +234,9 @@ public: ctx.Send(NNetClassifier::MakeNetClassifierID(), new NNetClassifier::TEvNetClassifier::TEvSubscribe); - Aggregator = MakeHolder<TProcessedRequestsAggregator>(Cfg()); + Aggregator = MakeHolder<TProcessedRequestsAggregator>(Cfg()); - FlushProcessedRequestsAttributes(); + FlushProcessedRequestsAttributes(); } void WriteLogRecord(const NSc::TValue& record, TStringBuilder& records) const { @@ -247,15 +247,15 @@ public: return NKikimrServices::TActivity::SQS_METERING_ACTOR; } - void FlushProcessedRequestsAttributes() { + void FlushProcessedRequestsAttributes() { Y_VERIFY(Aggregator); - Schedule(TDuration::MilliSeconds(Cfg().GetMeteringFlushingIntervalMs()), new TEvWakeup()); + Schedule(TDuration::MilliSeconds(Cfg().GetMeteringFlushingIntervalMs()), new TEvWakeup()); - const auto reportedTrafficRecords = Aggregator->DumpReportedTrafficAsJsonArray(HostFQDN, TActivationContext::Now()); - const auto reportedRequestsRecords = Aggregator->DumpReportedRequestsAsJsonArray(HostFQDN, TActivationContext::Now()); + const auto reportedTrafficRecords = Aggregator->DumpReportedTrafficAsJsonArray(HostFQDN, TActivationContext::Now()); + const auto reportedRequestsRecords = Aggregator->DumpReportedRequestsAsJsonArray(HostFQDN, TActivationContext::Now()); - if (Cfg().GetMeteringLogFilePath()) { + if (Cfg().GetMeteringLogFilePath()) { TStringBuilder records; for (const auto& trafficRecord : reportedTrafficRecords) { @@ -273,21 +273,21 @@ public: Aggregator->ResetReportsStorage(); } - void HandleWakeup(TEvWakeup::TPtr&) { - FlushProcessedRequestsAttributes(); + void HandleWakeup(TEvWakeup::TPtr&) { + FlushProcessedRequestsAttributes(); } - void HandleReportProcessedRequestAttributes(TSqsEvents::TEvReportProcessedRequestAttributes::TPtr& ev) { + void HandleReportProcessedRequestAttributes(TSqsEvents::TEvReportProcessedRequestAttributes::TPtr& ev) { Y_VERIFY(Aggregator); Aggregator->Add(ev->Get()->Data); } - STATEFN(Work) { + STATEFN(Work) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); + hFunc(TEvWakeup, HandleWakeup); hFunc(NNetClassifier::TEvNetClassifier::TEvClassifierUpdate, HandleNetClassifierUpdate); - hFunc(TSqsEvents::TEvReportProcessedRequestAttributes, HandleReportProcessedRequestAttributes); + hFunc(TSqsEvents::TEvReportProcessedRequestAttributes, HandleReportProcessedRequestAttributes); } } @@ -301,6 +301,6 @@ private: THolder<TProcessedRequestsAggregator> Aggregator; }; -IActor* CreateSqsMeteringService() { return new TMeteringActor(); } +IActor* CreateSqsMeteringService() { return new TMeteringActor(); } } // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/metering.h b/ydb/core/ymq/actor/metering.h index c9d5a58991b..949648620de 100644 --- a/ydb/core/ymq/actor/metering.h +++ 
b/ydb/core/ymq/actor/metering.h @@ -1,5 +1,5 @@ #pragma once -#include "defs.h" +#include "defs.h" #include <ydb/core/ymq/base/action.h> #include <ydb/core/util/address_classifier.h> diff --git a/ydb/core/ymq/actor/migration.cpp b/ydb/core/ymq/actor/migration.cpp index e547e975581..34b2b2c85d8 100644 --- a/ydb/core/ymq/actor/migration.cpp +++ b/ydb/core/ymq/actor/migration.cpp @@ -1,362 +1,362 @@ -#include "cfg.h" -#include "log.h" -#include "migration.h" -#include "executor.h" - +#include "cfg.h" +#include "log.h" +#include "migration.h" +#include "executor.h" + #include <ydb/core/tx/scheme_cache/scheme_cache.h> #include <ydb/core/ymq/base/debug_info.h> #include <ydb/core/ymq/queues/common/queries.h> - -#include <util/string/builder.h> -#include <util/string/join.h> - -#include <limits> - -namespace NKikimr::NSQS { - -class TAddColumnActor : public TActorBootstrapped<TAddColumnActor> { -public: - TAddColumnActor(const TString& userName, - const TString& queueName, + +#include <util/string/builder.h> +#include <util/string/join.h> + +#include <limits> + +namespace NKikimr::NSQS { + +class TAddColumnActor : public TActorBootstrapped<TAddColumnActor> { +public: + TAddColumnActor(const TString& userName, + const TString& queueName, const TActorId& parent, const TActorId& schemeCache, - TIntrusivePtr<TQueueCounters> counters, - const TString& tablePath, - const TString& columnName, - NScheme::TTypeId columnType); - virtual ~TAddColumnActor() = default; - - void Bootstrap(); - + TIntrusivePtr<TQueueCounters> counters, + const TString& tablePath, + const TString& columnName, + NScheme::TTypeId columnType); + virtual ~TAddColumnActor() = default; + + void Bootstrap(); + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_QUEUE_MIGRATION_ACTOR; - } - -private: - STATEFN(StateFunc); - - void HandleTableInfo(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev); - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - - void SendReplyAndDie(bool ok); - - void GetTableInfo(); - - void CheckAndAddColumn(const NSchemeCache::TSchemeCacheNavigate::TEntry& entry); - THolder<TEvTxUserProxy::TEvProposeTransaction> MakeAlterTableEvent(); - void OnTableAltered(const TSqsEvents::TEvExecuted::TRecord& ev); - -private: - const TString UserName; - const TString QueueName; - const TString TablePath; - const TString ColumnName; - const NScheme::TTypeId ColumnType; + } + +private: + STATEFN(StateFunc); + + void HandleTableInfo(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + + void SendReplyAndDie(bool ok); + + void GetTableInfo(); + + void CheckAndAddColumn(const NSchemeCache::TSchemeCacheNavigate::TEntry& entry); + THolder<TEvTxUserProxy::TEvProposeTransaction> MakeAlterTableEvent(); + void OnTableAltered(const TSqsEvents::TEvExecuted::TRecord& ev); + +private: + const TString UserName; + const TString QueueName; + const TString TablePath; + const TString ColumnName; + const NScheme::TTypeId ColumnType; const TActorId Parent; const TActorId SchemeCache; - TIntrusivePtr<TQueueCounters> Counters; -}; - -TAddColumnActor::TAddColumnActor(const TString& userName, - const TString& queueName, + TIntrusivePtr<TQueueCounters> Counters; +}; + +TAddColumnActor::TAddColumnActor(const TString& userName, + const TString& queueName, const TActorId& parent, const TActorId& schemeCache, - TIntrusivePtr<TQueueCounters> counters, - const TString& tablePath, - const TString& columnName, - 
NScheme::TTypeId columnType) - : UserName(userName) - , QueueName(queueName) - , TablePath(tablePath) - , ColumnName(columnName) - , ColumnType(columnType) - , Parent(parent) - , SchemeCache(schemeCache) - , Counters(counters) -{ -} - -STATEFN(TAddColumnActor::StateFunc) { - switch (ev->GetTypeRewrite()) { - hFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, HandleTableInfo); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - } -} - -void TAddColumnActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - ev->Get()->Call(); -} - -void TAddColumnActor::Bootstrap() { - Become(&TAddColumnActor::StateFunc); - - GetTableInfo(); -} - -void TAddColumnActor::SendReplyAndDie(bool ok) { - Send(Parent, new TSqsEvents::TEvMigrationDone(ok)); - PassAway(); -} - -void TAddColumnActor::GetTableInfo() { - auto schemeCacheRequest = MakeHolder<NSchemeCache::TSchemeCacheNavigate>(); - schemeCacheRequest->ResultSet.emplace_back(); - auto& entry = schemeCacheRequest->ResultSet.back(); - entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable; - entry.Path = SplitPath(TablePath); - Send(SchemeCache, new TEvTxProxySchemeCache::TEvNavigateKeySet(schemeCacheRequest.Release())); -} - -static TString ToString(const NSchemeCache::TSchemeCacheNavigate::TEntry& entry) { - TStringBuilder s; - s << "{ TableId: " << entry.TableId - << " Columns: ["; - - for (const auto& [id, col] : entry.Columns) { - s << " { Name: \"" << col.Name << "\"" - << " Type: " << NScheme::TypeName(col.PType) - << " }"; - } - s << " ] }"; - return std::move(s); -} - -void TAddColumnActor::HandleTableInfo(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev) { - const NSchemeCache::TSchemeCacheNavigate* navigate = ev->Get()->Request.Get(); - Y_VERIFY(navigate->ResultSet.size() == 1); - const auto& entry = navigate->ResultSet.front(); - if (navigate->ErrorCount > 0) { - LOG_SQS_ERROR("Queue " << TLogQueueName(UserName, QueueName) - << " migration. 
Failed to get table \"" << TablePath << "\" info: " + TIntrusivePtr<TQueueCounters> counters, + const TString& tablePath, + const TString& columnName, + NScheme::TTypeId columnType) + : UserName(userName) + , QueueName(queueName) + , TablePath(tablePath) + , ColumnName(columnName) + , ColumnType(columnType) + , Parent(parent) + , SchemeCache(schemeCache) + , Counters(counters) +{ +} + +STATEFN(TAddColumnActor::StateFunc) { + switch (ev->GetTypeRewrite()) { + hFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, HandleTableInfo); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + } +} + +void TAddColumnActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + ev->Get()->Call(); +} + +void TAddColumnActor::Bootstrap() { + Become(&TAddColumnActor::StateFunc); + + GetTableInfo(); +} + +void TAddColumnActor::SendReplyAndDie(bool ok) { + Send(Parent, new TSqsEvents::TEvMigrationDone(ok)); + PassAway(); +} + +void TAddColumnActor::GetTableInfo() { + auto schemeCacheRequest = MakeHolder<NSchemeCache::TSchemeCacheNavigate>(); + schemeCacheRequest->ResultSet.emplace_back(); + auto& entry = schemeCacheRequest->ResultSet.back(); + entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable; + entry.Path = SplitPath(TablePath); + Send(SchemeCache, new TEvTxProxySchemeCache::TEvNavigateKeySet(schemeCacheRequest.Release())); +} + +static TString ToString(const NSchemeCache::TSchemeCacheNavigate::TEntry& entry) { + TStringBuilder s; + s << "{ TableId: " << entry.TableId + << " Columns: ["; + + for (const auto& [id, col] : entry.Columns) { + s << " { Name: \"" << col.Name << "\"" + << " Type: " << NScheme::TypeName(col.PType) + << " }"; + } + s << " ] }"; + return std::move(s); +} + +void TAddColumnActor::HandleTableInfo(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev) { + const NSchemeCache::TSchemeCacheNavigate* navigate = ev->Get()->Request.Get(); + Y_VERIFY(navigate->ResultSet.size() == 1); + const auto& entry = navigate->ResultSet.front(); + if (navigate->ErrorCount > 0) { + LOG_SQS_ERROR("Queue " << TLogQueueName(UserName, QueueName) + << " migration. Failed to get table \"" << TablePath << "\" info: " << entry.Status); - SendReplyAndDie(false); - } else { - LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) - << " migration. Table \"" << TablePath << "\" info: " - << ToString(entry)); - CheckAndAddColumn(entry); - } -} - -THolder<TEvTxUserProxy::TEvProposeTransaction> TAddColumnActor::MakeAlterTableEvent() { - auto ev = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>(); - // Transaction info - auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); + SendReplyAndDie(false); + } else { + LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) + << " migration. 
Table \"" << TablePath << "\" info: " + << ToString(entry)); + CheckAndAddColumn(entry); + } +} + +THolder<TEvTxUserProxy::TEvProposeTransaction> TAddColumnActor::MakeAlterTableEvent() { + auto ev = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>(); + // Transaction info + auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); trans->SetOperationType(NKikimrSchemeOp::ESchemeOpAlterTable); - - auto* info = trans->MutableAlterTable(); - { - const size_t lastSlashPos = TablePath.rfind('/'); - trans->SetWorkingDir(TablePath.substr(0, lastSlashPos)); - info->SetName(TablePath.substr(lastSlashPos + 1)); - } - - auto* col = info->AddColumns(); - col->SetName(ColumnName); - col->SetType(NScheme::TypeName(ColumnType)); - - return ev; -} - -void TAddColumnActor::CheckAndAddColumn(const NSchemeCache::TSchemeCacheNavigate::TEntry& entry) { - for (const auto& [id, col] : entry.Columns) { - if (col.Name == ColumnName) { - LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) - << " migration. Found column \"" << ColumnName << "\" in table \"" << TablePath << "\" info. Do nothing"); - SendReplyAndDie(true); - return; - } - } - - // Column was not found. Start altering table - auto alterTableEvent = MakeAlterTableEvent(); - LOG_SQS_INFO("Queue " << TLogQueueName(UserName, QueueName) - << " migration. Adding column \"" << ColumnName << "\" to table \"" - << TablePath << "\": " << alterTableEvent->Record); - auto transactionCounters = Counters->GetTransactionCounters(); - Register(new TMiniKqlExecutionActor( - SelfId(), - "", - std::move(alterTableEvent), - false, - TQueuePath(Cfg().GetRoot(), UserName, QueueName), - transactionCounters, - [this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnTableAltered(ev); }) - ); -} - -void TAddColumnActor::OnTableAltered(const TSqsEvents::TEvExecuted::TRecord& ev) { - if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - LOG_SQS_INFO("Queue " << TLogQueueName(UserName, QueueName) - << " migration. Added column \"" << ColumnName << "\" to table \"" - << TablePath << "\": " << ev); - SendReplyAndDie(true); - } else { - LOG_SQS_ERROR("Queue " << TLogQueueName(UserName, QueueName) - << " migration. Failed to add column \"" << ColumnName << "\" to table \"" - << TablePath << "\": " << ev); - SendReplyAndDie(false); - } -} - - + + auto* info = trans->MutableAlterTable(); + { + const size_t lastSlashPos = TablePath.rfind('/'); + trans->SetWorkingDir(TablePath.substr(0, lastSlashPos)); + info->SetName(TablePath.substr(lastSlashPos + 1)); + } + + auto* col = info->AddColumns(); + col->SetName(ColumnName); + col->SetType(NScheme::TypeName(ColumnType)); + + return ev; +} + +void TAddColumnActor::CheckAndAddColumn(const NSchemeCache::TSchemeCacheNavigate::TEntry& entry) { + for (const auto& [id, col] : entry.Columns) { + if (col.Name == ColumnName) { + LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) + << " migration. Found column \"" << ColumnName << "\" in table \"" << TablePath << "\" info. Do nothing"); + SendReplyAndDie(true); + return; + } + } + + // Column was not found. Start altering table + auto alterTableEvent = MakeAlterTableEvent(); + LOG_SQS_INFO("Queue " << TLogQueueName(UserName, QueueName) + << " migration. 
Adding column \"" << ColumnName << "\" to table \"" + << TablePath << "\": " << alterTableEvent->Record); + auto transactionCounters = Counters->GetTransactionCounters(); + Register(new TMiniKqlExecutionActor( + SelfId(), + "", + std::move(alterTableEvent), + false, + TQueuePath(Cfg().GetRoot(), UserName, QueueName), + transactionCounters, + [this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnTableAltered(ev); }) + ); +} + +void TAddColumnActor::OnTableAltered(const TSqsEvents::TEvExecuted::TRecord& ev) { + if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + LOG_SQS_INFO("Queue " << TLogQueueName(UserName, QueueName) + << " migration. Added column \"" << ColumnName << "\" to table \"" + << TablePath << "\": " << ev); + SendReplyAndDie(true); + } else { + LOG_SQS_ERROR("Queue " << TLogQueueName(UserName, QueueName) + << " migration. Failed to add column \"" << ColumnName << "\" to table \"" + << TablePath << "\": " << ev); + SendReplyAndDie(false); + } +} + + TQueueMigrationActor::TQueueMigrationActor(const TString& userName, const TString& queueName, const TActorId& queueLeader, const TActorId& schemeCache, TIntrusivePtr<TQueueCounters> counters, TDuration waitBeforeMigration) - : UserName(userName) - , QueueName(queueName) + : UserName(userName) + , QueueName(queueName) , QueueLeader(queueLeader) - , SchemeCache(schemeCache) - , Counters(std::move(counters)) - , WaitBeforeMigration(waitBeforeMigration) -{ - DebugInfo->QueueMigrationActors.emplace(TStringBuilder() << TLogQueueName(QueueName, UserName), this); -} - -TQueueMigrationActor::~TQueueMigrationActor() { - DebugInfo->QueueMigrationActors.EraseKeyValue(TStringBuilder() << TLogQueueName(QueueName, UserName), this); -} - -void TQueueMigrationActor::Bootstrap() { - Become(&TQueueMigrationActor::StateFunc); - - if (!Cfg().GetDoAutomaticMigration()) { - LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) - << " migration. Migration is turned off. Skipping it"); - SendReplyAndDie(true); - } - - if (WaitBeforeMigration) { - Schedule(WaitBeforeMigration, new TEvWakeup()); - } else { - GetQueueParams(); - } -} - -STATEFN(TQueueMigrationActor::StateFunc) { - switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - hFunc(TSqsEvents::TEvMigrationDone, HandleMigrationDone); - } -} - -void TQueueMigrationActor::SendReplyAndDie(bool ok) { - if (ok) { - LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) - << " migration. Migration is done successfully"); - } else { - LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) - << " migration. Migration failed"); - } + , SchemeCache(schemeCache) + , Counters(std::move(counters)) + , WaitBeforeMigration(waitBeforeMigration) +{ + DebugInfo->QueueMigrationActors.emplace(TStringBuilder() << TLogQueueName(QueueName, UserName), this); +} + +TQueueMigrationActor::~TQueueMigrationActor() { + DebugInfo->QueueMigrationActors.EraseKeyValue(TStringBuilder() << TLogQueueName(QueueName, UserName), this); +} + +void TQueueMigrationActor::Bootstrap() { + Become(&TQueueMigrationActor::StateFunc); + + if (!Cfg().GetDoAutomaticMigration()) { + LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) + << " migration. Migration is turned off. 
Skipping it"); + SendReplyAndDie(true); + } + + if (WaitBeforeMigration) { + Schedule(WaitBeforeMigration, new TEvWakeup()); + } else { + GetQueueParams(); + } +} + +STATEFN(TQueueMigrationActor::StateFunc) { + switch (ev->GetTypeRewrite()) { + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TSqsEvents::TEvMigrationDone, HandleMigrationDone); + } +} + +void TQueueMigrationActor::SendReplyAndDie(bool ok) { + if (ok) { + LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) + << " migration. Migration is done successfully"); + } else { + LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) + << " migration. Migration failed"); + } Send(QueueLeader, new TSqsEvents::TEvMigrationDone(ok)); - PassAway(); -} - -void TQueueMigrationActor::HandleWakeup([[maybe_unused]] TEvWakeup::TPtr& ev) { - GetQueueParams(); -} - -void TQueueMigrationActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - ev->Get()->Call(); -} - -void TQueueMigrationActor::HandleMigrationDone(TSqsEvents::TEvMigrationDone::TPtr& ev) { - // All logging was done in child actor if something happened - if (!ev->Get()->Success) { - Answer = false; - } - if (--WaitChildrenCount == 0) { // Wait all children even if there was an error to prevent heavy concurrent modifications - SendReplyAndDie(Answer); - } -} - -void TQueueMigrationActor::GetQueueParams() { - TExecutorBuilder(SelfId(), "") - .User(UserName) - .Queue(QueueName) - .RetryOnTimeout() - .Text(Sprintf(GetQueueParamsQuery, Cfg().GetRoot().c_str())) - .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnQueueParams(ev); }) - .Counters(Counters) - .Params() - .Utf8("NAME", QueueName) - .Utf8("USER_NAME", UserName) - .ParentBuilder().StartExecutorActor(); -} - -void TQueueMigrationActor::OnQueueParams(const TSqsEvents::TEvExecuted::TRecord& ev) { - if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); - - if (bool(val["exists"])) { - const auto data(val["queue"]); - ShardsCount = data["Shards"]; - if (data["Version"].HaveValue()) { - QueueVersion = data["Version"]; - } - IsFifoQueue = data["FifoQueue"]; - - LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) - << " migration. Got queue params: { ShardsCount: " << ShardsCount << " QueueVersion: " << QueueVersion << " IsFifoQueue: " << IsFifoQueue << " }"); - - StartAltering(); - - if (WaitChildrenCount == 0) { - SendReplyAndDie(true); - } - } else { - LOG_SQS_WARN("Queue " << TLogQueueName(UserName, QueueName) - << " migration. Queue doen't exist. Do nothing"); - // Queue is deleting. - // Do nothing. - SendReplyAndDie(true); - } - } else { - LOG_SQS_ERROR("Queue " << TLogQueueName(UserName, QueueName) - << " migration. 
Failed to get queue params: " << ev); - SendReplyAndDie(false); - } -} - -void TQueueMigrationActor::RegisterAndWaitChildMigrationActor(IActor* child) { - Register(child); - ++WaitChildrenCount; -} - -TString TQueueMigrationActor::GetTablePath(const ui64 shard, const TString& tableName) { - const TString queuePath = TQueuePath(Cfg().GetRoot(), UserName, QueueName, QueueVersion).GetVersionedQueuePath(); - TStringBuilder path; - path << queuePath; - if (shard != std::numeric_limits<ui64>::max()) { - path << "/" << shard; - } - path << "/" << tableName; - return std::move(path); -} - -void TQueueMigrationActor::CheckAddColumn(const ui64 shard, const TString& tableName, const TString& columnName, NScheme::TTypeId type) { - RegisterAndWaitChildMigrationActor(new TAddColumnActor(UserName, QueueName, - SelfId(), SchemeCache, - Counters, - GetTablePath(shard, tableName), columnName, type)); -} - -void TQueueMigrationActor::CheckAddColumn(const TString& tableName, const TString& columnName, NScheme::TTypeId type) { - CheckAddColumn(std::numeric_limits<ui64>::max(), tableName, columnName, type); -} - -void TQueueMigrationActor::StartAltering() { - CheckAddColumn("Attributes", "DlqName", NScheme::NTypeIds::Utf8); - CheckAddColumn("Attributes", "DlqArn", NScheme::NTypeIds::Utf8); - CheckAddColumn("Attributes", "MaxReceiveCount", NScheme::NTypeIds::Uint64); - CheckAddColumn("Attributes", "ShowDetailedCountersDeadline", NScheme::NTypeIds::Uint64); - - CheckAddColumn("State", "InflyVersion", NScheme::NTypeIds::Uint64); - if (!IsFifoQueue) { - for (ui64 shard = 0; shard < ShardsCount; ++shard) { - CheckAddColumn(shard, "Infly", "DelayDeadline", NScheme::NTypeIds::Uint64); - } - } -} - -} // namespace NKikimr::NSQS + PassAway(); +} + +void TQueueMigrationActor::HandleWakeup([[maybe_unused]] TEvWakeup::TPtr& ev) { + GetQueueParams(); +} + +void TQueueMigrationActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + ev->Get()->Call(); +} + +void TQueueMigrationActor::HandleMigrationDone(TSqsEvents::TEvMigrationDone::TPtr& ev) { + // All logging was done in child actor if something happened + if (!ev->Get()->Success) { + Answer = false; + } + if (--WaitChildrenCount == 0) { // Wait all children even if there was an error to prevent heavy concurrent modifications + SendReplyAndDie(Answer); + } +} + +void TQueueMigrationActor::GetQueueParams() { + TExecutorBuilder(SelfId(), "") + .User(UserName) + .Queue(QueueName) + .RetryOnTimeout() + .Text(Sprintf(GetQueueParamsQuery, Cfg().GetRoot().c_str())) + .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnQueueParams(ev); }) + .Counters(Counters) + .Params() + .Utf8("NAME", QueueName) + .Utf8("USER_NAME", UserName) + .ParentBuilder().StartExecutorActor(); +} + +void TQueueMigrationActor::OnQueueParams(const TSqsEvents::TEvExecuted::TRecord& ev) { + if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); + + if (bool(val["exists"])) { + const auto data(val["queue"]); + ShardsCount = data["Shards"]; + if (data["Version"].HaveValue()) { + QueueVersion = data["Version"]; + } + IsFifoQueue = data["FifoQueue"]; + + LOG_SQS_DEBUG("Queue " << TLogQueueName(UserName, QueueName) + << " migration. 
Got queue params: { ShardsCount: " << ShardsCount << " QueueVersion: " << QueueVersion << " IsFifoQueue: " << IsFifoQueue << " }"); + + StartAltering(); + + if (WaitChildrenCount == 0) { + SendReplyAndDie(true); + } + } else { + LOG_SQS_WARN("Queue " << TLogQueueName(UserName, QueueName) + << " migration. Queue doen't exist. Do nothing"); + // Queue is deleting. + // Do nothing. + SendReplyAndDie(true); + } + } else { + LOG_SQS_ERROR("Queue " << TLogQueueName(UserName, QueueName) + << " migration. Failed to get queue params: " << ev); + SendReplyAndDie(false); + } +} + +void TQueueMigrationActor::RegisterAndWaitChildMigrationActor(IActor* child) { + Register(child); + ++WaitChildrenCount; +} + +TString TQueueMigrationActor::GetTablePath(const ui64 shard, const TString& tableName) { + const TString queuePath = TQueuePath(Cfg().GetRoot(), UserName, QueueName, QueueVersion).GetVersionedQueuePath(); + TStringBuilder path; + path << queuePath; + if (shard != std::numeric_limits<ui64>::max()) { + path << "/" << shard; + } + path << "/" << tableName; + return std::move(path); +} + +void TQueueMigrationActor::CheckAddColumn(const ui64 shard, const TString& tableName, const TString& columnName, NScheme::TTypeId type) { + RegisterAndWaitChildMigrationActor(new TAddColumnActor(UserName, QueueName, + SelfId(), SchemeCache, + Counters, + GetTablePath(shard, tableName), columnName, type)); +} + +void TQueueMigrationActor::CheckAddColumn(const TString& tableName, const TString& columnName, NScheme::TTypeId type) { + CheckAddColumn(std::numeric_limits<ui64>::max(), tableName, columnName, type); +} + +void TQueueMigrationActor::StartAltering() { + CheckAddColumn("Attributes", "DlqName", NScheme::NTypeIds::Utf8); + CheckAddColumn("Attributes", "DlqArn", NScheme::NTypeIds::Utf8); + CheckAddColumn("Attributes", "MaxReceiveCount", NScheme::NTypeIds::Uint64); + CheckAddColumn("Attributes", "ShowDetailedCountersDeadline", NScheme::NTypeIds::Uint64); + + CheckAddColumn("State", "InflyVersion", NScheme::NTypeIds::Uint64); + if (!IsFifoQueue) { + for (ui64 shard = 0; shard < ShardsCount; ++shard) { + CheckAddColumn(shard, "Infly", "DelayDeadline", NScheme::NTypeIds::Uint64); + } + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/migration.h b/ydb/core/ymq/actor/migration.h index 501ec050f3f..37aba645b64 100644 --- a/ydb/core/ymq/actor/migration.h +++ b/ydb/core/ymq/actor/migration.h @@ -1,57 +1,57 @@ -#pragma once -#include "defs.h" +#pragma once +#include "defs.h" #include <ydb/core/ymq/actor/events.h> #include <ydb/core/protos/services.pb.h> #include <ydb/public/lib/scheme_types/scheme_type_id.h> - + #include <library/cpp/actors/core/actor.h> - -namespace NKikimr::NSQS { - -class TQueueMigrationActor : public TActorBootstrapped<TQueueMigrationActor> { -public: + +namespace NKikimr::NSQS { + +class TQueueMigrationActor : public TActorBootstrapped<TQueueMigrationActor> { +public: TQueueMigrationActor(const TString& userName, const TString& queueName, const TActorId& queueLeader, const TActorId& schemeCache, TIntrusivePtr<TQueueCounters> counters, TDuration waitBeforeMigration = TDuration::Zero()); - ~TQueueMigrationActor(); - - void Bootstrap(); - + ~TQueueMigrationActor(); + + void Bootstrap(); + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_QUEUE_MIGRATION_ACTOR; - } - -private: - STATEFN(StateFunc); - - void HandleWakeup(TEvWakeup::TPtr& ev); - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - void 
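Editor's note on the migration diff above: TQueueMigrationActor fans out one TAddColumnActor per column to check, then waits for every TEvMigrationDone before answering, AND-ing the results so that a single failure still lets the remaining children finish (the comment about avoiding heavy concurrent modifications). A framework-free sketch of that fan-in bookkeeping follows; MigrationFanIn is an illustrative name, and the real code does the same thing with actor messages rather than callbacks.

// Illustrative, framework-free sketch of the fan-in pattern used by
// TQueueMigrationActor (WaitChildrenCount / Answer / HandleMigrationDone).
#include <cstddef>
#include <functional>
#include <utility>

class MigrationFanIn {
public:
    explicit MigrationFanIn(std::function<void(bool)> onAllDone)
        : OnAllDone_(std::move(onAllDone)) {}

    // Called once per child task that was started (RegisterAndWaitChildMigrationActor).
    void ChildStarted() { ++WaitChildrenCount_; }

    // Called from each child's completion (HandleMigrationDone).
    void ChildDone(bool success) {
        if (!success) {
            Answer_ = false;               // remember the failure...
        }
        if (--WaitChildrenCount_ == 0) {   // ...but keep waiting for the remaining children
            OnAllDone_(Answer_);
        }
    }

private:
    std::function<void(bool)> OnAllDone_;
    std::size_t WaitChildrenCount_ = 0;
    bool Answer_ = true;
};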
HandleMigrationDone(TSqsEvents::TEvMigrationDone::TPtr& ev); - - void SendReplyAndDie(bool ok); - - void GetQueueParams(); - void OnQueueParams(const TSqsEvents::TEvExecuted::TRecord& ev); - - void StartAltering(); - - void RegisterAndWaitChildMigrationActor(IActor* child); - - void CheckAddColumn(const ui64 shard, const TString& tableName, const TString& columnName, NScheme::TTypeId type); - void CheckAddColumn(const TString& tableName, const TString& columnName, NScheme::TTypeId type); - - TString GetTablePath(const ui64 shard, const TString& tableName); - -private: - const TString UserName; - const TString QueueName; + } + +private: + STATEFN(StateFunc); + + void HandleWakeup(TEvWakeup::TPtr& ev); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void HandleMigrationDone(TSqsEvents::TEvMigrationDone::TPtr& ev); + + void SendReplyAndDie(bool ok); + + void GetQueueParams(); + void OnQueueParams(const TSqsEvents::TEvExecuted::TRecord& ev); + + void StartAltering(); + + void RegisterAndWaitChildMigrationActor(IActor* child); + + void CheckAddColumn(const ui64 shard, const TString& tableName, const TString& columnName, NScheme::TTypeId type); + void CheckAddColumn(const TString& tableName, const TString& columnName, NScheme::TTypeId type); + + TString GetTablePath(const ui64 shard, const TString& tableName); + +private: + const TString UserName; + const TString QueueName; const TActorId QueueLeader; const TActorId SchemeCache; - TIntrusivePtr<TQueueCounters> Counters; - TDuration WaitBeforeMigration; - bool IsFifoQueue = false; - ui64 ShardsCount = 0; - ui64 QueueVersion = 0; - size_t WaitChildrenCount = 0; - bool Answer = true; -}; - -} // namespace NKikimr::NSQS + TIntrusivePtr<TQueueCounters> Counters; + TDuration WaitBeforeMigration; + bool IsFifoQueue = false; + ui64 ShardsCount = 0; + ui64 QueueVersion = 0; + size_t WaitChildrenCount = 0; + bool Answer = true; +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/modify_permissions.cpp b/ydb/core/ymq/actor/modify_permissions.cpp index 8b9e1cb2d7d..4693cafb6c8 100644 --- a/ydb/core/ymq/actor/modify_permissions.cpp +++ b/ydb/core/ymq/actor/modify_permissions.cpp @@ -7,26 +7,26 @@ #include <util/string/cast.h> #include <util/string/join.h> -namespace NKikimr::NSQS { - +namespace NKikimr::NSQS { + class TModifyPermissionsActor : public TActionActor<TModifyPermissionsActor> { public: - TModifyPermissionsActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::ModifyPermissions, std::move(cb)) + TModifyPermissionsActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::ModifyPermissions, std::move(cb)) { Response_.MutableModifyPermissions()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } static constexpr bool NeedExistingQueue() { return false; } - static constexpr bool NeedUserSpecified() { - return false; - } - + static constexpr bool NeedUserSpecified() { + return false; + } + private: bool TryConvertSQSPermissionToYdbACLMask(const TString& permission, ui32& mask) const { mask = GetACERequiredAccess(permission); @@ -43,7 +43,7 @@ private: if (!TryConvertSQSPermissionToYdbACLMask(name, mask)) { \ TString permissionName(Y_STRINGIZE(PERMISSION_NAME)); \ permissionName.to_lower(); \ - const auto errorMsg = Sprintf("ModifyPermissions failed to %s unknown permission %s.", permissionName.c_str(), name.c_str()); \ + const auto errorMsg 
= Sprintf("ModifyPermissions failed to %s unknown permission %s.", permissionName.c_str(), name.c_str()); \ MakeError(MutableErrorDesc(), NErrors::INVALID_PARAMETER_VALUE, errorMsg); \ return false; \ } \ @@ -53,30 +53,30 @@ private: } bool DoValidate() override { - if (!Request().GetResource()) { + if (!Request().GetResource()) { MakeError(Response_.MutableModifyPermissions(), NErrors::MISSING_PARAMETER, "No Resource parameter."); return false; } - Resource_ = MakeAbsolutePath(Request().GetResource()); + Resource_ = MakeAbsolutePath(Request().GetResource()); if (IsForbiddenPath(Resource_)) { - MakeError(MutableErrorDesc(), NErrors::INVALID_PARAMETER_VALUE, Sprintf("Path does not exist: %s.", SanitizeNodePath(Resource_).c_str())); + MakeError(MutableErrorDesc(), NErrors::INVALID_PARAMETER_VALUE, Sprintf("Path does not exist: %s.", SanitizeNodePath(Resource_).c_str())); return false; } - if (Request().GetClearACL()) { + if (Request().GetClearACL()) { ACLDiff_.ClearAccess(); } ui32 mask = 0; - for (const auto& action : Request().GetActions()) { + for (const auto& action : Request().GetActions()) { switch (action.Action_case()) { PROCESS_MODIFY_ACL_ACTION(Set, Add, true); PROCESS_MODIFY_ACL_ACTION(Grant, Add, false); PROCESS_MODIFY_ACL_ACTION(Revoke, Remove, false); default: { - MakeError(MutableErrorDesc(), NErrors::INVALID_PARAMETER_VALUE, "Unknown ModifyPermissions action."); + MakeError(MutableErrorDesc(), NErrors::INVALID_PARAMETER_VALUE, "Unknown ModifyPermissions action."); return false; } } @@ -90,14 +90,14 @@ private: } virtual TString GetCustomACLPath() const override { - return MakeAbsolutePath(Request().GetResource()); + return MakeAbsolutePath(Request().GetResource()); } TString DoGetQueueName() const override { return {}; } - void DoAction() override { + void DoAction() override { Become(&TThis::StateFunc); TStringBuf workingDir, resource; @@ -111,38 +111,38 @@ private: modifyScheme->MutableModifyACL()->SetName(TString(resource)); modifyScheme->MutableModifyACL()->SetDiffACL(ACLDiff_.SerializeAsString()); - Send(MakeTxProxyID(), proposeRequest.Release()); + Send(MakeTxProxyID(), proposeRequest.Release()); } - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TEvTxUserProxy::TEvProposeTransactionStatus, HandleProposeTransactionStatus); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TEvTxUserProxy::TEvProposeTransactionStatus, HandleProposeTransactionStatus); } } - void LogModifyACLRequestResultSafe(bool success) const { + void LogModifyACLRequestResultSafe(bool success) const { const TString user = (UserToken_ && UserToken_->GetUserSID()) ? 
UserToken_->GetUserSID() : "Someone"; TModifyPermissionsRequest copy; - copy.CopyFrom(Request()); + copy.CopyFrom(Request()); copy.ClearCredentials(); copy.ClearResource(); if (success) { - RLOG_SQS_WARN(user << " modified ACL for " << Resource_ << " with request " << copy); + RLOG_SQS_WARN(user << " modified ACL for " << Resource_ << " with request " << copy); } else { - RLOG_SQS_ERROR(user << " failed to modify ACL for " << Resource_ << " with request " << copy); + RLOG_SQS_ERROR(user << " failed to modify ACL for " << Resource_ << " with request " << copy); } } - void HandleProposeTransactionStatus(TEvTxUserProxy::TEvProposeTransactionStatus::TPtr& ev) { + void HandleProposeTransactionStatus(TEvTxUserProxy::TEvProposeTransactionStatus::TPtr& ev) { const TEvTxUserProxy::TEvProposeTransactionStatus* msg = ev->Get(); const auto status = static_cast<TEvTxUserProxy::TEvProposeTransactionStatus::EStatus>(msg->Record.GetStatus()); switch (status) { case TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete: case TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecInProgress: { - LogModifyACLRequestResultSafe(true); + LogModifyACLRequestResultSafe(true); break; } @@ -150,25 +150,25 @@ private: case TEvTxUserProxy::TResultStatus::AccessDenied: case TEvTxUserProxy::TResultStatus::ExecError: default: { - LogModifyACLRequestResultSafe(false); + LogModifyACLRequestResultSafe(false); MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE); break; } } - SendReplyAndDie(); - } - - const TModifyPermissionsRequest& Request() const { - return SourceSqsRequest_.GetModifyPermissions(); + SendReplyAndDie(); } + const TModifyPermissionsRequest& Request() const { + return SourceSqsRequest_.GetModifyPermissions(); + } + private: NACLib::TDiffACL ACLDiff_; TString Resource_; }; -IActor* CreateModifyPermissionsActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TModifyPermissionsActor(sourceSqsRequest, std::move(cb)); +IActor* CreateModifyPermissionsActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TModifyPermissionsActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/params.h b/ydb/core/ymq/actor/params.h index abe1154c8d4..c10e1b7d185 100644 --- a/ydb/core/ymq/actor/params.h +++ b/ydb/core/ymq/actor/params.h @@ -1,47 +1,47 @@ #pragma once -#include "defs.h" +#include "defs.h" #include <ydb/library/mkql_proto/protos/minikql.pb.h> #include <ydb/public/lib/value/value.h> #include <util/generic/maybe.h> -namespace NKikimr::NSQS { - -class TExecutorBuilder; +namespace NKikimr::NSQS { +class TExecutorBuilder; + class TParameters { public: - explicit TParameters(NKikimrMiniKQL::TParams* params, TExecutorBuilder* parent = nullptr) + explicit TParameters(NKikimrMiniKQL::TParams* params, TExecutorBuilder* parent = nullptr) : Params_(params) - , Parent(parent) + , Parent(parent) { Params_->MutableType()->SetKind(NKikimrMiniKQL::ETypeKind::Struct); } - explicit TParameters(NKikimrMiniKQL::TParams& params, TExecutorBuilder* parent = nullptr) - : TParameters(&params, parent) + explicit TParameters(NKikimrMiniKQL::TParams& params, TExecutorBuilder* parent = nullptr) + : TParameters(&params, parent) { } - TExecutorBuilder& ParentBuilder() { - Y_VERIFY(Parent); - return *Parent; - } - - TParameters& Bool(const TString& name, const bool value) { + TExecutorBuilder& ParentBuilder() { + Y_VERIFY(Parent); + return *Parent; + } + +
TParameters& Bool(const TString& name, const bool value) { DataType(name, NScheme::NTypeIds::Bool); Params_->MutableValue()->AddStruct()->SetBool(value); return *this; } - TParameters& String(const TString& name, const TString& value) { + TParameters& String(const TString& name, const TString& value) { DataType(name, NScheme::NTypeIds::String); Params_->MutableValue()->AddStruct()->SetBytes(value); return *this; } - TParameters& Uint64(const TString& name, ui64 value) { + TParameters& Uint64(const TString& name, ui64 value) { DataType(name, NScheme::NTypeIds::Uint64); Params_->MutableValue()->AddStruct()->SetUint64(value); return *this; @@ -53,7 +53,7 @@ public: return *this; } - TParameters& OptionalBool(const TString& name, const TMaybe<bool>& value) { + TParameters& OptionalBool(const TString& name, const TMaybe<bool>& value) { OptionalDataType(name, NScheme::NTypeIds::Bool); if (value) { Params_->MutableValue()->AddStruct()->MutableOptional()->SetBool(*value); @@ -63,7 +63,7 @@ public: return *this; } - TParameters& OptionalUint64(const TString& name, const TMaybe<ui64>& value) { + TParameters& OptionalUint64(const TString& name, const TMaybe<ui64>& value) { OptionalDataType(name, NScheme::NTypeIds::Uint64); if (value) { Params_->MutableValue()->AddStruct()->MutableOptional()->SetUint64(*value); @@ -73,7 +73,7 @@ public: return *this; } - TParameters& Utf8(const TString& name, const TString& value) { + TParameters& Utf8(const TString& name, const TString& value) { DataType(name, NScheme::NTypeIds::Utf8); Params_->MutableValue()->AddStruct()->SetText(value); return *this; @@ -91,15 +91,15 @@ public: } private: - void DataType(const TString& name, NScheme::TTypeId typeId) { - auto* member = Params_->MutableType()->MutableStruct()->AddMember(); + void DataType(const TString& name, NScheme::TTypeId typeId) { + auto* member = Params_->MutableType()->MutableStruct()->AddMember(); member->SetName(name); member->MutableType()->SetKind(NKikimrMiniKQL::Data); member->MutableType()->MutableData()->SetScheme(typeId); } - void OptionalDataType(const TString& name, NScheme::TTypeId typeId) { - auto* member = Params_->MutableType()->MutableStruct()->AddMember(); + void OptionalDataType(const TString& name, NScheme::TTypeId typeId) { + auto* member = Params_->MutableType()->MutableStruct()->AddMember(); member->SetName(name); member->MutableType()->SetKind(NKikimrMiniKQL::Optional); member->MutableType()->MutableOptional()->MutableItem()->SetKind(NKikimrMiniKQL::Data); @@ -108,7 +108,7 @@ private: private: NKikimrMiniKQL::TParams* const Params_; - TExecutorBuilder* Parent = nullptr; + TExecutorBuilder* Parent = nullptr; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/ping.h b/ydb/core/ymq/actor/ping.h index 830f7e10b8d..a94a7473f90 100644 --- a/ydb/core/ymq/actor/ping.h +++ b/ydb/core/ymq/actor/ping.h @@ -1,35 +1,35 @@ -#pragma once -#include "defs.h" -#include "actor.h" -#include "log.h" - +#pragma once +#include "defs.h" +#include "actor.h" +#include "log.h" + #include <library/cpp/actors/core/actor_bootstrapped.h> - -namespace NKikimr::NSQS { - -class TPingActor - : public TActorBootstrapped<TPingActor> -{ -public: - TPingActor(THolder<IPingReplyCallback> cb, const TString& requestId) - : Callback_(std::move(cb)) - , RequestId_(requestId) - { - } - + +namespace NKikimr::NSQS { + +class TPingActor + : public TActorBootstrapped<TPingActor> +{ +public: + TPingActor(THolder<IPingReplyCallback> cb, const TString& requestId) + : Callback_(std::move(cb)) + , 
RequestId_(requestId) + { + } + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_PING_ACTOR; - } - - void Bootstrap() { - RLOG_SQS_DEBUG("Send reply to ping"); - Callback_->DoSendReply(); - PassAway(); - } - -private: - THolder<IPingReplyCallback> Callback_; - const TString RequestId_; -}; - -} // namespace NKikimr::NSQS + } + + void Bootstrap() { + RLOG_SQS_DEBUG("Send reply to ping"); + Callback_->DoSendReply(); + PassAway(); + } + +private: + THolder<IPingReplyCallback> Callback_; + const TString RequestId_; +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/proxy_actor.cpp b/ydb/core/ymq/actor/proxy_actor.cpp index 98d85210a53..8e699e3cab2 100644 --- a/ydb/core/ymq/actor/proxy_actor.cpp +++ b/ydb/core/ymq/actor/proxy_actor.cpp @@ -1,26 +1,26 @@ #include "cfg.h" -#include "error.h" -#include "proxy_actor.h" - +#include "error.h" +#include "proxy_actor.h" + #include <ydb/core/protos/sqs.pb.h> #include <ydb/core/ymq/base/counters.h> #include <ydb/core/ymq/base/security.h> #include <library/cpp/actors/core/hfunc.h> - -#include <util/string/builder.h> -#include <util/system/defaults.h> - - -namespace NKikimr::NSQS { - -void TProxyActor::Bootstrap() { - this->Become(&TProxyActor::StateFunc); - - StartTs_ = TActivationContext::Now(); - RLOG_SQS_DEBUG("Request proxy started"); - const auto& cfg = Cfg(); - - if (cfg.GetYandexCloudMode()) { + +#include <util/string/builder.h> +#include <util/system/defaults.h> + + +namespace NKikimr::NSQS { + +void TProxyActor::Bootstrap() { + this->Become(&TProxyActor::StateFunc); + + StartTs_ = TActivationContext::Now(); + RLOG_SQS_DEBUG("Request proxy started"); + const auto& cfg = Cfg(); + + if (cfg.GetYandexCloudMode()) { TString securityToken; #define SQS_REQUEST_CASE(action) \ const auto& request = Request_.Y_CAT(Get, action)(); \ @@ -32,121 +32,121 @@ void TProxyActor::Bootstrap() { FolderId_ = TString(tokenBuf.NextTok(':')); // TODO: handle empty cloud id better - RLOG_SQS_DEBUG("Proxy actor: used " << UserName_ << " as an account name and " << QueueName_ << " as a queue name"); - } - - if (!UserName_ || !QueueName_) { - RLOG_SQS_WARN("Validation error: No " << (!UserName_ ? "user name" : "queue name") << " in proxy actor"); - SendErrorAndDie(NErrors::INVALID_PARAMETER_VALUE, "Both account and queue name should be specified."); - return; - } - - if (cfg.GetRequestTimeoutMs()) { - this->Schedule(TDuration::MilliSeconds(cfg.GetRequestTimeoutMs()), new TEvWakeup(), TimeoutCookie_.Get()); - } - - RequestConfiguration(); -} - -void TProxyActor::HandleConfiguration(TSqsEvents::TEvConfiguration::TPtr& ev) { - const TDuration confDuration = TActivationContext::Now() - StartTs_; - RLOG_SQS_DEBUG("Get configuration duration: " << confDuration.MilliSeconds() << "ms"); - - QueueCounters_ = std::move(ev->Get()->QueueCounters); - UserCounters_ = std::move(ev->Get()->UserCounters); - if (QueueCounters_) { - auto* detailedCounters = QueueCounters_ ? 
QueueCounters_->GetDetailedCounters() : nullptr; - COLLECT_HISTOGRAM_COUNTER(detailedCounters, GetConfiguration_Duration, confDuration.MilliSeconds()); - } - - if (ev->Get()->Fail) { - RLOG_SQS_ERROR("Failed to get configuration"); - SendErrorAndDie(NErrors::INTERNAL_FAILURE, "Failed to get configuration."); - return; - } - - if (!ev->Get()->QueueExists) { - SendErrorAndDie(NErrors::NON_EXISTENT_QUEUE); - return; - } - - Send(MakeSqsProxyServiceID(SelfId().NodeId()), MakeHolder<TSqsEvents::TEvProxySqsRequest>(Request_, UserName_, QueueName_)); -} - -STATEFN(TProxyActor::StateFunc) { - switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvConfiguration, HandleConfiguration); - hFunc(TSqsEvents::TEvProxySqsResponse, HandleResponse); - hFunc(TEvWakeup, HandleWakeup); - } -} - -void TProxyActor::RequestConfiguration() { - Send(MakeSqsServiceID(SelfId().NodeId()), - MakeHolder<TSqsEvents::TEvGetConfiguration>( - RequestId_, - UserName_, - QueueName_) - ); -} - -void TProxyActor::SendReplyAndDie(const NKikimrClient::TSqsResponse& resp) { - if (ErrorResponse_) { - RLOG_SQS_WARN("Sending error reply from proxy actor: " << resp); - } else { - RLOG_SQS_DEBUG("Sending reply from proxy actor: " << resp); + RLOG_SQS_DEBUG("Proxy actor: used " << UserName_ << " as an account name and " << QueueName_ << " as a queue name"); } - Cb_->DoSendReply(resp); - PassAway(); -} -void TProxyActor::SendErrorAndDie(const TErrorClass& error, const TString& message) { - ErrorResponse_ = true; - auto* detailedCounters = UserCounters_ ? UserCounters_->GetDetailedCounters() : nullptr; - if (detailedCounters) { - detailedCounters->APIStatuses.AddError(error.ErrorCode); - } - NKikimrClient::TSqsResponse response; + if (!UserName_ || !QueueName_) { + RLOG_SQS_WARN("Validation error: No " << (!UserName_ ? "user name" : "queue name") << " in proxy actor"); + SendErrorAndDie(NErrors::INVALID_PARAMETER_VALUE, "Both account and queue name should be specified."); + return; + } + + if (cfg.GetRequestTimeoutMs()) { + this->Schedule(TDuration::MilliSeconds(cfg.GetRequestTimeoutMs()), new TEvWakeup(), TimeoutCookie_.Get()); + } + + RequestConfiguration(); +} + +void TProxyActor::HandleConfiguration(TSqsEvents::TEvConfiguration::TPtr& ev) { + const TDuration confDuration = TActivationContext::Now() - StartTs_; + RLOG_SQS_DEBUG("Get configuration duration: " << confDuration.MilliSeconds() << "ms"); + + QueueCounters_ = std::move(ev->Get()->QueueCounters); + UserCounters_ = std::move(ev->Get()->UserCounters); + if (QueueCounters_) { + auto* detailedCounters = QueueCounters_ ? 
QueueCounters_->GetDetailedCounters() : nullptr; + COLLECT_HISTOGRAM_COUNTER(detailedCounters, GetConfiguration_Duration, confDuration.MilliSeconds()); + } + + if (ev->Get()->Fail) { + RLOG_SQS_ERROR("Failed to get configuration"); + SendErrorAndDie(NErrors::INTERNAL_FAILURE, "Failed to get configuration."); + return; + } + + if (!ev->Get()->QueueExists) { + SendErrorAndDie(NErrors::NON_EXISTENT_QUEUE); + return; + } + + Send(MakeSqsProxyServiceID(SelfId().NodeId()), MakeHolder<TSqsEvents::TEvProxySqsRequest>(Request_, UserName_, QueueName_)); +} + +STATEFN(TProxyActor::StateFunc) { + switch (ev->GetTypeRewrite()) { + hFunc(TSqsEvents::TEvConfiguration, HandleConfiguration); + hFunc(TSqsEvents::TEvProxySqsResponse, HandleResponse); + hFunc(TEvWakeup, HandleWakeup); + } +} + +void TProxyActor::RequestConfiguration() { + Send(MakeSqsServiceID(SelfId().NodeId()), + MakeHolder<TSqsEvents::TEvGetConfiguration>( + RequestId_, + UserName_, + QueueName_) + ); +} + +void TProxyActor::SendReplyAndDie(const NKikimrClient::TSqsResponse& resp) { + if (ErrorResponse_) { + RLOG_SQS_WARN("Sending error reply from proxy actor: " << resp); + } else { + RLOG_SQS_DEBUG("Sending reply from proxy actor: " << resp); + } + Cb_->DoSendReply(resp); + PassAway(); +} + +void TProxyActor::SendErrorAndDie(const TErrorClass& error, const TString& message) { + ErrorResponse_ = true; + auto* detailedCounters = UserCounters_ ? UserCounters_->GetDetailedCounters() : nullptr; + if (detailedCounters) { + detailedCounters->APIStatuses.AddError(error.ErrorCode); + } + NKikimrClient::TSqsResponse response; #define SQS_REQUEST_CASE(action) \ - MakeError(response.Y_CAT(Mutable, action)(), error, message); \ - response.Y_CAT(Mutable, action)()->SetRequestId(RequestId_); - + MakeError(response.Y_CAT(Mutable, action)(), error, message); \ + response.Y_CAT(Mutable, action)()->SetRequestId(RequestId_); + SQS_SWITCH_REQUEST(Request_, Y_VERIFY(false)); - + #undef SQS_REQUEST_CASE - if (Cfg().GetYandexCloudMode()) { + if (Cfg().GetYandexCloudMode()) { response.SetFolderId(FolderId_); response.SetIsFifo(false); response.SetResourceId(QueueName_); } - SendReplyAndDie(response); -} - -void TProxyActor::HandleResponse(TSqsEvents::TEvProxySqsResponse::TPtr& ev) { - RLOG_SQS_TRACE("HandleResponse: " << ev->Get()->Record << ", status: " << ev->Get()->ProxyStatus); - if (ev->Get()->ProxyStatus == TSqsEvents::TEvProxySqsResponse::EProxyStatus::OK) { - SendReplyAndDie(ev->Get()->Record); - } else { - SendErrorAndDie(GetErrorClass(ev->Get()->ProxyStatus)); - } -} - -void TProxyActor::HandleWakeup(TEvWakeup::TPtr&) { - TString actionName; - + SendReplyAndDie(response); +} + +void TProxyActor::HandleResponse(TSqsEvents::TEvProxySqsResponse::TPtr& ev) { + RLOG_SQS_TRACE("HandleResponse: " << ev->Get()->Record << ", status: " << ev->Get()->ProxyStatus); + if (ev->Get()->ProxyStatus == TSqsEvents::TEvProxySqsResponse::EProxyStatus::OK) { + SendReplyAndDie(ev->Get()->Record); + } else { + SendErrorAndDie(GetErrorClass(ev->Get()->ProxyStatus)); + } +} + +void TProxyActor::HandleWakeup(TEvWakeup::TPtr&) { + TString actionName; + #define SQS_REQUEST_CASE(action) actionName = Y_STRINGIZE(action); - + SQS_SWITCH_REQUEST(Request_, break;); - + #undef SQS_REQUEST_CASE - - RLOG_SQS_ERROR("Proxy request timeout. User [" << UserName_ << "] Queue [" << QueueName_ << "] Action [" << actionName << "]"); - - if (QueueCounters_) { + + RLOG_SQS_ERROR("Proxy request timeout. 
User [" << UserName_ << "] Queue [" << QueueName_ << "] Action [" << actionName << "]"); + + if (QueueCounters_) { INC_COUNTER_COUPLE(QueueCounters_, RequestTimeouts, request_timeouts_count_per_second); - } else { + } else { auto rootCounters = TIntrusivePtrCntrCouple{ GetSqsServiceCounters(AppData()->Counters, "core"), GetYmqPublicCounters(AppData()->Counters) @@ -154,51 +154,51 @@ void TProxyActor::HandleWakeup(TEvWakeup::TPtr&) { auto [userCountersCouple, queueCountersCouple] = GetUserAndQueueCounters(rootCounters, TQueuePath(Cfg().GetRoot(), UserName_, QueueName_)); if (queueCountersCouple.SqsCounters) { queueCountersCouple.SqsCounters->GetCounter("RequestTimeouts", true)->Inc(); - } + } if (queueCountersCouple.YmqCounters) { queueCountersCouple.YmqCounters->GetCounter("request_timeouts_count_per_second", true)->Inc(); } - } - - SendErrorAndDie(NErrors::TIMEOUT); -} - -const TErrorClass& TProxyActor::GetErrorClass(TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus) { - using EProxyStatus = TSqsEvents::TEvProxySqsResponse::EProxyStatus; - switch (proxyStatus) { + } + + SendErrorAndDie(NErrors::TIMEOUT); +} + +const TErrorClass& TProxyActor::GetErrorClass(TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus) { + using EProxyStatus = TSqsEvents::TEvProxySqsResponse::EProxyStatus; + switch (proxyStatus) { case EProxyStatus::LeaderResolvingError: return NErrors::LEADER_RESOLVING_ERROR; - case EProxyStatus::SessionError: + case EProxyStatus::SessionError: return NErrors::LEADER_SESSION_ERROR; - case EProxyStatus::QueueDoesNotExist: - case EProxyStatus::UserDoesNotExist: - return NErrors::NON_EXISTENT_QUEUE; - default: - return NErrors::INTERNAL_FAILURE; - } -} - -bool TProxyActor::NeedCreateProxyActor(const NKikimrClient::TSqsRequest& req) { + case EProxyStatus::QueueDoesNotExist: + case EProxyStatus::UserDoesNotExist: + return NErrors::NON_EXISTENT_QUEUE; + default: + return NErrors::INTERNAL_FAILURE; + } +} + +bool TProxyActor::NeedCreateProxyActor(const NKikimrClient::TSqsRequest& req) { #define SQS_REQUEST_CASE(action) return true; - + SQS_SWITCH_REQUEST(req, return false) - + #undef SQS_REQUEST_CASE -} - -bool TProxyActor::NeedCreateProxyActor(EAction action) { - return IsProxyAction(action); -} - -void TProxyActor::RetrieveUserAndQueueParameters() { +} + +bool TProxyActor::NeedCreateProxyActor(EAction action) { + return IsProxyAction(action); +} + +void TProxyActor::RetrieveUserAndQueueParameters() { // User name might be changed later in bootstrap for cloud mode #define SQS_REQUEST_CASE(action) \ - UserName_ = Request_.Y_CAT(Get, action)().GetAuth().GetUserName(); \ - QueueName_ = Request_.Y_CAT(Get, action)().GetQueueName(); \ - + UserName_ = Request_.Y_CAT(Get, action)().GetAuth().GetUserName(); \ + QueueName_ = Request_.Y_CAT(Get, action)().GetQueueName(); \ + SQS_SWITCH_REQUEST(Request_, throw TSQSException(NErrors::INVALID_ACTION) << "Incorrect request type") - + #undef SQS_REQUEST_CASE -} - -} // namespace NKikimr::NSQS +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/proxy_actor.h b/ydb/core/ymq/actor/proxy_actor.h index b553716bf06..0d17a629a52 100644 --- a/ydb/core/ymq/actor/proxy_actor.h +++ b/ydb/core/ymq/actor/proxy_actor.h @@ -1,19 +1,19 @@ -#pragma once -#include "defs.h" -#include "actor.h" -#include "error.h" -#include "events.h" -#include "log.h" -#include "serviceid.h" - +#pragma once +#include "defs.h" +#include "actor.h" +#include "error.h" +#include "events.h" +#include "log.h" +#include "serviceid.h" + #include 
<ydb/core/ymq/base/counters.h> #include <ydb/core/ymq/base/debug_info.h> - + #include <library/cpp/actors/core/actor_bootstrapped.h> #include <library/cpp/monlib/dynamic_counters/counters.h> - -namespace NKikimr::NSQS { - + +namespace NKikimr::NSQS { + #define SQS_REQUEST_CASE_WRAP(action) \ case NKikimrClient::TSqsRequest::Y_CAT(k, action): { \ SQS_REQUEST_CASE(action) \ @@ -32,63 +32,63 @@ namespace NKikimr::NSQS { #define SQS_SWITCH_REQUEST(request, default_case) \ SQS_SWITCH_REQUEST_CUSTOM(request, ENUMERATE_PROXY_ACTIONS, default_case) -class TProxyActor - : public TActorBootstrapped<TProxyActor> -{ -public: - TProxyActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyCallback> cb) - : RequestId_(req.GetRequestId()) - , Request_(req) - , Cb_(std::move(cb)) - { - Y_VERIFY(RequestId_); - DebugInfo->ProxyActors.emplace(RequestId_, this); - RetrieveUserAndQueueParameters(); - } - - ~TProxyActor() { - DebugInfo->ProxyActors.EraseKeyValue(RequestId_, this); - } - +class TProxyActor + : public TActorBootstrapped<TProxyActor> +{ +public: + TProxyActor(const NKikimrClient::TSqsRequest& req, THolder<IReplyCallback> cb) + : RequestId_(req.GetRequestId()) + , Request_(req) + , Cb_(std::move(cb)) + { + Y_VERIFY(RequestId_); + DebugInfo->ProxyActors.emplace(RequestId_, this); + RetrieveUserAndQueueParameters(); + } + + ~TProxyActor() { + DebugInfo->ProxyActors.EraseKeyValue(RequestId_, this); + } + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_PROXY_ACTOR; - } - + } + // Watches request type and returns true if this type assumes proxying request to other queue leader node. - // So, TProxyActor must be created only if this function returns true. - static bool NeedCreateProxyActor(const NKikimrClient::TSqsRequest& req); - static bool NeedCreateProxyActor(EAction action); - - void Bootstrap(); - -private: - STATEFN(StateFunc); - - void HandleConfiguration(TSqsEvents::TEvConfiguration::TPtr& ev); - void HandleResponse(TSqsEvents::TEvProxySqsResponse::TPtr& ev); - void HandleWakeup(TEvWakeup::TPtr& ev); - - void RequestConfiguration(); - - void RetrieveUserAndQueueParameters(); - - void SendReplyAndDie(const NKikimrClient::TSqsResponse& resp); - void SendErrorAndDie(const TErrorClass& error, const TString& message = TString()); - static const TErrorClass& GetErrorClass(TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus); - -private: - const TString RequestId_; - NKikimrClient::TSqsRequest Request_; - TString QueueName_; - TString UserName_; + // So, TProxyActor must be created only if this function returns true. 
+ static bool NeedCreateProxyActor(const NKikimrClient::TSqsRequest& req); + static bool NeedCreateProxyActor(EAction action); + + void Bootstrap(); + +private: + STATEFN(StateFunc); + + void HandleConfiguration(TSqsEvents::TEvConfiguration::TPtr& ev); + void HandleResponse(TSqsEvents::TEvProxySqsResponse::TPtr& ev); + void HandleWakeup(TEvWakeup::TPtr& ev); + + void RequestConfiguration(); + + void RetrieveUserAndQueueParameters(); + + void SendReplyAndDie(const NKikimrClient::TSqsResponse& resp); + void SendErrorAndDie(const TErrorClass& error, const TString& message = TString()); + static const TErrorClass& GetErrorClass(TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus); + +private: + const TString RequestId_; + NKikimrClient::TSqsRequest Request_; + TString QueueName_; + TString UserName_; TString FolderId_; - THolder<IReplyCallback> Cb_; - bool ErrorResponse_ = false; - TInstant StartTs_; - TSchedulerCookieHolder TimeoutCookie_ = ISchedulerCookie::Make2Way(); - - TIntrusivePtr<TUserCounters> UserCounters_; - TIntrusivePtr<TQueueCounters> QueueCounters_; -}; - -} // namespace NKikimr::NSQS + THolder<IReplyCallback> Cb_; + bool ErrorResponse_ = false; + TInstant StartTs_; + TSchedulerCookieHolder TimeoutCookie_ = ISchedulerCookie::Make2Way(); + + TIntrusivePtr<TUserCounters> UserCounters_; + TIntrusivePtr<TQueueCounters> QueueCounters_; +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/proxy_service.cpp b/ydb/core/ymq/actor/proxy_service.cpp index 1eecd851031..a840730bf69 100644 --- a/ydb/core/ymq/actor/proxy_service.cpp +++ b/ydb/core/ymq/actor/proxy_service.cpp @@ -1,10 +1,10 @@ -#include "actor.h" +#include "actor.h" #include "executor.h" -#include "log.h" -#include "service.h" +#include "log.h" +#include "service.h" #include "queue_leader.h" #include "params.h" -#include "proxy_service.h" +#include "proxy_service.h" #include "serviceid.h" #include <ydb/core/base/counters.h> @@ -13,202 +13,202 @@ #include <ydb/core/ymq/base/counters.h> #include <ydb/core/ymq/base/secure_protobuf_printer.h> -#include <util/generic/hash.h> -#include <util/generic/hash_set.h> +#include <util/generic/hash.h> +#include <util/generic/hash_set.h> #include <util/generic/map.h> #include <util/generic/queue.h> -#include <queue> - +#include <queue> + using namespace NKikimrTxUserProxy; using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { - -struct TSqsProxyService::TNodeInfo : public TAtomicRefCount<TNodeInfo> { - explicit TNodeInfo(ui32 nodeId) - : NodeId(nodeId) - { - } - - ui32 NodeId = 0; - THashMap<TString, TProxyRequestInfoRef> Requests; // request id -> request // sent proxy requests -}; - -struct TSqsProxyService::TProxyRequestInfo : public TAtomicRefCount<TProxyRequestInfo> { - explicit TProxyRequestInfo(TSqsEvents::TEvProxySqsRequest::TPtr&& ev) - : RequestId(ev->Get()->Record.GetRequestId()) - , ProxyActorId(ev->Sender) - , ProxyRequest(std::move(ev)) - { - } - - TString RequestId; +namespace NKikimr::NSQS { + +struct TSqsProxyService::TNodeInfo : public TAtomicRefCount<TNodeInfo> { + explicit TNodeInfo(ui32 nodeId) + : NodeId(nodeId) + { + } + + ui32 NodeId = 0; + THashMap<TString, TProxyRequestInfoRef> Requests; // request id -> request // sent proxy requests +}; + +struct TSqsProxyService::TProxyRequestInfo : public TAtomicRefCount<TProxyRequestInfo> { + explicit TProxyRequestInfo(TSqsEvents::TEvProxySqsRequest::TPtr&& ev) + : RequestId(ev->Get()->Record.GetRequestId()) + , ProxyActorId(ev->Sender) + , ProxyRequest(std::move(ev)) + { + } + + TString RequestId; 
TActorId ProxyActorId; - TSqsEvents::TEvProxySqsRequest::TPtr ProxyRequest; -}; - -TSqsProxyService::TSqsProxyService() { - DebugInfo->SqsProxyServiceActorPtr = this; -} - -TSqsProxyService::~TSqsProxyService() { - DebugInfo->SqsProxyServiceActorPtr = nullptr; -} - -void TSqsProxyService::Bootstrap() { - LOG_SQS_INFO("Start SQS proxy service actor"); + TSqsEvents::TEvProxySqsRequest::TPtr ProxyRequest; +}; + +TSqsProxyService::TSqsProxyService() { + DebugInfo->SqsProxyServiceActorPtr = this; +} + +TSqsProxyService::~TSqsProxyService() { + DebugInfo->SqsProxyServiceActorPtr = nullptr; +} + +void TSqsProxyService::Bootstrap() { + LOG_SQS_INFO("Start SQS proxy service actor"); Become(&TThis::StateFunc); SqsCounters_ = GetSqsServiceCounters(AppData()->Counters, "core"); YmqPublicCounters_ = GetYmqPublicCounters(AppData()->Counters); } -void TSqsProxyService::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - ev->Get()->Call(); +void TSqsProxyService::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + ev->Get()->Call(); } -void TSqsProxyService::HandleSqsRequest(TSqsEvents::TEvSqsRequest::TPtr& ev) { - auto replier = MakeHolder<TReplierToSenderActorCallback>(ev); - const auto& request = replier->Request->Get()->Record; - RLOG_SQS_REQ_DEBUG(request.GetRequestId(), "Received Sqs Request: " << SecureShortUtf8DebugString(request)); - Register(CreateActionActor(request, std::move(replier))); -} +void TSqsProxyService::HandleSqsRequest(TSqsEvents::TEvSqsRequest::TPtr& ev) { + auto replier = MakeHolder<TReplierToSenderActorCallback>(ev); + const auto& request = replier->Request->Get()->Record; + RLOG_SQS_REQ_DEBUG(request.GetRequestId(), "Received Sqs Request: " << SecureShortUtf8DebugString(request)); + Register(CreateActionActor(request, std::move(replier))); +} -void TSqsProxyService::HandleProxySqsRequest(TSqsEvents::TEvProxySqsRequest::TPtr& ev) { - TProxyRequestInfoRef request = new TProxyRequestInfo(std::move(ev)); - RequestsToProxy_.emplace(request->RequestId, request); +void TSqsProxyService::HandleProxySqsRequest(TSqsEvents::TEvProxySqsRequest::TPtr& ev) { + TProxyRequestInfoRef request = new TProxyRequestInfo(std::move(ev)); + RequestsToProxy_.emplace(request->RequestId, request); Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvGetLeaderNodeForQueueRequest(request->RequestId, request->ProxyRequest->Get()->UserName, request->ProxyRequest->Get()->QueueName)); RLOG_SQS_REQ_DEBUG(request->RequestId, "Send get leader node request to sqs service"); -} - +} + static TSqsEvents::TEvProxySqsResponse::EProxyStatus GetLeaderNodeForQueueStatusToProxyStatus(TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus status) { - switch (status) { + switch (status) { case TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::OK: - return TSqsEvents::TEvProxySqsResponse::EProxyStatus::OK; + return TSqsEvents::TEvProxySqsResponse::EProxyStatus::OK; case TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::NoUser: - return TSqsEvents::TEvProxySqsResponse::EProxyStatus::UserDoesNotExist; + return TSqsEvents::TEvProxySqsResponse::EProxyStatus::UserDoesNotExist; case TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::NoQueue: - return TSqsEvents::TEvProxySqsResponse::EProxyStatus::QueueDoesNotExist; + return TSqsEvents::TEvProxySqsResponse::EProxyStatus::QueueDoesNotExist; case TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::FailedToConnectToLeader: - return TSqsEvents::TEvProxySqsResponse::EProxyStatus::SessionError; + return 
TSqsEvents::TEvProxySqsResponse::EProxyStatus::SessionError; case TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::Error: - default: + default: return TSqsEvents::TEvProxySqsResponse::EProxyStatus::LeaderResolvingError; - } -} - + } +} + void TSqsProxyService::HandleGetLeaderNodeForQueueResponse(TSqsEvents::TEvGetLeaderNodeForQueueResponse::TPtr& ev) { RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "Got leader node for queue response. Node id: " << ev->Get()->NodeId << ". Status: " << static_cast<int>(ev->Get()->Status)); - const auto requestIt = RequestsToProxy_.find(ev->Get()->RequestId); - if (requestIt == RequestsToProxy_.end()) { - RLOG_SQS_REQ_ERROR(ev->Get()->RequestId, "Request was not found in requests to proxy map"); - return; - } - TProxyRequestInfoRef request = requestIt->second; - RequestsToProxy_.erase(requestIt); - + const auto requestIt = RequestsToProxy_.find(ev->Get()->RequestId); + if (requestIt == RequestsToProxy_.end()) { + RLOG_SQS_REQ_ERROR(ev->Get()->RequestId, "Request was not found in requests to proxy map"); + return; + } + TProxyRequestInfoRef request = requestIt->second; + RequestsToProxy_.erase(requestIt); + if (ev->Get()->Status == TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::OK) { - TNodeInfoRef nodeInfo = GetNodeInfo(ev->Get()->NodeId); - SendProxyRequestToNode(*nodeInfo, request); - } else { + TNodeInfoRef nodeInfo = GetNodeInfo(ev->Get()->NodeId); + SendProxyRequestToNode(*nodeInfo, request); + } else { SendProxyError(request, GetLeaderNodeForQueueStatusToProxyStatus(ev->Get()->Status)); - } -} - -void TSqsProxyService::HandleSqsResponse(TSqsEvents::TEvSqsResponse::TPtr& ev) { - LOG_SQS_TRACE("HandleSqsResponse " << SecureShortUtf8DebugString(ev->Get()->Record)); - const ui32 nodeId = ev->Sender.NodeId(); - const auto nodeInfoIt = NodesInfo_.find(nodeId); - if (nodeInfoIt == NodesInfo_.end()) { - LOG_SQS_ERROR("Failed to find node id " << nodeId << " for response " << ev->Get()->Record); - return; - } - const TString& requestId = ev->Get()->Record.GetRequestId(); - auto& requests = nodeInfoIt->second->Requests; - const auto proxyRequestIt = requests.find(requestId); - if (proxyRequestIt == requests.end()) { - LOG_SQS_ERROR("Failed to find request " << requestId << " for node id " << nodeId << ". 
Response: " << ev->Get()->Record); - return; - } - LOG_SQS_TRACE("Sending answer to proxy actor " << proxyRequestIt->second->ProxyActorId << ": " << SecureShortUtf8DebugString(ev->Get()->Record)); - Send(proxyRequestIt->second->ProxyActorId, new TSqsEvents::TEvProxySqsResponse(std::move(ev->Get()->Record))); - requests.erase(proxyRequestIt); -} - -void TSqsProxyService::HandleDisconnect(ui32 nodeId) { - auto nodeIt = NodesInfo_.find(nodeId); - if (nodeIt != NodesInfo_.end()) { - SendProxyErrors(*nodeIt->second, TSqsEvents::TEvProxySqsResponse::EProxyStatus::SessionError); - } -} - -void TSqsProxyService::HandleDisconnect(TEvInterconnect::TEvNodeDisconnected::TPtr& ev) { - const ui32 nodeId = ev->Get()->NodeId; - LOG_SQS_TRACE("HandleDisconnect from node " << nodeId); - HandleDisconnect(nodeId); -} - -void TSqsProxyService::HandleConnect(TEvInterconnect::TEvNodeConnected::TPtr& ev) { - LOG_SQS_TRACE("HandleConnect from node " << ev->Get()->NodeId); -} - -void TSqsProxyService::HandleUndelivered(TEvents::TEvUndelivered::TPtr& ev) { - const ui32 nodeId = ev->Sender.NodeId(); - LOG_SQS_TRACE("HandleUndelivered from node " << nodeId << ", reason: " << ev->Get()->Reason << ", unsure: " << ev->Get()->Unsure); - HandleDisconnect(nodeId); -} - -STATEFN(TSqsProxyService::StateFunc) { - switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + } +} + +void TSqsProxyService::HandleSqsResponse(TSqsEvents::TEvSqsResponse::TPtr& ev) { + LOG_SQS_TRACE("HandleSqsResponse " << SecureShortUtf8DebugString(ev->Get()->Record)); + const ui32 nodeId = ev->Sender.NodeId(); + const auto nodeInfoIt = NodesInfo_.find(nodeId); + if (nodeInfoIt == NodesInfo_.end()) { + LOG_SQS_ERROR("Failed to find node id " << nodeId << " for response " << ev->Get()->Record); + return; + } + const TString& requestId = ev->Get()->Record.GetRequestId(); + auto& requests = nodeInfoIt->second->Requests; + const auto proxyRequestIt = requests.find(requestId); + if (proxyRequestIt == requests.end()) { + LOG_SQS_ERROR("Failed to find request " << requestId << " for node id " << nodeId << ". 
Response: " << ev->Get()->Record); + return; + } + LOG_SQS_TRACE("Sending answer to proxy actor " << proxyRequestIt->second->ProxyActorId << ": " << SecureShortUtf8DebugString(ev->Get()->Record)); + Send(proxyRequestIt->second->ProxyActorId, new TSqsEvents::TEvProxySqsResponse(std::move(ev->Get()->Record))); + requests.erase(proxyRequestIt); +} + +void TSqsProxyService::HandleDisconnect(ui32 nodeId) { + auto nodeIt = NodesInfo_.find(nodeId); + if (nodeIt != NodesInfo_.end()) { + SendProxyErrors(*nodeIt->second, TSqsEvents::TEvProxySqsResponse::EProxyStatus::SessionError); + } +} + +void TSqsProxyService::HandleDisconnect(TEvInterconnect::TEvNodeDisconnected::TPtr& ev) { + const ui32 nodeId = ev->Get()->NodeId; + LOG_SQS_TRACE("HandleDisconnect from node " << nodeId); + HandleDisconnect(nodeId); +} + +void TSqsProxyService::HandleConnect(TEvInterconnect::TEvNodeConnected::TPtr& ev) { + LOG_SQS_TRACE("HandleConnect from node " << ev->Get()->NodeId); +} + +void TSqsProxyService::HandleUndelivered(TEvents::TEvUndelivered::TPtr& ev) { + const ui32 nodeId = ev->Sender.NodeId(); + LOG_SQS_TRACE("HandleUndelivered from node " << nodeId << ", reason: " << ev->Get()->Reason << ", unsure: " << ev->Get()->Unsure); + HandleDisconnect(nodeId); +} + +STATEFN(TSqsProxyService::StateFunc) { + switch (ev->GetTypeRewrite()) { + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); hFunc(TSqsEvents::TEvSqsRequest, HandleSqsRequest); // request to queue leader node (proxied) // creates request worker and calls it hFunc(TSqsEvents::TEvProxySqsRequest, HandleProxySqsRequest); // request from proxy on our node // proxies request to queue leader node (TEvSqsRequest) - hFunc(TSqsEvents::TEvSqsResponse, HandleSqsResponse); // response from other node on TEvSqsRequest // sends response to source proxy on our node - hFunc(TEvInterconnect::TEvNodeDisconnected, HandleDisconnect); - hFunc(TEvInterconnect::TEvNodeConnected, HandleConnect); - hFunc(TEvents::TEvUndelivered, HandleUndelivered); + hFunc(TSqsEvents::TEvSqsResponse, HandleSqsResponse); // response from other node on TEvSqsRequest // sends response to source proxy on our node + hFunc(TEvInterconnect::TEvNodeDisconnected, HandleDisconnect); + hFunc(TEvInterconnect::TEvNodeConnected, HandleConnect); + hFunc(TEvents::TEvUndelivered, HandleUndelivered); hFunc(TSqsEvents::TEvGetLeaderNodeForQueueResponse, HandleGetLeaderNodeForQueueResponse); - default: - LOG_SQS_ERROR("Unknown type of event came to SQS service actor: " << ev->Type << " (" << ev->GetBase()->ToString() << "), sender: " << ev->Sender); - } -} - -void TSqsProxyService::SendProxyRequestToNode(TNodeInfo& nodeInfo, TProxyRequestInfoRef request) { + default: + LOG_SQS_ERROR("Unknown type of event came to SQS service actor: " << ev->Type << " (" << ev->GetBase()->ToString() << "), sender: " << ev->Sender); + } +} + +void TSqsProxyService::SendProxyRequestToNode(TNodeInfo& nodeInfo, TProxyRequestInfoRef request) { RLOG_SQS_REQ_TRACE(request->RequestId, "Sending request from proxy to leader node " << nodeInfo.NodeId << ": " << SecureShortUtf8DebugString(request->ProxyRequest->Get()->Record)); - Send(MakeSqsProxyServiceID(nodeInfo.NodeId), new TSqsEvents::TEvSqsRequest(std::move(request->ProxyRequest->Get()->Record)), - IEventHandle::FlagTrackDelivery | IEventHandle::FlagSubscribeOnSession); - nodeInfo.Requests.emplace(request->RequestId, std::move(request)); -} - -TSqsProxyService::TNodeInfoRef TSqsProxyService::GetNodeInfo(ui32 nodeId) { - const auto nodeInfoIt = NodesInfo_.find(nodeId); - if (nodeInfoIt != 
NodesInfo_.end()) { - return nodeInfoIt->second; - } - - // create new node info - TNodeInfoRef nodeInfo = new TNodeInfo(nodeId); - NodesInfo_[nodeId] = nodeInfo; - return nodeInfo; -} - -void TSqsProxyService::SendProxyError(TProxyRequestInfoRef request, TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus) { - RLOG_SQS_REQ_TRACE(request->RequestId, "Sending proxy status " << proxyStatus << " to proxy actor"); + Send(MakeSqsProxyServiceID(nodeInfo.NodeId), new TSqsEvents::TEvSqsRequest(std::move(request->ProxyRequest->Get()->Record)), + IEventHandle::FlagTrackDelivery | IEventHandle::FlagSubscribeOnSession); + nodeInfo.Requests.emplace(request->RequestId, std::move(request)); +} + +TSqsProxyService::TNodeInfoRef TSqsProxyService::GetNodeInfo(ui32 nodeId) { + const auto nodeInfoIt = NodesInfo_.find(nodeId); + if (nodeInfoIt != NodesInfo_.end()) { + return nodeInfoIt->second; + } + + // create new node info + TNodeInfoRef nodeInfo = new TNodeInfo(nodeId); + NodesInfo_[nodeId] = nodeInfo; + return nodeInfo; +} + +void TSqsProxyService::SendProxyError(TProxyRequestInfoRef request, TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus) { + RLOG_SQS_REQ_TRACE(request->RequestId, "Sending proxy status " << proxyStatus << " to proxy actor"); THolder<TSqsEvents::TEvProxySqsResponse> answer = MakeHolder<TSqsEvents::TEvProxySqsResponse>(); - answer->ProxyStatus = proxyStatus; - Send(request->ProxyActorId, std::move(answer)); -} - -void TSqsProxyService::SendProxyErrors(TNodeInfo& nodeInfo, TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus) { - for (auto& req : nodeInfo.Requests) { - SendProxyError(std::move(req.second), proxyStatus); - } - nodeInfo.Requests.clear(); -} - -IActor* CreateSqsProxyService() { - return new TSqsProxyService(); -} - -} // namespace NKikimr::NSQS + answer->ProxyStatus = proxyStatus; + Send(request->ProxyActorId, std::move(answer)); +} + +void TSqsProxyService::SendProxyErrors(TNodeInfo& nodeInfo, TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus) { + for (auto& req : nodeInfo.Requests) { + SendProxyError(std::move(req.second), proxyStatus); + } + nodeInfo.Requests.clear(); +} + +IActor* CreateSqsProxyService() { + return new TSqsProxyService(); +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/proxy_service.h b/ydb/core/ymq/actor/proxy_service.h index a3c991105fe..e95298f9ff7 100644 --- a/ydb/core/ymq/actor/proxy_service.h +++ b/ydb/core/ymq/actor/proxy_service.h @@ -1,6 +1,6 @@ #pragma once -#include "defs.h" -#include "log.h" +#include "defs.h" +#include "log.h" #include "events.h" #include <ydb/core/ymq/base/query_id.h> @@ -8,7 +8,7 @@ #include <ydb/core/tx/schemeshard/schemeshard.h> #include <ydb/core/protos/config.pb.h> #include <ydb/core/ymq/actor/actor.h> - + #include <library/cpp/actors/core/actor_bootstrapped.h> #include <library/cpp/actors/core/actor.h> #include <library/cpp/actors/core/interconnect.h> @@ -16,82 +16,82 @@ #include <library/cpp/actors/core/hfunc.h> #include <util/generic/hash.h> -#include <util/generic/hash_set.h> +#include <util/generic/hash_set.h> #include <util/generic/queue.h> #include <util/generic/vector.h> -namespace NKikimr::NSQS { - -struct TReplierToSenderActorCallback : public IReplyCallback { - TReplierToSenderActorCallback(TSqsEvents::TEvSqsRequest::TPtr& ev) - : Request(ev) - { - } - - void DoSendReply(const NKikimrClient::TSqsResponse& resp) override { - NKikimrClient::TSqsResponse response = resp; - response.SetRequestId(Request->Get()->Record.GetRequestId()); - - 
RLOG_SQS_REQ_TRACE(Request->Get()->Record.GetRequestId(), "Sending sqs response: " << response); +namespace NKikimr::NSQS { + +struct TReplierToSenderActorCallback : public IReplyCallback { + TReplierToSenderActorCallback(TSqsEvents::TEvSqsRequest::TPtr& ev) + : Request(ev) + { + } + + void DoSendReply(const NKikimrClient::TSqsResponse& resp) override { + NKikimrClient::TSqsResponse response = resp; + response.SetRequestId(Request->Get()->Record.GetRequestId()); + + RLOG_SQS_REQ_TRACE(Request->Get()->Record.GetRequestId(), "Sending sqs response: " << response); const TActorId selfId = TActivationContext::AsActorContext().SelfID; - TActivationContext::Send( - new IEventHandle( - Request->Sender, - selfId, - new TSqsEvents::TEvSqsResponse(std::move(response)))); - } - - TSqsEvents::TEvSqsRequest::TPtr Request; -}; - -class TSqsProxyService - : public TActorBootstrapped<TSqsProxyService> + TActivationContext::Send( + new IEventHandle( + Request->Sender, + selfId, + new TSqsEvents::TEvSqsResponse(std::move(response)))); + } + + TSqsEvents::TEvSqsRequest::TPtr Request; +}; + +class TSqsProxyService + : public TActorBootstrapped<TSqsProxyService> { +public: + struct TNodeInfo; + using TNodeInfoRef = TIntrusivePtr<TNodeInfo>; + + struct TProxyRequestInfo; + using TProxyRequestInfoRef = TIntrusivePtr<TProxyRequestInfo>; + public: - struct TNodeInfo; - using TNodeInfoRef = TIntrusivePtr<TNodeInfo>; - - struct TProxyRequestInfo; - using TProxyRequestInfoRef = TIntrusivePtr<TProxyRequestInfo>; - -public: - TSqsProxyService(); - ~TSqsProxyService(); + TSqsProxyService(); + ~TSqsProxyService(); - void Bootstrap(); + void Bootstrap(); static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_PROXY_SERVICE_ACTOR; - } - + } + private: - void SendProxyRequestToNode(TNodeInfo& nodeInfo, TProxyRequestInfoRef request); - - TNodeInfoRef GetNodeInfo(ui32 nodeId); - - void SendProxyError(TProxyRequestInfoRef request, TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus); - void SendProxyErrors(TNodeInfo& nodeInfo, TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus); - + void SendProxyRequestToNode(TNodeInfo& nodeInfo, TProxyRequestInfoRef request); + + TNodeInfoRef GetNodeInfo(ui32 nodeId); + + void SendProxyError(TProxyRequestInfoRef request, TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus); + void SendProxyErrors(TNodeInfo& nodeInfo, TSqsEvents::TEvProxySqsResponse::EProxyStatus proxyStatus); + private: - STATEFN(StateFunc); - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - void HandleSqsRequest(TSqsEvents::TEvSqsRequest::TPtr& ev); // request from proxy - void HandleProxySqsRequest(TSqsEvents::TEvProxySqsRequest::TPtr& ev); // request for proxying - void HandleSqsResponse(TSqsEvents::TEvSqsResponse::TPtr& ev); // response for proxying - void HandleDisconnect(TEvInterconnect::TEvNodeDisconnected::TPtr& ev); - void HandleConnect(TEvInterconnect::TEvNodeConnected::TPtr& ev); - void HandleUndelivered(TEvents::TEvUndelivered::TPtr& ev); - void HandleDisconnect(ui32 nodeId); + STATEFN(StateFunc); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void HandleSqsRequest(TSqsEvents::TEvSqsRequest::TPtr& ev); // request from proxy + void HandleProxySqsRequest(TSqsEvents::TEvProxySqsRequest::TPtr& ev); // request for proxying + void HandleSqsResponse(TSqsEvents::TEvSqsResponse::TPtr& ev); // response for proxying + void HandleDisconnect(TEvInterconnect::TEvNodeDisconnected::TPtr& ev); + void 
HandleConnect(TEvInterconnect::TEvNodeConnected::TPtr& ev); + void HandleUndelivered(TEvents::TEvUndelivered::TPtr& ev); + void HandleDisconnect(ui32 nodeId); void HandleGetLeaderNodeForQueueResponse(TSqsEvents::TEvGetLeaderNodeForQueueResponse::TPtr& ev); private: TIntrusivePtr<NMonitoring::TDynamicCounters> SqsCounters_; TIntrusivePtr<NMonitoring::TDynamicCounters> YmqPublicCounters_; - + /// A map of node ids to TNodeIfno THashMap<ui32, TNodeInfoRef> NodesInfo_; - - THashMap<TString, TProxyRequestInfoRef> RequestsToProxy_; + + THashMap<TString, TProxyRequestInfoRef> RequestsToProxy_; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/purge.cpp b/ydb/core/ymq/actor/purge.cpp index aff713d7844..2663a1d7593 100644 --- a/ydb/core/ymq/actor/purge.cpp +++ b/ydb/core/ymq/actor/purge.cpp @@ -1,6 +1,6 @@ -#include "log.h" -#include "cfg.h" -#include "executor.h" +#include "log.h" +#include "cfg.h" +#include "executor.h" #include "params.h" #include "purge.h" #include "serviceid.h" @@ -11,225 +11,225 @@ using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { TPurgeActor::TPurgeActor(const TQueuePath& queuePath, TIntrusivePtr<TQueueCounters> counters, const TActorId& queueLeader, bool isFifo) : QueuePath_(queuePath) - , RequestId_(CreateGuidAsString()) - , Counters_(std::move(counters)) + , RequestId_(CreateGuidAsString()) + , Counters_(std::move(counters)) , QueueLeader_(queueLeader) - , IsFifo_(isFifo) -{ - DebugInfo->QueuePurgeActors.emplace(TStringBuilder() << TLogQueueName(QueuePath_), this); -} + , IsFifo_(isFifo) +{ + DebugInfo->QueuePurgeActors.emplace(TStringBuilder() << TLogQueueName(QueuePath_), this); +} -TPurgeActor::~TPurgeActor() { - DebugInfo->QueuePurgeActors.EraseKeyValue(TStringBuilder() << TLogQueueName(QueuePath_), this); -} +TPurgeActor::~TPurgeActor() { + DebugInfo->QueuePurgeActors.EraseKeyValue(TStringBuilder() << TLogQueueName(QueuePath_), this); +} -void TPurgeActor::Bootstrap() { - RLOG_SQS_INFO("Create purge actor for queue " << TString(QueuePath_)); +void TPurgeActor::Bootstrap() { + RLOG_SQS_INFO("Create purge actor for queue " << TString(QueuePath_)); Become(&TThis::StateFunc); } -void TPurgeActor::MakeGetRetentionOffsetRequest(const ui64 shardId, TShard* shard) { - shard->KeysTruncated = false; - const TInstant boundary = shard->TargetBoundary; - auto onExecuted = [this, shardId, shard, boundary] (const TSqsEvents::TEvExecuted::TRecord& ev) { - const ui32 status = ev.GetStatus(); - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); - const TValue& messages = val["messages"]; - shard->KeysTruncated = val["truncated"]; - if (messages.Size() > 0) { - const ui64 from = messages[0]["Offset"]; - const ui64 to = messages[messages.Size() - 1]["Offset"]; - MakeStage1Request(shardId, shard, std::make_pair(from, to)); - } else { - RLOG_SQS_DEBUG("No messages to cleanup"); - shard->PreviousSuccessfullyProcessedLastMessage.SentTimestamp = boundary; - shard->Purging = false; - shard->BoundaryPurged = shard->TargetBoundary; - } - } else { - RLOG_SQS_WARN("Failed to execute cleanup request on queue [" << QueuePath_ << "] shard [" << shardId << "] get retention offset: " << ev); - shard->Purging = false; - shard->TargetBoundary = shard->BoundaryPurged; - } - }; - - TExecutorBuilder(SelfId(), RequestId_) - .User(QueuePath_.UserName) - .Queue(QueuePath_.QueueName) - .Shard(shardId) +void 
TPurgeActor::MakeGetRetentionOffsetRequest(const ui64 shardId, TShard* shard) { + shard->KeysTruncated = false; + const TInstant boundary = shard->TargetBoundary; + auto onExecuted = [this, shardId, shard, boundary] (const TSqsEvents::TEvExecuted::TRecord& ev) { + const ui32 status = ev.GetStatus(); + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); + const TValue& messages = val["messages"]; + shard->KeysTruncated = val["truncated"]; + if (messages.Size() > 0) { + const ui64 from = messages[0]["Offset"]; + const ui64 to = messages[messages.Size() - 1]["Offset"]; + MakeStage1Request(shardId, shard, std::make_pair(from, to)); + } else { + RLOG_SQS_DEBUG("No messages to cleanup"); + shard->PreviousSuccessfullyProcessedLastMessage.SentTimestamp = boundary; + shard->Purging = false; + shard->BoundaryPurged = shard->TargetBoundary; + } + } else { + RLOG_SQS_WARN("Failed to execute cleanup request on queue [" << QueuePath_ << "] shard [" << shardId << "] get retention offset: " << ev); + shard->Purging = false; + shard->TargetBoundary = shard->BoundaryPurged; + } + }; + + TExecutorBuilder(SelfId(), RequestId_) + .User(QueuePath_.UserName) + .Queue(QueuePath_.QueueName) + .Shard(shardId) .QueueLeader(QueueLeader_) - .QueryId(GET_RETENTION_OFFSET_ID) - .Counters(Counters_) - .RetryOnTimeout() - .OnExecuted(onExecuted) - .Params() - .Uint64("OFFSET_FROM", shard->PreviousSuccessfullyProcessedLastMessage.Offset) - .Uint64("TIME_FROM", shard->PreviousSuccessfullyProcessedLastMessage.SentTimestamp.MilliSeconds()) - .Uint64("TIME_TO", boundary.MilliSeconds()) - .Uint64("BATCH_SIZE", Cfg().GetCleanupBatchSize()) - .ParentBuilder().Start(); -} - -void TPurgeActor::MakeStage1Request(const ui64 shardId, TShard* shard, const std::pair<ui64, ui64>& offsets) { - auto onExecuted = [this, shardId, shard] (const TSqsEvents::TEvExecuted::TRecord& ev) { - const ui32 status = ev.GetStatus(); - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); - const TValue& messages = val["messages"]; - TMaybe<TValue> inflyMessages; - if (!IsFifo_) { - inflyMessages.ConstructInPlace(val["inflyMessages"]); - } - const bool truncated = val["truncated"]; - shard->KeysTruncated = shard->KeysTruncated || truncated; - if (messages.Size() > 0 || !IsFifo_ && inflyMessages->Size() > 0) { - const ui64 cleanupVersion = val["cleanupVersion"]; - MakeStage2Request(cleanupVersion, messages, inflyMessages, shardId, shard); - } else { - RLOG_SQS_DEBUG("No messages to cleanup"); - shard->Purging = false; - shard->BoundaryPurged = shard->TargetBoundary; - } - } else { - RLOG_SQS_WARN("Failed to execute cleanup request on queue [" << QueuePath_ << "] shard [" << shardId << "] stage 1: " << ev); - shard->Purging = false; - shard->TargetBoundary = shard->BoundaryPurged; - } - }; - - TExecutorBuilder(SelfId(), RequestId_) - .User(QueuePath_.UserName) - .Queue(QueuePath_.QueueName) - .Shard(shardId) + .QueryId(GET_RETENTION_OFFSET_ID) + .Counters(Counters_) + .RetryOnTimeout() + .OnExecuted(onExecuted) + .Params() + .Uint64("OFFSET_FROM", shard->PreviousSuccessfullyProcessedLastMessage.Offset) + .Uint64("TIME_FROM", shard->PreviousSuccessfullyProcessedLastMessage.SentTimestamp.MilliSeconds()) + .Uint64("TIME_TO", boundary.MilliSeconds()) + .Uint64("BATCH_SIZE", Cfg().GetCleanupBatchSize()) + .ParentBuilder().Start(); +} + +void 
TPurgeActor::MakeStage1Request(const ui64 shardId, TShard* shard, const std::pair<ui64, ui64>& offsets) { + auto onExecuted = [this, shardId, shard] (const TSqsEvents::TEvExecuted::TRecord& ev) { + const ui32 status = ev.GetStatus(); + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); + const TValue& messages = val["messages"]; + TMaybe<TValue> inflyMessages; + if (!IsFifo_) { + inflyMessages.ConstructInPlace(val["inflyMessages"]); + } + const bool truncated = val["truncated"]; + shard->KeysTruncated = shard->KeysTruncated || truncated; + if (messages.Size() > 0 || !IsFifo_ && inflyMessages->Size() > 0) { + const ui64 cleanupVersion = val["cleanupVersion"]; + MakeStage2Request(cleanupVersion, messages, inflyMessages, shardId, shard); + } else { + RLOG_SQS_DEBUG("No messages to cleanup"); + shard->Purging = false; + shard->BoundaryPurged = shard->TargetBoundary; + } + } else { + RLOG_SQS_WARN("Failed to execute cleanup request on queue [" << QueuePath_ << "] shard [" << shardId << "] stage 1: " << ev); + shard->Purging = false; + shard->TargetBoundary = shard->BoundaryPurged; + } + }; + + TExecutorBuilder(SelfId(), RequestId_) + .User(QueuePath_.UserName) + .Queue(QueuePath_.QueueName) + .Shard(shardId) .QueueLeader(QueueLeader_) - .QueryId(PURGE_QUEUE_ID) - .Counters(Counters_) - .RetryOnTimeout() - .OnExecuted(onExecuted) - .Params() - .Uint64("OFFSET_FROM", offsets.first) - .Uint64("OFFSET_TO", offsets.second) - .Uint64("NOW", Now().MilliSeconds()) - .Uint64("SHARD", shardId) - .Uint64("BATCH_SIZE", Cfg().GetCleanupBatchSize()) - .ParentBuilder().Start(); -} - -static void FillMessagesParam(NClient::TWriteValue& messagesParam, const NClient::TValue& messages, ui64& lastOffset, TInstant& lastSentTimestamp, TSqsEvents::TEvInflyIsPurgingNotification* notification = nullptr) { - if (notification) { - notification->Offsets.reserve(messages.Size()); - } - for (size_t i = 0; i < messages.Size(); ++i) { - const TValue& message = messages[i]; - auto messageParam = messagesParam.AddListItem(); - const ui64 offset = message["Offset"]; - const ui64 sentTimestamp = message["SentTimestamp"]; - if (notification) { - notification->Offsets.push_back(offset); - } - messageParam["Offset"] = offset; - messageParam["RandomId"] = ui64(message["RandomId"]); - messageParam["SentTimestamp"] = sentTimestamp; - lastOffset = Max(lastOffset, offset); - lastSentTimestamp = Max(TInstant::MilliSeconds(sentTimestamp), lastSentTimestamp); - } -} - -void TPurgeActor::MakeStage2Request(ui64 cleanupVersion, const TValue& messages, const TMaybe<TValue>& inflyMessages, const ui64 shardId, TShard* shard) { - auto onExecuted = [this, shardId, shard] (const TSqsEvents::TEvExecuted::TRecord& ev) { - const ui32 status = ev.GetStatus(); - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); - const ui64 messagesDeleted = val["messagesDeleted"]; + .QueryId(PURGE_QUEUE_ID) + .Counters(Counters_) + .RetryOnTimeout() + .OnExecuted(onExecuted) + .Params() + .Uint64("OFFSET_FROM", offsets.first) + .Uint64("OFFSET_TO", offsets.second) + .Uint64("NOW", Now().MilliSeconds()) + .Uint64("SHARD", shardId) + .Uint64("BATCH_SIZE", Cfg().GetCleanupBatchSize()) + .ParentBuilder().Start(); +} + +static void FillMessagesParam(NClient::TWriteValue& messagesParam, const NClient::TValue& messages, ui64& lastOffset, TInstant& 
lastSentTimestamp, TSqsEvents::TEvInflyIsPurgingNotification* notification = nullptr) { + if (notification) { + notification->Offsets.reserve(messages.Size()); + } + for (size_t i = 0; i < messages.Size(); ++i) { + const TValue& message = messages[i]; + auto messageParam = messagesParam.AddListItem(); + const ui64 offset = message["Offset"]; + const ui64 sentTimestamp = message["SentTimestamp"]; + if (notification) { + notification->Offsets.push_back(offset); + } + messageParam["Offset"] = offset; + messageParam["RandomId"] = ui64(message["RandomId"]); + messageParam["SentTimestamp"] = sentTimestamp; + lastOffset = Max(lastOffset, offset); + lastSentTimestamp = Max(TInstant::MilliSeconds(sentTimestamp), lastSentTimestamp); + } +} + +void TPurgeActor::MakeStage2Request(ui64 cleanupVersion, const TValue& messages, const TMaybe<TValue>& inflyMessages, const ui64 shardId, TShard* shard) { + auto onExecuted = [this, shardId, shard] (const TSqsEvents::TEvExecuted::TRecord& ev) { + const ui32 status = ev.GetStatus(); + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); + const ui64 messagesDeleted = val["messagesDeleted"]; ADD_COUNTER_COUPLE(Counters_, MessagesPurged, purged_count_per_second, messagesDeleted); - RLOG_SQS_DEBUG("Purged " << messagesDeleted << " messages from queue [" << QueuePath_ << "]"); - const bool versionIsSame = val["versionIsSame"]; - if (versionIsSame) { - shard->PreviousSuccessfullyProcessedLastMessage = shard->CurrentLastMessage; - } - if (!IsFifo_) { - const i64 newMessagesCount = val["newMessagesCount"]; - Y_VERIFY(newMessagesCount >= 0); - auto notification = MakeHolder<TSqsEvents::TEvQueuePurgedNotification>(); - notification->Shard = shardId; - notification->NewMessagesCount = static_cast<ui64>(newMessagesCount); + RLOG_SQS_DEBUG("Purged " << messagesDeleted << " messages from queue [" << QueuePath_ << "]"); + const bool versionIsSame = val["versionIsSame"]; + if (versionIsSame) { + shard->PreviousSuccessfullyProcessedLastMessage = shard->CurrentLastMessage; + } + if (!IsFifo_) { + const i64 newMessagesCount = val["newMessagesCount"]; + Y_VERIFY(newMessagesCount >= 0); + auto notification = MakeHolder<TSqsEvents::TEvQueuePurgedNotification>(); + notification->Shard = shardId; + notification->NewMessagesCount = static_cast<ui64>(newMessagesCount); Send(QueueLeader_, std::move(notification)); - } - - shard->BoundaryPurged = shard->CurrentLastMessage.SentTimestamp; - if (shard->KeysTruncated) { - MakeGetRetentionOffsetRequest(shardId, shard); - } else { - shard->Purging = false; - } - } else { - RLOG_SQS_WARN("Failed to execute cleanup request on queue [" << QueuePath_ << "] shard [" << shardId << "] stage 2: " << ev); - shard->Purging = false; - shard->TargetBoundary = shard->BoundaryPurged; - } - }; - - TExecutorBuilder builder(SelfId(), RequestId_); - builder - .User(QueuePath_.UserName) - .Queue(QueuePath_.QueueName) - .Shard(shardId) + } + + shard->BoundaryPurged = shard->CurrentLastMessage.SentTimestamp; + if (shard->KeysTruncated) { + MakeGetRetentionOffsetRequest(shardId, shard); + } else { + shard->Purging = false; + } + } else { + RLOG_SQS_WARN("Failed to execute cleanup request on queue [" << QueuePath_ << "] shard [" << shardId << "] stage 2: " << ev); + shard->Purging = false; + shard->TargetBoundary = shard->BoundaryPurged; + } + }; + + TExecutorBuilder builder(SelfId(), RequestId_); + builder + .User(QueuePath_.UserName) + 
.Queue(QueuePath_.QueueName) + .Shard(shardId) .QueueLeader(QueueLeader_) - .QueryId(PURGE_QUEUE_STAGE2_ID) - .Counters(Counters_) - .RetryOnTimeout() - .OnExecuted(onExecuted); - - NClient::TWriteValue params = builder.ParamsValue(); - params["CLEANUP_VERSION"] = cleanupVersion; - params["SHARD"] = shardId; - params["NOW"] = TActivationContext::Now().MilliSeconds(); - - auto messagesParam = params["MESSAGES"]; - FillMessagesParam(messagesParam, messages, shard->CurrentLastMessage.Offset, shard->CurrentLastMessage.SentTimestamp); - if (inflyMessages) { - THolder<TSqsEvents::TEvInflyIsPurgingNotification> notification(new TSqsEvents::TEvInflyIsPurgingNotification()); - notification->Shard = shardId; - FillMessagesParam(messagesParam, *inflyMessages, shard->CurrentLastMessage.Offset, shard->CurrentLastMessage.SentTimestamp, notification.Get()); - if (!notification->Offsets.empty()) { + .QueryId(PURGE_QUEUE_STAGE2_ID) + .Counters(Counters_) + .RetryOnTimeout() + .OnExecuted(onExecuted); + + NClient::TWriteValue params = builder.ParamsValue(); + params["CLEANUP_VERSION"] = cleanupVersion; + params["SHARD"] = shardId; + params["NOW"] = TActivationContext::Now().MilliSeconds(); + + auto messagesParam = params["MESSAGES"]; + FillMessagesParam(messagesParam, messages, shard->CurrentLastMessage.Offset, shard->CurrentLastMessage.SentTimestamp); + if (inflyMessages) { + THolder<TSqsEvents::TEvInflyIsPurgingNotification> notification(new TSqsEvents::TEvInflyIsPurgingNotification()); + notification->Shard = shardId; + FillMessagesParam(messagesParam, *inflyMessages, shard->CurrentLastMessage.Offset, shard->CurrentLastMessage.SentTimestamp, notification.Get()); + if (!notification->Offsets.empty()) { Send(QueueLeader_, std::move(notification)); - } - } - - builder.Start(); + } + } + + builder.Start(); } -void TPurgeActor::HandlePurgeQueue(TSqsEvents::TEvPurgeQueue::TPtr& ev) { +void TPurgeActor::HandlePurgeQueue(TSqsEvents::TEvPurgeQueue::TPtr& ev) { auto& shard = Shards_[ev->Get()->Shard]; - const char* skipReason = ""; - if (ev->Get()->Boundary > shard.TargetBoundary) { - shard.TargetBoundary = ev->Get()->Boundary; + const char* skipReason = ""; + if (ev->Get()->Boundary > shard.TargetBoundary) { + shard.TargetBoundary = ev->Get()->Boundary; - if (!shard.Purging) { + if (!shard.Purging) { shard.Purging = true; - MakeGetRetentionOffsetRequest(ev->Get()->Shard, &shard); - } else { - skipReason = ". Skipping (already purging)"; + MakeGetRetentionOffsetRequest(ev->Get()->Shard, &shard); + } else { + skipReason = ". Skipping (already purging)"; } - } else { - skipReason = ". Skipping (old boundary)"; + } else { + skipReason = ". 
Skipping (old boundary)"; } - - RLOG_SQS_INFO("Purge queue request [" << QueuePath_ << "/" << ev->Get()->Shard << "] to " << ev->Get()->Boundary.MilliSeconds() << " (" << ev->Get()->Boundary << ")" << skipReason); + + RLOG_SQS_INFO("Purge queue request [" << QueuePath_ << "/" << ev->Get()->Shard << "] to " << ev->Get()->Boundary.MilliSeconds() << " (" << ev->Get()->Boundary << ")" << skipReason); } -void TPurgeActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - ev->Get()->Call(); +void TPurgeActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + ev->Get()->Call(); } -void TPurgeActor::HandlePoisonPill(TEvPoisonPill::TPtr&) { - PassAway(); +void TPurgeActor::HandlePoisonPill(TEvPoisonPill::TPtr&) { + PassAway(); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/purge.h b/ydb/core/ymq/actor/purge.h index 2a16a5aa247..6fc2b70fe3a 100644 --- a/ydb/core/ymq/actor/purge.h +++ b/ydb/core/ymq/actor/purge.h @@ -1,5 +1,5 @@ #pragma once -#include "defs.h" +#include "defs.h" #include "events.h" #include <library/cpp/actors/core/actor_bootstrapped.h> @@ -8,61 +8,61 @@ #include <util/generic/map.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { -class TPurgeActor : public TActorBootstrapped<TPurgeActor> { +class TPurgeActor : public TActorBootstrapped<TPurgeActor> { struct TShard { - TInstant TargetBoundary = TInstant::Zero(); // Target state - TInstant BoundaryPurged = TInstant::Zero(); // Current state in database + TInstant TargetBoundary = TInstant::Zero(); // Target state + TInstant BoundaryPurged = TInstant::Zero(); // Current state in database bool Purging = false; - bool KeysTruncated = false; - - struct TMessageBoundary { - ui64 Offset = 0; - TInstant SentTimestamp = TInstant::Zero(); - }; - std::pair<ui64, ui64> CurrentOffsets; - TMessageBoundary CurrentLastMessage; - TMessageBoundary PreviousSuccessfullyProcessedLastMessage; + bool KeysTruncated = false; + + struct TMessageBoundary { + ui64 Offset = 0; + TInstant SentTimestamp = TInstant::Zero(); + }; + std::pair<ui64, ui64> CurrentOffsets; + TMessageBoundary CurrentLastMessage; + TMessageBoundary PreviousSuccessfullyProcessedLastMessage; }; public: TPurgeActor(const TQueuePath& queuePath, TIntrusivePtr<TQueueCounters> counters, const TActorId& queueLeader, bool isFifo); ~TPurgeActor(); - void Bootstrap(); + void Bootstrap(); static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_PURGE_ACTOR; - } - + } + private: - void MakeGetRetentionOffsetRequest(const ui64 shardId, TShard* shard); - void MakeStage1Request(const ui64 shardId, TShard* shard, const std::pair<ui64, ui64>& offsets); - void MakeStage2Request(ui64 cleanupVersion, const NClient::TValue& messages, const TMaybe<NClient::TValue>& inflyMessages, const ui64 shardId, TShard* shard); + void MakeGetRetentionOffsetRequest(const ui64 shardId, TShard* shard); + void MakeStage1Request(const ui64 shardId, TShard* shard, const std::pair<ui64, ui64>& offsets); + void MakeStage2Request(ui64 cleanupVersion, const NClient::TValue& messages, const TMaybe<NClient::TValue>& inflyMessages, const ui64 shardId, TShard* shard); private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvPurgeQueue, HandlePurgeQueue); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - hFunc(TEvPoisonPill, HandlePoisonPill); + hFunc(TSqsEvents::TEvPurgeQueue, HandlePurgeQueue); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TEvPoisonPill, 
HandlePoisonPill); } } - void HandlePurgeQueue(TSqsEvents::TEvPurgeQueue::TPtr& ev); - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - void HandlePoisonPill(TEvPoisonPill::TPtr&); + void HandlePurgeQueue(TSqsEvents::TEvPurgeQueue::TPtr& ev); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void HandlePoisonPill(TEvPoisonPill::TPtr&); private: const TQueuePath QueuePath_; /// A state of shard processing TMap<ui64, TShard> Shards_; - const TString RequestId_; - TIntrusivePtr<TQueueCounters> Counters_; - TIntrusivePtr<NMonitoring::TCounterForPtr> PurgedMessagesCounter_; + const TString RequestId_; + TIntrusivePtr<TQueueCounters> Counters_; + TIntrusivePtr<NMonitoring::TCounterForPtr> PurgedMessagesCounter_; const TActorId QueueLeader_; - const bool IsFifo_; + const bool IsFifo_; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/purge_queue.cpp b/ydb/core/ymq/actor/purge_queue.cpp index 72830bdd4a7..f19fd0d4235 100644 --- a/ydb/core/ymq/actor/purge_queue.cpp +++ b/ydb/core/ymq/actor/purge_queue.cpp @@ -1,8 +1,8 @@ #include "action.h" -#include "common_batch_actor.h" -#include "error.h" -#include "executor.h" -#include "log.h" +#include "common_batch_actor.h" +#include "error.h" +#include "executor.h" +#include "log.h" #include "params.h" #include "serviceid.h" @@ -10,169 +10,169 @@ using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TPurgeQueueActor : public TActionActor<TPurgeQueueActor> { public: - TPurgeQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::PurgeQueue, std::move(cb)) + TPurgeQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::PurgeQueue, std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutablePurgeQueue()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } private: bool DoValidate() override { if (!GetQueueName()) { - MakeError(Response_.MutablePurgeQueue(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); + MakeError(Response_.MutablePurgeQueue(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); return false; } return true; } - TError* MutableErrorDesc() override { - return Response_.MutablePurgeQueue()->MutableError(); - } - - void DoAction() override { + TError* MutableErrorDesc() override { + return Response_.MutablePurgeQueue()->MutableError(); + } + + void DoAction() override { Become(&TThis::StateFunc); - TExecutorBuilder(SelfId(), RequestId_) - .User(UserName_) - .Queue(GetQueueName()) + TExecutorBuilder(SelfId(), RequestId_) + .User(UserName_) + .Queue(GetQueueName()) .QueueLeader(QueueLeader_) - .QueryId(SET_RETENTION_ID) - .Counters(QueueCounters_) - .RetryOnTimeout() - .Params() - .Uint64("NOW", Now().MilliSeconds()) - .Bool("PURGE", true) - .ParentBuilder().Start(); + .QueryId(SET_RETENTION_ID) + .Counters(QueueCounters_) + .RetryOnTimeout() + .Params() + .Uint64("NOW", Now().MilliSeconds()) + .Bool("PURGE", true) + .ParentBuilder().Start(); } TString DoGetQueueName() const override { - return Request().GetQueueName(); + return Request().GetQueueName(); } - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); } } - 
void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - const auto& record = ev->Get()->Record; - const ui32 status = record.GetStatus(); - auto* result = Response_.MutablePurgeQueue(); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + const auto& record = ev->Get()->Record; + const ui32 status = record.GetStatus(); + auto* result = Response_.MutablePurgeQueue(); if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); - const TValue list(val["result"]); - - for (size_t i = 0; i < list.Size(); ++i) { - auto req = MakeHolder<TSqsEvents::TEvPurgeQueue>(); - req->QueuePath = GetQueuePath(); - req->Boundary = TInstant::MilliSeconds(ui64(list[i]["RetentionBoundary"])); - req->Shard = ui64(list[i]["Shard"]); + const TValue list(val["result"]); - RLOG_SQS_INFO("Purging queue. Set retention boundary for queue [" << req->QueuePath << "/" << req->Shard << "] to " << req->Boundary.MilliSeconds() << " (" << req->Boundary << ")"); + for (size_t i = 0; i < list.Size(); ++i) { + auto req = MakeHolder<TSqsEvents::TEvPurgeQueue>(); + req->QueuePath = GetQueuePath(); + req->Boundary = TInstant::MilliSeconds(ui64(list[i]["RetentionBoundary"])); + req->Shard = ui64(list[i]["Shard"]); + RLOG_SQS_INFO("Purging queue. Set retention boundary for queue [" << req->QueuePath << "/" << req->Shard << "] to " << req->Boundary.MilliSeconds() << " (" << req->Boundary << ")"); + Send(QueueLeader_, std::move(req)); } } else { - RLOG_SQS_ERROR("Failed to set retention boundary for queue [" << GetQueuePath() << "] while purging"); - RLOG_SQS_ERROR("Request failed: " << record); + RLOG_SQS_ERROR("Failed to set retention boundary for queue [" << GetQueuePath() << "] while purging"); + RLOG_SQS_ERROR("Request failed: " << record); - MakeError(result, NErrors::INTERNAL_FAILURE); + MakeError(result, NErrors::INTERNAL_FAILURE); } - SendReplyAndDie(); + SendReplyAndDie(); } - const TPurgeQueueRequest& Request() const { - return SourceSqsRequest_.GetPurgeQueue(); - } + const TPurgeQueueRequest& Request() const { + return SourceSqsRequest_.GetPurgeQueue(); + } }; -class TPurgeQueueBatchActor - : public TCommonBatchActor<TPurgeQueueBatchActor> -{ -public: - TPurgeQueueBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TCommonBatchActor(sourceSqsRequest, EAction::PurgeQueueBatch, std::move(cb)) - { - CopyAccountName(Request()); - Response_.MutablePurgeQueueBatch()->SetRequestId(RequestId_); - - CopySecurityToken(Request()); - } - -private: - std::vector<NKikimrClient::TSqsRequest> GenerateRequestsFromBatch() const override { - std::vector<NKikimrClient::TSqsRequest> ret; - ret.resize(Request().EntriesSize()); - for (size_t i = 0; i < Request().EntriesSize(); ++i) { - const auto& entry = Request().GetEntries(i); - auto& req = *ret[i].MutablePurgeQueue(); - req.MutableAuth()->SetUserName(UserName_); - - if (Request().HasCredentials()) { - *req.MutableCredentials() = Request().GetCredentials(); +class TPurgeQueueBatchActor + : public TCommonBatchActor<TPurgeQueueBatchActor> +{ +public: + TPurgeQueueBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TCommonBatchActor(sourceSqsRequest, EAction::PurgeQueueBatch, std::move(cb)) + { + CopyAccountName(Request()); + Response_.MutablePurgeQueueBatch()->SetRequestId(RequestId_); + + CopySecurityToken(Request()); + } + +private: + std::vector<NKikimrClient::TSqsRequest> 
GenerateRequestsFromBatch() const override { + std::vector<NKikimrClient::TSqsRequest> ret; + ret.resize(Request().EntriesSize()); + for (size_t i = 0; i < Request().EntriesSize(); ++i) { + const auto& entry = Request().GetEntries(i); + auto& req = *ret[i].MutablePurgeQueue(); + req.MutableAuth()->SetUserName(UserName_); + + if (Request().HasCredentials()) { + *req.MutableCredentials() = Request().GetCredentials(); } - req.SetQueueName(entry.GetQueueName()); - req.SetId(entry.GetId()); - } - return ret; - } - - void OnResponses(std::vector<NKikimrClient::TSqsResponse>&& responses) override { - Y_VERIFY(Request().EntriesSize() == responses.size()); - auto& resp = *Response_.MutablePurgeQueueBatch(); - for (size_t i = 0; i < Request().EntriesSize(); ++i) { - const auto& reqEntry = Request().GetEntries(i); - auto& respEntry = *resp.AddEntries(); - Y_VERIFY(responses[i].HasPurgeQueue()); - respEntry = std::move(*responses[i].MutablePurgeQueue()); - respEntry.SetId(reqEntry.GetId()); - } - } - - bool DoValidate() override { - for (const auto& entry : Request().GetEntries()) { - if (entry.GetQueueName().empty()) { - MakeError(MutableErrorDesc(), NErrors::MISSING_PARAMETER, TStringBuilder() << "No QueueName parameter in entry " << entry.GetId() << "."); - return false; - } - } - return true; - } - - TError* MutableErrorDesc() override { - return Response_.MutablePurgeQueueBatch()->MutableError(); - } - - TString DoGetQueueName() const override { - return {}; - } - - const TPurgeQueueBatchRequest& Request() const { - return SourceSqsRequest_.GetPurgeQueueBatch(); - } -}; - -IActor* CreatePurgeQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TPurgeQueueActor(sourceSqsRequest, std::move(cb)); -} - -IActor* CreatePurgeQueueBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TPurgeQueueBatchActor(sourceSqsRequest, std::move(cb)); + req.SetQueueName(entry.GetQueueName()); + req.SetId(entry.GetId()); + } + return ret; + } + + void OnResponses(std::vector<NKikimrClient::TSqsResponse>&& responses) override { + Y_VERIFY(Request().EntriesSize() == responses.size()); + auto& resp = *Response_.MutablePurgeQueueBatch(); + for (size_t i = 0; i < Request().EntriesSize(); ++i) { + const auto& reqEntry = Request().GetEntries(i); + auto& respEntry = *resp.AddEntries(); + Y_VERIFY(responses[i].HasPurgeQueue()); + respEntry = std::move(*responses[i].MutablePurgeQueue()); + respEntry.SetId(reqEntry.GetId()); + } + } + + bool DoValidate() override { + for (const auto& entry : Request().GetEntries()) { + if (entry.GetQueueName().empty()) { + MakeError(MutableErrorDesc(), NErrors::MISSING_PARAMETER, TStringBuilder() << "No QueueName parameter in entry " << entry.GetId() << "."); + return false; + } + } + return true; + } + + TError* MutableErrorDesc() override { + return Response_.MutablePurgeQueueBatch()->MutableError(); + } + + TString DoGetQueueName() const override { + return {}; + } + + const TPurgeQueueBatchRequest& Request() const { + return SourceSqsRequest_.GetPurgeQueueBatch(); + } +}; + +IActor* CreatePurgeQueueActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TPurgeQueueActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +IActor* CreatePurgeQueueBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TPurgeQueueBatchActor(sourceSqsRequest, std::move(cb)); +} + +} // namespace 
NKikimr::NSQS diff --git a/ydb/core/ymq/actor/queue_leader.cpp b/ydb/core/ymq/actor/queue_leader.cpp index 2c6e81d8275..bb5c99d0958 100644 --- a/ydb/core/ymq/actor/queue_leader.cpp +++ b/ydb/core/ymq/actor/queue_leader.cpp @@ -1,11 +1,11 @@ #include "queue_leader.h" -#include "fifo_cleanup.h" -#include "executor.h" -#include "log.h" -#include "migration.h" -#include "purge.h" -#include "retention.h" - +#include "fifo_cleanup.h" +#include "executor.h" +#include "log.h" +#include "migration.h" +#include "purge.h" +#include "retention.h" + #include <ydb/public/lib/value/value.h> #include <ydb/core/ymq/actor/serviceid.h> #include <ydb/core/ymq/base/constants.h> @@ -17,570 +17,570 @@ #include <ydb/core/ymq/queues/common/queries.h> #include <ydb/core/ymq/queues/fifo/queries.h> #include <ydb/core/ymq/queues/std/queries.h> - + #include <library/cpp/actors/core/hfunc.h> - -#include <util/random/random.h> -#include <util/random/shuffle.h> -#include <util/system/yassert.h> -#include <util/string/ascii.h> - -LWTRACE_USING(SQS_PROVIDER); - -namespace NKikimr::NSQS { - -constexpr ui64 UPDATE_COUNTERS_TAG = 0; -constexpr ui64 UPDATE_MESSAGES_METRICS_TAG = 1; -constexpr ui64 REQUEST_CONFIGURATION_TAG = 2; -constexpr ui64 RELOAD_INFLY_TAG = 1000; - -const TString INFLY_INVALIDATION_REASON_VERSION_CHANGED = "InflyVersionChanged"; -const TString INFLY_INVALIDATION_REASON_DEADLINE_CHANGED = "MessageDeadlineChanged"; -const TString INFLY_INVALIDATION_REASON_DELETED = "MessageDeleted"; - + +#include <util/random/random.h> +#include <util/random/shuffle.h> +#include <util/system/yassert.h> +#include <util/string/ascii.h> + +LWTRACE_USING(SQS_PROVIDER); + +namespace NKikimr::NSQS { + +constexpr ui64 UPDATE_COUNTERS_TAG = 0; +constexpr ui64 UPDATE_MESSAGES_METRICS_TAG = 1; +constexpr ui64 REQUEST_CONFIGURATION_TAG = 2; +constexpr ui64 RELOAD_INFLY_TAG = 1000; + +const TString INFLY_INVALIDATION_REASON_VERSION_CHANGED = "InflyVersionChanged"; +const TString INFLY_INVALIDATION_REASON_DEADLINE_CHANGED = "MessageDeadlineChanged"; +const TString INFLY_INVALIDATION_REASON_DELETED = "MessageDeleted"; + TQueueLeader::TQueueLeader(TString userName, TString queueName, TString folderId, TString rootUrl, TIntrusivePtr<TQueueCounters> counters, TIntrusivePtr<TUserCounters> userCounters, const TActorId& schemeCache, const TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions>& quoterResourcesForUser) - : UserName_(std::move(userName)) - , QueueName_(std::move(queueName)) + : UserName_(std::move(userName)) + , QueueName_(std::move(queueName)) , FolderId_(std::move(folderId)) - , RootUrl_(std::move(rootUrl)) - , SchemeCache_(schemeCache) - , Counters_(std::move(counters)) - , UserCounters_(std::move(userCounters)) -{ + , RootUrl_(std::move(rootUrl)) + , SchemeCache_(schemeCache) + , Counters_(std::move(counters)) + , UserCounters_(std::move(userCounters)) +{ DebugInfo->QueueLeaders.emplace(TStringBuilder() << TLogQueueName(UserName_, QueueName_), this); - if (quoterResourcesForUser) { - QuoterResources_ = new TSqsEvents::TQuoterResourcesForActions(*quoterResourcesForUser); - } -} - + if (quoterResourcesForUser) { + QuoterResources_ = new TSqsEvents::TQuoterResourcesForActions(*quoterResourcesForUser); + } +} + TQueueLeader::~TQueueLeader() { DebugInfo->QueueLeaders.EraseKeyValue(TStringBuilder() << TLogQueueName(UserName_, QueueName_), this); -} - +} + void TQueueLeader::Bootstrap() { Become(&TQueueLeader::StateInit); - Register(new TQueueMigrationActor(UserName_, QueueName_, SelfId(), SchemeCache_, Counters_)); -} - + 
Register(new TQueueMigrationActor(UserName_, QueueName_, SelfId(), SchemeCache_, Counters_)); +} + void TQueueLeader::BecomeWorking() { Become(&TQueueLeader::StateWorking); - const auto& cfg = Cfg(); - const ui64 randomTimeToWait = RandomNumber<ui64>(cfg.GetBackgroundMetricsUpdateTimeMs() / 4); // Don't start all such operations at one moment - Schedule(TDuration::MilliSeconds(randomTimeToWait), new TEvWakeup(UPDATE_COUNTERS_TAG)); - - Schedule(TDuration::Seconds(1), new TEvWakeup(UPDATE_MESSAGES_METRICS_TAG)); - - std::vector<TSqsEvents::TEvExecute::TPtr> requests; - requests.swap(ExecuteRequests_); - for (auto& req : requests) { - HandleExecuteWhileWorking(req); - } - - for (auto&& [reqId, reqInfo] : SendMessageRequests_) { - ProcessSendMessageBatch(reqInfo); - } - - for (auto&& [reqId, reqInfo] : ReceiveMessageRequests_) { - ProcessReceiveMessageBatch(reqInfo); - } - - for (auto&& [reqIdAndShard, reqInfo] : DeleteMessageRequests_) { - ProcessDeleteMessageBatch(reqInfo); - } - - for (auto&& [reqIdAndShard, reqInfo] : ChangeMessageVisibilityRequests_) { - ProcessChangeMessageVisibilityBatch(reqInfo); - } -} - + const auto& cfg = Cfg(); + const ui64 randomTimeToWait = RandomNumber<ui64>(cfg.GetBackgroundMetricsUpdateTimeMs() / 4); // Don't start all such operations at one moment + Schedule(TDuration::MilliSeconds(randomTimeToWait), new TEvWakeup(UPDATE_COUNTERS_TAG)); + + Schedule(TDuration::Seconds(1), new TEvWakeup(UPDATE_MESSAGES_METRICS_TAG)); + + std::vector<TSqsEvents::TEvExecute::TPtr> requests; + requests.swap(ExecuteRequests_); + for (auto& req : requests) { + HandleExecuteWhileWorking(req); + } + + for (auto&& [reqId, reqInfo] : SendMessageRequests_) { + ProcessSendMessageBatch(reqInfo); + } + + for (auto&& [reqId, reqInfo] : ReceiveMessageRequests_) { + ProcessReceiveMessageBatch(reqInfo); + } + + for (auto&& [reqIdAndShard, reqInfo] : DeleteMessageRequests_) { + ProcessDeleteMessageBatch(reqInfo); + } + + for (auto&& [reqIdAndShard, reqInfo] : ChangeMessageVisibilityRequests_) { + ProcessChangeMessageVisibilityBatch(reqInfo); + } +} + STATEFN(TQueueLeader::StateInit) { - switch (ev->GetTypeRewrite()) { - // interface - cFunc(TEvPoisonPill::EventType, PassAway); // from service - hFunc(TSqsEvents::TEvGetConfiguration, HandleGetConfigurationWhileIniting); // from action actors - hFunc(TSqsEvents::TEvExecute, HandleExecuteWhileIniting); // from action actors - hFunc(TSqsEvents::TEvClearQueueAttributesCache, HandleClearQueueAttributesCache); // from set queue attributes - hFunc(TSqsEvents::TEvPurgeQueue, HandlePurgeQueue); // from purge queue actor - hFunc(TSqsEvents::TEvSendMessageBatch, HandleSendMessageBatchWhileIniting); // from send message action actor - hFunc(TSqsEvents::TEvReceiveMessageBatch, HandleReceiveMessageBatchWhileIniting); // from receive message action actor - hFunc(TSqsEvents::TEvDeleteMessageBatch, HandleDeleteMessageBatchWhileIniting); // from delete message action actor - hFunc(TSqsEvents::TEvChangeMessageVisibilityBatch, HandleChangeMessageVisibilityBatchWhileIniting); // from change message visibility action actor - hFunc(TSqsEvents::TEvGetRuntimeQueueAttributes, HandleGetRuntimeQueueAttributesWhileIniting); // from get queue attributes action actor - hFunc(TSqsEvents::TEvDeadLetterQueueNotification, HandleDeadLetterQueueNotification); // service periodically notifies active dead letter queues - - // internal - hFunc(TSqsEvents::TEvQueueId, HandleQueueId); // discover dlq id and version - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); // from executor - 
hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvMigrationDone, HandleMigrationDone); // from migration actor - default: + switch (ev->GetTypeRewrite()) { + // interface + cFunc(TEvPoisonPill::EventType, PassAway); // from service + hFunc(TSqsEvents::TEvGetConfiguration, HandleGetConfigurationWhileIniting); // from action actors + hFunc(TSqsEvents::TEvExecute, HandleExecuteWhileIniting); // from action actors + hFunc(TSqsEvents::TEvClearQueueAttributesCache, HandleClearQueueAttributesCache); // from set queue attributes + hFunc(TSqsEvents::TEvPurgeQueue, HandlePurgeQueue); // from purge queue actor + hFunc(TSqsEvents::TEvSendMessageBatch, HandleSendMessageBatchWhileIniting); // from send message action actor + hFunc(TSqsEvents::TEvReceiveMessageBatch, HandleReceiveMessageBatchWhileIniting); // from receive message action actor + hFunc(TSqsEvents::TEvDeleteMessageBatch, HandleDeleteMessageBatchWhileIniting); // from delete message action actor + hFunc(TSqsEvents::TEvChangeMessageVisibilityBatch, HandleChangeMessageVisibilityBatchWhileIniting); // from change message visibility action actor + hFunc(TSqsEvents::TEvGetRuntimeQueueAttributes, HandleGetRuntimeQueueAttributesWhileIniting); // from get queue attributes action actor + hFunc(TSqsEvents::TEvDeadLetterQueueNotification, HandleDeadLetterQueueNotification); // service periodically notifies active dead letter queues + + // internal + hFunc(TSqsEvents::TEvQueueId, HandleQueueId); // discover dlq id and version + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); // from executor + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvMigrationDone, HandleMigrationDone); // from migration actor + default: LOG_SQS_ERROR("Unknown type of event came to SQS background queue " << TLogQueueName(UserName_, QueueName_) << " leader actor: " << ev->Type << " (" << ev->GetBase()->ToString() << "), sender: " << ev->Sender); - } -} - + } +} + STATEFN(TQueueLeader::StateWorking) { - switch (ev->GetTypeRewrite()) { - // interface - cFunc(TEvPoisonPill::EventType, PassAway); // from service - hFunc(TSqsEvents::TEvGetConfiguration, HandleGetConfigurationWhileWorking); // from action actors - hFunc(TSqsEvents::TEvExecute, HandleExecuteWhileWorking); // from action actors - hFunc(TSqsEvents::TEvClearQueueAttributesCache, HandleClearQueueAttributesCache); // from set queue attributes - hFunc(TSqsEvents::TEvPurgeQueue, HandlePurgeQueue); // from purge queue actor - hFunc(TSqsEvents::TEvSendMessageBatch, HandleSendMessageBatchWhileWorking); // from send message action actor - hFunc(TSqsEvents::TEvReceiveMessageBatch, HandleReceiveMessageBatchWhileWorking); // from receive message action actor - hFunc(TSqsEvents::TEvDeleteMessageBatch, HandleDeleteMessageBatchWhileWorking); // from delete message action actor - hFunc(TSqsEvents::TEvChangeMessageVisibilityBatch, HandleChangeMessageVisibilityBatchWhileWorking); // from change message visibility action actor - hFunc(TSqsEvents::TEvGetRuntimeQueueAttributes, HandleGetRuntimeQueueAttributesWhileWorking); // from get queue attributes action actor - hFunc(TSqsEvents::TEvDeadLetterQueueNotification, HandleDeadLetterQueueNotification); // service periodically notifies active dead letter queues - - // internal - hFunc(TSqsEvents::TEvQueueId, HandleQueueId); // discover dlq id and version - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); // from executor - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvInflyIsPurgingNotification, HandleInflyIsPurgingNotification); - hFunc(TSqsEvents::TEvQueuePurgedNotification, 
HandleQueuePurgedNotification); - default: + switch (ev->GetTypeRewrite()) { + // interface + cFunc(TEvPoisonPill::EventType, PassAway); // from service + hFunc(TSqsEvents::TEvGetConfiguration, HandleGetConfigurationWhileWorking); // from action actors + hFunc(TSqsEvents::TEvExecute, HandleExecuteWhileWorking); // from action actors + hFunc(TSqsEvents::TEvClearQueueAttributesCache, HandleClearQueueAttributesCache); // from set queue attributes + hFunc(TSqsEvents::TEvPurgeQueue, HandlePurgeQueue); // from purge queue actor + hFunc(TSqsEvents::TEvSendMessageBatch, HandleSendMessageBatchWhileWorking); // from send message action actor + hFunc(TSqsEvents::TEvReceiveMessageBatch, HandleReceiveMessageBatchWhileWorking); // from receive message action actor + hFunc(TSqsEvents::TEvDeleteMessageBatch, HandleDeleteMessageBatchWhileWorking); // from delete message action actor + hFunc(TSqsEvents::TEvChangeMessageVisibilityBatch, HandleChangeMessageVisibilityBatchWhileWorking); // from change message visibility action actor + hFunc(TSqsEvents::TEvGetRuntimeQueueAttributes, HandleGetRuntimeQueueAttributesWhileWorking); // from get queue attributes action actor + hFunc(TSqsEvents::TEvDeadLetterQueueNotification, HandleDeadLetterQueueNotification); // service periodically notifies active dead letter queues + + // internal + hFunc(TSqsEvents::TEvQueueId, HandleQueueId); // discover dlq id and version + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); // from executor + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvInflyIsPurgingNotification, HandleInflyIsPurgingNotification); + hFunc(TSqsEvents::TEvQueuePurgedNotification, HandleQueuePurgedNotification); + default: LOG_SQS_ERROR("Unknown type of event came to SQS background queue " << TLogQueueName(UserName_, QueueName_) << " leader actor: " << ev->Type << " (" << ev->GetBase()->ToString() << "), sender: " << ev->Sender); - } -} - + } +} + void TQueueLeader::PassAway() { LOG_SQS_INFO("Queue " << TLogQueueName(UserName_, QueueName_) << " leader is dying"); - - for (auto& req : GetConfigurationRequests_) { - AnswerFailed(req); - } - GetConfigurationRequests_.clear(); - - Y_VERIFY(ExecuteRequests_.empty()); - - if (DeduplicationCleanupActor_) { - Send(DeduplicationCleanupActor_, new TEvPoisonPill()); - } - if (ReadsCleanupActor_) { - Send(ReadsCleanupActor_, new TEvPoisonPill()); - } - if (RetentionActor_) { - Send(RetentionActor_, new TEvPoisonPill()); - } - if (PurgeActor_) { - Send(PurgeActor_, new TEvPoisonPill()); - } - - // Explicitly set absolute counters to zero for proper counting aggregated parent counters: + + for (auto& req : GetConfigurationRequests_) { + AnswerFailed(req); + } + GetConfigurationRequests_.clear(); + + Y_VERIFY(ExecuteRequests_.empty()); + + if (DeduplicationCleanupActor_) { + Send(DeduplicationCleanupActor_, new TEvPoisonPill()); + } + if (ReadsCleanupActor_) { + Send(ReadsCleanupActor_, new TEvPoisonPill()); + } + if (RetentionActor_) { + Send(RetentionActor_, new TEvPoisonPill()); + } + if (PurgeActor_) { + Send(PurgeActor_, new TEvPoisonPill()); + } + + // Explicitly set absolute counters to zero for proper counting aggregated parent counters: SET_COUNTER_COUPLE(Counters_, MessagesCount, stored_count, 0); SET_COUNTER_COUPLE(Counters_, InflyMessagesCount, inflight_count, 0); SET_COUNTER_COUPLE(Counters_, OldestMessageAgeSeconds, oldest_age_milliseconds, 0); - - TActorBootstrapped::PassAway(); -} - + + TActorBootstrapped::PassAway(); +} + void TQueueLeader::HandleWakeup(TEvWakeup::TPtr& ev) { - if (ev->Get()->Tag >= 
RELOAD_INFLY_TAG && ev->Get()->Tag < RELOAD_INFLY_TAG + MAX_SHARDS_COUNT) { - StartLoadingInfly(ev->Get()->Tag - RELOAD_INFLY_TAG, true); // reload infly after failure while loading infly - return; - } - - switch (ev->Get()->Tag) { - case UPDATE_COUNTERS_TAG: { - StartGatheringMetrics(); - break; - } - case UPDATE_MESSAGES_METRICS_TAG: { - ReportOldestTimestampMetricsIfReady(); - ReportMessagesCountMetricsIfReady(); - Schedule(TDuration::Seconds(1), new TEvWakeup(UPDATE_MESSAGES_METRICS_TAG)); - break; - } - case REQUEST_CONFIGURATION_TAG: { - RequestConfiguration(); - break; - } - default: - Y_FAIL("Unknown wakeup tag: %lu", ev->Get()->Tag); - } -} - + if (ev->Get()->Tag >= RELOAD_INFLY_TAG && ev->Get()->Tag < RELOAD_INFLY_TAG + MAX_SHARDS_COUNT) { + StartLoadingInfly(ev->Get()->Tag - RELOAD_INFLY_TAG, true); // reload infly after failure while loading infly + return; + } + + switch (ev->Get()->Tag) { + case UPDATE_COUNTERS_TAG: { + StartGatheringMetrics(); + break; + } + case UPDATE_MESSAGES_METRICS_TAG: { + ReportOldestTimestampMetricsIfReady(); + ReportMessagesCountMetricsIfReady(); + Schedule(TDuration::Seconds(1), new TEvWakeup(UPDATE_MESSAGES_METRICS_TAG)); + break; + } + case REQUEST_CONFIGURATION_TAG: { + RequestConfiguration(); + break; + } + default: + Y_FAIL("Unknown wakeup tag: %lu", ev->Get()->Tag); + } +} + void TQueueLeader::HandleMigrationDone(TSqsEvents::TEvMigrationDone::TPtr& ev) { - if (ev->Get()->Success) { - const auto& cfg = Cfg(); - QueueAttributesCacheTime_ = TDuration::MilliSeconds(cfg.GetQueueAttributesCacheTimeMs()); - RequestConfiguration(); - } else { - INC_COUNTER(Counters_, QueueMasterStartProblems); + if (ev->Get()->Success) { + const auto& cfg = Cfg(); + QueueAttributesCacheTime_ = TDuration::MilliSeconds(cfg.GetQueueAttributesCacheTimeMs()); + RequestConfiguration(); + } else { + INC_COUNTER(Counters_, QueueMasterStartProblems); INC_COUNTER(Counters_, QueueLeaderStartProblems); - Register(new TQueueMigrationActor(UserName_, QueueName_, SelfId(), SchemeCache_, Counters_, TDuration::MilliSeconds(500))); - FailRequestsDuringStartProblems(); - } -} - + Register(new TQueueMigrationActor(UserName_, QueueName_, SelfId(), SchemeCache_, Counters_, TDuration::MilliSeconds(500))); + FailRequestsDuringStartProblems(); + } +} + void TQueueLeader::HandleGetConfigurationWhileIniting(TSqsEvents::TEvGetConfiguration::TPtr& ev) { - GetConfigurationRequests_.emplace_back(ev); -} - + GetConfigurationRequests_.emplace_back(ev); +} + void TQueueLeader::HandleGetConfigurationWhileWorking(TSqsEvents::TEvGetConfiguration::TPtr& ev) { - if (ev->Get()->NeedQueueAttributes && TActivationContext::Now() <= AttributesUpdateTime_ + QueueAttributesCacheTime_ && QueueAttributes_) { - AnswerGetConfiguration(ev); - } else { - LWPROBE(QueueAttributesCacheMiss, ev->Get()->UserName, ev->Get()->QueueName, ev->Get()->RequestId); - GetConfigurationRequests_.emplace_back(ev); - AskQueueAttributes(); - } -} - + if (ev->Get()->NeedQueueAttributes && TActivationContext::Now() <= AttributesUpdateTime_ + QueueAttributesCacheTime_ && QueueAttributes_) { + AnswerGetConfiguration(ev); + } else { + LWPROBE(QueueAttributesCacheMiss, ev->Get()->UserName, ev->Get()->QueueName, ev->Get()->RequestId); + GetConfigurationRequests_.emplace_back(ev); + AskQueueAttributes(); + } +} + void TQueueLeader::HandleClearQueueAttributesCache([[maybe_unused]] TSqsEvents::TEvClearQueueAttributesCache::TPtr& ev) { - AttributesUpdateTime_ = TInstant::Zero(); - QueueAttributes_ = Nothing(); -} - + AttributesUpdateTime_ = 
TInstant::Zero(); + QueueAttributes_ = Nothing(); +} + void TQueueLeader::HandleExecuteWhileIniting(TSqsEvents::TEvExecute::TPtr& ev) { - ExecuteRequests_.emplace_back(ev); -} - + ExecuteRequests_.emplace_back(ev); +} + void TQueueLeader::HandleExecuteWhileWorking(TSqsEvents::TEvExecute::TPtr& ev) { - Y_VERIFY(ev->Get()->QueryIdx < QUERY_VECTOR_SIZE); - Y_VERIFY(ev->Get()->Shard < ShardsCount_); - auto& query = Shards_[ev->Get()->Shard].Queries[ev->Get()->QueryIdx]; - - switch (query.State) { - case EQueryState::Empty: - query.State = EQueryState::Preparing; - Prepare(ev); - break; - case EQueryState::Preparing: - RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "Waiting query(idx=" << ev->Get()->QueryIdx << ") compilation"); - query.Deferred.push_back(ev); - break; - case EQueryState::Cached: - ExecuteRequest(ev, query.Compiled); - break; - } -} - + Y_VERIFY(ev->Get()->QueryIdx < QUERY_VECTOR_SIZE); + Y_VERIFY(ev->Get()->Shard < ShardsCount_); + auto& query = Shards_[ev->Get()->Shard].Queries[ev->Get()->QueryIdx]; + + switch (query.State) { + case EQueryState::Empty: + query.State = EQueryState::Preparing; + Prepare(ev); + break; + case EQueryState::Preparing: + RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "Waiting query(idx=" << ev->Get()->QueryIdx << ") compilation"); + query.Deferred.push_back(ev); + break; + case EQueryState::Cached: + ExecuteRequest(ev, query.Compiled); + break; + } +} + void TQueueLeader::Prepare(TSqsEvents::TEvExecute::TPtr& ev) { - const TSqsEvents::TEvExecute& req = *ev->Get(); - RLOG_SQS_REQ_DEBUG(req.RequestId, "Preparing query(idx=" << req.QueryIdx << ")"); + const TSqsEvents::TEvExecute& req = *ev->Get(); + RLOG_SQS_REQ_DEBUG(req.RequestId, "Preparing query(idx=" << req.QueryIdx << ")"); - TExecutorBuilder(SelfId(), req.RequestId) - .User(UserName_) - .Queue(QueueName_) - .Shard(req.Shard) + TExecutorBuilder(SelfId(), req.RequestId) + .User(UserName_) + .Queue(QueueName_) + .Shard(req.Shard) .QueueVersion(QueueVersion_) .Fifo(IsFifoQueue_) - .Mode(NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE) - .QueryId(req.QueryIdx) - .RetryOnTimeout(req.RetryOnTimeout) - .Counters(Counters_) - .OnExecuted([this, ev](const TSqsEvents::TEvExecuted::TRecord& record) mutable { OnQueryPrepared(ev, record); }) - .StartExecutorActor(); -} - + .Mode(NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE) + .QueryId(req.QueryIdx) + .RetryOnTimeout(req.RetryOnTimeout) + .Counters(Counters_) + .OnExecuted([this, ev](const TSqsEvents::TEvExecuted::TRecord& record) mutable { OnQueryPrepared(ev, record); }) + .StartExecutorActor(); +} + void TQueueLeader::OnQueryPrepared(TSqsEvents::TEvExecute::TPtr& ev, const TSqsEvents::TEvExecuted::TRecord& record) { - const TSqsEvents::TEvExecute& req = *ev->Get(); - auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(record.GetStatus()); - auto& query = Shards_[req.Shard].Queries[req.QueryIdx]; - - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - RLOG_SQS_REQ_DEBUG(req.RequestId, "Query(idx=" << req.QueryIdx << ") has been prepared"); - - query.Compiled = record.GetMiniKQLCompileResults().GetCompiledProgram(); - query.State = EQueryState::Cached; - - std::vector<TSqsEvents::TEvExecute::TPtr> requests; - requests.swap(query.Deferred); - HandleExecuteWhileWorking(ev); - for (auto& r : requests) { - HandleExecuteWhileWorking(r); - } - - } else { - RLOG_SQS_REQ_WARN(req.RequestId, "Request preparation error: " - << "status=" << status << ", " - << "record=" << record); - Send(req.Sender, 
MakeHolder<TSqsEvents::TEvExecuted>(record, req.Cb, req.Shard)); - - for (const auto& def : query.Deferred) { - RLOG_SQS_REQ_WARN(def->Get()->RequestId, "Request preparation error: " - << "status=" << status << ", " - << "record=" << record); - Send(def->Get()->Sender, MakeHolder<TSqsEvents::TEvExecuted>(record, def->Get()->Cb, def->Get()->Shard)); - } - query.Deferred.clear(); - - if (!NTxProxy::TResultStatus::IsSoftErrorWithoutSideEffects(status)) { - RemoveCachedRequest(req.Shard, req.QueryIdx); - } - } -} - + const TSqsEvents::TEvExecute& req = *ev->Get(); + auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(record.GetStatus()); + auto& query = Shards_[req.Shard].Queries[req.QueryIdx]; + + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + RLOG_SQS_REQ_DEBUG(req.RequestId, "Query(idx=" << req.QueryIdx << ") has been prepared"); + + query.Compiled = record.GetMiniKQLCompileResults().GetCompiledProgram(); + query.State = EQueryState::Cached; + + std::vector<TSqsEvents::TEvExecute::TPtr> requests; + requests.swap(query.Deferred); + HandleExecuteWhileWorking(ev); + for (auto& r : requests) { + HandleExecuteWhileWorking(r); + } + + } else { + RLOG_SQS_REQ_WARN(req.RequestId, "Request preparation error: " + << "status=" << status << ", " + << "record=" << record); + Send(req.Sender, MakeHolder<TSqsEvents::TEvExecuted>(record, req.Cb, req.Shard)); + + for (const auto& def : query.Deferred) { + RLOG_SQS_REQ_WARN(def->Get()->RequestId, "Request preparation error: " + << "status=" << status << ", " + << "record=" << record); + Send(def->Get()->Sender, MakeHolder<TSqsEvents::TEvExecuted>(record, def->Get()->Cb, def->Get()->Shard)); + } + query.Deferred.clear(); + + if (!NTxProxy::TResultStatus::IsSoftErrorWithoutSideEffects(status)) { + RemoveCachedRequest(req.Shard, req.QueryIdx); + } + } +} + void TQueueLeader::RemoveCachedRequest(size_t shard, size_t idx) { - TQuery& query = Shards_[shard].Queries[idx]; - if (query.State == EQueryState::Cached) { - LOG_SQS_INFO("Remove cached compiled query(idx=" << idx << ") for queue " << TLogQueueName(UserName_, QueueName_, shard)); - - query.State = EQueryState::Empty; - query.Compiled = TString(); - } else if (query.State == EQueryState::Preparing) { - LOG_SQS_INFO("Clear compiling state for query(idx=" << idx << ") for queue " << TLogQueueName(UserName_, QueueName_, shard)); - Y_VERIFY(query.Deferred.empty()); - - query.State = EQueryState::Empty; - query.Compiled = TString(); - } -} - + TQuery& query = Shards_[shard].Queries[idx]; + if (query.State == EQueryState::Cached) { + LOG_SQS_INFO("Remove cached compiled query(idx=" << idx << ") for queue " << TLogQueueName(UserName_, QueueName_, shard)); + + query.State = EQueryState::Empty; + query.Compiled = TString(); + } else if (query.State == EQueryState::Preparing) { + LOG_SQS_INFO("Clear compiling state for query(idx=" << idx << ") for queue " << TLogQueueName(UserName_, QueueName_, shard)); + Y_VERIFY(query.Deferred.empty()); + + query.State = EQueryState::Empty; + query.Compiled = TString(); + } +} + void TQueueLeader::ExecuteRequest(TSqsEvents::TEvExecute::TPtr& ev, const TString& compiled) { - const TSqsEvents::TEvExecute& req = *ev->Get(); - RLOG_SQS_REQ_DEBUG(req.RequestId, "Executing compiled query(idx=" << req.QueryIdx << ")"); - TExecutorBuilder builder(SelfId(), req.RequestId); - builder - .User(UserName_) - .Queue(QueueName_) - .Shard(req.Shard) + const TSqsEvents::TEvExecute& req = *ev->Get(); + RLOG_SQS_REQ_DEBUG(req.RequestId, "Executing 
compiled query(idx=" << req.QueryIdx << ")"); + TExecutorBuilder builder(SelfId(), req.RequestId); + builder + .User(UserName_) + .Queue(QueueName_) + .Shard(req.Shard) .QueueVersion(QueueVersion_) .Fifo(IsFifoQueue_) - .QueryId(req.QueryIdx) - .Bin(compiled) - .RetryOnTimeout(req.RetryOnTimeout) - .Counters(Counters_) - .OnExecuted([this, ev](const TSqsEvents::TEvExecuted::TRecord& record) mutable { OnQueryExecuted(ev, record); }); - - builder.Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->MutableParams()->MutableProto()->CopyFrom(req.Params); - builder.StartExecutorActor(); -} - + .QueryId(req.QueryIdx) + .Bin(compiled) + .RetryOnTimeout(req.RetryOnTimeout) + .Counters(Counters_) + .OnExecuted([this, ev](const TSqsEvents::TEvExecuted::TRecord& record) mutable { OnQueryExecuted(ev, record); }); + + builder.Request().Record.MutableTransaction()->MutableMiniKQLTransaction()->MutableParams()->MutableProto()->CopyFrom(req.Params); + builder.StartExecutorActor(); +} + void TQueueLeader::OnQueryExecuted(TSqsEvents::TEvExecute::TPtr& ev, const TSqsEvents::TEvExecuted::TRecord& record) { - const TSqsEvents::TEvExecute& req = *ev->Get(); - bool retried = false; - auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(record.GetStatus()); - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - CreateBackgroundActors(); - } else { - RLOG_SQS_REQ_WARN(req.RequestId, "Query(idx=" << req.QueryIdx << ") execution error. Queue: [" << UserName_ << "/" << QueueName_ << "]: " << record); - - if (!NTxProxy::TResultStatus::IsSoftErrorWithoutSideEffects(status)) { - TQuery& query = Shards_[req.Shard].Queries[req.QueryIdx]; - if (query.State != EQueryState::Preparing) { // if query is preparing, there is a concurrent process that has cleared our cache - RemoveCachedRequest(req.Shard, req.QueryIdx); - } - if (TSqsEvents::TEvExecuted::IsResolvingError(record)) { - retried = true; - RLOG_SQS_REQ_DEBUG(req.RequestId, "Trying to recompile and execute query second time"); - HandleExecuteWhileWorking(ev); - } - } - } - - if (!retried) { - RLOG_SQS_REQ_DEBUG(req.RequestId, "Sending executed reply"); - Send(req.Sender, MakeHolder<TSqsEvents::TEvExecuted>(record, req.Cb, req.Shard)); - } -} - + const TSqsEvents::TEvExecute& req = *ev->Get(); + bool retried = false; + auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(record.GetStatus()); + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + CreateBackgroundActors(); + } else { + RLOG_SQS_REQ_WARN(req.RequestId, "Query(idx=" << req.QueryIdx << ") execution error. 
Queue: [" << UserName_ << "/" << QueueName_ << "]: " << record); + + if (!NTxProxy::TResultStatus::IsSoftErrorWithoutSideEffects(status)) { + TQuery& query = Shards_[req.Shard].Queries[req.QueryIdx]; + if (query.State != EQueryState::Preparing) { // if query is preparing, there is a concurrent process that has cleared our cache + RemoveCachedRequest(req.Shard, req.QueryIdx); + } + if (TSqsEvents::TEvExecuted::IsResolvingError(record)) { + retried = true; + RLOG_SQS_REQ_DEBUG(req.RequestId, "Trying to recompile and execute query second time"); + HandleExecuteWhileWorking(ev); + } + } + } + + if (!retried) { + RLOG_SQS_REQ_DEBUG(req.RequestId, "Sending executed reply"); + Send(req.Sender, MakeHolder<TSqsEvents::TEvExecuted>(record, req.Cb, req.Shard)); + } +} + void TQueueLeader::HandleSendMessageBatchWhileIniting(TSqsEvents::TEvSendMessageBatch::TPtr& ev) { - TString reqId = ev->Get()->RequestId; - Y_VERIFY(SendMessageRequests_.emplace(std::move(reqId), std::move(ev)).second); -} - + TString reqId = ev->Get()->RequestId; + Y_VERIFY(SendMessageRequests_.emplace(std::move(reqId), std::move(ev)).second); +} + void TQueueLeader::HandleSendMessageBatchWhileWorking(TSqsEvents::TEvSendMessageBatch::TPtr& ev) { - TString reqId = ev->Get()->RequestId; - auto [reqIter, inserted] = SendMessageRequests_.emplace(std::move(reqId), std::move(ev)); - Y_VERIFY(inserted); - ProcessSendMessageBatch(reqIter->second); -} - + TString reqId = ev->Get()->RequestId; + auto [reqIter, inserted] = SendMessageRequests_.emplace(std::move(reqId), std::move(ev)); + Y_VERIFY(inserted); + ProcessSendMessageBatch(reqIter->second); +} + void TQueueLeader::ProcessSendMessageBatch(TSendMessageBatchRequestProcessing& reqInfo) { - reqInfo.Init(ShardsCount_); // init if not inited - if (!IncActiveMessageRequests(reqInfo.Shard, reqInfo.Event->Get()->RequestId)) { - return; - } - - auto& shardInfo = Shards_[reqInfo.Shard]; - shardInfo.SendBatchingState.AddRequest(reqInfo); - shardInfo.SendBatchingState.TryExecute(this); -} - + reqInfo.Init(ShardsCount_); // init if not inited + if (!IncActiveMessageRequests(reqInfo.Shard, reqInfo.Event->Get()->RequestId)) { + return; + } + + auto& shardInfo = Shards_[reqInfo.Shard]; + shardInfo.SendBatchingState.AddRequest(reqInfo); + shardInfo.SendBatchingState.TryExecute(this); +} + void TQueueLeader::OnMessageSent(const TString& requestId, size_t index, const TSqsEvents::TEvExecuted::TRecord& reply, const NKikimr::NClient::TValue* messageRecord) { - auto reqInfoIt = SendMessageRequests_.find(requestId); - Y_VERIFY(reqInfoIt != SendMessageRequests_.end()); - auto& reqInfo = reqInfoIt->second; - const ui64 shard = reqInfo.Shard; - auto& messageStatus = reqInfo.Statuses[index]; - auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); - RLOG_SQS_REQ_TRACE(reqInfo.Event->Get()->RequestId, "Received reply from DB: " << status); - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - if (!(*messageRecord)["dedupCond"]) { - // A message with same deduplication id - // has already been sent. 
- if (IsFifoQueue_) { - messageStatus.SequenceNumber = (*messageRecord)["dedupSelect"]["Offset"]; - } - messageStatus.MessageId = (*messageRecord)["dedupSelect"]["MessageId"]; - messageStatus.Status = TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::AlreadySent; - } else { - if (IsFifoQueue_) { - messageStatus.SequenceNumber = (*messageRecord)["offset"]; - } - messageStatus.MessageId = reqInfo.Event->Get()->Messages[index].MessageId; - messageStatus.Status = TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::OK; - } - } else { - messageStatus.Status = TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::Failed; - } - - ++reqInfo.AnswersGot; - if (reqInfo.AnswersGot == reqInfo.Statuses.size()) { - auto answer = MakeHolder<TSqsEvents::TEvSendMessageBatchResponse>(); - answer->Statuses.swap(reqInfo.Statuses); - Send(reqInfo.Event->Sender, answer.Release()); - SendMessageRequests_.erase(reqInfo.Event->Get()->RequestId); - DecActiveMessageRequests(shard); - } -} - + auto reqInfoIt = SendMessageRequests_.find(requestId); + Y_VERIFY(reqInfoIt != SendMessageRequests_.end()); + auto& reqInfo = reqInfoIt->second; + const ui64 shard = reqInfo.Shard; + auto& messageStatus = reqInfo.Statuses[index]; + auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); + RLOG_SQS_REQ_TRACE(reqInfo.Event->Get()->RequestId, "Received reply from DB: " << status); + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + if (!(*messageRecord)["dedupCond"]) { + // A message with same deduplication id + // has already been sent. + if (IsFifoQueue_) { + messageStatus.SequenceNumber = (*messageRecord)["dedupSelect"]["Offset"]; + } + messageStatus.MessageId = (*messageRecord)["dedupSelect"]["MessageId"]; + messageStatus.Status = TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::AlreadySent; + } else { + if (IsFifoQueue_) { + messageStatus.SequenceNumber = (*messageRecord)["offset"]; + } + messageStatus.MessageId = reqInfo.Event->Get()->Messages[index].MessageId; + messageStatus.Status = TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::OK; + } + } else { + messageStatus.Status = TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::Failed; + } + + ++reqInfo.AnswersGot; + if (reqInfo.AnswersGot == reqInfo.Statuses.size()) { + auto answer = MakeHolder<TSqsEvents::TEvSendMessageBatchResponse>(); + answer->Statuses.swap(reqInfo.Statuses); + Send(reqInfo.Event->Sender, answer.Release()); + SendMessageRequests_.erase(reqInfo.Event->Get()->RequestId); + DecActiveMessageRequests(shard); + } +} + void TQueueLeader::OnSendBatchExecuted(ui64 shard, ui64 batchId, const TSqsEvents::TEvExecuted::TRecord& reply) { - auto& shardInfo = Shards_[shard]; - auto& batchingState = shardInfo.SendBatchingState; - auto batchIt = batchingState.BatchesExecuting.find(batchId); - Y_VERIFY(batchIt != batchingState.BatchesExecuting.end()); - auto batch = batchIt->second; - auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); - const TValue result(val["result"]); - Y_VERIFY(result.Size() == batch->Size()); - for (size_t i = 0; i < batch->Size(); ++i) { - const TSendBatchEntry& entry = batch->Entries[i]; - auto messageResult = result[i]; - OnMessageSent(entry.RequestId, entry.IndexInRequest, reply, 
&messageResult); - if (entry.Message.Delay) { - DelayStatistics_.AddDelayedMessage(batch->TransactionStartedTime + entry.Message.Delay, batch->TransactionStartedTime); - } - } - if (!IsFifoQueue_) { - const i64 newMessagesCount = val["newMessagesCount"]; - Y_VERIFY(newMessagesCount >= 0); - shardInfo.MessagesCount = static_cast<ui64>(newMessagesCount); - } - } else { - const TString* prevRequestId = nullptr; - for (size_t i = 0; i < batch->Size(); ++i) { - const TSendBatchEntry& entry = batch->Entries[i]; - if (!prevRequestId || *prevRequestId != entry.RequestId) { - prevRequestId = &entry.RequestId; - RLOG_SQS_REQ_ERROR(entry.RequestId, "Batch transaction failed: " << reply << ". BatchId: " << batch->BatchId); - } - OnMessageSent(entry.RequestId, entry.IndexInRequest, reply, nullptr); - } - } - batchingState.BatchesExecuting.erase(batchId); - batchingState.TryExecute(this); -} - + auto& shardInfo = Shards_[shard]; + auto& batchingState = shardInfo.SendBatchingState; + auto batchIt = batchingState.BatchesExecuting.find(batchId); + Y_VERIFY(batchIt != batchingState.BatchesExecuting.end()); + auto batch = batchIt->second; + auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); + const TValue result(val["result"]); + Y_VERIFY(result.Size() == batch->Size()); + for (size_t i = 0; i < batch->Size(); ++i) { + const TSendBatchEntry& entry = batch->Entries[i]; + auto messageResult = result[i]; + OnMessageSent(entry.RequestId, entry.IndexInRequest, reply, &messageResult); + if (entry.Message.Delay) { + DelayStatistics_.AddDelayedMessage(batch->TransactionStartedTime + entry.Message.Delay, batch->TransactionStartedTime); + } + } + if (!IsFifoQueue_) { + const i64 newMessagesCount = val["newMessagesCount"]; + Y_VERIFY(newMessagesCount >= 0); + shardInfo.MessagesCount = static_cast<ui64>(newMessagesCount); + } + } else { + const TString* prevRequestId = nullptr; + for (size_t i = 0; i < batch->Size(); ++i) { + const TSendBatchEntry& entry = batch->Entries[i]; + if (!prevRequestId || *prevRequestId != entry.RequestId) { + prevRequestId = &entry.RequestId; + RLOG_SQS_REQ_ERROR(entry.RequestId, "Batch transaction failed: " << reply << ". 
BatchId: " << batch->BatchId); + } + OnMessageSent(entry.RequestId, entry.IndexInRequest, reply, nullptr); + } + } + batchingState.BatchesExecuting.erase(batchId); + batchingState.TryExecute(this); +} + void TQueueLeader::HandleReceiveMessageBatchWhileIniting(TSqsEvents::TEvReceiveMessageBatch::TPtr& ev) { - TString reqId = ev->Get()->RequestId; - Y_VERIFY(ReceiveMessageRequests_.emplace(std::move(reqId), std::move(ev)).second); -} - + TString reqId = ev->Get()->RequestId; + Y_VERIFY(ReceiveMessageRequests_.emplace(std::move(reqId), std::move(ev)).second); +} + void TQueueLeader::HandleReceiveMessageBatchWhileWorking(TSqsEvents::TEvReceiveMessageBatch::TPtr& ev) { - TString reqId = ev->Get()->RequestId; - auto [reqIter, inserted] = ReceiveMessageRequests_.emplace(std::move(reqId), std::move(ev)); - Y_VERIFY(inserted); - ProcessReceiveMessageBatch(reqIter->second); -} - + TString reqId = ev->Get()->RequestId; + auto [reqIter, inserted] = ReceiveMessageRequests_.emplace(std::move(reqId), std::move(ev)); + Y_VERIFY(inserted); + ProcessReceiveMessageBatch(reqIter->second); +} + void TQueueLeader::ProcessReceiveMessageBatch(TReceiveMessageBatchRequestProcessing& reqInfo) { - reqInfo.Init(ShardsCount_); // init if not inited - - if (reqInfo.WaitingAddMessagesToInfly) { - return; - } - - if (!IncActiveMessageRequests(reqInfo.GetCurrentShard(), reqInfo.Event->Get()->RequestId)) { - return; - } - if (IsFifoQueue_) { - reqInfo.LockedFifoMessages.reserve(reqInfo.Event->Get()->MaxMessagesCount); - LockFifoGroup(reqInfo); - } else { - GetMessagesFromInfly(reqInfo); - } -} - + reqInfo.Init(ShardsCount_); // init if not inited + + if (reqInfo.WaitingAddMessagesToInfly) { + return; + } + + if (!IncActiveMessageRequests(reqInfo.GetCurrentShard(), reqInfo.Event->Get()->RequestId)) { + return; + } + if (IsFifoQueue_) { + reqInfo.LockedFifoMessages.reserve(reqInfo.Event->Get()->MaxMessagesCount); + LockFifoGroup(reqInfo); + } else { + GetMessagesFromInfly(reqInfo); + } +} + void TQueueLeader::LockFifoGroup(TReceiveMessageBatchRequestProcessing& reqInfo) { - reqInfo.LockSendTs = TActivationContext::Now(); - auto onExecuted = [this, requestId = reqInfo.Event->Get()->RequestId] (const TSqsEvents::TEvExecuted::TRecord& ev) { - OnFifoGroupLocked(requestId, ev); - }; - - TExecutorBuilder(SelfId(), reqInfo.Event->Get()->RequestId) - .User(UserName_) - .Queue(QueueName_) - .QueueVersion(QueueVersion_) - .Fifo(IsFifoQueue_) + reqInfo.LockSendTs = TActivationContext::Now(); + auto onExecuted = [this, requestId = reqInfo.Event->Get()->RequestId] (const TSqsEvents::TEvExecuted::TRecord& ev) { + OnFifoGroupLocked(requestId, ev); + }; + + TExecutorBuilder(SelfId(), reqInfo.Event->Get()->RequestId) + .User(UserName_) + .Queue(QueueName_) + .QueueVersion(QueueVersion_) + .Fifo(IsFifoQueue_) .QueueLeader(SelfId()) - .QueryId(LOCK_GROUP_ID) - .Counters(Counters_) - .RetryOnTimeout() - .OnExecuted(onExecuted) - .Params() - .Uint64("NOW", reqInfo.LockSendTs.MilliSeconds()) - .Utf8("ATTEMPT_ID", reqInfo.Event->Get()->ReceiveAttemptId) - .Uint64("COUNT", reqInfo.Event->Get()->MaxMessagesCount - reqInfo.LockedFifoMessages.size()) - .Uint64("VISIBILITY_TIMEOUT", reqInfo.Event->Get()->VisibilityTimeout.MilliSeconds()) - .Uint64("GROUPS_READ_ATTEMPT_IDS_PERIOD", Cfg().GetGroupsReadAttemptIdsPeriodMs()) - .String("FROM_GROUP", reqInfo.FromGroup) - .Uint64("BATCH_SIZE", Cfg().GetGroupSelectionBatchSize()) - .ParentBuilder().Start(); -} - + .QueryId(LOCK_GROUP_ID) + .Counters(Counters_) + .RetryOnTimeout() + .OnExecuted(onExecuted) + 
.Params() + .Uint64("NOW", reqInfo.LockSendTs.MilliSeconds()) + .Utf8("ATTEMPT_ID", reqInfo.Event->Get()->ReceiveAttemptId) + .Uint64("COUNT", reqInfo.Event->Get()->MaxMessagesCount - reqInfo.LockedFifoMessages.size()) + .Uint64("VISIBILITY_TIMEOUT", reqInfo.Event->Get()->VisibilityTimeout.MilliSeconds()) + .Uint64("GROUPS_READ_ATTEMPT_IDS_PERIOD", Cfg().GetGroupsReadAttemptIdsPeriodMs()) + .String("FROM_GROUP", reqInfo.FromGroup) + .Uint64("BATCH_SIZE", Cfg().GetGroupSelectionBatchSize()) + .ParentBuilder().Start(); +} + void TQueueLeader::OnFifoGroupLocked(const TString& requestId, const TSqsEvents::TEvExecuted::TRecord& ev) { - auto reqInfoIt = ReceiveMessageRequests_.find(requestId); - Y_VERIFY(reqInfoIt != ReceiveMessageRequests_.end()); - auto& reqInfo = reqInfoIt->second; - - if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); - const TValue offsets(val["offsets"]); - const bool appliedOldAttemptId = val["sameCond"]; - const bool truncated = val["truncated"]; - if (truncated) { - const TValue lastProcessedGroup = val["lastProcessedGroup"]; - reqInfo.FromGroup = lastProcessedGroup["GroupId"]; - } - - for (size_t i = 0; i < offsets.Size(); ++i) { - reqInfo.LockedFifoMessages.emplace_back(); - auto& msg = reqInfo.LockedFifoMessages.back(); - msg.RandomId = offsets[i]["RandomId"]; - msg.Offset = offsets[i]["Head"]; + auto reqInfoIt = ReceiveMessageRequests_.find(requestId); + Y_VERIFY(reqInfoIt != ReceiveMessageRequests_.end()); + auto& reqInfo = reqInfoIt->second; + + if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); + const TValue offsets(val["offsets"]); + const bool appliedOldAttemptId = val["sameCond"]; + const bool truncated = val["truncated"]; + if (truncated) { + const TValue lastProcessedGroup = val["lastProcessedGroup"]; + reqInfo.FromGroup = lastProcessedGroup["GroupId"]; + } + + for (size_t i = 0; i < offsets.Size(); ++i) { + reqInfo.LockedFifoMessages.emplace_back(); + auto& msg = reqInfo.LockedFifoMessages.back(); + msg.RandomId = offsets[i]["RandomId"]; + msg.Offset = offsets[i]["Head"]; msg.GroupId = offsets[i]["GroupId"]; - } - - if (truncated) { - if (reqInfo.LockedFifoMessages.empty() || appliedOldAttemptId && reqInfo.Event->Get()->MaxMessagesCount > reqInfo.LockedFifoMessages.size()) { - LockFifoGroup(reqInfo); - } else { - ReadFifoMessages(reqInfo); - } - } else { - if (reqInfo.LockedFifoMessages.empty()) { - Reply(reqInfo); - } else { - ReadFifoMessages(reqInfo); - } - } - } else { - reqInfo.Answer->Failed = true; - Reply(reqInfo); - } -} - + } + + if (truncated) { + if (reqInfo.LockedFifoMessages.empty() || appliedOldAttemptId && reqInfo.Event->Get()->MaxMessagesCount > reqInfo.LockedFifoMessages.size()) { + LockFifoGroup(reqInfo); + } else { + ReadFifoMessages(reqInfo); + } + } else { + if (reqInfo.LockedFifoMessages.empty()) { + Reply(reqInfo); + } else { + ReadFifoMessages(reqInfo); + } + } + } else { + reqInfo.Answer->Failed = true; + Reply(reqInfo); + } +} + void TQueueLeader::ReadFifoMessages(TReceiveMessageBatchRequestProcessing& reqInfo) { ui32 maxReceiveCount = 0; // not set - if (Cfg().GetEnableDeadLetterQueues() && DlqInfo_) { + if (Cfg().GetEnableDeadLetterQueues() && DlqInfo_) { const auto& dlqInfo(*DlqInfo_); if (dlqInfo.DlqName && dlqInfo.QueueId) { 
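A minimal sketch of the gating just above, using the hypothetical names TResolvedDlq and EffectiveMaxReceiveCount: redrive to a dead-letter queue is considered only when the feature is enabled and the DLQ name and id are resolved; otherwise maxReceiveCount stays 0 and the plain read path is taken, as the check that continues below confirms.

    #include <cstdint>
    #include <string>

    // Hypothetical mirror of the gate above: a resolved DLQ is required before the
    // heavy read-or-redrive transaction is even considered.
    struct TResolvedDlq {
        std::string Name;
        std::string Id;
        uint32_t MaxReceiveCount = 0;
    };

    inline uint32_t EffectiveMaxReceiveCount(bool dlqFeatureEnabled, const TResolvedDlq* dlq) {
        if (dlqFeatureEnabled && dlq && !dlq->Name.empty() && !dlq->Id.empty()) {
            return dlq->MaxReceiveCount; // > 0 later selects the read-or-redrive query
        }
        return 0; // no usable DLQ: ordinary read, nothing is moved
    }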
// dlq is set and resolved @@ -588,25 +588,25 @@ void TQueueLeader::ReadFifoMessages(TReceiveMessageBatchRequestProcessing& reqIn } } - TExecutorBuilder builder(SelfId(), reqInfo.Event->Get()->RequestId); - builder - .User(UserName_) - .Queue(QueueName_) - .QueueVersion(QueueVersion_) - .Fifo(IsFifoQueue_) + TExecutorBuilder builder(SelfId(), reqInfo.Event->Get()->RequestId); + builder + .User(UserName_) + .Queue(QueueName_) + .QueueVersion(QueueVersion_) + .Fifo(IsFifoQueue_) .QueueLeader(SelfId()) - .Counters(Counters_) - .RetryOnTimeout(); - - NClient::TWriteValue params = builder.ParamsValue(); - params["NOW"] = ui64(TActivationContext::Now().MilliSeconds()); + .Counters(Counters_) + .RetryOnTimeout(); + + NClient::TWriteValue params = builder.ParamsValue(); + params["NOW"] = ui64(TActivationContext::Now().MilliSeconds()); ui64 index = 0; THashSet<TString> usedGroups; // mitigates extremely rare bug with duplicated groups during locking - for (const auto& msg : reqInfo.LockedFifoMessages) { + for (const auto& msg : reqInfo.LockedFifoMessages) { if (usedGroups.insert(msg.GroupId).second) { auto key = params["KEYS"].AddListItem(); - + key["RandomId"] = msg.RandomId; key["Offset"] = msg.Offset; @@ -615,14 +615,14 @@ void TQueueLeader::ReadFifoMessages(TReceiveMessageBatchRequestProcessing& reqIn key["Index"] = index++; } } - } - + } + if (maxReceiveCount) { // perform heavy read and move transaction (DLQ) Y_VERIFY(DlqInfo_); - const TQueuePath currentQueuePath = { Cfg().GetRoot(), UserName_, QueueName_, QueueVersion_ }; - const TQueuePath deadLetterQueuePath = { Cfg().GetRoot(), UserName_, DlqInfo_->QueueId, DlqInfo_->QueueVersion }; + const TQueuePath currentQueuePath = { Cfg().GetRoot(), UserName_, QueueName_, QueueVersion_ }; + const TQueuePath deadLetterQueuePath = { Cfg().GetRoot(), UserName_, DlqInfo_->QueueId, DlqInfo_->QueueVersion }; const TString transactionText = Sprintf(GetFifoQueryById(READ_OR_REDRIVE_MESSAGE_ID), currentQueuePath.GetVersionedQueuePath().c_str(), @@ -640,67 +640,67 @@ void TQueueLeader::ReadFifoMessages(TReceiveMessageBatchRequestProcessing& reqIn const bool usedDLQ = maxReceiveCount > 0; - builder.OnExecuted([this, requestId = reqInfo.Event->Get()->RequestId, usedDLQ] (const TSqsEvents::TEvExecuted::TRecord& ev) { - OnFifoMessagesRead(requestId, ev, usedDLQ); - }); - - builder.Start(); -} - + builder.OnExecuted([this, requestId = reqInfo.Event->Get()->RequestId, usedDLQ] (const TSqsEvents::TEvExecuted::TRecord& ev) { + OnFifoMessagesRead(requestId, ev, usedDLQ); + }); + + builder.Start(); +} + void TQueueLeader::OnFifoMessagesRead(const TString& requestId, const TSqsEvents::TEvExecuted::TRecord& ev, const bool usedDLQ) { - auto reqInfoIt = ReceiveMessageRequests_.find(requestId); - Y_VERIFY(reqInfoIt != ReceiveMessageRequests_.end()); - auto& reqInfo = reqInfoIt->second; - - if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue value(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); - const TValue list(value["result"]); - + auto reqInfoIt = ReceiveMessageRequests_.find(requestId); + Y_VERIFY(reqInfoIt != ReceiveMessageRequests_.end()); + auto& reqInfo = reqInfoIt->second; + + if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue value(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); + const TValue list(value["result"]); + if (const ui64 movedMessagesCount = 
value["movedMessagesCount"]) { ADD_COUNTER(Counters_, MessagesMovedToDLQ, movedMessagesCount); - - const i64 newMessagesCount = value["newMessagesCount"]; - Y_VERIFY(newMessagesCount >= 0); - auto& shardInfo = Shards_[0]; - shardInfo.MessagesCount = static_cast<ui64>(newMessagesCount); + + const i64 newMessagesCount = value["newMessagesCount"]; + Y_VERIFY(newMessagesCount >= 0); + auto& shardInfo = Shards_[0]; + shardInfo.MessagesCount = static_cast<ui64>(newMessagesCount); } - reqInfo.Answer->Messages.resize(list.Size()); - for (size_t i = 0; i < list.Size(); ++i) { + reqInfo.Answer->Messages.resize(list.Size()); + for (size_t i = 0; i < list.Size(); ++i) { const TValue& data = list[i]["SourceDataFieldsRead"]; const TValue& msg = list[i]["SourceMessageFieldsRead"]; - const ui64 receiveTimestamp = msg["FirstReceiveTimestamp"]; - auto& msgAnswer = reqInfo.Answer->Messages[i]; - - msgAnswer.FirstReceiveTimestamp = (receiveTimestamp ? TInstant::MilliSeconds(receiveTimestamp) : reqInfo.LockSendTs); + const ui64 receiveTimestamp = msg["FirstReceiveTimestamp"]; + auto& msgAnswer = reqInfo.Answer->Messages[i]; + + msgAnswer.FirstReceiveTimestamp = (receiveTimestamp ? TInstant::MilliSeconds(receiveTimestamp) : reqInfo.LockSendTs); msgAnswer.ReceiveCount = ui32(msg["ReceiveCount"]) + 1; // since the query returns old receive count value - msgAnswer.MessageId = data["MessageId"]; - msgAnswer.MessageDeduplicationId = data["DedupId"]; - msgAnswer.MessageGroupId = msg["GroupId"]; - msgAnswer.Data = data["Data"]; - msgAnswer.SentTimestamp = TInstant::MilliSeconds(ui64(msg["SentTimestamp"])); - msgAnswer.SequenceNumber = msg["Offset"]; - - msgAnswer.ReceiptHandle.SetMessageGroupId(TString(msg["GroupId"])); - msgAnswer.ReceiptHandle.SetOffset(msgAnswer.SequenceNumber); - msgAnswer.ReceiptHandle.SetReceiveRequestAttemptId(reqInfo.Event->Get()->ReceiveAttemptId); - msgAnswer.ReceiptHandle.SetLockTimestamp(reqInfo.LockSendTs.MilliSeconds()); - msgAnswer.ReceiptHandle.SetShard(0); - - const TValue senderIdValue = data["SenderId"]; - if (senderIdValue.HaveValue()) { - if (const TString senderId = TString(senderIdValue)) { - msgAnswer.SenderId = senderId; - } - } - - const TValue attributesValue = data["Attributes"]; - if (attributesValue.HaveValue()) { - msgAnswer.MessageAttributes = attributesValue; - } - } - } else { + msgAnswer.MessageId = data["MessageId"]; + msgAnswer.MessageDeduplicationId = data["DedupId"]; + msgAnswer.MessageGroupId = msg["GroupId"]; + msgAnswer.Data = data["Data"]; + msgAnswer.SentTimestamp = TInstant::MilliSeconds(ui64(msg["SentTimestamp"])); + msgAnswer.SequenceNumber = msg["Offset"]; + + msgAnswer.ReceiptHandle.SetMessageGroupId(TString(msg["GroupId"])); + msgAnswer.ReceiptHandle.SetOffset(msgAnswer.SequenceNumber); + msgAnswer.ReceiptHandle.SetReceiveRequestAttemptId(reqInfo.Event->Get()->ReceiveAttemptId); + msgAnswer.ReceiptHandle.SetLockTimestamp(reqInfo.LockSendTs.MilliSeconds()); + msgAnswer.ReceiptHandle.SetShard(0); + + const TValue senderIdValue = data["SenderId"]; + if (senderIdValue.HaveValue()) { + if (const TString senderId = TString(senderIdValue)) { + msgAnswer.SenderId = senderId; + } + } + + const TValue attributesValue = data["Attributes"]; + if (attributesValue.HaveValue()) { + msgAnswer.MessageAttributes = attributesValue; + } + } + } else { const auto errStatus = NKikimr::NTxProxy::TResultStatus::EStatus(ev.GetStatus()); if (usedDLQ && !NTxProxy::TResultStatus::IsSoftErrorWithoutSideEffects(errStatus)) { // it's possible that DLQ was removed, hence it'd be wise 
to refresh corresponding info @@ -710,163 +710,163 @@ void TQueueLeader::OnFifoMessagesRead(const TString& requestId, const TSqsEvents } else { reqInfo.Answer->Failed = true; } - } - - Reply(reqInfo); -} - + } + + Reply(reqInfo); +} + void TQueueLeader::GetMessagesFromInfly(TReceiveMessageBatchRequestProcessing& reqInfo) { - reqInfo.LockSendTs = TActivationContext::Now(); - Y_VERIFY(reqInfo.GetCurrentShard() < Shards_.size()); - const ui64 shard = reqInfo.GetCurrentShard(); - auto& shardInfo = Shards_[shard]; - reqInfo.ReceiveCandidates = shardInfo.Infly->Receive(reqInfo.Event->Get()->MaxMessagesCount, reqInfo.LockSendTs); - if (reqInfo.ReceiveCandidates) { - LoadStdMessages(reqInfo); - } else { - RLOG_SQS_REQ_DEBUG(reqInfo.Event->Get()->RequestId, "Received empty result from shard " << shard << " infly. Infly capacity: " << shardInfo.Infly->GetCapacity() - << ". Messages count: " << shardInfo.MessagesCount); - if (shardInfo.Infly->GetCapacity() >= INFLY_LIMIT / ShardsCount_) { - reqInfo.Answer->OverLimit = true; - Reply(reqInfo); - } else { - WaitAddMessagesToInflyOrTryAnotherShard(reqInfo); - } - } -} - + reqInfo.LockSendTs = TActivationContext::Now(); + Y_VERIFY(reqInfo.GetCurrentShard() < Shards_.size()); + const ui64 shard = reqInfo.GetCurrentShard(); + auto& shardInfo = Shards_[shard]; + reqInfo.ReceiveCandidates = shardInfo.Infly->Receive(reqInfo.Event->Get()->MaxMessagesCount, reqInfo.LockSendTs); + if (reqInfo.ReceiveCandidates) { + LoadStdMessages(reqInfo); + } else { + RLOG_SQS_REQ_DEBUG(reqInfo.Event->Get()->RequestId, "Received empty result from shard " << shard << " infly. Infly capacity: " << shardInfo.Infly->GetCapacity() + << ". Messages count: " << shardInfo.MessagesCount); + if (shardInfo.Infly->GetCapacity() >= INFLY_LIMIT / ShardsCount_) { + reqInfo.Answer->OverLimit = true; + Reply(reqInfo); + } else { + WaitAddMessagesToInflyOrTryAnotherShard(reqInfo); + } + } +} + void TQueueLeader::LoadStdMessages(TReceiveMessageBatchRequestProcessing& reqInfo) { - const ui64 shard = reqInfo.GetCurrentShard(); - auto& shardInfo = Shards_[shard]; - RLOG_SQS_REQ_DEBUG(reqInfo.Event->Get()->RequestId, "Reading messages. Shard: " << shard); - shardInfo.LoadBatchingState.AddRequest(reqInfo); - shardInfo.LoadBatchingState.TryExecute(this); - for (auto i = reqInfo.ReceiveCandidates.Begin(), end = reqInfo.ReceiveCandidates.End(); i != end; ++i) { - ++reqInfo.LoadAnswersLeft; // these iterators doesn't support difference_type for std::distance - } -} - + const ui64 shard = reqInfo.GetCurrentShard(); + auto& shardInfo = Shards_[shard]; + RLOG_SQS_REQ_DEBUG(reqInfo.Event->Get()->RequestId, "Reading messages. 
Shard: " << shard); + shardInfo.LoadBatchingState.AddRequest(reqInfo); + shardInfo.LoadBatchingState.TryExecute(this); + for (auto i = reqInfo.ReceiveCandidates.Begin(), end = reqInfo.ReceiveCandidates.End(); i != end; ++i) { + ++reqInfo.LoadAnswersLeft; // these iterators doesn't support difference_type for std::distance + } +} + void TQueueLeader::OnLoadStdMessageResult(const TString& requestId, const ui64 offset, const TSqsEvents::TEvExecuted::TRecord& ev, const NKikimr::NClient::TValue* messageRecord, const bool ignoreMessageLoadingErrors) { - auto reqInfoIt = ReceiveMessageRequests_.find(requestId); - Y_VERIFY(reqInfoIt != ReceiveMessageRequests_.end()); - auto& reqInfo = reqInfoIt->second; - - --reqInfo.LoadAnswersLeft; - if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - bool deleted = true; - bool deadlineChanged = true; - const bool exists = (*messageRecord)["Exists"]; + auto reqInfoIt = ReceiveMessageRequests_.find(requestId); + Y_VERIFY(reqInfoIt != ReceiveMessageRequests_.end()); + auto& reqInfo = reqInfoIt->second; + + --reqInfo.LoadAnswersLeft; + if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + bool deleted = true; + bool deadlineChanged = true; + const bool exists = (*messageRecord)["Exists"]; const auto wasDeadLetterValue = (*messageRecord)["IsDeadLetter"]; const bool wasDeadLetter = wasDeadLetterValue.HaveValue() ? bool(wasDeadLetterValue) : false; - const bool valid = (*messageRecord)["Valid"]; + const bool valid = (*messageRecord)["Valid"]; if (exists && !wasDeadLetter) { - const ui64 visibilityDeadlineMs = (*messageRecord)["VisibilityDeadline"]; + const ui64 visibilityDeadlineMs = (*messageRecord)["VisibilityDeadline"]; const ui32 receiveCount = (*messageRecord)["ReceiveCount"]; - const TInstant visibilityDeadline = TInstant::MilliSeconds(visibilityDeadlineMs); + const TInstant visibilityDeadline = TInstant::MilliSeconds(visibilityDeadlineMs); // Update actual visibility deadline and receive count even if this message won't be given to user in this request. - // It prevents such synchronization errors later. + // It prevents such synchronization errors later. 
reqInfo.ReceiveCandidates.SetVisibilityDeadlineAndReceiveCount(offset, visibilityDeadline, receiveCount); - - if (valid && reqInfo.ReceiveCandidates.Has(offset)) { // there may be concurrent successful delete message request (purge) - reqInfo.Answer->Messages.emplace_back(); - auto& msgAnswer = reqInfo.Answer->Messages.back(); - - msgAnswer.ReceiptHandle.SetOffset(offset); - msgAnswer.ReceiptHandle.SetLockTimestamp(ui64((*messageRecord)["LockTimestamp"])); - msgAnswer.ReceiptHandle.SetShard(reqInfo.GetCurrentShard()); - - msgAnswer.FirstReceiveTimestamp = TInstant::MilliSeconds(ui64((*messageRecord)["FirstReceiveTimestamp"])); - msgAnswer.ReceiveCount = receiveCount; - msgAnswer.MessageId = (*messageRecord)["MessageId"]; - msgAnswer.Data = TString((*messageRecord)["Data"]); - msgAnswer.SentTimestamp = TInstant::MilliSeconds(ui64((*messageRecord)["SentTimestamp"])); - - const NKikimr::NClient::TValue senderIdValue = (*messageRecord)["SenderId"]; - if (senderIdValue.HaveValue()) { - if (const TString senderId = TString(senderIdValue)) { - msgAnswer.SenderId = std::move(senderId); - } - } - - const NKikimr::NClient::TValue attributesValue = (*messageRecord)["Attributes"]; - if (attributesValue.HaveValue()) { - msgAnswer.MessageAttributes = attributesValue; - } - } else { - deadlineChanged = true; + + if (valid && reqInfo.ReceiveCandidates.Has(offset)) { // there may be concurrent successful delete message request (purge) + reqInfo.Answer->Messages.emplace_back(); + auto& msgAnswer = reqInfo.Answer->Messages.back(); + + msgAnswer.ReceiptHandle.SetOffset(offset); + msgAnswer.ReceiptHandle.SetLockTimestamp(ui64((*messageRecord)["LockTimestamp"])); + msgAnswer.ReceiptHandle.SetShard(reqInfo.GetCurrentShard()); + + msgAnswer.FirstReceiveTimestamp = TInstant::MilliSeconds(ui64((*messageRecord)["FirstReceiveTimestamp"])); + msgAnswer.ReceiveCount = receiveCount; + msgAnswer.MessageId = (*messageRecord)["MessageId"]; + msgAnswer.Data = TString((*messageRecord)["Data"]); + msgAnswer.SentTimestamp = TInstant::MilliSeconds(ui64((*messageRecord)["SentTimestamp"])); + + const NKikimr::NClient::TValue senderIdValue = (*messageRecord)["SenderId"]; + if (senderIdValue.HaveValue()) { + if (const TString senderId = TString(senderIdValue)) { + msgAnswer.SenderId = std::move(senderId); + } + } + + const NKikimr::NClient::TValue attributesValue = (*messageRecord)["Attributes"]; + if (attributesValue.HaveValue()) { + msgAnswer.MessageAttributes = attributesValue; + } + } else { + deadlineChanged = true; RLOG_SQS_REQ_WARN(requestId, "Attempted to receive message that was received by another leader's request. Shard: " << reqInfo.GetCurrentShard() - << ". Offset: " << offset << ". Visibility deadline: " << visibilityDeadline); - } - } else { - if (exists) { // dlq - deadlineChanged = !valid; - } - if (reqInfo.ReceiveCandidates.Delete(offset)) { - if (wasDeadLetter) { - deleted = false; // Success, not invalidated - } else { - RLOG_SQS_REQ_WARN(requestId, "Attempted to receive message that was deleted. Shard: " << reqInfo.GetCurrentShard() << ". Offset: " << offset); + << ". Offset: " << offset << ". Visibility deadline: " << visibilityDeadline); + } + } else { + if (exists) { // dlq + deadlineChanged = !valid; + } + if (reqInfo.ReceiveCandidates.Delete(offset)) { + if (wasDeadLetter) { + deleted = false; // Success, not invalidated + } else { + RLOG_SQS_REQ_WARN(requestId, "Attempted to receive message that was deleted. Shard: " << reqInfo.GetCurrentShard() << ". 
Offset: " << offset); deleted = true; } } // else there was concurrent delete (purge) by this leader, => OK - } - const bool invalidated = deleted || deadlineChanged; - if (invalidated) { - auto* detailedCounters = Counters_->GetDetailedCounters(); - INC_COUNTER(detailedCounters, ReceiveMessage_KeysInvalidated); - const TString& reason = deleted ? INFLY_INVALIDATION_REASON_DELETED : INFLY_INVALIDATION_REASON_DEADLINE_CHANGED; - MarkInflyReloading(reqInfo.GetCurrentShard(), 1, reason); - } - } else { + } + const bool invalidated = deleted || deadlineChanged; + if (invalidated) { + auto* detailedCounters = Counters_->GetDetailedCounters(); + INC_COUNTER(detailedCounters, ReceiveMessage_KeysInvalidated); + const TString& reason = deleted ? INFLY_INVALIDATION_REASON_DELETED : INFLY_INVALIDATION_REASON_DEADLINE_CHANGED; + MarkInflyReloading(reqInfo.GetCurrentShard(), 1, reason); + } + } else { reqInfo.LoadError = !ignoreMessageLoadingErrors; // there may be other successful loads - } - - if (reqInfo.LoadAnswersLeft == 0) { - if (reqInfo.Answer->Messages.empty() && reqInfo.LoadError) { - reqInfo.Answer->Failed = true; - } - Reply(reqInfo); - } -} - + } + + if (reqInfo.LoadAnswersLeft == 0) { + if (reqInfo.Answer->Messages.empty() && reqInfo.LoadError) { + reqInfo.Answer->Failed = true; + } + Reply(reqInfo); + } +} + void TQueueLeader::OnLoadStdMessagesBatchExecuted(ui64 shard, ui64 batchId, const bool usedDLQ, const TSqsEvents::TEvExecuted::TRecord& reply) { - auto& shardInfo = Shards_[shard]; - auto& batchingState = shardInfo.LoadBatchingState; - auto batchIt = batchingState.BatchesExecuting.find(batchId); - Y_VERIFY(batchIt != batchingState.BatchesExecuting.end()); - auto batch = batchIt->second; - auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); + auto& shardInfo = Shards_[shard]; + auto& batchingState = shardInfo.LoadBatchingState; + auto batchIt = batchingState.BatchesExecuting.find(batchId); + Y_VERIFY(batchIt != batchingState.BatchesExecuting.end()); + auto batch = batchIt->second; + auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); bool ignoreMessageLoadingErrors = false; - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue value(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); - const TValue list(value["result"]); - Y_VERIFY(list.Size() == batch->Size()); - + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue value(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); + const TValue list(value["result"]); + Y_VERIFY(list.Size() == batch->Size()); + if (const ui64 movedMessagesCount = value["movedMessagesCount"]) { ADD_COUNTER(Counters_, MessagesMovedToDLQ, movedMessagesCount); - - const i64 newMessagesCount = value["newMessagesCount"]; - Y_VERIFY(newMessagesCount >= 0); - shardInfo.MessagesCount = static_cast<ui64>(newMessagesCount); - } - - THashMap<ui64, const TLoadBatchEntry*> offset2entry; - offset2entry.reserve(batch->Entries.size()); - for (const TLoadBatchEntry& entry : batch->Entries) { - offset2entry.emplace(entry.Offset, &entry); - } - - for (size_t i = 0; i < list.Size(); ++i) { - auto msg = list[i]; - const ui64 offset = msg["Offset"]; - const auto entry = offset2entry.find(offset); - Y_VERIFY(entry != offset2entry.end()); - OnLoadStdMessageResult(entry->second->RequestId, offset, reply, &msg, 
ignoreMessageLoadingErrors); - } - } else { + + const i64 newMessagesCount = value["newMessagesCount"]; + Y_VERIFY(newMessagesCount >= 0); + shardInfo.MessagesCount = static_cast<ui64>(newMessagesCount); + } + + THashMap<ui64, const TLoadBatchEntry*> offset2entry; + offset2entry.reserve(batch->Entries.size()); + for (const TLoadBatchEntry& entry : batch->Entries) { + offset2entry.emplace(entry.Offset, &entry); + } + + for (size_t i = 0; i < list.Size(); ++i) { + auto msg = list[i]; + const ui64 offset = msg["Offset"]; + const auto entry = offset2entry.find(offset); + Y_VERIFY(entry != offset2entry.end()); + OnLoadStdMessageResult(entry->second->RequestId, offset, reply, &msg, ignoreMessageLoadingErrors); + } + } else { const auto errStatus = NKikimr::NTxProxy::TResultStatus::EStatus(reply.GetStatus()); if (usedDLQ && !NTxProxy::TResultStatus::IsSoftErrorWithoutSideEffects(errStatus)) { // it's possible that DLQ was removed, hence it'd be wise to refresh corresponding info @@ -874,524 +874,524 @@ void TQueueLeader::OnLoadStdMessagesBatchExecuted(ui64 shard, ui64 batchId, cons ignoreMessageLoadingErrors = true; } - const TString* prevRequestId = nullptr; - for (size_t i = 0; i < batch->Size(); ++i) { - const TLoadBatchEntry& entry = batch->Entries[i]; - if (!prevRequestId || *prevRequestId != entry.RequestId) { - prevRequestId = &entry.RequestId; - RLOG_SQS_REQ_ERROR(entry.RequestId, "Batch transaction failed: " << reply << ". BatchId: " << batch->BatchId); - } - OnLoadStdMessageResult(entry.RequestId, entry.Offset, reply, nullptr, ignoreMessageLoadingErrors); - } - } - batchingState.BatchesExecuting.erase(batchId); - batchingState.TryExecute(this); -} - + const TString* prevRequestId = nullptr; + for (size_t i = 0; i < batch->Size(); ++i) { + const TLoadBatchEntry& entry = batch->Entries[i]; + if (!prevRequestId || *prevRequestId != entry.RequestId) { + prevRequestId = &entry.RequestId; + RLOG_SQS_REQ_ERROR(entry.RequestId, "Batch transaction failed: " << reply << ". BatchId: " << batch->BatchId); + } + OnLoadStdMessageResult(entry.RequestId, entry.Offset, reply, nullptr, ignoreMessageLoadingErrors); + } + } + batchingState.BatchesExecuting.erase(batchId); + batchingState.TryExecute(this); +} + void TQueueLeader::TryReceiveAnotherShard(TReceiveMessageBatchRequestProcessing& reqInfo) { - const TString& requestId = reqInfo.Event->Get()->RequestId; - const TInstant waitDeadline = reqInfo.Event->Get()->WaitDeadline; - const TInstant now = TActivationContext::Now(); - if (!Cfg().GetCheckAllShardsInReceiveMessage() && now >= waitDeadline) { - if (waitDeadline) { - RLOG_SQS_REQ_DEBUG(requestId, "Wait time expired. Overworked " << (now - waitDeadline).MilliSeconds() << "ms"); - } - } else if (reqInfo.CurrentShardIndex + 1 < reqInfo.Shards.size()) { - DecActiveMessageRequests(reqInfo.GetCurrentShard()); - ++reqInfo.CurrentShardIndex; - RLOG_SQS_REQ_DEBUG(requestId, "Trying another shard: " << reqInfo.GetCurrentShard()); - reqInfo.LockCount = 0; - reqInfo.TriedAddMessagesToInfly = false; - reqInfo.Answer->Retried = true; - ProcessReceiveMessageBatch(reqInfo); - return; - } - Reply(reqInfo); -} - + const TString& requestId = reqInfo.Event->Get()->RequestId; + const TInstant waitDeadline = reqInfo.Event->Get()->WaitDeadline; + const TInstant now = TActivationContext::Now(); + if (!Cfg().GetCheckAllShardsInReceiveMessage() && now >= waitDeadline) { + if (waitDeadline) { + RLOG_SQS_REQ_DEBUG(requestId, "Wait time expired. 
Overworked " << (now - waitDeadline).MilliSeconds() << "ms"); + } + } else if (reqInfo.CurrentShardIndex + 1 < reqInfo.Shards.size()) { + DecActiveMessageRequests(reqInfo.GetCurrentShard()); + ++reqInfo.CurrentShardIndex; + RLOG_SQS_REQ_DEBUG(requestId, "Trying another shard: " << reqInfo.GetCurrentShard()); + reqInfo.LockCount = 0; + reqInfo.TriedAddMessagesToInfly = false; + reqInfo.Answer->Retried = true; + ProcessReceiveMessageBatch(reqInfo); + return; + } + Reply(reqInfo); +} + void TQueueLeader::WaitAddMessagesToInflyOrTryAnotherShard(TReceiveMessageBatchRequestProcessing& reqInfo) { - const ui64 shard = reqInfo.GetCurrentShard(); - auto& shardInfo = Shards_[shard]; - const TString& requestId = reqInfo.Event->Get()->RequestId; - const TInstant waitDeadline = reqInfo.Event->Get()->WaitDeadline; - const TInstant now = TActivationContext::Now(); - if (!Cfg().GetCheckAllShardsInReceiveMessage() && waitDeadline != TInstant::Zero() && now >= waitDeadline) { - RLOG_SQS_REQ_DEBUG(requestId, "Wait time expired. Overworked " << (now - waitDeadline).MilliSeconds() << "ms"); - Reply(reqInfo); - } else { - if (!IsDlqQueue_ && !shardInfo.HasMessagesToAddToInfly() && !shardInfo.NeedAddMessagesToInflyCheckInDatabase()) { - RLOG_SQS_REQ_DEBUG(requestId, "No known messages in this shard. Skip attempt to add messages to infly"); - ++shardInfo.AddMessagesToInflyCheckAttempts; - reqInfo.TriedAddMessagesToInfly = true; - } - - if (reqInfo.TriedAddMessagesToInfly) { - RLOG_SQS_REQ_DEBUG(requestId, "Already tried to add messages to infly"); - TryReceiveAnotherShard(reqInfo); - return; - } - - reqInfo.TriedAddMessagesToInfly = true; - reqInfo.WaitingAddMessagesToInfly = true; - DecActiveMessageRequests(reqInfo.GetCurrentShard()); - RLOG_SQS_REQ_DEBUG(requestId, "Waiting for adding messages to infly. AddingMessagesToInfly: " << shardInfo.AddingMessagesToInfly << ". NeedInflyReload: " << shardInfo.NeedInflyReload); - if (shardInfo.AddingMessagesToInfly) { - return; - } - if (shardInfo.NeedInflyReload) { - shardInfo.NeedAddingMessagesToInfly = true; - StartLoadingInfly(shard); - } else { - AddMessagesToInfly(shard); - } - } -} - + const ui64 shard = reqInfo.GetCurrentShard(); + auto& shardInfo = Shards_[shard]; + const TString& requestId = reqInfo.Event->Get()->RequestId; + const TInstant waitDeadline = reqInfo.Event->Get()->WaitDeadline; + const TInstant now = TActivationContext::Now(); + if (!Cfg().GetCheckAllShardsInReceiveMessage() && waitDeadline != TInstant::Zero() && now >= waitDeadline) { + RLOG_SQS_REQ_DEBUG(requestId, "Wait time expired. Overworked " << (now - waitDeadline).MilliSeconds() << "ms"); + Reply(reqInfo); + } else { + if (!IsDlqQueue_ && !shardInfo.HasMessagesToAddToInfly() && !shardInfo.NeedAddMessagesToInflyCheckInDatabase()) { + RLOG_SQS_REQ_DEBUG(requestId, "No known messages in this shard. Skip attempt to add messages to infly"); + ++shardInfo.AddMessagesToInflyCheckAttempts; + reqInfo.TriedAddMessagesToInfly = true; + } + + if (reqInfo.TriedAddMessagesToInfly) { + RLOG_SQS_REQ_DEBUG(requestId, "Already tried to add messages to infly"); + TryReceiveAnotherShard(reqInfo); + return; + } + + reqInfo.TriedAddMessagesToInfly = true; + reqInfo.WaitingAddMessagesToInfly = true; + DecActiveMessageRequests(reqInfo.GetCurrentShard()); + RLOG_SQS_REQ_DEBUG(requestId, "Waiting for adding messages to infly. AddingMessagesToInfly: " << shardInfo.AddingMessagesToInfly << ". 
NeedInflyReload: " << shardInfo.NeedInflyReload); + if (shardInfo.AddingMessagesToInfly) { + return; + } + if (shardInfo.NeedInflyReload) { + shardInfo.NeedAddingMessagesToInfly = true; + StartLoadingInfly(shard); + } else { + AddMessagesToInfly(shard); + } + } +} + void TQueueLeader::Reply(TReceiveMessageBatchRequestProcessing& reqInfo) { - const ui64 shard = reqInfo.GetCurrentShard(); - Send(reqInfo.Event->Sender, std::move(reqInfo.Answer)); - ReceiveMessageRequests_.erase(reqInfo.Event->Get()->RequestId); - DecActiveMessageRequests(shard); -} - + const ui64 shard = reqInfo.GetCurrentShard(); + Send(reqInfo.Event->Sender, std::move(reqInfo.Answer)); + ReceiveMessageRequests_.erase(reqInfo.Event->Get()->RequestId); + DecActiveMessageRequests(shard); +} + void TQueueLeader::HandleDeleteMessageBatchWhileIniting(TSqsEvents::TEvDeleteMessageBatch::TPtr& ev) { - auto key = std::make_pair(ev->Get()->RequestId, ev->Get()->Shard); - Y_VERIFY(DeleteMessageRequests_.emplace(std::move(key), std::move(ev)).second); -} - + auto key = std::make_pair(ev->Get()->RequestId, ev->Get()->Shard); + Y_VERIFY(DeleteMessageRequests_.emplace(std::move(key), std::move(ev)).second); +} + void TQueueLeader::HandleDeleteMessageBatchWhileWorking(TSqsEvents::TEvDeleteMessageBatch::TPtr& ev) { - auto key = std::make_pair(ev->Get()->RequestId, ev->Get()->Shard); - auto [reqIter, inserted] = DeleteMessageRequests_.emplace(std::move(key), std::move(ev)); - Y_VERIFY(inserted); - ProcessDeleteMessageBatch(reqIter->second); -} - + auto key = std::make_pair(ev->Get()->RequestId, ev->Get()->Shard); + auto [reqIter, inserted] = DeleteMessageRequests_.emplace(std::move(key), std::move(ev)); + Y_VERIFY(inserted); + ProcessDeleteMessageBatch(reqIter->second); +} + void TQueueLeader::ProcessDeleteMessageBatch(TDeleteMessageBatchRequestProcessing& reqInfo) { - auto& req = reqInfo.Event; - if (!IncActiveMessageRequests(req->Get()->Shard, req->Get()->RequestId)) { - return; - } - - if (!IsFifoQueue_) { - for (const auto& messageReq : req->Get()->Messages) { - THolder<TInflyMessage> inflyMessage = Shards_[req->Get()->Shard].Infly->Delete(messageReq.Offset); - if (inflyMessage) { - reqInfo.InflyMessages.emplace_back(std::move(inflyMessage)); - } else { - reqInfo.InflyMessages.emplace_back(); // nullptr - RLOG_SQS_REQ_WARN(req->Get()->RequestId, "Message with offset " << messageReq.Offset << " was not found in infly"); - } - } - } - - auto& shardInfo = Shards_[reqInfo.Event->Get()->Shard]; - shardInfo.DeleteBatchingState.AddRequest(reqInfo); - shardInfo.DeleteBatchingState.TryExecute(this); -} - + auto& req = reqInfo.Event; + if (!IncActiveMessageRequests(req->Get()->Shard, req->Get()->RequestId)) { + return; + } + + if (!IsFifoQueue_) { + for (const auto& messageReq : req->Get()->Messages) { + THolder<TInflyMessage> inflyMessage = Shards_[req->Get()->Shard].Infly->Delete(messageReq.Offset); + if (inflyMessage) { + reqInfo.InflyMessages.emplace_back(std::move(inflyMessage)); + } else { + reqInfo.InflyMessages.emplace_back(); // nullptr + RLOG_SQS_REQ_WARN(req->Get()->RequestId, "Message with offset " << messageReq.Offset << " was not found in infly"); + } + } + } + + auto& shardInfo = Shards_[reqInfo.Event->Get()->Shard]; + shardInfo.DeleteBatchingState.AddRequest(reqInfo); + shardInfo.DeleteBatchingState.TryExecute(this); +} + void TQueueLeader::OnMessageDeleted(const TString& requestId, ui64 shard, size_t index, const TSqsEvents::TEvExecuted::TRecord& reply, const NKikimr::NClient::TValue* messageRecord) { - auto key = 
std::make_pair(requestId, shard); - auto reqIt = DeleteMessageRequests_.find(key); - Y_VERIFY(reqIt != DeleteMessageRequests_.end()); - auto& reqInfo = reqIt->second; - auto& req = reqInfo.Event; - auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); - RLOG_SQS_REQ_TRACE(req->Get()->RequestId, "Received reply from DB: " << status); - ++reqInfo.AnswersGot; - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - reqInfo.Answer->Statuses[index].Status = messageRecord ? - TSqsEvents::TEvDeleteMessageBatchResponse::EDeleteMessageStatus::OK - : TSqsEvents::TEvDeleteMessageBatchResponse::EDeleteMessageStatus::NotFound; - - if (!IsFifoQueue_ && !reqInfo.InflyMessages[index]) { // concurrent receives & change visibilities - const ui64 offset = reqInfo.Event->Get()->Messages[index].Offset; - if (!Shards_[shard].Infly->Delete(offset)) { - bool deleted = false; - // search in receive requests - for (auto& [receiveRequestId, receiveRequestInfo] : ReceiveMessageRequests_) { - if (receiveRequestInfo.CurrentShardIndex < receiveRequestInfo.Shards.size() - && receiveRequestInfo.Shards[receiveRequestInfo.CurrentShardIndex] == shard - && receiveRequestInfo.ReceiveCandidates - && receiveRequestInfo.ReceiveCandidates.Delete(offset)) { - deleted = true; - break; - } - } - // search in change visibility requests - if (!deleted) { - for (auto& [changeVisibilityRequestIdAndShard, changeVisibilityRequestInfo] : ChangeMessageVisibilityRequests_) { - if (changeVisibilityRequestIdAndShard.second == shard - && changeVisibilityRequestInfo.Candidates - && changeVisibilityRequestInfo.Candidates.Delete(offset)) { - deleted = true; - break; - } - } - } - } - } - } else { - reqInfo.Answer->Statuses[index].Status = TSqsEvents::TEvDeleteMessageBatchResponse::EDeleteMessageStatus::Failed; - - // return back to infly - if (!IsFifoQueue_ && reqInfo.InflyMessages[index]) { - Shards_[req->Get()->Shard].Infly->Add(std::move(reqInfo.InflyMessages[index])); - } - } - - if (reqInfo.AnswersGot == req->Get()->Messages.size()) { - Send(req->Sender, reqInfo.Answer.Release()); - DeleteMessageRequests_.erase(key); - DecActiveMessageRequests(shard); - } -} - + auto key = std::make_pair(requestId, shard); + auto reqIt = DeleteMessageRequests_.find(key); + Y_VERIFY(reqIt != DeleteMessageRequests_.end()); + auto& reqInfo = reqIt->second; + auto& req = reqInfo.Event; + auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); + RLOG_SQS_REQ_TRACE(req->Get()->RequestId, "Received reply from DB: " << status); + ++reqInfo.AnswersGot; + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + reqInfo.Answer->Statuses[index].Status = messageRecord ? 
+ TSqsEvents::TEvDeleteMessageBatchResponse::EDeleteMessageStatus::OK + : TSqsEvents::TEvDeleteMessageBatchResponse::EDeleteMessageStatus::NotFound; + + if (!IsFifoQueue_ && !reqInfo.InflyMessages[index]) { // concurrent receives & change visibilities + const ui64 offset = reqInfo.Event->Get()->Messages[index].Offset; + if (!Shards_[shard].Infly->Delete(offset)) { + bool deleted = false; + // search in receive requests + for (auto& [receiveRequestId, receiveRequestInfo] : ReceiveMessageRequests_) { + if (receiveRequestInfo.CurrentShardIndex < receiveRequestInfo.Shards.size() + && receiveRequestInfo.Shards[receiveRequestInfo.CurrentShardIndex] == shard + && receiveRequestInfo.ReceiveCandidates + && receiveRequestInfo.ReceiveCandidates.Delete(offset)) { + deleted = true; + break; + } + } + // search in change visibility requests + if (!deleted) { + for (auto& [changeVisibilityRequestIdAndShard, changeVisibilityRequestInfo] : ChangeMessageVisibilityRequests_) { + if (changeVisibilityRequestIdAndShard.second == shard + && changeVisibilityRequestInfo.Candidates + && changeVisibilityRequestInfo.Candidates.Delete(offset)) { + deleted = true; + break; + } + } + } + } + } + } else { + reqInfo.Answer->Statuses[index].Status = TSqsEvents::TEvDeleteMessageBatchResponse::EDeleteMessageStatus::Failed; + + // return back to infly + if (!IsFifoQueue_ && reqInfo.InflyMessages[index]) { + Shards_[req->Get()->Shard].Infly->Add(std::move(reqInfo.InflyMessages[index])); + } + } + + if (reqInfo.AnswersGot == req->Get()->Messages.size()) { + Send(req->Sender, reqInfo.Answer.Release()); + DeleteMessageRequests_.erase(key); + DecActiveMessageRequests(shard); + } +} + void TQueueLeader::OnDeleteBatchExecuted(ui64 shard, ui64 batchId, const TSqsEvents::TEvExecuted::TRecord& reply) { - auto& shardInfo = Shards_[shard]; - auto& batchingState = shardInfo.DeleteBatchingState; - auto batchIt = batchingState.BatchesExecuting.find(batchId); - Y_VERIFY(batchIt != batchingState.BatchesExecuting.end()); - auto batch = batchIt->second; - auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); - const TValue list(val["deleted"]); - for (size_t i = 0; i < list.Size(); ++i) { - auto messageResult = list[i]; - const ui64 offset = messageResult["Offset"]; - const auto [first, last] = batch->Offset2Entry.equal_range(offset); - Y_VERIFY(first != last); - for (auto el = first; el != last; ++el) { - const TDeleteBatchEntry& entry = batch->Entries[el->second]; - OnMessageDeleted(entry.RequestId, shard, entry.IndexInRequest, reply, &messageResult); - } - batch->Offset2Entry.erase(first, last); - } - // others are already deleted messages: - for (const auto& [offset, entryIndex] : batch->Offset2Entry) { - const TDeleteBatchEntry& entry = batch->Entries[entryIndex]; - OnMessageDeleted(entry.RequestId, shard, entry.IndexInRequest, reply, nullptr); - } - - if (!IsFifoQueue_) { - const i64 newMessagesCount = val["newMessagesCount"]; - Y_VERIFY(newMessagesCount >= 0); - shardInfo.MessagesCount = static_cast<ui64>(newMessagesCount); - } - } else { - const TString* prevRequestId = nullptr; - for (size_t i = 0; i < batch->Size(); ++i) { - const TDeleteBatchEntry& entry = batch->Entries[i]; - if (!prevRequestId || *prevRequestId != entry.RequestId) { - prevRequestId = &entry.RequestId; - 
RLOG_SQS_REQ_ERROR(entry.RequestId, "Batch transaction failed: " << reply << ". BatchId: " << batch->BatchId); - } - OnMessageDeleted(entry.RequestId, shard, entry.IndexInRequest, reply, nullptr); - } - } - batchingState.BatchesExecuting.erase(batchId); - batchingState.TryExecute(this); -} - + auto& shardInfo = Shards_[shard]; + auto& batchingState = shardInfo.DeleteBatchingState; + auto batchIt = batchingState.BatchesExecuting.find(batchId); + Y_VERIFY(batchIt != batchingState.BatchesExecuting.end()); + auto batch = batchIt->second; + auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); + const TValue list(val["deleted"]); + for (size_t i = 0; i < list.Size(); ++i) { + auto messageResult = list[i]; + const ui64 offset = messageResult["Offset"]; + const auto [first, last] = batch->Offset2Entry.equal_range(offset); + Y_VERIFY(first != last); + for (auto el = first; el != last; ++el) { + const TDeleteBatchEntry& entry = batch->Entries[el->second]; + OnMessageDeleted(entry.RequestId, shard, entry.IndexInRequest, reply, &messageResult); + } + batch->Offset2Entry.erase(first, last); + } + // others are already deleted messages: + for (const auto& [offset, entryIndex] : batch->Offset2Entry) { + const TDeleteBatchEntry& entry = batch->Entries[entryIndex]; + OnMessageDeleted(entry.RequestId, shard, entry.IndexInRequest, reply, nullptr); + } + + if (!IsFifoQueue_) { + const i64 newMessagesCount = val["newMessagesCount"]; + Y_VERIFY(newMessagesCount >= 0); + shardInfo.MessagesCount = static_cast<ui64>(newMessagesCount); + } + } else { + const TString* prevRequestId = nullptr; + for (size_t i = 0; i < batch->Size(); ++i) { + const TDeleteBatchEntry& entry = batch->Entries[i]; + if (!prevRequestId || *prevRequestId != entry.RequestId) { + prevRequestId = &entry.RequestId; + RLOG_SQS_REQ_ERROR(entry.RequestId, "Batch transaction failed: " << reply << ". 
BatchId: " << batch->BatchId); + } + OnMessageDeleted(entry.RequestId, shard, entry.IndexInRequest, reply, nullptr); + } + } + batchingState.BatchesExecuting.erase(batchId); + batchingState.TryExecute(this); +} + void TQueueLeader::HandleChangeMessageVisibilityBatchWhileIniting(TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr& ev) { - auto key = std::make_pair(ev->Get()->RequestId, ev->Get()->Shard); - Y_VERIFY(ChangeMessageVisibilityRequests_.emplace(std::move(key), std::move(ev)).second); -} - + auto key = std::make_pair(ev->Get()->RequestId, ev->Get()->Shard); + Y_VERIFY(ChangeMessageVisibilityRequests_.emplace(std::move(key), std::move(ev)).second); +} + void TQueueLeader::HandleChangeMessageVisibilityBatchWhileWorking(TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr& ev) { - auto key = std::make_pair(ev->Get()->RequestId, ev->Get()->Shard); - auto [reqIter, inserted] = ChangeMessageVisibilityRequests_.emplace(std::move(key), std::move(ev)); - Y_VERIFY(inserted); - ProcessChangeMessageVisibilityBatch(reqIter->second); -} - + auto key = std::make_pair(ev->Get()->RequestId, ev->Get()->Shard); + auto [reqIter, inserted] = ChangeMessageVisibilityRequests_.emplace(std::move(key), std::move(ev)); + Y_VERIFY(inserted); + ProcessChangeMessageVisibilityBatch(reqIter->second); +} + void TQueueLeader::ProcessChangeMessageVisibilityBatch(TChangeMessageVisibilityBatchRequestProcessing& reqInfo) { - auto& req = *reqInfo.Event->Get(); - if (!IncActiveMessageRequests(req.Shard, req.RequestId)) { - return; - } - TExecutorBuilder builder(SelfId(), req.RequestId); - builder - .User(UserName_) - .Queue(QueueName_) - .QueueVersion(QueueVersion_) - .Fifo(IsFifoQueue_) - .Shard(req.Shard) + auto& req = *reqInfo.Event->Get(); + if (!IncActiveMessageRequests(req.Shard, req.RequestId)) { + return; + } + TExecutorBuilder builder(SelfId(), req.RequestId); + builder + .User(UserName_) + .Queue(QueueName_) + .QueueVersion(QueueVersion_) + .Fifo(IsFifoQueue_) + .Shard(req.Shard) .QueueLeader(SelfId()) - .QueryId(CHANGE_VISIBILITY_ID) - .Counters(Counters_) - .RetryOnTimeout() - .OnExecuted([this, requestId = req.RequestId, shard = req.Shard](const TSqsEvents::TEvExecuted::TRecord& ev) { OnVisibilityChanged(requestId, shard, ev); }); - - builder.Params() - .Uint64("NOW", req.NowTimestamp.MilliSeconds()) - .Uint64("GROUPS_READ_ATTEMPT_IDS_PERIOD", Cfg().GetGroupsReadAttemptIdsPeriodMs()); - NClient::TWriteValue params = builder.ParamsValue(); - if (!IsFifoQueue_) { - reqInfo.Candidates = TInflyMessages::TChangeVisibilityCandidates(Shards_[req.Shard].Infly); - } - for (const auto& messageReq : req.Messages) { - if (!IsFifoQueue_) { - if (!reqInfo.Candidates.Add(messageReq.Offset)) { - RLOG_SQS_REQ_WARN(req.RequestId, "Message with offset " << messageReq.Offset << " was not found in infly"); - } - } - auto key = params["KEYS"].AddListItem(); - - if (IsFifoQueue_) { - key["GroupId"].Bytes(messageReq.MessageGroupId); - key["ReceiveAttemptId"] = messageReq.ReceiveAttemptId; - } - key["LockTimestamp"] = ui64(messageReq.LockTimestamp.MilliSeconds()); - key["Offset"] = ui64(messageReq.Offset); - key["NewVisibilityDeadline"] = ui64(messageReq.VisibilityDeadline.MilliSeconds()); - } - - builder.Start(); -} - + .QueryId(CHANGE_VISIBILITY_ID) + .Counters(Counters_) + .RetryOnTimeout() + .OnExecuted([this, requestId = req.RequestId, shard = req.Shard](const TSqsEvents::TEvExecuted::TRecord& ev) { OnVisibilityChanged(requestId, shard, ev); }); + + builder.Params() + .Uint64("NOW", req.NowTimestamp.MilliSeconds()) + 
.Uint64("GROUPS_READ_ATTEMPT_IDS_PERIOD", Cfg().GetGroupsReadAttemptIdsPeriodMs()); + NClient::TWriteValue params = builder.ParamsValue(); + if (!IsFifoQueue_) { + reqInfo.Candidates = TInflyMessages::TChangeVisibilityCandidates(Shards_[req.Shard].Infly); + } + for (const auto& messageReq : req.Messages) { + if (!IsFifoQueue_) { + if (!reqInfo.Candidates.Add(messageReq.Offset)) { + RLOG_SQS_REQ_WARN(req.RequestId, "Message with offset " << messageReq.Offset << " was not found in infly"); + } + } + auto key = params["KEYS"].AddListItem(); + + if (IsFifoQueue_) { + key["GroupId"].Bytes(messageReq.MessageGroupId); + key["ReceiveAttemptId"] = messageReq.ReceiveAttemptId; + } + key["LockTimestamp"] = ui64(messageReq.LockTimestamp.MilliSeconds()); + key["Offset"] = ui64(messageReq.Offset); + key["NewVisibilityDeadline"] = ui64(messageReq.VisibilityDeadline.MilliSeconds()); + } + + builder.Start(); +} + void TQueueLeader::OnVisibilityChanged(const TString& requestId, ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply) { - auto key = std::make_pair(requestId, shard); - auto reqIt = ChangeMessageVisibilityRequests_.find(key); - Y_VERIFY(reqIt != ChangeMessageVisibilityRequests_.end()); - auto& reqInfo = reqIt->second; - auto& req = *reqInfo.Event->Get(); - auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); - RLOG_SQS_REQ_TRACE(req.RequestId, "Received reply from DB: " << status); - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); - const TValue list(val["result"]); - for (size_t i = 0; i < list.Size(); ++i) { - const bool exists = list[i]["Exists"]; - if (exists) { - const bool changeCond = list[i]["ChangeCond"]; - if (changeCond) { - reqInfo.Answer->Statuses[i].Status = TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::OK; - if (!IsFifoQueue_) { - const auto& messageReq = req.Messages[i]; - reqInfo.Candidates.SetVisibilityDeadline(messageReq.Offset, messageReq.VisibilityDeadline); - } - } else { - reqInfo.Answer->Statuses[i].Status = TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::NotInFly; - // Correct visibility deadline - if (!IsFifoQueue_) { - const auto& messageReq = req.Messages[i]; - const ui64 currentVisibilityDeadline = list[i]["CurrentVisibilityDeadline"]; - reqInfo.Candidates.SetVisibilityDeadline(messageReq.Offset, TInstant::MilliSeconds(currentVisibilityDeadline)); - } - } - } else { - reqInfo.Answer->Statuses[i].Status = TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::NotFound; - if (!IsFifoQueue_) { - reqInfo.Candidates.Delete(req.Messages[i].Offset); - } - } - } - } else { - for (auto& status : reqInfo.Answer->Statuses) { - status.Status = TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::Failed; - } - - // If timeout, it's better to change infly so that if visibility deadline was changed. - // It won't break consistency (because everything is done through database), - // but the message may be processed as with new visibility timeout. 
- if (!IsFifoQueue_ && status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecTimeout) { - for (const auto& messageReq : req.Messages) { - reqInfo.Candidates.SetVisibilityDeadline(messageReq.Offset, messageReq.VisibilityDeadline); - } - } - } - - Send(reqInfo.Event->Sender, reqInfo.Answer.Release()); - ChangeMessageVisibilityRequests_.erase(key); - - DecActiveMessageRequests(shard); -} - + auto key = std::make_pair(requestId, shard); + auto reqIt = ChangeMessageVisibilityRequests_.find(key); + Y_VERIFY(reqIt != ChangeMessageVisibilityRequests_.end()); + auto& reqInfo = reqIt->second; + auto& req = *reqInfo.Event->Get(); + auto status = TEvTxUserProxy::TEvProposeTransactionStatus::EStatus(reply.GetStatus()); + RLOG_SQS_REQ_TRACE(req.RequestId, "Received reply from DB: " << status); + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); + const TValue list(val["result"]); + for (size_t i = 0; i < list.Size(); ++i) { + const bool exists = list[i]["Exists"]; + if (exists) { + const bool changeCond = list[i]["ChangeCond"]; + if (changeCond) { + reqInfo.Answer->Statuses[i].Status = TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::OK; + if (!IsFifoQueue_) { + const auto& messageReq = req.Messages[i]; + reqInfo.Candidates.SetVisibilityDeadline(messageReq.Offset, messageReq.VisibilityDeadline); + } + } else { + reqInfo.Answer->Statuses[i].Status = TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::NotInFly; + // Correct visibility deadline + if (!IsFifoQueue_) { + const auto& messageReq = req.Messages[i]; + const ui64 currentVisibilityDeadline = list[i]["CurrentVisibilityDeadline"]; + reqInfo.Candidates.SetVisibilityDeadline(messageReq.Offset, TInstant::MilliSeconds(currentVisibilityDeadline)); + } + } + } else { + reqInfo.Answer->Statuses[i].Status = TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::NotFound; + if (!IsFifoQueue_) { + reqInfo.Candidates.Delete(req.Messages[i].Offset); + } + } + } + } else { + for (auto& status : reqInfo.Answer->Statuses) { + status.Status = TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::Failed; + } + + // If timeout, it's better to change infly so that if visibility deadline was changed. + // It won't break consistency (because everything is done through database), + // but the message may be processed as with new visibility timeout. 
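A hedged sketch of the timeout rule described in the comment just above, with a hypothetical ApplyOnTimeout helper: when the proposal times out the outcome is unknown, so the requested deadline is applied to the local in-fly view anyway; this cannot break consistency because the database remains authoritative, and the guarded loop that follows does the same for standard queues.

    #include <cstdint>
    #include <vector>

    struct TVisibilityChange {
        uint64_t Offset = 0;
        uint64_t NewDeadlineMs = 0;
    };

    // Hypothetical helper: on an ambiguous (timed-out) transaction, assume the new
    // deadline took effect so the message is not handed out earlier than the client
    // may now expect; at worst the message is redelivered later than strictly necessary.
    template <typename TCandidates>
    void ApplyOnTimeout(TCandidates& candidates, const std::vector<TVisibilityChange>& changes) {
        for (const auto& change : changes) {
            candidates.SetVisibilityDeadline(change.Offset, change.NewDeadlineMs);
        }
    }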
+ if (!IsFifoQueue_ && status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecTimeout) { + for (const auto& messageReq : req.Messages) { + reqInfo.Candidates.SetVisibilityDeadline(messageReq.Offset, messageReq.VisibilityDeadline); + } + } + } + + Send(reqInfo.Event->Sender, reqInfo.Answer.Release()); + ChangeMessageVisibilityRequests_.erase(key); + + DecActiveMessageRequests(shard); +} + void TQueueLeader::AnswerGetConfiguration(TSqsEvents::TEvGetConfiguration::TPtr& req) { - auto resp = MakeHolder<TSqsEvents::TEvConfiguration>(); - - resp->RootUrl = RootUrl_; + auto resp = MakeHolder<TSqsEvents::TEvConfiguration>(); + + resp->RootUrl = RootUrl_; resp->SqsCoreCounters = Counters_->RootCounters.SqsCounters; - resp->QueueCounters = Counters_; - resp->UserCounters = UserCounters_; - resp->Shards = ShardsCount_; - resp->UserExists = true; - resp->QueueExists = true; - resp->Fifo = IsFifoQueue_; - resp->SchemeCache = SchemeCache_; + resp->QueueCounters = Counters_; + resp->UserCounters = UserCounters_; + resp->Shards = ShardsCount_; + resp->UserExists = true; + resp->QueueExists = true; + resp->Fifo = IsFifoQueue_; + resp->SchemeCache = SchemeCache_; resp->QueueLeader = SelfId(); - resp->QuoterResources = QuoterResources_; - - if (req->Get()->NeedQueueAttributes) { - Y_VERIFY(QueueAttributes_); - resp->QueueAttributes = QueueAttributes_; - } - - Send(req->Sender, std::move(resp)); -} - + resp->QuoterResources = QuoterResources_; + + if (req->Get()->NeedQueueAttributes) { + Y_VERIFY(QueueAttributes_); + resp->QueueAttributes = QueueAttributes_; + } + + Send(req->Sender, std::move(resp)); +} + void TQueueLeader::AnswerFailed(TSqsEvents::TEvGetConfiguration::TPtr& ev) { - auto answer = MakeHolder<TSqsEvents::TEvConfiguration>(); - answer->RootUrl = RootUrl_; + auto answer = MakeHolder<TSqsEvents::TEvConfiguration>(); + answer->RootUrl = RootUrl_; answer->SqsCoreCounters = Counters_->RootCounters.SqsCounters; - answer->QueueCounters = Counters_; - answer->UserCounters = UserCounters_; - answer->Fail = true; - answer->SchemeCache = SchemeCache_; - answer->QuoterResources = QuoterResources_; - Send(ev->Sender, answer.Release()); -} - + answer->QueueCounters = Counters_; + answer->UserCounters = UserCounters_; + answer->Fail = true; + answer->SchemeCache = SchemeCache_; + answer->QuoterResources = QuoterResources_; + Send(ev->Sender, answer.Release()); +} + void TQueueLeader::RequestConfiguration() { - TExecutorBuilder(SelfId(), "") - .User(UserName_) - .Queue(QueueName_) - .RetryOnTimeout() - .Text(Sprintf(GetQueueParamsQuery, Cfg().GetRoot().c_str())) - .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnQueueConfiguration(ev); }) - .Counters(Counters_) - .Params() - .Utf8("NAME", QueueName_) - .Utf8("USER_NAME", UserName_) - .ParentBuilder().StartExecutorActor(); -} - + TExecutorBuilder(SelfId(), "") + .User(UserName_) + .Queue(QueueName_) + .RetryOnTimeout() + .Text(Sprintf(GetQueueParamsQuery, Cfg().GetRoot().c_str())) + .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnQueueConfiguration(ev); }) + .Counters(Counters_) + .Params() + .Utf8("NAME", QueueName_) + .Utf8("USER_NAME", UserName_) + .ParentBuilder().StartExecutorActor(); +} + void TQueueLeader::OnQueueConfiguration(const TSqsEvents::TEvExecuted::TRecord& ev) { - if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); - - if (bool(val["exists"])) { 
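A minimal sketch of the record the lines below unpack from the "queue" result, with hypothetical names in a plain struct; Version is optional, which is why the original code checks HaveValue() before reading it.

    #include <cstdint>
    #include <optional>
    #include <string>

    // Hypothetical flat view of the configuration row read for the queue.
    struct TQueueParams {
        uint64_t Shards = 0;                 // shard count, used to size Shards_
        uint64_t Partitions = 0;
        std::string QueueId;
        std::optional<uint64_t> Version;     // may be absent, hence the HaveValue() check
        bool FifoQueue = false;              // selects FIFO vs standard batching policies below
    };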
- const auto data(val["queue"]); - ShardsCount_ = data["Shards"]; - PartitionsCount_ = data["Partitions"]; - QueueId_ = data["QueueId"]; + if (ev.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); + + if (bool(val["exists"])) { + const auto data(val["queue"]); + ShardsCount_ = data["Shards"]; + PartitionsCount_ = data["Partitions"]; + QueueId_ = data["QueueId"]; if (data["Version"].HaveValue()) { QueueVersion_ = ui64(data["Version"]); } IsFifoQueue_ = bool(data["FifoQueue"]); - Shards_.resize(ShardsCount_); - const auto& cfg = Cfg(); - if (IsFifoQueue_) { - for (size_t i = 0; i < ShardsCount_; ++i) { - auto& shard = Shards_[i]; - shard.InflyLoadState = TShardInfo::EInflyLoadState::Fifo; - shard.SendBatchingState.Init(cfg.GetFifoQueueSendBatchingPolicy(), i, true); - shard.DeleteBatchingState.Init(cfg.GetFifoQueueDeleteBatchingPolicy(), i, true); - } - } else { - for (size_t i = 0; i < ShardsCount_; ++i) { - auto& shard = Shards_[i]; - shard.SendBatchingState.Init(cfg.GetStdQueueSendBatchingPolicy(), i, false); - shard.DeleteBatchingState.Init(cfg.GetStdQueueDeleteBatchingPolicy(), i, false); - shard.LoadBatchingState.Init(cfg.GetStdQueueLoadBatchingPolicy(), i, false); - } - } - - std::vector<TSqsEvents::TEvGetConfiguration::TPtr> needAttributesRequests; - for (auto& req : GetConfigurationRequests_) { - if (req->Get()->NeedQueueAttributes) { - needAttributesRequests.emplace_back(std::move(req)); - continue; - } - - AnswerGetConfiguration(req); - } - GetConfigurationRequests_.swap(needAttributesRequests); - - if (!GetConfigurationRequests_.empty()) { - AskQueueAttributes(); - } - - if (!IsFifoQueue_) { - StartLoadingInfly(); - } - - InitQuoterResources(); - - BecomeWorking(); - } else { - INC_COUNTER(Counters_, QueueMasterStartProblems); + Shards_.resize(ShardsCount_); + const auto& cfg = Cfg(); + if (IsFifoQueue_) { + for (size_t i = 0; i < ShardsCount_; ++i) { + auto& shard = Shards_[i]; + shard.InflyLoadState = TShardInfo::EInflyLoadState::Fifo; + shard.SendBatchingState.Init(cfg.GetFifoQueueSendBatchingPolicy(), i, true); + shard.DeleteBatchingState.Init(cfg.GetFifoQueueDeleteBatchingPolicy(), i, true); + } + } else { + for (size_t i = 0; i < ShardsCount_; ++i) { + auto& shard = Shards_[i]; + shard.SendBatchingState.Init(cfg.GetStdQueueSendBatchingPolicy(), i, false); + shard.DeleteBatchingState.Init(cfg.GetStdQueueDeleteBatchingPolicy(), i, false); + shard.LoadBatchingState.Init(cfg.GetStdQueueLoadBatchingPolicy(), i, false); + } + } + + std::vector<TSqsEvents::TEvGetConfiguration::TPtr> needAttributesRequests; + for (auto& req : GetConfigurationRequests_) { + if (req->Get()->NeedQueueAttributes) { + needAttributesRequests.emplace_back(std::move(req)); + continue; + } + + AnswerGetConfiguration(req); + } + GetConfigurationRequests_.swap(needAttributesRequests); + + if (!GetConfigurationRequests_.empty()) { + AskQueueAttributes(); + } + + if (!IsFifoQueue_) { + StartLoadingInfly(); + } + + InitQuoterResources(); + + BecomeWorking(); + } else { + INC_COUNTER(Counters_, QueueMasterStartProblems); INC_COUNTER(Counters_, QueueLeaderStartProblems); - - for (auto& req : GetConfigurationRequests_) { - RLOG_SQS_REQ_DEBUG(req->Get()->RequestId, "Queue [" << req->Get()->QueueName << "] was not found in Queues table for user [" << req->Get()->UserName << "]"); - auto answer = MakeHolder<TSqsEvents::TEvConfiguration>(); - answer->UserExists = true; - 
answer->QueueExists = false; - answer->RootUrl = RootUrl_; + + for (auto& req : GetConfigurationRequests_) { + RLOG_SQS_REQ_DEBUG(req->Get()->RequestId, "Queue [" << req->Get()->QueueName << "] was not found in Queues table for user [" << req->Get()->UserName << "]"); + auto answer = MakeHolder<TSqsEvents::TEvConfiguration>(); + answer->UserExists = true; + answer->QueueExists = false; + answer->RootUrl = RootUrl_; answer->SqsCoreCounters = Counters_->RootCounters.SqsCounters; - answer->QueueCounters = Counters_; - answer->UserCounters = UserCounters_; - answer->Fail = false; - answer->SchemeCache = SchemeCache_; - answer->QuoterResources = QuoterResources_; - Send(req->Sender, answer.Release()); - } - GetConfigurationRequests_.clear(); - - ScheduleGetConfigurationRetry(); - } - } else { - INC_COUNTER(Counters_, QueueMasterStartProblems); + answer->QueueCounters = Counters_; + answer->UserCounters = UserCounters_; + answer->Fail = false; + answer->SchemeCache = SchemeCache_; + answer->QuoterResources = QuoterResources_; + Send(req->Sender, answer.Release()); + } + GetConfigurationRequests_.clear(); + + ScheduleGetConfigurationRetry(); + } + } else { + INC_COUNTER(Counters_, QueueMasterStartProblems); INC_COUNTER(Counters_, QueueLeaderStartProblems); - FailRequestsDuringStartProblems(); - ScheduleGetConfigurationRetry(); - } -} - + FailRequestsDuringStartProblems(); + ScheduleGetConfigurationRetry(); + } +} + void TQueueLeader::FailRequestsDuringStartProblems() { - for (auto& req : GetConfigurationRequests_) { - AnswerFailed(req); - } - GetConfigurationRequests_.clear(); -} - + for (auto& req : GetConfigurationRequests_) { + AnswerFailed(req); + } + GetConfigurationRequests_.clear(); +} + void TQueueLeader::ScheduleGetConfigurationRetry() { - Schedule(TDuration::MilliSeconds(100 + RandomNumber<ui32>(300)), new TEvWakeup(REQUEST_CONFIGURATION_TAG)); -} - + Schedule(TDuration::MilliSeconds(100 + RandomNumber<ui32>(300)), new TEvWakeup(REQUEST_CONFIGURATION_TAG)); +} + void TQueueLeader::AskQueueAttributes() { - const TString reqId = CreateGuidAsString(); - LOG_SQS_DEBUG("Executing queue " << TLogQueueName(UserName_, QueueName_) << " attributes cache request. Req id: " << reqId); - TExecutorBuilder(SelfId(), reqId) - .User(UserName_) - .Queue(QueueName_) + const TString reqId = CreateGuidAsString(); + LOG_SQS_DEBUG("Executing queue " << TLogQueueName(UserName_, QueueName_) << " attributes cache request. 
Req id: " << reqId); + TExecutorBuilder(SelfId(), reqId) + .User(UserName_) + .Queue(QueueName_) .QueueLeader(SelfId()) - .QueryId(INTERNAL_GET_QUEUE_ATTRIBUTES_ID) + .QueryId(INTERNAL_GET_QUEUE_ATTRIBUTES_ID) .QueueVersion(QueueVersion_) .Fifo(IsFifoQueue_) - .RetryOnTimeout() - .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnQueueAttributes(ev); }) - .Counters(Counters_) - .Start(); -} - + .RetryOnTimeout() + .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnQueueAttributes(ev); }) + .Counters(Counters_) + .Start(); +} + void TQueueLeader::OnQueueAttributes(const TSqsEvents::TEvExecuted::TRecord& ev) { - const ui32 status = ev.GetStatus(); - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); - const TValue& attrs(val["attrs"]); - - TSqsEvents::TQueueAttributes attributes; - attributes.ContentBasedDeduplication = attrs["ContentBasedDeduplication"]; - attributes.DelaySeconds = TDuration::MilliSeconds(attrs["DelaySeconds"]); - attributes.FifoQueue = attrs["FifoQueue"]; - attributes.MaximumMessageSize = attrs["MaximumMessageSize"]; - attributes.MessageRetentionPeriod = TDuration::MilliSeconds(attrs["MessageRetentionPeriod"]); - attributes.ReceiveMessageWaitTime = TDuration::MilliSeconds(attrs["ReceiveMessageWaitTime"]); - attributes.VisibilityTimeout = TDuration::MilliSeconds(attrs["VisibilityTimeout"]); - - const TValue showDetailedCountersDeadline = attrs["ShowDetailedCountersDeadline"]; - if (showDetailedCountersDeadline.HaveValue()) { - const ui64 ms = showDetailedCountersDeadline; - Counters_->ShowDetailedCounters(TInstant::MilliSeconds(ms)); - } - + const ui32 status = ev.GetStatus(); + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); + const TValue& attrs(val["attrs"]); + + TSqsEvents::TQueueAttributes attributes; + attributes.ContentBasedDeduplication = attrs["ContentBasedDeduplication"]; + attributes.DelaySeconds = TDuration::MilliSeconds(attrs["DelaySeconds"]); + attributes.FifoQueue = attrs["FifoQueue"]; + attributes.MaximumMessageSize = attrs["MaximumMessageSize"]; + attributes.MessageRetentionPeriod = TDuration::MilliSeconds(attrs["MessageRetentionPeriod"]); + attributes.ReceiveMessageWaitTime = TDuration::MilliSeconds(attrs["ReceiveMessageWaitTime"]); + attributes.VisibilityTimeout = TDuration::MilliSeconds(attrs["VisibilityTimeout"]); + + const TValue showDetailedCountersDeadline = attrs["ShowDetailedCountersDeadline"]; + if (showDetailedCountersDeadline.HaveValue()) { + const ui64 ms = showDetailedCountersDeadline; + Counters_->ShowDetailedCounters(TInstant::MilliSeconds(ms)); + } + // update dead letter queue info const auto& dlqNameVal(attrs["DlqName"]); const auto& maxReceiveCountVal(attrs["MaxReceiveCount"]); @@ -1402,40 +1402,40 @@ void TQueueLeader::OnQueueAttributes(const TSqsEvents::TEvExecuted::TRecord& ev) if (info.DlqName && info.MaxReceiveCount) { DlqInfo_ = info; // now we have to discover queue id and version - Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvGetQueueId("DLQ", UserName_, info.DlqName, FolderId_)); + Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvGetQueueId("DLQ", UserName_, info.DlqName, FolderId_)); } else { DlqInfo_.Clear(); } } - QueueAttributes_ = attributes; - AttributesUpdateTime_ = 
TActivationContext::Now(); - for (auto& req : GetConfigurationRequests_) { - AnswerGetConfiguration(req); - } - GetConfigurationRequests_.clear(); - } else { - for (auto& req : GetConfigurationRequests_) { - AnswerFailed(req); - } - GetConfigurationRequests_.clear(); - } -} - + QueueAttributes_ = attributes; + AttributesUpdateTime_ = TActivationContext::Now(); + for (auto& req : GetConfigurationRequests_) { + AnswerGetConfiguration(req); + } + GetConfigurationRequests_.clear(); + } else { + for (auto& req : GetConfigurationRequests_) { + AnswerFailed(req); + } + GetConfigurationRequests_.clear(); + } +} + void TQueueLeader::HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev) { if (!DlqInfo_) { return; } if (ev->Get()->Failed) { - LOG_SQS_DEBUG("Dlq discovering failed"); + LOG_SQS_DEBUG("Dlq discovering failed"); } else { if (ev->Get()->Exists) { DlqInfo_->QueueId = ev->Get()->QueueId; DlqInfo_->QueueVersion = ev->Get()->Version; DlqInfo_->ShardsCount = ev->Get()->ShardsCount; - LOG_SQS_DEBUG("Discovered DLQ: name: " << DlqInfo_->DlqName << ", maxReceiveCount: " << DlqInfo_->MaxReceiveCount << ", queueId: " << DlqInfo_->QueueId << ", version: " << DlqInfo_->QueueVersion << ", shards count: " << DlqInfo_->ShardsCount); + LOG_SQS_DEBUG("Discovered DLQ: name: " << DlqInfo_->DlqName << ", maxReceiveCount: " << DlqInfo_->MaxReceiveCount << ", queueId: " << DlqInfo_->QueueId << ", version: " << DlqInfo_->QueueVersion << ", shards count: " << DlqInfo_->ShardsCount); return; } } @@ -1444,1100 +1444,1100 @@ void TQueueLeader::HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev) { } void TQueueLeader::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - ev->Get()->Call(); -} - + ev->Get()->Call(); +} + void TQueueLeader::HandlePurgeQueue(TSqsEvents::TEvPurgeQueue::TPtr& ev) { - CreateBackgroundActors(); - Send(PurgeActor_, MakeHolder<TSqsEvents::TEvPurgeQueue>(*ev->Get())); -} - + CreateBackgroundActors(); + Send(PurgeActor_, MakeHolder<TSqsEvents::TEvPurgeQueue>(*ev->Get())); +} + void TQueueLeader::StartGatheringMetrics() { - if (!IsFifoQueue_ && (TActivationContext::Now() - LatestDlqNotificationTs_ >= TDuration::MilliSeconds(Cfg().GetDlqNotificationGracePeriodMs()))) { - if (IsDlqQueue_) { - LOG_SQS_INFO("Stopped periodic message counting for queue " << TLogQueueName(UserName_, QueueName_) - << ". Latest dlq notification was at " << LatestDlqNotificationTs_); + if (!IsFifoQueue_ && (TActivationContext::Now() - LatestDlqNotificationTs_ >= TDuration::MilliSeconds(Cfg().GetDlqNotificationGracePeriodMs()))) { + if (IsDlqQueue_) { + LOG_SQS_INFO("Stopped periodic message counting for queue " << TLogQueueName(UserName_, QueueName_) + << ". 
Latest dlq notification was at " << LatestDlqNotificationTs_); } - IsDlqQueue_ = false; + IsDlqQueue_ = false; } - for (ui64 shard = 0; shard < ShardsCount_; ++shard) { - if (IsFifoQueue_ || IsDlqQueue_) { - RequestMessagesCountMetrics(shard); - } - RequestOldestTimestampMetrics(shard); - } -} - + for (ui64 shard = 0; shard < ShardsCount_; ++shard) { + if (IsFifoQueue_ || IsDlqQueue_) { + RequestMessagesCountMetrics(shard); + } + RequestOldestTimestampMetrics(shard); + } +} + void TQueueLeader::RequestMessagesCountMetrics(ui64 shard) { - if (Shards_[shard].MessagesCountIsRequesting) { - LOG_SQS_DEBUG("Messages count for " << TLogQueueName(UserName_, QueueName_, shard) << " is already requesting"); - return; - } - TExecutorBuilder(SelfId(), "") - .User(UserName_) - .Queue(QueueName_) + if (Shards_[shard].MessagesCountIsRequesting) { + LOG_SQS_DEBUG("Messages count for " << TLogQueueName(UserName_, QueueName_, shard) << " is already requesting"); + return; + } + TExecutorBuilder(SelfId(), "") + .User(UserName_) + .Queue(QueueName_) .QueueLeader(SelfId()) - .QueryId(GET_MESSAGE_COUNT_METRIC_ID) - .QueueVersion(QueueVersion_) - .Fifo(IsFifoQueue_) - .RetryOnTimeout() - .OnExecuted([this, shard](const TSqsEvents::TEvExecuted::TRecord& ev) { ReceiveMessagesCountMetrics(shard, ev); }) - .Counters(Counters_) - .Params() - .Uint64("SHARD", shard) - .ParentBuilder().Start(); - ++MetricsQueriesInfly_; - - Shards_[shard].MessagesCountIsRequesting = true; -} - + .QueryId(GET_MESSAGE_COUNT_METRIC_ID) + .QueueVersion(QueueVersion_) + .Fifo(IsFifoQueue_) + .RetryOnTimeout() + .OnExecuted([this, shard](const TSqsEvents::TEvExecuted::TRecord& ev) { ReceiveMessagesCountMetrics(shard, ev); }) + .Counters(Counters_) + .Params() + .Uint64("SHARD", shard) + .ParentBuilder().Start(); + ++MetricsQueriesInfly_; + + Shards_[shard].MessagesCountIsRequesting = true; +} + void TQueueLeader::RequestOldestTimestampMetrics(ui64 shard) { - if (Shards_[shard].OldestMessageAgeIsRequesting) { - LOG_SQS_DEBUG("Oldest message timestamp " << TLogQueueName(UserName_, QueueName_, shard) << " is already requesting"); - return; - } - TExecutorBuilder(SelfId(), "") - .User(UserName_) - .Queue(QueueName_) - .Shard(shard) + if (Shards_[shard].OldestMessageAgeIsRequesting) { + LOG_SQS_DEBUG("Oldest message timestamp " << TLogQueueName(UserName_, QueueName_, shard) << " is already requesting"); + return; + } + TExecutorBuilder(SelfId(), "") + .User(UserName_) + .Queue(QueueName_) + .Shard(shard) .QueueLeader(SelfId()) - .QueryId(GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) - .QueueVersion(QueueVersion_) - .Fifo(IsFifoQueue_) - .RetryOnTimeout() - .OnExecuted([this, shard](const TSqsEvents::TEvExecuted::TRecord& ev) { ReceiveOldestTimestampMetrics(shard, ev); }) - .Counters(Counters_) - .Params() - .Uint64("TIME_FROM", Shards_[shard].LastSuccessfulOldestMessageTimestampValueMs) // optimization for accurate range selection // timestamp is always nondecreasing - .ParentBuilder().Start(); - ++MetricsQueriesInfly_; - - Shards_[shard].OldestMessageAgeIsRequesting = true; -} - + .QueryId(GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) + .QueueVersion(QueueVersion_) + .Fifo(IsFifoQueue_) + .RetryOnTimeout() + .OnExecuted([this, shard](const TSqsEvents::TEvExecuted::TRecord& ev) { ReceiveOldestTimestampMetrics(shard, ev); }) + .Counters(Counters_) + .Params() + .Uint64("TIME_FROM", Shards_[shard].LastSuccessfulOldestMessageTimestampValueMs) // optimization for accurate range selection // timestamp is always nondecreasing + .ParentBuilder().Start(); + 
++MetricsQueriesInfly_; + + Shards_[shard].OldestMessageAgeIsRequesting = true; +} + void TQueueLeader::ReceiveMessagesCountMetrics(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply) { - LOG_SQS_DEBUG("Handle message count metrics for " << TLogQueueName(UserName_, QueueName_, shard)); - Y_VERIFY(MetricsQueriesInfly_ > 0); - --MetricsQueriesInfly_; - if (MetricsQueriesInfly_ == 0) { - ScheduleMetricsRequest(); - } - Y_VERIFY(shard < Shards_.size()); - Shards_[shard].MessagesCountIsRequesting = false; - Shards_[shard].MessagesCountWasGot = true; - if (reply.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); - const TValue messagesCount = val["messagesCount"]; - if (!messagesCount.IsNull()) { // can be null in case of parallel queue deletion (SQS-292) - Y_VERIFY(i64(messagesCount) >= 0); - Shards_[shard].MessagesCount = static_cast<ui64>(i64(messagesCount)); // MessageCount is Int64 type in database - } - const TValue createdTimestamp = val["createdTimestamp"]; - if (!createdTimestamp.IsNull()) { - Shards_[shard].CreatedTimestamp = TInstant::MilliSeconds(ui64(createdTimestamp)); - } - const TValue inflyMessagesCount = val["inflyMessagesCount"]; - if (!inflyMessagesCount.IsNull()) { // can be null in case of parallel queue deletion (SQS-292) - Shards_[shard].InflyMessagesCount = static_cast<ui64>(i64(inflyMessagesCount)); // InflyCount is Int64 type in database - } - ProcessGetRuntimeQueueAttributes(shard); - } else { - LOG_SQS_ERROR("Failed to get message count metrics for " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); - // leave old metrics values - FailGetRuntimeQueueAttributesForShard(shard); - } - ReportMessagesCountMetricsIfReady(); -} - + LOG_SQS_DEBUG("Handle message count metrics for " << TLogQueueName(UserName_, QueueName_, shard)); + Y_VERIFY(MetricsQueriesInfly_ > 0); + --MetricsQueriesInfly_; + if (MetricsQueriesInfly_ == 0) { + ScheduleMetricsRequest(); + } + Y_VERIFY(shard < Shards_.size()); + Shards_[shard].MessagesCountIsRequesting = false; + Shards_[shard].MessagesCountWasGot = true; + if (reply.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); + const TValue messagesCount = val["messagesCount"]; + if (!messagesCount.IsNull()) { // can be null in case of parallel queue deletion (SQS-292) + Y_VERIFY(i64(messagesCount) >= 0); + Shards_[shard].MessagesCount = static_cast<ui64>(i64(messagesCount)); // MessageCount is Int64 type in database + } + const TValue createdTimestamp = val["createdTimestamp"]; + if (!createdTimestamp.IsNull()) { + Shards_[shard].CreatedTimestamp = TInstant::MilliSeconds(ui64(createdTimestamp)); + } + const TValue inflyMessagesCount = val["inflyMessagesCount"]; + if (!inflyMessagesCount.IsNull()) { // can be null in case of parallel queue deletion (SQS-292) + Shards_[shard].InflyMessagesCount = static_cast<ui64>(i64(inflyMessagesCount)); // InflyCount is Int64 type in database + } + ProcessGetRuntimeQueueAttributes(shard); + } else { + LOG_SQS_ERROR("Failed to get message count metrics for " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); + // leave old metrics values + FailGetRuntimeQueueAttributesForShard(shard); + } + ReportMessagesCountMetricsIfReady(); +} + void TQueueLeader::ReceiveOldestTimestampMetrics(ui64 
shard, const TSqsEvents::TEvExecuted::TRecord& reply) { - LOG_SQS_DEBUG("Handle oldest timestamp metrics for " << TLogQueueName(UserName_, QueueName_, shard)); - Y_VERIFY(MetricsQueriesInfly_ > 0); - --MetricsQueriesInfly_; - if (MetricsQueriesInfly_ == 0) { - ScheduleMetricsRequest(); - } - Y_VERIFY(shard < Shards_.size()); - Shards_[shard].OldestMessageAgeIsRequesting = false; - if (reply.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); - const TValue list = val["messages"]; - if (list.Size()) { - Shards_[shard].LastSuccessfulOldestMessageTimestampValueMs = Shards_[shard].OldestMessageTimestampMs = list[0]["SentTimestamp"]; - } else { - Shards_[shard].OldestMessageTimestampMs = Max(); - } - } else { - LOG_SQS_ERROR("Failed to get oldest timestamp metrics for " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); - // leave old metrics values - } - ReportOldestTimestampMetricsIfReady(); -} - + LOG_SQS_DEBUG("Handle oldest timestamp metrics for " << TLogQueueName(UserName_, QueueName_, shard)); + Y_VERIFY(MetricsQueriesInfly_ > 0); + --MetricsQueriesInfly_; + if (MetricsQueriesInfly_ == 0) { + ScheduleMetricsRequest(); + } + Y_VERIFY(shard < Shards_.size()); + Shards_[shard].OldestMessageAgeIsRequesting = false; + if (reply.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); + const TValue list = val["messages"]; + if (list.Size()) { + Shards_[shard].LastSuccessfulOldestMessageTimestampValueMs = Shards_[shard].OldestMessageTimestampMs = list[0]["SentTimestamp"]; + } else { + Shards_[shard].OldestMessageTimestampMs = Max(); + } + } else { + LOG_SQS_ERROR("Failed to get oldest timestamp metrics for " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); + // leave old metrics values + } + ReportOldestTimestampMetricsIfReady(); +} + void TQueueLeader::ScheduleMetricsRequest() { - const ui64 updateTime = Cfg().GetBackgroundMetricsUpdateTimeMs(); - const ui64 randomTimeToWait = RandomNumber<ui32>(updateTime / 4); - Schedule(TDuration::MilliSeconds(updateTime + randomTimeToWait), new TEvWakeup(UPDATE_COUNTERS_TAG)); -} - + const ui64 updateTime = Cfg().GetBackgroundMetricsUpdateTimeMs(); + const ui64 randomTimeToWait = RandomNumber<ui32>(updateTime / 4); + Schedule(TDuration::MilliSeconds(updateTime + randomTimeToWait), new TEvWakeup(UPDATE_COUNTERS_TAG)); +} + void TQueueLeader::ReportMessagesCountMetricsIfReady() { - ui64 messagesCount = 0; - ui64 inflyMessagesCount = 0; - const TInstant now = TActivationContext::Now(); - for (const auto& shardInfo : Shards_) { - if (IsFifoQueue_) { - if (shardInfo.MessagesCountIsRequesting) { - return; - } - } else { - if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Loaded) { - return; - } - inflyMessagesCount += shardInfo.Infly->GetInflyCount(now); - } - messagesCount += shardInfo.MessagesCount; - } - - if (Counters_) { + ui64 messagesCount = 0; + ui64 inflyMessagesCount = 0; + const TInstant now = TActivationContext::Now(); + for (const auto& shardInfo : Shards_) { + if (IsFifoQueue_) { + if (shardInfo.MessagesCountIsRequesting) { + return; + } + } else { + if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Loaded) { + return; + } + inflyMessagesCount += shardInfo.Infly->GetInflyCount(now); + } + messagesCount += 
shardInfo.MessagesCount; + } + + if (Counters_) { SET_COUNTER_COUPLE(Counters_, MessagesCount, stored_count, messagesCount); - if (!IsFifoQueue_) { // for fifo queues infly is always 0 + if (!IsFifoQueue_) { // for fifo queues infly is always 0 SET_COUNTER_COUPLE(Counters_, InflyMessagesCount, inflight_count, inflyMessagesCount); - } - } -} - + } + } +} + void TQueueLeader::ReportOldestTimestampMetricsIfReady() { - ui64 oldestMessagesTimestamp = Max(); - for (const auto& shardInfo : Shards_) { - if (shardInfo.OldestMessageAgeIsRequesting) { - return; - } - oldestMessagesTimestamp = Min(oldestMessagesTimestamp, shardInfo.OldestMessageTimestampMs); - } - if (Counters_) { - if (oldestMessagesTimestamp != Max<ui64>()) { + ui64 oldestMessagesTimestamp = Max(); + for (const auto& shardInfo : Shards_) { + if (shardInfo.OldestMessageAgeIsRequesting) { + return; + } + oldestMessagesTimestamp = Min(oldestMessagesTimestamp, shardInfo.OldestMessageTimestampMs); + } + if (Counters_) { + if (oldestMessagesTimestamp != Max<ui64>()) { auto age = (TActivationContext::Now() - TInstant::MilliSeconds(oldestMessagesTimestamp)).Seconds(); SET_COUNTER_COUPLE(Counters_, OldestMessageAgeSeconds, oldest_age_milliseconds, age); - } else { + } else { SET_COUNTER_COUPLE(Counters_, OldestMessageAgeSeconds, oldest_age_milliseconds, 0); - } - } -} - + } + } +} + void TQueueLeader::CreateBackgroundActors() { - if ((!IsFifoQueue_ || DeduplicationCleanupActor_) && (!IsFifoQueue_ || ReadsCleanupActor_) && RetentionActor_ && PurgeActor_) { - return; - } - - if (IsFifoQueue_) { - if (!DeduplicationCleanupActor_) { - DeduplicationCleanupActor_ = Register(new TCleanupActor(GetQueuePath(), SelfId(), TCleanupActor::ECleanupType::Deduplication)); - LOG_SQS_DEBUG("Created new deduplication cleanup actor for queue " << TLogQueueName(UserName_, QueueName_) << ". Actor id: " << DeduplicationCleanupActor_); - } - if (!ReadsCleanupActor_) { - ReadsCleanupActor_ = Register(new TCleanupActor(GetQueuePath(), SelfId(), TCleanupActor::ECleanupType::Reads)); - LOG_SQS_DEBUG("Created new reads cleanup actor for queue " << TLogQueueName(UserName_, QueueName_) << ". Actor id: " << ReadsCleanupActor_); - } - } - if (!RetentionActor_) { - RetentionActor_ = Register(new TRetentionActor(GetQueuePath(), SelfId())); - LOG_SQS_DEBUG("Created new retention actor for queue " << TLogQueueName(UserName_, QueueName_) << ". Actor id: " << RetentionActor_); - } - if (!PurgeActor_) { - PurgeActor_ = Register(new TPurgeActor(GetQueuePath(), Counters_, SelfId(), IsFifoQueue_)); - LOG_SQS_DEBUG("Created new purge actor for queue " << TLogQueueName(UserName_, QueueName_) << ". Actor id: " << PurgeActor_); - } -} - + if ((!IsFifoQueue_ || DeduplicationCleanupActor_) && (!IsFifoQueue_ || ReadsCleanupActor_) && RetentionActor_ && PurgeActor_) { + return; + } + + if (IsFifoQueue_) { + if (!DeduplicationCleanupActor_) { + DeduplicationCleanupActor_ = Register(new TCleanupActor(GetQueuePath(), SelfId(), TCleanupActor::ECleanupType::Deduplication)); + LOG_SQS_DEBUG("Created new deduplication cleanup actor for queue " << TLogQueueName(UserName_, QueueName_) << ". Actor id: " << DeduplicationCleanupActor_); + } + if (!ReadsCleanupActor_) { + ReadsCleanupActor_ = Register(new TCleanupActor(GetQueuePath(), SelfId(), TCleanupActor::ECleanupType::Reads)); + LOG_SQS_DEBUG("Created new reads cleanup actor for queue " << TLogQueueName(UserName_, QueueName_) << ". 
Actor id: " << ReadsCleanupActor_); + } + } + if (!RetentionActor_) { + RetentionActor_ = Register(new TRetentionActor(GetQueuePath(), SelfId())); + LOG_SQS_DEBUG("Created new retention actor for queue " << TLogQueueName(UserName_, QueueName_) << ". Actor id: " << RetentionActor_); + } + if (!PurgeActor_) { + PurgeActor_ = Register(new TPurgeActor(GetQueuePath(), Counters_, SelfId(), IsFifoQueue_)); + LOG_SQS_DEBUG("Created new purge actor for queue " << TLogQueueName(UserName_, QueueName_) << ". Actor id: " << PurgeActor_); + } +} + void TQueueLeader::MarkInflyReloading(ui64 shard, size_t invalidatedCount, const TString& invalidationReason) { - LWPROBE(InflyInvalidation, UserName_, QueueName_, shard, invalidatedCount, invalidationReason); - auto& shardInfo = Shards_[shard]; - if (!shardInfo.NeedInflyReload) { - shardInfo.NeedInflyReload = true; - LOG_SQS_WARN("Mark infly " << TLogQueueName(UserName_, QueueName_, shard) << " for reloading. Reason: " << invalidationReason); - } -} - + LWPROBE(InflyInvalidation, UserName_, QueueName_, shard, invalidatedCount, invalidationReason); + auto& shardInfo = Shards_[shard]; + if (!shardInfo.NeedInflyReload) { + shardInfo.NeedInflyReload = true; + LOG_SQS_WARN("Mark infly " << TLogQueueName(UserName_, QueueName_, shard) << " for reloading. Reason: " << invalidationReason); + } +} + void TQueueLeader::StartLoadingInfly() { - for (ui64 shard = 0; shard < Shards_.size(); ++shard) { - StartLoadingInfly(shard); - } -} - + for (ui64 shard = 0; shard < Shards_.size(); ++shard) { + StartLoadingInfly(shard); + } +} + void TQueueLeader::StartLoadingInfly(ui64 shard, bool afterFailure) { - auto& shardInfo = Shards_[shard]; - if (shardInfo.InflyLoadState == TShardInfo::EInflyLoadState::Fifo - || shardInfo.InflyLoadState == TShardInfo::EInflyLoadState::WaitingForActiveRequests && shardInfo.ActiveMessageRequests > 0 - || shardInfo.InflyLoadState == TShardInfo::EInflyLoadState::WaitingForDbAnswer - || shardInfo.InflyLoadState == TShardInfo::EInflyLoadState::Failed && !afterFailure) { - LOG_SQS_TRACE("Start loading infly for queue " << TLogQueueName(UserName_, QueueName_, shard) - << ". Skipping. State: " << static_cast<int>(shardInfo.InflyLoadState) - << ". ActiveMessageRequests: " << shardInfo.ActiveMessageRequests - << ". After failure: " << afterFailure); - return; - } - - if (shardInfo.ActiveMessageRequests > 0) { - LOG_SQS_DEBUG("Start loading infly for queue " << TLogQueueName(UserName_, QueueName_, shard) << ". Waiting for active message requests. 
Requests count: " << shardInfo.ActiveMessageRequests); - shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::WaitingForActiveRequests; - return; - } - - LOG_SQS_INFO("Start loading infly for queue " << TLogQueueName(UserName_, QueueName_, shard)); - shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::WaitingForDbAnswer; - Y_VERIFY(shardInfo.LoadInflyRequests == 0); - shardInfo.LoadInflyRequests = 2; - shardInfo.NeedInflyReload = false; - shardInfo.Infly = nullptr; - TExecutorBuilder(SelfId(), "") - .User(UserName_) - .Queue(QueueName_) - .Shard(shard) + auto& shardInfo = Shards_[shard]; + if (shardInfo.InflyLoadState == TShardInfo::EInflyLoadState::Fifo + || shardInfo.InflyLoadState == TShardInfo::EInflyLoadState::WaitingForActiveRequests && shardInfo.ActiveMessageRequests > 0 + || shardInfo.InflyLoadState == TShardInfo::EInflyLoadState::WaitingForDbAnswer + || shardInfo.InflyLoadState == TShardInfo::EInflyLoadState::Failed && !afterFailure) { + LOG_SQS_TRACE("Start loading infly for queue " << TLogQueueName(UserName_, QueueName_, shard) + << ". Skipping. State: " << static_cast<int>(shardInfo.InflyLoadState) + << ". ActiveMessageRequests: " << shardInfo.ActiveMessageRequests + << ". After failure: " << afterFailure); + return; + } + + if (shardInfo.ActiveMessageRequests > 0) { + LOG_SQS_DEBUG("Start loading infly for queue " << TLogQueueName(UserName_, QueueName_, shard) << ". Waiting for active message requests. Requests count: " << shardInfo.ActiveMessageRequests); + shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::WaitingForActiveRequests; + return; + } + + LOG_SQS_INFO("Start loading infly for queue " << TLogQueueName(UserName_, QueueName_, shard)); + shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::WaitingForDbAnswer; + Y_VERIFY(shardInfo.LoadInflyRequests == 0); + shardInfo.LoadInflyRequests = 2; + shardInfo.NeedInflyReload = false; + shardInfo.Infly = nullptr; + TExecutorBuilder(SelfId(), "") + .User(UserName_) + .Queue(QueueName_) + .Shard(shard) .QueueLeader(SelfId()) - .QueryId(LOAD_INFLY_ID) - .QueueVersion(QueueVersion_) - .Fifo(IsFifoQueue_) - .RetryOnTimeout() - .OnExecuted([this, shard](const TSqsEvents::TEvExecuted::TRecord& ev) { OnInflyLoaded(shard, ev); }) - .Counters(Counters_) - .Params() - .Uint64("SHARD", shard) - .ParentBuilder().Start(); - - TExecutorBuilder(SelfId(), "") - .User(UserName_) - .Queue(QueueName_) - .Shard(shard) + .QueryId(LOAD_INFLY_ID) + .QueueVersion(QueueVersion_) + .Fifo(IsFifoQueue_) + .RetryOnTimeout() + .OnExecuted([this, shard](const TSqsEvents::TEvExecuted::TRecord& ev) { OnInflyLoaded(shard, ev); }) + .Counters(Counters_) + .Params() + .Uint64("SHARD", shard) + .ParentBuilder().Start(); + + TExecutorBuilder(SelfId(), "") + .User(UserName_) + .Queue(QueueName_) + .Shard(shard) .QueueLeader(SelfId()) - .QueryId(GET_STATE_ID) - .QueueVersion(QueueVersion_) - .Fifo(IsFifoQueue_) - .RetryOnTimeout() - .OnExecuted([this, shard](const TSqsEvents::TEvExecuted::TRecord& ev) { OnStateLoaded(shard, ev); }) - .Counters(Counters_) - .Params() - .Uint64("SHARD", shard) - .ParentBuilder().Start(); -} - + .QueryId(GET_STATE_ID) + .QueueVersion(QueueVersion_) + .Fifo(IsFifoQueue_) + .RetryOnTimeout() + .OnExecuted([this, shard](const TSqsEvents::TEvExecuted::TRecord& ev) { OnStateLoaded(shard, ev); }) + .Counters(Counters_) + .Params() + .Uint64("SHARD", shard) + .ParentBuilder().Start(); +} + void TQueueLeader::OnInflyLoaded(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply) { - LOG_SQS_TRACE("Infly load reply for shard " << 
TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); - auto& shardInfo = Shards_[shard]; - Y_VERIFY(shardInfo.LoadInflyRequests > 0); - --shardInfo.LoadInflyRequests; - const bool ok = reply.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete; - if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Failed && ok) { - shardInfo.Infly = MakeIntrusive<TInflyMessages>(); - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); - const TValue list = val["infly"]; - const TInstant now = TActivationContext::Now(); - for (size_t i = 0, size = list.Size(); i < size; ++i) { - const TValue& message = list[i]; - const TValue& visibilityDeadlineValue = message["VisibilityDeadline"]; - const ui64 visibilityDeadlineMs = visibilityDeadlineValue.HaveValue() ? ui64(visibilityDeadlineValue) : 0; - const TValue& delayDeadlineValue = message["DelayDeadline"]; - const ui64 delayDeadlineMs = delayDeadlineValue.HaveValue() ? ui64(delayDeadlineValue) : 0; - const TInstant delayDeadline = TInstant::MilliSeconds(delayDeadlineMs); - if (delayDeadline && !shardInfo.DelayStatisticsInited && delayDeadline > now) { - DelayStatistics_.AddDelayedMessage(delayDeadline, now); - } - const ui64 offset = message["Offset"]; + LOG_SQS_TRACE("Infly load reply for shard " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); + auto& shardInfo = Shards_[shard]; + Y_VERIFY(shardInfo.LoadInflyRequests > 0); + --shardInfo.LoadInflyRequests; + const bool ok = reply.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete; + if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Failed && ok) { + shardInfo.Infly = MakeIntrusive<TInflyMessages>(); + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); + const TValue list = val["infly"]; + const TInstant now = TActivationContext::Now(); + for (size_t i = 0, size = list.Size(); i < size; ++i) { + const TValue& message = list[i]; + const TValue& visibilityDeadlineValue = message["VisibilityDeadline"]; + const ui64 visibilityDeadlineMs = visibilityDeadlineValue.HaveValue() ? ui64(visibilityDeadlineValue) : 0; + const TValue& delayDeadlineValue = message["DelayDeadline"]; + const ui64 delayDeadlineMs = delayDeadlineValue.HaveValue() ? 
ui64(delayDeadlineValue) : 0; + const TInstant delayDeadline = TInstant::MilliSeconds(delayDeadlineMs); + if (delayDeadline && !shardInfo.DelayStatisticsInited && delayDeadline > now) { + DelayStatistics_.AddDelayedMessage(delayDeadline, now); + } + const ui64 offset = message["Offset"]; const ui32 receiveCount = message["ReceiveCount"]; - const TInstant maxVisibilityDeadline = TInstant::MilliSeconds(Max(visibilityDeadlineMs, delayDeadlineMs)); - LOG_SQS_TRACE("Adding message to infly struct for shard " << TLogQueueName(UserName_, QueueName_, shard) << ": { Offset: " << offset << ", VisibilityDeadline: " << maxVisibilityDeadline << ", ReceiveCount: " << receiveCount << " }"); + const TInstant maxVisibilityDeadline = TInstant::MilliSeconds(Max(visibilityDeadlineMs, delayDeadlineMs)); + LOG_SQS_TRACE("Adding message to infly struct for shard " << TLogQueueName(UserName_, QueueName_, shard) << ": { Offset: " << offset << ", VisibilityDeadline: " << maxVisibilityDeadline << ", ReceiveCount: " << receiveCount << " }"); shardInfo.Infly->Add(MakeHolder<TInflyMessage>(offset, message["RandomId"], maxVisibilityDeadline, receiveCount)); - } - LWPROBE(LoadInfly, UserName_, QueueName_, shard, list.Size()); - shardInfo.InflyVersion = val["inflyVersion"]; - LOG_SQS_DEBUG("Infly version for shard " << TLogQueueName(UserName_, QueueName_, shard) << ": " << shardInfo.InflyVersion); - shardInfo.DelayStatisticsInited = true; - - if (shardInfo.NeedAddingMessagesToInfly) { - const ui64 limit = INFLY_LIMIT / ShardsCount_; - if (shardInfo.MessagesCount == 0 || shardInfo.Infly->GetCapacity() >= limit) { - ProcessReceivesAfterAddedMessagesToInfly(shard); - } else { - AddMessagesToInfly(shard); - } - } - - if (shardInfo.LoadInflyRequests == 0) { - shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::Loaded; - StartMessageRequestsAfterInflyLoaded(shard); - ProcessGetRuntimeQueueAttributes(shard); - } - } else { - if (!ok) { - LOG_SQS_ERROR("Failed to load infly for " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); - } - if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Failed) { - FailMessageRequestsAfterInflyLoadFailure(shard); - FailGetRuntimeQueueAttributesForShard(shard); - } - shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::Failed; - if (shardInfo.LoadInflyRequests == 0) { - ScheduleInflyLoadAfterFailure(shard); - } - } -} - + } + LWPROBE(LoadInfly, UserName_, QueueName_, shard, list.Size()); + shardInfo.InflyVersion = val["inflyVersion"]; + LOG_SQS_DEBUG("Infly version for shard " << TLogQueueName(UserName_, QueueName_, shard) << ": " << shardInfo.InflyVersion); + shardInfo.DelayStatisticsInited = true; + + if (shardInfo.NeedAddingMessagesToInfly) { + const ui64 limit = INFLY_LIMIT / ShardsCount_; + if (shardInfo.MessagesCount == 0 || shardInfo.Infly->GetCapacity() >= limit) { + ProcessReceivesAfterAddedMessagesToInfly(shard); + } else { + AddMessagesToInfly(shard); + } + } + + if (shardInfo.LoadInflyRequests == 0) { + shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::Loaded; + StartMessageRequestsAfterInflyLoaded(shard); + ProcessGetRuntimeQueueAttributes(shard); + } + } else { + if (!ok) { + LOG_SQS_ERROR("Failed to load infly for " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); + } + if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Failed) { + FailMessageRequestsAfterInflyLoadFailure(shard); + FailGetRuntimeQueueAttributesForShard(shard); + } + shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::Failed; + if 
(shardInfo.LoadInflyRequests == 0) { + ScheduleInflyLoadAfterFailure(shard); + } + } +} + void TQueueLeader::OnStateLoaded(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply) { - auto& shardInfo = Shards_[shard]; - Y_VERIFY(shardInfo.LoadInflyRequests > 0); - --shardInfo.LoadInflyRequests; - const bool ok = reply.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete; - if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Failed && ok) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); - const TValue state = val["state"]; - auto messagesCount = state["MessageCount"]; - auto inflyCount = state["InflyCount"]; - Y_VERIFY(i64(messagesCount) >= 0); - Y_VERIFY(i64(inflyCount) >= 0); - shardInfo.MessagesCount = static_cast<ui64>(i64(messagesCount)); - shardInfo.InflyMessagesCount = static_cast<ui64>(i64(inflyCount)); - shardInfo.ReadOffset = state["ReadOffset"]; - shardInfo.CreatedTimestamp = TInstant::MilliSeconds(ui64(state["CreatedTimestamp"])); - - if (shardInfo.LoadInflyRequests == 0) { - shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::Loaded; - StartMessageRequestsAfterInflyLoaded(shard); - ProcessGetRuntimeQueueAttributes(shard); - } - } else { - if (!ok) { - LOG_SQS_ERROR("Failed to load state for " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); - } - if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Failed) { - FailMessageRequestsAfterInflyLoadFailure(shard); - FailGetRuntimeQueueAttributesForShard(shard); - } - shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::Failed; - if (shardInfo.LoadInflyRequests == 0) { - ScheduleInflyLoadAfterFailure(shard); - } - } -} - + auto& shardInfo = Shards_[shard]; + Y_VERIFY(shardInfo.LoadInflyRequests > 0); + --shardInfo.LoadInflyRequests; + const bool ok = reply.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete; + if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Failed && ok) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); + const TValue state = val["state"]; + auto messagesCount = state["MessageCount"]; + auto inflyCount = state["InflyCount"]; + Y_VERIFY(i64(messagesCount) >= 0); + Y_VERIFY(i64(inflyCount) >= 0); + shardInfo.MessagesCount = static_cast<ui64>(i64(messagesCount)); + shardInfo.InflyMessagesCount = static_cast<ui64>(i64(inflyCount)); + shardInfo.ReadOffset = state["ReadOffset"]; + shardInfo.CreatedTimestamp = TInstant::MilliSeconds(ui64(state["CreatedTimestamp"])); + + if (shardInfo.LoadInflyRequests == 0) { + shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::Loaded; + StartMessageRequestsAfterInflyLoaded(shard); + ProcessGetRuntimeQueueAttributes(shard); + } + } else { + if (!ok) { + LOG_SQS_ERROR("Failed to load state for " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); + } + if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Failed) { + FailMessageRequestsAfterInflyLoadFailure(shard); + FailGetRuntimeQueueAttributesForShard(shard); + } + shardInfo.InflyLoadState = TShardInfo::EInflyLoadState::Failed; + if (shardInfo.LoadInflyRequests == 0) { + ScheduleInflyLoadAfterFailure(shard); + } + } +} + bool TQueueLeader::AddMessagesToInfly(ui64 shard) { - auto& shardInfo = Shards_[shard]; - LOG_SQS_INFO("Adding messages to infly for queue " << TLogQueueName(UserName_, QueueName_, shard)); - shardInfo.AddingMessagesToInfly = true; - 
shardInfo.AddMessagesToInflyCheckAttempts = 0; - const ui64 limit = INFLY_LIMIT / ShardsCount_; - TExecutorBuilder(SelfId(), "") - .User(UserName_) - .Queue(QueueName_) - .Shard(shard) + auto& shardInfo = Shards_[shard]; + LOG_SQS_INFO("Adding messages to infly for queue " << TLogQueueName(UserName_, QueueName_, shard)); + shardInfo.AddingMessagesToInfly = true; + shardInfo.AddMessagesToInflyCheckAttempts = 0; + const ui64 limit = INFLY_LIMIT / ShardsCount_; + TExecutorBuilder(SelfId(), "") + .User(UserName_) + .Queue(QueueName_) + .Shard(shard) .QueueLeader(SelfId()) - .QueryId(ADD_MESSAGES_TO_INFLY_ID) - .QueueVersion(QueueVersion_) - .Fifo(IsFifoQueue_) - .OnExecuted([this, shard](const TSqsEvents::TEvExecuted::TRecord& ev) { OnAddedMessagesToInfly(shard, ev); }) - .Counters(Counters_) - .Params() - .Uint64("SHARD", shard) - .Uint64("INFLY_LIMIT", limit) - .Uint64("FROM", shardInfo.ReadOffset) - .Uint64("EXPECTED_MAX_COUNT", Min(limit - shardInfo.Infly->GetCapacity(), Cfg().GetAddMesagesToInflyBatchSize())) - .ParentBuilder().Start(); - return true; -} - + .QueryId(ADD_MESSAGES_TO_INFLY_ID) + .QueueVersion(QueueVersion_) + .Fifo(IsFifoQueue_) + .OnExecuted([this, shard](const TSqsEvents::TEvExecuted::TRecord& ev) { OnAddedMessagesToInfly(shard, ev); }) + .Counters(Counters_) + .Params() + .Uint64("SHARD", shard) + .Uint64("INFLY_LIMIT", limit) + .Uint64("FROM", shardInfo.ReadOffset) + .Uint64("EXPECTED_MAX_COUNT", Min(limit - shardInfo.Infly->GetCapacity(), Cfg().GetAddMesagesToInflyBatchSize())) + .ParentBuilder().Start(); + return true; +} + void TQueueLeader::OnAddedMessagesToInfly(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply) { - auto& shardInfo = Shards_[shard]; - Y_VERIFY(shardInfo.AddingMessagesToInfly); - shardInfo.AddingMessagesToInfly = false; - shardInfo.LastAddMessagesToInfly = TActivationContext::Now(); - - bool markInflyReloading = false; - size_t inflyVersionDiff = 0; - if (reply.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); - const TValue list = val["messages"]; - for (size_t i = 0, size = list.Size(); i < size; ++i) { - const TValue& message = list[i]; - const TValue& delayDeadlineValue = message["DelayDeadline"]; - const ui64 delayDeadlineMs = delayDeadlineValue.HaveValue() ? ui64(delayDeadlineValue) : 0; - const TInstant delayDeadline = TInstant::MilliSeconds(delayDeadlineMs); - const ui64 offset = message["Offset"]; + auto& shardInfo = Shards_[shard]; + Y_VERIFY(shardInfo.AddingMessagesToInfly); + shardInfo.AddingMessagesToInfly = false; + shardInfo.LastAddMessagesToInfly = TActivationContext::Now(); + + bool markInflyReloading = false; + size_t inflyVersionDiff = 0; + if (reply.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(reply.GetExecutionEngineEvaluatedResponse())); + const TValue list = val["messages"]; + for (size_t i = 0, size = list.Size(); i < size; ++i) { + const TValue& message = list[i]; + const TValue& delayDeadlineValue = message["DelayDeadline"]; + const ui64 delayDeadlineMs = delayDeadlineValue.HaveValue() ? 
ui64(delayDeadlineValue) : 0; + const TInstant delayDeadline = TInstant::MilliSeconds(delayDeadlineMs); + const ui64 offset = message["Offset"]; const ui32 receiveCount = 0; // as in transaction - LOG_SQS_TRACE("Adding message to infly struct for shard " << TLogQueueName(UserName_, QueueName_, shard) << ": { Offset: " << offset << ", DelayDeadline: " << delayDeadline << ", ReceiveCount: " << receiveCount << " }"); + LOG_SQS_TRACE("Adding message to infly struct for shard " << TLogQueueName(UserName_, QueueName_, shard) << ": { Offset: " << offset << ", DelayDeadline: " << delayDeadline << ", ReceiveCount: " << receiveCount << " }"); shardInfo.Infly->Add(MakeHolder<TInflyMessage>(offset, message["RandomId"], delayDeadline, receiveCount)); - } - LWPROBE(AddMessagesToInfly, UserName_, QueueName_, shard, list.Size()); - shardInfo.ReadOffset = val["readOffset"]; - const ui64 currentInflyVersion = val["currentInflyVersion"]; - if (shardInfo.InflyVersion != currentInflyVersion) { - Y_VERIFY(shardInfo.InflyVersion < currentInflyVersion); - inflyVersionDiff = currentInflyVersion - shardInfo.InflyVersion; - LOG_SQS_WARN("Concurrent infly version change detected for " << TLogQueueName(UserName_, QueueName_, shard) << ". Expected " - << shardInfo.InflyVersion << ", but got: " << currentInflyVersion << ". Mark infly for reloading"); - markInflyReloading = true; - } - shardInfo.InflyVersion = val["newInflyVersion"]; - - // Update messages count - shardInfo.MessagesCount = static_cast<ui64>(i64(val["messagesCount"])); - } else { - LOG_SQS_ERROR("Failed to add new messages to infly for " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); - } - - ProcessReceivesAfterAddedMessagesToInfly(shard); - - // First process requests and then reload infly - if (markInflyReloading) { - MarkInflyReloading(shard, inflyVersionDiff, INFLY_INVALIDATION_REASON_VERSION_CHANGED); - } -} - + } + LWPROBE(AddMessagesToInfly, UserName_, QueueName_, shard, list.Size()); + shardInfo.ReadOffset = val["readOffset"]; + const ui64 currentInflyVersion = val["currentInflyVersion"]; + if (shardInfo.InflyVersion != currentInflyVersion) { + Y_VERIFY(shardInfo.InflyVersion < currentInflyVersion); + inflyVersionDiff = currentInflyVersion - shardInfo.InflyVersion; + LOG_SQS_WARN("Concurrent infly version change detected for " << TLogQueueName(UserName_, QueueName_, shard) << ". Expected " + << shardInfo.InflyVersion << ", but got: " << currentInflyVersion << ". 
Mark infly for reloading"); + markInflyReloading = true; + } + shardInfo.InflyVersion = val["newInflyVersion"]; + + // Update messages count + shardInfo.MessagesCount = static_cast<ui64>(i64(val["messagesCount"])); + } else { + LOG_SQS_ERROR("Failed to add new messages to infly for " << TLogQueueName(UserName_, QueueName_, shard) << ": " << reply); + } + + ProcessReceivesAfterAddedMessagesToInfly(shard); + + // First process requests and then reload infly + if (markInflyReloading) { + MarkInflyReloading(shard, inflyVersionDiff, INFLY_INVALIDATION_REASON_VERSION_CHANGED); + } +} + void TQueueLeader::ProcessReceivesAfterAddedMessagesToInfly(ui64 shard) { - std::vector<TReceiveMessageBatchRequestProcessing*> requestsToContinue; - requestsToContinue.reserve(ReceiveMessageRequests_.size()); - for (auto&& [reqId, req] : ReceiveMessageRequests_) { - if (req.GetCurrentShard() == shard && req.WaitingAddMessagesToInfly) { - requestsToContinue.push_back(&req); - } - } - for (auto* req : requestsToContinue) { - req->WaitingAddMessagesToInfly = false; - ProcessReceiveMessageBatch(*req); - } -} - + std::vector<TReceiveMessageBatchRequestProcessing*> requestsToContinue; + requestsToContinue.reserve(ReceiveMessageRequests_.size()); + for (auto&& [reqId, req] : ReceiveMessageRequests_) { + if (req.GetCurrentShard() == shard && req.WaitingAddMessagesToInfly) { + requestsToContinue.push_back(&req); + } + } + for (auto* req : requestsToContinue) { + req->WaitingAddMessagesToInfly = false; + ProcessReceiveMessageBatch(*req); + } +} + void TQueueLeader::FailMessageRequestsAfterInflyLoadFailure(ui64 shard) { - std::vector<TString> requestsToDelete; - requestsToDelete.reserve(Max(ReceiveMessageRequests_.size(), SendMessageRequests_.size())); - for (auto&& [reqId, req] : ReceiveMessageRequests_) { - if (req.GetCurrentShard() == shard) { - const TString& requestId = reqId; - requestsToDelete.emplace_back(requestId); - RLOG_SQS_REQ_ERROR(requestId, "Failed to load infly for shard " << shard); - req.Answer->Failed = true; - Send(req.Event->Sender, std::move(req.Answer)); - } - } - for (const auto& reqId : requestsToDelete) { - ReceiveMessageRequests_.erase(reqId); - } - Shards_[shard].LoadBatchingState.CancelRequestsAfterInflyLoadFailure(); - requestsToDelete.clear(); - - for (auto&& [reqId, req] : SendMessageRequests_) { - if (req.Shard == shard) { - const TString& requestId = reqId; - requestsToDelete.emplace_back(requestId); - RLOG_SQS_REQ_ERROR(requestId, "Failed to load infly for shard " << shard); - auto answer = MakeHolder<TSqsEvents::TEvSendMessageBatchResponse>(); - answer->Statuses.resize(req.Event->Get()->Messages.size()); - for (auto& s : answer->Statuses) { - s.Status = TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::Failed; - } - Send(req.Event->Sender, answer.Release()); - } - } - for (const auto& reqId : requestsToDelete) { - SendMessageRequests_.erase(reqId); - } - Shards_[shard].SendBatchingState.CancelRequestsAfterInflyLoadFailure(); - - { - std::vector<std::pair<TString, ui64>> failedDeleteRequests; - failedDeleteRequests.reserve(DeleteMessageRequests_.size()); - for (auto&& [reqIdAndShard, reqInfo] : DeleteMessageRequests_) { - if (reqInfo.Event->Get()->Shard == shard) { - failedDeleteRequests.emplace_back(reqIdAndShard); - const TString& requestId = reqIdAndShard.first; - RLOG_SQS_REQ_ERROR(requestId, "Failed to load infly for shard " << shard); - auto answer = MakeHolder<TSqsEvents::TEvDeleteMessageBatchResponse>(); - answer->Shard = shard; - 
answer->Statuses.resize(reqInfo.Event->Get()->Messages.size()); - for (auto& status : answer->Statuses) { - status.Status = TSqsEvents::TEvDeleteMessageBatchResponse::EDeleteMessageStatus::Failed; - } - - Send(reqInfo.Event->Sender, answer.Release()); - } - } - for (const auto& reqIdAndShard : failedDeleteRequests) { - DeleteMessageRequests_.erase(reqIdAndShard); - } - Shards_[shard].DeleteBatchingState.CancelRequestsAfterInflyLoadFailure(); - } - - { - std::vector<std::pair<TString, ui64>> failedChangeMessageVisibilityRequests; - failedChangeMessageVisibilityRequests.reserve(ChangeMessageVisibilityRequests_.size()); - for (auto&& [reqIdAndShard, reqInfo] : ChangeMessageVisibilityRequests_) { - if (reqInfo.Event->Get()->Shard == shard) { - failedChangeMessageVisibilityRequests.emplace_back(reqIdAndShard); - const TString& requestId = reqIdAndShard.first; - RLOG_SQS_REQ_ERROR(requestId, "Failed to load infly for shard " << shard); - for (auto& status : reqInfo.Answer->Statuses) { - status.Status = TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::Failed; - } - - Send(reqInfo.Event->Sender, reqInfo.Answer.Release()); - } - } - for (const auto& reqIdAndShard : failedChangeMessageVisibilityRequests) { - ChangeMessageVisibilityRequests_.erase(reqIdAndShard); - } - } -} - + std::vector<TString> requestsToDelete; + requestsToDelete.reserve(Max(ReceiveMessageRequests_.size(), SendMessageRequests_.size())); + for (auto&& [reqId, req] : ReceiveMessageRequests_) { + if (req.GetCurrentShard() == shard) { + const TString& requestId = reqId; + requestsToDelete.emplace_back(requestId); + RLOG_SQS_REQ_ERROR(requestId, "Failed to load infly for shard " << shard); + req.Answer->Failed = true; + Send(req.Event->Sender, std::move(req.Answer)); + } + } + for (const auto& reqId : requestsToDelete) { + ReceiveMessageRequests_.erase(reqId); + } + Shards_[shard].LoadBatchingState.CancelRequestsAfterInflyLoadFailure(); + requestsToDelete.clear(); + + for (auto&& [reqId, req] : SendMessageRequests_) { + if (req.Shard == shard) { + const TString& requestId = reqId; + requestsToDelete.emplace_back(requestId); + RLOG_SQS_REQ_ERROR(requestId, "Failed to load infly for shard " << shard); + auto answer = MakeHolder<TSqsEvents::TEvSendMessageBatchResponse>(); + answer->Statuses.resize(req.Event->Get()->Messages.size()); + for (auto& s : answer->Statuses) { + s.Status = TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::Failed; + } + Send(req.Event->Sender, answer.Release()); + } + } + for (const auto& reqId : requestsToDelete) { + SendMessageRequests_.erase(reqId); + } + Shards_[shard].SendBatchingState.CancelRequestsAfterInflyLoadFailure(); + + { + std::vector<std::pair<TString, ui64>> failedDeleteRequests; + failedDeleteRequests.reserve(DeleteMessageRequests_.size()); + for (auto&& [reqIdAndShard, reqInfo] : DeleteMessageRequests_) { + if (reqInfo.Event->Get()->Shard == shard) { + failedDeleteRequests.emplace_back(reqIdAndShard); + const TString& requestId = reqIdAndShard.first; + RLOG_SQS_REQ_ERROR(requestId, "Failed to load infly for shard " << shard); + auto answer = MakeHolder<TSqsEvents::TEvDeleteMessageBatchResponse>(); + answer->Shard = shard; + answer->Statuses.resize(reqInfo.Event->Get()->Messages.size()); + for (auto& status : answer->Statuses) { + status.Status = TSqsEvents::TEvDeleteMessageBatchResponse::EDeleteMessageStatus::Failed; + } + + Send(reqInfo.Event->Sender, answer.Release()); + } + } + for (const auto& reqIdAndShard : failedDeleteRequests) { + 
DeleteMessageRequests_.erase(reqIdAndShard); + } + Shards_[shard].DeleteBatchingState.CancelRequestsAfterInflyLoadFailure(); + } + + { + std::vector<std::pair<TString, ui64>> failedChangeMessageVisibilityRequests; + failedChangeMessageVisibilityRequests.reserve(ChangeMessageVisibilityRequests_.size()); + for (auto&& [reqIdAndShard, reqInfo] : ChangeMessageVisibilityRequests_) { + if (reqInfo.Event->Get()->Shard == shard) { + failedChangeMessageVisibilityRequests.emplace_back(reqIdAndShard); + const TString& requestId = reqIdAndShard.first; + RLOG_SQS_REQ_ERROR(requestId, "Failed to load infly for shard " << shard); + for (auto& status : reqInfo.Answer->Statuses) { + status.Status = TSqsEvents::TEvChangeMessageVisibilityBatchResponse::EMessageStatus::Failed; + } + + Send(reqInfo.Event->Sender, reqInfo.Answer.Release()); + } + } + for (const auto& reqIdAndShard : failedChangeMessageVisibilityRequests) { + ChangeMessageVisibilityRequests_.erase(reqIdAndShard); + } + } +} + void TQueueLeader::StartMessageRequestsAfterInflyLoaded(ui64 shard) { - { - std::vector<TReceiveMessageBatchRequestProcessing*> receiveRequests; - receiveRequests.reserve(ReceiveMessageRequests_.size()); - for (auto&& [reqId, req] : ReceiveMessageRequests_) { - if (req.GetCurrentShard() == shard) { - receiveRequests.push_back(&req); - } - } - for (auto* req : receiveRequests) { - ProcessReceiveMessageBatch(*req); - } - } - - { - std::vector<TSendMessageBatchRequestProcessing*> sendRequests; - sendRequests.reserve(SendMessageRequests_.size()); - for (auto&& [reqId, req] : SendMessageRequests_) { - if (req.Shard == shard) { - sendRequests.push_back(&req); - } - } - for (auto* req : sendRequests) { - ProcessSendMessageBatch(*req); - } - } - - { - std::vector<TDeleteMessageBatchRequestProcessing*> deleteRequests; - deleteRequests.reserve(DeleteMessageRequests_.size()); - for (auto&& [reqIdAndShard, reqInfo] : DeleteMessageRequests_) { - if (reqInfo.Event->Get()->Shard == shard) { - deleteRequests.push_back(&reqInfo); - } - } - for (auto* reqInfo : deleteRequests) { - ProcessDeleteMessageBatch(*reqInfo); - } - } - - { - std::vector<TChangeMessageVisibilityBatchRequestProcessing*> changeMessageVisibilityRequests; - changeMessageVisibilityRequests.reserve(ChangeMessageVisibilityRequests_.size()); - for (auto&& [reqIdAndShard, reqInfo] : ChangeMessageVisibilityRequests_) { - if (reqInfo.Event->Get()->Shard == shard) { - changeMessageVisibilityRequests.push_back(&reqInfo); - } - } - for (auto* reqInfo : changeMessageVisibilityRequests) { - ProcessChangeMessageVisibilityBatch(*reqInfo); - } - } -} - + { + std::vector<TReceiveMessageBatchRequestProcessing*> receiveRequests; + receiveRequests.reserve(ReceiveMessageRequests_.size()); + for (auto&& [reqId, req] : ReceiveMessageRequests_) { + if (req.GetCurrentShard() == shard) { + receiveRequests.push_back(&req); + } + } + for (auto* req : receiveRequests) { + ProcessReceiveMessageBatch(*req); + } + } + + { + std::vector<TSendMessageBatchRequestProcessing*> sendRequests; + sendRequests.reserve(SendMessageRequests_.size()); + for (auto&& [reqId, req] : SendMessageRequests_) { + if (req.Shard == shard) { + sendRequests.push_back(&req); + } + } + for (auto* req : sendRequests) { + ProcessSendMessageBatch(*req); + } + } + + { + std::vector<TDeleteMessageBatchRequestProcessing*> deleteRequests; + deleteRequests.reserve(DeleteMessageRequests_.size()); + for (auto&& [reqIdAndShard, reqInfo] : DeleteMessageRequests_) { + if (reqInfo.Event->Get()->Shard == shard) { + 
deleteRequests.push_back(&reqInfo); + } + } + for (auto* reqInfo : deleteRequests) { + ProcessDeleteMessageBatch(*reqInfo); + } + } + + { + std::vector<TChangeMessageVisibilityBatchRequestProcessing*> changeMessageVisibilityRequests; + changeMessageVisibilityRequests.reserve(ChangeMessageVisibilityRequests_.size()); + for (auto&& [reqIdAndShard, reqInfo] : ChangeMessageVisibilityRequests_) { + if (reqInfo.Event->Get()->Shard == shard) { + changeMessageVisibilityRequests.push_back(&reqInfo); + } + } + for (auto* reqInfo : changeMessageVisibilityRequests) { + ProcessChangeMessageVisibilityBatch(*reqInfo); + } + } +} + bool TQueueLeader::IncActiveMessageRequests(ui64 shard, const TString& requestId) { - if (!IsFifoQueue_) { - auto& shardInfo = Shards_[shard]; - if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Loaded) { - RLOG_SQS_REQ_TRACE(requestId, "Waiting for loading infly for " << TLogQueueName(UserName_, QueueName_, shard)); - return false; - } - ++shardInfo.ActiveMessageRequests; - LOG_SQS_TRACE("Increment active message requests for " << TLogQueueName(UserName_, QueueName_, shard) << ". ActiveMessageRequests: " << shardInfo.ActiveMessageRequests); - } - return true; -} - + if (!IsFifoQueue_) { + auto& shardInfo = Shards_[shard]; + if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Loaded) { + RLOG_SQS_REQ_TRACE(requestId, "Waiting for loading infly for " << TLogQueueName(UserName_, QueueName_, shard)); + return false; + } + ++shardInfo.ActiveMessageRequests; + LOG_SQS_TRACE("Increment active message requests for " << TLogQueueName(UserName_, QueueName_, shard) << ". ActiveMessageRequests: " << shardInfo.ActiveMessageRequests); + } + return true; +} + void TQueueLeader::DecActiveMessageRequests(ui64 shard) { - if (!IsFifoQueue_) { - auto& shardInfo = Shards_[shard]; - Y_VERIFY(shardInfo.ActiveMessageRequests > 0); - --shardInfo.ActiveMessageRequests; - LOG_SQS_TRACE("Decrement active message requests for [" << TLogQueueName(UserName_, QueueName_, shard) << ". ActiveMessageRequests: " << shardInfo.ActiveMessageRequests); - if (shardInfo.ActiveMessageRequests == 0 && shardInfo.InflyLoadState == TShardInfo::EInflyLoadState::WaitingForActiveRequests) { - StartLoadingInfly(shard); - } - } -} - + if (!IsFifoQueue_) { + auto& shardInfo = Shards_[shard]; + Y_VERIFY(shardInfo.ActiveMessageRequests > 0); + --shardInfo.ActiveMessageRequests; + LOG_SQS_TRACE("Decrement active message requests for [" << TLogQueueName(UserName_, QueueName_, shard) << ". 
ActiveMessageRequests: " << shardInfo.ActiveMessageRequests); + if (shardInfo.ActiveMessageRequests == 0 && shardInfo.InflyLoadState == TShardInfo::EInflyLoadState::WaitingForActiveRequests) { + StartLoadingInfly(shard); + } + } +} + void TQueueLeader::ScheduleInflyLoadAfterFailure(ui64 shard) { - const ui32 randomMs = 100 + RandomNumber<ui32>(300); - LOG_SQS_INFO("Scheduling retry after infly " << TLogQueueName(UserName_, QueueName_, shard) << " load failure in " << randomMs << "ms"); - Schedule(TDuration::MilliSeconds(randomMs), new TEvWakeup(RELOAD_INFLY_TAG + shard)); -} - + const ui32 randomMs = 100 + RandomNumber<ui32>(300); + LOG_SQS_INFO("Scheduling retry after infly " << TLogQueueName(UserName_, QueueName_, shard) << " load failure in " << randomMs << "ms"); + Schedule(TDuration::MilliSeconds(randomMs), new TEvWakeup(RELOAD_INFLY_TAG + shard)); +} + void TQueueLeader::HandleInflyIsPurgingNotification(TSqsEvents::TEvInflyIsPurgingNotification::TPtr& ev) { - LOG_SQS_TRACE("Handle infly purged notification for " << TLogQueueName(UserName_, QueueName_, ev->Get()->Shard) << ". Messages: " << ev->Get()->Offsets.size()); - if (!IsFifoQueue_) { - auto& shardInfo = Shards_[ev->Get()->Shard]; - if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Loaded) { - LOG_SQS_TRACE("Skipping infly " << TLogQueueName(UserName_, QueueName_, ev->Get()->Shard) << " purged notification. Infly load state: " << static_cast<int>(shardInfo.InflyLoadState)); - return; - } - for (ui64 offset : ev->Get()->Offsets) { - if (!shardInfo.Infly->Delete(offset)) { - // maybe there are several receive message requests that are about to get this message - for (auto& [receiveRequestId, receiveRequestInfo] : ReceiveMessageRequests_) { - if (receiveRequestInfo.CurrentShardIndex < receiveRequestInfo.Shards.size() - && receiveRequestInfo.Shards[receiveRequestInfo.CurrentShardIndex] == ev->Get()->Shard - && receiveRequestInfo.ReceiveCandidates - && receiveRequestInfo.ReceiveCandidates.Delete(offset)) { - break; - } - } - } - } - } -} - + LOG_SQS_TRACE("Handle infly purged notification for " << TLogQueueName(UserName_, QueueName_, ev->Get()->Shard) << ". Messages: " << ev->Get()->Offsets.size()); + if (!IsFifoQueue_) { + auto& shardInfo = Shards_[ev->Get()->Shard]; + if (shardInfo.InflyLoadState != TShardInfo::EInflyLoadState::Loaded) { + LOG_SQS_TRACE("Skipping infly " << TLogQueueName(UserName_, QueueName_, ev->Get()->Shard) << " purged notification. 
Infly load state: " << static_cast<int>(shardInfo.InflyLoadState)); + return; + } + for (ui64 offset : ev->Get()->Offsets) { + if (!shardInfo.Infly->Delete(offset)) { + // maybe there are several receive message requests that are about to get this message + for (auto& [receiveRequestId, receiveRequestInfo] : ReceiveMessageRequests_) { + if (receiveRequestInfo.CurrentShardIndex < receiveRequestInfo.Shards.size() + && receiveRequestInfo.Shards[receiveRequestInfo.CurrentShardIndex] == ev->Get()->Shard + && receiveRequestInfo.ReceiveCandidates + && receiveRequestInfo.ReceiveCandidates.Delete(offset)) { + break; + } + } + } + } + } +} + void TQueueLeader::HandleQueuePurgedNotification(TSqsEvents::TEvQueuePurgedNotification::TPtr& ev) { - auto& shardInfo = Shards_[ev->Get()->Shard]; - shardInfo.MessagesCount = ev->Get()->NewMessagesCount; -} - + auto& shardInfo = Shards_[ev->Get()->Shard]; + shardInfo.MessagesCount = ev->Get()->NewMessagesCount; +} + void TQueueLeader::HandleGetRuntimeQueueAttributesWhileIniting(TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr& ev) { - auto&& [reqInfoIt, inserted] = GetRuntimeQueueAttributesRequests_.emplace(ev->Get()->RequestId, std::move(ev)); - Y_VERIFY(inserted); -} - + auto&& [reqInfoIt, inserted] = GetRuntimeQueueAttributesRequests_.emplace(ev->Get()->RequestId, std::move(ev)); + Y_VERIFY(inserted); +} + void TQueueLeader::HandleGetRuntimeQueueAttributesWhileWorking(TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr& ev) { - auto&& [reqInfoIt, inserted] = GetRuntimeQueueAttributesRequests_.emplace(ev->Get()->RequestId, std::move(ev)); - Y_VERIFY(inserted); - ProcessGetRuntimeQueueAttributes(reqInfoIt->second); -} - + auto&& [reqInfoIt, inserted] = GetRuntimeQueueAttributesRequests_.emplace(ev->Get()->RequestId, std::move(ev)); + Y_VERIFY(inserted); + ProcessGetRuntimeQueueAttributes(reqInfoIt->second); +} + void TQueueLeader::HandleDeadLetterQueueNotification(TSqsEvents::TEvDeadLetterQueueNotification::TPtr&) { - LatestDlqNotificationTs_ = TActivationContext::Now(); + LatestDlqNotificationTs_ = TActivationContext::Now(); - if (!IsFifoQueue_ && !IsDlqQueue_) { + if (!IsFifoQueue_ && !IsDlqQueue_) { // we need to start the process only once - IsDlqQueue_ = true; - LOG_SQS_INFO("Started periodic message counting for queue " << TLogQueueName(UserName_, QueueName_) - << ". Latest dlq notification was at " << LatestDlqNotificationTs_); + IsDlqQueue_ = true; + LOG_SQS_INFO("Started periodic message counting for queue " << TLogQueueName(UserName_, QueueName_) + << ". 
Latest dlq notification was at " << LatestDlqNotificationTs_); - StartGatheringMetrics(); + StartGatheringMetrics(); } } void TQueueLeader::ProcessGetRuntimeQueueAttributes(TGetRuntimeQueueAttributesRequestProcessing& reqInfo) { - if (reqInfo.ShardProcessFlags.empty()) { - Y_VERIFY(ShardsCount_ > 0); - reqInfo.ShardProcessFlags.resize(ShardsCount_); - } - - for (ui64 shard = 0; shard < ShardsCount_; ++shard) { - ProcessGetRuntimeQueueAttributes(shard, reqInfo); - } -} - + if (reqInfo.ShardProcessFlags.empty()) { + Y_VERIFY(ShardsCount_ > 0); + reqInfo.ShardProcessFlags.resize(ShardsCount_); + } + + for (ui64 shard = 0; shard < ShardsCount_; ++shard) { + ProcessGetRuntimeQueueAttributes(shard, reqInfo); + } +} + void TQueueLeader::ProcessGetRuntimeQueueAttributes(ui64 shard, TGetRuntimeQueueAttributesRequestProcessing& reqInfo) { - Y_VERIFY(shard < reqInfo.ShardProcessFlags.size()); - if (reqInfo.ShardProcessFlags[shard]) { - return; - } - - if (IsFifoQueue_) { - if (Shards_[shard].MessagesCountWasGot) { - reqInfo.Answer->MessagesCount += Shards_[shard].MessagesCount; - reqInfo.Answer->CreatedTimestamp = Min(Shards_[shard].CreatedTimestamp, reqInfo.Answer->CreatedTimestamp); - - ++reqInfo.ShardsProcessed; - reqInfo.ShardProcessFlags[shard] = true; - } else { - RequestMessagesCountMetrics(shard); - } - } else { - if (Shards_[shard].InflyLoadState == TShardInfo::EInflyLoadState::Loaded) { - const TInstant now = TActivationContext::Now(); - reqInfo.Answer->MessagesCount += Shards_[shard].MessagesCount; - reqInfo.Answer->InflyMessagesCount += Shards_[shard].Infly->GetInflyCount(now); - reqInfo.Answer->CreatedTimestamp = Min(Shards_[shard].CreatedTimestamp, reqInfo.Answer->CreatedTimestamp); - - ++reqInfo.ShardsProcessed; - reqInfo.ShardProcessFlags[shard] = true; - } - } - - if (reqInfo.ShardsProcessed == reqInfo.ShardProcessFlags.size()) { - reqInfo.Answer->MessagesDelayed = DelayStatistics_.UpdateAndGetMessagesDelayed(TActivationContext::Now()); - Send(reqInfo.Event->Sender, std::move(reqInfo.Answer)); - GetRuntimeQueueAttributesRequests_.erase(reqInfo.Event->Get()->RequestId); - } -} - + Y_VERIFY(shard < reqInfo.ShardProcessFlags.size()); + if (reqInfo.ShardProcessFlags[shard]) { + return; + } + + if (IsFifoQueue_) { + if (Shards_[shard].MessagesCountWasGot) { + reqInfo.Answer->MessagesCount += Shards_[shard].MessagesCount; + reqInfo.Answer->CreatedTimestamp = Min(Shards_[shard].CreatedTimestamp, reqInfo.Answer->CreatedTimestamp); + + ++reqInfo.ShardsProcessed; + reqInfo.ShardProcessFlags[shard] = true; + } else { + RequestMessagesCountMetrics(shard); + } + } else { + if (Shards_[shard].InflyLoadState == TShardInfo::EInflyLoadState::Loaded) { + const TInstant now = TActivationContext::Now(); + reqInfo.Answer->MessagesCount += Shards_[shard].MessagesCount; + reqInfo.Answer->InflyMessagesCount += Shards_[shard].Infly->GetInflyCount(now); + reqInfo.Answer->CreatedTimestamp = Min(Shards_[shard].CreatedTimestamp, reqInfo.Answer->CreatedTimestamp); + + ++reqInfo.ShardsProcessed; + reqInfo.ShardProcessFlags[shard] = true; + } + } + + if (reqInfo.ShardsProcessed == reqInfo.ShardProcessFlags.size()) { + reqInfo.Answer->MessagesDelayed = DelayStatistics_.UpdateAndGetMessagesDelayed(TActivationContext::Now()); + Send(reqInfo.Event->Sender, std::move(reqInfo.Answer)); + GetRuntimeQueueAttributesRequests_.erase(reqInfo.Event->Get()->RequestId); + } +} + void TQueueLeader::FailGetRuntimeQueueAttributesForShard(ui64 shard) { - std::vector<TString> reqIds; - 
reqIds.reserve(GetRuntimeQueueAttributesRequests_.size()); - for (auto& [reqId, reqInfo] : GetRuntimeQueueAttributesRequests_) { - Y_VERIFY(shard < reqInfo.ShardProcessFlags.size()); - if (!reqInfo.ShardProcessFlags[shard]) { // don't fail requests that are already passed this shard - const TString& requestId = reqId; - RLOG_SQS_REQ_ERROR(requestId, "Failed to get runtime queue attributes for shard " << shard); - reqInfo.Answer->Failed = true; - Send(reqInfo.Event->Sender, std::move(reqInfo.Answer)); - reqIds.push_back(reqId); - } - } - for (const TString& reqId : reqIds) { - GetRuntimeQueueAttributesRequests_.erase(reqId); - } -} - + std::vector<TString> reqIds; + reqIds.reserve(GetRuntimeQueueAttributesRequests_.size()); + for (auto& [reqId, reqInfo] : GetRuntimeQueueAttributesRequests_) { + Y_VERIFY(shard < reqInfo.ShardProcessFlags.size()); + if (!reqInfo.ShardProcessFlags[shard]) { // don't fail requests that are already passed this shard + const TString& requestId = reqId; + RLOG_SQS_REQ_ERROR(requestId, "Failed to get runtime queue attributes for shard " << shard); + reqInfo.Answer->Failed = true; + Send(reqInfo.Event->Sender, std::move(reqInfo.Answer)); + reqIds.push_back(reqId); + } + } + for (const TString& reqId : reqIds) { + GetRuntimeQueueAttributesRequests_.erase(reqId); + } +} + void TQueueLeader::ProcessGetRuntimeQueueAttributes(ui64 shard) { - std::vector<TGetRuntimeQueueAttributesRequestProcessing*> requestsToProcess; - requestsToProcess.reserve(GetRuntimeQueueAttributesRequests_.size()); - for (auto& [reqId, reqInfo] : GetRuntimeQueueAttributesRequests_) { - requestsToProcess.push_back(&reqInfo); - } - for (auto* reqInfo : requestsToProcess) { - ProcessGetRuntimeQueueAttributes(shard, *reqInfo); - } -} - + std::vector<TGetRuntimeQueueAttributesRequestProcessing*> requestsToProcess; + requestsToProcess.reserve(GetRuntimeQueueAttributesRequests_.size()); + for (auto& [reqId, reqInfo] : GetRuntimeQueueAttributesRequests_) { + requestsToProcess.push_back(&reqInfo); + } + for (auto* reqInfo : requestsToProcess) { + ProcessGetRuntimeQueueAttributes(shard, *reqInfo); + } +} + void TQueueLeader::InitQuoterResources() { - const auto& cfg = Cfg().GetQuotingConfig(); - if (cfg.GetEnableQuoting()) { - Y_VERIFY(cfg.HasLocalRateLimiterConfig() != cfg.HasKesusQuoterConfig()); // exactly one must be set - if (cfg.HasLocalRateLimiterConfig()) { // the only one that is fully supported - Y_VERIFY(QuoterResources_); - const auto& rates = cfg.GetLocalRateLimiterConfig().GetRates(); - // allocate resources - SendMessageQuoterResource_ = TLocalRateLimiterResource(IsFifoQueue_ ? rates.GetFifoSendMessageRate() : rates.GetStdSendMessageRate()); - ReceiveMessageQuoterResource_ = TLocalRateLimiterResource(IsFifoQueue_ ? rates.GetFifoReceiveMessageRate() : rates.GetStdReceiveMessageRate()); - DeleteMessageQuoterResource_ = TLocalRateLimiterResource(IsFifoQueue_ ? rates.GetFifoDeleteMessageRate() : rates.GetStdDeleteMessageRate()); - ChangeMessageVisibilityQuoterResource_ = TLocalRateLimiterResource(IsFifoQueue_ ? 
rates.GetFifoChangeMessageVisibilityRate() : rates.GetStdChangeMessageVisibilityRate()); - // fill map - { - TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, SendMessageQuoterResource_}; - QuoterResources_->ActionsResources.emplace(EAction::SendMessage, res); - QuoterResources_->ActionsResources.emplace(EAction::SendMessageBatch, res); - } - { - TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, ReceiveMessageQuoterResource_}; - QuoterResources_->ActionsResources.emplace(EAction::ReceiveMessage, res); - } - { - TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, DeleteMessageQuoterResource_}; - QuoterResources_->ActionsResources.emplace(EAction::DeleteMessage, res); - QuoterResources_->ActionsResources.emplace(EAction::DeleteMessageBatch, res); - } - { - TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, ChangeMessageVisibilityQuoterResource_}; - QuoterResources_->ActionsResources.emplace(EAction::ChangeMessageVisibility, res); - QuoterResources_->ActionsResources.emplace(EAction::ChangeMessageVisibilityBatch, res); - } - } - } -} - + const auto& cfg = Cfg().GetQuotingConfig(); + if (cfg.GetEnableQuoting()) { + Y_VERIFY(cfg.HasLocalRateLimiterConfig() != cfg.HasKesusQuoterConfig()); // exactly one must be set + if (cfg.HasLocalRateLimiterConfig()) { // the only one that is fully supported + Y_VERIFY(QuoterResources_); + const auto& rates = cfg.GetLocalRateLimiterConfig().GetRates(); + // allocate resources + SendMessageQuoterResource_ = TLocalRateLimiterResource(IsFifoQueue_ ? rates.GetFifoSendMessageRate() : rates.GetStdSendMessageRate()); + ReceiveMessageQuoterResource_ = TLocalRateLimiterResource(IsFifoQueue_ ? rates.GetFifoReceiveMessageRate() : rates.GetStdReceiveMessageRate()); + DeleteMessageQuoterResource_ = TLocalRateLimiterResource(IsFifoQueue_ ? rates.GetFifoDeleteMessageRate() : rates.GetStdDeleteMessageRate()); + ChangeMessageVisibilityQuoterResource_ = TLocalRateLimiterResource(IsFifoQueue_ ? 
rates.GetFifoChangeMessageVisibilityRate() : rates.GetStdChangeMessageVisibilityRate()); + // fill map + { + TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, SendMessageQuoterResource_}; + QuoterResources_->ActionsResources.emplace(EAction::SendMessage, res); + QuoterResources_->ActionsResources.emplace(EAction::SendMessageBatch, res); + } + { + TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, ReceiveMessageQuoterResource_}; + QuoterResources_->ActionsResources.emplace(EAction::ReceiveMessage, res); + } + { + TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, DeleteMessageQuoterResource_}; + QuoterResources_->ActionsResources.emplace(EAction::DeleteMessage, res); + QuoterResources_->ActionsResources.emplace(EAction::DeleteMessageBatch, res); + } + { + TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, ChangeMessageVisibilityQuoterResource_}; + QuoterResources_->ActionsResources.emplace(EAction::ChangeMessageVisibility, res); + QuoterResources_->ActionsResources.emplace(EAction::ChangeMessageVisibilityBatch, res); + } + } + } +} + TQueueLeader::TShardInfo::~TShardInfo() = default; - + TQueueLeader::TSendMessageBatchRequestProcessing::TSendMessageBatchRequestProcessing(TSqsEvents::TEvSendMessageBatch::TPtr&& ev) - : Event(std::move(ev)) -{ - Statuses.resize(Event->Get()->Messages.size()); -} - + : Event(std::move(ev)) +{ + Statuses.resize(Event->Get()->Messages.size()); +} + void TQueueLeader::TSendMessageBatchRequestProcessing::Init(ui64 shardsCount) { - if (!Inited) { - Shard = RandomNumber<ui64>() % shardsCount; - Inited = true; - } -} - + if (!Inited) { + Shard = RandomNumber<ui64>() % shardsCount; + Inited = true; + } +} + TQueueLeader::TReceiveMessageBatchRequestProcessing::TReceiveMessageBatchRequestProcessing(TSqsEvents::TEvReceiveMessageBatch::TPtr&& ev) - : Event(std::move(ev)) - , Answer(MakeHolder<TSqsEvents::TEvReceiveMessageBatchResponse>()) -{ - Answer->Messages.reserve(Event->Get()->MaxMessagesCount); -} - + : Event(std::move(ev)) + , Answer(MakeHolder<TSqsEvents::TEvReceiveMessageBatchResponse>()) +{ + Answer->Messages.reserve(Event->Get()->MaxMessagesCount); +} + void TQueueLeader::TReceiveMessageBatchRequestProcessing::Init(ui64 shardsCount) { - if (!Inited) { - Shards.resize(shardsCount); - for (ui64 i = 0; i < shardsCount; ++i) { - Shards[i] = i; - } - - Shuffle(Shards.begin(), Shards.end()); - - Inited = true; - } -} - + if (!Inited) { + Shards.resize(shardsCount); + for (ui64 i = 0; i < shardsCount; ++i) { + Shards[i] = i; + } + + Shuffle(Shards.begin(), Shards.end()); + + Inited = true; + } +} + TQueueLeader::TDeleteMessageBatchRequestProcessing::TDeleteMessageBatchRequestProcessing(TSqsEvents::TEvDeleteMessageBatch::TPtr&& ev) - : Event(std::move(ev)) - , Answer(MakeHolder<TSqsEvents::TEvDeleteMessageBatchResponse>()) -{ - Answer->Shard = Event->Get()->Shard; - Answer->Statuses.resize(Event->Get()->Messages.size()); - InflyMessages.reserve(Event->Get()->Messages.size()); -} - + : Event(std::move(ev)) + , Answer(MakeHolder<TSqsEvents::TEvDeleteMessageBatchResponse>()) +{ + Answer->Shard = Event->Get()->Shard; + Answer->Statuses.resize(Event->Get()->Messages.size()); + InflyMessages.reserve(Event->Get()->Messages.size()); +} + 
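The InitQuoterResources() body in this hunk follows one pattern: exactly one rate-limiter backend may be configured, a FIFO or standard rate is chosen per action, one local rate-limiter resource is allocated per action kind, and that same resource is registered for both the single and the batch variant of the action. Below is a minimal self-contained sketch of that shape only; TRates, ResourceId, InitActionResources and the allocate helper are hypothetical stand-ins, not the real NKikimr::NSQS types.

    // Sketch of the per-action quota wiring pattern; names are illustrative stand-ins.
    #include <cstdint>
    #include <map>

    enum class EAction {
        SendMessage, SendMessageBatch,
        ReceiveMessage,
        DeleteMessage, DeleteMessageBatch,
        ChangeMessageVisibility, ChangeMessageVisibilityBatch
    };

    using ResourceId = uint64_t; // stands in for TLocalRateLimiterResource

    struct TRates { // per-queue-type rates, as read from the batching/quoting config
        uint64_t FifoSend = 0, StdSend = 0;
        uint64_t FifoReceive = 0, StdReceive = 0;
        uint64_t FifoDelete = 0, StdDelete = 0;
        uint64_t FifoChangeVisibility = 0, StdChangeVisibility = 0;
    };

    std::map<EAction, ResourceId> InitActionResources(const TRates& rates, bool isFifoQueue) {
        // Stand-in for resource allocation: each call yields a fresh resource id.
        ResourceId nextId = 1;
        auto allocate = [&nextId](uint64_t /*rate*/) { return nextId++; };

        // Pick the FIFO or standard rate per action kind, one resource per kind.
        const ResourceId send    = allocate(isFifoQueue ? rates.FifoSend    : rates.StdSend);
        const ResourceId receive = allocate(isFifoQueue ? rates.FifoReceive : rates.StdReceive);
        const ResourceId del     = allocate(isFifoQueue ? rates.FifoDelete  : rates.StdDelete);
        const ResourceId vis     = allocate(isFifoQueue ? rates.FifoChangeVisibility
                                                        : rates.StdChangeVisibility);

        // As in InitQuoterResources(): single and batch forms of an action share one resource.
        std::map<EAction, ResourceId> actions;
        actions.emplace(EAction::SendMessage, send);
        actions.emplace(EAction::SendMessageBatch, send);
        actions.emplace(EAction::ReceiveMessage, receive);
        actions.emplace(EAction::DeleteMessage, del);
        actions.emplace(EAction::DeleteMessageBatch, del);
        actions.emplace(EAction::ChangeMessageVisibility, vis);
        actions.emplace(EAction::ChangeMessageVisibilityBatch, vis);
        return actions;
    }

Registering the same resource for the single and batch form means a caller cannot double its effective rate simply by switching to the batch API, which matches the emplace pairs in the hunk above.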
TQueueLeader::TChangeMessageVisibilityBatchRequestProcessing::TChangeMessageVisibilityBatchRequestProcessing(TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr&& ev) - : Event(std::move(ev)) - , Answer(MakeHolder<TSqsEvents::TEvChangeMessageVisibilityBatchResponse>()) -{ - Answer->Statuses.resize(Event->Get()->Messages.size()); - Answer->Shard = Event->Get()->Shard; -} - + : Event(std::move(ev)) + , Answer(MakeHolder<TSqsEvents::TEvChangeMessageVisibilityBatchResponse>()) +{ + Answer->Statuses.resize(Event->Get()->Messages.size()); + Answer->Shard = Event->Get()->Shard; +} + TQueueLeader::TGetRuntimeQueueAttributesRequestProcessing::TGetRuntimeQueueAttributesRequestProcessing(TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr&& ev) - : Event(std::move(ev)) - , Answer(MakeHolder<TSqsEvents::TEvGetRuntimeQueueAttributesResponse>()) -{ - Answer->CreatedTimestamp = TInstant::Max(); // for proper min operation -} - -template <class TBatch> + : Event(std::move(ev)) + , Answer(MakeHolder<TSqsEvents::TEvGetRuntimeQueueAttributesResponse>()) +{ + Answer->CreatedTimestamp = TInstant::Max(); // for proper min operation +} + +template <class TBatch> TQueueLeader::TBatchingState<TBatch>::~TBatchingState() = default; - -template <class TBatch> + +template <class TBatch> void TQueueLeader::TBatchingState<TBatch>::Init(const NKikimrConfig::TSqsConfig::TBatchingPolicy& policy, ui64 shard, bool isFifo) { - Policy = policy; - Shard = shard; - IsFifoQueue = isFifo; -} - -template <class TBatch> + Policy = policy; + Shard = shard; + IsFifoQueue = isFifo; +} + +template <class TBatch> void TQueueLeader::TBatchingState<TBatch>::TryExecute(TQueueLeader* leader) { - while (BatchesExecuting.size() < Policy.GetTransactionsMaxInflyPerShard() && !BatchesIniting.empty()) { - auto& batchPtr = BatchesIniting.front(); - if (!BatchesExecuting.empty() && !CanExecute(*batchPtr)) { - break; - } - - BatchesExecuting[batchPtr->BatchId] = batchPtr; + while (BatchesExecuting.size() < Policy.GetTransactionsMaxInflyPerShard() && !BatchesIniting.empty()) { + auto& batchPtr = BatchesIniting.front(); + if (!BatchesExecuting.empty() && !CanExecute(*batchPtr)) { + break; + } + + BatchesExecuting[batchPtr->BatchId] = batchPtr; batchPtr->Execute(leader); - BatchesIniting.pop_front(); - } -} - -template <class TBatch> + BatchesIniting.pop_front(); + } +} + +template <class TBatch> TBatch& TQueueLeader::TBatchingState<TBatch>::NewBatch() { - auto newBatch = MakeIntrusive<TBatch>(Shard, Policy.GetBatchSize(), IsFifoQueue); - newBatch->BatchId = NextBatchId++; - BatchesIniting.push_back(newBatch); - return *newBatch; -} - -template <class TBatch> + auto newBatch = MakeIntrusive<TBatch>(Shard, Policy.GetBatchSize(), IsFifoQueue); + newBatch->BatchId = NextBatchId++; + BatchesIniting.push_back(newBatch); + return *newBatch; +} + +template <class TBatch> void TQueueLeader::TBatchingState<TBatch>::CancelRequestsAfterInflyLoadFailure() { - Y_VERIFY(BatchesExecuting.empty()); - BatchesIniting.clear(); -} - -template <class TBatch> -template <class TRequestProcessing> + Y_VERIFY(BatchesExecuting.empty()); + BatchesIniting.clear(); +} + +template <class TBatch> +template <class TRequestProcessing> void TQueueLeader::TBatchingStateWithGroupsRestrictions<TBatch>::AddRequest(TRequestProcessing& reqInfo) { - const auto& msgs = reqInfo.Event->Get()->Messages; - if (this->IsFifoQueue) { - for (size_t i = 0; i < msgs.size(); ++i) { - const TString& groupId = msgs[i].MessageGroupId; - bool added = false; - for (const auto& batch : this->BatchesIniting) { - if 
(!batch->IsFull() && !batch->HasGroup(groupId)) { - batch->AddEntry(reqInfo, i); - added = true; - break; - } - } - if (!added) { - this->NewBatch().AddEntry(reqInfo, i); - } - } - } else { - size_t i = 0; - while (i < msgs.size()) { - if (this->BatchesIniting.empty() || this->BatchesIniting.back()->IsFull()) { - this->NewBatch(); - } - auto& batch = *this->BatchesIniting.back(); - do { - batch.AddEntry(reqInfo, i); - ++i; - } while (i < msgs.size() && !batch.IsFull()); - } - } -} - -template <class TBatch> + const auto& msgs = reqInfo.Event->Get()->Messages; + if (this->IsFifoQueue) { + for (size_t i = 0; i < msgs.size(); ++i) { + const TString& groupId = msgs[i].MessageGroupId; + bool added = false; + for (const auto& batch : this->BatchesIniting) { + if (!batch->IsFull() && !batch->HasGroup(groupId)) { + batch->AddEntry(reqInfo, i); + added = true; + break; + } + } + if (!added) { + this->NewBatch().AddEntry(reqInfo, i); + } + } + } else { + size_t i = 0; + while (i < msgs.size()) { + if (this->BatchesIniting.empty() || this->BatchesIniting.back()->IsFull()) { + this->NewBatch(); + } + auto& batch = *this->BatchesIniting.back(); + do { + batch.AddEntry(reqInfo, i); + ++i; + } while (i < msgs.size() && !batch.IsFull()); + } + } +} + +template <class TBatch> bool TQueueLeader::TBatchingStateWithGroupsRestrictions<TBatch>::CanExecute(const TBatch& batch) const { - using TBatchingState = TBatchingState<TBatch>; - if (this->IsFifoQueue) { - // find whether groups from batch are already executing - for (const auto& [id, executingBatch] : this->BatchesExecuting) { - auto executingIt = executingBatch->Groups.begin(); - auto it = batch.Groups.begin(); - while (executingIt != executingBatch->Groups.end() && it != batch.Groups.end()) { - if (*executingIt == *it) { - return false; // there is already executing request with such group - } else if (*executingIt < *it) { - ++executingIt; - } else { - ++it; - } - } - } - return true; - } else { - return TBatchingState::CanExecute(batch); - } -} - + using TBatchingState = TBatchingState<TBatch>; + if (this->IsFifoQueue) { + // find whether groups from batch are already executing + for (const auto& [id, executingBatch] : this->BatchesExecuting) { + auto executingIt = executingBatch->Groups.begin(); + auto it = batch.Groups.begin(); + while (executingIt != executingBatch->Groups.end() && it != batch.Groups.end()) { + if (*executingIt == *it) { + return false; // there is already executing request with such group + } else if (*executingIt < *it) { + ++executingIt; + } else { + ++it; + } + } + } + return true; + } else { + return TBatchingState::CanExecute(batch); + } +} + void TQueueLeader::TSendBatch::AddEntry(TSendMessageBatchRequestProcessing& reqInfo, size_t i) { - RLOG_SQS_REQ_DEBUG(reqInfo.Event->Get()->RequestId, "Add message[" << i << "] to send batch. BatchId: " << BatchId); - Entries.emplace_back(reqInfo.Event->Get()->RequestId, reqInfo.Event->Get()->SenderId, reqInfo.Event->Get()->Messages[i], i); - if (IsFifoQueue) { - AddGroup(reqInfo.Event->Get()->Messages[i].MessageGroupId); - } -} - + RLOG_SQS_REQ_DEBUG(reqInfo.Event->Get()->RequestId, "Add message[" << i << "] to send batch. 
BatchId: " << BatchId); + Entries.emplace_back(reqInfo.Event->Get()->RequestId, reqInfo.Event->Get()->SenderId, reqInfo.Event->Get()->Messages[i], i); + if (IsFifoQueue) { + AddGroup(reqInfo.Event->Get()->Messages[i].MessageGroupId); + } +} + void TQueueLeader::TSendBatch::Execute(TQueueLeader* leader) { RLOG_SQS_DEBUG(TLogQueueName(leader->UserName_, leader->QueueName_, Shard) << " Executing send batch. BatchId: " << BatchId << ". Size: " << Size()); - TransactionStartedTime = TActivationContext::Now(); - TExecutorBuilder builder(SelfId(), RequestId_); - builder + TransactionStartedTime = TActivationContext::Now(); + TExecutorBuilder builder(SelfId(), RequestId_); + builder .User(leader->UserName_) .Queue(leader->QueueName_) - .Shard(Shard) + .Shard(Shard) .QueueVersion(leader->QueueVersion_) .QueueLeader(SelfId()) - .QueryId(WRITE_MESSAGE_ID) - .Fifo(IsFifoQueue) + .QueryId(WRITE_MESSAGE_ID) + .Fifo(IsFifoQueue) .Counters(leader->Counters_) - .RetryOnTimeout(IsFifoQueue) // Fifo queues have deduplication, so we can retry even on unknown transaction state + .RetryOnTimeout(IsFifoQueue) // Fifo queues have deduplication, so we can retry even on unknown transaction state .OnExecuted([leader, shard = Shard, batchId = BatchId](const TSqsEvents::TEvExecuted::TRecord& ev) { leader->OnSendBatchExecuted(shard, batchId, ev); }) - .Params() - .Uint64("RANDOM_ID", RandomNumber<ui64>()) - .Uint64("TIMESTAMP", TransactionStartedTime.MilliSeconds()) - .Uint64("SHARD", Shard) - .Uint64("DEDUPLICATION_PERIOD", Cfg().GetDeduplicationPeriodMs()); - - NClient::TWriteValue params = builder.ParamsValue(); - const TString* prevRequestId = nullptr; - for (ui64 i = 0; i < Entries.size(); ++i) { - const TSendBatchEntry& entry = Entries[i]; - const TSqsEvents::TEvSendMessageBatch::TMessageEntry& msgEntry = entry.Message; - auto message = params["MESSAGES"].AddListItem(); - message["Attributes"].Bytes(msgEntry.Attributes); - message["Data"].Bytes(msgEntry.Body); - message["MessageId"].Bytes(msgEntry.MessageId); - message["SenderId"].Bytes(entry.SenderId); - message["Delay"] = ui64(msgEntry.Delay.MilliSeconds()); - message["Index"] = i; - if (IsFifoQueue) { - message["GroupId"].Bytes(msgEntry.MessageGroupId); - message["DeduplicationId"].Bytes(msgEntry.DeduplicationId); - } - if (!prevRequestId || *prevRequestId != entry.RequestId) { - prevRequestId = &entry.RequestId; - RLOG_SQS_REQ_DEBUG(entry.RequestId, "Send batch transaction to database. 
BatchId: " << BatchId); - } - } - - builder.Start(); -} - + .Params() + .Uint64("RANDOM_ID", RandomNumber<ui64>()) + .Uint64("TIMESTAMP", TransactionStartedTime.MilliSeconds()) + .Uint64("SHARD", Shard) + .Uint64("DEDUPLICATION_PERIOD", Cfg().GetDeduplicationPeriodMs()); + + NClient::TWriteValue params = builder.ParamsValue(); + const TString* prevRequestId = nullptr; + for (ui64 i = 0; i < Entries.size(); ++i) { + const TSendBatchEntry& entry = Entries[i]; + const TSqsEvents::TEvSendMessageBatch::TMessageEntry& msgEntry = entry.Message; + auto message = params["MESSAGES"].AddListItem(); + message["Attributes"].Bytes(msgEntry.Attributes); + message["Data"].Bytes(msgEntry.Body); + message["MessageId"].Bytes(msgEntry.MessageId); + message["SenderId"].Bytes(entry.SenderId); + message["Delay"] = ui64(msgEntry.Delay.MilliSeconds()); + message["Index"] = i; + if (IsFifoQueue) { + message["GroupId"].Bytes(msgEntry.MessageGroupId); + message["DeduplicationId"].Bytes(msgEntry.DeduplicationId); + } + if (!prevRequestId || *prevRequestId != entry.RequestId) { + prevRequestId = &entry.RequestId; + RLOG_SQS_REQ_DEBUG(entry.RequestId, "Send batch transaction to database. BatchId: " << BatchId); + } + } + + builder.Start(); +} + void TQueueLeader::TDeleteBatch::AddEntry(TDeleteMessageBatchRequestProcessing& reqInfo, size_t i) { - RLOG_SQS_REQ_DEBUG(reqInfo.Event->Get()->RequestId, "Add message[" << i << "] to delete batch. BatchId: " << BatchId); - Entries.emplace_back(reqInfo.Event->Get()->RequestId, reqInfo.Event->Get()->Messages[i], i); - if (IsFifoQueue) { - AddGroup(reqInfo.Event->Get()->Messages[i].MessageGroupId); - } -} - + RLOG_SQS_REQ_DEBUG(reqInfo.Event->Get()->RequestId, "Add message[" << i << "] to delete batch. BatchId: " << BatchId); + Entries.emplace_back(reqInfo.Event->Get()->RequestId, reqInfo.Event->Get()->Messages[i], i); + if (IsFifoQueue) { + AddGroup(reqInfo.Event->Get()->Messages[i].MessageGroupId); + } +} + void TQueueLeader::TDeleteBatch::Execute(TQueueLeader* leader) { RLOG_SQS_DEBUG(TLogQueueName(leader->UserName_, leader->QueueName_, Shard) << " Executing delete batch. BatchId: " << BatchId << ". 
Size: " << Size()); - TExecutorBuilder builder(SelfId(), RequestId_); - builder + TExecutorBuilder builder(SelfId(), RequestId_); + builder .User(leader->UserName_) .Queue(leader->QueueName_) - .Shard(Shard) + .Shard(Shard) .QueueVersion(leader->QueueVersion_) .QueueLeader(SelfId()) - .Fifo(IsFifoQueue) - .QueryId(DELETE_MESSAGE_ID) + .Fifo(IsFifoQueue) + .QueryId(DELETE_MESSAGE_ID) .Counters(leader->Counters_) - .RetryOnTimeout() + .RetryOnTimeout() .OnExecuted([leader, shard = Shard, batchId = BatchId](const TSqsEvents::TEvExecuted::TRecord& ev) { leader->OnDeleteBatchExecuted(shard, batchId, ev); }) - .Params() - .Uint64("NOW", TActivationContext::Now().MilliSeconds()) - .Uint64("SHARD", Shard) - .Uint64("GROUPS_READ_ATTEMPT_IDS_PERIOD", Cfg().GetGroupsReadAttemptIdsPeriodMs()); - - NClient::TWriteValue params = builder.ParamsValue(); - const TString* prevRequestId = nullptr; - - Offset2Entry.reserve(Entries.size()); - for (ui64 i = 0; i < Entries.size(); ++i) { - const TDeleteBatchEntry& entry = Entries[i]; - const TSqsEvents::TEvDeleteMessageBatch::TMessageEntry& msgEntry = entry.Message; - const bool hasOffset = Offset2Entry.find(msgEntry.Offset) != Offset2Entry.end(); - Offset2Entry.emplace(msgEntry.Offset, i); - if (!hasOffset) { - auto key = params["KEYS"].AddListItem(); - if (IsFifoQueue) { - key["GroupId"].Bytes(msgEntry.MessageGroupId); - key["ReceiveAttemptId"] = msgEntry.ReceiveAttemptId; - } - key["Offset"] = ui64(msgEntry.Offset); - key["LockTimestamp"] = ui64(msgEntry.LockTimestamp.MilliSeconds()); - } - if (!prevRequestId || *prevRequestId != entry.RequestId) { - prevRequestId = &entry.RequestId; - RLOG_SQS_REQ_DEBUG(entry.RequestId, "Send batch transaction to database. BatchId: " << BatchId); - } - } - - builder.Start(); -} - + .Params() + .Uint64("NOW", TActivationContext::Now().MilliSeconds()) + .Uint64("SHARD", Shard) + .Uint64("GROUPS_READ_ATTEMPT_IDS_PERIOD", Cfg().GetGroupsReadAttemptIdsPeriodMs()); + + NClient::TWriteValue params = builder.ParamsValue(); + const TString* prevRequestId = nullptr; + + Offset2Entry.reserve(Entries.size()); + for (ui64 i = 0; i < Entries.size(); ++i) { + const TDeleteBatchEntry& entry = Entries[i]; + const TSqsEvents::TEvDeleteMessageBatch::TMessageEntry& msgEntry = entry.Message; + const bool hasOffset = Offset2Entry.find(msgEntry.Offset) != Offset2Entry.end(); + Offset2Entry.emplace(msgEntry.Offset, i); + if (!hasOffset) { + auto key = params["KEYS"].AddListItem(); + if (IsFifoQueue) { + key["GroupId"].Bytes(msgEntry.MessageGroupId); + key["ReceiveAttemptId"] = msgEntry.ReceiveAttemptId; + } + key["Offset"] = ui64(msgEntry.Offset); + key["LockTimestamp"] = ui64(msgEntry.LockTimestamp.MilliSeconds()); + } + if (!prevRequestId || *prevRequestId != entry.RequestId) { + prevRequestId = &entry.RequestId; + RLOG_SQS_REQ_DEBUG(entry.RequestId, "Send batch transaction to database. BatchId: " << BatchId); + } + } + + builder.Start(); +} + void TQueueLeader::TLoadBatchingState::AddRequest(TReceiveMessageBatchRequestProcessing& reqInfo) { - auto msg = reqInfo.ReceiveCandidates.Begin(); - const auto end = reqInfo.ReceiveCandidates.End(); - while (msg != end) { - if (BatchesIniting.empty() || BatchesIniting.back()->IsFull()) { - NewBatch(); - } - auto& batch = *BatchesIniting.back(); - RLOG_SQS_REQ_DEBUG(reqInfo.Event->Get()->RequestId, "Add load batch request. 
BatchId: " << batch.BatchId); - do { - batch.Entries.emplace_back(reqInfo.Event->Get()->RequestId, &msg->Message(), reqInfo.Event->Get()->VisibilityTimeout); - ++msg; - } while (msg != end && !batch.IsFull()); - } -} - + auto msg = reqInfo.ReceiveCandidates.Begin(); + const auto end = reqInfo.ReceiveCandidates.End(); + while (msg != end) { + if (BatchesIniting.empty() || BatchesIniting.back()->IsFull()) { + NewBatch(); + } + auto& batch = *BatchesIniting.back(); + RLOG_SQS_REQ_DEBUG(reqInfo.Event->Get()->RequestId, "Add load batch request. BatchId: " << batch.BatchId); + do { + batch.Entries.emplace_back(reqInfo.Event->Get()->RequestId, &msg->Message(), reqInfo.Event->Get()->VisibilityTimeout); + ++msg; + } while (msg != end && !batch.IsFull()); + } +} + void TQueueLeader::TLoadBatch::Execute(TQueueLeader* leader) { RLOG_SQS_DEBUG(TLogQueueName(leader->UserName_, leader->QueueName_, Shard) << " Executing load batch. BatchId: " << BatchId << ". Size: " << Size()); - TExecutorBuilder builder(SelfId(), RequestId_); - const auto now = TActivationContext::Now(); - builder + TExecutorBuilder builder(SelfId(), RequestId_); + const auto now = TActivationContext::Now(); + builder .User(leader->UserName_) .Queue(leader->QueueName_) - .Shard(Shard) + .Shard(Shard) .QueueVersion(leader->QueueVersion_) - .Fifo(IsFifoQueue) + .Fifo(IsFifoQueue) .QueueLeader(SelfId()) .Counters(leader->Counters_) - .RetryOnTimeout() - .Params() - .Uint64("NOW", now.MilliSeconds()) - .Uint64("READ_ID", RandomNumber<ui64>()) - .Uint64("SHARD", Shard); - + .RetryOnTimeout() + .Params() + .Uint64("NOW", now.MilliSeconds()) + .Uint64("READ_ID", RandomNumber<ui64>()) + .Uint64("SHARD", Shard); + ui32 maxReceiveCount = 0; // not set if (Cfg().GetEnableDeadLetterQueues() && leader->DlqInfo_) { const auto& dlqInfo(*leader->DlqInfo_); @@ -2547,18 +2547,18 @@ void TQueueLeader::TLoadBatch::Execute(TQueueLeader* leader) { } } - NClient::TWriteValue params = builder.ParamsValue(); - const TString* prevRequestId = nullptr; + NClient::TWriteValue params = builder.ParamsValue(); + const TString* prevRequestId = nullptr; size_t deadLettersCounter = 0; THashSet<ui64> offsets; // check for duplicates - for (const TLoadBatchEntry& entry : Entries) { + for (const TLoadBatchEntry& entry : Entries) { Y_VERIFY(offsets.insert(entry.Offset).second); - auto item = params["KEYS"].AddListItem(); - item["RandomId"] = entry.RandomId; - item["Offset"] = entry.Offset; - item["CurrentVisibilityDeadline"] = ui64(entry.CurrentVisibilityDeadline.MilliSeconds()); - item["VisibilityDeadline"] = ui64((now + entry.VisibilityTimeout).MilliSeconds()); + auto item = params["KEYS"].AddListItem(); + item["RandomId"] = entry.RandomId; + item["Offset"] = entry.Offset; + item["CurrentVisibilityDeadline"] = ui64(entry.CurrentVisibilityDeadline.MilliSeconds()); + item["VisibilityDeadline"] = ui64((now + entry.VisibilityTimeout).MilliSeconds()); if (maxReceiveCount && entry.ReceiveCount >= maxReceiveCount) { item["DlqIndex"] = ui64(deadLettersCounter); ++deadLettersCounter; @@ -2568,12 +2568,12 @@ void TQueueLeader::TLoadBatch::Execute(TQueueLeader* leader) { item["IsDeadLetter"] = false; } - if (!prevRequestId || *prevRequestId != entry.RequestId) { - prevRequestId = &entry.RequestId; - RLOG_SQS_REQ_DEBUG(entry.RequestId, "Send batch transaction to database. BatchId: " << BatchId); - } - } - + if (!prevRequestId || *prevRequestId != entry.RequestId) { + prevRequestId = &entry.RequestId; + RLOG_SQS_REQ_DEBUG(entry.RequestId, "Send batch transaction to database. 
BatchId: " << BatchId); + } + } + if (deadLettersCounter) { // perform heavy read and move transaction (DLQ) Y_VERIFY(leader->DlqInfo_); @@ -2597,41 +2597,41 @@ void TQueueLeader::TLoadBatch::Execute(TQueueLeader* leader) { const bool usedDLQ = deadLettersCounter; builder.OnExecuted([leader, shard = Shard, batchId = BatchId, usedDLQ] (const TSqsEvents::TEvExecuted::TRecord& ev) { leader->OnLoadStdMessagesBatchExecuted(shard, batchId, usedDLQ, ev); - }); - - builder.Start(); -} - + }); + + builder.Start(); +} + bool TQueueLeader::TShardInfo::HasMessagesToAddToInfly() const { - return Infly ? Infly->GetCapacity() < MessagesCount : MessagesCount > 0; -} - + return Infly ? Infly->GetCapacity() < MessagesCount : MessagesCount > 0; +} + bool TQueueLeader::TShardInfo::NeedAddMessagesToInflyCheckInDatabase() const { - const NKikimrConfig::TSqsConfig& cfg = Cfg(); - if (AddMessagesToInflyCheckAttempts < cfg.GetAddMessagesToInflyMinCheckAttempts()) { - return false; - } - const TInstant now = TActivationContext::Now(); - return now - LastAddMessagesToInfly > TDuration::MilliSeconds(cfg.GetAddMessagesToInflyCheckPeriodMs()); -} - -} // namespace NKikimr::NSQS - -template<> -void Out<NKikimr::NSQS::TSqsEvents::TQueueAttributes>(IOutputStream& o, - typename TTypeTraits<NKikimr::NSQS::TSqsEvents::TQueueAttributes>::TFuncParam x) { - o << "{ ContentBasedDeduplication: " << x.ContentBasedDeduplication; - o << " DelaySeconds: " << x.DelaySeconds; - o << " FifoQueue: " << x.FifoQueue; - o << " MaximumMessageSize: " << x.MaximumMessageSize; - o << " MessageRetentionPeriod: " << x.MessageRetentionPeriod; - o << " ReceiveMessageWaitTime: " << x.ReceiveMessageWaitTime; - o << " VisibilityTimeout: " << x.VisibilityTimeout; - o << " }"; -} - -template<> -void Out<NKikimr::NSQS::TQueuePath>(IOutputStream& o, - typename TTypeTraits<NKikimr::NSQS::TQueuePath>::TFuncParam path) { - o << path.GetQueuePath(); -} + const NKikimrConfig::TSqsConfig& cfg = Cfg(); + if (AddMessagesToInflyCheckAttempts < cfg.GetAddMessagesToInflyMinCheckAttempts()) { + return false; + } + const TInstant now = TActivationContext::Now(); + return now - LastAddMessagesToInfly > TDuration::MilliSeconds(cfg.GetAddMessagesToInflyCheckPeriodMs()); +} + +} // namespace NKikimr::NSQS + +template<> +void Out<NKikimr::NSQS::TSqsEvents::TQueueAttributes>(IOutputStream& o, + typename TTypeTraits<NKikimr::NSQS::TSqsEvents::TQueueAttributes>::TFuncParam x) { + o << "{ ContentBasedDeduplication: " << x.ContentBasedDeduplication; + o << " DelaySeconds: " << x.DelaySeconds; + o << " FifoQueue: " << x.FifoQueue; + o << " MaximumMessageSize: " << x.MaximumMessageSize; + o << " MessageRetentionPeriod: " << x.MessageRetentionPeriod; + o << " ReceiveMessageWaitTime: " << x.ReceiveMessageWaitTime; + o << " VisibilityTimeout: " << x.VisibilityTimeout; + o << " }"; +} + +template<> +void Out<NKikimr::NSQS::TQueuePath>(IOutputStream& o, + typename TTypeTraits<NKikimr::NSQS::TQueuePath>::TFuncParam path) { + o << path.GetQueuePath(); +} diff --git a/ydb/core/ymq/actor/queue_leader.h b/ydb/core/ymq/actor/queue_leader.h index 3504e4ddb1a..f0a3f06d557 100644 --- a/ydb/core/ymq/actor/queue_leader.h +++ b/ydb/core/ymq/actor/queue_leader.h @@ -1,182 +1,182 @@ -#pragma once -#include "defs.h" -#include "cfg.h" -#include "events.h" -#include "local_rate_limiter_allocator.h" - +#pragma once +#include "defs.h" +#include "cfg.h" +#include "events.h" +#include "local_rate_limiter_allocator.h" + #include <ydb/core/protos/services.pb.h> #include <ydb/public/lib/value/value.h> 
#include <ydb/core/ymq/actor/infly.h> #include <ydb/core/ymq/actor/message_delay_stats.h> #include <ydb/core/ymq/base/counters.h> - + #include <library/cpp/actors/core/actorid.h> #include <library/cpp/actors/core/actor_bootstrapped.h> #include <library/cpp/monlib/dynamic_counters/counters.h> - -#include <util/generic/guid.h> -#include <util/generic/ptr.h> -#include <util/generic/queue.h> - -#include <deque> - -namespace NKikimr::NSQS { - + +#include <util/generic/guid.h> +#include <util/generic/ptr.h> +#include <util/generic/queue.h> + +#include <deque> + +namespace NKikimr::NSQS { + class TQueueLeader : public TActorBootstrapped<TQueueLeader> { - struct TSendMessageBatchRequestProcessing; - struct TReceiveMessageBatchRequestProcessing; - struct TDeleteMessageBatchRequestProcessing; - struct TChangeMessageVisibilityBatchRequestProcessing; - struct TGetRuntimeQueueAttributesRequestProcessing; - -public: + struct TSendMessageBatchRequestProcessing; + struct TReceiveMessageBatchRequestProcessing; + struct TDeleteMessageBatchRequestProcessing; + struct TChangeMessageVisibilityBatchRequestProcessing; + struct TGetRuntimeQueueAttributesRequestProcessing; + +public: TQueueLeader(TString userName, TString queueName, TString folderId, TString rootUrl, TIntrusivePtr<TQueueCounters> counters, TIntrusivePtr<TUserCounters> userCounters, const TActorId& schemeCache, const TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions>& quoterResourcesForUser); ~TQueueLeader(); - - void Bootstrap(); - + + void Bootstrap(); + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_QUEUE_LEADER_ACTOR; - } - -private: - STATEFN(StateInit); - STATEFN(StateWorking); - - void PassAway() override; - void HandleWakeup(TEvWakeup::TPtr& ev); - void HandleGetConfigurationWhileIniting(TSqsEvents::TEvGetConfiguration::TPtr& ev); - void HandleGetConfigurationWhileWorking(TSqsEvents::TEvGetConfiguration::TPtr& ev); - void HandleExecuteWhileIniting(TSqsEvents::TEvExecute::TPtr& ev); - void HandleExecuteWhileWorking(TSqsEvents::TEvExecute::TPtr& ev); - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - void HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev); - void HandleClearQueueAttributesCache(TSqsEvents::TEvClearQueueAttributesCache::TPtr& ev); - void HandlePurgeQueue(TSqsEvents::TEvPurgeQueue::TPtr& ev); - void HandleSendMessageBatchWhileIniting(TSqsEvents::TEvSendMessageBatch::TPtr& ev); - void HandleSendMessageBatchWhileWorking(TSqsEvents::TEvSendMessageBatch::TPtr& ev); - void HandleReceiveMessageBatchWhileIniting(TSqsEvents::TEvReceiveMessageBatch::TPtr& ev); - void HandleReceiveMessageBatchWhileWorking(TSqsEvents::TEvReceiveMessageBatch::TPtr& ev); - void HandleDeleteMessageBatchWhileIniting(TSqsEvents::TEvDeleteMessageBatch::TPtr& ev); - void HandleDeleteMessageBatchWhileWorking(TSqsEvents::TEvDeleteMessageBatch::TPtr& ev); - void HandleChangeMessageVisibilityBatchWhileIniting(TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr& ev); - void HandleChangeMessageVisibilityBatchWhileWorking(TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr& ev); - void HandleInflyIsPurgingNotification(TSqsEvents::TEvInflyIsPurgingNotification::TPtr& ev); - void HandleQueuePurgedNotification(TSqsEvents::TEvQueuePurgedNotification::TPtr& ev); - void HandleGetRuntimeQueueAttributesWhileIniting(TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr& ev); - void HandleGetRuntimeQueueAttributesWhileWorking(TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr& ev); - void 
HandleMigrationDone(TSqsEvents::TEvMigrationDone::TPtr& ev); - void HandleDeadLetterQueueNotification(TSqsEvents::TEvDeadLetterQueueNotification::TPtr& ev); - - void BecomeWorking(); - void RequestConfiguration(); - void StartGatheringMetrics(); - void RequestMessagesCountMetrics(ui64 shard); - void RequestOldestTimestampMetrics(ui64 shard); - void ReceiveMessagesCountMetrics(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); - void ReceiveOldestTimestampMetrics(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); - void ReportMessagesCountMetricsIfReady(); - void ReportOldestTimestampMetricsIfReady(); - void OnQueueConfiguration(const TSqsEvents::TEvExecuted::TRecord& ev); - void ScheduleGetConfigurationRetry(); - void Prepare(TSqsEvents::TEvExecute::TPtr& ev); - void OnQueryPrepared(TSqsEvents::TEvExecute::TPtr& ev, const TSqsEvents::TEvExecuted::TRecord& record); - void ExecuteRequest(TSqsEvents::TEvExecute::TPtr& ev, const TString& compiled); - void OnQueryExecuted(TSqsEvents::TEvExecute::TPtr& ev, const TSqsEvents::TEvExecuted::TRecord& record); - void RemoveCachedRequest(size_t shard, size_t idx); - void CreateBackgroundActors(); - void AnswerGetConfiguration(TSqsEvents::TEvGetConfiguration::TPtr& req); - void AnswerFailed(TSqsEvents::TEvGetConfiguration::TPtr& ev); - void AskQueueAttributes(); - void OnQueueAttributes(const TSqsEvents::TEvExecuted::TRecord& ev); - void MarkInflyReloading(ui64 shard, size_t invalidatedCount, const TString& invalidationReason); - void StartLoadingInfly(); - void StartLoadingInfly(ui64 shard, bool afterFailure = false); - void OnInflyLoaded(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); - void OnStateLoaded(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); - void ScheduleInflyLoadAfterFailure(ui64 shard); - bool AddMessagesToInfly(ui64 shard); - void OnAddedMessagesToInfly(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); - void ProcessReceivesAfterAddedMessagesToInfly(ui64 shard); - void StartMessageRequestsAfterInflyLoaded(ui64 shard); - void FailMessageRequestsAfterInflyLoadFailure(ui64 shard); - void ProcessGetRuntimeQueueAttributes(TGetRuntimeQueueAttributesRequestProcessing& reqInfo); - void ProcessGetRuntimeQueueAttributes(ui64 shard, TGetRuntimeQueueAttributesRequestProcessing& reqInfo); - void ProcessGetRuntimeQueueAttributes(ui64 shard); - void FailGetRuntimeQueueAttributesForShard(ui64 shard); - void FailRequestsDuringStartProblems(); - - // send - void ProcessSendMessageBatch(TSendMessageBatchRequestProcessing& reqInfo); - void OnMessageSent(const TString& requestId, size_t index, const TSqsEvents::TEvExecuted::TRecord& reply, const NKikimr::NClient::TValue* messageRecord); - // batching - void OnSendBatchExecuted(ui64 shard, ui64 batchId, const TSqsEvents::TEvExecuted::TRecord& reply); - - // receive - void ProcessReceiveMessageBatch(TReceiveMessageBatchRequestProcessing& reqInfo); - void LockFifoGroup(TReceiveMessageBatchRequestProcessing& reqInfo); - void OnFifoGroupLocked(const TString& requestId, const TSqsEvents::TEvExecuted::TRecord& ev); - void ReadFifoMessages(TReceiveMessageBatchRequestProcessing& reqInfo); - void OnFifoMessagesRead(const TString& requestId, const TSqsEvents::TEvExecuted::TRecord& ev, bool usedDLQ); - - void GetMessagesFromInfly(TReceiveMessageBatchRequestProcessing& reqInfo); - void LoadStdMessages(TReceiveMessageBatchRequestProcessing& reqInfo); - void OnLoadStdMessageResult(const TString& requestId, ui64 offset, const TSqsEvents::TEvExecuted::TRecord& ev, const 
NKikimr::NClient::TValue* messageRecord, bool ignoreMessageLoadingErrors); - void TryReceiveAnotherShard(TReceiveMessageBatchRequestProcessing& reqInfo); - void WaitAddMessagesToInflyOrTryAnotherShard(TReceiveMessageBatchRequestProcessing& reqInfo); - void Reply(TReceiveMessageBatchRequestProcessing& reqInfo); - // batching - void OnLoadStdMessagesBatchExecuted(ui64 shard, ui64 batchId, const bool usedDLQ, const TSqsEvents::TEvExecuted::TRecord& reply); - - // delete - void ProcessDeleteMessageBatch(TDeleteMessageBatchRequestProcessing& reqInfo); - void OnMessageDeleted(const TString& requestId, ui64 shard, size_t index, const TSqsEvents::TEvExecuted::TRecord& reply, const NKikimr::NClient::TValue* messageRecord); - // batching - void OnDeleteBatchExecuted(ui64 shard, ui64 batchId, const TSqsEvents::TEvExecuted::TRecord& reply); - - // change message visibility - void ProcessChangeMessageVisibilityBatch(TChangeMessageVisibilityBatchRequestProcessing& reqInfo); - void OnVisibilityChanged(const TString& requestId, ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); - - TQueuePath GetQueuePath() { - return TQueuePath(Cfg().GetRoot(), UserName_, QueueName_, QueueVersion_); - } - - void ScheduleMetricsRequest(); - - bool IncActiveMessageRequests(ui64 shard, const TString& requestId); - void DecActiveMessageRequests(ui64 shard); - - void InitQuoterResources(); - -private: - // const info - TString UserName_; - TString QueueName_; + } + +private: + STATEFN(StateInit); + STATEFN(StateWorking); + + void PassAway() override; + void HandleWakeup(TEvWakeup::TPtr& ev); + void HandleGetConfigurationWhileIniting(TSqsEvents::TEvGetConfiguration::TPtr& ev); + void HandleGetConfigurationWhileWorking(TSqsEvents::TEvGetConfiguration::TPtr& ev); + void HandleExecuteWhileIniting(TSqsEvents::TEvExecute::TPtr& ev); + void HandleExecuteWhileWorking(TSqsEvents::TEvExecute::TPtr& ev); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev); + void HandleClearQueueAttributesCache(TSqsEvents::TEvClearQueueAttributesCache::TPtr& ev); + void HandlePurgeQueue(TSqsEvents::TEvPurgeQueue::TPtr& ev); + void HandleSendMessageBatchWhileIniting(TSqsEvents::TEvSendMessageBatch::TPtr& ev); + void HandleSendMessageBatchWhileWorking(TSqsEvents::TEvSendMessageBatch::TPtr& ev); + void HandleReceiveMessageBatchWhileIniting(TSqsEvents::TEvReceiveMessageBatch::TPtr& ev); + void HandleReceiveMessageBatchWhileWorking(TSqsEvents::TEvReceiveMessageBatch::TPtr& ev); + void HandleDeleteMessageBatchWhileIniting(TSqsEvents::TEvDeleteMessageBatch::TPtr& ev); + void HandleDeleteMessageBatchWhileWorking(TSqsEvents::TEvDeleteMessageBatch::TPtr& ev); + void HandleChangeMessageVisibilityBatchWhileIniting(TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr& ev); + void HandleChangeMessageVisibilityBatchWhileWorking(TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr& ev); + void HandleInflyIsPurgingNotification(TSqsEvents::TEvInflyIsPurgingNotification::TPtr& ev); + void HandleQueuePurgedNotification(TSqsEvents::TEvQueuePurgedNotification::TPtr& ev); + void HandleGetRuntimeQueueAttributesWhileIniting(TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr& ev); + void HandleGetRuntimeQueueAttributesWhileWorking(TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr& ev); + void HandleMigrationDone(TSqsEvents::TEvMigrationDone::TPtr& ev); + void HandleDeadLetterQueueNotification(TSqsEvents::TEvDeadLetterQueueNotification::TPtr& ev); + + void BecomeWorking(); + void RequestConfiguration(); + void 
StartGatheringMetrics(); + void RequestMessagesCountMetrics(ui64 shard); + void RequestOldestTimestampMetrics(ui64 shard); + void ReceiveMessagesCountMetrics(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); + void ReceiveOldestTimestampMetrics(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); + void ReportMessagesCountMetricsIfReady(); + void ReportOldestTimestampMetricsIfReady(); + void OnQueueConfiguration(const TSqsEvents::TEvExecuted::TRecord& ev); + void ScheduleGetConfigurationRetry(); + void Prepare(TSqsEvents::TEvExecute::TPtr& ev); + void OnQueryPrepared(TSqsEvents::TEvExecute::TPtr& ev, const TSqsEvents::TEvExecuted::TRecord& record); + void ExecuteRequest(TSqsEvents::TEvExecute::TPtr& ev, const TString& compiled); + void OnQueryExecuted(TSqsEvents::TEvExecute::TPtr& ev, const TSqsEvents::TEvExecuted::TRecord& record); + void RemoveCachedRequest(size_t shard, size_t idx); + void CreateBackgroundActors(); + void AnswerGetConfiguration(TSqsEvents::TEvGetConfiguration::TPtr& req); + void AnswerFailed(TSqsEvents::TEvGetConfiguration::TPtr& ev); + void AskQueueAttributes(); + void OnQueueAttributes(const TSqsEvents::TEvExecuted::TRecord& ev); + void MarkInflyReloading(ui64 shard, size_t invalidatedCount, const TString& invalidationReason); + void StartLoadingInfly(); + void StartLoadingInfly(ui64 shard, bool afterFailure = false); + void OnInflyLoaded(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); + void OnStateLoaded(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); + void ScheduleInflyLoadAfterFailure(ui64 shard); + bool AddMessagesToInfly(ui64 shard); + void OnAddedMessagesToInfly(ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); + void ProcessReceivesAfterAddedMessagesToInfly(ui64 shard); + void StartMessageRequestsAfterInflyLoaded(ui64 shard); + void FailMessageRequestsAfterInflyLoadFailure(ui64 shard); + void ProcessGetRuntimeQueueAttributes(TGetRuntimeQueueAttributesRequestProcessing& reqInfo); + void ProcessGetRuntimeQueueAttributes(ui64 shard, TGetRuntimeQueueAttributesRequestProcessing& reqInfo); + void ProcessGetRuntimeQueueAttributes(ui64 shard); + void FailGetRuntimeQueueAttributesForShard(ui64 shard); + void FailRequestsDuringStartProblems(); + + // send + void ProcessSendMessageBatch(TSendMessageBatchRequestProcessing& reqInfo); + void OnMessageSent(const TString& requestId, size_t index, const TSqsEvents::TEvExecuted::TRecord& reply, const NKikimr::NClient::TValue* messageRecord); + // batching + void OnSendBatchExecuted(ui64 shard, ui64 batchId, const TSqsEvents::TEvExecuted::TRecord& reply); + + // receive + void ProcessReceiveMessageBatch(TReceiveMessageBatchRequestProcessing& reqInfo); + void LockFifoGroup(TReceiveMessageBatchRequestProcessing& reqInfo); + void OnFifoGroupLocked(const TString& requestId, const TSqsEvents::TEvExecuted::TRecord& ev); + void ReadFifoMessages(TReceiveMessageBatchRequestProcessing& reqInfo); + void OnFifoMessagesRead(const TString& requestId, const TSqsEvents::TEvExecuted::TRecord& ev, bool usedDLQ); + + void GetMessagesFromInfly(TReceiveMessageBatchRequestProcessing& reqInfo); + void LoadStdMessages(TReceiveMessageBatchRequestProcessing& reqInfo); + void OnLoadStdMessageResult(const TString& requestId, ui64 offset, const TSqsEvents::TEvExecuted::TRecord& ev, const NKikimr::NClient::TValue* messageRecord, bool ignoreMessageLoadingErrors); + void TryReceiveAnotherShard(TReceiveMessageBatchRequestProcessing& reqInfo); + void 
WaitAddMessagesToInflyOrTryAnotherShard(TReceiveMessageBatchRequestProcessing& reqInfo); + void Reply(TReceiveMessageBatchRequestProcessing& reqInfo); + // batching + void OnLoadStdMessagesBatchExecuted(ui64 shard, ui64 batchId, const bool usedDLQ, const TSqsEvents::TEvExecuted::TRecord& reply); + + // delete + void ProcessDeleteMessageBatch(TDeleteMessageBatchRequestProcessing& reqInfo); + void OnMessageDeleted(const TString& requestId, ui64 shard, size_t index, const TSqsEvents::TEvExecuted::TRecord& reply, const NKikimr::NClient::TValue* messageRecord); + // batching + void OnDeleteBatchExecuted(ui64 shard, ui64 batchId, const TSqsEvents::TEvExecuted::TRecord& reply); + + // change message visibility + void ProcessChangeMessageVisibilityBatch(TChangeMessageVisibilityBatchRequestProcessing& reqInfo); + void OnVisibilityChanged(const TString& requestId, ui64 shard, const TSqsEvents::TEvExecuted::TRecord& reply); + + TQueuePath GetQueuePath() { + return TQueuePath(Cfg().GetRoot(), UserName_, QueueName_, QueueVersion_); + } + + void ScheduleMetricsRequest(); + + bool IncActiveMessageRequests(ui64 shard, const TString& requestId); + void DecActiveMessageRequests(ui64 shard); + + void InitQuoterResources(); + +private: + // const info + TString UserName_; + TString QueueName_; TString FolderId_; - TString RootUrl_; - ui64 ShardsCount_ = 0; - ui64 PartitionsCount_ = 0; + TString RootUrl_; + ui64 ShardsCount_ = 0; + ui64 PartitionsCount_ = 0; bool IsFifoQueue_ = false; - TString QueueId_; + TString QueueId_; ui64 QueueVersion_ = 0; TActorId SchemeCache_; - TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> QuoterResources_; - TLocalRateLimiterResource SendMessageQuoterResource_; - TLocalRateLimiterResource ReceiveMessageQuoterResource_; - TLocalRateLimiterResource DeleteMessageQuoterResource_; - TLocalRateLimiterResource ChangeMessageVisibilityQuoterResource_; - - // attributes cache - TDuration QueueAttributesCacheTime_ = TDuration::Zero(); - TInstant AttributesUpdateTime_ = TInstant::Zero(); - TMaybe<TSqsEvents::TQueueAttributes> QueueAttributes_; - - // counters - TIntrusivePtr<TQueueCounters> Counters_; - TIntrusivePtr<TUserCounters> UserCounters_; - size_t MetricsQueriesInfly_ = 0; - + TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> QuoterResources_; + TLocalRateLimiterResource SendMessageQuoterResource_; + TLocalRateLimiterResource ReceiveMessageQuoterResource_; + TLocalRateLimiterResource DeleteMessageQuoterResource_; + TLocalRateLimiterResource ChangeMessageVisibilityQuoterResource_; + + // attributes cache + TDuration QueueAttributesCacheTime_ = TDuration::Zero(); + TInstant AttributesUpdateTime_ = TInstant::Zero(); + TMaybe<TSqsEvents::TQueueAttributes> QueueAttributes_; + + // counters + TIntrusivePtr<TQueueCounters> Counters_; + TIntrusivePtr<TUserCounters> UserCounters_; + size_t MetricsQueriesInfly_ = 0; + // dead letter queue params struct TTargetDlqInfo { // from attributes @@ -188,312 +188,312 @@ private: ui64 ShardsCount = 0; }; TMaybe<TTargetDlqInfo> DlqInfo_; - bool IsDlqQueue_ = false; + bool IsDlqQueue_ = false; TInstant LatestDlqNotificationTs_ = TInstant::Zero(); - // shards - enum class EQueryState { - Empty, - Preparing, - Cached, - }; - - struct TQuery { + // shards + enum class EQueryState { + Empty, + Preparing, + Cached, + }; + + struct TQuery { /// Compiled program - TString Compiled; + TString Compiled; /// A vector of queries that are awaiting compilation completion TVector<TSqsEvents::TEvExecute::TPtr> Deferred; /// Program state - EQueryState State = 
EQueryState::Empty; - }; - - // send/receive/delete batching - template <class TEntry> - struct TBatchBase : TSimpleRefCount<TBatchBase<TEntry>> { - explicit TBatchBase(ui64 shard, ui64 sizeLimit, bool isFifo) - : Shard(shard) - , SizeLimit(sizeLimit) - , IsFifoQueue(isFifo) - { - Entries.reserve(SizeLimit); - } - - virtual ~TBatchBase() = default; - + EQueryState State = EQueryState::Empty; + }; + + // send/receive/delete batching + template <class TEntry> + struct TBatchBase : TSimpleRefCount<TBatchBase<TEntry>> { + explicit TBatchBase(ui64 shard, ui64 sizeLimit, bool isFifo) + : Shard(shard) + , SizeLimit(sizeLimit) + , IsFifoQueue(isFifo) + { + Entries.reserve(SizeLimit); + } + + virtual ~TBatchBase() = default; + void Execute(TQueueLeader*) { - } - - size_t Size() const { - return Entries.size(); - } - - bool IsFull() const { - return Size() >= SizeLimit; - } - - TActorIdentity SelfId() const { - return TActorIdentity(TActivationContext::AsActorContext().SelfID); - } - - ui64 BatchId = 0; - std::vector<TEntry> Entries; - ui64 Shard = 0; - ui64 SizeLimit = 0; - bool IsFifoQueue = false; - const TString RequestId_ = CreateGuidAsString(); // Debug request id to take part in sample logging. - }; - - template <class TBatch> - struct TBatchingState { + } + + size_t Size() const { + return Entries.size(); + } + + bool IsFull() const { + return Size() >= SizeLimit; + } + + TActorIdentity SelfId() const { + return TActorIdentity(TActivationContext::AsActorContext().SelfID); + } + + ui64 BatchId = 0; + std::vector<TEntry> Entries; + ui64 Shard = 0; + ui64 SizeLimit = 0; + bool IsFifoQueue = false; + const TString RequestId_ = CreateGuidAsString(); // Debug request id to take part in sample logging. + }; + + template <class TBatch> + struct TBatchingState { virtual ~TBatchingState(); - void Init(const NKikimrConfig::TSqsConfig::TBatchingPolicy& policy, ui64 shard, bool isFifo); + void Init(const NKikimrConfig::TSqsConfig::TBatchingPolicy& policy, ui64 shard, bool isFifo); void TryExecute(TQueueLeader* leader); - virtual bool CanExecute(const TBatch& batch) const { // Called for next batches when we have some batches in flight. 
- return batch.IsFull(); - } - TBatch& NewBatch(); - void CancelRequestsAfterInflyLoadFailure(); - - NKikimrConfig::TSqsConfig::TBatchingPolicy Policy; - std::deque<TIntrusivePtr<TBatch>> BatchesIniting; - THashMap<ui64, TIntrusivePtr<TBatch>> BatchesExecuting; // Id -> batch - ui64 Shard = 0; - bool IsFifoQueue = false; - ui64 NextBatchId = 1; - }; - - struct TSendBatchEntry { - TSendBatchEntry(const TString& requestId, const TString& senderId, const TSqsEvents::TEvSendMessageBatch::TMessageEntry& message, size_t index) - : RequestId(requestId) - , SenderId(senderId) - , Message(message) - , IndexInRequest(index) - { - } - - TString RequestId; - TString SenderId; - TSqsEvents::TEvSendMessageBatch::TMessageEntry Message; - size_t IndexInRequest = 0; - }; - - struct TDeleteBatchEntry { - TDeleteBatchEntry(const TString& requestId, const TSqsEvents::TEvDeleteMessageBatch::TMessageEntry& message, size_t index) - : RequestId(requestId) - , Message(message) - , IndexInRequest(index) - { - } - TString RequestId; - TSqsEvents::TEvDeleteMessageBatch::TMessageEntry Message; - size_t IndexInRequest = 0; - }; - - struct TLoadBatchEntry { - TLoadBatchEntry(const TString& requestId, TInflyMessage* msg, TDuration visibilityTimeout) - : RequestId(requestId) - , RandomId(msg->GetRandomId()) - , Offset(msg->GetOffset()) + virtual bool CanExecute(const TBatch& batch) const { // Called for next batches when we have some batches in flight. + return batch.IsFull(); + } + TBatch& NewBatch(); + void CancelRequestsAfterInflyLoadFailure(); + + NKikimrConfig::TSqsConfig::TBatchingPolicy Policy; + std::deque<TIntrusivePtr<TBatch>> BatchesIniting; + THashMap<ui64, TIntrusivePtr<TBatch>> BatchesExecuting; // Id -> batch + ui64 Shard = 0; + bool IsFifoQueue = false; + ui64 NextBatchId = 1; + }; + + struct TSendBatchEntry { + TSendBatchEntry(const TString& requestId, const TString& senderId, const TSqsEvents::TEvSendMessageBatch::TMessageEntry& message, size_t index) + : RequestId(requestId) + , SenderId(senderId) + , Message(message) + , IndexInRequest(index) + { + } + + TString RequestId; + TString SenderId; + TSqsEvents::TEvSendMessageBatch::TMessageEntry Message; + size_t IndexInRequest = 0; + }; + + struct TDeleteBatchEntry { + TDeleteBatchEntry(const TString& requestId, const TSqsEvents::TEvDeleteMessageBatch::TMessageEntry& message, size_t index) + : RequestId(requestId) + , Message(message) + , IndexInRequest(index) + { + } + TString RequestId; + TSqsEvents::TEvDeleteMessageBatch::TMessageEntry Message; + size_t IndexInRequest = 0; + }; + + struct TLoadBatchEntry { + TLoadBatchEntry(const TString& requestId, TInflyMessage* msg, TDuration visibilityTimeout) + : RequestId(requestId) + , RandomId(msg->GetRandomId()) + , Offset(msg->GetOffset()) , ReceiveCount(msg->GetReceiveCount()) - , CurrentVisibilityDeadline(msg->GetVisibilityDeadline()) - , VisibilityTimeout(visibilityTimeout) - { - } - - TString RequestId; - ui64 RandomId = 0; - ui64 Offset = 0; + , CurrentVisibilityDeadline(msg->GetVisibilityDeadline()) + , VisibilityTimeout(visibilityTimeout) + { + } + + TString RequestId; + ui64 RandomId = 0; + ui64 Offset = 0; ui32 ReceiveCount = 0; - TInstant CurrentVisibilityDeadline; - TDuration VisibilityTimeout; - }; - - template <class TEntry> - struct TBatchWithGroupInfo : public TBatchBase<TEntry> { - using TBatchBase<TEntry>::TBatchBase; - - bool HasGroup(const TString& group) { - return Groups.find(group) != Groups.end(); - } - - void AddGroup(const TString& group) { - Groups.insert(group); - } - - 
std::set<TString> Groups; - }; - - struct TSendBatch : public TBatchWithGroupInfo<TSendBatchEntry> { - using TBatchWithGroupInfo<TSendBatchEntry>::TBatchWithGroupInfo; - - void AddEntry(TSendMessageBatchRequestProcessing& reqInfo, size_t i); + TInstant CurrentVisibilityDeadline; + TDuration VisibilityTimeout; + }; + + template <class TEntry> + struct TBatchWithGroupInfo : public TBatchBase<TEntry> { + using TBatchBase<TEntry>::TBatchBase; + + bool HasGroup(const TString& group) { + return Groups.find(group) != Groups.end(); + } + + void AddGroup(const TString& group) { + Groups.insert(group); + } + + std::set<TString> Groups; + }; + + struct TSendBatch : public TBatchWithGroupInfo<TSendBatchEntry> { + using TBatchWithGroupInfo<TSendBatchEntry>::TBatchWithGroupInfo; + + void AddEntry(TSendMessageBatchRequestProcessing& reqInfo, size_t i); void Execute(TQueueLeader* leader); - - TInstant TransactionStartedTime; - }; - - struct TDeleteBatch : public TBatchWithGroupInfo<TDeleteBatchEntry> { - using TBatchWithGroupInfo<TDeleteBatchEntry>::TBatchWithGroupInfo; - - void AddEntry(TDeleteMessageBatchRequestProcessing& reqInfo, size_t i); + + TInstant TransactionStartedTime; + }; + + struct TDeleteBatch : public TBatchWithGroupInfo<TDeleteBatchEntry> { + using TBatchWithGroupInfo<TDeleteBatchEntry>::TBatchWithGroupInfo; + + void AddEntry(TDeleteMessageBatchRequestProcessing& reqInfo, size_t i); void Execute(TQueueLeader* leader); - - THashMultiMap<ui64, size_t> Offset2Entry; - }; - - struct TLoadBatch : public TBatchBase<TLoadBatchEntry> { - using TBatchBase<TLoadBatchEntry>::TBatchBase; - + + THashMultiMap<ui64, size_t> Offset2Entry; + }; + + struct TLoadBatch : public TBatchBase<TLoadBatchEntry> { + using TBatchBase<TLoadBatchEntry>::TBatchBase; + void Execute(TQueueLeader* leader); - }; - - template <class TBatch> - struct TBatchingStateWithGroupsRestrictions : public TBatchingState<TBatch> { - template <class TRequestProcessing> - void AddRequest(TRequestProcessing& reqInfo); - bool CanExecute(const TBatch& batch) const override; - }; - - struct TSendBatchingState : public TBatchingStateWithGroupsRestrictions<TSendBatch> { - }; - - struct TDeleteBatchingState : public TBatchingStateWithGroupsRestrictions<TDeleteBatch> { - }; - - struct TLoadBatchingState : public TBatchingState<TLoadBatch> { - void AddRequest(TReceiveMessageBatchRequestProcessing& reqInfo); - }; - - struct TShardInfo { - ~TShardInfo(); - + }; + + template <class TBatch> + struct TBatchingStateWithGroupsRestrictions : public TBatchingState<TBatch> { + template <class TRequestProcessing> + void AddRequest(TRequestProcessing& reqInfo); + bool CanExecute(const TBatch& batch) const override; + }; + + struct TSendBatchingState : public TBatchingStateWithGroupsRestrictions<TSendBatch> { + }; + + struct TDeleteBatchingState : public TBatchingStateWithGroupsRestrictions<TDeleteBatch> { + }; + + struct TLoadBatchingState : public TBatchingState<TLoadBatch> { + void AddRequest(TReceiveMessageBatchRequestProcessing& reqInfo); + }; + + struct TShardInfo { + ~TShardInfo(); + /// Counters - ui64 MessagesCount = 0; - ui64 InflyMessagesCount = 0; - ui64 OldestMessageTimestampMs = Max(); - ui64 LastSuccessfulOldestMessageTimestampValueMs = 0; // for query optimization - more accurate range - - bool MessagesCountIsRequesting = false; - bool MessagesCountWasGot = false; - bool OldestMessageAgeIsRequesting = false; - - TInstant CreatedTimestamp; - + ui64 MessagesCount = 0; + ui64 InflyMessagesCount = 0; + ui64 OldestMessageTimestampMs = Max(); + 
ui64 LastSuccessfulOldestMessageTimestampValueMs = 0; // for query optimization - more accurate range + + bool MessagesCountIsRequesting = false; + bool MessagesCountWasGot = false; + bool OldestMessageAgeIsRequesting = false; + + TInstant CreatedTimestamp; + /// Compiled queries - TQuery Queries[QUERY_VECTOR_SIZE]; - - TIntrusivePtr<TInflyMessages> Infly; // Infly for standard queues - enum class EInflyLoadState { - New, // initial state for std queues - Fifo, // doesn't need to load - Loaded, // we can use infly - WaitingForActiveRequests, // waiting for active requests to finish - WaitingForDbAnswer, // db query was sent - Failed, // waiting for scheduled retry - }; - EInflyLoadState InflyLoadState = EInflyLoadState::New; - size_t LoadInflyRequests = 0; - size_t ActiveMessageRequests = 0; - ui64 ReadOffset = 0; - bool AddingMessagesToInfly = false; - bool NeedInflyReload = false; - bool NeedAddingMessagesToInfly = false; - ui64 InflyVersion = 0; - bool DelayStatisticsInited = false; - TInstant LastAddMessagesToInfly; // Time when AddMessagesToInfly procedure worked - ui64 AddMessagesToInflyCheckAttempts = 0; - - TSendBatchingState SendBatchingState; - TDeleteBatchingState DeleteBatchingState; - TLoadBatchingState LoadBatchingState; - - bool HasMessagesToAddToInfly() const; - bool NeedAddMessagesToInflyCheckInDatabase() const; - }; - std::vector<TShardInfo> Shards_; - TMessageDelayStatistics DelayStatistics_; - - // background actors + TQuery Queries[QUERY_VECTOR_SIZE]; + + TIntrusivePtr<TInflyMessages> Infly; // Infly for standard queues + enum class EInflyLoadState { + New, // initial state for std queues + Fifo, // doesn't need to load + Loaded, // we can use infly + WaitingForActiveRequests, // waiting for active requests to finish + WaitingForDbAnswer, // db query was sent + Failed, // waiting for scheduled retry + }; + EInflyLoadState InflyLoadState = EInflyLoadState::New; + size_t LoadInflyRequests = 0; + size_t ActiveMessageRequests = 0; + ui64 ReadOffset = 0; + bool AddingMessagesToInfly = false; + bool NeedInflyReload = false; + bool NeedAddingMessagesToInfly = false; + ui64 InflyVersion = 0; + bool DelayStatisticsInited = false; + TInstant LastAddMessagesToInfly; // Time when AddMessagesToInfly procedure worked + ui64 AddMessagesToInflyCheckAttempts = 0; + + TSendBatchingState SendBatchingState; + TDeleteBatchingState DeleteBatchingState; + TLoadBatchingState LoadBatchingState; + + bool HasMessagesToAddToInfly() const; + bool NeedAddMessagesToInflyCheckInDatabase() const; + }; + std::vector<TShardInfo> Shards_; + TMessageDelayStatistics DelayStatistics_; + + // background actors TActorId DeduplicationCleanupActor_; TActorId ReadsCleanupActor_; TActorId RetentionActor_; TActorId PurgeActor_; - - struct TSendMessageBatchRequestProcessing { - TSendMessageBatchRequestProcessing(TSqsEvents::TEvSendMessageBatch::TPtr&& ev); - void Init(ui64 shardsCount); - - TSqsEvents::TEvSendMessageBatch::TPtr Event; - size_t AnswersGot = 0; - std::vector<TSqsEvents::TEvSendMessageBatchResponse::TMessageResult> Statuses; - ui64 Shard = 0; - bool Inited = false; - TInstant TransactionStartedTime; - }; - - struct TReceiveMessageBatchRequestProcessing { - TReceiveMessageBatchRequestProcessing(TSqsEvents::TEvReceiveMessageBatch::TPtr&& ev); - void Init(ui64 shardsCount); - - ui64 GetCurrentShard() const { - return Shards[CurrentShardIndex]; - } - - TSqsEvents::TEvReceiveMessageBatch::TPtr Event; - THolder<TSqsEvents::TEvReceiveMessageBatchResponse> Answer; - std::vector<ui64> Shards; - size_t 
CurrentShardIndex = 0; - TInstant LockSendTs = TInstant::Zero(); - size_t LockCount = 0; - TInflyMessages::TReceiveCandidates ReceiveCandidates; - size_t LoadAnswersLeft = 0; - bool LoadError = false; - bool Inited = false; - bool TriedAddMessagesToInfly = false; - bool WaitingAddMessagesToInfly = false; - TString FromGroup; // Start group position in LOCK_GROUPS query - struct TLockedFifoMessage { - ui64 RandomId = 0; - ui64 Offset = 0; + + struct TSendMessageBatchRequestProcessing { + TSendMessageBatchRequestProcessing(TSqsEvents::TEvSendMessageBatch::TPtr&& ev); + void Init(ui64 shardsCount); + + TSqsEvents::TEvSendMessageBatch::TPtr Event; + size_t AnswersGot = 0; + std::vector<TSqsEvents::TEvSendMessageBatchResponse::TMessageResult> Statuses; + ui64 Shard = 0; + bool Inited = false; + TInstant TransactionStartedTime; + }; + + struct TReceiveMessageBatchRequestProcessing { + TReceiveMessageBatchRequestProcessing(TSqsEvents::TEvReceiveMessageBatch::TPtr&& ev); + void Init(ui64 shardsCount); + + ui64 GetCurrentShard() const { + return Shards[CurrentShardIndex]; + } + + TSqsEvents::TEvReceiveMessageBatch::TPtr Event; + THolder<TSqsEvents::TEvReceiveMessageBatchResponse> Answer; + std::vector<ui64> Shards; + size_t CurrentShardIndex = 0; + TInstant LockSendTs = TInstant::Zero(); + size_t LockCount = 0; + TInflyMessages::TReceiveCandidates ReceiveCandidates; + size_t LoadAnswersLeft = 0; + bool LoadError = false; + bool Inited = false; + bool TriedAddMessagesToInfly = false; + bool WaitingAddMessagesToInfly = false; + TString FromGroup; // Start group position in LOCK_GROUPS query + struct TLockedFifoMessage { + ui64 RandomId = 0; + ui64 Offset = 0; TString GroupId; - }; - std::vector<TLockedFifoMessage> LockedFifoMessages; - }; - - struct TDeleteMessageBatchRequestProcessing { - TDeleteMessageBatchRequestProcessing(TSqsEvents::TEvDeleteMessageBatch::TPtr&& ev); - - TSqsEvents::TEvDeleteMessageBatch::TPtr Event; - THolder<TSqsEvents::TEvDeleteMessageBatchResponse> Answer; - std::vector<THolder<TInflyMessage>> InflyMessages; - size_t AnswersGot = 0; - }; - - struct TChangeMessageVisibilityBatchRequestProcessing { - TChangeMessageVisibilityBatchRequestProcessing(TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr&& ev); - - TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr Event; - THolder<TSqsEvents::TEvChangeMessageVisibilityBatchResponse> Answer; - TInflyMessages::TChangeVisibilityCandidates Candidates; - }; - - struct TGetRuntimeQueueAttributesRequestProcessing { - TGetRuntimeQueueAttributesRequestProcessing(TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr&& ev); - - TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr Event; - THolder<TSqsEvents::TEvGetRuntimeQueueAttributesResponse> Answer; - std::vector<bool> ShardProcessFlags; - ui64 ShardsProcessed = 0; - }; - - // requests - std::vector<TSqsEvents::TEvGetConfiguration::TPtr> GetConfigurationRequests_; + }; + std::vector<TLockedFifoMessage> LockedFifoMessages; + }; + + struct TDeleteMessageBatchRequestProcessing { + TDeleteMessageBatchRequestProcessing(TSqsEvents::TEvDeleteMessageBatch::TPtr&& ev); + + TSqsEvents::TEvDeleteMessageBatch::TPtr Event; + THolder<TSqsEvents::TEvDeleteMessageBatchResponse> Answer; + std::vector<THolder<TInflyMessage>> InflyMessages; + size_t AnswersGot = 0; + }; + + struct TChangeMessageVisibilityBatchRequestProcessing { + TChangeMessageVisibilityBatchRequestProcessing(TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr&& ev); + + TSqsEvents::TEvChangeMessageVisibilityBatch::TPtr Event; + 
THolder<TSqsEvents::TEvChangeMessageVisibilityBatchResponse> Answer; + TInflyMessages::TChangeVisibilityCandidates Candidates; + }; + + struct TGetRuntimeQueueAttributesRequestProcessing { + TGetRuntimeQueueAttributesRequestProcessing(TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr&& ev); + + TSqsEvents::TEvGetRuntimeQueueAttributes::TPtr Event; + THolder<TSqsEvents::TEvGetRuntimeQueueAttributesResponse> Answer; + std::vector<bool> ShardProcessFlags; + ui64 ShardsProcessed = 0; + }; + + // requests + std::vector<TSqsEvents::TEvGetConfiguration::TPtr> GetConfigurationRequests_; std::vector<TSqsEvents::TEvExecute::TPtr> ExecuteRequests_; // execute requests that wait for queue leader init - THashMap<TString, TSendMessageBatchRequestProcessing> SendMessageRequests_; // request id -> request - THashMap<TString, TReceiveMessageBatchRequestProcessing> ReceiveMessageRequests_; // request id -> request - THashMap<std::pair<TString, ui64>, TDeleteMessageBatchRequestProcessing> DeleteMessageRequests_; // (request id, shard) -> request - THashMap<std::pair<TString, ui64>, TChangeMessageVisibilityBatchRequestProcessing> ChangeMessageVisibilityRequests_; // (request id, shard) -> request - THashMap<TString, TGetRuntimeQueueAttributesRequestProcessing> GetRuntimeQueueAttributesRequests_; // request id -> request -}; - -} // namespace NKikimr::NSQS + THashMap<TString, TSendMessageBatchRequestProcessing> SendMessageRequests_; // request id -> request + THashMap<TString, TReceiveMessageBatchRequestProcessing> ReceiveMessageRequests_; // request id -> request + THashMap<std::pair<TString, ui64>, TDeleteMessageBatchRequestProcessing> DeleteMessageRequests_; // (request id, shard) -> request + THashMap<std::pair<TString, ui64>, TChangeMessageVisibilityBatchRequestProcessing> ChangeMessageVisibilityRequests_; // (request id, shard) -> request + THashMap<TString, TGetRuntimeQueueAttributesRequestProcessing> GetRuntimeQueueAttributesRequests_; // request id -> request +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/queue_schema.cpp b/ydb/core/ymq/actor/queue_schema.cpp index 91d3c5d3df4..bf8aa2ea698 100644 --- a/ydb/core/ymq/actor/queue_schema.cpp +++ b/ydb/core/ymq/actor/queue_schema.cpp @@ -1,4 +1,4 @@ -#include "cfg.h" +#include "cfg.h" #include "executor.h" #include "log.h" #include "params.h" @@ -10,12 +10,12 @@ #include <ydb/core/ymq/queues/std/schema.h> #include <util/digest/city.h> -#include <util/generic/utility.h> +#include <util/generic/utility.h> #include <util/string/join.h> - + using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { constexpr TStringBuf FIFO_TABLES_DIR = ".FIFO"; constexpr TStringBuf STD_TABLES_DIR = ".STD"; @@ -43,8 +43,8 @@ TCreateQueueSchemaActorV2::TCreateQueueSchemaActorV2(const TQueuePath& path, const TString& folderId, const bool isCloudMode, const bool enableQueueAttributesValidation, - TIntrusivePtr<TUserCounters> userCounters, - TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> quoterResources) + TIntrusivePtr<TUserCounters> userCounters, + TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> quoterResources) : QueuePath_(path) , Request_(req) , Sender_(sender) @@ -55,8 +55,8 @@ TCreateQueueSchemaActorV2::TCreateQueueSchemaActorV2(const TQueuePath& path, , QueueCreationTimestamp_(TInstant::Now()) , IsCloudMode_(isCloudMode) , EnableQueueAttributesValidation_(enableQueueAttributesValidation) - , UserCounters_(std::move(userCounters)) - , QuoterResources_(std::move(quoterResources)) + , 
UserCounters_(std::move(userCounters)) + , QuoterResources_(std::move(quoterResources)) { IsFifo_ = AsciiHasSuffixIgnoreCase(IsCloudMode_ ? CustomQueueName_ : QueuePath_.QueueName, ".fifo"); @@ -65,17 +65,17 @@ TCreateQueueSchemaActorV2::TCreateQueueSchemaActorV2(const TQueuePath& path, RequiredTables_ = GetFifoTables(); } else { RequiredShardsCount_ = Request_.GetShards(); - RequiredTables_ = GetStandardTables(RequiredShardsCount_, req.GetPartitions(), req.GetEnableAutosplit(), req.GetSizeToSplit()); + RequiredTables_ = GetStandardTables(RequiredShardsCount_, req.GetPartitions(), req.GetEnableAutosplit(), req.GetSizeToSplit()); } } TCreateQueueSchemaActorV2::~TCreateQueueSchemaActorV2() = default; -static THolder<TSqsEvents::TEvQueueCreated> MakeErrorResponse(const TErrorClass& errorClass) { +static THolder<TSqsEvents::TEvQueueCreated> MakeErrorResponse(const TErrorClass& errorClass) { auto resp = MakeHolder<TSqsEvents::TEvQueueCreated>(); resp->Success = false; resp->State = EQueueState::Active; - resp->ErrorClass = &errorClass; + resp->ErrorClass = &errorClass; return resp; } @@ -105,7 +105,7 @@ void TCreateQueueSchemaActorV2::InitMissingQueueAttributes(const NKikimrConfig:: // RedrivePolicy could be unspecified } -void TCreateQueueSchemaActorV2::Bootstrap() { +void TCreateQueueSchemaActorV2::Bootstrap() { Become(&TCreateQueueSchemaActorV2::Preamble); THashMap<TString, TString> attributes; @@ -114,71 +114,71 @@ void TCreateQueueSchemaActorV2::Bootstrap() { } const bool clampValues = !EnableQueueAttributesValidation_; - ValidatedAttributes_ = TQueueAttributes::FromAttributesAndConfig(attributes, Cfg(), IsFifo_, clampValues); + ValidatedAttributes_ = TQueueAttributes::FromAttributesAndConfig(attributes, Cfg(), IsFifo_, clampValues); if (!ValidatedAttributes_.Validate()) { - auto resp = MakeErrorResponse(NErrors::VALIDATION_ERROR); + auto resp = MakeErrorResponse(NErrors::VALIDATION_ERROR); resp->Error = ValidatedAttributes_.ErrorText; - Send(Sender_, std::move(resp)); - PassAway(); + Send(Sender_, std::move(resp)); + PassAway(); return; } if (ValidatedAttributes_.HasClampedAttributes()) { - RLOG_SQS_WARN("Clamped some queue attribute values for account " << QueuePath_.UserName << " and queue name " << QueuePath_.QueueName); + RLOG_SQS_WARN("Clamped some queue attribute values for account " << QueuePath_.UserName << " and queue name " << QueuePath_.QueueName); } - InitMissingQueueAttributes(Cfg()); + InitMissingQueueAttributes(Cfg()); if (ValidatedAttributes_.RedrivePolicy.TargetQueueName) { const TString createdQueueName = IsCloudMode_ ? 
CustomQueueName_ : QueuePath_.QueueName; - auto resp = MakeErrorResponse(NErrors::VALIDATION_ERROR); + auto resp = MakeErrorResponse(NErrors::VALIDATION_ERROR); if (ValidatedAttributes_.RedrivePolicy.TargetQueueName->empty()) { - resp->Error = "Empty target dead letter queue name."; + resp->Error = "Empty target dead letter queue name."; } else if (*ValidatedAttributes_.RedrivePolicy.TargetQueueName == createdQueueName) { resp->Error = "Using the queue itself as a dead letter queue is not allowed."; } else { - Send(MakeSqsServiceID(SelfId().NodeId()), + Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvGetQueueId(RequestId_, QueuePath_.UserName, *ValidatedAttributes_.RedrivePolicy.TargetQueueName, FolderId_)); return; } - Send(Sender_, std::move(resp)); - PassAway(); + Send(Sender_, std::move(resp)); + PassAway(); return; } - RequestQueueParams(); + RequestQueueParams(); } static const char* const ReadQueueParamsQueryCloud = R"__( ( (let customName (Parameter 'CUSTOMNAME (DataType 'Utf8String))) (let folderId (Parameter 'FOLDERID (DataType 'Utf8String))) - (let defaultMaxQueuesCount (Parameter 'DEFAULT_MAX_QUEUES_COUNT (DataType 'Uint64))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) - - (let queuesTable '%1$s/.Queues) - (let settingsTable '%1$s/.Settings) - - (let maxQueuesCountSettingRow '( - '('Account userName) - '('Name (Utf8String '"MaxQueuesCount")))) - (let maxQueuesCountSettingSelect '( - 'Value)) - (let maxQueuesCountSettingRead (SelectRow settingsTable maxQueuesCountSettingRow maxQueuesCountSettingSelect)) - (let maxQueuesCountSetting (Coalesce (If (Exists maxQueuesCountSettingRead) (Cast (Member maxQueuesCountSettingRead 'Value) 'Uint64) defaultMaxQueuesCount) (Uint64 '0))) - - (let queuesRange '( - '('Account userName userName) + (let defaultMaxQueuesCount (Parameter 'DEFAULT_MAX_QUEUES_COUNT (DataType 'Uint64))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) + + (let queuesTable '%1$s/.Queues) + (let settingsTable '%1$s/.Settings) + + (let maxQueuesCountSettingRow '( + '('Account userName) + '('Name (Utf8String '"MaxQueuesCount")))) + (let maxQueuesCountSettingSelect '( + 'Value)) + (let maxQueuesCountSettingRead (SelectRow settingsTable maxQueuesCountSettingRow maxQueuesCountSettingSelect)) + (let maxQueuesCountSetting (Coalesce (If (Exists maxQueuesCountSettingRead) (Cast (Member maxQueuesCountSettingRead 'Value) 'Uint64) defaultMaxQueuesCount) (Uint64 '0))) + + (let queuesRange '( + '('Account userName userName) '('QueueName (Utf8String '"") (Void)))) (let queues - (Member (SelectRange queuesTable queuesRange '('QueueName 'CustomQueueName 'Version 'FolderId) '()) 'List)) + (Member (SelectRange queuesTable queuesRange '('QueueName 'CustomQueueName 'Version 'FolderId) '()) 'List)) (let overLimit - (LessOrEqual maxQueuesCountSetting (Length queues))) + (LessOrEqual maxQueuesCountSetting (Length queues))) (let existingQueuesWithSameNameAndFolderId (Filter queues (lambda '(item) (block '( @@ -211,47 +211,47 @@ static const char* const ReadQueueParamsQueryCloud = R"__( static const char* const ReadQueueParamsQueryYandex = R"__( ( (let name (Parameter 'NAME (DataType 'Utf8String))) - (let defaultMaxQueuesCount (Parameter 'DEFAULT_MAX_QUEUES_COUNT (DataType 'Uint64))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) - - (let queuesTable '%1$s/.Queues) - (let settingsTable '%1$s/.Settings) - - (let maxQueuesCountSettingRow '( - '('Account userName) - '('Name (Utf8String '"MaxQueuesCount")))) - (let 
maxQueuesCountSettingSelect '( - 'Value)) - (let maxQueuesCountSettingRead (SelectRow settingsTable maxQueuesCountSettingRow maxQueuesCountSettingSelect)) - (let maxQueuesCountSetting (Coalesce (If (Exists maxQueuesCountSettingRead) (Cast (Member maxQueuesCountSettingRead 'Value) 'Uint64) defaultMaxQueuesCount) (Uint64 '0))) - - (let queuesRange '( - '('Account userName userName) + (let defaultMaxQueuesCount (Parameter 'DEFAULT_MAX_QUEUES_COUNT (DataType 'Uint64))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) + + (let queuesTable '%1$s/.Queues) + (let settingsTable '%1$s/.Settings) + + (let maxQueuesCountSettingRow '( + '('Account userName) + '('Name (Utf8String '"MaxQueuesCount")))) + (let maxQueuesCountSettingSelect '( + 'Value)) + (let maxQueuesCountSettingRead (SelectRow settingsTable maxQueuesCountSettingRow maxQueuesCountSettingSelect)) + (let maxQueuesCountSetting (Coalesce (If (Exists maxQueuesCountSettingRead) (Cast (Member maxQueuesCountSettingRead 'Value) 'Uint64) defaultMaxQueuesCount) (Uint64 '0))) + + (let queuesRange '( + '('Account userName userName) '('QueueName (Utf8String '"") (Void)))) (let queues - (Member (SelectRange queuesTable queuesRange '('QueueState) '()) 'List)) + (Member (SelectRange queuesTable queuesRange '('QueueState) '()) 'List)) (let overLimit - (LessOrEqual maxQueuesCountSetting (Length queues))) + (LessOrEqual maxQueuesCountSetting (Length queues))) - (let queuesRow '( - '('Account userName) + (let queuesRow '( + '('Account userName) '('QueueName name))) - (let queuesSelect '( + (let queuesSelect '( 'QueueState 'Version)) - (let queuesRead (SelectRow queuesTable queuesRow queuesSelect)) + (let queuesRead (SelectRow queuesTable queuesRow queuesSelect)) (let queueExists (Coalesce (Or - (Equal (Uint64 '1) (Member queuesRead 'QueueState)) - (Equal (Uint64 '3) (Member queuesRead 'QueueState)) + (Equal (Uint64 '1) (Member queuesRead 'QueueState)) + (Equal (Uint64 '3) (Member queuesRead 'QueueState)) ) (Bool 'false))) (let currentVersion (Coalesce - (Member queuesRead 'Version) + (Member queuesRead 'Version) (Uint64 '0) ) ) @@ -263,62 +263,62 @@ static const char* const ReadQueueParamsQueryYandex = R"__( ) )__"; -void TCreateQueueSchemaActorV2::RequestQueueParams() { +void TCreateQueueSchemaActorV2::RequestQueueParams() { if (IsCloudMode_) { - auto ev = MakeExecuteEvent(Sprintf(ReadQueueParamsQueryCloud, Cfg().GetRoot().c_str())); + auto ev = MakeExecuteEvent(Sprintf(ReadQueueParamsQueryCloud, Cfg().GetRoot().c_str())); auto* trans = ev->Record.MutableTransaction()->MutableMiniKQLTransaction(); TParameters(trans->MutableParams()->MutableProto()) .Utf8("CUSTOMNAME", CustomQueueName_) - .Utf8("FOLDERID", FolderId_) - .Uint64("DEFAULT_MAX_QUEUES_COUNT", Cfg().GetAccountSettingsDefaults().GetMaxQueuesCount()) - .Utf8("USER_NAME", QueuePath_.UserName); + .Utf8("FOLDERID", FolderId_) + .Uint64("DEFAULT_MAX_QUEUES_COUNT", Cfg().GetAccountSettingsDefaults().GetMaxQueuesCount()) + .Utf8("USER_NAME", QueuePath_.UserName); - Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); + Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); } else { - auto ev = MakeExecuteEvent(Sprintf(ReadQueueParamsQueryYandex, Cfg().GetRoot().c_str())); + auto ev = MakeExecuteEvent(Sprintf(ReadQueueParamsQueryYandex, Cfg().GetRoot().c_str())); auto* trans = ev->Record.MutableTransaction()->MutableMiniKQLTransaction(); 
TParameters(trans->MutableParams()->MutableProto()) - .Utf8("NAME", QueuePath_.QueueName) - .Uint64("DEFAULT_MAX_QUEUES_COUNT", Cfg().GetAccountSettingsDefaults().GetMaxQueuesCount()) - .Utf8("USER_NAME", QueuePath_.UserName); + .Utf8("NAME", QueuePath_.QueueName) + .Uint64("DEFAULT_MAX_QUEUES_COUNT", Cfg().GetAccountSettingsDefaults().GetMaxQueuesCount()) + .Utf8("USER_NAME", QueuePath_.UserName); - Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); + Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); } } -STATEFN(TCreateQueueSchemaActorV2::Preamble) { +STATEFN(TCreateQueueSchemaActorV2::Preamble) { switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvQueueId, HandleQueueId); - hFunc(TSqsEvents::TEvExecuted, OnReadQueueParams); - hFunc(TEvQuota::TEvClearance, OnCreateQueueQuota); - hFunc(TSqsEvents::TEvAtomicCounterIncrementResult, OnAtomicCounterIncrement); - cFunc(TEvPoisonPill::EventType, PassAway); + hFunc(TSqsEvents::TEvQueueId, HandleQueueId); + hFunc(TSqsEvents::TEvExecuted, OnReadQueueParams); + hFunc(TEvQuota::TEvClearance, OnCreateQueueQuota); + hFunc(TSqsEvents::TEvAtomicCounterIncrementResult, OnAtomicCounterIncrement); + cFunc(TEvPoisonPill::EventType, PassAway); } } -void TCreateQueueSchemaActorV2::HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev) { - THolder<TSqsEvents::TEvQueueCreated> resp; +void TCreateQueueSchemaActorV2::HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev) { + THolder<TSqsEvents::TEvQueueCreated> resp; if (ev->Get()->Failed) { - RLOG_SQS_WARN("Get queue id failed"); - resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); + RLOG_SQS_WARN("Get queue id failed"); + resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); } else if (!ev->Get()->Exists) { - resp = MakeErrorResponse(NErrors::VALIDATION_ERROR); - resp->Error = "Target DLQ does not exist"; + resp = MakeErrorResponse(NErrors::VALIDATION_ERROR); + resp->Error = "Target DLQ does not exist"; } else { - RequestQueueParams(); + RequestQueueParams(); return; } - Y_VERIFY(resp); - Send(Sender_, std::move(resp)); - PassAway(); + Y_VERIFY(resp); + Send(Sender_, std::move(resp)); + PassAway(); } -void TCreateQueueSchemaActorV2::OnReadQueueParams(TSqsEvents::TEvExecuted::TPtr& ev) { +void TCreateQueueSchemaActorV2::OnReadQueueParams(TSqsEvents::TEvExecuted::TPtr& ev) { const auto& record = ev->Get()->Record; const auto status = record.GetStatus(); - THolder<TSqsEvents::TEvQueueCreated> resp; + THolder<TSqsEvents::TEvQueueCreated> resp; if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); @@ -327,88 +327,88 @@ void TCreateQueueSchemaActorV2::OnReadQueueParams(TSqsEvents::TEvExecuted::TPtr& ExistingQueueResourceId_ = TString(val["resourceId"]); } const ui64 currentVersion = ui64(val["version"]); - MatchQueueAttributes(currentVersion); + MatchQueueAttributes(currentVersion); return; } else { if (bool(val["overLimit"])) { - resp = MakeErrorResponse(NErrors::OVER_LIMIT); - resp->Error = "Too many queues."; + resp = MakeErrorResponse(NErrors::OVER_LIMIT); + resp->Error = "Too many queues."; } else { - if (Cfg().GetQuotingConfig().GetEnableQuoting() && QuoterResources_) { - RequestCreateQueueQuota(); - } else { - RunAtomicCounterIncrement(); - } + if (Cfg().GetQuotingConfig().GetEnableQuoting() && QuoterResources_) { + RequestCreateQueueQuota(); + } 
else { + RunAtomicCounterIncrement(); + } return; } } } else { - resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); - resp->Error = "Failed to read queue params."; + resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); + resp->Error = "Failed to read queue params."; } - Y_VERIFY(resp); - Send(Sender_, std::move(resp)); - PassAway(); -} - -void TCreateQueueSchemaActorV2::RequestCreateQueueQuota() { - TDuration deadline = TDuration::Max(); - const auto& quotingConfig = Cfg().GetQuotingConfig(); - if (quotingConfig.HasQuotaDeadlineMs()) { - deadline = TDuration::MilliSeconds(quotingConfig.GetQuotaDeadlineMs()); - } - Send(MakeQuoterServiceID(), - new TEvQuota::TEvRequest( - TEvQuota::EResourceOperator::And, - { TEvQuota::TResourceLeaf(QuoterResources_->CreateQueueAction.QuoterId, QuoterResources_->CreateQueueAction.ResourceId, 1) }, - deadline)); -} - -void TCreateQueueSchemaActorV2::OnCreateQueueQuota(TEvQuota::TEvClearance::TPtr& ev) { - switch (ev->Get()->Result) { - case TEvQuota::TEvClearance::EResult::GenericError: - case TEvQuota::TEvClearance::EResult::UnknownResource: { - RLOG_SQS_ERROR("Failed to get quota for queue creation: " << ev->Get()->Result); - Send(Sender_, MakeErrorResponse(NErrors::INTERNAL_FAILURE)); - PassAway(); - break; - } - case TEvQuota::TEvClearance::EResult::Deadline: { - RLOG_SQS_WARN("Failed to get quota for queue creation: deadline expired"); - Send(Sender_, MakeErrorResponse(NErrors::THROTTLING_EXCEPTION)); - PassAway(); - break; - } - case TEvQuota::TEvClearance::EResult::Success: { - RLOG_SQS_DEBUG("Successfully got quota for create queue request"); - RunAtomicCounterIncrement(); - break; - } - } -} - -void TCreateQueueSchemaActorV2::RunAtomicCounterIncrement() { - Register(new TAtomicCounterActor(SelfId(), QueuePath_.GetRootPath(), RequestId_)); + Y_VERIFY(resp); + Send(Sender_, std::move(resp)); + PassAway(); } -void TCreateQueueSchemaActorV2::OnAtomicCounterIncrement(TSqsEvents::TEvAtomicCounterIncrementResult::TPtr& ev) { +void TCreateQueueSchemaActorV2::RequestCreateQueueQuota() { + TDuration deadline = TDuration::Max(); + const auto& quotingConfig = Cfg().GetQuotingConfig(); + if (quotingConfig.HasQuotaDeadlineMs()) { + deadline = TDuration::MilliSeconds(quotingConfig.GetQuotaDeadlineMs()); + } + Send(MakeQuoterServiceID(), + new TEvQuota::TEvRequest( + TEvQuota::EResourceOperator::And, + { TEvQuota::TResourceLeaf(QuoterResources_->CreateQueueAction.QuoterId, QuoterResources_->CreateQueueAction.ResourceId, 1) }, + deadline)); +} + +void TCreateQueueSchemaActorV2::OnCreateQueueQuota(TEvQuota::TEvClearance::TPtr& ev) { + switch (ev->Get()->Result) { + case TEvQuota::TEvClearance::EResult::GenericError: + case TEvQuota::TEvClearance::EResult::UnknownResource: { + RLOG_SQS_ERROR("Failed to get quota for queue creation: " << ev->Get()->Result); + Send(Sender_, MakeErrorResponse(NErrors::INTERNAL_FAILURE)); + PassAway(); + break; + } + case TEvQuota::TEvClearance::EResult::Deadline: { + RLOG_SQS_WARN("Failed to get quota for queue creation: deadline expired"); + Send(Sender_, MakeErrorResponse(NErrors::THROTTLING_EXCEPTION)); + PassAway(); + break; + } + case TEvQuota::TEvClearance::EResult::Success: { + RLOG_SQS_DEBUG("Successfully got quota for create queue request"); + RunAtomicCounterIncrement(); + break; + } + } +} + +void TCreateQueueSchemaActorV2::RunAtomicCounterIncrement() { + Register(new TAtomicCounterActor(SelfId(), QueuePath_.GetRootPath(), RequestId_)); +} + +void 
TCreateQueueSchemaActorV2::OnAtomicCounterIncrement(TSqsEvents::TEvAtomicCounterIncrementResult::TPtr& ev) { auto event = ev->Get(); if (event->Success) { Become(&TCreateQueueSchemaActorV2::CreateComponentsState); Version_ = event->NewValue; VersionName_ = "v" + ToString(Version_); // add "v" prefix to provide the difference with deprecated version shards VersionedQueueFullPath_ = TString::Join(QueuePath_.GetQueuePath(), '/', VersionName_); - CreateComponents(); + CreateComponents(); return; } else { - auto resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); - resp->Error = "Failed to create unique id."; + auto resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); + resp->Error = "Failed to create unique id."; resp->State = EQueueState::Creating; - Send(Sender_, std::move(resp)); + Send(Sender_, std::move(resp)); } - PassAway(); + PassAway(); } static const char* const GetTablesFormatQuery = R"__( @@ -445,7 +445,7 @@ void TCreateQueueSchemaActorV2::RequestTablesFormatSettings(const TString& accou Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); } -void TCreateQueueSchemaActorV2::RegisterMakeDirActor(const TString& workingDir, const TString& dirName) { +void TCreateQueueSchemaActorV2::RegisterMakeDirActor(const TString& workingDir, const TString& dirName) { auto ev = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>(); auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); @@ -453,7 +453,7 @@ void TCreateQueueSchemaActorV2::RegisterMakeDirActor(const TString& workingDir, trans->SetOperationType(NKikimrSchemeOp::ESchemeOpMkDir); trans->MutableMkDir()->SetName(dirName); - Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); + Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); } void TCreateQueueSchemaActorV2::RequestLeaderTabletId() { @@ -461,26 +461,26 @@ void TCreateQueueSchemaActorV2::RequestLeaderTabletId() { THolder<TEvTxUserProxy::TEvNavigate> request(new TEvTxUserProxy::TEvNavigate()); request->Record.MutableDescribePath()->SetSchemeshardId(TableWithLeaderPathId_.first); request->Record.MutableDescribePath()->SetPathId(TableWithLeaderPathId_.second); - Send(MakeTxProxyID(), std::move(request)); + Send(MakeTxProxyID(), std::move(request)); } -void TCreateQueueSchemaActorV2::CreateComponents() { +void TCreateQueueSchemaActorV2::CreateComponents() { switch (CurrentCreationStep_) { case ECreateComponentsStep::GetTablesFormatSetting: { RequestTablesFormatSettings(QueuePath_.UserName); break; } case ECreateComponentsStep::MakeQueueDir: { - RegisterMakeDirActor(QueuePath_.GetUserPath(), QueuePath_.QueueName); + RegisterMakeDirActor(QueuePath_.GetUserPath(), QueuePath_.QueueName); break; } case ECreateComponentsStep::MakeQueueVersionDir: { - RegisterMakeDirActor(QueuePath_.GetQueuePath(), VersionName_); + RegisterMakeDirActor(QueuePath_.GetQueuePath(), VersionName_); break; } case ECreateComponentsStep::MakeShards: { for (ui64 shardIdx = 0; shardIdx < RequiredShardsCount_; ++shardIdx) { - RegisterMakeDirActor(VersionedQueueFullPath_, ToString(shardIdx)); + RegisterMakeDirActor(VersionedQueueFullPath_, ToString(shardIdx)); } break; } @@ -491,7 +491,7 @@ void TCreateQueueSchemaActorV2::CreateComponents() { cmd->MutablePartitionConfig()->MutablePipelineConfig()->SetEnableOutOfOrder(Request_.GetEnableOutOfOrderTransactionsExecution()); const 
TActorId actorId = Register(new TMiniKqlExecutionActor( - SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); + SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); if (table.HasLeaderTablet && !CreateTableWithLeaderTabletActorId_) { CreateTableWithLeaderTabletActorId_ = actorId; @@ -508,25 +508,25 @@ void TCreateQueueSchemaActorV2::CreateComponents() { RequestLeaderTabletId(); break; } - case ECreateComponentsStep::AddQuoterResource: { - AddRPSQuota(); - break; - } + case ECreateComponentsStep::AddQuoterResource: { + AddRPSQuota(); + break; + } } } -STATEFN(TCreateQueueSchemaActorV2::CreateComponentsState) { +STATEFN(TCreateQueueSchemaActorV2::CreateComponentsState) { switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvExecuted, OnExecuted); + hFunc(TSqsEvents::TEvExecuted, OnExecuted); hFunc(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult, OnDescribeSchemeResult); hFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, HandleTableDescription); - hFunc(NKesus::TEvKesus::TEvAddQuoterResourceResult, HandleAddQuoterResource); - cFunc(TEvPoisonPill::EventType, PassAway); + hFunc(NKesus::TEvKesus::TEvAddQuoterResourceResult, HandleAddQuoterResource); + cFunc(TEvPoisonPill::EventType, PassAway); } } -void TCreateQueueSchemaActorV2::Step() { - RLOG_SQS_TRACE("Next step. Step: " << (int)CurrentCreationStep_); +void TCreateQueueSchemaActorV2::Step() { + RLOG_SQS_TRACE("Next step. Step: " << (int)CurrentCreationStep_); switch (CurrentCreationStep_) { case ECreateComponentsStep::GetTablesFormatSetting: { CurrentCreationStep_ = ECreateComponentsStep::MakeQueueDir; @@ -573,23 +573,23 @@ void TCreateQueueSchemaActorV2::Step() { break; } case ECreateComponentsStep::DiscoverLeaderTabletId: { - Y_VERIFY(Cfg().GetQuotingConfig().GetEnableQuoting() && Cfg().GetQuotingConfig().HasKesusQuoterConfig()); - CurrentCreationStep_ = ECreateComponentsStep::AddQuoterResource; - break; - } - case ECreateComponentsStep::AddQuoterResource: { + Y_VERIFY(Cfg().GetQuotingConfig().GetEnableQuoting() && Cfg().GetQuotingConfig().HasKesusQuoterConfig()); + CurrentCreationStep_ = ECreateComponentsStep::AddQuoterResource; + break; + } + case ECreateComponentsStep::AddQuoterResource: { Y_VERIFY(false); // unreachable - break; + break; } } - CreateComponents(); + CreateComponents(); } -void TCreateQueueSchemaActorV2::OnExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { +void TCreateQueueSchemaActorV2::OnExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { const auto& record = ev->Get()->Record; const auto status = record.GetStatus(); - RLOG_SQS_TRACE("OnExecuted: " << ev->Get()->Record); + RLOG_SQS_TRACE("OnExecuted: " << ev->Get()->Record); if (ev->Sender == CreateTableWithLeaderTabletActorId_) { CreateTableWithLeaderTabletTxId_ = record.GetTxId(); @@ -600,7 +600,7 @@ void TCreateQueueSchemaActorV2::OnExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { // Note: // SS finishes transaction immediately if the specified path already exists // DO NOT add any special logic based on the result type (except for an error) - if (IsGoodStatusCode(status)) { + if (IsGoodStatusCode(status)) { if (CurrentCreationStep_ == ECreateComponentsStep::GetTablesFormatSetting) { const TValue value(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); const TValue formatValue = value["tablesFormat"]; @@ -623,22 +623,22 @@ void TCreateQueueSchemaActorV2::OnExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { << QueuePath_.UserName << record); } - Step(); + Step(); } else { - 
RLOG_SQS_WARN("Component creation request execution error: " << record); + RLOG_SQS_WARN("Component creation request execution error: " << record); - auto resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); + auto resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); resp->State = EQueueState::Creating; if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::WrongRequest) { - resp->Error = TStringBuilder() << "Missing account: " << QueuePath_.UserName << "."; + resp->Error = TStringBuilder() << "Missing account: " << QueuePath_.UserName << "."; } else { resp->Error = record.GetMiniKQLErrors(); } - Send(Sender_, std::move(resp)); + Send(Sender_, std::move(resp)); - PassAway(); + PassAway(); } } @@ -648,23 +648,23 @@ void TCreateQueueSchemaActorV2::OnDescribeSchemeResult(NSchemeShard::TEvSchemeSh if (ev->Get()->GetRecord().GetStatus() != NKikimrScheme::StatusSuccess || pathDescription.TablePartitionsSize() == 0 || !pathDescription.GetTablePartitions(0).GetDatashardId()) { // fail - auto resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); + auto resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); resp->State = EQueueState::Creating; resp->Error = "Failed to discover leader."; - Send(Sender_, std::move(resp)); + Send(Sender_, std::move(resp)); - PassAway(); + PassAway(); return; } LeaderTabletId_ = pathDescription.GetTablePartitions(0).GetDatashardId(); - if (Cfg().GetQuotingConfig().GetEnableQuoting() && Cfg().GetQuotingConfig().HasKesusQuoterConfig()) { - Step(); - } else { - CommitNewVersion(); - } + if (Cfg().GetQuotingConfig().GetEnableQuoting() && Cfg().GetQuotingConfig().HasKesusQuoterConfig()) { + Step(); + } else { + CommitNewVersion(); + } } void TCreateQueueSchemaActorV2::SendDescribeTable() { @@ -691,33 +691,33 @@ void TCreateQueueSchemaActorV2::HandleTableDescription(TEvTxProxySchemeCache::TE Step(); } -void TCreateQueueSchemaActorV2::AddRPSQuota() { - NKikimrKesus::TEvAddQuoterResource cmd; - auto& res = *cmd.MutableResource(); - res.SetResourcePath(TStringBuilder() << RPS_QUOTA_NAME << "/" << QueuePath_.QueueName); - res.MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(Cfg().GetQuotingConfig().GetKesusQuoterConfig().GetDefaultLimits().GetStdSendMessageRate()); - AddQuoterResourceActor_ = RunAddQuoterResource(TStringBuilder() << QueuePath_.GetUserPath() << "/" << QUOTER_KESUS_NAME, cmd, RequestId_); -} - -void TCreateQueueSchemaActorV2::HandleAddQuoterResource(NKesus::TEvKesus::TEvAddQuoterResourceResult::TPtr& ev) { +void TCreateQueueSchemaActorV2::AddRPSQuota() { + NKikimrKesus::TEvAddQuoterResource cmd; + auto& res = *cmd.MutableResource(); + res.SetResourcePath(TStringBuilder() << RPS_QUOTA_NAME << "/" << QueuePath_.QueueName); + res.MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(Cfg().GetQuotingConfig().GetKesusQuoterConfig().GetDefaultLimits().GetStdSendMessageRate()); + AddQuoterResourceActor_ = RunAddQuoterResource(TStringBuilder() << QueuePath_.GetUserPath() << "/" << QUOTER_KESUS_NAME, cmd, RequestId_); +} + +void TCreateQueueSchemaActorV2::HandleAddQuoterResource(NKesus::TEvKesus::TEvAddQuoterResourceResult::TPtr& ev) { AddQuoterResourceActor_ = TActorId(); - auto status = ev->Get()->Record.GetError().GetStatus(); - if (status == Ydb::StatusIds::SUCCESS || status == Ydb::StatusIds::ALREADY_EXISTS) { - RLOG_SQS_DEBUG("Successfully added quoter resource. 
Id: " << ev->Get()->Record.GetResourceId()); - CommitNewVersion(); - } else { - RLOG_SQS_WARN("Failed to add quoter resource: " << ev->Get()->Record); - auto resp = MakeErrorResponse(status == Ydb::StatusIds::BAD_REQUEST ? NErrors::VALIDATION_ERROR : NErrors::INTERNAL_FAILURE); - resp->State = EQueueState::Creating; - resp->Error = "Failed to add quoter resource."; - - Send(Sender_, std::move(resp)); - - PassAway(); - return; - } -} - + auto status = ev->Get()->Record.GetError().GetStatus(); + if (status == Ydb::StatusIds::SUCCESS || status == Ydb::StatusIds::ALREADY_EXISTS) { + RLOG_SQS_DEBUG("Successfully added quoter resource. Id: " << ev->Get()->Record.GetResourceId()); + CommitNewVersion(); + } else { + RLOG_SQS_WARN("Failed to add quoter resource: " << ev->Get()->Record); + auto resp = MakeErrorResponse(status == Ydb::StatusIds::BAD_REQUEST ? NErrors::VALIDATION_ERROR : NErrors::INTERNAL_FAILURE); + resp->State = EQueueState::Creating; + resp->Error = "Failed to add quoter resource."; + + Send(Sender_, std::move(resp)); + + PassAway(); + return; + } +} + static const char* const CommitQueueParamsQuery = R"__( ( (let name (Parameter 'NAME (DataType 'Utf8String))) @@ -725,7 +725,7 @@ static const char* const CommitQueueParamsQuery = R"__( (let folderId (Parameter 'FOLDERID (DataType 'Utf8String))) (let id (Parameter 'ID (DataType 'String))) (let fifo (Parameter 'FIFO (DataType 'Bool))) - (let contentBasedDeduplication (Parameter 'CONTENT_BASED_DEDUPLICATION (DataType 'Bool))) + (let contentBasedDeduplication (Parameter 'CONTENT_BASED_DEDUPLICATION (DataType 'Bool))) (let now (Parameter 'NOW (DataType 'Uint64))) (let shards (Parameter 'SHARDS (DataType 'Uint64))) (let partitions (Parameter 'PARTITIONS (DataType 'Uint64))) @@ -737,12 +737,12 @@ static const char* const CommitQueueParamsQuery = R"__( (let delay (Parameter 'DELAY (DataType 'Uint64))) (let visibility (Parameter 'VISIBILITY (DataType 'Uint64))) (let retention (Parameter 'RETENTION (DataType 'Uint64))) - (let receiveMessageWaitTime (Parameter 'RECEIVE_MESSAGE_WAIT_TIME (DataType 'Uint64))) + (let receiveMessageWaitTime (Parameter 'RECEIVE_MESSAGE_WAIT_TIME (DataType 'Uint64))) (let dlqArn (Parameter 'DLQ_TARGET_ARN (DataType 'Utf8String))) (let dlqName (Parameter 'DLQ_TARGET_NAME (DataType 'Utf8String))) (let maxReceiveCount (Parameter 'MAX_RECEIVE_COUNT (DataType 'Uint64))) - (let defaultMaxQueuesCount (Parameter 'DEFAULT_MAX_QUEUES_COUNT (DataType 'Uint64))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) + (let defaultMaxQueuesCount (Parameter 'DEFAULT_MAX_QUEUES_COUNT (DataType 'Uint64))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) (let attrsTable '%1$s/Attributes) (let stateTable '%1$s/State) @@ -750,24 +750,24 @@ static const char* const CommitQueueParamsQuery = R"__( (let queuesTable '%2$s/.Queues) (let eventsTable '%2$s/.Events) - (let maxQueuesCountSettingRow '( - '('Account userName) - '('Name (Utf8String '"MaxQueuesCount")))) - (let maxQueuesCountSettingSelect '( - 'Value)) - (let maxQueuesCountSettingRead (SelectRow settingsTable maxQueuesCountSettingRow maxQueuesCountSettingSelect)) - (let maxQueuesCountSetting (Coalesce (If (Exists maxQueuesCountSettingRead) (Cast (Member maxQueuesCountSettingRead 'Value) 'Uint64) defaultMaxQueuesCount) (Uint64 '0))) - - (let queuesRange '( - '('Account userName userName) + (let maxQueuesCountSettingRow '( + '('Account userName) + '('Name (Utf8String '"MaxQueuesCount")))) + (let maxQueuesCountSettingSelect '( + 'Value)) + (let 
maxQueuesCountSettingRead (SelectRow settingsTable maxQueuesCountSettingRow maxQueuesCountSettingSelect)) + (let maxQueuesCountSetting (Coalesce (If (Exists maxQueuesCountSettingRead) (Cast (Member maxQueuesCountSettingRead 'Value) 'Uint64) defaultMaxQueuesCount) (Uint64 '0))) + + (let queuesRange '( + '('Account userName userName) '('QueueName (Utf8String '"") (Void)))) (let queues - (Member (SelectRange queuesTable queuesRange '('QueueName 'CustomQueueName 'Version 'FolderId 'QueueState) '()) 'List)) + (Member (SelectRange queuesTable queuesRange '('QueueName 'CustomQueueName 'Version 'FolderId 'QueueState) '()) 'List)) (let overLimit - (LessOrEqual maxQueuesCountSetting (Length queues))) + (LessOrEqual maxQueuesCountSetting (Length queues))) - (let queuesRow '( - '('Account userName) + (let queuesRow '( + '('Account userName) '('QueueName name))) (let eventsRow '( @@ -775,14 +775,14 @@ static const char* const CommitQueueParamsQuery = R"__( '('QueueName name) '('EventType (Uint64 '1)))) - (let queuesSelect '( + (let queuesSelect '( 'QueueState 'QueueId 'FifoQueue 'Shards 'Partitions 'Version)) - (let queuesRead (SelectRow queuesTable queuesRow queuesSelect)) + (let queuesRead (SelectRow queuesTable queuesRow queuesSelect)) (let existingQueuesWithSameNameAndFolderId (If (Equal (Utf8String '"") customName) @@ -806,8 +806,8 @@ static const char* const CommitQueueParamsQuery = R"__( (Coalesce (Or (Or - (Equal (Uint64 '1) (Member queuesRead 'QueueState)) - (Equal (Uint64 '3) (Member queuesRead 'QueueState)) + (Equal (Uint64 '1) (Member queuesRead 'QueueState)) + (Equal (Uint64 '3) (Member queuesRead 'QueueState)) ) (NotEqual (Utf8String '"") existingResourceId) ) @@ -816,12 +816,12 @@ static const char* const CommitQueueParamsQuery = R"__( (let currentVersion (Coalesce (Member (ToOptional existingQueuesWithSameNameAndFolderId) 'Version) - (Member queuesRead 'Version) + (Member queuesRead 'Version) (Uint64 '0) ) ) - (let queuesUpdate '( + (let queuesUpdate '( '('QueueId id) '('CustomQueueName customName) '('FolderId folderId) @@ -844,12 +844,12 @@ static const char* const CommitQueueParamsQuery = R"__( (let attrRow '(%3$s)) (let attrUpdate '( - '('ContentBasedDeduplication contentBasedDeduplication) + '('ContentBasedDeduplication contentBasedDeduplication) '('DelaySeconds delay) '('FifoQueue fifo) '('MaximumMessageSize maxSize) '('MessageRetentionPeriod retention) - '('ReceiveMessageWaitTime receiveMessageWaitTime) + '('ReceiveMessageWaitTime receiveMessageWaitTime) '('MaxReceiveCount maxReceiveCount) '('DlqArn dlqArn) '('DlqName dlqName) @@ -881,9 +881,9 @@ static const char* const CommitQueueParamsQuery = R"__( (SetResult 'resourceId existingResourceId) (SetResult 'commited willCommit)) - (ListIf queueExists (SetResult 'meta queuesRead)) + (ListIf queueExists (SetResult 'meta queuesRead)) - (ListIf willCommit (UpdateRow queuesTable queuesRow queuesUpdate)) + (ListIf willCommit (UpdateRow queuesTable queuesRow queuesUpdate)) (ListIf willCommit (UpdateRow eventsTable eventsRow eventsUpdate)) (ListIf willCommit (UpdateRow attrsTable attrRow attrUpdate)) @@ -903,7 +903,7 @@ static const char* const CommitQueueParamsQuery = R"__( '('RetentionBoundary (Uint64 '0)) '('ReadOffset (Uint64 '0)) '('WriteOffset (Uint64 '0)) - '('CleanupVersion (Uint64 '0)))) + '('CleanupVersion (Uint64 '0)))) (return (UpdateRow stateTable row update))))))) )) ) @@ -945,7 +945,7 @@ TString GetQueueIdAndShardHashesList(ui64 version, ui32 shards) { return hashes; } -void TCreateQueueSchemaActorV2::CommitNewVersion() { 
+void TCreateQueueSchemaActorV2::CommitNewVersion() { Become(&TCreateQueueSchemaActorV2::FinalizeAndCommit); TString queuePath; @@ -988,25 +988,25 @@ void TCreateQueueSchemaActorV2::CommitNewVersion() { .Uint64("RECEIVE_MESSAGE_WAIT_TIME", SecondsToMs(*ValidatedAttributes_.ReceiveMessageWaitTimeSeconds)) .Utf8("DLQ_TARGET_ARN", ValidatedAttributes_.RedrivePolicy.TargetArn ? *ValidatedAttributes_.RedrivePolicy.TargetArn : "") .Utf8("DLQ_TARGET_NAME", ValidatedAttributes_.RedrivePolicy.TargetQueueName ? *ValidatedAttributes_.RedrivePolicy.TargetQueueName : "") - .Uint64("MAX_RECEIVE_COUNT", ValidatedAttributes_.RedrivePolicy.MaxReceiveCount ? *ValidatedAttributes_.RedrivePolicy.MaxReceiveCount : 0) - .Uint64("DEFAULT_MAX_QUEUES_COUNT", Cfg().GetAccountSettingsDefaults().GetMaxQueuesCount()) - .Utf8("USER_NAME", QueuePath_.UserName); + .Uint64("MAX_RECEIVE_COUNT", ValidatedAttributes_.RedrivePolicy.MaxReceiveCount ? *ValidatedAttributes_.RedrivePolicy.MaxReceiveCount : 0) + .Uint64("DEFAULT_MAX_QUEUES_COUNT", Cfg().GetAccountSettingsDefaults().GetMaxQueuesCount()) + .Utf8("USER_NAME", QueuePath_.UserName); - Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); + Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); } -STATEFN(TCreateQueueSchemaActorV2::FinalizeAndCommit) { +STATEFN(TCreateQueueSchemaActorV2::FinalizeAndCommit) { switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvExecuted, OnCommit); - cFunc(TEvPoisonPill::EventType, PassAway); + hFunc(TSqsEvents::TEvExecuted, OnCommit); + cFunc(TEvPoisonPill::EventType, PassAway); } } -void TCreateQueueSchemaActorV2::OnCommit(TSqsEvents::TEvExecuted::TPtr& ev) { +void TCreateQueueSchemaActorV2::OnCommit(TSqsEvents::TEvExecuted::TPtr& ev) { const auto& record = ev->Get()->Record; const auto status = record.GetStatus(); - auto resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); + auto resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); @@ -1014,29 +1014,29 @@ void TCreateQueueSchemaActorV2::OnCommit(TSqsEvents::TEvExecuted::TPtr& ev) { // a new born queue is here! 
resp->QueueId = GeneratedQueueId_; resp->Success = true; - resp->ErrorClass = nullptr; + resp->ErrorClass = nullptr; } else { // something is off if (bool(val["overLimit"])) { - resp->ErrorClass = &NErrors::OVER_LIMIT; - resp->Error = "Too many queues."; + resp->ErrorClass = &NErrors::OVER_LIMIT; + resp->Error = "Too many queues."; } else if (bool(val["exists"])) { if (IsCloudMode_) { ExistingQueueResourceId_ = TString(val["resourceId"]); } const ui64 currentVersion = ui64(val["version"]); - MatchQueueAttributes(currentVersion); + MatchQueueAttributes(currentVersion); return; } else { Y_VERIFY(false); // unreachable } } } else { - resp->Error = "Failed to commit new queue version."; + resp->Error = "Failed to commit new queue version."; } - Send(Sender_, std::move(resp)); - PassAway(); + Send(Sender_, std::move(resp)); + PassAway(); } static const char* const MatchQueueAttributesQuery = R"__( @@ -1052,21 +1052,21 @@ static const char* const MatchQueueAttributesQuery = R"__( (let retention (Parameter 'RETENTION (DataType 'Uint64))) (let dlqName (Parameter 'DLQ_TARGET_NAME (DataType 'Utf8String))) (let maxReceiveCount (Parameter 'MAX_RECEIVE_COUNT (DataType 'Uint64))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) (let attrsTable '%1$s/%2$s/Attributes) - (let queuesTable '%3$s/.Queues) + (let queuesTable '%3$s/.Queues) - (let queuesRange '( - '('Account userName userName) + (let queuesRange '( + '('Account userName userName) '('QueueName (Utf8String '"") (Void)))) (let queues - (Member (SelectRange queuesTable queuesRange '('QueueState) '()) 'List)) + (Member (SelectRange queuesTable queuesRange '('QueueState) '()) 'List)) - (let queuesRow '( - '('Account userName) + (let queuesRow '( + '('Account userName) '('QueueName name))) - (let queuesSelect '( + (let queuesSelect '( 'QueueState 'QueueId 'FifoQueue @@ -1074,19 +1074,19 @@ static const char* const MatchQueueAttributesQuery = R"__( 'Partitions 'DlqName 'Version)) - (let queuesRead (SelectRow queuesTable queuesRow queuesSelect)) + (let queuesRead (SelectRow queuesTable queuesRow queuesSelect)) (let queueExists (Coalesce (Or - (Equal (Uint64 '1) (Member queuesRead 'QueueState)) - (Equal (Uint64 '3) (Member queuesRead 'QueueState)) + (Equal (Uint64 '1) (Member queuesRead 'QueueState)) + (Equal (Uint64 '3) (Member queuesRead 'QueueState)) ) (Bool 'false))) (let currentVersion (Coalesce - (Member queuesRead 'Version) + (Member queuesRead 'Version) (Uint64 '0) ) ) @@ -1095,10 +1095,10 @@ static const char* const MatchQueueAttributesQuery = R"__( (Coalesce (And (And - (And (Equal (Member queuesRead 'Shards) shards) - (Equal (Member queuesRead 'Partitions) partitions)) - (Equal (Member queuesRead 'FifoQueue) fifo)) - (Equal (Coalesce (Member queuesRead 'DlqName) (Utf8String '"")) dlqName)) + (And (Equal (Member queuesRead 'Shards) shards) + (Equal (Member queuesRead 'Partitions) partitions)) + (Equal (Member queuesRead 'FifoQueue) fifo)) + (Equal (Coalesce (Member queuesRead 'DlqName) (Utf8String '"")) dlqName)) (Bool 'true))) (let attrRow '( @@ -1136,7 +1136,7 @@ static const char* const MatchQueueAttributesQuery = R"__( (let existingQueueId (Coalesce - (Member queuesRead 'QueueId) + (Member queuesRead 'QueueId) (String '""))) (return (AsList @@ -1147,7 +1147,7 @@ static const char* const MatchQueueAttributesQuery = R"__( ) )__"; -void TCreateQueueSchemaActorV2::MatchQueueAttributes(const ui64 currentVersion) { +void 
TCreateQueueSchemaActorV2::MatchQueueAttributes(const ui64 currentVersion) { Become(&TCreateQueueSchemaActorV2::MatchAttributes); TString versionedQueuePath = IsCloudMode_ ? ExistingQueueResourceId_ : QueuePath_.QueueName; @@ -1156,7 +1156,7 @@ void TCreateQueueSchemaActorV2::MatchQueueAttributes(const ui64 currentVersion) versionedQueuePath = TString::Join(versionedQueuePath, "/v", ToString(currentVersion)); } auto ev = MakeExecuteEvent(Sprintf( - MatchQueueAttributesQuery, QueuePath_.GetUserPath().c_str(), versionedQueuePath.c_str(), Cfg().GetRoot().c_str() + MatchQueueAttributesQuery, QueuePath_.GetUserPath().c_str(), versionedQueuePath.c_str(), Cfg().GetRoot().c_str() )); auto* trans = ev->Record.MutableTransaction()->MutableMiniKQLTransaction(); TParameters(trans->MutableParams()->MutableProto()) @@ -1170,90 +1170,90 @@ void TCreateQueueSchemaActorV2::MatchQueueAttributes(const ui64 currentVersion) .Uint64("VISIBILITY", SecondsToMs(*ValidatedAttributes_.VisibilityTimeout)) .Uint64("RETENTION", SecondsToMs(*ValidatedAttributes_.MessageRetentionPeriod)) .Utf8("DLQ_TARGET_NAME", ValidatedAttributes_.RedrivePolicy.TargetQueueName ? *ValidatedAttributes_.RedrivePolicy.TargetQueueName : "") - .Uint64("MAX_RECEIVE_COUNT", ValidatedAttributes_.RedrivePolicy.MaxReceiveCount ? *ValidatedAttributes_.RedrivePolicy.MaxReceiveCount : 0) - .Utf8("USER_NAME", QueuePath_.UserName); + .Uint64("MAX_RECEIVE_COUNT", ValidatedAttributes_.RedrivePolicy.MaxReceiveCount ? *ValidatedAttributes_.RedrivePolicy.MaxReceiveCount : 0) + .Utf8("USER_NAME", QueuePath_.UserName); - Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); + Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); } -STATEFN(TCreateQueueSchemaActorV2::MatchAttributes) { +STATEFN(TCreateQueueSchemaActorV2::MatchAttributes) { switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvExecuted, OnAttributesMatch); - cFunc(TEvPoisonPill::EventType, PassAway); + hFunc(TSqsEvents::TEvExecuted, OnAttributesMatch); + cFunc(TEvPoisonPill::EventType, PassAway); } } -void TCreateQueueSchemaActorV2::OnAttributesMatch(TSqsEvents::TEvExecuted::TPtr& ev) { +void TCreateQueueSchemaActorV2::OnAttributesMatch(TSqsEvents::TEvExecuted::TPtr& ev) { const auto& record = ev->Get()->Record; const auto status = record.GetStatus(); - auto resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); + auto resp = MakeErrorResponse(NErrors::INTERNAL_FAILURE); if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); if (bool(val["exists"])) { resp->AlreadyExists = true; - resp->ErrorClass = nullptr; + resp->ErrorClass = nullptr; resp->ExistingQueueResourceId = IsCloudMode_ ? 
ExistingQueueResourceId_ : QueuePath_.QueueName; const bool isSame = bool(val["isSame"]); if (isSame || !EnableQueueAttributesValidation_) { resp->Success = true; - resp->ErrorClass = nullptr; + resp->ErrorClass = nullptr; resp->QueueId = TString(val["id"]); if (!isSame) { - RLOG_SQS_WARN("Queue attributes do not match for account " << QueuePath_.UserName << " and queue name " << QueuePath_.QueueName); + RLOG_SQS_WARN("Queue attributes do not match for account " << QueuePath_.UserName << " and queue name " << QueuePath_.QueueName); } } else { - resp->Error = "queue with specified name already exists and has different attributes."; - resp->ErrorClass = &NErrors::VALIDATION_ERROR; + resp->Error = "queue with specified name already exists and has different attributes."; + resp->ErrorClass = &NErrors::VALIDATION_ERROR; } if (CurrentCreationStep_ == ECreateComponentsStep::DiscoverLeaderTabletId) { // call the special version of cleanup actor - RLOG_SQS_WARN("Removing redundant queue version: " << Version_ << " for queue " << + RLOG_SQS_WARN("Removing redundant queue version: " << Version_ << " for queue " << QueuePath_.GetQueuePath() << ". Shards: " << RequiredShardsCount_ << " IsFifo: " << IsFifo_); - Register(new TDeleteQueueSchemaActorV2(QueuePath_, SelfId(), RequestId_, UserCounters_, + Register(new TDeleteQueueSchemaActorV2(QueuePath_, SelfId(), RequestId_, UserCounters_, Version_, RequiredShardsCount_, IsFifo_)); } } else { - resp->Error = "Queue was removed recently."; - resp->ErrorClass = &NErrors::QUEUE_DELETED_RECENTLY; - resp->State = EQueueState::Deleting; + resp->Error = "Queue was removed recently."; + resp->ErrorClass = &NErrors::QUEUE_DELETED_RECENTLY; + resp->State = EQueueState::Deleting; } } else { - resp->Error = "Failed to compare queue attributes."; + resp->Error = "Failed to compare queue attributes."; } - Send(Sender_, std::move(resp)); - PassAway(); + Send(Sender_, std::move(resp)); + PassAway(); } -void TCreateQueueSchemaActorV2::PassAway() { - if (AddQuoterResourceActor_) { - Send(AddQuoterResourceActor_, new TEvPoisonPill()); +void TCreateQueueSchemaActorV2::PassAway() { + if (AddQuoterResourceActor_) { + Send(AddQuoterResourceActor_, new TEvPoisonPill()); AddQuoterResourceActor_ = TActorId(); - } - TActorBootstrapped<TCreateQueueSchemaActorV2>::PassAway(); -} - + } + TActorBootstrapped<TCreateQueueSchemaActorV2>::PassAway(); +} + TDeleteQueueSchemaActorV2::TDeleteQueueSchemaActorV2(const TQueuePath& path, const TActorId& sender, const TString& requestId, - TIntrusivePtr<TUserCounters> userCounters) + TIntrusivePtr<TUserCounters> userCounters) : QueuePath_(path) , Sender_(sender) , SI_(0) , RequestId_(requestId) - , UserCounters_(std::move(userCounters)) + , UserCounters_(std::move(userCounters)) { } TDeleteQueueSchemaActorV2::TDeleteQueueSchemaActorV2(const TQueuePath& path, const TActorId& sender, const TString& requestId, - TIntrusivePtr<TUserCounters> userCounters, + TIntrusivePtr<TUserCounters> userCounters, const ui64 advisedQueueVersion, const ui64 advisedShardCount, const bool advisedIsFifoFlag) @@ -1261,7 +1261,7 @@ TDeleteQueueSchemaActorV2::TDeleteQueueSchemaActorV2(const TQueuePath& path, , Sender_(sender) , SI_(static_cast<ui32>(EDeleting::RemoveTables)) , RequestId_(requestId) - , UserCounters_(std::move(userCounters)) + , UserCounters_(std::move(userCounters)) { Y_VERIFY(advisedQueueVersion > 0); @@ -1270,8 +1270,8 @@ TDeleteQueueSchemaActorV2::TDeleteQueueSchemaActorV2(const TQueuePath& path, PrepareCleanupPlan(advisedIsFifoFlag, advisedShardCount); } 
-void TDeleteQueueSchemaActorV2::Bootstrap() { - NextAction(); +void TDeleteQueueSchemaActorV2::Bootstrap() { + NextAction(); Become(&TThis::StateFunc); } @@ -1298,21 +1298,21 @@ static TString GetVersionedQueueDir(const TString& baseQueueDir, const ui64 vers static const char* EraseQueueRecordQuery = R"__( ( (let name (Parameter 'NAME (DataType 'Utf8String))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) (let now (Parameter 'NOW (DataType 'Uint64))) - (let queuesTable '%2$s/.Queues) + (let queuesTable '%2$s/.Queues) (let eventsTable '%2$s/.Events) - (let queuesRow '( - '('Account userName) + (let queuesRow '( + '('Account userName) '('QueueName name))) (let eventsRow '( '('Account userName) '('QueueName name) '('EventType (Uint64 '0)))) - (let queuesSelect '( + (let queuesSelect '( 'QueueState 'Version 'FifoQueue @@ -1320,11 +1320,11 @@ static const char* EraseQueueRecordQuery = R"__( 'CustomQueueName 'CreatedTimestamp 'FolderId)) - (let queuesRead (SelectRow queuesTable queuesRow queuesSelect)) + (let queuesRead (SelectRow queuesTable queuesRow queuesSelect)) (let currentVersion (Coalesce - (Member queuesRead 'Version) + (Member queuesRead 'Version) (Uint64 '0) ) ) @@ -1354,8 +1354,8 @@ static const char* EraseQueueRecordQuery = R"__( (let queueExists (Coalesce (Or - (Equal (Uint64 '1) (Member queuesRead 'QueueState)) - (Equal (Uint64 '3) (Member queuesRead 'QueueState)) + (Equal (Uint64 '1) (Member queuesRead 'QueueState)) + (Equal (Uint64 '3) (Member queuesRead 'QueueState)) ) (Bool 'false))) @@ -1367,66 +1367,66 @@ static const char* EraseQueueRecordQuery = R"__( (return (AsList (SetResult 'exists queueExists) (SetResult 'version currentVersion) - (SetResult 'fields queuesRead) + (SetResult 'fields queuesRead) (If queueExists (UpdateRow eventsTable eventsRow eventsUpdate) (Void)) - (If queueExists (EraseRow queuesTable queuesRow) (Void)))) + (If queueExists (EraseRow queuesTable queuesRow) (Void)))) ) )__"; -void TDeleteQueueSchemaActorV2::NextAction() { +void TDeleteQueueSchemaActorV2::NextAction() { switch (EDeleting(SI_)) { case EDeleting::EraseQueueRecord: { auto ev = MakeExecuteEvent(Sprintf(EraseQueueRecordQuery, QueuePath_.GetUserPath().c_str(), Cfg().GetRoot().c_str())); auto* trans = ev->Record.MutableTransaction()->MutableMiniKQLTransaction(); auto nowMs = TInstant::Now().MilliSeconds(); TParameters(trans->MutableParams()->MutableProto()) - .Utf8("NAME", QueuePath_.QueueName) + .Utf8("NAME", QueuePath_.QueueName) .Utf8("USER_NAME", QueuePath_.UserName) .Uint64("NOW", nowMs); - Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); + Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), false, QueuePath_, GetTransactionCounters(UserCounters_))); break; } case EDeleting::RemoveTables: { - Y_VERIFY(!Tables_.empty()); + Y_VERIFY(!Tables_.empty()); - Register(new TMiniKqlExecutionActor( - SelfId(), RequestId_, MakeDeleteTableEvent(GetVersionedQueueDir(QueuePath_, Version_), Tables_.back()), false, QueuePath_, GetTransactionCounters(UserCounters_)) + Register(new TMiniKqlExecutionActor( + SelfId(), RequestId_, MakeDeleteTableEvent(GetVersionedQueueDir(QueuePath_, Version_), Tables_.back()), false, QueuePath_, GetTransactionCounters(UserCounters_)) ); break; } case EDeleting::RemoveShards: { - Register(new TMiniKqlExecutionActor( - SelfId(), RequestId_, 
MakeRemoveDirectoryEvent(GetVersionedQueueDir(QueuePath_, Version_), ToString(Shards_.back())), false, QueuePath_, GetTransactionCounters(UserCounters_)) + Register(new TMiniKqlExecutionActor( + SelfId(), RequestId_, MakeRemoveDirectoryEvent(GetVersionedQueueDir(QueuePath_, Version_), ToString(Shards_.back())), false, QueuePath_, GetTransactionCounters(UserCounters_)) ); break; } case EDeleting::RemoveQueueVersionDirectory: { - Register(new TMiniKqlExecutionActor( - SelfId(), RequestId_, MakeRemoveDirectoryEvent(QueuePath_, TString::Join("v", ToString(Version_))), false, QueuePath_, GetTransactionCounters(UserCounters_)) + Register(new TMiniKqlExecutionActor( + SelfId(), RequestId_, MakeRemoveDirectoryEvent(QueuePath_, TString::Join("v", ToString(Version_))), false, QueuePath_, GetTransactionCounters(UserCounters_)) ); break; } case EDeleting::RemoveQueueDirectory: { // this may silently fail for versioned queues - Register(new TMiniKqlExecutionActor( - SelfId(), RequestId_, MakeRemoveDirectoryEvent(QueuePath_.GetUserPath(), QueuePath_.QueueName), false, QueuePath_, GetTransactionCounters(UserCounters_)) + Register(new TMiniKqlExecutionActor( + SelfId(), RequestId_, MakeRemoveDirectoryEvent(QueuePath_.GetUserPath(), QueuePath_.QueueName), false, QueuePath_, GetTransactionCounters(UserCounters_)) ); break; } - case EDeleting::DeleteQuoterResource: { - DeleteRPSQuota(); - break; - } + case EDeleting::DeleteQuoterResource: { + DeleteRPSQuota(); + break; + } case EDeleting::Finish: { - Send(Sender_, MakeHolder<TSqsEvents::TEvQueueDeleted>(QueuePath_, true)); - PassAway(); + Send(Sender_, MakeHolder<TSqsEvents::TEvQueueDeleted>(QueuePath_, true)); + PassAway(); break; } } } -void TDeleteQueueSchemaActorV2::DoSuccessOperation() { +void TDeleteQueueSchemaActorV2::DoSuccessOperation() { if (EDeleting(SI_) == EDeleting::RemoveTables) { Tables_.pop_back(); @@ -1445,23 +1445,23 @@ void TDeleteQueueSchemaActorV2::DoSuccessOperation() { } } else { SI_++; - if ((!Cfg().GetQuotingConfig().GetEnableQuoting() || !Cfg().GetQuotingConfig().HasKesusQuoterConfig()) && EDeleting(SI_) == EDeleting::DeleteQuoterResource) { - SI_++; - } + if ((!Cfg().GetQuotingConfig().GetEnableQuoting() || !Cfg().GetQuotingConfig().HasKesusQuoterConfig()) && EDeleting(SI_) == EDeleting::DeleteQuoterResource) { + SI_++; + } } - NextAction(); + NextAction(); } -void TDeleteQueueSchemaActorV2::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { +void TDeleteQueueSchemaActorV2::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { const auto& record = ev->Get()->Record; - if (IsGoodStatusCode(record.GetStatus())) { + if (IsGoodStatusCode(record.GetStatus())) { if (EDeleting(SI_) == EDeleting::EraseQueueRecord) { const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); if (!bool(val["exists"])) { - Send(Sender_, + Send(Sender_, MakeHolder<TSqsEvents::TEvQueueDeleted>(QueuePath_, false, "Queue does not exist.")); - PassAway(); + PassAway(); return; } else { Version_ = ui64(val["version"]); @@ -1470,50 +1470,50 @@ void TDeleteQueueSchemaActorV2::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev } } - DoSuccessOperation(); + DoSuccessOperation(); } else { - RLOG_SQS_WARN("request execution error: " << record); - - if (EDeleting(SI_) == EDeleting::EraseQueueRecord) { - Send(Sender_, - MakeHolder<TSqsEvents::TEvQueueDeleted>(QueuePath_, false, "Failed to erase queue record.")); - PassAway(); - return; - } - + RLOG_SQS_WARN("request execution error: " << record); + + if (EDeleting(SI_) == EDeleting::EraseQueueRecord) { + 
Send(Sender_, + MakeHolder<TSqsEvents::TEvQueueDeleted>(QueuePath_, false, "Failed to erase queue record.")); + PassAway(); + return; + } + // we don't really care if some components are already deleted - DoSuccessOperation(); + DoSuccessOperation(); } } -void TDeleteQueueSchemaActorV2::DeleteRPSQuota() { - NKikimrKesus::TEvDeleteQuoterResource cmd; - cmd.SetResourcePath(TStringBuilder() << RPS_QUOTA_NAME << "/" << QueuePath_.QueueName); - DeleteQuoterResourceActor_ = RunDeleteQuoterResource(TStringBuilder() << QueuePath_.GetUserPath() << "/" << QUOTER_KESUS_NAME, cmd, RequestId_); -} - -void TDeleteQueueSchemaActorV2::HandleDeleteQuoterResource(NKesus::TEvKesus::TEvDeleteQuoterResourceResult::TPtr& ev) { +void TDeleteQueueSchemaActorV2::DeleteRPSQuota() { + NKikimrKesus::TEvDeleteQuoterResource cmd; + cmd.SetResourcePath(TStringBuilder() << RPS_QUOTA_NAME << "/" << QueuePath_.QueueName); + DeleteQuoterResourceActor_ = RunDeleteQuoterResource(TStringBuilder() << QueuePath_.GetUserPath() << "/" << QUOTER_KESUS_NAME, cmd, RequestId_); +} + +void TDeleteQueueSchemaActorV2::HandleDeleteQuoterResource(NKesus::TEvKesus::TEvDeleteQuoterResourceResult::TPtr& ev) { DeleteQuoterResourceActor_ = TActorId(); - auto status = ev->Get()->Record.GetError().GetStatus(); - if (status == Ydb::StatusIds::SUCCESS || status == Ydb::StatusIds::NOT_FOUND) { - RLOG_SQS_DEBUG("Successfully deleted quoter resource"); - - DoSuccessOperation(); - } else { - RLOG_SQS_WARN("Failed to delete quoter resource: " << ev->Get()->Record); - - Send(Sender_, - MakeHolder<TSqsEvents::TEvQueueDeleted>(QueuePath_, false, "Failed to delete RPS quoter resource.")); - PassAway(); - } -} - -void TDeleteQueueSchemaActorV2::PassAway() { - if (DeleteQuoterResourceActor_) { - Send(DeleteQuoterResourceActor_, new TEvPoisonPill()); + auto status = ev->Get()->Record.GetError().GetStatus(); + if (status == Ydb::StatusIds::SUCCESS || status == Ydb::StatusIds::NOT_FOUND) { + RLOG_SQS_DEBUG("Successfully deleted quoter resource"); + + DoSuccessOperation(); + } else { + RLOG_SQS_WARN("Failed to delete quoter resource: " << ev->Get()->Record); + + Send(Sender_, + MakeHolder<TSqsEvents::TEvQueueDeleted>(QueuePath_, false, "Failed to delete RPS quoter resource.")); + PassAway(); + } +} + +void TDeleteQueueSchemaActorV2::PassAway() { + if (DeleteQuoterResourceActor_) { + Send(DeleteQuoterResourceActor_, new TEvPoisonPill()); DeleteQuoterResourceActor_ = TActorId(); - } - TActorBootstrapped<TDeleteQueueSchemaActorV2>::PassAway(); -} - -} // namespace NKikimr::NSQS + } + TActorBootstrapped<TDeleteQueueSchemaActorV2>::PassAway(); +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/queue_schema.h b/ydb/core/ymq/actor/queue_schema.h index 793ba1c06ad..a5737f64232 100644 --- a/ydb/core/ymq/actor/queue_schema.h +++ b/ydb/core/ymq/actor/queue_schema.h @@ -1,5 +1,5 @@ #pragma once -#include "defs.h" +#include "defs.h" #include "schema.h" #include <ydb/core/base/quoter.h> @@ -10,10 +10,10 @@ #include <util/generic/maybe.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TCreateQueueSchemaActorV2 - : public TActorBootstrapped<TCreateQueueSchemaActorV2> + : public TActorBootstrapped<TCreateQueueSchemaActorV2> { public: TCreateQueueSchemaActorV2(const TQueuePath& path, @@ -24,41 +24,41 @@ public: const TString& folderId, const bool isCloudMode, const bool enableQueueAttributesValidation, - TIntrusivePtr<TUserCounters> userCounters, - TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> quoterResources); + TIntrusivePtr<TUserCounters> 
userCounters, + TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> quoterResources); ~TCreateQueueSchemaActorV2(); void InitMissingQueueAttributes(const NKikimrConfig::TSqsConfig& config); - void Bootstrap(); + void Bootstrap(); - void RequestQueueParams(); + void RequestQueueParams(); - STATEFN(Preamble); + STATEFN(Preamble); - void HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev); + void HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev); - void OnReadQueueParams(TSqsEvents::TEvExecuted::TPtr& ev); + void OnReadQueueParams(TSqsEvents::TEvExecuted::TPtr& ev); - void RunAtomicCounterIncrement(); - void OnAtomicCounterIncrement(TSqsEvents::TEvAtomicCounterIncrementResult::TPtr& ev); - - void RequestCreateQueueQuota(); - void OnCreateQueueQuota(TEvQuota::TEvClearance::TPtr& ev); + void RunAtomicCounterIncrement(); + void OnAtomicCounterIncrement(TSqsEvents::TEvAtomicCounterIncrementResult::TPtr& ev); + void RequestCreateQueueQuota(); + void OnCreateQueueQuota(TEvQuota::TEvClearance::TPtr& ev); + void RequestTablesFormatSettings(const TString& accountName); - void RegisterMakeDirActor(const TString& workingDir, const TString& dirName); + void RegisterMakeDirActor(const TString& workingDir, const TString& dirName); void RequestLeaderTabletId(); - void CreateComponents(); + void CreateComponents(); - STATEFN(CreateComponentsState); + STATEFN(CreateComponentsState); - void Step(); + void Step(); - void OnExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void OnExecuted(TSqsEvents::TEvExecuted::TPtr& ev); void OnDescribeSchemeResult(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult::TPtr& ev); @@ -68,24 +68,24 @@ public: template <typename T> T GetAttribute(const TStringBuf name, const T& defaultValue) const; - void CommitNewVersion(); - - STATEFN(FinalizeAndCommit); - - void OnCommit(TSqsEvents::TEvExecuted::TPtr& ev); - - void MatchQueueAttributes(const ui64 currentVersion); + void CommitNewVersion(); - STATEFN(MatchAttributes); + STATEFN(FinalizeAndCommit); - void OnAttributesMatch(TSqsEvents::TEvExecuted::TPtr& ev); + void OnCommit(TSqsEvents::TEvExecuted::TPtr& ev); - void AddRPSQuota(); + void MatchQueueAttributes(const ui64 currentVersion); - void HandleAddQuoterResource(NKesus::TEvKesus::TEvAddQuoterResourceResult::TPtr& ev); + STATEFN(MatchAttributes); - void PassAway() override; + void OnAttributesMatch(TSqsEvents::TEvExecuted::TPtr& ev); + void AddRPSQuota(); + + void HandleAddQuoterResource(NKesus::TEvKesus::TEvAddQuoterResourceResult::TPtr& ev); + + void PassAway() override; + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_ACTOR; } @@ -99,7 +99,7 @@ private: MakeTables, DescribeTableForSetSchemeShardId, DiscoverLeaderTabletId, - AddQuoterResource, + AddQuoterResource, }; const TQueuePath QueuePath_; @@ -116,13 +116,13 @@ private: bool EnableQueueAttributesValidation_ = true; ui32 TablesFormat_ = 0; - ui64 Version_ = 0; + ui64 Version_ = 0; TString VersionName_; TString VersionedQueueFullPath_; TString ExistingQueueResourceId_; - TIntrusivePtr<TUserCounters> UserCounters_; - TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> QuoterResources_; + TIntrusivePtr<TUserCounters> UserCounters_; + TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> QuoterResources_; ui64 RequiredShardsCount_ = 0; ui64 CreatedShardsCount_ = 0; TVector<TTable> RequiredTables_; @@ -135,28 +135,28 @@ private: std::pair<ui64, ui64> TableWithLeaderPathId_ = std::make_pair(0, 0); // (scheme shard, path id) are required for describing table 
ECreateComponentsStep CurrentCreationStep_ = ECreateComponentsStep::GetTablesFormatSetting; - + TActorId AddQuoterResourceActor_; }; class TDeleteQueueSchemaActorV2 - : public TActorBootstrapped<TDeleteQueueSchemaActorV2> + : public TActorBootstrapped<TDeleteQueueSchemaActorV2> { public: TDeleteQueueSchemaActorV2(const TQueuePath& path, const TActorId& sender, const TString& requestId, - TIntrusivePtr<TUserCounters> userCounters); + TIntrusivePtr<TUserCounters> userCounters); TDeleteQueueSchemaActorV2(const TQueuePath& path, const TActorId& sender, const TString& requestId, - TIntrusivePtr<TUserCounters> userCounters, + TIntrusivePtr<TUserCounters> userCounters, const ui64 advisedQueueVersion, const ui64 advisedShardCount, const bool advisedIsFifoFlag); - void Bootstrap(); + void Bootstrap(); static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_ACTOR; @@ -165,24 +165,24 @@ public: private: void PrepareCleanupPlan(const bool isFifo, const ui64 shardCount); - void NextAction(); - - void DoSuccessOperation(); + void NextAction(); - void DeleteRPSQuota(); + void DoSuccessOperation(); + void DeleteRPSQuota(); + private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - hFunc(NKesus::TEvKesus::TEvDeleteQuoterResourceResult, HandleDeleteQuoterResource); - cFunc(TEvPoisonPill::EventType, PassAway); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(NKesus::TEvKesus::TEvDeleteQuoterResourceResult, HandleDeleteQuoterResource); + cFunc(TEvPoisonPill::EventType, PassAway); } } - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - void HandleDeleteQuoterResource(NKesus::TEvKesus::TEvDeleteQuoterResourceResult::TPtr& ev); - void PassAway() override; + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void HandleDeleteQuoterResource(NKesus::TEvKesus::TEvDeleteQuoterResourceResult::TPtr& ev); + void PassAway() override; private: enum class EDeleting : ui32 { @@ -191,7 +191,7 @@ private: RemoveShards, RemoveQueueVersionDirectory, RemoveQueueDirectory, - DeleteQuoterResource, + DeleteQuoterResource, Finish, }; @@ -201,9 +201,9 @@ private: TVector<int> Shards_; ui32 SI_; const TString RequestId_; - TIntrusivePtr<TUserCounters> UserCounters_; + TIntrusivePtr<TUserCounters> UserCounters_; ui64 Version_ = 0; TActorId DeleteQuoterResourceActor_; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/queues_list_reader.cpp b/ydb/core/ymq/actor/queues_list_reader.cpp index 37b8b734263..6cbdf93f9dc 100644 --- a/ydb/core/ymq/actor/queues_list_reader.cpp +++ b/ydb/core/ymq/actor/queues_list_reader.cpp @@ -1,150 +1,150 @@ -#include "queues_list_reader.h" -#include "cfg.h" -#include "executor.h" -#include "events.h" - -namespace NKikimr::NSQS { - -TQueuesListReader::TQueuesListReader(const TIntrusivePtr<TTransactionCounters>& transactionCounters) - : TActor(&TQueuesListReader::StateFunc) - , TransactionCounters(transactionCounters) -{ -} - -TQueuesListReader::~TQueuesListReader() { -} - -STATEFN(TQueuesListReader::StateFunc) { - switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvReadQueuesList, HandleReadQueuesList); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - default: - LOG_SQS_ERROR("Unknown type of event came to SQS user settings reader actor: " << ev->Type << " (" << ev->GetBase()->ToString() << "), sender: " << ev->Sender); - } -} - -void 
TQueuesListReader::HandleReadQueuesList(TSqsEvents::TEvReadQueuesList::TPtr& ev) { +#include "queues_list_reader.h" +#include "cfg.h" +#include "executor.h" +#include "events.h" + +namespace NKikimr::NSQS { + +TQueuesListReader::TQueuesListReader(const TIntrusivePtr<TTransactionCounters>& transactionCounters) + : TActor(&TQueuesListReader::StateFunc) + , TransactionCounters(transactionCounters) +{ +} + +TQueuesListReader::~TQueuesListReader() { +} + +STATEFN(TQueuesListReader::StateFunc) { + switch (ev->GetTypeRewrite()) { + hFunc(TSqsEvents::TEvReadQueuesList, HandleReadQueuesList); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + default: + LOG_SQS_ERROR("Unknown type of event came to SQS user settings reader actor: " << ev->Type << " (" << ev->GetBase()->ToString() << "), sender: " << ev->Sender); + } +} + +void TQueuesListReader::HandleReadQueuesList(TSqsEvents::TEvReadQueuesList::TPtr& ev) { Recipients.insert(ev->Sender); - if (!ListingQueues) { - ListingQueues = true; - Result = MakeHolder<TSqsEvents::TEvQueuesList>(); - CurrentUser = TString(); - CurrentQueue = TString(); - if (CompiledQuery) { - NextRequest(); - } else { - CompileRequest(); - } - } -} - -void TQueuesListReader::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - ev->Get()->Call(); -} - -void TQueuesListReader::CompileRequest() { - TExecutorBuilder(SelfId(), "") - .Mode(NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE) - .QueryId(GET_QUEUES_LIST_ID) - .RetryOnTimeout() - .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnRequestCompiled(ev); }) - .Counters(TransactionCounters) - .Start(); -} - -void TQueuesListReader::OnRequestCompiled(const TSqsEvents::TEvExecuted::TRecord& record) { - LOG_SQS_TRACE("Handle compiled get queues list query: " << record); - if (record.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - CompiledQuery = record.GetMiniKQLCompileResults().GetCompiledProgram(); - NextRequest(); - } else { - LOG_SQS_WARN("Get queues list request compilation failed: " << record); + if (!ListingQueues) { + ListingQueues = true; + Result = MakeHolder<TSqsEvents::TEvQueuesList>(); + CurrentUser = TString(); + CurrentQueue = TString(); + if (CompiledQuery) { + NextRequest(); + } else { + CompileRequest(); + } + } +} + +void TQueuesListReader::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + ev->Get()->Call(); +} + +void TQueuesListReader::CompileRequest() { + TExecutorBuilder(SelfId(), "") + .Mode(NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE) + .QueryId(GET_QUEUES_LIST_ID) + .RetryOnTimeout() + .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnRequestCompiled(ev); }) + .Counters(TransactionCounters) + .Start(); +} + +void TQueuesListReader::OnRequestCompiled(const TSqsEvents::TEvExecuted::TRecord& record) { + LOG_SQS_TRACE("Handle compiled get queues list query: " << record); + if (record.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + CompiledQuery = record.GetMiniKQLCompileResults().GetCompiledProgram(); + NextRequest(); + } else { + LOG_SQS_WARN("Get queues list request compilation failed: " << record); Fail(); - } -} - -void TQueuesListReader::NextRequest() { - TExecutorBuilder(SelfId(), "") - .QueryId(GET_QUEUES_LIST_ID) - .Bin(CompiledQuery) - .RetryOnTimeout() - .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnQueuesList(ev); }) - .Counters(TransactionCounters) - .Params() - .Utf8("FROM_USER", CurrentUser) - .Utf8("FROM_QUEUE", CurrentQueue) - .Uint64("BATCH_SIZE", 
Cfg().GetQueuesListReadBatchSize()) - .ParentBuilder().Start(); -} - -void TQueuesListReader::OnQueuesList(const TSqsEvents::TEvExecuted::TRecord& record) { - LOG_SQS_TRACE("Handle queues list: " << record); - if (record.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); - const TValue queuesVal(val["queues"]); - const bool cloudMode = Cfg().GetYandexCloudMode(); - std::tuple<TString, TString> maxUserQueue; - for (size_t i = 0; i < queuesVal.Size(); ++i) { - const TValue row = queuesVal[i]; - TString user = row["Account"]; - TString queue = row["QueueName"]; - if (user == CurrentUser && queue == CurrentQueue) { - continue; - } - - if (std::tie(user, queue) > maxUserQueue) { - std::get<0>(maxUserQueue) = user; - std::get<1>(maxUserQueue) = queue; - } - - const ui64 state = row["QueueState"]; - if (state != 1 && state != 3) { // not finished queue creation - continue; - } - + } +} + +void TQueuesListReader::NextRequest() { + TExecutorBuilder(SelfId(), "") + .QueryId(GET_QUEUES_LIST_ID) + .Bin(CompiledQuery) + .RetryOnTimeout() + .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnQueuesList(ev); }) + .Counters(TransactionCounters) + .Params() + .Utf8("FROM_USER", CurrentUser) + .Utf8("FROM_QUEUE", CurrentQueue) + .Uint64("BATCH_SIZE", Cfg().GetQueuesListReadBatchSize()) + .ParentBuilder().Start(); +} + +void TQueuesListReader::OnQueuesList(const TSqsEvents::TEvExecuted::TRecord& record) { + LOG_SQS_TRACE("Handle queues list: " << record); + if (record.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); + const TValue queuesVal(val["queues"]); + const bool cloudMode = Cfg().GetYandexCloudMode(); + std::tuple<TString, TString> maxUserQueue; + for (size_t i = 0; i < queuesVal.Size(); ++i) { + const TValue row = queuesVal[i]; + TString user = row["Account"]; + TString queue = row["QueueName"]; + if (user == CurrentUser && queue == CurrentQueue) { + continue; + } + + if (std::tie(user, queue) > maxUserQueue) { + std::get<0>(maxUserQueue) = user; + std::get<1>(maxUserQueue) = queue; + } + + const ui64 state = row["QueueState"]; + if (state != 1 && state != 3) { // not finished queue creation + continue; + } + const TValue leaderTabletId = row["MasterTabletId"]; if (!leaderTabletId.HaveValue()) { LOG_SQS_ERROR("Queue [" << user << "/" << queue << "] without leader tablet id detected"); - continue; - } - - Result->SortedQueues.emplace_back(); - auto& rec = Result->SortedQueues.back(); - rec.UserName = std::move(user); - rec.QueueName = std::move(queue); + continue; + } + + Result->SortedQueues.emplace_back(); + auto& rec = Result->SortedQueues.back(); + rec.UserName = std::move(user); + rec.QueueName = std::move(queue); rec.LeaderTabletId = leaderTabletId; - if (cloudMode) { - rec.CustomName = row["CustomQueueName"]; - } else { - rec.CustomName = rec.QueueName; - } - const TValue version = row["Version"]; - if (version.HaveValue()) { - rec.Version = version; - } - rec.FolderId = row["FolderId"]; - rec.ShardsCount = row["Shards"]; + if (cloudMode) { + rec.CustomName = row["CustomQueueName"]; + } else { + rec.CustomName = rec.QueueName; + } + const TValue version = row["Version"]; + if (version.HaveValue()) { + rec.Version = version; + } + rec.FolderId = row["FolderId"]; + rec.ShardsCount = 
row["Shards"]; rec.DlqName = row["DlqName"]; - rec.CreatedTimestamp = TInstant::MilliSeconds(ui64(row["CreatedTimestamp"])); - } - - const bool truncated = val["truncated"]; - if (truncated) { - CurrentUser = std::get<0>(maxUserQueue); - CurrentQueue = std::get<1>(maxUserQueue); - NextRequest(); - } else { - std::sort(Result->SortedQueues.begin(), Result->SortedQueues.end()); // If .Queues table consists of many shards, result is possibly not sorted. + rec.CreatedTimestamp = TInstant::MilliSeconds(ui64(row["CreatedTimestamp"])); + } + + const bool truncated = val["truncated"]; + if (truncated) { + CurrentUser = std::get<0>(maxUserQueue); + CurrentQueue = std::get<1>(maxUserQueue); + NextRequest(); + } else { + std::sort(Result->SortedQueues.begin(), Result->SortedQueues.end()); // If .Queues table consists of many shards, result is possibly not sorted. Success(); - } - } else { - LOG_SQS_WARN("Get queues list request failed: " << record); + } + } else { + LOG_SQS_WARN("Get queues list request failed: " << record); Fail(); - } -} - + } +} + void TQueuesListReader::Success() { if (Recipients.size() == 1) { Send(*Recipients.begin(), std::move(Result)); @@ -159,9 +159,9 @@ void TQueuesListReader::Success() { Recipients.clear(); - ListingQueues = false; -} - + ListingQueues = false; +} + void TQueuesListReader::Fail() { for (const auto& recipientId : Recipients) { Send(recipientId, new TSqsEvents::TEvQueuesList(false)); @@ -169,7 +169,7 @@ void TQueuesListReader::Fail() { Recipients.clear(); - ListingQueues = false; -} - -} // namespace NKikimr::NSQS + ListingQueues = false; +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/queues_list_reader.h b/ydb/core/ymq/actor/queues_list_reader.h index 6562ad4036f..087b6cd1ee5 100644 --- a/ydb/core/ymq/actor/queues_list_reader.h +++ b/ydb/core/ymq/actor/queues_list_reader.h @@ -1,48 +1,48 @@ -#pragma once -#include "defs.h" -#include "events.h" -#include "log.h" -#include "serviceid.h" - +#pragma once +#include "defs.h" +#include "events.h" +#include "log.h" +#include "serviceid.h" + #include <library/cpp/actors/core/actor.h> - -#include <util/generic/hash.h> -#include <util/generic/ptr.h> - -namespace NKikimr::NSQS { - -class TQueuesListReader : public TActor<TQueuesListReader> { -public: - explicit TQueuesListReader(const TIntrusivePtr<TTransactionCounters>& transactionCounters); - ~TQueuesListReader(); - - static constexpr NKikimrServices::TActivity::EType ActorActivityType() { - return NKikimrServices::TActivity::SQS_QUEUES_LIST_READER_ACTOR; - } - -private: - STATEFN(StateFunc); - void HandleReadQueuesList(TSqsEvents::TEvReadQueuesList::TPtr& ev); - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - - void CompileRequest(); - void OnRequestCompiled(const TSqsEvents::TEvExecuted::TRecord& record); - - void NextRequest(); - void OnQueuesList(const TSqsEvents::TEvExecuted::TRecord& record); - + +#include <util/generic/hash.h> +#include <util/generic/ptr.h> + +namespace NKikimr::NSQS { + +class TQueuesListReader : public TActor<TQueuesListReader> { +public: + explicit TQueuesListReader(const TIntrusivePtr<TTransactionCounters>& transactionCounters); + ~TQueuesListReader(); + + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { + return NKikimrServices::TActivity::SQS_QUEUES_LIST_READER_ACTOR; + } + +private: + STATEFN(StateFunc); + void HandleReadQueuesList(TSqsEvents::TEvReadQueuesList::TPtr& ev); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + + void CompileRequest(); + void 
OnRequestCompiled(const TSqsEvents::TEvExecuted::TRecord& record); + + void NextRequest(); + void OnQueuesList(const TSqsEvents::TEvExecuted::TRecord& record); + void Success(); void Fail(); - -private: - TString CompiledQuery; - - TIntrusivePtr<TTransactionCounters> TransactionCounters; - TString CurrentUser; - TString CurrentQueue; - THolder<TSqsEvents::TEvQueuesList> Result; - bool ListingQueues = false; + +private: + TString CompiledQuery; + + TIntrusivePtr<TTransactionCounters> TransactionCounters; + TString CurrentUser; + TString CurrentQueue; + THolder<TSqsEvents::TEvQueuesList> Result; + bool ListingQueues = false; THashSet<TActorId> Recipients; -}; - -} // namespace NKikimr::NSQS +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/receive_message.cpp b/ydb/core/ymq/actor/receive_message.cpp index 2b5bf0e5174..df4d523cddb 100644 --- a/ydb/core/ymq/actor/receive_message.cpp +++ b/ydb/core/ymq/actor/receive_message.cpp @@ -1,9 +1,9 @@ #include "action.h" -#include "attributes_md5.h" -#include "cfg.h" -#include "error.h" -#include "executor.h" -#include "log.h" +#include "attributes_md5.h" +#include "cfg.h" +#include "error.h" +#include "executor.h" +#include "log.h" #include "params.h" #include <ydb/core/ymq/base/limits.h> @@ -16,250 +16,250 @@ using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TReceiveMessageActor : public TActionActor<TReceiveMessageActor> { public: - static constexpr bool NeedQueueAttributes() { - return true; - } - - TReceiveMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::ReceiveMessage, std::move(cb)) + static constexpr bool NeedQueueAttributes() { + return true; + } + + TReceiveMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::ReceiveMessage, std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutableReceiveMessage()->SetRequestId(RequestId_); - CopySecurityToken(Request()); + CopySecurityToken(Request()); } private: - TDuration GetVisibilityTimeout() const { - if (Request().HasVisibilityTimeout()) { - return TDuration::Seconds(Request().GetVisibilityTimeout()); - } else { - return QueueAttributes_->VisibilityTimeout; - } + TDuration GetVisibilityTimeout() const { + if (Request().HasVisibilityTimeout()) { + return TDuration::Seconds(Request().GetVisibilityTimeout()); + } else { + return QueueAttributes_->VisibilityTimeout; + } } - TInstant WaitDeadline() const { - return StartTs_ + WaitTime_; + TInstant WaitDeadline() const { + return StartTs_ + WaitTime_; } - bool MaybeScheduleWait() { - const TInstant waitDeadline = WaitDeadline(); - const TInstant now = TActivationContext::Now(); - const TDuration timeLeft = now < waitDeadline ? waitDeadline - now : TDuration::Zero(); - if (timeLeft >= TDuration::MilliSeconds(Cfg().GetMinTimeLeftForReceiveMessageWaitMs())) { - const TDuration waitStep = Min(TDuration::Seconds(1), waitDeadline - now); - this->Schedule(waitStep, new TEvWakeup()); - TotalWaitDuration_ += waitStep; - RLOG_SQS_DEBUG("Schedule wait for " << waitStep.MilliSeconds() << "ms"); - Retried_ = true; - return true; + bool MaybeScheduleWait() { + const TInstant waitDeadline = WaitDeadline(); + const TInstant now = TActivationContext::Now(); + const TDuration timeLeft = now < waitDeadline ? 
waitDeadline - now : TDuration::Zero(); + if (timeLeft >= TDuration::MilliSeconds(Cfg().GetMinTimeLeftForReceiveMessageWaitMs())) { + const TDuration waitStep = Min(TDuration::Seconds(1), waitDeadline - now); + this->Schedule(waitStep, new TEvWakeup()); + TotalWaitDuration_ += waitStep; + RLOG_SQS_DEBUG("Schedule wait for " << waitStep.MilliSeconds() << "ms"); + Retried_ = true; + return true; } else { - return false; + return false; } } bool DoValidate() override { if (!GetQueueName()) { - MakeError(Response_.MutableReceiveMessage(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); - return false; - } - - const auto& cfg = Cfg(); - - if (Request().GetWaitTimeSeconds() - && cfg.GetMaxWaitTimeoutMs() - && TDuration::Seconds(Request().GetWaitTimeSeconds()) > TDuration::MilliSeconds(cfg.GetMaxWaitTimeoutMs())) { - MakeError(Response_.MutableReceiveMessage(), NErrors::INVALID_PARAMETER_VALUE, - TStringBuilder() << "WaitTimeSeconds parameter must be less than or equal to " - << TDuration::MilliSeconds(cfg.GetMaxWaitTimeoutMs()).Seconds() << " seconds."); - return false; - } - - if (Request().GetMaxNumberOfMessages() > cfg.GetMaxNumberOfReceiveMessages()) { - MakeError(Response_.MutableReceiveMessage(), NErrors::INVALID_PARAMETER_VALUE, - TStringBuilder() << "MaxNumberOfMessages parameter must be between 1 and " << cfg.GetMaxNumberOfReceiveMessages() - << ", if provided."); - return false; - } - - if (Request().HasVisibilityTimeout() && TDuration::Seconds(Request().GetVisibilityTimeout()) > TLimits::MaxVisibilityTimeout) { - MakeError(Response_.MutableReceiveMessage(), NErrors::INVALID_PARAMETER_VALUE, "VisibilityTimeout parameter must be less than or equal to 12 hours."); + MakeError(Response_.MutableReceiveMessage(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); return false; } + const auto& cfg = Cfg(); + + if (Request().GetWaitTimeSeconds() + && cfg.GetMaxWaitTimeoutMs() + && TDuration::Seconds(Request().GetWaitTimeSeconds()) > TDuration::MilliSeconds(cfg.GetMaxWaitTimeoutMs())) { + MakeError(Response_.MutableReceiveMessage(), NErrors::INVALID_PARAMETER_VALUE, + TStringBuilder() << "WaitTimeSeconds parameter must be less than or equal to " + << TDuration::MilliSeconds(cfg.GetMaxWaitTimeoutMs()).Seconds() << " seconds."); + return false; + } + + if (Request().GetMaxNumberOfMessages() > cfg.GetMaxNumberOfReceiveMessages()) { + MakeError(Response_.MutableReceiveMessage(), NErrors::INVALID_PARAMETER_VALUE, + TStringBuilder() << "MaxNumberOfMessages parameter must be between 1 and " << cfg.GetMaxNumberOfReceiveMessages() + << ", if provided."); + return false; + } + + if (Request().HasVisibilityTimeout() && TDuration::Seconds(Request().GetVisibilityTimeout()) > TLimits::MaxVisibilityTimeout) { + MakeError(Response_.MutableReceiveMessage(), NErrors::INVALID_PARAMETER_VALUE, "VisibilityTimeout parameter must be less than or equal to 12 hours."); + return false; + } + return true; } - TError* MutableErrorDesc() override { - return Response_.MutableReceiveMessage()->MutableError(); - } - - void InitParams() { - if (ParamsAreInited_) { - return; - } - ParamsAreInited_ = true; - - if (Request().HasWaitTimeSeconds()) { - WaitTime_ = TDuration::Seconds(Request().GetWaitTimeSeconds()); - } else { - WaitTime_ = QueueAttributes_->ReceiveMessageWaitTime; - } - + TError* MutableErrorDesc() override { + return Response_.MutableReceiveMessage()->MutableError(); + } + + void InitParams() { + if (ParamsAreInited_) { + return; + } + ParamsAreInited_ = true; + + if (Request().HasWaitTimeSeconds()) 
{ + WaitTime_ = TDuration::Seconds(Request().GetWaitTimeSeconds()); + } else { + WaitTime_ = QueueAttributes_->ReceiveMessageWaitTime; + } + if (IsFifoQueue()) { - if (Request().GetReceiveRequestAttemptId()) { - ReceiveAttemptId_ = Request().GetReceiveRequestAttemptId(); + if (Request().GetReceiveRequestAttemptId()) { + ReceiveAttemptId_ = Request().GetReceiveRequestAttemptId(); } else { - ReceiveAttemptId_ = CreateGuidAsString(); + ReceiveAttemptId_ = CreateGuidAsString(); } + } + + MaxMessagesCount_ = ClampVal(static_cast<size_t>(Request().GetMaxNumberOfMessages()), TLimits::MinBatchSize, TLimits::MaxBatchSize); + } + + void DoAction() override { + Become(&TThis::StateFunc); + Y_VERIFY(QueueAttributes_.Defined()); + + InitParams(); + + auto receiveRequest = MakeHolder<TSqsEvents::TEvReceiveMessageBatch>(); + receiveRequest->RequestId = RequestId_; + receiveRequest->MaxMessagesCount = MaxMessagesCount_; + receiveRequest->ReceiveAttemptId = ReceiveAttemptId_; + receiveRequest->VisibilityTimeout = GetVisibilityTimeout(); + if (WaitTime_) { + receiveRequest->WaitDeadline = WaitDeadline(); } - - MaxMessagesCount_ = ClampVal(static_cast<size_t>(Request().GetMaxNumberOfMessages()), TLimits::MinBatchSize, TLimits::MaxBatchSize); - } - - void DoAction() override { - Become(&TThis::StateFunc); - Y_VERIFY(QueueAttributes_.Defined()); - - InitParams(); - - auto receiveRequest = MakeHolder<TSqsEvents::TEvReceiveMessageBatch>(); - receiveRequest->RequestId = RequestId_; - receiveRequest->MaxMessagesCount = MaxMessagesCount_; - receiveRequest->ReceiveAttemptId = ReceiveAttemptId_; - receiveRequest->VisibilityTimeout = GetVisibilityTimeout(); - if (WaitTime_) { - receiveRequest->WaitDeadline = WaitDeadline(); - } - + Send(QueueLeader_, std::move(receiveRequest)); } TString DoGetQueueName() const override { - return Request().GetQueueName(); + return Request().GetQueueName(); } void DoFinish() override { - if (auto* detailedCounters = QueueCounters_ ? QueueCounters_->GetDetailedCounters() : nullptr; !Retried_ && detailedCounters) { - const TDuration duration = GetRequestDuration(); - COLLECT_HISTOGRAM_COUNTER(detailedCounters, ReceiveMessageImmediate_Duration, duration.MilliSeconds()); - } + if (auto* detailedCounters = QueueCounters_ ? 
QueueCounters_->GetDetailedCounters() : nullptr; !Retried_ && detailedCounters) { + const TDuration duration = GetRequestDuration(); + COLLECT_HISTOGRAM_COUNTER(detailedCounters, ReceiveMessageImmediate_Duration, duration.MilliSeconds()); + } } -private: - STATEFN(StateFunc) { - switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvReceiveMessageBatchResponse, HandleReceiveMessageBatchResponse); +private: + STATEFN(StateFunc) { + switch (ev->GetTypeRewrite()) { + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvReceiveMessageBatchResponse, HandleReceiveMessageBatchResponse); } } - void HandleReceiveMessageBatchResponse(TSqsEvents::TEvReceiveMessageBatchResponse::TPtr& ev) { - if (ev->Get()->Retried) { - Retried_ = true; + void HandleReceiveMessageBatchResponse(TSqsEvents::TEvReceiveMessageBatchResponse::TPtr& ev) { + if (ev->Get()->Retried) { + Retried_ = true; } - if (ev->Get()->Failed) { - MakeError(Response_.MutableReceiveMessage(), NErrors::INTERNAL_FAILURE); - } else if (ev->Get()->OverLimit) { - MakeError(Response_.MutableReceiveMessage(), NErrors::OVER_LIMIT); - } else { - if (ev->Get()->Messages.empty() && MaybeScheduleWait()) { - return; - } - for (auto& message : ev->Get()->Messages) { - auto* item = Response_.MutableReceiveMessage()->AddMessages(); - item->SetApproximateFirstReceiveTimestamp(message.FirstReceiveTimestamp.MilliSeconds()); - item->SetApproximateReceiveCount(message.ReceiveCount); - item->SetMessageId(message.MessageId); - item->SetData(message.Data); - item->SetMD5OfMessageBody(MD5::Calc(message.Data)); - item->SetReceiptHandle(EncodeReceiptHandle(message.ReceiptHandle)); - RLOG_SQS_DEBUG("Encoded receipt handle: " << message.ReceiptHandle); - item->SetSentTimestamp(message.SentTimestamp.MilliSeconds()); - if (message.SenderId) { - item->SetSenderId(message.SenderId); + if (ev->Get()->Failed) { + MakeError(Response_.MutableReceiveMessage(), NErrors::INTERNAL_FAILURE); + } else if (ev->Get()->OverLimit) { + MakeError(Response_.MutableReceiveMessage(), NErrors::OVER_LIMIT); + } else { + if (ev->Get()->Messages.empty() && MaybeScheduleWait()) { + return; + } + for (auto& message : ev->Get()->Messages) { + auto* item = Response_.MutableReceiveMessage()->AddMessages(); + item->SetApproximateFirstReceiveTimestamp(message.FirstReceiveTimestamp.MilliSeconds()); + item->SetApproximateReceiveCount(message.ReceiveCount); + item->SetMessageId(message.MessageId); + item->SetData(message.Data); + item->SetMD5OfMessageBody(MD5::Calc(message.Data)); + item->SetReceiptHandle(EncodeReceiptHandle(message.ReceiptHandle)); + RLOG_SQS_DEBUG("Encoded receipt handle: " << message.ReceiptHandle); + item->SetSentTimestamp(message.SentTimestamp.MilliSeconds()); + if (message.SenderId) { + item->SetSenderId(message.SenderId); } - if (message.MessageAttributes) { - TMessageAttributeList attrs; - if (attrs.ParseFromString(message.MessageAttributes)) { - for (auto& a : *attrs.MutableAttributes()) { - item->AddMessageAttributes()->Swap(&a); + if (message.MessageAttributes) { + TMessageAttributeList attrs; + if (attrs.ParseFromString(message.MessageAttributes)) { + for (auto& a : *attrs.MutableAttributes()) { + item->AddMessageAttributes()->Swap(&a); } } - if (item->MessageAttributesSize() > 0) { - const TString md5 = CalcMD5OfMessageAttributes(item->GetMessageAttributes()); - item->SetMD5OfMessageAttributes(md5); - } - } - - if (IsFifoQueue()) { - item->SetMessageDeduplicationId(message.MessageDeduplicationId); - 
item->SetMessageGroupId(message.MessageGroupId); - item->SetSequenceNumber(message.SequenceNumber); - } - - // counters - const TDuration messageResideDuration = TActivationContext::Now() - message.SentTimestamp; - COLLECT_HISTOGRAM_COUNTER(QueueCounters_, MessageReceiveAttempts, message.ReceiveCount); + if (item->MessageAttributesSize() > 0) { + const TString md5 = CalcMD5OfMessageAttributes(item->GetMessageAttributes()); + item->SetMD5OfMessageAttributes(md5); + } + } + + if (IsFifoQueue()) { + item->SetMessageDeduplicationId(message.MessageDeduplicationId); + item->SetMessageGroupId(message.MessageGroupId); + item->SetSequenceNumber(message.SequenceNumber); + } + + // counters + const TDuration messageResideDuration = TActivationContext::Now() - message.SentTimestamp; + COLLECT_HISTOGRAM_COUNTER(QueueCounters_, MessageReceiveAttempts, message.ReceiveCount); COLLECT_HISTOGRAM_COUNTER(QueueCounters_, receive_attempts_count_rate, message.ReceiveCount); - COLLECT_HISTOGRAM_COUNTER(QueueCounters_, MessageReside_Duration, messageResideDuration.MilliSeconds()); + COLLECT_HISTOGRAM_COUNTER(QueueCounters_, MessageReside_Duration, messageResideDuration.MilliSeconds()); COLLECT_HISTOGRAM_COUNTER(QueueCounters_, reside_duration_milliseconds, messageResideDuration.MilliSeconds()); INC_COUNTER_COUPLE(QueueCounters_, ReceiveMessage_Count, received_count_per_second); ADD_COUNTER_COUPLE(QueueCounters_, ReceiveMessage_BytesRead, received_bytes_per_second, message.Data.size()); - } - if (ev->Get()->Messages.empty()) { + } + if (ev->Get()->Messages.empty()) { INC_COUNTER_COUPLE(QueueCounters_, ReceiveMessage_EmptyCount, empty_receive_attempts_count_per_second); } } - SendReplyAndDie(); - } - - bool HandleWakeup(TEvWakeup::TPtr& ev) override { - if (!TActionActor::HandleWakeup(ev)) { - DoAction(); - } - return true; - } - - TDuration GetRequestWaitDuration() const override { - return TotalWaitDuration_; - } - - TString DumpState() override { - TStringBuilder ret; - ret << TActionActor::DumpState() - << " Retried: " << Retried_ - << " WaitTime: " << WaitTime_ - << " ParamsAreInited: " << ParamsAreInited_ - << " MaxMessagesCount: " << MaxMessagesCount_ - << " TotalWaitDuration: " << TotalWaitDuration_ - << " ReceiveAttemptId: " << ReceiveAttemptId_; - return std::move(ret); + SendReplyAndDie(); } - const TReceiveMessageRequest& Request() const { - return SourceSqsRequest_.GetReceiveMessage(); + bool HandleWakeup(TEvWakeup::TPtr& ev) override { + if (!TActionActor::HandleWakeup(ev)) { + DoAction(); + } + return true; } + TDuration GetRequestWaitDuration() const override { + return TotalWaitDuration_; + } + + TString DumpState() override { + TStringBuilder ret; + ret << TActionActor::DumpState() + << " Retried: " << Retried_ + << " WaitTime: " << WaitTime_ + << " ParamsAreInited: " << ParamsAreInited_ + << " MaxMessagesCount: " << MaxMessagesCount_ + << " TotalWaitDuration: " << TotalWaitDuration_ + << " ReceiveAttemptId: " << ReceiveAttemptId_; + return std::move(ret); + } + + const TReceiveMessageRequest& Request() const { + return SourceSqsRequest_.GetReceiveMessage(); + } + private: - TString ReceiveAttemptId_; - bool Retried_ = false; - TDuration WaitTime_ = TDuration::Zero(); - bool ParamsAreInited_ = false; - size_t MaxMessagesCount_ = 0; - TDuration TotalWaitDuration_; + TString ReceiveAttemptId_; + bool Retried_ = false; + TDuration WaitTime_ = TDuration::Zero(); + bool ParamsAreInited_ = false; + size_t MaxMessagesCount_ = 0; + TDuration TotalWaitDuration_; }; -IActor* CreateReceiveMessageActor(const 
NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TReceiveMessageActor(sourceSqsRequest, std::move(cb)); +IActor* CreateReceiveMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TReceiveMessageActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/retention.cpp b/ydb/core/ymq/actor/retention.cpp index 9e610e8ed29..c1aaa47400d 100644 --- a/ydb/core/ymq/actor/retention.cpp +++ b/ydb/core/ymq/actor/retention.cpp @@ -1,103 +1,103 @@ -#include "retention.h" -#include "cfg.h" -#include "log.h" -#include "executor.h" - +#include "retention.h" +#include "cfg.h" +#include "log.h" +#include "executor.h" + #include <ydb/public/lib/value/value.h> #include <ydb/core/base/appdata.h> #include <ydb/core/ymq/base/debug_info.h> - + #include <library/cpp/actors/core/hfunc.h> -#include <util/random/random.h> - -namespace NKikimr::NSQS { - +#include <util/random/random.h> + +namespace NKikimr::NSQS { + TRetentionActor::TRetentionActor(const TQueuePath& queuePath, const TActorId& queueLeader) - : QueuePath_(queuePath) - , RequestId_(CreateGuidAsString()) + : QueuePath_(queuePath) + , RequestId_(CreateGuidAsString()) , QueueLeader_(queueLeader) -{ - DebugInfo->QueueRetentionActors.emplace(TStringBuilder() << TLogQueueName(QueuePath_), this); -} - -TRetentionActor::~TRetentionActor() { - DebugInfo->QueueRetentionActors.EraseKeyValue(TStringBuilder() << TLogQueueName(QueuePath_), this); -} - -void TRetentionActor::Bootstrap() { - RLOG_SQS_INFO("Bootstrap retention actor for queue " << TLogQueueName(QueuePath_)); - Become(&TThis::StateFunc); - Schedule(RandomRetentionPeriod(), new TEvWakeup()); -} - -TDuration TRetentionActor::RandomRetentionPeriod() const { - const TDuration minPeriod = TDuration::MilliSeconds(Cfg().GetMinMessageRetentionPeriodMs()); - return minPeriod + - TDuration::MilliSeconds(RandomNumber<ui32>(minPeriod.MilliSeconds() / 2)); -} - -void TRetentionActor::SetRetentionBoundary() { - auto onExecuted = [this](const TSqsEvents::TEvExecuted::TRecord& ev) { - ui32 status = ev.GetStatus(); - - if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); - const TValue list(val["result"]); - - for (size_t i = 0; i < list.Size(); ++i) { - auto req = MakeHolder<TSqsEvents::TEvPurgeQueue>(); - req->QueuePath = QueuePath_; - req->Boundary = TInstant::MilliSeconds(ui64(list[i]["RetentionBoundary"])); - req->Shard = ui64(list[i]["Shard"]); - - RLOG_SQS_INFO("Set retention boundary for queue " << TLogQueueName(QueuePath_, req->Shard) << " to " << req->Boundary.MilliSeconds() << " (" << req->Boundary << ")"); - +{ + DebugInfo->QueueRetentionActors.emplace(TStringBuilder() << TLogQueueName(QueuePath_), this); +} + +TRetentionActor::~TRetentionActor() { + DebugInfo->QueueRetentionActors.EraseKeyValue(TStringBuilder() << TLogQueueName(QueuePath_), this); +} + +void TRetentionActor::Bootstrap() { + RLOG_SQS_INFO("Bootstrap retention actor for queue " << TLogQueueName(QueuePath_)); + Become(&TThis::StateFunc); + Schedule(RandomRetentionPeriod(), new TEvWakeup()); +} + +TDuration TRetentionActor::RandomRetentionPeriod() const { + const TDuration minPeriod = TDuration::MilliSeconds(Cfg().GetMinMessageRetentionPeriodMs()); + return minPeriod + + TDuration::MilliSeconds(RandomNumber<ui32>(minPeriod.MilliSeconds() / 
2)); +} + +void TRetentionActor::SetRetentionBoundary() { + auto onExecuted = [this](const TSqsEvents::TEvExecuted::TRecord& ev) { + ui32 status = ev.GetStatus(); + + if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(ev.GetExecutionEngineEvaluatedResponse())); + const TValue list(val["result"]); + + for (size_t i = 0; i < list.Size(); ++i) { + auto req = MakeHolder<TSqsEvents::TEvPurgeQueue>(); + req->QueuePath = QueuePath_; + req->Boundary = TInstant::MilliSeconds(ui64(list[i]["RetentionBoundary"])); + req->Shard = ui64(list[i]["Shard"]); + + RLOG_SQS_INFO("Set retention boundary for queue " << TLogQueueName(QueuePath_, req->Shard) << " to " << req->Boundary.MilliSeconds() << " (" << req->Boundary << ")"); + Send(QueueLeader_, std::move(req)); - } - } else { - RLOG_SQS_ERROR("Failed to set retention boundary for queue " << TLogQueueName(QueuePath_)); - } - - Schedule(RandomRetentionPeriod(), new TEvWakeup()); - }; - - TExecutorBuilder(SelfId(), RequestId_) - .User(QueuePath_.UserName) - .Queue(QueuePath_.QueueName) + } + } else { + RLOG_SQS_ERROR("Failed to set retention boundary for queue " << TLogQueueName(QueuePath_)); + } + + Schedule(RandomRetentionPeriod(), new TEvWakeup()); + }; + + TExecutorBuilder(SelfId(), RequestId_) + .User(QueuePath_.UserName) + .Queue(QueuePath_.QueueName) .QueueLeader(QueueLeader_) - .QueryId(SET_RETENTION_ID) - .RetryOnTimeout() - .OnExecuted(onExecuted) - .Params() - .Uint64("NOW", Now().MilliSeconds()) - .Bool("PURGE", false) - .ParentBuilder().Start(); - - RLOG_SQS_TRACE("Executing retention request for queue " << TLogQueueName(QueuePath_)); -} - -void TRetentionActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - RLOG_SQS_DEBUG("Handle executed in retention actor for queue " << TLogQueueName(QueuePath_)); - ev->Get()->Call(); -} - -void TRetentionActor::HandlePoisonPill(TEvPoisonPill::TPtr&) { - RLOG_SQS_DEBUG("Handle poison pill in retention actor for queue " << TLogQueueName(QueuePath_)); - PassAway(); -} - -void TRetentionActor::HandleWakeup() { - RLOG_SQS_DEBUG("Handle wakeup in retention actor for queue " << TLogQueueName(QueuePath_)); - SetRetentionBoundary(); -} - -STATEFN(TRetentionActor::StateFunc) { - switch (ev->GetTypeRewrite()) { - cFunc(TEvWakeup::EventType, HandleWakeup); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - hFunc(TEvPoisonPill, HandlePoisonPill); - } -} - -} // namespace NKikimr::NSQS + .QueryId(SET_RETENTION_ID) + .RetryOnTimeout() + .OnExecuted(onExecuted) + .Params() + .Uint64("NOW", Now().MilliSeconds()) + .Bool("PURGE", false) + .ParentBuilder().Start(); + + RLOG_SQS_TRACE("Executing retention request for queue " << TLogQueueName(QueuePath_)); +} + +void TRetentionActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + RLOG_SQS_DEBUG("Handle executed in retention actor for queue " << TLogQueueName(QueuePath_)); + ev->Get()->Call(); +} + +void TRetentionActor::HandlePoisonPill(TEvPoisonPill::TPtr&) { + RLOG_SQS_DEBUG("Handle poison pill in retention actor for queue " << TLogQueueName(QueuePath_)); + PassAway(); +} + +void TRetentionActor::HandleWakeup() { + RLOG_SQS_DEBUG("Handle wakeup in retention actor for queue " << TLogQueueName(QueuePath_)); + SetRetentionBoundary(); +} + +STATEFN(TRetentionActor::StateFunc) { + switch (ev->GetTypeRewrite()) { + cFunc(TEvWakeup::EventType, HandleWakeup); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TEvPoisonPill, HandlePoisonPill); + } +} + +} // 
namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/retention.h b/ydb/core/ymq/actor/retention.h index 724baddff3f..dbde16bd25a 100644 --- a/ydb/core/ymq/actor/retention.h +++ b/ydb/core/ymq/actor/retention.h @@ -1,39 +1,39 @@ -#pragma once -#include "defs.h" +#pragma once +#include "defs.h" #include <ydb/core/ymq/actor/events.h> #include <ydb/core/protos/services.pb.h> - + #include <library/cpp/actors/core/actor.h> - -namespace NKikimr::NSQS { - -class TRetentionActor : public TActorBootstrapped<TRetentionActor> { -public: + +namespace NKikimr::NSQS { + +class TRetentionActor : public TActorBootstrapped<TRetentionActor> { +public: TRetentionActor(const TQueuePath& queuePath, const TActorId& queueLeader); - ~TRetentionActor(); - - void Bootstrap(); - + ~TRetentionActor(); + + void Bootstrap(); + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_RETENTION_BACKGROUND_ACTOR; - } - -private: - TDuration RandomRetentionPeriod() const; - - void SetRetentionBoundary(); - - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - void HandlePoisonPill(TEvPoisonPill::TPtr&); - void HandleWakeup(); - -private: - STATEFN(StateFunc); - -private: - const TQueuePath QueuePath_; - const TString RequestId_; + } + +private: + TDuration RandomRetentionPeriod() const; + + void SetRetentionBoundary(); + + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void HandlePoisonPill(TEvPoisonPill::TPtr&); + void HandleWakeup(); + +private: + STATEFN(StateFunc); + +private: + const TQueuePath QueuePath_; + const TString RequestId_; const TActorId QueueLeader_; -}; - -} // namespace NKikimr::NSQS +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/schema.cpp b/ydb/core/ymq/actor/schema.cpp index 20e84581fe5..f026a8fa4ff 100644 --- a/ydb/core/ymq/actor/schema.cpp +++ b/ydb/core/ymq/actor/schema.cpp @@ -1,6 +1,6 @@ -#include "cfg.h" +#include "cfg.h" #include "executor.h" -#include "log.h" +#include "log.h" #include "params.h" #include "schema.h" #include <ydb/core/ymq/base/constants.h> @@ -13,18 +13,18 @@ #include <ydb/public/lib/value/value.h> #include <util/generic/guid.h> -#include <util/generic/utility.h> +#include <util/generic/utility.h> #include <util/string/ascii.h> #include <util/string/cast.h> #include <util/string/join.h> using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { - -extern const TString QUOTER_KESUS_NAME = ".Quoter"; -extern const TString RPS_QUOTA_NAME = "RPSQuota"; - +namespace NKikimr::NSQS { + +extern const TString QUOTER_KESUS_NAME = ".Quoter"; +extern const TString RPS_QUOTA_NAME = "RPSQuota"; + namespace { static const char* const GetNextAtomicValueQuery = R"__( @@ -70,7 +70,7 @@ static void SetCompactionPolicyForSmallTable(NKikimrSchemeOp::TPartitionConfig& } static void SetCompactionPolicyForTimestampOrdering(NKikimrSchemeOp::TPartitionConfig& partitionConfig) { - const bool enableCompression = false; + const bool enableCompression = false; auto& policyPb = *partitionConfig.MutableCompactionPolicy(); policyPb.SetInMemSizeToSnapshot(1*1024*1024); @@ -104,12 +104,12 @@ static void SetCompactionPolicyForTimestampOrdering(NKikimrSchemeOp::TPartitionC } static void SetOnePartitionPerShardSettings(NKikimrSchemeOp::TTableDescription& desc, size_t queueShardsCount) { - for (size_t boundary = 1; boundary < queueShardsCount; ++boundary) { + for (size_t boundary = 1; boundary < queueShardsCount; ++boundary) { NKikimrSchemeOp::TSplitBoundary* splitBoundarySetting = desc.AddSplitBoundary(); - 
splitBoundarySetting->MutableKeyPrefix()->AddTuple()->MutableOptional()->SetUint64(boundary); - } -} - + splitBoundarySetting->MutableKeyPrefix()->AddTuple()->MutableOptional()->SetUint64(boundary); + } +} + } // namespace THolder<TEvTxUserProxy::TEvProposeTransaction> @@ -126,12 +126,12 @@ THolder<TEvTxUserProxy::TEvProposeTransaction> THolder<TEvTxUserProxy::TEvProposeTransaction> MakeCreateTableEvent(const TString& root, - const TTable& table, + const TTable& table, size_t queueShardsCount) { auto ev = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>(); // Transaction info - auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); + auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); if (table.Shard != -1) { trans->SetWorkingDir(TString::Join(root, "/", ToString(table.Shard))); @@ -140,24 +140,24 @@ THolder<TEvTxUserProxy::TEvProposeTransaction> } trans->SetOperationType(NKikimrSchemeOp::ESchemeOpCreateTable); // Table info - auto* desc = trans->MutableCreateTable(); + auto* desc = trans->MutableCreateTable(); desc->SetName(table.Name); // Columns info for (const auto& col : table.Columns) { - if (col.Partitions && !table.EnableAutosplit) { + if (col.Partitions && !table.EnableAutosplit) { desc->SetUniformPartitionsCount(col.Partitions); } if (col.Key) { desc->AddKeyColumnNames(col.Name); } - auto* item = desc->AddColumns(); + auto* item = desc->AddColumns(); item->SetName(col.Name); item->SetType(NScheme::TypeName(col.TypeId)); } if (table.InMemory) { - auto* def = desc->MutablePartitionConfig()->AddColumnFamilies(); + auto* def = desc->MutablePartitionConfig()->AddColumnFamilies(); def->SetId(0); def->SetColumnCache(NKikimrSchemeOp::ColumnCacheEver); } @@ -167,17 +167,17 @@ THolder<TEvTxUserProxy::TEvProposeTransaction> } else if (table.Small) { SetCompactionPolicyForSmallTable(*desc->MutablePartitionConfig()); } - if (table.OnePartitionPerShard) { - Y_VERIFY(queueShardsCount > 0); - SetOnePartitionPerShardSettings(*desc, queueShardsCount); - } - if (table.EnableAutosplit) { - auto* partitioningPolicy = desc->MutablePartitionConfig()->MutablePartitioningPolicy(); - Y_VERIFY(table.SizeToSplit > 0); - partitioningPolicy->SetSizeToSplit(table.SizeToSplit); - partitioningPolicy->SetMinPartitionsCount(1); - partitioningPolicy->SetMaxPartitionsCount(MAX_PARTITIONS_COUNT); - } + if (table.OnePartitionPerShard) { + Y_VERIFY(queueShardsCount > 0); + SetOnePartitionPerShardSettings(*desc, queueShardsCount); + } + if (table.EnableAutosplit) { + auto* partitioningPolicy = desc->MutablePartitionConfig()->MutablePartitioningPolicy(); + Y_VERIFY(table.SizeToSplit > 0); + partitioningPolicy->SetSizeToSplit(table.SizeToSplit); + partitioningPolicy->SetMinPartitionsCount(1); + partitioningPolicy->SetMaxPartitionsCount(MAX_PARTITIONS_COUNT); + } return ev; } @@ -213,248 +213,248 @@ THolder<TEvTxUserProxy::TEvProposeTransaction> return ev; } -THolder<TEvTxUserProxy::TEvProposeTransaction> - MakeCreateKesusEvent(const TString& root, - const TString& kesusName) -{ - auto ev = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>(); - - // Transaction info - auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); - trans->SetWorkingDir(root); +THolder<TEvTxUserProxy::TEvProposeTransaction> + MakeCreateKesusEvent(const TString& root, + const TString& kesusName) +{ + auto ev = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>(); + + // Transaction info + auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); + trans->SetWorkingDir(root); 
trans->SetOperationType(NKikimrSchemeOp::ESchemeOpCreateKesus); - - // Kesus info - auto* kesusDesc = trans->MutableKesus(); - kesusDesc->SetName(kesusName); - - return ev; -} - -THolder<TEvTxUserProxy::TEvProposeTransaction> - MakeRemoveKesusEvent(const TString& root, - const TString& kesusName) -{ - auto ev = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>(); - - // Transaction info - auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); - trans->SetWorkingDir(root); + + // Kesus info + auto* kesusDesc = trans->MutableKesus(); + kesusDesc->SetName(kesusName); + + return ev; +} + +THolder<TEvTxUserProxy::TEvProposeTransaction> + MakeRemoveKesusEvent(const TString& root, + const TString& kesusName) +{ + auto ev = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>(); + + // Transaction info + auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); + trans->SetWorkingDir(root); trans->SetOperationType(NKikimrSchemeOp::ESchemeOpDropKesus); - auto* drop = trans->MutableDrop(); - drop->SetName(kesusName); - - return ev; -} - + auto* drop = trans->MutableDrop(); + drop->SetName(kesusName); + + return ev; +} + TCreateUserSchemaActor::TCreateUserSchemaActor(const TString& root, const TString& userName, const TActorId& sender, - const TString& requestId, - TIntrusivePtr<TUserCounters> userCounters) + const TString& requestId, + TIntrusivePtr<TUserCounters> userCounters) : Root_(root) , UserName_(userName) , Sender_(sender) - , SI_(static_cast<int>(ECreating::MakeDirectory)) - , RequestId_(requestId) - , UserCounters_(std::move(userCounters)) + , SI_(static_cast<int>(ECreating::MakeDirectory)) + , RequestId_(requestId) + , UserCounters_(std::move(userCounters)) { } -TCreateUserSchemaActor::~TCreateUserSchemaActor() = default; +TCreateUserSchemaActor::~TCreateUserSchemaActor() = default; -void TCreateUserSchemaActor::Bootstrap() { - Process(); +void TCreateUserSchemaActor::Bootstrap() { + Process(); Become(&TThis::StateFunc); } -void TCreateUserSchemaActor::NextAction() { - SI_++; +void TCreateUserSchemaActor::NextAction() { + SI_++; - if (!Cfg().GetQuotingConfig().GetEnableQuoting() || !Cfg().GetQuotingConfig().HasKesusQuoterConfig()) { - while (ECreating(SI_) == ECreating::Quoter || ECreating(SI_) == ECreating::RPSQuota) { - SI_++; - } + if (!Cfg().GetQuotingConfig().GetEnableQuoting() || !Cfg().GetQuotingConfig().HasKesusQuoterConfig()) { + while (ECreating(SI_) == ECreating::Quoter || ECreating(SI_) == ECreating::RPSQuota) { + SI_++; + } } - Process(); + Process(); } -THolder<TEvTxUserProxy::TEvProposeTransaction> TCreateUserSchemaActor::MakeMkDirRequest(const TString& root, const TString& dirName) { - auto ev = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>(); - auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); - - trans->SetWorkingDir(root); +THolder<TEvTxUserProxy::TEvProposeTransaction> TCreateUserSchemaActor::MakeMkDirRequest(const TString& root, const TString& dirName) { + auto ev = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>(); + auto* trans = ev->Record.MutableTransaction()->MutableModifyScheme(); + + trans->SetWorkingDir(root); trans->SetOperationType(NKikimrSchemeOp::ESchemeOpMkDir); - trans->MutableMkDir()->SetName(dirName); - RLOG_SQS_INFO("Making directory [" << dirName << "] as subdir of [" << root << "]"); - return ev; -} - -void TCreateUserSchemaActor::Process() { + trans->MutableMkDir()->SetName(dirName); + RLOG_SQS_INFO("Making directory [" << dirName << "] as subdir of [" << root << "]"); + return ev; +} + +void 
TCreateUserSchemaActor::Process() { switch (ECreating(SI_)) { - case ECreating::MakeRootSqsDirectory: { - CreateRootSqsDirAttemptWasMade_ = true; - TStringBuf rootSqs = Root_; - TStringBuf mainRoot, sqsDirName; - rootSqs.RSplit('/', mainRoot, sqsDirName); + case ECreating::MakeRootSqsDirectory: { + CreateRootSqsDirAttemptWasMade_ = true; + TStringBuf rootSqs = Root_; + TStringBuf mainRoot, sqsDirName; + rootSqs.RSplit('/', mainRoot, sqsDirName); if (mainRoot.empty() || sqsDirName.empty()) { - RLOG_SQS_WARN("Failed to split root directory into components: [" << Root_ << "]"); - Send(Sender_, MakeHolder<TSqsEvents::TEvUserCreated>(false)); - PassAway(); - return; - } - Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, MakeMkDirRequest(TString(mainRoot), TString(sqsDirName)), false, TQueuePath(Root_, UserName_, TString()), GetTransactionCounters(UserCounters_))); - break; - } + RLOG_SQS_WARN("Failed to split root directory into components: [" << Root_ << "]"); + Send(Sender_, MakeHolder<TSqsEvents::TEvUserCreated>(false)); + PassAway(); + return; + } + Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, MakeMkDirRequest(TString(mainRoot), TString(sqsDirName)), false, TQueuePath(Root_, UserName_, TString()), GetTransactionCounters(UserCounters_))); + break; + } case ECreating::MakeDirectory: { - Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, MakeMkDirRequest(Root_, UserName_), false, TQueuePath(Root_, UserName_, TString()), GetTransactionCounters(UserCounters_))); - break; - } - case ECreating::Quoter: { - Register(new TMiniKqlExecutionActor( - SelfId(), RequestId_, MakeCreateKesusEvent(Root_ + "/" + UserName_, QUOTER_KESUS_NAME), false, TQueuePath(Root_, UserName_, TString()), GetTransactionCounters(UserCounters_)) - ); - break; - } - case ECreating::RPSQuota: { - AddRPSQuota(); + Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, MakeMkDirRequest(Root_, UserName_), false, TQueuePath(Root_, UserName_, TString()), GetTransactionCounters(UserCounters_))); break; } + case ECreating::Quoter: { + Register(new TMiniKqlExecutionActor( + SelfId(), RequestId_, MakeCreateKesusEvent(Root_ + "/" + UserName_, QUOTER_KESUS_NAME), false, TQueuePath(Root_, UserName_, TString()), GetTransactionCounters(UserCounters_)) + ); + break; + } + case ECreating::RPSQuota: { + AddRPSQuota(); + break; + } case ECreating::Finish: { - Send(Sender_, MakeHolder<TSqsEvents::TEvUserCreated>(true)); - PassAway(); + Send(Sender_, MakeHolder<TSqsEvents::TEvUserCreated>(true)); + PassAway(); break; } } } -void TCreateUserSchemaActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { +void TCreateUserSchemaActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { const auto& record = ev->Get()->Record; - const auto status = record.GetStatus(); - - if (SuccessStatusCode(status)) { - if (ECreating(SI_) == ECreating::Quoter) { - KesusPathId_ = std::make_pair(record.GetSchemeShardTabletId(), record.GetPathId()); - } - - NextAction(); + const auto status = record.GetStatus(); + + if (SuccessStatusCode(status)) { + if (ECreating(SI_) == ECreating::Quoter) { + KesusPathId_ = std::make_pair(record.GetSchemeShardTabletId(), record.GetPathId()); + } + + NextAction(); } else { - // Try to create /Root/SQS directory only if error occurs, because this is very rare operation. - if (ECreating(SI_) == ECreating::MakeDirectory + // Try to create /Root/SQS directory only if error occurs, because this is very rare operation. 
+ if (ECreating(SI_) == ECreating::MakeDirectory && record.GetSchemeShardStatus() == NKikimrScheme::EStatus::StatusPathDoesNotExist - && !CreateRootSqsDirAttemptWasMade_) - { - RLOG_SQS_INFO("Root SQS directory does not exist, making it. Error record: " << record); - SI_ = static_cast<int>(ECreating::MakeRootSqsDirectory); - Process(); - // race with two creations of root sqs directory will result in success code on the second creation too. - return; - } - RLOG_SQS_WARN("request execution error: " << record); - - Send(Sender_, MakeHolder<TSqsEvents::TEvUserCreated>(false)); - PassAway(); + && !CreateRootSqsDirAttemptWasMade_) + { + RLOG_SQS_INFO("Root SQS directory does not exist, making it. Error record: " << record); + SI_ = static_cast<int>(ECreating::MakeRootSqsDirectory); + Process(); + // race with two creations of root sqs directory will result in success code on the second creation too. + return; + } + RLOG_SQS_WARN("request execution error: " << record); + + Send(Sender_, MakeHolder<TSqsEvents::TEvUserCreated>(false)); + PassAway(); } } -void TCreateUserSchemaActor::HandleAddQuoterResource(NKesus::TEvKesus::TEvAddQuoterResourceResult::TPtr& ev) { +void TCreateUserSchemaActor::HandleAddQuoterResource(NKesus::TEvKesus::TEvAddQuoterResourceResult::TPtr& ev) { AddQuoterResourceActor_ = TActorId(); - auto status = ev->Get()->Record.GetError().GetStatus(); - if (status == Ydb::StatusIds::SUCCESS || status == Ydb::StatusIds::ALREADY_EXISTS) { - RLOG_SQS_DEBUG("Successfully added quoter resource. Id: " << ev->Get()->Record.GetResourceId()); - NextAction(); - } else { - RLOG_SQS_WARN("Failed to add quoter resource: " << ev->Get()->Record); - Send(Sender_, MakeHolder<TSqsEvents::TEvUserCreated>(false)); - PassAway(); - } -} - -void TCreateUserSchemaActor::AddRPSQuota() { - NKikimrKesus::TEvAddQuoterResource cmd; - auto& res = *cmd.MutableResource(); - res.SetResourcePath(RPS_QUOTA_NAME); - res.MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(1000); - AddQuoterResourceActor_ = RunAddQuoterResource(KesusPathId_.first, KesusPathId_.second, cmd, RequestId_); -} - -void TCreateUserSchemaActor::PassAway() { - if (AddQuoterResourceActor_) { - Send(AddQuoterResourceActor_, new TEvPoisonPill()); + auto status = ev->Get()->Record.GetError().GetStatus(); + if (status == Ydb::StatusIds::SUCCESS || status == Ydb::StatusIds::ALREADY_EXISTS) { + RLOG_SQS_DEBUG("Successfully added quoter resource. 
Id: " << ev->Get()->Record.GetResourceId()); + NextAction(); + } else { + RLOG_SQS_WARN("Failed to add quoter resource: " << ev->Get()->Record); + Send(Sender_, MakeHolder<TSqsEvents::TEvUserCreated>(false)); + PassAway(); + } +} + +void TCreateUserSchemaActor::AddRPSQuota() { + NKikimrKesus::TEvAddQuoterResource cmd; + auto& res = *cmd.MutableResource(); + res.SetResourcePath(RPS_QUOTA_NAME); + res.MutableHierarhicalDRRResourceConfig()->SetMaxUnitsPerSecond(1000); + AddQuoterResourceActor_ = RunAddQuoterResource(KesusPathId_.first, KesusPathId_.second, cmd, RequestId_); +} + +void TCreateUserSchemaActor::PassAway() { + if (AddQuoterResourceActor_) { + Send(AddQuoterResourceActor_, new TEvPoisonPill()); AddQuoterResourceActor_ = TActorId(); - } - TActorBootstrapped<TCreateUserSchemaActor>::PassAway(); -} - + } + TActorBootstrapped<TCreateUserSchemaActor>::PassAway(); +} + TDeleteUserSchemaActor::TDeleteUserSchemaActor(const TString& root, const TString& name, const TActorId& sender, - const TString& requestId, - TIntrusivePtr<TUserCounters> userCounters) + const TString& requestId, + TIntrusivePtr<TUserCounters> userCounters) : Root_(root) , Name_(name) , Sender_(sender) , SI_(0) - , RequestId_(requestId) - , UserCounters_(std::move(userCounters)) + , RequestId_(requestId) + , UserCounters_(std::move(userCounters)) { } -TDeleteUserSchemaActor::~TDeleteUserSchemaActor() = default; +TDeleteUserSchemaActor::~TDeleteUserSchemaActor() = default; -void TDeleteUserSchemaActor::Bootstrap() { - Process(); +void TDeleteUserSchemaActor::Bootstrap() { + Process(); Become(&TThis::StateFunc); } -void TDeleteUserSchemaActor::SkipQuoterStages() { - if (EDeleting(SI_) == EDeleting::RemoveQuoter && (!Cfg().GetQuotingConfig().GetEnableQuoting() || !Cfg().GetQuotingConfig().HasKesusQuoterConfig())) { - SI_++; - } +void TDeleteUserSchemaActor::SkipQuoterStages() { + if (EDeleting(SI_) == EDeleting::RemoveQuoter && (!Cfg().GetQuotingConfig().GetEnableQuoting() || !Cfg().GetQuotingConfig().HasKesusQuoterConfig())) { + SI_++; + } +} + +void TDeleteUserSchemaActor::NextAction() { + SI_++; + + Process(); } -void TDeleteUserSchemaActor::NextAction() { - SI_++; - - Process(); -} - -void TDeleteUserSchemaActor::Process() { - SkipQuoterStages(); - +void TDeleteUserSchemaActor::Process() { + SkipQuoterStages(); + switch (EDeleting(SI_)) { - case EDeleting::RemoveQuoter: { - Register(new TMiniKqlExecutionActor( - SelfId(), RequestId_, MakeRemoveKesusEvent(Root_ + "/" + Name_, QUOTER_KESUS_NAME), false, TQueuePath(Root_, Name_, TString()), GetTransactionCounters(UserCounters_)) - ); - break; - } + case EDeleting::RemoveQuoter: { + Register(new TMiniKqlExecutionActor( + SelfId(), RequestId_, MakeRemoveKesusEvent(Root_ + "/" + Name_, QUOTER_KESUS_NAME), false, TQueuePath(Root_, Name_, TString()), GetTransactionCounters(UserCounters_)) + ); + break; + } case EDeleting::RemoveDirectory: { - Register(new TMiniKqlExecutionActor( - SelfId(), RequestId_, MakeRemoveDirectoryEvent(Root_, Name_), false, TQueuePath(Root_, Name_, TString()), GetTransactionCounters(UserCounters_)) + Register(new TMiniKqlExecutionActor( + SelfId(), RequestId_, MakeRemoveDirectoryEvent(Root_, Name_), false, TQueuePath(Root_, Name_, TString()), GetTransactionCounters(UserCounters_)) ); break; } case EDeleting::Finish: { - Send(Sender_, MakeHolder<TSqsEvents::TEvUserDeleted>(true)); - PassAway(); + Send(Sender_, MakeHolder<TSqsEvents::TEvUserDeleted>(true)); + PassAway(); break; } } } -void 
TDeleteUserSchemaActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { +void TDeleteUserSchemaActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { const auto& record = ev->Get()->Record; - const auto status = record.GetStatus(); + const auto status = record.GetStatus(); - if (SuccessStatusCode(status)) { - NextAction(); + if (SuccessStatusCode(status)) { + NextAction(); } else { - RLOG_SQS_WARN("request execution error: " << record); + RLOG_SQS_WARN("request execution error: " << record); - Send(Sender_, MakeHolder<TSqsEvents::TEvUserDeleted>(false, record.GetMiniKQLErrors())); - PassAway(); + Send(Sender_, MakeHolder<TSqsEvents::TEvUserDeleted>(false, record.GetMiniKQLErrors())); + PassAway(); } } @@ -467,186 +467,186 @@ TAtomicCounterActor::TAtomicCounterActor(const TActorId& sender, const TString& TAtomicCounterActor::~TAtomicCounterActor() = default; -void TAtomicCounterActor::Bootstrap() { +void TAtomicCounterActor::Bootstrap() { Become(&TThis::StateFunc); auto ev = MakeExecuteEvent(Sprintf(GetNextAtomicValueQuery, RootPath_.c_str())); - Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), true, TQueuePath(), nullptr)); + Register(new TMiniKqlExecutionActor(SelfId(), RequestId_, std::move(ev), true, TQueuePath(), nullptr)); } -void TAtomicCounterActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { +void TAtomicCounterActor::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { const auto& record = ev->Get()->Record; const auto status = record.GetStatus(); if (SuccessStatusCode(status)) { const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); - Send(Sender_, + Send(Sender_, MakeHolder<TSqsEvents::TEvAtomicCounterIncrementResult>(true, "ok", val["value"])); } else { - RLOG_SQS_ERROR("Failed to increment the atomic counter: bad status code"); - Send(Sender_, + RLOG_SQS_ERROR("Failed to increment the atomic counter: bad status code"); + Send(Sender_, MakeHolder<TSqsEvents::TEvAtomicCounterIncrementResult>(false)); } - PassAway(); + PassAway(); } -template <class TEvCmd, class TEvCmdResult> -class TQuoterCmdRunner : public TActorBootstrapped<TQuoterCmdRunner<TEvCmd, TEvCmdResult>> { -public: - TQuoterCmdRunner( - ui64 quoterSchemeShardId, - ui64 quoterPathId, - const typename TEvCmd::ProtoRecordType& cmd, - const TString& requestId, +template <class TEvCmd, class TEvCmdResult> +class TQuoterCmdRunner : public TActorBootstrapped<TQuoterCmdRunner<TEvCmd, TEvCmdResult>> { +public: + TQuoterCmdRunner( + ui64 quoterSchemeShardId, + ui64 quoterPathId, + const typename TEvCmd::ProtoRecordType& cmd, + const TString& requestId, const TActorId& parent - ) - : QuoterSchemeShardId(quoterSchemeShardId) - , QuoterPathId(quoterPathId) - , Cmd(cmd) - , RequestId_(requestId) - , Parent(parent) - { - } - - TQuoterCmdRunner( - const TString& quoterPath, - const typename TEvCmd::ProtoRecordType& cmd, - const TString& requestId, + ) + : QuoterSchemeShardId(quoterSchemeShardId) + , QuoterPathId(quoterPathId) + , Cmd(cmd) + , RequestId_(requestId) + , Parent(parent) + { + } + + TQuoterCmdRunner( + const TString& quoterPath, + const typename TEvCmd::ProtoRecordType& cmd, + const TString& requestId, const TActorId& parent - ) - : QuoterPath(quoterPath) - , Cmd(cmd) - , RequestId_(requestId) - , Parent(parent) - { - } - - void Bootstrap() { - this->Become(&TQuoterCmdRunner::StateFunc); - RequestQuoterTabletId(); - } - + ) + : QuoterPath(quoterPath) + , Cmd(cmd) + , RequestId_(requestId) + , Parent(parent) + { + } + + void Bootstrap() { + 
this->Become(&TQuoterCmdRunner::StateFunc); + RequestQuoterTabletId(); + } + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_ACTOR; - } - -private: - void RequestQuoterTabletId() { - THolder<TEvTxUserProxy::TEvNavigate> request(new TEvTxUserProxy::TEvNavigate()); - auto& descCmd = *request->Record.MutableDescribePath(); - if (QuoterPath.empty()) { - RLOG_SQS_TRACE("Requesting quoter tablet id for path id " << QuoterPathId); - descCmd.SetSchemeshardId(QuoterSchemeShardId); - descCmd.SetPathId(QuoterPathId); - } else { - RLOG_SQS_TRACE("Requesting quoter tablet id for path \"" << QuoterPath << "\""); - descCmd.SetPath(QuoterPath); - } - this->Send(MakeTxProxyID(), std::move(request)); - } - + } + +private: + void RequestQuoterTabletId() { + THolder<TEvTxUserProxy::TEvNavigate> request(new TEvTxUserProxy::TEvNavigate()); + auto& descCmd = *request->Record.MutableDescribePath(); + if (QuoterPath.empty()) { + RLOG_SQS_TRACE("Requesting quoter tablet id for path id " << QuoterPathId); + descCmd.SetSchemeshardId(QuoterSchemeShardId); + descCmd.SetPathId(QuoterPathId); + } else { + RLOG_SQS_TRACE("Requesting quoter tablet id for path \"" << QuoterPath << "\""); + descCmd.SetPath(QuoterPath); + } + this->Send(MakeTxProxyID(), std::move(request)); + } + void HandleDescribeSchemeResult(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult::TPtr& ev) { - RLOG_SQS_TRACE("HandleDescribeSchemeResult for quoter: " << ev->Get()->GetRecord()); - const auto& pathDescription = ev->Get()->GetRecord().GetPathDescription(); + RLOG_SQS_TRACE("HandleDescribeSchemeResult for quoter: " << ev->Get()->GetRecord()); + const auto& pathDescription = ev->Get()->GetRecord().GetPathDescription(); if (ev->Get()->GetRecord().GetStatus() != NKikimrScheme::StatusSuccess || !pathDescription.GetKesus().GetKesusTabletId()) { - RLOG_SQS_ERROR("Describe scheme failed: " << ev->Get()->GetRecord()); - SendErrorAndDie("Describe scheme failed."); - return; - } - - QuoterTabletId = pathDescription.GetKesus().GetKesusTabletId(); - CreatePipe(false); - SendCmdToKesus(); - } - - void HandleCmdResult(typename TEvCmdResult::TPtr& ev) { - RLOG_SQS_TRACE("Received answer from quoter: " << ev->Get()->Record); - TActivationContext::Send(ev->Forward(Parent)); - PassAway(); - } - - void CreatePipe(bool retry) { - NTabletPipe::TClientConfig clientConfig; + RLOG_SQS_ERROR("Describe scheme failed: " << ev->Get()->GetRecord()); + SendErrorAndDie("Describe scheme failed."); + return; + } + + QuoterTabletId = pathDescription.GetKesus().GetKesusTabletId(); + CreatePipe(false); + SendCmdToKesus(); + } + + void HandleCmdResult(typename TEvCmdResult::TPtr& ev) { + RLOG_SQS_TRACE("Received answer from quoter: " << ev->Get()->Record); + TActivationContext::Send(ev->Forward(Parent)); + PassAway(); + } + + void CreatePipe(bool retry) { + NTabletPipe::TClientConfig clientConfig; clientConfig.RetryPolicy = {.RetryLimitCount = 5, .MinRetryTime = TDuration::MilliSeconds(100), .DoFirstRetryInstantly = !retry}; - PipeClient = this->Register(NTabletPipe::CreateClient(this->SelfId(), QuoterTabletId, clientConfig)); - RLOG_SQS_TRACE("Created pipe client to Kesus: " << PipeClient); - } - - void SendCmdToKesus() { - RLOG_SQS_DEBUG("Send command to Kesus: " << Cmd); - NTabletPipe::SendData(this->SelfId(), PipeClient, new TEvCmd(Cmd)); - } - - void PassAway() override { - if (PipeClient) { - NTabletPipe::CloseClient(this->SelfId(), PipeClient); + PipeClient = 
this->Register(NTabletPipe::CreateClient(this->SelfId(), QuoterTabletId, clientConfig)); + RLOG_SQS_TRACE("Created pipe client to Kesus: " << PipeClient); + } + + void SendCmdToKesus() { + RLOG_SQS_DEBUG("Send command to Kesus: " << Cmd); + NTabletPipe::SendData(this->SelfId(), PipeClient, new TEvCmd(Cmd)); + } + + void PassAway() override { + if (PipeClient) { + NTabletPipe::CloseClient(this->SelfId(), PipeClient); PipeClient = TActorId(); - } - TActorBootstrapped<TQuoterCmdRunner>::PassAway(); - } - - void SendErrorAndDie(const TString& reason) { - this->Send(Parent, MakeHolder<TEvCmdResult>(Ydb::StatusIds::INTERNAL_ERROR, reason)); - PassAway(); - } - - void HandlePipeClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev) { - if (ev->Get()->Status != NKikimrProto::OK) { - RLOG_SQS_WARN("Failed to connect to pipe: " << ev->Get()->Status << ". Reconnecting"); - CreatePipe(true); - SendCmdToKesus(); - } - } - - void HandlePipeClientDisconnected(TEvTabletPipe::TEvClientDestroyed::TPtr& ev) { - Y_UNUSED(ev); - RLOG_SQS_WARN("Pipe disconnected. Reconnecting"); - CreatePipe(true); - SendCmdToKesus(); - } - - STATEFN(StateFunc) { - switch (ev->GetTypeRewrite()) { + } + TActorBootstrapped<TQuoterCmdRunner>::PassAway(); + } + + void SendErrorAndDie(const TString& reason) { + this->Send(Parent, MakeHolder<TEvCmdResult>(Ydb::StatusIds::INTERNAL_ERROR, reason)); + PassAway(); + } + + void HandlePipeClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev) { + if (ev->Get()->Status != NKikimrProto::OK) { + RLOG_SQS_WARN("Failed to connect to pipe: " << ev->Get()->Status << ". Reconnecting"); + CreatePipe(true); + SendCmdToKesus(); + } + } + + void HandlePipeClientDisconnected(TEvTabletPipe::TEvClientDestroyed::TPtr& ev) { + Y_UNUSED(ev); + RLOG_SQS_WARN("Pipe disconnected. 
Reconnecting"); + CreatePipe(true); + SendCmdToKesus(); + } + + STATEFN(StateFunc) { + switch (ev->GetTypeRewrite()) { hFunc(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult, HandleDescribeSchemeResult); - hFunc(TEvCmdResult, HandleCmdResult); - hFunc(TEvTabletPipe::TEvClientDestroyed, HandlePipeClientDisconnected); - hFunc(TEvTabletPipe::TEvClientConnected, HandlePipeClientConnected); - cFunc(TEvPoisonPill::EventType, PassAway); - } - } - -private: - const ui64 QuoterSchemeShardId = 0; - const ui64 QuoterPathId = 0; - const TString QuoterPath; - const typename TEvCmd::ProtoRecordType Cmd; - const TString RequestId_; + hFunc(TEvCmdResult, HandleCmdResult); + hFunc(TEvTabletPipe::TEvClientDestroyed, HandlePipeClientDisconnected); + hFunc(TEvTabletPipe::TEvClientConnected, HandlePipeClientConnected); + cFunc(TEvPoisonPill::EventType, PassAway); + } + } + +private: + const ui64 QuoterSchemeShardId = 0; + const ui64 QuoterPathId = 0; + const TString QuoterPath; + const typename TEvCmd::ProtoRecordType Cmd; + const TString RequestId_; const TActorId Parent; - ui64 QuoterTabletId = 0; + ui64 QuoterTabletId = 0; TActorId PipeClient; -}; - +}; + TActorId RunAddQuoterResource(ui64 quoterSchemeShardId, ui64 quoterPathId, const NKikimrKesus::TEvAddQuoterResource& cmd, const TString& requestId) { - return TActivationContext::Register( - new TQuoterCmdRunner<NKesus::TEvKesus::TEvAddQuoterResource, NKesus::TEvKesus::TEvAddQuoterResourceResult>( - quoterSchemeShardId, quoterPathId, cmd, requestId, TActivationContext::AsActorContext().SelfID - ) - ); -} - + return TActivationContext::Register( + new TQuoterCmdRunner<NKesus::TEvKesus::TEvAddQuoterResource, NKesus::TEvKesus::TEvAddQuoterResourceResult>( + quoterSchemeShardId, quoterPathId, cmd, requestId, TActivationContext::AsActorContext().SelfID + ) + ); +} + TActorId RunAddQuoterResource(const TString& quoterPath, const NKikimrKesus::TEvAddQuoterResource& cmd, const TString& requestId) { - return TActivationContext::Register( - new TQuoterCmdRunner<NKesus::TEvKesus::TEvAddQuoterResource, NKesus::TEvKesus::TEvAddQuoterResourceResult>( - quoterPath, cmd, requestId, TActivationContext::AsActorContext().SelfID - ) - ); -} - + return TActivationContext::Register( + new TQuoterCmdRunner<NKesus::TEvKesus::TEvAddQuoterResource, NKesus::TEvKesus::TEvAddQuoterResourceResult>( + quoterPath, cmd, requestId, TActivationContext::AsActorContext().SelfID + ) + ); +} + TActorId RunDeleteQuoterResource(const TString& quoterPath, const NKikimrKesus::TEvDeleteQuoterResource& cmd, const TString& requestId) { - return TActivationContext::Register( - new TQuoterCmdRunner<NKesus::TEvKesus::TEvDeleteQuoterResource, NKesus::TEvKesus::TEvDeleteQuoterResourceResult>( - quoterPath, cmd, requestId, TActivationContext::AsActorContext().SelfID - ) - ); -} - -} // namespace NKikimr::NSQS + return TActivationContext::Register( + new TQuoterCmdRunner<NKesus::TEvKesus::TEvDeleteQuoterResource, NKesus::TEvKesus::TEvDeleteQuoterResourceResult>( + quoterPath, cmd, requestId, TActivationContext::AsActorContext().SelfID + ) + ); +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/schema.h b/ydb/core/ymq/actor/schema.h index c88face921b..7f1e6ff3fde 100644 --- a/ydb/core/ymq/actor/schema.h +++ b/ydb/core/ymq/actor/schema.h @@ -1,5 +1,5 @@ #pragma once -#include "defs.h" +#include "defs.h" #include "events.h" #include <ydb/core/ymq/base/table_info.h> @@ -16,11 +16,11 @@ #include <util/generic/hash.h> -namespace NKikimr::NSQS { - -extern const TString QUOTER_KESUS_NAME; 
-extern const TString RPS_QUOTA_NAME; +namespace NKikimr::NSQS { +extern const TString QUOTER_KESUS_NAME; +extern const TString RPS_QUOTA_NAME; + THolder<TEvTxUserProxy::TEvProposeTransaction> MakeExecuteEvent(const TString& query); @@ -36,106 +36,106 @@ THolder<TEvTxUserProxy::TEvProposeTransaction> THolder<TEvTxUserProxy::TEvProposeTransaction> MakeRemoveDirectoryEvent(const TString& root, const TString& name); -// Create actor that calls AddQuoterResource and handles pipe errors and retries +// Create actor that calls AddQuoterResource and handles pipe errors and retries TActorId RunAddQuoterResource(ui64 quoterSchemeShardId, ui64 quoterPathId, const NKikimrKesus::TEvAddQuoterResource& cmd, const TString& requestId); TActorId RunAddQuoterResource(const TString& quoterPath, const NKikimrKesus::TEvAddQuoterResource& cmd, const TString& requestId); TActorId RunDeleteQuoterResource(const TString& quoterPath, const NKikimrKesus::TEvDeleteQuoterResource& cmd, const TString& requestId); - -inline TIntrusivePtr<TTransactionCounters> GetTransactionCounters(const TIntrusivePtr<TUserCounters>& userCounters) { - if (userCounters) { - return userCounters->GetTransactionCounters(); - } - return nullptr; -} - + +inline TIntrusivePtr<TTransactionCounters> GetTransactionCounters(const TIntrusivePtr<TUserCounters>& userCounters) { + if (userCounters) { + return userCounters->GetTransactionCounters(); + } + return nullptr; +} + class TCreateUserSchemaActor - : public TActorBootstrapped<TCreateUserSchemaActor> + : public TActorBootstrapped<TCreateUserSchemaActor> { public: TCreateUserSchemaActor(const TString& root, const TString& userName, const TActorId& sender, const TString& requestId, TIntrusivePtr<TUserCounters> userCounters); ~TCreateUserSchemaActor(); - void Bootstrap(); + void Bootstrap(); static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_ACTOR; - } - + } + private: - void NextAction(); + void NextAction(); - void Process(); + void Process(); private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - hFunc(NKesus::TEvKesus::TEvAddQuoterResourceResult, HandleAddQuoterResource); - cFunc(TEvPoisonPill::EventType, PassAway); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(NKesus::TEvKesus::TEvAddQuoterResourceResult, HandleAddQuoterResource); + cFunc(TEvPoisonPill::EventType, PassAway); } } - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - void HandleAddQuoterResource(NKesus::TEvKesus::TEvAddQuoterResourceResult::TPtr& ev); - - THolder<TEvTxUserProxy::TEvProposeTransaction> MakeMkDirRequest(const TString& root, const TString& dirName); - - void AddRPSQuota(); - - void PassAway() override; + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void HandleAddQuoterResource(NKesus::TEvKesus::TEvAddQuoterResourceResult::TPtr& ev); + THolder<TEvTxUserProxy::TEvProposeTransaction> MakeMkDirRequest(const TString& root, const TString& dirName); + + void AddRPSQuota(); + + void PassAway() override; + private: - enum class ECreating : int { - MakeRootSqsDirectory = -1, // optional state + enum class ECreating : int { + MakeRootSqsDirectory = -1, // optional state MakeDirectory = 0, - Quoter, - RPSQuota, - Finish, + Quoter, + RPSQuota, + Finish, }; const TString Root_; const TString UserName_; const TActorId Sender_; - int SI_; - const TString RequestId_; - bool CreateRootSqsDirAttemptWasMade_ = false; - TIntrusivePtr<TUserCounters> UserCounters_; 
- std::pair<ui64, ui64> KesusPathId_ = {}; // SchemeShardTableId, PathId for quoter kesus + int SI_; + const TString RequestId_; + bool CreateRootSqsDirAttemptWasMade_ = false; + TIntrusivePtr<TUserCounters> UserCounters_; + std::pair<ui64, ui64> KesusPathId_ = {}; // SchemeShardTableId, PathId for quoter kesus TActorId AddQuoterResourceActor_; }; class TDeleteUserSchemaActor - : public TActorBootstrapped<TDeleteUserSchemaActor> + : public TActorBootstrapped<TDeleteUserSchemaActor> { public: TDeleteUserSchemaActor(const TString& root, const TString& name, const TActorId& sender, const TString& requestId, TIntrusivePtr<TUserCounters> userCounters); ~TDeleteUserSchemaActor(); - void Bootstrap(); + void Bootstrap(); static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_ACTOR; - } - + } + private: - void NextAction(); + void NextAction(); - void Process(); + void Process(); private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); } } - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - - void SkipQuoterStages(); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void SkipQuoterStages(); + private: enum class EDeleting : ui32 { - RemoveQuoter = 0, + RemoveQuoter = 0, RemoveDirectory, Finish }; @@ -144,31 +144,31 @@ private: const TString Name_; const TActorId Sender_; ui32 SI_; - const TString RequestId_; - TIntrusivePtr<TUserCounters> UserCounters_; + const TString RequestId_; + TIntrusivePtr<TUserCounters> UserCounters_; }; class TAtomicCounterActor - : public TActorBootstrapped<TAtomicCounterActor> + : public TActorBootstrapped<TAtomicCounterActor> { public: TAtomicCounterActor(const TActorId& sender, const TString& rootPath, const TString& requestId); ~TAtomicCounterActor(); - void Bootstrap(); + void Bootstrap(); static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_ACTOR; } private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); } } - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); private: const TActorId Sender_; @@ -176,4 +176,4 @@ private: const TString RequestId_; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/send_message.cpp b/ydb/core/ymq/actor/send_message.cpp index e596b8f2e33..1710cb8f686 100644 --- a/ydb/core/ymq/actor/send_message.cpp +++ b/ydb/core/ymq/actor/send_message.cpp @@ -1,8 +1,8 @@ #include "action.h" -#include "attributes_md5.h" -#include "error.h" -#include "executor.h" -#include "log.h" +#include "attributes_md5.h" +#include "error.h" +#include "executor.h" +#include "log.h" #include "params.h" #include "serviceid.h" @@ -18,29 +18,29 @@ using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TSendMessageActor : public TActionActor<TSendMessageActor> { public: - static constexpr bool NeedQueueAttributes() { - return true; - } - - TSendMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, bool isBatch, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, isBatch ? 
EAction::SendMessageBatch : EAction::SendMessage, std::move(cb)) - , IsBatch_(isBatch) + static constexpr bool NeedQueueAttributes() { + return true; + } + + TSendMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, bool isBatch, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, isBatch ? EAction::SendMessageBatch : EAction::SendMessage, std::move(cb)) + , IsBatch_(isBatch) { - if (IsBatch_) { - CopyAccountName(BatchRequest()); - Response_.MutableSendMessageBatch()->SetRequestId(RequestId_); - CopySecurityToken(BatchRequest()); - } else { - CopyAccountName(Request()); - Response_.MutableSendMessage()->SetRequestId(RequestId_); - CopySecurityToken(Request()); - } + if (IsBatch_) { + CopyAccountName(BatchRequest()); + Response_.MutableSendMessageBatch()->SetRequestId(RequestId_); + CopySecurityToken(BatchRequest()); + } else { + CopyAccountName(Request()); + Response_.MutableSendMessage()->SetRequestId(RequestId_); + CopySecurityToken(Request()); + } } private: @@ -53,107 +53,107 @@ private: return ret; } - TDuration GetDelay(const TSendMessageRequest& request) const { - if (request.HasDelaySeconds()) { - return TDuration::Seconds(request.GetDelaySeconds()); - } else { - return QueueAttributes_->DelaySeconds; - } - } - - bool ValidateSingleRequest(const TSendMessageRequest& req, TSendMessageResponse* resp) { - if (IsFifoQueue()) { - if (!req.GetMessageGroupId()) { - MakeError(resp, NErrors::MISSING_PARAMETER, "No MessageGroupId parameter."); - return false; - } - - if (!req.GetMessageDeduplicationId() && !QueueAttributes_->ContentBasedDeduplication) { - MakeError(resp, NErrors::MISSING_PARAMETER, "No MessageDeduplicationId parameter."); - return false; - } - } - - if (req.GetDelaySeconds() > TLimits::MaxDelaySeconds) { - MakeError(resp, NErrors::INVALID_PARAMETER_VALUE, "Delay seconds are too big."); - return false; - } - - if (req.MessageAttributesSize() > TLimits::MaxMessageAttributes) { - MakeError(resp, NErrors::INVALID_PARAMETER_COMBINATION, TStringBuilder() << "Message has more than " << TLimits::MaxMessageAttributes << " attributes."); - return false; - } - - TString description; - if (!ValidateMessageBody(req.GetMessageBody(), description)) { - if (Cfg().GetValidateMessageBody()) { - MakeError(resp, NErrors::INVALID_PARAMETER_VALUE, TStringBuilder() << "Message body validation failed: " << description << "."); - return false; - } else { - RLOG_SQS_WARN("Message body validation failed: " << description << "."); - } - } - for (const auto& a : req.GetMessageAttributes()) { - if (!ValidateMessageBody(a.GetStringValue(), description)) { - if (Cfg().GetValidateMessageBody()) { - MakeError(resp, NErrors::INVALID_PARAMETER_VALUE, TStringBuilder() << "Message attribute \"" << a.GetName() << "\" validation failed: " << description << "."); - return false; - } else { - RLOG_SQS_WARN("Message attribute \"" << a.GetName() << "\" validation failed: " << description << "."); - } - } - } - return true; + TDuration GetDelay(const TSendMessageRequest& request) const { + if (request.HasDelaySeconds()) { + return TDuration::Seconds(request.GetDelaySeconds()); + } else { + return QueueAttributes_->DelaySeconds; + } } + bool ValidateSingleRequest(const TSendMessageRequest& req, TSendMessageResponse* resp) { + if (IsFifoQueue()) { + if (!req.GetMessageGroupId()) { + MakeError(resp, NErrors::MISSING_PARAMETER, "No MessageGroupId parameter."); + return false; + } + + if (!req.GetMessageDeduplicationId() && !QueueAttributes_->ContentBasedDeduplication) { + MakeError(resp, 
NErrors::MISSING_PARAMETER, "No MessageDeduplicationId parameter."); + return false; + } + } + + if (req.GetDelaySeconds() > TLimits::MaxDelaySeconds) { + MakeError(resp, NErrors::INVALID_PARAMETER_VALUE, "Delay seconds are too big."); + return false; + } + + if (req.MessageAttributesSize() > TLimits::MaxMessageAttributes) { + MakeError(resp, NErrors::INVALID_PARAMETER_COMBINATION, TStringBuilder() << "Message has more than " << TLimits::MaxMessageAttributes << " attributes."); + return false; + } + + TString description; + if (!ValidateMessageBody(req.GetMessageBody(), description)) { + if (Cfg().GetValidateMessageBody()) { + MakeError(resp, NErrors::INVALID_PARAMETER_VALUE, TStringBuilder() << "Message body validation failed: " << description << "."); + return false; + } else { + RLOG_SQS_WARN("Message body validation failed: " << description << "."); + } + } + for (const auto& a : req.GetMessageAttributes()) { + if (!ValidateMessageBody(a.GetStringValue(), description)) { + if (Cfg().GetValidateMessageBody()) { + MakeError(resp, NErrors::INVALID_PARAMETER_VALUE, TStringBuilder() << "Message attribute \"" << a.GetName() << "\" validation failed: " << description << "."); + return false; + } else { + RLOG_SQS_WARN("Message attribute \"" << a.GetName() << "\" validation failed: " << description << "."); + } + } + } + return true; + } + bool DoValidate() override { - const size_t maxMessageSize = Min(TLimits::MaxMessageSize, QueueAttributes_->MaximumMessageSize); + const size_t maxMessageSize = Min(TLimits::MaxMessageSize, QueueAttributes_->MaximumMessageSize); if (IsBatch_) { size_t size = 0; size_t count = 0; - bool tooBig = false; - - for (const auto& req : BatchRequest().GetEntries()) { - const size_t msgSize = CalculateMessageSize(req); - if (msgSize > maxMessageSize) { - tooBig = true; - break; - } - size += msgSize; + bool tooBig = false; + + for (const auto& req : BatchRequest().GetEntries()) { + const size_t msgSize = CalculateMessageSize(req); + if (msgSize > maxMessageSize) { + tooBig = true; + break; + } + size += msgSize; count += 1; } - if (tooBig) { - MakeError(Response_.MutableSendMessageBatch(), NErrors::INVALID_PARAMETER_VALUE, - TStringBuilder() << "Each message must be shorter than " << maxMessageSize << " bytes."); - return false; - } - - if (size > TLimits::MaxMessageSize) { - MakeError(Response_.MutableSendMessageBatch(), NErrors::BATCH_REQUEST_TOO_LONG); + if (tooBig) { + MakeError(Response_.MutableSendMessageBatch(), NErrors::INVALID_PARAMETER_VALUE, + TStringBuilder() << "Each message must be shorter than " << maxMessageSize << " bytes."); + return false; + } + + if (size > TLimits::MaxMessageSize) { + MakeError(Response_.MutableSendMessageBatch(), NErrors::BATCH_REQUEST_TOO_LONG); return false; } if (count == 0) { - MakeError(Response_.MutableSendMessageBatch(), NErrors::EMPTY_BATCH_REQUEST); + MakeError(Response_.MutableSendMessageBatch(), NErrors::EMPTY_BATCH_REQUEST); return false; } if (count > TLimits::MaxBatchSize) { - MakeError(Response_.MutableSendMessageBatch(), NErrors::TOO_MANY_ENTRIES_IN_BATCH_REQUEST); + MakeError(Response_.MutableSendMessageBatch(), NErrors::TOO_MANY_ENTRIES_IN_BATCH_REQUEST); return false; } if (!GetQueueName()) { - MakeError(Response_.MutableSendMessageBatch(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); + MakeError(Response_.MutableSendMessageBatch(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); return false; } } else { - if (CalculateMessageSize(Request()) > maxMessageSize) { - 
MakeError(Response_.MutableSendMessage(), NErrors::INVALID_PARAMETER_VALUE, TStringBuilder() << "Message must be shorter than " << maxMessageSize << " bytes."); + if (CalculateMessageSize(Request()) > maxMessageSize) { + MakeError(Response_.MutableSendMessage(), NErrors::INVALID_PARAMETER_VALUE, TStringBuilder() << "Message must be shorter than " << maxMessageSize << " bytes."); return false; } if (!GetQueueName()) { - MakeError(Response_.MutableSendMessage(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); + MakeError(Response_.MutableSendMessage(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); return false; } } @@ -161,76 +161,76 @@ private: return true; } - TError* MutableErrorDesc() override { - return IsBatch_ ? Response_.MutableSendMessageBatch()->MutableError() : Response_.MutableSendMessage()->MutableError(); - } - - // coverity[var_deref_model]: false positive - void DoAction() override { + TError* MutableErrorDesc() override { + return IsBatch_ ? Response_.MutableSendMessageBatch()->MutableError() : Response_.MutableSendMessage()->MutableError(); + } + + // coverity[var_deref_model]: false positive + void DoAction() override { Become(&TThis::StateFunc); - Y_VERIFY(QueueAttributes_.Defined()); - - const bool isFifo = IsFifoQueue(); - THolder<TSqsEvents::TEvSendMessageBatch> req; - for (size_t i = 0, size = IsBatch_ ? BatchRequest().EntriesSize() : 1; i < size; ++i) { - auto* currentRequest = IsBatch_ ? &BatchRequest().GetEntries(i) : &Request(); - auto* currentResponse = IsBatch_ ? Response_.MutableSendMessageBatch()->AddEntries() : Response_.MutableSendMessage(); - - currentResponse->SetId(currentRequest->GetId()); - if (!ValidateSingleRequest(*currentRequest, currentResponse)) { - continue; - } - - TString deduplicationId; - if (isFifo) { - const TString& dedupParam = currentRequest->GetMessageDeduplicationId(); - if (dedupParam) { - deduplicationId = dedupParam; - } else if (QueueAttributes_->ContentBasedDeduplication) { - try { - deduplicationId = CalcSHA256(currentRequest->GetMessageBody()); - } catch (const std::exception& ex) { - RLOG_SQS_ERROR("Failed to calculate SHA-256 of message body: " << ex.what()); - MakeError(currentResponse, NErrors::INTERNAL_FAILURE); - continue; - } - } - } - - if (!req) { - req = MakeHolder<TSqsEvents::TEvSendMessageBatch>(); - req->RequestId = RequestId_; - req->SenderId = UserSID_; - req->Messages.reserve(size); - } - RequestToReplyIndexMapping_.push_back(i); - req->Messages.emplace_back(); - auto& messageReq = req->Messages.back(); - messageReq.MessageId = CreateGuidAsString(); - messageReq.Body = currentRequest->GetMessageBody(); - messageReq.Delay = GetDelay(*currentRequest); - messageReq.DeduplicationId = std::move(deduplicationId); - messageReq.MessageGroupId = currentRequest->GetMessageGroupId(); - - { - TMessageAttributeList attrs; - for (const auto& a : currentRequest->GetMessageAttributes()) { - attrs.AddAttributes()->CopyFrom(a); - } - messageReq.Attributes = ProtobufToString(attrs); - } - } - - if (req) { + Y_VERIFY(QueueAttributes_.Defined()); + + const bool isFifo = IsFifoQueue(); + THolder<TSqsEvents::TEvSendMessageBatch> req; + for (size_t i = 0, size = IsBatch_ ? BatchRequest().EntriesSize() : 1; i < size; ++i) { + auto* currentRequest = IsBatch_ ? &BatchRequest().GetEntries(i) : &Request(); + auto* currentResponse = IsBatch_ ? 
Response_.MutableSendMessageBatch()->AddEntries() : Response_.MutableSendMessage(); + + currentResponse->SetId(currentRequest->GetId()); + if (!ValidateSingleRequest(*currentRequest, currentResponse)) { + continue; + } + + TString deduplicationId; + if (isFifo) { + const TString& dedupParam = currentRequest->GetMessageDeduplicationId(); + if (dedupParam) { + deduplicationId = dedupParam; + } else if (QueueAttributes_->ContentBasedDeduplication) { + try { + deduplicationId = CalcSHA256(currentRequest->GetMessageBody()); + } catch (const std::exception& ex) { + RLOG_SQS_ERROR("Failed to calculate SHA-256 of message body: " << ex.what()); + MakeError(currentResponse, NErrors::INTERNAL_FAILURE); + continue; + } + } + } + + if (!req) { + req = MakeHolder<TSqsEvents::TEvSendMessageBatch>(); + req->RequestId = RequestId_; + req->SenderId = UserSID_; + req->Messages.reserve(size); + } + RequestToReplyIndexMapping_.push_back(i); + req->Messages.emplace_back(); + auto& messageReq = req->Messages.back(); + messageReq.MessageId = CreateGuidAsString(); + messageReq.Body = currentRequest->GetMessageBody(); + messageReq.Delay = GetDelay(*currentRequest); + messageReq.DeduplicationId = std::move(deduplicationId); + messageReq.MessageGroupId = currentRequest->GetMessageGroupId(); + + { + TMessageAttributeList attrs; + for (const auto& a : currentRequest->GetMessageAttributes()) { + attrs.AddAttributes()->CopyFrom(a); + } + messageReq.Attributes = ProtobufToString(attrs); + } + } + + if (req) { Send(QueueLeader_, req.Release()); - } else { - SendReplyAndDie(); - } + } else { + SendReplyAndDie(); + } } - + TString DoGetQueueName() const override { - return IsBatch_ ? BatchRequest().GetQueueName() : Request().GetQueueName(); + return IsBatch_ ? BatchRequest().GetQueueName() : Request().GetQueueName(); } static TString ProtobufToString(const NProtoBuf::Message& proto) { @@ -240,68 +240,68 @@ private: } private: - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvSendMessageBatchResponse, HandleSendResponse); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvSendMessageBatchResponse, HandleSendResponse); } } - void HandleSendResponse(TSqsEvents::TEvSendMessageBatchResponse::TPtr& ev) { - const bool isFifo = IsFifoQueue(); - for (size_t i = 0, size = ev->Get()->Statuses.size(); i < size; ++i) { - const auto& status = ev->Get()->Statuses[i]; - Y_VERIFY(!IsBatch_ || RequestToReplyIndexMapping_[i] < BatchRequest().EntriesSize()); - auto* currentResponse = IsBatch_ ? Response_.MutableSendMessageBatch()->MutableEntries(RequestToReplyIndexMapping_[i]) : Response_.MutableSendMessage(); - auto* currentRequest = IsBatch_ ? &BatchRequest().GetEntries(RequestToReplyIndexMapping_[i]) : &Request(); - if (status.Status == TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::OK - || status.Status == TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::AlreadySent) { - currentResponse->SetMessageId(status.MessageId); - if (isFifo) { - currentResponse->SetSequenceNumber(status.SequenceNumber); - } - currentResponse->SetMD5OfMessageBody(MD5::Calc(currentRequest->GetMessageBody())); - if (currentRequest->MessageAttributesSize() > 0) { - const TString md5 = CalcMD5OfMessageAttributes(currentRequest->GetMessageAttributes()); - currentResponse->SetMD5OfMessageAttributes(md5); - RLOG_SQS_DEBUG("Calculating MD5 of message attributes. 
Request: " << *currentRequest << "\nMD5 of message attributes: " << md5); - } - - // counters - if (status.Status == TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::AlreadySent) { + void HandleSendResponse(TSqsEvents::TEvSendMessageBatchResponse::TPtr& ev) { + const bool isFifo = IsFifoQueue(); + for (size_t i = 0, size = ev->Get()->Statuses.size(); i < size; ++i) { + const auto& status = ev->Get()->Statuses[i]; + Y_VERIFY(!IsBatch_ || RequestToReplyIndexMapping_[i] < BatchRequest().EntriesSize()); + auto* currentResponse = IsBatch_ ? Response_.MutableSendMessageBatch()->MutableEntries(RequestToReplyIndexMapping_[i]) : Response_.MutableSendMessage(); + auto* currentRequest = IsBatch_ ? &BatchRequest().GetEntries(RequestToReplyIndexMapping_[i]) : &Request(); + if (status.Status == TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::OK + || status.Status == TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::AlreadySent) { + currentResponse->SetMessageId(status.MessageId); + if (isFifo) { + currentResponse->SetSequenceNumber(status.SequenceNumber); + } + currentResponse->SetMD5OfMessageBody(MD5::Calc(currentRequest->GetMessageBody())); + if (currentRequest->MessageAttributesSize() > 0) { + const TString md5 = CalcMD5OfMessageAttributes(currentRequest->GetMessageAttributes()); + currentResponse->SetMD5OfMessageAttributes(md5); + RLOG_SQS_DEBUG("Calculating MD5 of message attributes. Request: " << *currentRequest << "\nMD5 of message attributes: " << md5); + } + + // counters + if (status.Status == TSqsEvents::TEvSendMessageBatchResponse::ESendMessageStatus::AlreadySent) { INC_COUNTER_COUPLE(QueueCounters_, SendMessage_DeduplicationCount, deduplicated_count_per_second); - } else { + } else { INC_COUNTER_COUPLE(QueueCounters_, SendMessage_Count, sent_count_per_second); ADD_COUNTER_COUPLE(QueueCounters_, SendMessage_BytesWritten, sent_bytes_per_second, CalculateMessageSize(*currentRequest)); - } - } else { - MakeError(currentResponse, NErrors::INTERNAL_FAILURE); + } + } else { + MakeError(currentResponse, NErrors::INTERNAL_FAILURE); } } - SendReplyAndDie(); - } - - const TSendMessageRequest& Request() const { - return SourceSqsRequest_.GetSendMessage(); - } - - const TSendMessageBatchRequest& BatchRequest() const { - return SourceSqsRequest_.GetSendMessageBatch(); + SendReplyAndDie(); } + const TSendMessageRequest& Request() const { + return SourceSqsRequest_.GetSendMessage(); + } + + const TSendMessageBatchRequest& BatchRequest() const { + return SourceSqsRequest_.GetSendMessageBatch(); + } + private: - std::vector<size_t> RequestToReplyIndexMapping_; + std::vector<size_t> RequestToReplyIndexMapping_; const bool IsBatch_; }; -IActor* CreateSendMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TSendMessageActor(sourceSqsRequest, false, std::move(cb)); +IActor* CreateSendMessageActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TSendMessageActor(sourceSqsRequest, false, std::move(cb)); } -IActor* CreateSendMessageBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TSendMessageActor(sourceSqsRequest, true, std::move(cb)); +IActor* CreateSendMessageBatchActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TSendMessageActor(sourceSqsRequest, true, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git 
a/ydb/core/ymq/actor/service.cpp b/ydb/core/ymq/actor/service.cpp index 34c9a4ae46b..4061290d724 100644 --- a/ydb/core/ymq/actor/service.cpp +++ b/ydb/core/ymq/actor/service.cpp @@ -1,18 +1,18 @@ -#include "service.h" +#include "service.h" #include "auth_factory.h" -#include "cfg.h" -#include "executor.h" +#include "cfg.h" +#include "executor.h" #include "garbage_collector.h" -#include "local_rate_limiter_allocator.h" -#include "params.h" -#include "proxy_service.h" +#include "local_rate_limiter_allocator.h" +#include "params.h" +#include "proxy_service.h" #include "queue_leader.h" -#include "queues_list_reader.h" -#include "user_settings_names.h" -#include "user_settings_reader.h" +#include "queues_list_reader.h" +#include "user_settings_names.h" +#include "user_settings_reader.h" #include "index_events_processor.h" - + #include <ydb/public/lib/value/value.h> #include <ydb/public/sdk/cpp/client/ydb_types/credentials/credentials.h> #include <ydb/core/base/quoter.h> @@ -25,103 +25,103 @@ #include <ydb/core/tx/scheme_cache/scheme_cache.h> #include <ydb/core/base/counters.h> #include <library/cpp/lwtrace/mon/mon_lwtrace.h> - + #include <library/cpp/actors/core/events.h> #include <library/cpp/actors/core/hfunc.h> #include <library/cpp/logger/global/global.h> - -#include <util/generic/algorithm.h> -#include <util/generic/hash_set.h> + +#include <util/generic/algorithm.h> +#include <util/generic/hash_set.h> #include <util/stream/file.h> -#include <util/string/builder.h> -#include <util/string/cast.h> -#include <util/system/hostname.h> - -LWTRACE_USING(SQS_PROVIDER); - -template <> +#include <util/string/builder.h> +#include <util/string/cast.h> +#include <util/system/hostname.h> + +LWTRACE_USING(SQS_PROVIDER); + +template <> struct THash<NKikimr::NSQS::TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr> : THash<const NActors::TEventHandle<NKikimr::NSQS::TSqsEvents::TEvGetLeaderNodeForQueueRequest>*> { using TParent = THash<const NActors::TEventHandle<NKikimr::NSQS::TSqsEvents::TEvGetLeaderNodeForQueueRequest>*>; - using TParent::operator(); + using TParent::operator(); size_t operator()(const NKikimr::NSQS::TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr& ptr) const { - return TParent::operator()(ptr.Get()); - } -}; - -namespace NKikimr::NSQS { - -using NKikimr::NClient::TValue; - + return TParent::operator()(ptr.Get()); + } +}; + +namespace NKikimr::NSQS { + +using NKikimr::NClient::TValue; + const TString LEADER_CREATE_REASON_USER_REQUEST = "UserRequestOnNode"; const TString LEADER_CREATE_REASON_LOCAL_TABLET = "LocalTablet"; const TString LEADER_DESTROY_REASON_LAST_REF = "LastReference"; const TString LEADER_DESTROY_REASON_TABLET_PIPE_CLOSED = "TabletPipeClosed"; - -constexpr ui64 LIST_USERS_WAKEUP_TAG = 1; -constexpr ui64 LIST_QUEUES_WAKEUP_TAG = 2; - -constexpr size_t EARLY_REQUEST_USERS_LIST_MAX_BUDGET = 10; -constexpr i64 EARLY_REQUEST_QUEUES_LIST_MAX_BUDGET = 5; // per user - + +constexpr ui64 LIST_USERS_WAKEUP_TAG = 1; +constexpr ui64 LIST_QUEUES_WAKEUP_TAG = 2; + +constexpr size_t EARLY_REQUEST_USERS_LIST_MAX_BUDGET = 10; +constexpr i64 EARLY_REQUEST_QUEUES_LIST_MAX_BUDGET = 5; // per user + bool IsInternalFolder(const TString& folder) { return folder.StartsWith(".sys"); } -struct TSqsService::TQueueInfo : public TAtomicRefCount<TQueueInfo> { +struct TSqsService::TQueueInfo : public TAtomicRefCount<TQueueInfo> { TQueueInfo( TString userName, TString queueName, TString rootUrl, ui64 leaderTabletId, TString customName, TString folderId, ui64 version, ui64 shardsCount, const 
TIntrusivePtr<TUserCounters>& userCounters, const TActorId& schemeCache, TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> quoterResourcesForUser, bool insertCounters ) - : UserName_(std::move(userName)) - , QueueName_(std::move(queueName)) - , CustomName_(std::move(customName)) - , FolderId_(std::move(folderId)) + : UserName_(std::move(userName)) + , QueueName_(std::move(queueName)) + , CustomName_(std::move(customName)) + , FolderId_(std::move(folderId)) , Version_(version) , ShardsCount_(shardsCount) - , RootUrl_(std::move(rootUrl)) + , RootUrl_(std::move(rootUrl)) , LeaderTabletId_(leaderTabletId) - , Counters_(userCounters->CreateQueueCounters(QueueName_, FolderId_, insertCounters)) - , UserCounters_(userCounters) - , SchemeCache_(schemeCache) - , QuoterResourcesForUser_(std::move(quoterResourcesForUser)) - { - } - + , Counters_(userCounters->CreateQueueCounters(QueueName_, FolderId_, insertCounters)) + , UserCounters_(userCounters) + , SchemeCache_(schemeCache) + , QuoterResourcesForUser_(std::move(quoterResourcesForUser)) + { + } + void ConnectToLeaderTablet(bool firstTime = true) { if (ConnectingToLeaderTablet_) { - return; - } + return; + } ClosePipeToLeaderTablet(); ConnectingToLeaderTablet_ = true; - NTabletPipe::TClientConfig cfg; + NTabletPipe::TClientConfig cfg; cfg.AllowFollower = false; - cfg.CheckAliveness = true; + cfg.CheckAliveness = true; cfg.RetryPolicy = {.RetryLimitCount = 3, .MinRetryTime = TDuration::MilliSeconds(100), .DoFirstRetryInstantly = firstTime}; PipeClient_ = TActivationContext::Register(NTabletPipe::CreateClient(SelfId(), LeaderTabletId_, cfg)); LOG_SQS_DEBUG("Connect to leader tablet [" << LeaderTabletId_ << "] for queue [" << UserName_ << "/" << QueueName_ << "]. Pipe client actor: " << PipeClient_); - } - + } + void SetLeaderPipeServer(const TActorId& pipeServer) { LeaderPipeServer_ = pipeServer; - + const ui64 nodeId = LeaderPipeServer_.NodeId(); - if (nodeId == SelfId().NodeId()) { + if (nodeId == SelfId().NodeId()) { IncLocalLeaderRef(LEADER_CREATE_REASON_LOCAL_TABLET); // ref for service - } - } - + } + } + void ClosePipeToLeaderTablet() { if (LeaderPipeServer_.NodeId() == SelfId().NodeId()) { DecLocalLeaderRef(LEADER_DESTROY_REASON_TABLET_PIPE_CLOSED); // ref for service - } - if (PipeClient_) { - NTabletPipe::CloseClient(SelfId(), PipeClient_); + } + if (PipeClient_) { + NTabletPipe::CloseClient(SelfId(), PipeClient_); PipeClient_ = LeaderPipeServer_ = TActorId(); - } - } - + } + } + void StartLocalLeader(const TString& reason) { if (!LocalLeader_) { Counters_ = Counters_->GetCountersForLeaderNode(); @@ -138,9 +138,9 @@ struct TSqsService::TQueueInfo : public TAtomicRefCount<TQueueInfo> { (*counter)++; } } - } - } - + } + } + void StopLocalLeader(const TString& reason) { if (LocalLeader_) { Counters_ = Counters_->GetCountersForNotLeaderNode(); @@ -158,87 +158,87 @@ struct TSqsService::TQueueInfo : public TAtomicRefCount<TQueueInfo> { } } } - } - } - + } + } + void IncLocalLeaderRef(const TString& reason) { StartLocalLeader(reason); ++LocalLeaderRefCount_; - } - + } + void DecLocalLeaderRef(const TString& reason) { Y_VERIFY(LocalLeaderRefCount_ > 0); --LocalLeaderRefCount_; if (LocalLeaderRefCount_ == 0) { StopLocalLeader(reason); - } - } - - TActorIdentity SelfId() const { - return TActorIdentity(TActivationContext::AsActorContext().SelfID); - } - - TString UserName_; - TString QueueName_; - TString CustomName_; - TString FolderId_; + } + } + + TActorIdentity SelfId() const { + return 
TActorIdentity(TActivationContext::AsActorContext().SelfID); + } + + TString UserName_; + TString QueueName_; + TString CustomName_; + TString FolderId_; ui64 Version_; ui64 ShardsCount_; - TString RootUrl_; + TString RootUrl_; ui64 LeaderTabletId_ = 0; - TIntrusivePtr<TQueueCounters> Counters_; - TIntrusivePtr<TUserCounters> UserCounters_; + TIntrusivePtr<TQueueCounters> Counters_; + TIntrusivePtr<TUserCounters> UserCounters_; TActorId PipeClient_; TActorId LeaderPipeServer_; TActorId LocalLeader_; TActorId SchemeCache_; ui64 LocalLeaderRefCount_ = 0; - TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> QuoterResourcesForUser_; - - // State machine + TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> QuoterResourcesForUser_; + + // State machine bool ConnectingToLeaderTablet_ = false; - TInstant DisconnectedFrom_ = TInstant::Now(); + TInstant DisconnectedFrom_ = TInstant::Now(); THashSet<TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr> GetLeaderNodeRequests_; -}; - -struct TSqsService::TUserInfo : public TAtomicRefCount<TUserInfo> { - TUserInfo(TString userName, TIntrusivePtr<TUserCounters> userCounters) - : UserName_(std::move(userName)) - , Counters_(std::move(userCounters)) - { - } - - void InitQuoterResources() { - const auto& cfg = Cfg().GetQuotingConfig(); - if (cfg.GetEnableQuoting()) { - Y_VERIFY(cfg.HasLocalRateLimiterConfig() != cfg.HasKesusQuoterConfig()); // exactly one must be set - if (cfg.HasLocalRateLimiterConfig()) { // the only one that is fully supported - const auto& rates = cfg.GetLocalRateLimiterConfig().GetRates(); - // allocate resources - CreateObjectsQuoterResource_ = TLocalRateLimiterResource(rates.GetCreateObjectsRate()); - DeleteObjectsQuoterResource_ = TLocalRateLimiterResource(rates.GetDeleteObjectsRate()); - OtherActionsQuoterResource_ = TLocalRateLimiterResource(rates.GetOtherRequestsRate()); - // fill map - QuoterResources_ = new TSqsEvents::TQuoterResourcesForActions(); - { - TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, CreateObjectsQuoterResource_}; - QuoterResources_->ActionsResources.emplace(EAction::CreateUser, res); - - // https://st.yandex-team.ru/SQS-620 - QuoterResources_->CreateQueueAction = res; - } - { - TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, DeleteObjectsQuoterResource_}; - QuoterResources_->ActionsResources.emplace(EAction::DeleteQueue, res); - QuoterResources_->ActionsResources.emplace(EAction::DeleteQueueBatch, res); - QuoterResources_->ActionsResources.emplace(EAction::DeleteUser, res); - } - QuoterResources_->OtherActions.QuoterId = TEvQuota::TResourceLeaf::QuoterSystem; - QuoterResources_->OtherActions.ResourceId = OtherActionsQuoterResource_; - } - } - } - +}; + +struct TSqsService::TUserInfo : public TAtomicRefCount<TUserInfo> { + TUserInfo(TString userName, TIntrusivePtr<TUserCounters> userCounters) + : UserName_(std::move(userName)) + , Counters_(std::move(userCounters)) + { + } + + void InitQuoterResources() { + const auto& cfg = Cfg().GetQuotingConfig(); + if (cfg.GetEnableQuoting()) { + Y_VERIFY(cfg.HasLocalRateLimiterConfig() != cfg.HasKesusQuoterConfig()); // exactly one must be set + if (cfg.HasLocalRateLimiterConfig()) { // the only one that is fully supported + const auto& rates = cfg.GetLocalRateLimiterConfig().GetRates(); + // allocate resources + CreateObjectsQuoterResource_ = TLocalRateLimiterResource(rates.GetCreateObjectsRate()); + DeleteObjectsQuoterResource_ = 
TLocalRateLimiterResource(rates.GetDeleteObjectsRate()); + OtherActionsQuoterResource_ = TLocalRateLimiterResource(rates.GetOtherRequestsRate()); + // fill map + QuoterResources_ = new TSqsEvents::TQuoterResourcesForActions(); + { + TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, CreateObjectsQuoterResource_}; + QuoterResources_->ActionsResources.emplace(EAction::CreateUser, res); + + // https://st.yandex-team.ru/SQS-620 + QuoterResources_->CreateQueueAction = res; + } + { + TSqsEvents::TQuoterResourcesForActions::TResourceDescription res{TEvQuota::TResourceLeaf::QuoterSystem, DeleteObjectsQuoterResource_}; + QuoterResources_->ActionsResources.emplace(EAction::DeleteQueue, res); + QuoterResources_->ActionsResources.emplace(EAction::DeleteQueueBatch, res); + QuoterResources_->ActionsResources.emplace(EAction::DeleteUser, res); + } + QuoterResources_->OtherActions.QuoterId = TEvQuota::TResourceLeaf::QuoterSystem; + QuoterResources_->OtherActions.ResourceId = OtherActionsQuoterResource_; + } + } + } + size_t CountQueuesInFolder(const TString& folderId) const { if (!folderId) { return QueueByNameAndFolder_.size(); // for YaSQS @@ -247,73 +247,73 @@ struct TSqsService::TUserInfo : public TAtomicRefCount<TUserInfo> { return std::count_if(QueueByNameAndFolder_.begin(), QueueByNameAndFolder_.end(), [&folderId](const auto& p) { return p.first.second == folderId; }); } - TString UserName_; + TString UserName_; std::shared_ptr<const std::map<TString, TString>> Settings_ = std::make_shared<const std::map<TString, TString>>(); - TIntrusivePtr<TUserCounters> Counters_; - std::map<TString, TSqsService::TQueueInfoPtr> Queues_; - THashMap<std::pair<TString, TString>, TSqsService::TQueueInfoPtr> QueueByNameAndFolder_; // <custom name, folder id> -> queue info - TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> QuoterResources_; - TLocalRateLimiterResource CreateObjectsQuoterResource_; - TLocalRateLimiterResource DeleteObjectsQuoterResource_; - TLocalRateLimiterResource OtherActionsQuoterResource_; - i64 EarlyRequestQueuesListBudget_ = EARLY_REQUEST_QUEUES_LIST_MAX_BUDGET; // Defence from continuously requesting queues list. - - // State machine + TIntrusivePtr<TUserCounters> Counters_; + std::map<TString, TSqsService::TQueueInfoPtr> Queues_; + THashMap<std::pair<TString, TString>, TSqsService::TQueueInfoPtr> QueueByNameAndFolder_; // <custom name, folder id> -> queue info + TIntrusivePtr<TSqsEvents::TQuoterResourcesForActions> QuoterResources_; + TLocalRateLimiterResource CreateObjectsQuoterResource_; + TLocalRateLimiterResource DeleteObjectsQuoterResource_; + TLocalRateLimiterResource OtherActionsQuoterResource_; + i64 EarlyRequestQueuesListBudget_ = EARLY_REQUEST_QUEUES_LIST_MAX_BUDGET; // Defence from continuously requesting queues list. 
+ + // State machine THashMultiMap<TString, TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr> GetLeaderNodeRequests_; // queue name -> request - THashMultiMap<TString, TSqsEvents::TEvGetConfiguration::TPtr> GetConfigurationRequests_; // queue name -> request - THashMultiMap<std::pair<TString, TString>, TSqsEvents::TEvGetQueueId::TPtr> GetQueueIdRequests_; // <queue custom name, folder id> -> request + THashMultiMap<TString, TSqsEvents::TEvGetConfiguration::TPtr> GetConfigurationRequests_; // queue name -> request + THashMultiMap<std::pair<TString, TString>, TSqsEvents::TEvGetQueueId::TPtr> GetQueueIdRequests_; // <queue custom name, folder id> -> request THashMultiMap<TString, TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr> GetQueueFolderIdAndCustomNameRequests_; // queue name -> request THashMultiMap<TString, TSqsEvents::TEvCountQueues::TPtr> CountQueuesRequests_; // folder id -> request -}; - -static TString GetEndpoint(const NKikimrConfig::TSqsConfig& config) { - const TString& endpoint = config.GetEndpoint(); - if (endpoint) { - return endpoint; - } else { - return TStringBuilder() << "http://" << FQDNHostName() << ":" << config.GetHttpServerConfig().GetPort(); - } -} - +}; + +static TString GetEndpoint(const NKikimrConfig::TSqsConfig& config) { + const TString& endpoint = config.GetEndpoint(); + if (endpoint) { + return endpoint; + } else { + return TStringBuilder() << "http://" << FQDNHostName() << ":" << config.GetHttpServerConfig().GetPort(); + } +} + TSqsService::TSqsService(const TMaybe<ui32>& ydbPort) { if (ydbPort.Defined()) { YcSearchEventsConfig.GrpcPort = *ydbPort; } - DebugInfo->SqsServiceActorPtr = this; -} -TSqsService::~TSqsService() { - DebugInfo->SqsServiceActorPtr = nullptr; -} - -void TSqsService::Bootstrap() { - LOG_SQS_INFO("Start SQS service actor"); - LOG_SQS_DEBUG("SQS service config: " << Cfg()); - Become(&TSqsService::StateFunc); - - EarlyRequestUsersListBudget_ = EARLY_REQUEST_USERS_LIST_MAX_BUDGET; - - NLwTraceMonPage::ProbeRegistry().AddProbesList(LWTRACE_GET_PROBES(SQS_PROVIDER)); - - RootUrl_ = GetEndpoint(Cfg()); - - // Counters. - SqsCoreCounters_ = GetSqsServiceCounters(AppData()->Counters, "core"); + DebugInfo->SqsServiceActorPtr = this; +} +TSqsService::~TSqsService() { + DebugInfo->SqsServiceActorPtr = nullptr; +} + +void TSqsService::Bootstrap() { + LOG_SQS_INFO("Start SQS service actor"); + LOG_SQS_DEBUG("SQS service config: " << Cfg()); + Become(&TSqsService::StateFunc); + + EarlyRequestUsersListBudget_ = EARLY_REQUEST_USERS_LIST_MAX_BUDGET; + + NLwTraceMonPage::ProbeRegistry().AddProbesList(LWTRACE_GET_PROBES(SQS_PROVIDER)); + + RootUrl_ = GetEndpoint(Cfg()); + + // Counters. 
+ SqsCoreCounters_ = GetSqsServiceCounters(AppData()->Counters, "core"); YmqRootCounters_ = GetYmqPublicCounters(AppData()->Counters); AllocPoolCounters_ = std::make_shared<TAlignedPagePoolCounters>(AppData()->Counters, "sqs"); AggregatedUserCounters_ = MakeIntrusive<TUserCounters>( Cfg(), SqsCoreCounters_, nullptr, AllocPoolCounters_, TOTAL_COUNTER_LABEL, nullptr, true ); - AggregatedUserCounters_->ShowDetailedCounters(TInstant::Max()); - - InitSchemeCache(); - - Register(new TUserSettingsReader(AggregatedUserCounters_->GetTransactionCounters())); - QueuesListReader_ = Register(new TQueuesListReader(AggregatedUserCounters_->GetTransactionCounters())); - + AggregatedUserCounters_->ShowDetailedCounters(TInstant::Max()); + + InitSchemeCache(); + + Register(new TUserSettingsReader(AggregatedUserCounters_->GetTransactionCounters())); + QueuesListReader_ = Register(new TQueuesListReader(AggregatedUserCounters_->GetTransactionCounters())); + Register(CreateGarbageCollector(SchemeCache_, QueuesListReader_)); - RequestSqsUsersList(); - RequestSqsQueuesList(); + RequestSqsUsersList(); + RequestSqsQueuesList(); if (Cfg().HasYcSearchEventsConfig() && YcSearchEventsConfig.GrpcPort) { auto& ycSearchCfg = Cfg().GetYcSearchEventsConfig(); @@ -337,538 +337,538 @@ void TSqsService::Bootstrap() { YcSearchEventsConfig.Driver = MakeHolder<NYdb::TDriver>(driverConfig); MakeAndRegisterYcEventsProcessor(); } -} - -STATEFN(TSqsService::StateFunc) { - switch (ev->GetTypeRewrite()) { - // Interface events +} + +STATEFN(TSqsService::StateFunc) { + switch (ev->GetTypeRewrite()) { + // Interface events hFunc(TSqsEvents::TEvGetLeaderNodeForQueueRequest, HandleGetLeaderNodeForQueueRequest); hFunc(TSqsEvents::TEvQueueLeaderDecRef, HandleQueueLeaderDecRef); - hFunc(TSqsEvents::TEvGetQueueId, HandleGetQueueId); - hFunc(TSqsEvents::TEvGetQueueFolderIdAndCustomName, HandleGetQueueFolderIdAndCustomName); - hFunc(TSqsEvents::TEvCountQueues, HandleCountQueues); - - // Details - hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvGetQueueId, HandleGetQueueId); + hFunc(TSqsEvents::TEvGetQueueFolderIdAndCustomName, HandleGetQueueFolderIdAndCustomName); + hFunc(TSqsEvents::TEvCountQueues, HandleCountQueues); + + // Details + hFunc(TEvWakeup, HandleWakeup); hFunc(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult, HandleDescribeSchemeResult); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - hFunc(TEvTabletPipe::TEvClientDestroyed, HandlePipeClientDisconnected); - hFunc(TEvTabletPipe::TEvClientConnected, HandlePipeClientConnected); - hFunc(TSqsEvents::TEvGetConfiguration, HandleGetConfiguration); - hFunc(TSqsEvents::TEvSqsRequest, HandleSqsRequest); - hFunc(TSqsEvents::TEvInsertQueueCounters, HandleInsertQueueCounters); - hFunc(TSqsEvents::TEvUserSettingsChanged, HandleUserSettingsChanged); - hFunc(TSqsEvents::TEvQueuesList, HandleQueuesList); - default: - LOG_SQS_ERROR("Unknown type of event came to SQS service actor: " << ev->Type << " (" << ev->GetBase()->ToString() << "), sender: " << ev->Sender); - } -} - -void TSqsService::InitSchemeCache() { + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TEvTabletPipe::TEvClientDestroyed, HandlePipeClientDisconnected); + hFunc(TEvTabletPipe::TEvClientConnected, HandlePipeClientConnected); + hFunc(TSqsEvents::TEvGetConfiguration, HandleGetConfiguration); + hFunc(TSqsEvents::TEvSqsRequest, HandleSqsRequest); + hFunc(TSqsEvents::TEvInsertQueueCounters, HandleInsertQueueCounters); + hFunc(TSqsEvents::TEvUserSettingsChanged, HandleUserSettingsChanged); + 
hFunc(TSqsEvents::TEvQueuesList, HandleQueuesList); + default: + LOG_SQS_ERROR("Unknown type of event came to SQS service actor: " << ev->Type << " (" << ev->GetBase()->ToString() << "), sender: " << ev->Sender); + } +} + +void TSqsService::InitSchemeCache() { LOG_SQS_DEBUG("Enable scheme board scheme cache"); auto cacheCounters = GetServiceCounters(AppData()->Counters, "sqs")->GetSubgroup("subsystem", "schemecache"); auto cacheConfig = MakeIntrusive<NSchemeCache::TSchemeCacheConfig>(AppData(), cacheCounters); SchemeCache_ = Register(CreateSchemeBoardSchemeCache(cacheConfig.Get())); -} - -void TSqsService::ScheduleRequestSqsUsersList() { - if (!ScheduledRequestingUsersList_) { - ScheduledRequestingUsersList_ = true; - const TInstant now = TActivationContext::Now(); +} + +void TSqsService::ScheduleRequestSqsUsersList() { + if (!ScheduledRequestingUsersList_) { + ScheduledRequestingUsersList_ = true; + const TInstant now = TActivationContext::Now(); const TInstant whenToRequest = Max(LastRequestUsersListTime_ + TDuration::MilliSeconds(GetLeadersDescriberUpdateTimeMs()), now); - Schedule(whenToRequest - now, new TEvWakeup(LIST_USERS_WAKEUP_TAG)); - } -} - -void TSqsService::RequestSqsUsersList() { - if (RequestingUsersList_) { - return; - } - RequestingUsersList_ = true; - LOG_SQS_INFO("Request SQS users list"); - THolder<TEvTxUserProxy::TEvNavigate> navigateRequest(new TEvTxUserProxy::TEvNavigate()); + Schedule(whenToRequest - now, new TEvWakeup(LIST_USERS_WAKEUP_TAG)); + } +} + +void TSqsService::RequestSqsUsersList() { + if (RequestingUsersList_) { + return; + } + RequestingUsersList_ = true; + LOG_SQS_INFO("Request SQS users list"); + THolder<TEvTxUserProxy::TEvNavigate> navigateRequest(new TEvTxUserProxy::TEvNavigate()); NKikimrSchemeOp::TDescribePath* record = navigateRequest->Record.MutableDescribePath(); - record->SetPath(Cfg().GetRoot()); - Send(MakeTxProxyID(), navigateRequest.Release()); -} - -void TSqsService::ScheduleRequestSqsQueuesList() { - if (!ScheduledRequestingQueuesList_) { - ScheduledRequestingQueuesList_ = true; - const TInstant now = TActivationContext::Now(); + record->SetPath(Cfg().GetRoot()); + Send(MakeTxProxyID(), navigateRequest.Release()); +} + +void TSqsService::ScheduleRequestSqsQueuesList() { + if (!ScheduledRequestingQueuesList_) { + ScheduledRequestingQueuesList_ = true; + const TInstant now = TActivationContext::Now(); const TInstant whenToRequest = Max(LastRequestQueuesListTime_ + TDuration::MilliSeconds(GetLeadersDescriberUpdateTimeMs()), now); - Schedule(whenToRequest - now, new TEvWakeup(LIST_QUEUES_WAKEUP_TAG)); - } -} - -void TSqsService::RequestSqsQueuesList() { - if (!RequestingQueuesList_) { - RequestingQueuesList_ = true; - LOG_SQS_DEBUG("Request SQS queues list"); - Send(QueuesListReader_, new TSqsEvents::TEvReadQueuesList()); - } -} - -Y_WARN_UNUSED_RESULT bool TSqsService::RequestQueueListForUser(const TUserInfoPtr& user, const TString& reqId) { - if (RequestingQueuesList_) { - return true; - } - const i64 budget = Min(user->EarlyRequestQueuesListBudget_, EarlyRequestQueuesListMinBudget_ + EARLY_REQUEST_QUEUES_LIST_MAX_BUDGET); - if (budget <= EarlyRequestQueuesListMinBudget_) { - RLOG_SQS_REQ_WARN(reqId, "No budget to request queues list for user [" << user->UserName_ << "]. Min budget: " << EarlyRequestQueuesListMinBudget_ << ". User's budget: " << user->EarlyRequestQueuesListBudget_); - return false; // no budget - } - - RLOG_SQS_REQ_DEBUG(reqId, "Using budget to request queues list for user [" << user->UserName_ << "]. 
Current budget: " << budget << ". Min budget: " << EarlyRequestQueuesListMinBudget_); - user->EarlyRequestQueuesListBudget_ = budget - 1; - RequestSqsQueuesList(); - return true; -} - + Schedule(whenToRequest - now, new TEvWakeup(LIST_QUEUES_WAKEUP_TAG)); + } +} + +void TSqsService::RequestSqsQueuesList() { + if (!RequestingQueuesList_) { + RequestingQueuesList_ = true; + LOG_SQS_DEBUG("Request SQS queues list"); + Send(QueuesListReader_, new TSqsEvents::TEvReadQueuesList()); + } +} + +Y_WARN_UNUSED_RESULT bool TSqsService::RequestQueueListForUser(const TUserInfoPtr& user, const TString& reqId) { + if (RequestingQueuesList_) { + return true; + } + const i64 budget = Min(user->EarlyRequestQueuesListBudget_, EarlyRequestQueuesListMinBudget_ + EARLY_REQUEST_QUEUES_LIST_MAX_BUDGET); + if (budget <= EarlyRequestQueuesListMinBudget_) { + RLOG_SQS_REQ_WARN(reqId, "No budget to request queues list for user [" << user->UserName_ << "]. Min budget: " << EarlyRequestQueuesListMinBudget_ << ". User's budget: " << user->EarlyRequestQueuesListBudget_); + return false; // no budget + } + + RLOG_SQS_REQ_DEBUG(reqId, "Using budget to request queues list for user [" << user->UserName_ << "]. Current budget: " << budget << ". Min budget: " << EarlyRequestQueuesListMinBudget_); + user->EarlyRequestQueuesListBudget_ = budget - 1; + RequestSqsQueuesList(); + return true; +} + void TSqsService::HandleGetLeaderNodeForQueueRequest(TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr& ev) { - TUserInfoPtr user = GetUserOrWait(ev); - if (!user) { - return; - } - - const TString& reqId = ev->Get()->RequestId; - const TString& userName = ev->Get()->UserName; - const TString& queueName = ev->Get()->QueueName; - - const auto queueIt = user->Queues_.find(queueName); - if (queueIt == user->Queues_.end()) { - LWPROBE(QueueRequestCacheMiss, userName, queueName, reqId, ev->Get()->ToStringHeader()); - if (RequestQueueListForUser(user, reqId)) { - RLOG_SQS_REQ_DEBUG(reqId, "Queue [" << userName << "/" << queueName << "] was not found in sqs service list. Requesting queues list"); + TUserInfoPtr user = GetUserOrWait(ev); + if (!user) { + return; + } + + const TString& reqId = ev->Get()->RequestId; + const TString& userName = ev->Get()->UserName; + const TString& queueName = ev->Get()->QueueName; + + const auto queueIt = user->Queues_.find(queueName); + if (queueIt == user->Queues_.end()) { + LWPROBE(QueueRequestCacheMiss, userName, queueName, reqId, ev->Get()->ToStringHeader()); + if (RequestQueueListForUser(user, reqId)) { + RLOG_SQS_REQ_DEBUG(reqId, "Queue [" << userName << "/" << queueName << "] was not found in sqs service list. 
Requesting queues list"); user->GetLeaderNodeRequests_.emplace(queueName, std::move(ev)); - } else { + } else { Send(ev->Sender, new TSqsEvents::TEvGetLeaderNodeForQueueResponse(reqId, userName, queueName, TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::NoQueue)); - } - return; - } - + } + return; + } + if (!queueIt->second->LeaderPipeServer_) { - LWPROBE(QueueRequestCacheMiss, userName, queueName, reqId, ev->Get()->ToStringHeader()); + LWPROBE(QueueRequestCacheMiss, userName, queueName, reqId, ev->Get()->ToStringHeader()); RLOG_SQS_REQ_DEBUG(reqId, "Queue [" << userName << "/" << queueName << "] is waiting for connection to leader tablet."); - auto& queue = queueIt->second; + auto& queue = queueIt->second; queue->GetLeaderNodeRequests_.emplace(std::move(ev)); - return; - } - + return; + } + const ui64 nodeId = queueIt->second->LeaderPipeServer_.NodeId(); RLOG_SQS_REQ_DEBUG(reqId, "Leader node for queue [" << userName << "/" << queueName << "] is " << nodeId); Send(ev->Sender, new TSqsEvents::TEvGetLeaderNodeForQueueResponse(reqId, userName, queueName, nodeId)); -} - -void TSqsService::HandleGetConfiguration(TSqsEvents::TEvGetConfiguration::TPtr& ev) { - TUserInfoPtr user = GetUserOrWait(ev); - if (!user) { - return; - } - - const TString& reqId = ev->Get()->RequestId; - const TString& userName = ev->Get()->UserName; - const TString& queueName = ev->Get()->QueueName; - if (!queueName) { // common user configuration - RLOG_SQS_REQ_DEBUG(reqId, "Asked common user [" << userName << "] configuration"); - AnswerNotExists(ev, user); // exists = false, but all configuration details are present - return; - } - - const auto queueIt = user->Queues_.find(queueName); - if (queueIt == user->Queues_.end()) { - if (RequestQueueListForUser(user, reqId)) { - LWPROBE(QueueRequestCacheMiss, userName, queueName, reqId, ev->Get()->ToStringHeader()); - RLOG_SQS_REQ_DEBUG(reqId, "Queue [" << userName << "/" << queueName << "] was not found in sqs service list. Requesting queues list"); - user->GetConfigurationRequests_.emplace(queueName, std::move(ev)); - } else { - AnswerNotExists(ev, user); - } - return; - } - - ProcessConfigurationRequestForQueue(ev, user, queueIt->second); -} - -void TSqsService::AnswerNotExists(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo) { - if (ev->Get()->UserName && ev->Get()->QueueName) { - RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No queue [" << ev->Get()->QueueName << "] found in user [" << ev->Get()->UserName << "] record"); - } - auto answer = MakeHolder<TSqsEvents::TEvConfiguration>(); - answer->UserExists = userInfo != nullptr; - answer->QueueExists = false; - answer->RootUrl = RootUrl_; - answer->SqsCoreCounters = SqsCoreCounters_; - answer->UserCounters = userInfo ? userInfo->Counters_ : nullptr; - answer->Fail = false; - answer->SchemeCache = SchemeCache_; - answer->QuoterResources = userInfo ? 
userInfo->QuoterResources_ : nullptr; - Send(ev->Sender, answer.Release()); -} - +} + +void TSqsService::HandleGetConfiguration(TSqsEvents::TEvGetConfiguration::TPtr& ev) { + TUserInfoPtr user = GetUserOrWait(ev); + if (!user) { + return; + } + + const TString& reqId = ev->Get()->RequestId; + const TString& userName = ev->Get()->UserName; + const TString& queueName = ev->Get()->QueueName; + if (!queueName) { // common user configuration + RLOG_SQS_REQ_DEBUG(reqId, "Asked common user [" << userName << "] configuration"); + AnswerNotExists(ev, user); // exists = false, but all configuration details are present + return; + } + + const auto queueIt = user->Queues_.find(queueName); + if (queueIt == user->Queues_.end()) { + if (RequestQueueListForUser(user, reqId)) { + LWPROBE(QueueRequestCacheMiss, userName, queueName, reqId, ev->Get()->ToStringHeader()); + RLOG_SQS_REQ_DEBUG(reqId, "Queue [" << userName << "/" << queueName << "] was not found in sqs service list. Requesting queues list"); + user->GetConfigurationRequests_.emplace(queueName, std::move(ev)); + } else { + AnswerNotExists(ev, user); + } + return; + } + + ProcessConfigurationRequestForQueue(ev, user, queueIt->second); +} + +void TSqsService::AnswerNotExists(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo) { + if (ev->Get()->UserName && ev->Get()->QueueName) { + RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No queue [" << ev->Get()->QueueName << "] found in user [" << ev->Get()->UserName << "] record"); + } + auto answer = MakeHolder<TSqsEvents::TEvConfiguration>(); + answer->UserExists = userInfo != nullptr; + answer->QueueExists = false; + answer->RootUrl = RootUrl_; + answer->SqsCoreCounters = SqsCoreCounters_; + answer->UserCounters = userInfo ? userInfo->Counters_ : nullptr; + answer->Fail = false; + answer->SchemeCache = SchemeCache_; + answer->QuoterResources = userInfo ? userInfo->QuoterResources_ : nullptr; + Send(ev->Sender, answer.Release()); +} + void TSqsService::AnswerNotExists(TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr& ev, const TUserInfoPtr& userInfo) { const TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus status = userInfo ? 
TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::NoQueue : TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::NoUser; if (status == TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::NoUser) { - RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No user [" << ev->Get()->UserName << "] found"); - } else { - RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No queue [" << ev->Get()->QueueName << "] found for user [" << ev->Get()->UserName << "]"); - } - Send(ev->Sender, + RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No user [" << ev->Get()->UserName << "] found"); + } else { + RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No queue [" << ev->Get()->QueueName << "] found for user [" << ev->Get()->UserName << "]"); + } + Send(ev->Sender, new TSqsEvents::TEvGetLeaderNodeForQueueResponse(ev->Get()->RequestId, - ev->Get()->UserName, - ev->Get()->QueueName, - status)); -} - -void TSqsService::AnswerNotExists(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TUserInfoPtr& userInfo) { - if (userInfo) { - RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No queue [" << ev->Get()->QueueName << "] found for user [" << ev->Get()->UserName << "] while getting queue folder id"); - } else { - RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No user [" << ev->Get()->UserName << "] found while getting queue folder id"); - } - Send(ev->Sender, new TSqsEvents::TEvQueueFolderIdAndCustomName()); -} - -void TSqsService::AnswerNotExists(TSqsEvents::TEvGetQueueId::TPtr& ev, const TUserInfoPtr& userInfo) { - if (userInfo) { - RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No queue with custom name [" << ev->Get()->CustomQueueName << "] and folder id [" << ev->Get()->FolderId << "] found for user [" << ev->Get()->UserName << "]"); - } else { - RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No user [" << ev->Get()->UserName << "] found while getting queue id"); - } - Send(ev->Sender, new TSqsEvents::TEvQueueId()); -} - -void TSqsService::AnswerNotExists(TSqsEvents::TEvCountQueues::TPtr& ev, const TUserInfoPtr&) { - RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No user [" << ev->Get()->UserName << "] found while counting queues"); - Send(ev->Sender, new TSqsEvents::TEvCountQueuesResponse(false)); + ev->Get()->UserName, + ev->Get()->QueueName, + status)); +} + +void TSqsService::AnswerNotExists(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TUserInfoPtr& userInfo) { + if (userInfo) { + RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No queue [" << ev->Get()->QueueName << "] found for user [" << ev->Get()->UserName << "] while getting queue folder id"); + } else { + RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No user [" << ev->Get()->UserName << "] found while getting queue folder id"); + } + Send(ev->Sender, new TSqsEvents::TEvQueueFolderIdAndCustomName()); +} + +void TSqsService::AnswerNotExists(TSqsEvents::TEvGetQueueId::TPtr& ev, const TUserInfoPtr& userInfo) { + if (userInfo) { + RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No queue with custom name [" << ev->Get()->CustomQueueName << "] and folder id [" << ev->Get()->FolderId << "] found for user [" << ev->Get()->UserName << "]"); + } else { + RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No user [" << ev->Get()->UserName << "] found while getting queue id"); + } + Send(ev->Sender, new TSqsEvents::TEvQueueId()); +} + +void TSqsService::AnswerNotExists(TSqsEvents::TEvCountQueues::TPtr& ev, const TUserInfoPtr&) { + RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "No user [" << ev->Get()->UserName << "] found while counting queues"); + Send(ev->Sender, new TSqsEvents::TEvCountQueuesResponse(false)); } 
void TSqsService::AnswerFailed(TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr& ev, const TUserInfoPtr&) { Send(ev->Sender, new TSqsEvents::TEvGetLeaderNodeForQueueResponse(ev->Get()->RequestId, ev->Get()->UserName, ev->Get()->QueueName, TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::Error)); -} - -void TSqsService::AnswerFailed(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo) { - auto answer = MakeHolder<TSqsEvents::TEvConfiguration>(); - answer->RootUrl = RootUrl_; - answer->SqsCoreCounters = SqsCoreCounters_; - answer->UserCounters = userInfo ? userInfo->Counters_ : nullptr; - answer->Fail = true; - answer->SchemeCache = SchemeCache_; - answer->QuoterResources = userInfo ? userInfo->QuoterResources_ : nullptr; - Send(ev->Sender, answer.Release()); -} - -void TSqsService::AnswerFailed(TSqsEvents::TEvGetQueueId::TPtr& ev, const TUserInfoPtr&) { - Send(ev->Sender, new TSqsEvents::TEvQueueId(true)); -} - -void TSqsService::AnswerFailed(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TUserInfoPtr&) { - Send(ev->Sender, new TSqsEvents::TEvQueueFolderIdAndCustomName(true)); -} - -void TSqsService::AnswerFailed(TSqsEvents::TEvCountQueues::TPtr& ev, const TUserInfoPtr&) { - Send(ev->Sender, new TSqsEvents::TEvCountQueuesResponse(true)); -} - -void TSqsService::Answer(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TQueueInfoPtr& queueInfo) { - Send(ev->Sender, new TSqsEvents::TEvQueueFolderIdAndCustomName(queueInfo->FolderId_, queueInfo->CustomName_)); -} - +} + +void TSqsService::AnswerFailed(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo) { + auto answer = MakeHolder<TSqsEvents::TEvConfiguration>(); + answer->RootUrl = RootUrl_; + answer->SqsCoreCounters = SqsCoreCounters_; + answer->UserCounters = userInfo ? userInfo->Counters_ : nullptr; + answer->Fail = true; + answer->SchemeCache = SchemeCache_; + answer->QuoterResources = userInfo ? userInfo->QuoterResources_ : nullptr; + Send(ev->Sender, answer.Release()); +} + +void TSqsService::AnswerFailed(TSqsEvents::TEvGetQueueId::TPtr& ev, const TUserInfoPtr&) { + Send(ev->Sender, new TSqsEvents::TEvQueueId(true)); +} + +void TSqsService::AnswerFailed(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TUserInfoPtr&) { + Send(ev->Sender, new TSqsEvents::TEvQueueFolderIdAndCustomName(true)); +} + +void TSqsService::AnswerFailed(TSqsEvents::TEvCountQueues::TPtr& ev, const TUserInfoPtr&) { + Send(ev->Sender, new TSqsEvents::TEvCountQueuesResponse(true)); +} + +void TSqsService::Answer(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TQueueInfoPtr& queueInfo) { + Send(ev->Sender, new TSqsEvents::TEvQueueFolderIdAndCustomName(queueInfo->FolderId_, queueInfo->CustomName_)); +} + void TSqsService::AnswerLeaderlessConfiguration(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo, const TQueueInfoPtr& queueInfo) { - auto answer = MakeHolder<TSqsEvents::TEvConfiguration>(); - answer->UserExists = true; - answer->QueueExists = true; - answer->RootUrl = RootUrl_; - answer->SqsCoreCounters = SqsCoreCounters_; - answer->QueueCounters = queueInfo->Counters_; - answer->UserCounters = userInfo->Counters_; - answer->Fail = false; - answer->SchemeCache = SchemeCache_; - answer->QuoterResources = queueInfo ? 
queueInfo->QuoterResourcesForUser_ : nullptr; - Send(ev->Sender, answer.Release()); -} - -void TSqsService::ProcessConfigurationRequestForQueue(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo, const TQueueInfoPtr& queueInfo) { + auto answer = MakeHolder<TSqsEvents::TEvConfiguration>(); + answer->UserExists = true; + answer->QueueExists = true; + answer->RootUrl = RootUrl_; + answer->SqsCoreCounters = SqsCoreCounters_; + answer->QueueCounters = queueInfo->Counters_; + answer->UserCounters = userInfo->Counters_; + answer->Fail = false; + answer->SchemeCache = SchemeCache_; + answer->QuoterResources = queueInfo ? queueInfo->QuoterResourcesForUser_ : nullptr; + Send(ev->Sender, answer.Release()); +} + +void TSqsService::ProcessConfigurationRequestForQueue(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo, const TQueueInfoPtr& queueInfo) { if (ev->Get()->Flags & TSqsEvents::TEvGetConfiguration::EFlags::NeedQueueLeader) { IncLocalLeaderRef(ev->Sender, queueInfo, LEADER_CREATE_REASON_USER_REQUEST); RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "Forward configuration request to queue [" << queueInfo->UserName_ << "/" << queueInfo->QueueName_ << "] leader"); TActivationContext::Send(ev->Forward(queueInfo->LocalLeader_)); - } else { + } else { RLOG_SQS_REQ_DEBUG(ev->Get()->RequestId, "Answer configuration for queue [" << queueInfo->UserName_ << "/" << queueInfo->QueueName_ << "] without leader"); AnswerLeaderlessConfiguration(ev, userInfo, queueInfo); - } -} - + } +} + void TSqsService::HandleDescribeSchemeResult(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult::TPtr& ev) { - RequestingUsersList_ = false; - LastRequestUsersListTime_ = TActivationContext::Now(); - const auto& record = ev->Get()->GetRecord(); - const auto& desc = record.GetPathDescription(); - - LOG_SQS_DEBUG("Got info for main folder (user list): " << record); + RequestingUsersList_ = false; + LastRequestUsersListTime_ = TActivationContext::Now(); + const auto& record = ev->Get()->GetRecord(); + const auto& desc = record.GetPathDescription(); + + LOG_SQS_DEBUG("Got info for main folder (user list): " << record); if (record.GetStatus() != NKikimrScheme::StatusSuccess) { - LOG_SQS_WARN("Failed to get user list: " << record); - AnswerErrorToRequests(); - - ScheduleRequestSqsUsersList(); - return; - } - - THashSet<TString> usersNotProcessed; - usersNotProcessed.reserve(Users_.size()); - for (const auto& [userName, userInfo] : Users_) { - usersNotProcessed.insert(userName); - } - - for (const auto& child : desc.children()) { + LOG_SQS_WARN("Failed to get user list: " << record); + AnswerErrorToRequests(); + + ScheduleRequestSqsUsersList(); + return; + } + + THashSet<TString> usersNotProcessed; + usersNotProcessed.reserve(Users_.size()); + for (const auto& [userName, userInfo] : Users_) { + usersNotProcessed.insert(userName); + } + + for (const auto& child : desc.children()) { if (child.GetPathType() == NKikimrSchemeOp::EPathTypeDir) { - bool moved = false; - TUserInfoPtr user = MutableUser(child.GetName(), true, &moved); - usersNotProcessed.erase(child.GetName()); - if (moved) { - if (RequestQueueListForUser(user, "")) { - } else { - AnswerNoQueueToRequests(user); - AnswerCountQueuesRequests(user); - } - } - } - } - AnswerNoUserToRequests(); - - for (const TString& userName : usersNotProcessed) { - RemoveUser(userName); - } - - ScheduleRequestSqsUsersList(); -} - + bool moved = false; + TUserInfoPtr user = MutableUser(child.GetName(), true, &moved); + usersNotProcessed.erase(child.GetName()); 
+ if (moved) { + if (RequestQueueListForUser(user, "")) { + } else { + AnswerNoQueueToRequests(user); + AnswerCountQueuesRequests(user); + } + } + } + } + AnswerNoUserToRequests(); + + for (const TString& userName : usersNotProcessed) { + RemoveUser(userName); + } + + ScheduleRequestSqsUsersList(); +} + void TSqsService::HandleQueueLeaderDecRef(TSqsEvents::TEvQueueLeaderDecRef::TPtr& ev) { DecLocalLeaderRef(ev->Sender, LEADER_DESTROY_REASON_LAST_REF); -} - -void TSqsService::HandleGetQueueId(TSqsEvents::TEvGetQueueId::TPtr& ev) { - TUserInfoPtr user = GetUserOrWait(ev); - if (!user) { - return; - } - - const TString& reqId = ev->Get()->RequestId; - const TString& userName = ev->Get()->UserName; - const auto queueIt = user->QueueByNameAndFolder_.find(std::make_pair(ev->Get()->CustomQueueName, ev->Get()->FolderId)); - if (queueIt == user->QueueByNameAndFolder_.end()) { - if (RequestQueueListForUser(user, reqId)) { - RLOG_SQS_REQ_DEBUG(reqId, - "Queue with custom name [" << ev->Get()->CustomQueueName << "] and folder id [" - << ev->Get()->FolderId << "] was not found in sqs service list for user [" - << userName << "]. Requesting queues list"); - user->GetQueueIdRequests_.emplace(std::make_pair(ev->Get()->CustomQueueName, ev->Get()->FolderId), std::move(ev)); - } else { - AnswerNotExists(ev, user); - } - return; - } - - RLOG_SQS_REQ_DEBUG(reqId, "Queue id is " << queueIt->second->QueueName_ << " and version is " << queueIt->second->Version_ << " with shards count: " << queueIt->second->ShardsCount_); - Send(ev->Sender, new TSqsEvents::TEvQueueId(queueIt->second->QueueName_, queueIt->second->Version_, queueIt->second->ShardsCount_)); -} - -void TSqsService::HandleGetQueueFolderIdAndCustomName(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev) { - TUserInfoPtr user = GetUserOrWait(ev); - if (!user) { - return; - } - - const TString& reqId = ev->Get()->RequestId; - const TString& userName = ev->Get()->UserName; - const TString& queueName = ev->Get()->QueueName; - const auto queueIt = user->Queues_.find(queueName); - if (queueIt == user->Queues_.end()) { - if (RequestQueueListForUser(user, reqId)) { - LWPROBE(QueueRequestCacheMiss, userName, queueName, reqId, ev->Get()->ToStringHeader()); - RLOG_SQS_REQ_DEBUG(reqId, "Queue [" << userName << "/" << queueName << "] was not found in sqs service list. Requesting queues list"); - user->GetQueueFolderIdAndCustomNameRequests_.emplace(queueName, std::move(ev)); - } else { - AnswerNotExists(ev, user); - } - return; - } - - Answer(ev, queueIt->second); -} - -void TSqsService::HandleCountQueues(TSqsEvents::TEvCountQueues::TPtr& ev) { - TUserInfoPtr user = GetUserOrWait(ev); +} + +void TSqsService::HandleGetQueueId(TSqsEvents::TEvGetQueueId::TPtr& ev) { + TUserInfoPtr user = GetUserOrWait(ev); + if (!user) { + return; + } + + const TString& reqId = ev->Get()->RequestId; + const TString& userName = ev->Get()->UserName; + const auto queueIt = user->QueueByNameAndFolder_.find(std::make_pair(ev->Get()->CustomQueueName, ev->Get()->FolderId)); + if (queueIt == user->QueueByNameAndFolder_.end()) { + if (RequestQueueListForUser(user, reqId)) { + RLOG_SQS_REQ_DEBUG(reqId, + "Queue with custom name [" << ev->Get()->CustomQueueName << "] and folder id [" + << ev->Get()->FolderId << "] was not found in sqs service list for user [" + << userName << "]. 
Requesting queues list"); + user->GetQueueIdRequests_.emplace(std::make_pair(ev->Get()->CustomQueueName, ev->Get()->FolderId), std::move(ev)); + } else { + AnswerNotExists(ev, user); + } + return; + } + + RLOG_SQS_REQ_DEBUG(reqId, "Queue id is " << queueIt->second->QueueName_ << " and version is " << queueIt->second->Version_ << " with shards count: " << queueIt->second->ShardsCount_); + Send(ev->Sender, new TSqsEvents::TEvQueueId(queueIt->second->QueueName_, queueIt->second->Version_, queueIt->second->ShardsCount_)); +} + +void TSqsService::HandleGetQueueFolderIdAndCustomName(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev) { + TUserInfoPtr user = GetUserOrWait(ev); + if (!user) { + return; + } + + const TString& reqId = ev->Get()->RequestId; + const TString& userName = ev->Get()->UserName; + const TString& queueName = ev->Get()->QueueName; + const auto queueIt = user->Queues_.find(queueName); + if (queueIt == user->Queues_.end()) { + if (RequestQueueListForUser(user, reqId)) { + LWPROBE(QueueRequestCacheMiss, userName, queueName, reqId, ev->Get()->ToStringHeader()); + RLOG_SQS_REQ_DEBUG(reqId, "Queue [" << userName << "/" << queueName << "] was not found in sqs service list. Requesting queues list"); + user->GetQueueFolderIdAndCustomNameRequests_.emplace(queueName, std::move(ev)); + } else { + AnswerNotExists(ev, user); + } + return; + } + + Answer(ev, queueIt->second); +} + +void TSqsService::HandleCountQueues(TSqsEvents::TEvCountQueues::TPtr& ev) { + TUserInfoPtr user = GetUserOrWait(ev); if (!user) { return; } - Send(ev->Sender, new TSqsEvents::TEvCountQueuesResponse(false, true, user->CountQueuesInFolder(ev->Get()->FolderId))); -} - -template <class TEvent> -TSqsService::TUserInfoPtr TSqsService::GetUserOrWait(TAutoPtr<TEvent>& ev) { - const TString& reqId = ev->Get()->RequestId; - const TString& userName = ev->Get()->UserName; - if (!userName) { // common configuration - RLOG_SQS_REQ_DEBUG(reqId, "Asked common request " << ev->Get()->ToStringHeader()); - AnswerNotExists(ev, nullptr); - return nullptr; - } - - const auto userIt = Users_.find(userName); - if (userIt == Users_.end()) { - if (!RequestingUsersList_) { - RLOG_SQS_REQ_DEBUG(reqId, "User [" << userName << "] was not found in sqs service list. EarlyRequestUsersListBudget: " << EarlyRequestUsersListBudget_); - if (EarlyRequestUsersListBudget_ > 0) { - --EarlyRequestUsersListBudget_; - RequestSqsUsersList(); - } - } - if (RequestingUsersList_) { - LWPROBE(QueueRequestCacheMiss, userName, "", reqId, ev->Get()->ToStringHeader()); - RLOG_SQS_REQ_DEBUG(reqId, "User [" << userName << "] was not found in sqs service list. 
Wait for user list answer"); - InsertWaitingRequest(std::move(ev)); - } else { - RLOG_SQS_REQ_DEBUG(reqId, "User [" << userName << "] was not found in sqs service list"); - AnswerNotExists(ev, nullptr); - } - return nullptr; - } - return userIt->second; -} - -void TSqsService::HandlePipeClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev) { + Send(ev->Sender, new TSqsEvents::TEvCountQueuesResponse(false, true, user->CountQueuesInFolder(ev->Get()->FolderId))); +} + +template <class TEvent> +TSqsService::TUserInfoPtr TSqsService::GetUserOrWait(TAutoPtr<TEvent>& ev) { + const TString& reqId = ev->Get()->RequestId; + const TString& userName = ev->Get()->UserName; + if (!userName) { // common configuration + RLOG_SQS_REQ_DEBUG(reqId, "Asked common request " << ev->Get()->ToStringHeader()); + AnswerNotExists(ev, nullptr); + return nullptr; + } + + const auto userIt = Users_.find(userName); + if (userIt == Users_.end()) { + if (!RequestingUsersList_) { + RLOG_SQS_REQ_DEBUG(reqId, "User [" << userName << "] was not found in sqs service list. EarlyRequestUsersListBudget: " << EarlyRequestUsersListBudget_); + if (EarlyRequestUsersListBudget_ > 0) { + --EarlyRequestUsersListBudget_; + RequestSqsUsersList(); + } + } + if (RequestingUsersList_) { + LWPROBE(QueueRequestCacheMiss, userName, "", reqId, ev->Get()->ToStringHeader()); + RLOG_SQS_REQ_DEBUG(reqId, "User [" << userName << "] was not found in sqs service list. Wait for user list answer"); + InsertWaitingRequest(std::move(ev)); + } else { + RLOG_SQS_REQ_DEBUG(reqId, "User [" << userName << "] was not found in sqs service list"); + AnswerNotExists(ev, nullptr); + } + return nullptr; + } + return userIt->second; +} + +void TSqsService::HandlePipeClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev) { auto queueIt = LeaderTabletIdToQueue_.find(ev->Get()->TabletId); if (queueIt == LeaderTabletIdToQueue_.end()) { LOG_SQS_WARN("Connected to unknown queue leader. Tablet id: [" << ev->Get()->TabletId << "]. Client pipe actor: " << ev->Get()->ClientId << ". Server pipe actor: " << ev->Get()->ServerId); - return; - } - const auto& queue = queueIt->second; + return; + } + const auto& queue = queueIt->second; queue->ConnectingToLeaderTablet_ = false; - - if (ev->Get()->Status != NKikimrProto::OK) { + + if (ev->Get()->Status != NKikimrProto::OK) { LOG_SQS_WARN("Failed to connect to queue [" << queue->UserName_ << "/" << queue->QueueName_ << "] leader tablet. Tablet id: [" << ev->Get()->TabletId << "]. 
Status: " << NKikimrProto::EReplyStatus_Name(ev->Get()->Status)); - const TInstant now = TActivationContext::Now(); - const TDuration timeDisconnecned = now - queue->DisconnectedFrom_; - const TDuration leaderConnectTimeout = TDuration::MilliSeconds(Cfg().GetLeaderConnectTimeoutMs()); + const TInstant now = TActivationContext::Now(); + const TDuration timeDisconnecned = now - queue->DisconnectedFrom_; + const TDuration leaderConnectTimeout = TDuration::MilliSeconds(Cfg().GetLeaderConnectTimeoutMs()); if (timeDisconnecned >= leaderConnectTimeout) { for (auto& req : queue->GetLeaderNodeRequests_) { RLOG_SQS_REQ_WARN(req->Get()->RequestId, "Can't connect to leader tablet for " << timeDisconnecned); Send(req->Sender, new TSqsEvents::TEvGetLeaderNodeForQueueResponse(req->Get()->RequestId, req->Get()->UserName, req->Get()->QueueName, TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::FailedToConnectToLeader)); - } + } queue->GetLeaderNodeRequests_.clear(); - } + } queue->ConnectToLeaderTablet(false); - return; - } - + return; + } + LOG_SQS_DEBUG("Connected to queue [" << queueIt->second->UserName_ << "/" << queueIt->second->QueueName_ << "] leader. Tablet id: [" << ev->Get()->TabletId << "]. Client pipe actor: " << ev->Get()->ClientId << ". Server pipe actor: " << ev->Get()->ServerId); queue->SetLeaderPipeServer(ev->Get()->ServerId); for (auto& req : queue->GetLeaderNodeRequests_) { RLOG_SQS_REQ_DEBUG(req->Get()->RequestId, "Connected to leader tablet. Node id: " << queue->LeaderPipeServer_.NodeId()); Send(req->Sender, new TSqsEvents::TEvGetLeaderNodeForQueueResponse(req->Get()->RequestId, req->Get()->UserName, req->Get()->QueueName, queue->LeaderPipeServer_.NodeId())); - } + } queue->GetLeaderNodeRequests_.clear(); -} - -void TSqsService::HandlePipeClientDisconnected(TEvTabletPipe::TEvClientDestroyed::TPtr& ev) { +} + +void TSqsService::HandlePipeClientDisconnected(TEvTabletPipe::TEvClientDestroyed::TPtr& ev) { auto queueIt = LeaderTabletIdToQueue_.find(ev->Get()->TabletId); if (queueIt != LeaderTabletIdToQueue_.end()) { queueIt->second->ConnectingToLeaderTablet_ = false; - queueIt->second->DisconnectedFrom_ = TActivationContext::Now(); + queueIt->second->DisconnectedFrom_ = TActivationContext::Now(); LOG_SQS_DEBUG("Disconnected from queue [" << queueIt->second->UserName_ << "/" << queueIt->second->QueueName_ << "] leader. Tablet id: [" << ev->Get()->TabletId << "]. Client pipe actor: " << ev->Get()->ClientId << ". Server pipe actor: " << ev->Get()->ServerId); queueIt->second->ConnectToLeaderTablet(false); - } else { + } else { LOG_SQS_WARN("Disconnected from unknown queue leader. Tablet id: [" << ev->Get()->TabletId << "]. Client pipe actor: " << ev->Get()->ClientId << ". 
Server pipe actor: " << ev->Get()->ServerId); - } -} - -void TSqsService::HandleQueuesList(TSqsEvents::TEvQueuesList::TPtr& ev) { - RequestingQueuesList_ = false; - LastRequestQueuesListTime_ = TActivationContext::Now(); - ScheduleRequestSqsQueuesList(); - if (ev->Get()->Success) { - auto newListIt = ev->Get()->SortedQueues.begin(); - auto usersIt = Users_.begin(); - while (newListIt != ev->Get()->SortedQueues.end() || usersIt != Users_.end()) { - if (usersIt == Users_.end() || newListIt != ev->Get()->SortedQueues.end() && newListIt->UserName < usersIt->second->UserName_) { - usersIt = MutableUserIter(newListIt->UserName); // insert new user - } - const TUserInfoPtr user = usersIt->second; - auto oldListIt = user->Queues_.begin(); - while (oldListIt != user->Queues_.end() && newListIt != ev->Get()->SortedQueues.end() && newListIt->UserName == user->UserName_) { - if (oldListIt->first == newListIt->QueueName) { // the same queue + } +} + +void TSqsService::HandleQueuesList(TSqsEvents::TEvQueuesList::TPtr& ev) { + RequestingQueuesList_ = false; + LastRequestQueuesListTime_ = TActivationContext::Now(); + ScheduleRequestSqsQueuesList(); + if (ev->Get()->Success) { + auto newListIt = ev->Get()->SortedQueues.begin(); + auto usersIt = Users_.begin(); + while (newListIt != ev->Get()->SortedQueues.end() || usersIt != Users_.end()) { + if (usersIt == Users_.end() || newListIt != ev->Get()->SortedQueues.end() && newListIt->UserName < usersIt->second->UserName_) { + usersIt = MutableUserIter(newListIt->UserName); // insert new user + } + const TUserInfoPtr user = usersIt->second; + auto oldListIt = user->Queues_.begin(); + while (oldListIt != user->Queues_.end() && newListIt != ev->Get()->SortedQueues.end() && newListIt->UserName == user->UserName_) { + if (oldListIt->first == newListIt->QueueName) { // the same queue if (oldListIt->second->LeaderTabletId_ != newListIt->LeaderTabletId) { LOG_SQS_WARN("Leader tablet id for queue " << oldListIt->first << " has been changed from " << oldListIt->second->LeaderTabletId_ << " to " << newListIt->LeaderTabletId << " (queue was recreated)"); THashSet<TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr> oldQueueRequests; oldQueueRequests.swap(oldListIt->second->GetLeaderNodeRequests_); - - RemoveQueue(user->UserName_, newListIt->QueueName); - oldListIt = AddQueue(user->UserName_, - newListIt->QueueName, + + RemoveQueue(user->UserName_, newListIt->QueueName); + oldListIt = AddQueue(user->UserName_, + newListIt->QueueName, newListIt->LeaderTabletId, - newListIt->CustomName, - newListIt->FolderId, - newListIt->Version, - newListIt->ShardsCount, - newListIt->CreatedTimestamp); + newListIt->CustomName, + newListIt->FolderId, + newListIt->Version, + newListIt->ShardsCount, + newListIt->CreatedTimestamp); Y_VERIFY(oldListIt->second->ConnectingToLeaderTablet_); oldQueueRequests.swap(oldListIt->second->GetLeaderNodeRequests_); - } - ++oldListIt; - ++newListIt; - } else if (oldListIt->first < newListIt->QueueName) { - const TString name = oldListIt->first; - ++oldListIt; - RemoveQueue(user->UserName_, name); - } else { - oldListIt = AddQueue(user->UserName_, - newListIt->QueueName, + } + ++oldListIt; + ++newListIt; + } else if (oldListIt->first < newListIt->QueueName) { + const TString name = oldListIt->first; + ++oldListIt; + RemoveQueue(user->UserName_, name); + } else { + oldListIt = AddQueue(user->UserName_, + newListIt->QueueName, newListIt->LeaderTabletId, - newListIt->CustomName, - newListIt->FolderId, - newListIt->Version, - newListIt->ShardsCount, - 
newListIt->CreatedTimestamp); - ++oldListIt; - ++newListIt; - } - } - while (oldListIt != user->Queues_.end()) { - TString name = oldListIt->first; - ++oldListIt; - RemoveQueue(user->UserName_, name); - } - while (newListIt != ev->Get()->SortedQueues.end() && newListIt->UserName == user->UserName_) { - AddQueue(user->UserName_, - newListIt->QueueName, + newListIt->CustomName, + newListIt->FolderId, + newListIt->Version, + newListIt->ShardsCount, + newListIt->CreatedTimestamp); + ++oldListIt; + ++newListIt; + } + } + while (oldListIt != user->Queues_.end()) { + TString name = oldListIt->first; + ++oldListIt; + RemoveQueue(user->UserName_, name); + } + while (newListIt != ev->Get()->SortedQueues.end() && newListIt->UserName == user->UserName_) { + AddQueue(user->UserName_, + newListIt->QueueName, newListIt->LeaderTabletId, - newListIt->CustomName, - newListIt->FolderId, - newListIt->Version, - newListIt->ShardsCount, - newListIt->CreatedTimestamp); - ++newListIt; - } - - // answer to all CountQueues requests - AnswerCountQueuesRequests(user); - AnswerNoQueueToRequests(user); - - if (usersIt != Users_.end()) { - ++usersIt; - } - } + newListIt->CustomName, + newListIt->FolderId, + newListIt->Version, + newListIt->ShardsCount, + newListIt->CreatedTimestamp); + ++newListIt; + } + + // answer to all CountQueues requests + AnswerCountQueuesRequests(user); + AnswerNoQueueToRequests(user); + + if (usersIt != Users_.end()) { + ++usersIt; + } + } NotifyLocalDeadLetterQueuesLeaders(ev->Get()->SortedQueues); - } else { - for (const auto& [userName, user] : Users_) { - AnswerErrorToRequests(user); - } - } -} + } else { + for (const auto& [userName, user] : Users_) { + AnswerErrorToRequests(user); + } + } +} void TSqsService::NotifyLocalDeadLetterQueuesLeaders(const std::vector<TSqsEvents::TEvQueuesList::TQueueRecord>& sortedQueues) const { using TKnownDeadLetterQueues = THashMap<TString, THashSet<std::pair<TString, TString>>>; @@ -896,57 +896,57 @@ void TSqsService::NotifyLocalDeadLetterQueuesLeaders(const std::vector<TSqsEvent } } -void TSqsService::AnswerCountQueuesRequests(const TUserInfoPtr& user) { - while (!user->CountQueuesRequests_.empty()) { - const TString folderId = user->CountQueuesRequests_.begin()->first; - const auto queuesCount = user->CountQueuesInFolder(folderId); +void TSqsService::AnswerCountQueuesRequests(const TUserInfoPtr& user) { + while (!user->CountQueuesRequests_.empty()) { + const TString folderId = user->CountQueuesRequests_.begin()->first; + const auto queuesCount = user->CountQueuesInFolder(folderId); - auto requests = user->CountQueuesRequests_.equal_range(folderId); + auto requests = user->CountQueuesRequests_.equal_range(folderId); - for (auto i = requests.first; i != requests.second; ++i) { - auto& req = i->second; - Send(req->Sender, new TSqsEvents::TEvCountQueuesResponse(false, true, queuesCount)); + for (auto i = requests.first; i != requests.second; ++i) { + auto& req = i->second; + Send(req->Sender, new TSqsEvents::TEvCountQueuesResponse(false, true, queuesCount)); } - user->CountQueuesRequests_.erase(requests.first, requests.second); - } -} - -void TSqsService::HandleUserSettingsChanged(TSqsEvents::TEvUserSettingsChanged::TPtr& ev) { - LOG_SQS_TRACE("User [" << ev->Get()->UserName << "] settings changed. 
Changed " << ev->Get()->Diff->size() << " items"); - auto user = MutableUser(ev->Get()->UserName, false); - const auto& diff = ev->Get()->Diff; - const auto& newSettings = ev->Get()->Settings; - if (IsIn(*diff, USER_SETTING_DISABLE_COUNTERS)) { - const auto value = newSettings->find(USER_SETTING_DISABLE_COUNTERS); - Y_VERIFY(value != newSettings->end()); - const bool disableCounters = FromStringWithDefault(value->second, false); - user->Counters_->DisableCounters(disableCounters); - } - - if (IsIn(*diff, USER_SETTING_SHOW_DETAILED_COUNTERS_DEADLINE_MS)) { - const auto value = newSettings->find(USER_SETTING_SHOW_DETAILED_COUNTERS_DEADLINE_MS); - Y_VERIFY(value != newSettings->end()); - const ui64 deadline = FromStringWithDefault(value->second, 0ULL); - user->Counters_->ShowDetailedCounters(TInstant::MilliSeconds(deadline)); - } - - if (IsIn(*diff, USER_SETTING_EXPORT_TRANSACTION_COUNTERS)) { - const auto value = newSettings->find(USER_SETTING_EXPORT_TRANSACTION_COUNTERS); - Y_VERIFY(value != newSettings->end()); - const bool needExport = FromStringWithDefault(value->second, false); - user->Counters_->ExportTransactionCounters(needExport); - } -} - -TSqsService::TUserInfoPtr TSqsService::MutableUser(const TString& userName, bool moveUserRequestsToUserRecord, bool* requestsWereMoved) { - return MutableUserIter(userName, moveUserRequestsToUserRecord, requestsWereMoved)->second; -} - -TSqsService::TUsersMap::iterator TSqsService::MutableUserIter(const TString& userName, bool moveUserRequestsToUserRecord, bool* requestsWereMoved) { - auto userIt = Users_.find(userName); - if (userIt == Users_.end()) { - LOG_SQS_INFO("Creating user info record for user [" << userName << "]"); + user->CountQueuesRequests_.erase(requests.first, requests.second); + } +} + +void TSqsService::HandleUserSettingsChanged(TSqsEvents::TEvUserSettingsChanged::TPtr& ev) { + LOG_SQS_TRACE("User [" << ev->Get()->UserName << "] settings changed. 
Changed " << ev->Get()->Diff->size() << " items"); + auto user = MutableUser(ev->Get()->UserName, false); + const auto& diff = ev->Get()->Diff; + const auto& newSettings = ev->Get()->Settings; + if (IsIn(*diff, USER_SETTING_DISABLE_COUNTERS)) { + const auto value = newSettings->find(USER_SETTING_DISABLE_COUNTERS); + Y_VERIFY(value != newSettings->end()); + const bool disableCounters = FromStringWithDefault(value->second, false); + user->Counters_->DisableCounters(disableCounters); + } + + if (IsIn(*diff, USER_SETTING_SHOW_DETAILED_COUNTERS_DEADLINE_MS)) { + const auto value = newSettings->find(USER_SETTING_SHOW_DETAILED_COUNTERS_DEADLINE_MS); + Y_VERIFY(value != newSettings->end()); + const ui64 deadline = FromStringWithDefault(value->second, 0ULL); + user->Counters_->ShowDetailedCounters(TInstant::MilliSeconds(deadline)); + } + + if (IsIn(*diff, USER_SETTING_EXPORT_TRANSACTION_COUNTERS)) { + const auto value = newSettings->find(USER_SETTING_EXPORT_TRANSACTION_COUNTERS); + Y_VERIFY(value != newSettings->end()); + const bool needExport = FromStringWithDefault(value->second, false); + user->Counters_->ExportTransactionCounters(needExport); + } +} + +TSqsService::TUserInfoPtr TSqsService::MutableUser(const TString& userName, bool moveUserRequestsToUserRecord, bool* requestsWereMoved) { + return MutableUserIter(userName, moveUserRequestsToUserRecord, requestsWereMoved)->second; +} + +TSqsService::TUsersMap::iterator TSqsService::MutableUserIter(const TString& userName, bool moveUserRequestsToUserRecord, bool* requestsWereMoved) { + auto userIt = Users_.find(userName); + if (userIt == Users_.end()) { + LOG_SQS_INFO("Creating user info record for user [" << userName << "]"); bool isInternal = IsInternalFolder(userName); if (isInternal) { LOG_SQS_INFO("[" << userName << "] is considered and internal service folder, will not create YMQ counters"); @@ -959,337 +959,337 @@ TSqsService::TUsersMap::iterator TSqsService::MutableUserIter(const TString& use AllocPoolCounters_, userName, AggregatedUserCounters_, false ) ); - user->InitQuoterResources(); - userIt = Users_.emplace(userName, user).first; - - if (moveUserRequestsToUserRecord) { - // move user's requests to user info - size_t moved = 0; + user->InitQuoterResources(); + userIt = Users_.emplace(userName, user).first; + + if (moveUserRequestsToUserRecord) { + // move user's requests to user info + size_t moved = 0; moved += MoveUserRequests(user, GetLeaderNodeRequests_); - moved += MoveUserRequests(user, GetConfigurationRequests_); - moved += MoveUserRequests(user, GetQueueIdRequests_); - moved += MoveUserRequests(user, GetQueueFolderIdAndCustomNameRequests_); - moved += MoveUserRequests(user, CountQueuesRequests_); - - if (requestsWereMoved) { - *requestsWereMoved = moved != 0; - } - } - } - return userIt; -} - -void TSqsService::RemoveUser(const TString& userName) { - const auto userIt = Users_.find(userName); - if (userIt == Users_.end()) { - return; - } - - LOG_SQS_INFO("Removing user info record for user [" << userName << "]"); - const auto user = userIt->second; - while (!user->Queues_.empty()) { - TString queueName = user->Queues_.begin()->first; - RemoveQueue(userName, queueName); - } - - AnswerNoQueueToRequests(user); - for (auto&& [folderId, req] : user->CountQueuesRequests_) { - Send(req->Sender, new TSqsEvents::TEvCountQueuesResponse(false)); - } - user->CountQueuesRequests_.clear(); - - user->Counters_->RemoveCounters(); - Users_.erase(userIt); -} - -void TSqsService::RemoveQueue(const TString& userName, const TString& queue) { - 
LOG_SQS_INFO("Removing queue record for queue [" << userName << "/" << queue << "]"); - const auto userIt = Users_.find(userName); - if (userIt == Users_.end()) { - LOG_SQS_WARN("Attempt to remove queue record for queue [" << userName << "/" << queue << "], but there is no user record"); - return; - } - const auto queueIt = userIt->second->Queues_.find(queue); - if (queueIt == userIt->second->Queues_.end()) { - LOG_SQS_WARN("Attempt to remove queue record for queue [" << userName << "/" << queue << "], but there is no queue record"); - return; - } - - auto queuePtr = queueIt->second; + moved += MoveUserRequests(user, GetConfigurationRequests_); + moved += MoveUserRequests(user, GetQueueIdRequests_); + moved += MoveUserRequests(user, GetQueueFolderIdAndCustomNameRequests_); + moved += MoveUserRequests(user, CountQueuesRequests_); + + if (requestsWereMoved) { + *requestsWereMoved = moved != 0; + } + } + } + return userIt; +} + +void TSqsService::RemoveUser(const TString& userName) { + const auto userIt = Users_.find(userName); + if (userIt == Users_.end()) { + return; + } + + LOG_SQS_INFO("Removing user info record for user [" << userName << "]"); + const auto user = userIt->second; + while (!user->Queues_.empty()) { + TString queueName = user->Queues_.begin()->first; + RemoveQueue(userName, queueName); + } + + AnswerNoQueueToRequests(user); + for (auto&& [folderId, req] : user->CountQueuesRequests_) { + Send(req->Sender, new TSqsEvents::TEvCountQueuesResponse(false)); + } + user->CountQueuesRequests_.clear(); + + user->Counters_->RemoveCounters(); + Users_.erase(userIt); +} + +void TSqsService::RemoveQueue(const TString& userName, const TString& queue) { + LOG_SQS_INFO("Removing queue record for queue [" << userName << "/" << queue << "]"); + const auto userIt = Users_.find(userName); + if (userIt == Users_.end()) { + LOG_SQS_WARN("Attempt to remove queue record for queue [" << userName << "/" << queue << "], but there is no user record"); + return; + } + const auto queueIt = userIt->second->Queues_.find(queue); + if (queueIt == userIt->second->Queues_.end()) { + LOG_SQS_WARN("Attempt to remove queue record for queue [" << userName << "/" << queue << "], but there is no queue record"); + return; + } + + auto queuePtr = queueIt->second; queuePtr->ClosePipeToLeaderTablet(); for (auto& req : queuePtr->GetLeaderNodeRequests_) { - RLOG_SQS_REQ_DEBUG(req->Get()->RequestId, "Removing queue [" << req->Get()->UserName << "/" << req->Get()->QueueName << "] from sqs service info"); + RLOG_SQS_REQ_DEBUG(req->Get()->RequestId, "Removing queue [" << req->Get()->UserName << "/" << req->Get()->QueueName << "] from sqs service info"); Send(req->Sender, new TSqsEvents::TEvGetLeaderNodeForQueueResponse(req->Get()->RequestId, req->Get()->UserName, req->Get()->QueueName, TSqsEvents::TEvGetLeaderNodeForQueueResponse::EStatus::NoQueue)); - } + } queuePtr->GetLeaderNodeRequests_.clear(); LeaderTabletIdToQueue_.erase(queuePtr->LeaderTabletId_); - userIt->second->QueueByNameAndFolder_.erase(std::make_pair(queuePtr->CustomName_, queuePtr->FolderId_)); - - userIt->second->Queues_.erase(queueIt); - queuePtr->Counters_->RemoveCounters(); -} - -std::map<TString, TSqsService::TQueueInfoPtr>::iterator TSqsService::AddQueue(const TString& userName, - const TString& queue, + userIt->second->QueueByNameAndFolder_.erase(std::make_pair(queuePtr->CustomName_, queuePtr->FolderId_)); + + userIt->second->Queues_.erase(queueIt); + queuePtr->Counters_->RemoveCounters(); +} + +std::map<TString, TSqsService::TQueueInfoPtr>::iterator 
TSqsService::AddQueue(const TString& userName, + const TString& queue, ui64 leaderTabletId, - const TString& customName, - const TString& folderId, + const TString& customName, + const TString& folderId, const ui64 version, const ui64 shardsCount, - const TInstant createdTimestamp) { - auto user = MutableUser(userName, false); // don't move requests because they are already moved in our caller - const TInstant now = TActivationContext::Now(); - const TInstant timeToInsertCounters = createdTimestamp + TDuration::MilliSeconds(Cfg().GetQueueCountersExportDelayMs()); - const bool insertCounters = now >= timeToInsertCounters; + const TInstant createdTimestamp) { + auto user = MutableUser(userName, false); // don't move requests because they are already moved in our caller + const TInstant now = TActivationContext::Now(); + const TInstant timeToInsertCounters = createdTimestamp + TDuration::MilliSeconds(Cfg().GetQueueCountersExportDelayMs()); + const bool insertCounters = now >= timeToInsertCounters; auto ret = user->Queues_.insert(std::make_pair(queue, TQueueInfoPtr(new TQueueInfo(userName, queue, RootUrl_, leaderTabletId, customName, folderId, version, shardsCount, user->Counters_, SchemeCache_, user->QuoterResources_, insertCounters)))).first; - auto queueInfo = ret->second; + auto queueInfo = ret->second; LeaderTabletIdToQueue_[leaderTabletId] = queueInfo; - user->QueueByNameAndFolder_.emplace(std::make_pair(customName, folderId), queueInfo); - - if (!insertCounters) { + user->QueueByNameAndFolder_.emplace(std::make_pair(customName, folderId), queueInfo); + + if (!insertCounters) { Schedule(timeToInsertCounters - now, new TSqsEvents::TEvInsertQueueCounters(userName, queue, leaderTabletId)); - } - - { + } + + { auto requests = user->GetLeaderNodeRequests_.equal_range(queue); - for (auto i = requests.first; i != requests.second; ++i) { - auto& req = i->second; + for (auto i = requests.first; i != requests.second; ++i) { + auto& req = i->second; RLOG_SQS_REQ_DEBUG(req->Get()->RequestId, "Adding queue [" << req->Get()->UserName << "/" << req->Get()->QueueName << "] to sqs service. 
Move get leader node request to queue info"); queueInfo->GetLeaderNodeRequests_.emplace(std::move(req)); - } + } user->GetLeaderNodeRequests_.erase(requests.first, requests.second); - } - - { - auto requests = user->GetConfigurationRequests_.equal_range(queue); - for (auto i = requests.first; i != requests.second; ++i) { - auto& req = i->second; - ProcessConfigurationRequestForQueue(req, user, queueInfo); - } - user->GetConfigurationRequests_.erase(requests.first, requests.second); - } - - { - auto requests = user->GetQueueIdRequests_.equal_range(std::make_pair(customName, folderId)); - for (auto i = requests.first; i != requests.second; ++i) { - auto& req = i->second; - Send(req->Sender, new TSqsEvents::TEvQueueId(queueInfo->QueueName_, queueInfo->Version_, queueInfo->ShardsCount_)); - } - user->GetQueueIdRequests_.erase(requests.first, requests.second); - } - - { + } + + { + auto requests = user->GetConfigurationRequests_.equal_range(queue); + for (auto i = requests.first; i != requests.second; ++i) { + auto& req = i->second; + ProcessConfigurationRequestForQueue(req, user, queueInfo); + } + user->GetConfigurationRequests_.erase(requests.first, requests.second); + } + + { + auto requests = user->GetQueueIdRequests_.equal_range(std::make_pair(customName, folderId)); + for (auto i = requests.first; i != requests.second; ++i) { + auto& req = i->second; + Send(req->Sender, new TSqsEvents::TEvQueueId(queueInfo->QueueName_, queueInfo->Version_, queueInfo->ShardsCount_)); + } + user->GetQueueIdRequests_.erase(requests.first, requests.second); + } + + { auto requests = user->GetQueueFolderIdAndCustomNameRequests_.equal_range(queue); - for (auto i = requests.first; i != requests.second; ++i) { - auto& req = i->second; - Answer(req, queueInfo); - } + for (auto i = requests.first; i != requests.second; ++i) { + auto& req = i->second; + Answer(req, queueInfo); + } user->GetQueueFolderIdAndCustomNameRequests_.erase(requests.first, requests.second); - } - + } + queueInfo->ConnectToLeaderTablet(); LOG_SQS_DEBUG("Created queue record. Queue: [" << queue << "]. Leader tablet id: [" << leaderTabletId << "]. 
Pipe client actor: " << queueInfo->PipeClient_); - return ret; -} - -void TSqsService::AnswerNoUserToRequests() { + return ret; +} + +void TSqsService::AnswerNoUserToRequests() { AnswerNoUserToRequests(GetLeaderNodeRequests_); - AnswerNoUserToRequests(GetConfigurationRequests_); - AnswerNoUserToRequests(GetQueueIdRequests_); - AnswerNoUserToRequests(GetQueueFolderIdAndCustomNameRequests_); - AnswerNoUserToRequests(CountQueuesRequests_); -} - -void TSqsService::AnswerNoQueueToRequests(const TUserInfoPtr& user) { + AnswerNoUserToRequests(GetConfigurationRequests_); + AnswerNoUserToRequests(GetQueueIdRequests_); + AnswerNoUserToRequests(GetQueueFolderIdAndCustomNameRequests_); + AnswerNoUserToRequests(CountQueuesRequests_); +} + +void TSqsService::AnswerNoQueueToRequests(const TUserInfoPtr& user) { AnswerNoQueueToRequests(user, user->GetLeaderNodeRequests_); - AnswerNoQueueToRequests(user, user->GetConfigurationRequests_); - AnswerNoQueueToRequests(user, user->GetQueueIdRequests_); - AnswerNoQueueToRequests(user, user->GetQueueFolderIdAndCustomNameRequests_); -} - -void TSqsService::AnswerErrorToRequests() { + AnswerNoQueueToRequests(user, user->GetConfigurationRequests_); + AnswerNoQueueToRequests(user, user->GetQueueIdRequests_); + AnswerNoQueueToRequests(user, user->GetQueueFolderIdAndCustomNameRequests_); +} + +void TSqsService::AnswerErrorToRequests() { AnswerErrorToRequests(nullptr, GetLeaderNodeRequests_); - AnswerErrorToRequests(nullptr, GetConfigurationRequests_); - AnswerErrorToRequests(nullptr, GetQueueIdRequests_); - AnswerErrorToRequests(nullptr, GetQueueFolderIdAndCustomNameRequests_); - AnswerErrorToRequests(nullptr, CountQueuesRequests_); -} - -void TSqsService::AnswerErrorToRequests(const TUserInfoPtr& user) { + AnswerErrorToRequests(nullptr, GetConfigurationRequests_); + AnswerErrorToRequests(nullptr, GetQueueIdRequests_); + AnswerErrorToRequests(nullptr, GetQueueFolderIdAndCustomNameRequests_); + AnswerErrorToRequests(nullptr, CountQueuesRequests_); +} + +void TSqsService::AnswerErrorToRequests(const TUserInfoPtr& user) { AnswerErrorToRequests(user, user->GetLeaderNodeRequests_); - AnswerErrorToRequests(user, user->GetConfigurationRequests_); - AnswerErrorToRequests(user, user->GetQueueIdRequests_); - AnswerErrorToRequests(user, user->GetQueueFolderIdAndCustomNameRequests_); - AnswerErrorToRequests(user, user->CountQueuesRequests_); -} - -void TSqsService::HandleWakeup(TEvWakeup::TPtr& ev) { - Y_VERIFY(ev->Get()->Tag != 0); - switch (ev->Get()->Tag) { - case LIST_USERS_WAKEUP_TAG: - ScheduledRequestingUsersList_ = false; + AnswerErrorToRequests(user, user->GetConfigurationRequests_); + AnswerErrorToRequests(user, user->GetQueueIdRequests_); + AnswerErrorToRequests(user, user->GetQueueFolderIdAndCustomNameRequests_); + AnswerErrorToRequests(user, user->CountQueuesRequests_); +} + +void TSqsService::HandleWakeup(TEvWakeup::TPtr& ev) { + Y_VERIFY(ev->Get()->Tag != 0); + switch (ev->Get()->Tag) { + case LIST_USERS_WAKEUP_TAG: + ScheduledRequestingUsersList_ = false; if (TActivationContext::Now() < LastRequestUsersListTime_ + TDuration::MilliSeconds(GetLeadersDescriberUpdateTimeMs())) { - ScheduleRequestSqsUsersList(); - } else { - EarlyRequestUsersListBudget_ = Min(EarlyRequestUsersListBudget_ + 1, EARLY_REQUEST_USERS_LIST_MAX_BUDGET); - RequestSqsUsersList(); - } - break; - case LIST_QUEUES_WAKEUP_TAG: - ScheduledRequestingQueuesList_ = false; + ScheduleRequestSqsUsersList(); + } else { + EarlyRequestUsersListBudget_ = Min(EarlyRequestUsersListBudget_ + 1, 
EARLY_REQUEST_USERS_LIST_MAX_BUDGET); + RequestSqsUsersList(); + } + break; + case LIST_QUEUES_WAKEUP_TAG: + ScheduledRequestingQueuesList_ = false; if (TActivationContext::Now() < LastRequestQueuesListTime_ + TDuration::MilliSeconds(GetLeadersDescriberUpdateTimeMs())) { - ScheduleRequestSqsQueuesList(); - } else { - --EarlyRequestQueuesListMinBudget_; - RequestSqsQueuesList(); - } - break; - } -} - -void TSqsService::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - ev->Get()->Call(); -} - -void TSqsService::HandleSqsRequest(TSqsEvents::TEvSqsRequest::TPtr& ev) { - LOG_SQS_TRACE("HandleSqsRequest " << SecureShortUtf8DebugString(ev->Get()->Record)); - auto replier = MakeHolder<TReplierToSenderActorCallback>(ev); - const auto& request = replier->Request->Get()->Record; - Register(CreateActionActor(request, std::move(replier))); -} - -void TSqsService::HandleInsertQueueCounters(TSqsEvents::TEvInsertQueueCounters::TPtr& ev) { - const auto userIt = Users_.find(ev->Get()->User); - if (userIt == Users_.end()) { - LOG_SQS_WARN("No user [" << ev->Get()->User << "]. Don't insert queue [" << ev->Get()->Queue << "] counters"); - return; - } - const auto& user = userIt->second; - const auto queueIt = user->Queues_.find(ev->Get()->Queue); - if (queueIt == user->Queues_.end()) { - LOG_SQS_WARN("Don't insert queue [" << ev->Get()->Queue << "] counters: no queue"); - return; - } - const auto& queue = queueIt->second; + ScheduleRequestSqsQueuesList(); + } else { + --EarlyRequestQueuesListMinBudget_; + RequestSqsQueuesList(); + } + break; + } +} + +void TSqsService::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + ev->Get()->Call(); +} + +void TSqsService::HandleSqsRequest(TSqsEvents::TEvSqsRequest::TPtr& ev) { + LOG_SQS_TRACE("HandleSqsRequest " << SecureShortUtf8DebugString(ev->Get()->Record)); + auto replier = MakeHolder<TReplierToSenderActorCallback>(ev); + const auto& request = replier->Request->Get()->Record; + Register(CreateActionActor(request, std::move(replier))); +} + +void TSqsService::HandleInsertQueueCounters(TSqsEvents::TEvInsertQueueCounters::TPtr& ev) { + const auto userIt = Users_.find(ev->Get()->User); + if (userIt == Users_.end()) { + LOG_SQS_WARN("No user [" << ev->Get()->User << "]. Don't insert queue [" << ev->Get()->Queue << "] counters"); + return; + } + const auto& user = userIt->second; + const auto queueIt = user->Queues_.find(ev->Get()->Queue); + if (queueIt == user->Queues_.end()) { + LOG_SQS_WARN("Don't insert queue [" << ev->Get()->Queue << "] counters: no queue"); + return; + } + const auto& queue = queueIt->second; if (queue->LeaderTabletId_ != ev->Get()->LeaderTabletId) { LOG_SQS_WARN("Don't insert queue [" << ev->Get()->Queue << "] counters: leader tablet is not as expected. Expected: " << ev->Get()->LeaderTabletId << ". 
Real: " << queue->LeaderTabletId_); - return; - } - - queue->Counters_->InsertCounters(); -} - + return; + } + + queue->Counters_->InsertCounters(); +} + void TSqsService::IncLocalLeaderRef(const TActorId& referer, const TQueueInfoPtr& queueInfo, const TString& reason) { LWPROBE(IncLeaderRef, queueInfo->UserName_, queueInfo->QueueName_, referer.ToString()); const auto [iter, inserted] = LocalLeaderRefs_.emplace(referer, queueInfo); - if (inserted) { + if (inserted) { LOG_SQS_TRACE("Inc local leader ref for actor " << referer); queueInfo->IncLocalLeaderRef(reason); - } else { + } else { LWPROBE(IncLeaderRefAlreadyHasRef, queueInfo->UserName_, queueInfo->QueueName_, referer.ToString()); LOG_SQS_WARN("Inc local leader ref for actor " << referer << ". Ignore because this actor already presents in referers set"); - } -} - + } +} + void TSqsService::DecLocalLeaderRef(const TActorId& referer, const TString& reason) { LWPROBE(DecLeaderRef, referer.ToString()); const auto iter = LocalLeaderRefs_.find(referer); LOG_SQS_TRACE("Dec local leader ref for actor " << referer << ". Found: " << (iter != LocalLeaderRefs_.end())); if (iter != LocalLeaderRefs_.end()) { - auto queueInfo = iter->second; + auto queueInfo = iter->second; queueInfo->DecLocalLeaderRef(reason); LocalLeaderRefs_.erase(iter); - } else { + } else { LWPROBE(DecLeaderRefNotInRefSet, referer.ToString()); - } -} - -void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetQueueId::TPtr&& ev) { - GetQueueIdRequests_.emplace(ev->Get()->UserName, std::move(ev)); -} - + } +} + +void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetQueueId::TPtr&& ev) { + GetQueueIdRequests_.emplace(ev->Get()->UserName, std::move(ev)); +} + void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr&& ev) { GetQueueFolderIdAndCustomNameRequests_.emplace(ev->Get()->UserName, std::move(ev)); -} - -void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetConfiguration::TPtr&& ev) { - GetConfigurationRequests_.emplace(ev->Get()->UserName, std::move(ev)); -} - +} + +void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetConfiguration::TPtr&& ev) { + GetConfigurationRequests_.emplace(ev->Get()->UserName, std::move(ev)); +} + void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr&& ev) { GetLeaderNodeRequests_.emplace(ev->Get()->UserName, std::move(ev)); -} - +} + void TSqsService::InsertWaitingRequest(TSqsEvents::TEvCountQueues::TPtr&& ev) { CountQueuesRequests_.emplace(ev->Get()->UserName, std::move(ev)); } -void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetQueueId::TPtr&& ev, const TUserInfoPtr& userInfo) { - userInfo->GetQueueIdRequests_.emplace(std::make_pair(ev->Get()->CustomQueueName, ev->Get()->FolderId), std::move(ev)); -} - +void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetQueueId::TPtr&& ev, const TUserInfoPtr& userInfo) { + userInfo->GetQueueIdRequests_.emplace(std::make_pair(ev->Get()->CustomQueueName, ev->Get()->FolderId), std::move(ev)); +} + void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr&& ev, const TUserInfoPtr& userInfo) { userInfo->GetQueueFolderIdAndCustomNameRequests_.emplace(ev->Get()->QueueName, std::move(ev)); -} - -void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetConfiguration::TPtr&& ev, const TUserInfoPtr& userInfo) { - userInfo->GetConfigurationRequests_.emplace(ev->Get()->QueueName, std::move(ev)); -} - +} + +void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetConfiguration::TPtr&& ev, const 
TUserInfoPtr& userInfo) { + userInfo->GetConfigurationRequests_.emplace(ev->Get()->QueueName, std::move(ev)); +} + void TSqsService::InsertWaitingRequest(TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr&& ev, const TUserInfoPtr& userInfo) { userInfo->GetLeaderNodeRequests_.emplace(ev->Get()->QueueName, std::move(ev)); -} - +} + void TSqsService::InsertWaitingRequest(TSqsEvents::TEvCountQueues::TPtr&& ev, const TUserInfoPtr& userInfo) { userInfo->CountQueuesRequests_.emplace(ev->Get()->FolderId, std::move(ev)); } -template <class TMultimap> -size_t TSqsService::MoveUserRequests(const TUserInfoPtr& userInfo, TMultimap& map) { - size_t moved = 0; - auto requests = map.equal_range(userInfo->UserName_); - for (auto i = requests.first; i != requests.second; ++i) { - RLOG_SQS_REQ_DEBUG(i->second->Get()->RequestId, "Got user in sqs service. Move request " << i->second->Get()->ToStringHeader() << " to user info"); - InsertWaitingRequest(std::move(i->second), userInfo); - ++moved; - } - if (moved) { - map.erase(requests.first, requests.second); - } - return moved; -} - -template <class TMultimap> -void TSqsService::AnswerNoUserToRequests(TMultimap& map) { - for (auto& userToRequest : map) { - AnswerNotExists(userToRequest.second, nullptr); - } - map.clear(); -} - -template <class TMultimap> -void TSqsService::AnswerNoQueueToRequests(const TUserInfoPtr& user, TMultimap& map) { - for (auto& queueToRequest : map) { - auto& req = queueToRequest.second; - AnswerNotExists(req, user); - } - map.clear(); -} - -template <class TMultimap> -void TSqsService::AnswerErrorToRequests(const TUserInfoPtr& user, TMultimap& map) { - for (auto& queueToRequest : map) { - auto& req = queueToRequest.second; - if (user) { - RLOG_SQS_REQ_ERROR(req->Get()->RequestId, "Error in sqs service for user [" << user->UserName_ << "]. Request " << req->Get()->ToStringHeader()); - } else { - RLOG_SQS_REQ_ERROR(req->Get()->RequestId, "Error in sqs service. Request " << req->Get()->ToStringHeader()); - } - AnswerFailed(req, user); - } - map.clear(); -} +template <class TMultimap> +size_t TSqsService::MoveUserRequests(const TUserInfoPtr& userInfo, TMultimap& map) { + size_t moved = 0; + auto requests = map.equal_range(userInfo->UserName_); + for (auto i = requests.first; i != requests.second; ++i) { + RLOG_SQS_REQ_DEBUG(i->second->Get()->RequestId, "Got user in sqs service. Move request " << i->second->Get()->ToStringHeader() << " to user info"); + InsertWaitingRequest(std::move(i->second), userInfo); + ++moved; + } + if (moved) { + map.erase(requests.first, requests.second); + } + return moved; +} + +template <class TMultimap> +void TSqsService::AnswerNoUserToRequests(TMultimap& map) { + for (auto& userToRequest : map) { + AnswerNotExists(userToRequest.second, nullptr); + } + map.clear(); +} + +template <class TMultimap> +void TSqsService::AnswerNoQueueToRequests(const TUserInfoPtr& user, TMultimap& map) { + for (auto& queueToRequest : map) { + auto& req = queueToRequest.second; + AnswerNotExists(req, user); + } + map.clear(); +} + +template <class TMultimap> +void TSqsService::AnswerErrorToRequests(const TUserInfoPtr& user, TMultimap& map) { + for (auto& queueToRequest : map) { + auto& req = queueToRequest.second; + if (user) { + RLOG_SQS_REQ_ERROR(req->Get()->RequestId, "Error in sqs service for user [" << user->UserName_ << "]. Request " << req->Get()->ToStringHeader()); + } else { + RLOG_SQS_REQ_ERROR(req->Get()->RequestId, "Error in sqs service. 
Request " << req->Get()->ToStringHeader()); + } + AnswerFailed(req, user); + } + map.clear(); +} void TSqsService::MakeAndRegisterYcEventsProcessor() { if (!YcSearchEventsConfig.Enabled) @@ -1309,9 +1309,9 @@ void TSqsService::MakeAndRegisterYcEventsProcessor() { //IActor* CreateSqsService(const TYcSearchEventsConfig& ycSearchEventsConfig) { // return new TSqsService(ycSearchEventsConfig); //} - + IActor* CreateSqsService(TMaybe<ui32> ydbPort) { return new TSqsService(ydbPort); -} - -} // namespace NKikimr::NSQS +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/service.h b/ydb/core/ymq/actor/service.h index caf091c24b8..c9da5c03f44 100644 --- a/ydb/core/ymq/actor/service.h +++ b/ydb/core/ymq/actor/service.h @@ -1,161 +1,161 @@ -#pragma once -#include "defs.h" -#include "events.h" -#include "log.h" -#include "serviceid.h" +#pragma once +#include "defs.h" +#include "events.h" +#include "log.h" +#include "serviceid.h" #include "index_events_processor.h" - + #include <ydb/core/ymq/base/events_writer_iface.h> #include <ydb/core/base/tablet_pipe.h> #include <ydb/core/protos/config.pb.h> #include <ydb/core/tx/schemeshard/schemeshard.h> #include <ydb/public/sdk/cpp/client/ydb_table/table.h> - + #include <library/cpp/actors/core/actor_bootstrapped.h> -#include <util/generic/hash.h> -#include <util/generic/ptr.h> +#include <util/generic/hash.h> +#include <util/generic/ptr.h> #include <library/cpp/logger/log.h> - -namespace NKikimr::NSQS { - -class TSqsService - : public TActorBootstrapped<TSqsService> -{ -public: + +namespace NKikimr::NSQS { + +class TSqsService + : public TActorBootstrapped<TSqsService> +{ +public: TSqsService(const TMaybe<ui32>& ydbPort); - ~TSqsService(); - - void Bootstrap(); - + ~TSqsService(); + + void Bootstrap(); + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_SERVICE_ACTOR; - } - - struct TUserInfo; - using TUserInfoPtr = TIntrusivePtr<TUserInfo>; - using TUsersMap = std::map<TString, TUserInfoPtr>; - -private: - struct TQueueInfo; - - using TQueueInfoPtr = TIntrusivePtr<TQueueInfo>; - - STATEFN(StateFunc); - - void InitSchemeCache(); - - void HandleWakeup(TEvWakeup::TPtr& ev); + } + + struct TUserInfo; + using TUserInfoPtr = TIntrusivePtr<TUserInfo>; + using TUsersMap = std::map<TString, TUserInfoPtr>; + +private: + struct TQueueInfo; + + using TQueueInfoPtr = TIntrusivePtr<TQueueInfo>; + + STATEFN(StateFunc); + + void InitSchemeCache(); + + void HandleWakeup(TEvWakeup::TPtr& ev); void HandleDescribeSchemeResult(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult::TPtr& ev); - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - void HandlePipeClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev); - void HandlePipeClientDisconnected(TEvTabletPipe::TEvClientDestroyed::TPtr& ev); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + void HandlePipeClientConnected(TEvTabletPipe::TEvClientConnected::TPtr& ev); + void HandlePipeClientDisconnected(TEvTabletPipe::TEvClientDestroyed::TPtr& ev); void HandleGetLeaderNodeForQueueRequest(TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr& ev); - void HandleGetConfiguration(TSqsEvents::TEvGetConfiguration::TPtr& ev); - void HandleSqsRequest(TSqsEvents::TEvSqsRequest::TPtr& ev); // request from nodes with old version + void HandleGetConfiguration(TSqsEvents::TEvGetConfiguration::TPtr& ev); + void HandleSqsRequest(TSqsEvents::TEvSqsRequest::TPtr& ev); // request from nodes with old version void 
HandleQueueLeaderDecRef(TSqsEvents::TEvQueueLeaderDecRef::TPtr& ev); - void HandleGetQueueId(TSqsEvents::TEvGetQueueId::TPtr& ev); - void HandleGetQueueFolderIdAndCustomName(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev); - void HandleCountQueues(TSqsEvents::TEvCountQueues::TPtr& ev); - void HandleInsertQueueCounters(TSqsEvents::TEvInsertQueueCounters::TPtr& ev); - void HandleUserSettingsChanged(TSqsEvents::TEvUserSettingsChanged::TPtr& ev); - void HandleQueuesList(TSqsEvents::TEvQueuesList::TPtr& ev); - - void ScheduleRequestSqsUsersList(); - void RequestSqsUsersList(); - - void ScheduleRequestSqsQueuesList(); - void RequestSqsQueuesList(); - bool RequestQueueListForUser(const TUserInfoPtr& user, const TString& reqId) Y_WARN_UNUSED_RESULT; - - void RemoveQueue(const TString& userName, const TString& queue); - TUsersMap::iterator MutableUserIter(const TString& userName, bool moveUserRequestsToUserRecord = true, bool* requestsWereMoved = nullptr); - TUserInfoPtr MutableUser(const TString& userName, bool moveUserRequestsToUserRecord = true, bool* requestsWereMoved = nullptr); - void RemoveUser(const TString& userName); + void HandleGetQueueId(TSqsEvents::TEvGetQueueId::TPtr& ev); + void HandleGetQueueFolderIdAndCustomName(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev); + void HandleCountQueues(TSqsEvents::TEvCountQueues::TPtr& ev); + void HandleInsertQueueCounters(TSqsEvents::TEvInsertQueueCounters::TPtr& ev); + void HandleUserSettingsChanged(TSqsEvents::TEvUserSettingsChanged::TPtr& ev); + void HandleQueuesList(TSqsEvents::TEvQueuesList::TPtr& ev); + + void ScheduleRequestSqsUsersList(); + void RequestSqsUsersList(); + + void ScheduleRequestSqsQueuesList(); + void RequestSqsQueuesList(); + bool RequestQueueListForUser(const TUserInfoPtr& user, const TString& reqId) Y_WARN_UNUSED_RESULT; + + void RemoveQueue(const TString& userName, const TString& queue); + TUsersMap::iterator MutableUserIter(const TString& userName, bool moveUserRequestsToUserRecord = true, bool* requestsWereMoved = nullptr); + TUserInfoPtr MutableUser(const TString& userName, bool moveUserRequestsToUserRecord = true, bool* requestsWereMoved = nullptr); + void RemoveUser(const TString& userName); std::map<TString, TQueueInfoPtr>::iterator AddQueue(const TString& userName, const TString& queue, ui64 leaderTabletId, const TString& customName, const TString& folderId, const ui64 version, - const ui64 shardsCount, const TInstant createdTimestamp); - - void AnswerNoUserToRequests(); - void AnswerNoQueueToRequests(const TUserInfoPtr& user); - - void AnswerErrorToRequests(); - void AnswerErrorToRequests(const TUserInfoPtr& user); - + const ui64 shardsCount, const TInstant createdTimestamp); + + void AnswerNoUserToRequests(); + void AnswerNoQueueToRequests(const TUserInfoPtr& user); + + void AnswerErrorToRequests(); + void AnswerErrorToRequests(const TUserInfoPtr& user); + void AnswerLeaderlessConfiguration(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo, const TQueueInfoPtr& queueInfo); - void ProcessConfigurationRequestForQueue(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo, const TQueueInfoPtr& queueInfo); - + void ProcessConfigurationRequestForQueue(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo, const TQueueInfoPtr& queueInfo); + void IncLocalLeaderRef(const TActorId& referer, const TQueueInfoPtr& queueInfo, const TString& reason); void DecLocalLeaderRef(const TActorId& referer, const TString& reason); - - template <class TEvent> - 
TUserInfoPtr GetUserOrWait(TAutoPtr<TEvent>& ev); - - void InsertWaitingRequest(TSqsEvents::TEvGetQueueId::TPtr&& ev); + + template <class TEvent> + TUserInfoPtr GetUserOrWait(TAutoPtr<TEvent>& ev); + + void InsertWaitingRequest(TSqsEvents::TEvGetQueueId::TPtr&& ev); void InsertWaitingRequest(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr&& ev); - void InsertWaitingRequest(TSqsEvents::TEvGetConfiguration::TPtr&& ev); + void InsertWaitingRequest(TSqsEvents::TEvGetConfiguration::TPtr&& ev); void InsertWaitingRequest(TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr&& ev); void InsertWaitingRequest(TSqsEvents::TEvCountQueues::TPtr&& ev); - - void InsertWaitingRequest(TSqsEvents::TEvGetQueueId::TPtr&& ev, const TUserInfoPtr& userInfo); + + void InsertWaitingRequest(TSqsEvents::TEvGetQueueId::TPtr&& ev, const TUserInfoPtr& userInfo); void InsertWaitingRequest(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr&& ev, const TUserInfoPtr& userInfo); - void InsertWaitingRequest(TSqsEvents::TEvGetConfiguration::TPtr&& ev, const TUserInfoPtr& userInfo); + void InsertWaitingRequest(TSqsEvents::TEvGetConfiguration::TPtr&& ev, const TUserInfoPtr& userInfo); void InsertWaitingRequest(TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr&& ev, const TUserInfoPtr& userInfo); void InsertWaitingRequest(TSqsEvents::TEvCountQueues::TPtr&& ev, const TUserInfoPtr& userInfo); - - template <class TMultimap> - size_t MoveUserRequests(const TUserInfoPtr& userInfo, TMultimap& map); // returns moved requests count - template <class TMultimap> - void AnswerNoUserToRequests(TMultimap& map); - template <class TMultimap> - void AnswerNoQueueToRequests(const TUserInfoPtr& user, TMultimap& map); - template <class TMultimap> - void AnswerErrorToRequests(const TUserInfoPtr& user, TMultimap& map); - + + template <class TMultimap> + size_t MoveUserRequests(const TUserInfoPtr& userInfo, TMultimap& map); // returns moved requests count + template <class TMultimap> + void AnswerNoUserToRequests(TMultimap& map); + template <class TMultimap> + void AnswerNoQueueToRequests(const TUserInfoPtr& user, TMultimap& map); + template <class TMultimap> + void AnswerErrorToRequests(const TUserInfoPtr& user, TMultimap& map); + void AnswerNotExists(TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr& ev, const TUserInfoPtr& userInfo); - void AnswerNotExists(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo); - void AnswerNotExists(TSqsEvents::TEvGetQueueId::TPtr& ev, const TUserInfoPtr& userInfo); - void AnswerNotExists(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TUserInfoPtr& userInfo); - void AnswerNotExists(TSqsEvents::TEvCountQueues::TPtr& ev, const TUserInfoPtr& userInfo); - + void AnswerNotExists(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo); + void AnswerNotExists(TSqsEvents::TEvGetQueueId::TPtr& ev, const TUserInfoPtr& userInfo); + void AnswerNotExists(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TUserInfoPtr& userInfo); + void AnswerNotExists(TSqsEvents::TEvCountQueues::TPtr& ev, const TUserInfoPtr& userInfo); + void AnswerFailed(TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr& ev, const TUserInfoPtr& userInfo); - void AnswerFailed(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo); - void AnswerFailed(TSqsEvents::TEvGetQueueId::TPtr& ev, const TUserInfoPtr& userInfo); - void AnswerFailed(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TUserInfoPtr& userInfo); - void AnswerFailed(TSqsEvents::TEvCountQueues::TPtr& ev, 
const TUserInfoPtr& userInfo); - - void Answer(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TQueueInfoPtr& queueInfo); - - void AnswerCountQueuesRequests(const TUserInfoPtr& user); - + void AnswerFailed(TSqsEvents::TEvGetConfiguration::TPtr& ev, const TUserInfoPtr& userInfo); + void AnswerFailed(TSqsEvents::TEvGetQueueId::TPtr& ev, const TUserInfoPtr& userInfo); + void AnswerFailed(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TUserInfoPtr& userInfo); + void AnswerFailed(TSqsEvents::TEvCountQueues::TPtr& ev, const TUserInfoPtr& userInfo); + + void Answer(TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr& ev, const TQueueInfoPtr& queueInfo); + + void AnswerCountQueuesRequests(const TUserInfoPtr& user); + void NotifyLocalDeadLetterQueuesLeaders(const std::vector<TSqsEvents::TEvQueuesList::TQueueRecord>& sortedQueues) const; void MakeAndRegisterYcEventsProcessor(); -private: - TString RootUrl_; - TIntrusivePtr<NMonitoring::TDynamicCounters> SqsCoreCounters_; +private: + TString RootUrl_; + TIntrusivePtr<NMonitoring::TDynamicCounters> SqsCoreCounters_; TIntrusivePtr<NMonitoring::TDynamicCounters> YmqRootCounters_; std::shared_ptr<TAlignedPagePoolCounters> AllocPoolCounters_; - TIntrusivePtr<TUserCounters> AggregatedUserCounters_; - TUsersMap Users_; + TIntrusivePtr<TUserCounters> AggregatedUserCounters_; + TUsersMap Users_; THashMap<ui64, TQueueInfoPtr> LeaderTabletIdToQueue_; THashMap<TActorId, TQueueInfoPtr> LocalLeaderRefs_; // referer -> queue info TActorId SchemeCache_; TActorId QueuesListReader_; - - // State machine - bool RequestingUsersList_ = false; - bool ScheduledRequestingUsersList_ = false; - size_t EarlyRequestUsersListBudget_ = 0; // Defence from continuously requesting users list. - TInstant LastRequestUsersListTime_; - - bool RequestingQueuesList_ = false; - bool ScheduledRequestingQueuesList_ = false; - i64 EarlyRequestQueuesListMinBudget_ = 0; // Defence from continuously requesting queues list. - TInstant LastRequestQueuesListTime_; + + // State machine + bool RequestingUsersList_ = false; + bool ScheduledRequestingUsersList_ = false; + size_t EarlyRequestUsersListBudget_ = 0; // Defence from continuously requesting users list. + TInstant LastRequestUsersListTime_; + + bool RequestingQueuesList_ = false; + bool ScheduledRequestingQueuesList_ = false; + i64 EarlyRequestQueuesListMinBudget_ = 0; // Defence from continuously requesting queues list. 
+ TInstant LastRequestQueuesListTime_; THashMultiMap<TString, TSqsEvents::TEvGetLeaderNodeForQueueRequest::TPtr> GetLeaderNodeRequests_; // user name -> request - THashMultiMap<TString, TSqsEvents::TEvGetConfiguration::TPtr> GetConfigurationRequests_; // user name -> request - THashMultiMap<TString, TSqsEvents::TEvGetQueueId::TPtr> GetQueueIdRequests_; // user name -> request + THashMultiMap<TString, TSqsEvents::TEvGetConfiguration::TPtr> GetConfigurationRequests_; // user name -> request + THashMultiMap<TString, TSqsEvents::TEvGetQueueId::TPtr> GetQueueIdRequests_; // user name -> request THashMultiMap<TString, TSqsEvents::TEvGetQueueFolderIdAndCustomName::TPtr> GetQueueFolderIdAndCustomNameRequests_; // user name -> request THashMultiMap<TString, TSqsEvents::TEvCountQueues::TPtr> CountQueuesRequests_; // user name -> request @@ -169,6 +169,6 @@ private: TDuration RescanInterval = TDuration::Minutes(1); }; TYcSearchEventsConfig YcSearchEventsConfig; -}; - -} // namespace NKikimr::NSQS +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/serviceid.h b/ydb/core/ymq/actor/serviceid.h index 7219d918ba0..4b0d222c674 100644 --- a/ydb/core/ymq/actor/serviceid.h +++ b/ydb/core/ymq/actor/serviceid.h @@ -1,5 +1,5 @@ #pragma once -#include "defs.h" +#include "defs.h" #include <library/cpp/actors/core/actor.h> @@ -7,18 +7,18 @@ #include <util/generic/strbuf.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { inline TActorId MakeSqsServiceID(ui32 nodeId) { - Y_VERIFY(nodeId != 0); + Y_VERIFY(nodeId != 0); return TActorId(nodeId, TStringBuf("SQS_SERVICE")); } inline TActorId MakeSqsProxyServiceID(ui32 nodeId) { - Y_VERIFY(nodeId != 0); + Y_VERIFY(nodeId != 0); return TActorId(nodeId, TStringBuf("SQS_PROXY")); -} - +} + inline TActorId MakeSqsAccessServiceID() { return TActorId(0, TStringBuf("SQS_ACCESS")); } @@ -32,10 +32,10 @@ inline TActorId MakeSqsMeteringServiceID() { } IActor* CreateSqsService(TMaybe<ui32> ydbPort = Nothing()); -IActor* CreateSqsProxyService(); -IActor* CreateSqsAccessService(const TString& address, const TString& pathToRootCA); -IActor* CreateSqsFolderService(const TString& address, const TString& pathToRootCA); +IActor* CreateSqsProxyService(); +IActor* CreateSqsAccessService(const TString& address, const TString& pathToRootCA); +IActor* CreateSqsFolderService(const TString& address, const TString& pathToRootCA); IActor* CreateMockSqsFolderService(); -IActor* CreateSqsMeteringService(); - -} // namespace NKikimr::NSQS +IActor* CreateSqsMeteringService(); + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/set_queue_attributes.cpp b/ydb/core/ymq/actor/set_queue_attributes.cpp index df33ad99e6a..ee308ca4733 100644 --- a/ydb/core/ymq/actor/set_queue_attributes.cpp +++ b/ydb/core/ymq/actor/set_queue_attributes.cpp @@ -1,7 +1,7 @@ #include "action.h" -#include "error.h" -#include "executor.h" -#include "log.h" +#include "error.h" +#include "executor.h" +#include "log.h" #include "params.h" #include "serviceid.h" @@ -13,28 +13,28 @@ #include <library/cpp/scheme/scheme.h> #include <util/generic/maybe.h> -#include <util/generic/utility.h> +#include <util/generic/utility.h> #include <util/string/cast.h> using NKikimr::NClient::TValue; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TSetQueueAttributesActor : public TActionActor<TSetQueueAttributesActor> { public: - TSetQueueAttributesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) - : TActionActor(sourceSqsRequest, EAction::SetQueueAttributes, 
std::move(cb)) + TSetQueueAttributesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) + : TActionActor(sourceSqsRequest, EAction::SetQueueAttributes, std::move(cb)) { - CopyAccountName(Request()); + CopyAccountName(Request()); Response_.MutableSetQueueAttributes()->SetRequestId(RequestId_); - for (const auto& attr : Request().attributes()) { + for (const auto& attr : Request().attributes()) { Attributes_[attr.GetName()] = attr.GetValue(); } - CopySecurityToken(Request()); + CopySecurityToken(Request()); } private: @@ -47,33 +47,33 @@ private: bool DoValidate() override { if (!GetQueueName()) { - MakeError(Response_.MutableSetQueueAttributes(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); + MakeError(Response_.MutableSetQueueAttributes(), NErrors::MISSING_PARAMETER, "No QueueName parameter."); return false; } - const bool clampValues = !Cfg().GetEnableQueueAttributesValidation(); - ValidatedAttributes_ = TQueueAttributes::FromAttributesAndConfig(Attributes_, Cfg(), IsFifoQueue(), clampValues); + const bool clampValues = !Cfg().GetEnableQueueAttributesValidation(); + ValidatedAttributes_ = TQueueAttributes::FromAttributesAndConfig(Attributes_, Cfg(), IsFifoQueue(), clampValues); if (!ValidatedAttributes_.Validate()) { MakeError(Response_.MutableSetQueueAttributes(), *ValidatedAttributes_.Error, ValidatedAttributes_.ErrorText); return false; - } - + } + return true; } - TError* MutableErrorDesc() override { - return Response_.MutableSetQueueAttributes()->MutableError(); - } - - void RequestAttributesChange() const { - TExecutorBuilder builder(SelfId(), RequestId_); - builder - .User(UserName_) - .Queue(GetQueueName()) + TError* MutableErrorDesc() override { + return Response_.MutableSetQueueAttributes()->MutableError(); + } + + void RequestAttributesChange() const { + TExecutorBuilder builder(SelfId(), RequestId_); + builder + .User(UserName_) + .Queue(GetQueueName()) .QueueLeader(QueueLeader_) - .QueryId(SET_QUEUE_ATTRIBUTES_ID) - .Counters(QueueCounters_) - .RetryOnTimeout(); + .QueryId(SET_QUEUE_ATTRIBUTES_ID) + .Counters(QueueCounters_) + .RetryOnTimeout(); builder.Params().OptionalUint64("MAX_RECEIVE_COUNT", ValidatedAttributes_.RedrivePolicy.MaxReceiveCount); builder.Params().OptionalUtf8("DLQ_TARGET_ARN", ValidatedAttributes_.RedrivePolicy.TargetArn); @@ -85,86 +85,86 @@ private: builder.Params().OptionalUint64("WAIT", ToMilliSeconds(ValidatedAttributes_.ReceiveMessageWaitTimeSeconds)); builder.Params().OptionalUint64("MAX_MESSAGE_SIZE", ValidatedAttributes_.MaximumMessageSize); - if (IsFifoQueue()) { + if (IsFifoQueue()) { builder.Params().OptionalBool("CONTENT_BASED_DEDUPLICATION", ValidatedAttributes_.ContentBasedDeduplication); - } - - builder.Params().Utf8("USER_NAME", UserName_); - - builder.Start(); + } + + builder.Params().Utf8("USER_NAME", UserName_); + + builder.Start(); } - void DoAction() override { + void DoAction() override { Become(&TThis::StateFunc); if (ValidatedAttributes_.HasClampedAttributes()) { - RLOG_SQS_WARN("Clamped some queue attribute values for account " << UserName_ << " and queue name " << GetQueueName()); + RLOG_SQS_WARN("Clamped some queue attribute values for account " << UserName_ << " and queue name " << GetQueueName()); } if (ValidatedAttributes_.RedrivePolicy.TargetQueueName && *ValidatedAttributes_.RedrivePolicy.TargetQueueName) { - Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvGetQueueId(RequestId_, UserName_, *ValidatedAttributes_.RedrivePolicy.TargetQueueName, FolderId_)); + 
Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvGetQueueId(RequestId_, UserName_, *ValidatedAttributes_.RedrivePolicy.TargetQueueName, FolderId_)); } else { - RequestAttributesChange(); + RequestAttributesChange(); } } TString DoGetQueueName() const override { - return Request().GetQueueName(); + return Request().GetQueueName(); } - STATEFN(StateFunc) { + STATEFN(StateFunc) { switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - hFunc(TSqsEvents::TEvQueueId, HandleQueueId); + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + hFunc(TSqsEvents::TEvQueueId, HandleQueueId); } } - void HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev) { + void HandleQueueId(TSqsEvents::TEvQueueId::TPtr& ev) { if (ev->Get()->Failed) { - RLOG_SQS_WARN("Get queue id failed"); + RLOG_SQS_WARN("Get queue id failed"); MakeError(MutableErrorDesc(), NErrors::INTERNAL_FAILURE); } else if (!ev->Get()->Exists) { - MakeError(MutableErrorDesc(), NErrors::NON_EXISTENT_QUEUE, "Target DLQ does not exist."); + MakeError(MutableErrorDesc(), NErrors::NON_EXISTENT_QUEUE, "Target DLQ does not exist."); } else if (ev->Get()->QueueId == GetQueueName()) { MakeError(MutableErrorDesc(), NErrors::INVALID_PARAMETER_VALUE, "Using the queue itself as a dead letter queue is not allowed."); } else { - RequestAttributesChange(); + RequestAttributesChange(); return; } - SendReplyAndDie(); + SendReplyAndDie(); } - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - const auto& record = ev->Get()->Record; - const ui32 status = record.GetStatus(); - auto* result = Response_.MutableSetQueueAttributes(); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + const auto& record = ev->Get()->Record; + const ui32 status = record.GetStatus(); + auto* result = Response_.MutableSetQueueAttributes(); if (status == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { // OK - RLOG_SQS_DEBUG("Sending clear attributes cache event for queue [" << UserName_ << "/" << GetQueueName() << "]"); + RLOG_SQS_DEBUG("Sending clear attributes cache event for queue [" << UserName_ << "/" << GetQueueName() << "]"); Send(QueueLeader_, MakeHolder<TSqsEvents::TEvClearQueueAttributesCache>()); } else { - RLOG_SQS_WARN("Request failed: " << record); - MakeError(result, NErrors::INTERNAL_FAILURE); + RLOG_SQS_WARN("Request failed: " << record); + MakeError(result, NErrors::INTERNAL_FAILURE); } - SendReplyAndDie(); - } - - const TSetQueueAttributesRequest& Request() const { - return SourceSqsRequest_.GetSetQueueAttributes(); + SendReplyAndDie(); } + const TSetQueueAttributesRequest& Request() const { + return SourceSqsRequest_.GetSetQueueAttributes(); + } + private: THashMap<TString, TString> Attributes_; TQueueAttributes ValidatedAttributes_; }; -IActor* CreateSetQueueAttributesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { - return new TSetQueueAttributesActor(sourceSqsRequest, std::move(cb)); +IActor* CreateSetQueueAttributesActor(const NKikimrClient::TSqsRequest& sourceSqsRequest, THolder<IReplyCallback> cb) { + return new TSetQueueAttributesActor(sourceSqsRequest, std::move(cb)); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/sha256.cpp b/ydb/core/ymq/actor/sha256.cpp index 294c8ab2710..2a9553c480c 100644 --- a/ydb/core/ymq/actor/sha256.cpp +++ b/ydb/core/ymq/actor/sha256.cpp @@ -1,28 +1,28 @@ -#include "sha256.h" - -#include <util/generic/strbuf.h> 
-#include <util/generic/string.h> -#include <util/string/hex.h> - -#include <openssl/sha.h> - -#include <stdexcept> - -namespace NKikimr::NSQS { - -TString CalcSHA256(TStringBuf data) { - unsigned char hash[SHA256_DIGEST_LENGTH]; - SHA256_CTX sha256; - if (!SHA256_Init(&sha256)) { - throw std::runtime_error("Failed to init SHA-256"); - } +#include "sha256.h" + +#include <util/generic/strbuf.h> +#include <util/generic/string.h> +#include <util/string/hex.h> + +#include <openssl/sha.h> + +#include <stdexcept> + +namespace NKikimr::NSQS { + +TString CalcSHA256(TStringBuf data) { + unsigned char hash[SHA256_DIGEST_LENGTH]; + SHA256_CTX sha256; + if (!SHA256_Init(&sha256)) { + throw std::runtime_error("Failed to init SHA-256"); + } if (!SHA256_Update(&sha256, data.data(), data.size())) { - throw std::runtime_error("Failed to update SHA-256"); - } - if (!SHA256_Final(hash, &sha256)) { - throw std::runtime_error("Failed to finalize SHA-256"); - } - return HexEncode(TStringBuf(reinterpret_cast<const char*>(hash), SHA256_DIGEST_LENGTH)); -} - -} // namespace NKikimr::NSQS + throw std::runtime_error("Failed to update SHA-256"); + } + if (!SHA256_Final(hash, &sha256)) { + throw std::runtime_error("Failed to finalize SHA-256"); + } + return HexEncode(TStringBuf(reinterpret_cast<const char*>(hash), SHA256_DIGEST_LENGTH)); +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/sha256.h b/ydb/core/ymq/actor/sha256.h index baece5bac64..c34af61af10 100644 --- a/ydb/core/ymq/actor/sha256.h +++ b/ydb/core/ymq/actor/sha256.h @@ -1,12 +1,12 @@ -#pragma once -#include "defs.h" - -#include <util/generic/strbuf.h> -#include <util/generic/string.h> - -namespace NKikimr::NSQS { - -// SHA-256 encoded in hex -TString CalcSHA256(TStringBuf); - -} // namespace NKikimr::NSQS +#pragma once +#include "defs.h" + +#include <util/generic/strbuf.h> +#include <util/generic/string.h> + +namespace NKikimr::NSQS { + +// SHA-256 encoded in hex +TString CalcSHA256(TStringBuf); + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/user_settings_names.cpp b/ydb/core/ymq/actor/user_settings_names.cpp index 3d315490bc8..500d180cb66 100644 --- a/ydb/core/ymq/actor/user_settings_names.cpp +++ b/ydb/core/ymq/actor/user_settings_names.cpp @@ -1,10 +1,10 @@ -#include "user_settings_names.h" - -namespace NKikimr::NSQS { - -extern const TString USER_SETTING_MAX_QUEUES_COUNT = "MaxQueuesCount"; -extern const TString USER_SETTING_DISABLE_COUNTERS = "DisableCounters"; -extern const TString USER_SETTING_SHOW_DETAILED_COUNTERS_DEADLINE_MS = "ShowDetailedCountersDeadlineMs"; -extern const TString USER_SETTING_EXPORT_TRANSACTION_COUNTERS = "ExportTransactionCounters"; - -} // namespace NKikimr::NSQS +#include "user_settings_names.h" + +namespace NKikimr::NSQS { + +extern const TString USER_SETTING_MAX_QUEUES_COUNT = "MaxQueuesCount"; +extern const TString USER_SETTING_DISABLE_COUNTERS = "DisableCounters"; +extern const TString USER_SETTING_SHOW_DETAILED_COUNTERS_DEADLINE_MS = "ShowDetailedCountersDeadlineMs"; +extern const TString USER_SETTING_EXPORT_TRANSACTION_COUNTERS = "ExportTransactionCounters"; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/user_settings_names.h b/ydb/core/ymq/actor/user_settings_names.h index 11ee80f43a7..fc77c4d11ab 100644 --- a/ydb/core/ymq/actor/user_settings_names.h +++ b/ydb/core/ymq/actor/user_settings_names.h @@ -1,13 +1,13 @@ -#pragma once -#include "defs.h" - -#include <util/generic/string.h> - -namespace NKikimr::NSQS { - -extern const TString USER_SETTING_MAX_QUEUES_COUNT; 
-extern const TString USER_SETTING_DISABLE_COUNTERS; -extern const TString USER_SETTING_SHOW_DETAILED_COUNTERS_DEADLINE_MS; -extern const TString USER_SETTING_EXPORT_TRANSACTION_COUNTERS; - -} // namespace NKikimr::NSQS +#pragma once +#include "defs.h" + +#include <util/generic/string.h> + +namespace NKikimr::NSQS { + +extern const TString USER_SETTING_MAX_QUEUES_COUNT; +extern const TString USER_SETTING_DISABLE_COUNTERS; +extern const TString USER_SETTING_SHOW_DETAILED_COUNTERS_DEADLINE_MS; +extern const TString USER_SETTING_EXPORT_TRANSACTION_COUNTERS; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/user_settings_reader.cpp b/ydb/core/ymq/actor/user_settings_reader.cpp index 0637b655dd2..7bbc1d94efd 100644 --- a/ydb/core/ymq/actor/user_settings_reader.cpp +++ b/ydb/core/ymq/actor/user_settings_reader.cpp @@ -1,204 +1,204 @@ -#include "user_settings_reader.h" -#include "cfg.h" -#include "executor.h" -#include "events.h" - -namespace NKikimr::NSQS { - -TUserSettingsReader::TUserSettingsReader(const TIntrusivePtr<TTransactionCounters>& transactionCounters) - : TransactionCounters(transactionCounters) -{ -} - -TUserSettingsReader::~TUserSettingsReader() { -} - -void TUserSettingsReader::Bootstrap() { - Become(&TUserSettingsReader::StateFunc); - StartReading(); -} - -STATEFN(TUserSettingsReader::StateFunc) { - switch (ev->GetTypeRewrite()) { - hFunc(TEvWakeup, HandleWakeup); - hFunc(TSqsEvents::TEvExecuted, HandleExecuted); - default: - LOG_SQS_ERROR("Unknown type of event came to SQS user settings reader actor: " << ev->Type << " (" << ev->GetBase()->ToString() << "), sender: " << ev->Sender); - } -} - -void TUserSettingsReader::HandleWakeup(TEvWakeup::TPtr& ev) { - Y_UNUSED(ev); - StartReading(); -} - -void TUserSettingsReader::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { - ev->Get()->Call(); -} - -void TUserSettingsReader::StartReading() { - CurrentUser = TString(); - CurrentName = TString(); - OldSettings = std::move(CurrentSettings); +#include "user_settings_reader.h" +#include "cfg.h" +#include "executor.h" +#include "events.h" + +namespace NKikimr::NSQS { + +TUserSettingsReader::TUserSettingsReader(const TIntrusivePtr<TTransactionCounters>& transactionCounters) + : TransactionCounters(transactionCounters) +{ +} + +TUserSettingsReader::~TUserSettingsReader() { +} + +void TUserSettingsReader::Bootstrap() { + Become(&TUserSettingsReader::StateFunc); + StartReading(); +} + +STATEFN(TUserSettingsReader::StateFunc) { + switch (ev->GetTypeRewrite()) { + hFunc(TEvWakeup, HandleWakeup); + hFunc(TSqsEvents::TEvExecuted, HandleExecuted); + default: + LOG_SQS_ERROR("Unknown type of event came to SQS user settings reader actor: " << ev->Type << " (" << ev->GetBase()->ToString() << "), sender: " << ev->Sender); + } +} + +void TUserSettingsReader::HandleWakeup(TEvWakeup::TPtr& ev) { + Y_UNUSED(ev); + StartReading(); +} + +void TUserSettingsReader::HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev) { + ev->Get()->Call(); +} + +void TUserSettingsReader::StartReading() { + CurrentUser = TString(); + CurrentName = TString(); + OldSettings = std::move(CurrentSettings); CurrentSettings = std::make_shared<TUserSettings>(); - if (CompiledQuery) { - NextRequest(); - } else { - CompileRequest(); - } -} - -void TUserSettingsReader::CompileRequest() { - TExecutorBuilder(SelfId(), "") - .Mode(NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE) - .QueryId(GET_USER_SETTINGS_ID) - .RetryOnTimeout() - .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnRequestCompiled(ev); }) - 
.Counters(TransactionCounters) - .Start(); -} - -void TUserSettingsReader::OnRequestCompiled(const TSqsEvents::TEvExecuted::TRecord& record) { - LOG_SQS_TRACE("Handle compiled user settings query: " << record); - if (record.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - CompiledQuery = record.GetMiniKQLCompileResults().GetCompiledProgram(); - NextRequest(); - } else { - LOG_SQS_WARN("Get user setting request compilation failed: " << record); - CurrentSettings = OldSettings; - OldSettings = nullptr; - ScheduleNextUpdate(); - } -} - -void TUserSettingsReader::NextRequest() { - TExecutorBuilder(SelfId(), "") - .QueryId(GET_USER_SETTINGS_ID) - .Bin(CompiledQuery) - .RetryOnTimeout() - .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnUserSettingsRead(ev); }) - .Counters(TransactionCounters) - .Params() - .Utf8("FROM_USER", CurrentUser) - .Utf8("FROM_NAME", CurrentName) - .Uint64("BATCH_SIZE", Cfg().GetUserSettingsReadBatchSize()) - .ParentBuilder().Start(); -} - -void TUserSettingsReader::OnUserSettingsRead(const TSqsEvents::TEvExecuted::TRecord& record) { - LOG_SQS_TRACE("Handle user settings: " << record); - if (record.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { - using NKikimr::NClient::TValue; - const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); - const TValue settingsVal(val["settings"]); - const bool truncated = val["truncated"]; - for (size_t i = 0; i < settingsVal.Size(); ++i) { - const TValue row = settingsVal[i]; - TString user = row["Account"]; - TString name = row["Name"]; - TString value = row["Value"]; - auto& settings = (*CurrentSettings)[user]; - if (!settings) { + if (CompiledQuery) { + NextRequest(); + } else { + CompileRequest(); + } +} + +void TUserSettingsReader::CompileRequest() { + TExecutorBuilder(SelfId(), "") + .Mode(NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE) + .QueryId(GET_USER_SETTINGS_ID) + .RetryOnTimeout() + .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnRequestCompiled(ev); }) + .Counters(TransactionCounters) + .Start(); +} + +void TUserSettingsReader::OnRequestCompiled(const TSqsEvents::TEvExecuted::TRecord& record) { + LOG_SQS_TRACE("Handle compiled user settings query: " << record); + if (record.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + CompiledQuery = record.GetMiniKQLCompileResults().GetCompiledProgram(); + NextRequest(); + } else { + LOG_SQS_WARN("Get user setting request compilation failed: " << record); + CurrentSettings = OldSettings; + OldSettings = nullptr; + ScheduleNextUpdate(); + } +} + +void TUserSettingsReader::NextRequest() { + TExecutorBuilder(SelfId(), "") + .QueryId(GET_USER_SETTINGS_ID) + .Bin(CompiledQuery) + .RetryOnTimeout() + .OnExecuted([this](const TSqsEvents::TEvExecuted::TRecord& ev) { OnUserSettingsRead(ev); }) + .Counters(TransactionCounters) + .Params() + .Utf8("FROM_USER", CurrentUser) + .Utf8("FROM_NAME", CurrentName) + .Uint64("BATCH_SIZE", Cfg().GetUserSettingsReadBatchSize()) + .ParentBuilder().Start(); +} + +void TUserSettingsReader::OnUserSettingsRead(const TSqsEvents::TEvExecuted::TRecord& record) { + LOG_SQS_TRACE("Handle user settings: " << record); + if (record.GetStatus() == TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete) { + using NKikimr::NClient::TValue; + const TValue val(TValue::Create(record.GetExecutionEngineEvaluatedResponse())); + const TValue settingsVal(val["settings"]); + const bool truncated 
= val["truncated"]; + for (size_t i = 0; i < settingsVal.Size(); ++i) { + const TValue row = settingsVal[i]; + TString user = row["Account"]; + TString name = row["Name"]; + TString value = row["Value"]; + auto& settings = (*CurrentSettings)[user]; + if (!settings) { settings = std::make_shared<TSettings>(); - } - settings->emplace(std::move(name), std::move(value)); - } - - const bool scanCompleted = !truncated || settingsVal.Size() == 0; - if (scanCompleted) { - FinishScan(); - ScheduleNextUpdate(); - } else { - CurrentUser = TString(settingsVal[settingsVal.Size() - 1]["Account"]); - CurrentName = TString(settingsVal[settingsVal.Size() - 1]["Name"]); - NextRequest(); - } - } else { - LOG_SQS_WARN("Get user setting request failed: " << record); - CurrentSettings = OldSettings; - OldSettings = nullptr; - ScheduleNextUpdate(); - } -} - -void TUserSettingsReader::FinishScan() { - auto oldIt = OldSettings->begin(); - auto newIt = CurrentSettings->begin(); - while (oldIt != OldSettings->end() && newIt != CurrentSettings->end()) { - if (oldIt->first == newIt->first) { - CompareUserSettings(oldIt->first, oldIt->second, newIt->second); - ++oldIt; - ++newIt; - } else if (oldIt->first < newIt->first) { - OnRemoveUserSettings(oldIt->first, oldIt->second); - ++oldIt; - } else { - OnAddUserSettings(newIt->first, newIt->second); - ++newIt; - } - } - while (oldIt != OldSettings->end()) { - OnRemoveUserSettings(oldIt->first, oldIt->second); - ++oldIt; - } - while (newIt != CurrentSettings->end()) { - OnAddUserSettings(newIt->first, newIt->second); - ++newIt; - } -} - -void TUserSettingsReader::CompareUserSettings(const TString& userName, const TSettingsPtr& oldSettings, const TSettingsPtr& newSettings) { + } + settings->emplace(std::move(name), std::move(value)); + } + + const bool scanCompleted = !truncated || settingsVal.Size() == 0; + if (scanCompleted) { + FinishScan(); + ScheduleNextUpdate(); + } else { + CurrentUser = TString(settingsVal[settingsVal.Size() - 1]["Account"]); + CurrentName = TString(settingsVal[settingsVal.Size() - 1]["Name"]); + NextRequest(); + } + } else { + LOG_SQS_WARN("Get user setting request failed: " << record); + CurrentSettings = OldSettings; + OldSettings = nullptr; + ScheduleNextUpdate(); + } +} + +void TUserSettingsReader::FinishScan() { + auto oldIt = OldSettings->begin(); + auto newIt = CurrentSettings->begin(); + while (oldIt != OldSettings->end() && newIt != CurrentSettings->end()) { + if (oldIt->first == newIt->first) { + CompareUserSettings(oldIt->first, oldIt->second, newIt->second); + ++oldIt; + ++newIt; + } else if (oldIt->first < newIt->first) { + OnRemoveUserSettings(oldIt->first, oldIt->second); + ++oldIt; + } else { + OnAddUserSettings(newIt->first, newIt->second); + ++newIt; + } + } + while (oldIt != OldSettings->end()) { + OnRemoveUserSettings(oldIt->first, oldIt->second); + ++oldIt; + } + while (newIt != CurrentSettings->end()) { + OnAddUserSettings(newIt->first, newIt->second); + ++newIt; + } +} + +void TUserSettingsReader::CompareUserSettings(const TString& userName, const TSettingsPtr& oldSettings, const TSettingsPtr& newSettings) { std::shared_ptr<std::set<TString>> diff = std::make_shared<std::set<TString>>(); - auto oldIt = oldSettings->begin(); - auto newIt = newSettings->begin(); - while (oldIt != oldSettings->end() && newIt != newSettings->end()) { - if (oldIt->first == newIt->first) { - if (oldIt->second != newIt->second) { - diff->emplace(oldIt->first); - } - ++oldIt; - ++newIt; - } else if (oldIt->first < newIt->first) { - 
diff->emplace(oldIt->first); - ++oldIt; - } else { - diff->emplace(newIt->first); - ++newIt; - } - } - while (oldIt != oldSettings->end()) { - diff->emplace(oldIt->first); - ++oldIt; - } - while (newIt != newSettings->end()) { - diff->emplace(newIt->first); - ++newIt; - } - if (!diff->empty()) { - Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvUserSettingsChanged(userName, newSettings, std::move(diff))); - } -} - -void TUserSettingsReader::OnRemoveUserSettings(const TString& userName, const TSettingsPtr& oldSettings) { + auto oldIt = oldSettings->begin(); + auto newIt = newSettings->begin(); + while (oldIt != oldSettings->end() && newIt != newSettings->end()) { + if (oldIt->first == newIt->first) { + if (oldIt->second != newIt->second) { + diff->emplace(oldIt->first); + } + ++oldIt; + ++newIt; + } else if (oldIt->first < newIt->first) { + diff->emplace(oldIt->first); + ++oldIt; + } else { + diff->emplace(newIt->first); + ++newIt; + } + } + while (oldIt != oldSettings->end()) { + diff->emplace(oldIt->first); + ++oldIt; + } + while (newIt != newSettings->end()) { + diff->emplace(newIt->first); + ++newIt; + } + if (!diff->empty()) { + Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvUserSettingsChanged(userName, newSettings, std::move(diff))); + } +} + +void TUserSettingsReader::OnRemoveUserSettings(const TString& userName, const TSettingsPtr& oldSettings) { auto diff = std::make_shared<std::set<TString>>(); - for (const auto& [name, value] : *oldSettings) { - diff->insert(name); - } + for (const auto& [name, value] : *oldSettings) { + diff->insert(name); + } Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvUserSettingsChanged(userName, std::make_shared<const std::map<TString, TString>>(), std::move(diff))); -} - -void TUserSettingsReader::OnAddUserSettings(const TString& userName, const TSettingsPtr& currentSettings) { +} + +void TUserSettingsReader::OnAddUserSettings(const TString& userName, const TSettingsPtr& currentSettings) { auto diff = std::make_shared<std::set<TString>>(); - for (const auto& [name, value] : *currentSettings) { - diff->insert(name); - } - Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvUserSettingsChanged(userName, currentSettings, std::move(diff))); -} - -void TUserSettingsReader::ScheduleNextUpdate() { - const ui64 period = Cfg().GetUserSettingsUpdateTimeMs(); - const TDuration randomTime = TDuration::MilliSeconds(period + RandomNumber(period / 2)); - Schedule(randomTime, new TEvWakeup()); -} - -} // namespace NKikimr::NSQS + for (const auto& [name, value] : *currentSettings) { + diff->insert(name); + } + Send(MakeSqsServiceID(SelfId().NodeId()), new TSqsEvents::TEvUserSettingsChanged(userName, currentSettings, std::move(diff))); +} + +void TUserSettingsReader::ScheduleNextUpdate() { + const ui64 period = Cfg().GetUserSettingsUpdateTimeMs(); + const TDuration randomTime = TDuration::MilliSeconds(period + RandomNumber(period / 2)); + Schedule(randomTime, new TEvWakeup()); +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/user_settings_reader.h b/ydb/core/ymq/actor/user_settings_reader.h index c85d6922e4a..504a46a2e01 100644 --- a/ydb/core/ymq/actor/user_settings_reader.h +++ b/ydb/core/ymq/actor/user_settings_reader.h @@ -1,57 +1,57 @@ -#pragma once -#include "defs.h" -#include "events.h" -#include "log.h" -#include "serviceid.h" - +#pragma once +#include "defs.h" +#include "events.h" +#include "log.h" +#include "serviceid.h" + #include <library/cpp/actors/core/actor_bootstrapped.h> - -#include 
<util/generic/hash.h> -#include <util/generic/ptr.h> - -namespace NKikimr::NSQS { - -class TUserSettingsReader : public TActorBootstrapped<TUserSettingsReader> { -public: - explicit TUserSettingsReader(const TIntrusivePtr<TTransactionCounters>& transactionCounters); - ~TUserSettingsReader(); - - void Bootstrap(); - + +#include <util/generic/hash.h> +#include <util/generic/ptr.h> + +namespace NKikimr::NSQS { + +class TUserSettingsReader : public TActorBootstrapped<TUserSettingsReader> { +public: + explicit TUserSettingsReader(const TIntrusivePtr<TTransactionCounters>& transactionCounters); + ~TUserSettingsReader(); + + void Bootstrap(); + static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::SQS_USER_SETTINGS_READER_ACTOR; - } - -private: - using TSettings = std::map<TString, TString>; // name -> value. + } + +private: + using TSettings = std::map<TString, TString>; // name -> value. using TSettingsPtr = std::shared_ptr<TSettings>; - using TUserSettings = std::map<TString, TSettingsPtr>; // user -> settings. + using TUserSettings = std::map<TString, TSettingsPtr>; // user -> settings. using TUserSettingsPtr = std::shared_ptr<TUserSettings>; - - STATEFN(StateFunc); - void HandleWakeup(TEvWakeup::TPtr& ev); - void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); - - void StartReading(); - void NextRequest(); - void OnUserSettingsRead(const TSqsEvents::TEvExecuted::TRecord& record); - void CompileRequest(); - void OnRequestCompiled(const TSqsEvents::TEvExecuted::TRecord& record); - void ScheduleNextUpdate(); - void FinishScan(); - void CompareUserSettings(const TString& userName, const TSettingsPtr& oldSettings, const TSettingsPtr& newSettings); - void OnRemoveUserSettings(const TString& userName, const TSettingsPtr& oldSettings); - void OnAddUserSettings(const TString& userName, const TSettingsPtr& currentSettings); - -private: - TString CompiledQuery; - - TIntrusivePtr<TTransactionCounters> TransactionCounters; - TString CurrentUser; - TString CurrentName; - - TUserSettingsPtr OldSettings; + + STATEFN(StateFunc); + void HandleWakeup(TEvWakeup::TPtr& ev); + void HandleExecuted(TSqsEvents::TEvExecuted::TPtr& ev); + + void StartReading(); + void NextRequest(); + void OnUserSettingsRead(const TSqsEvents::TEvExecuted::TRecord& record); + void CompileRequest(); + void OnRequestCompiled(const TSqsEvents::TEvExecuted::TRecord& record); + void ScheduleNextUpdate(); + void FinishScan(); + void CompareUserSettings(const TString& userName, const TSettingsPtr& oldSettings, const TSettingsPtr& newSettings); + void OnRemoveUserSettings(const TString& userName, const TSettingsPtr& oldSettings); + void OnAddUserSettings(const TString& userName, const TSettingsPtr& currentSettings); + +private: + TString CompiledQuery; + + TIntrusivePtr<TTransactionCounters> TransactionCounters; + TString CurrentUser; + TString CurrentName; + + TUserSettingsPtr OldSettings; TUserSettingsPtr CurrentSettings = std::make_shared<TUserSettings>(); -}; - -} // namespace NKikimr::NSQS +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/ut/attributes_md5_ut.cpp b/ydb/core/ymq/actor/ut/attributes_md5_ut.cpp index a6b00f7030d..d921f9940db 100644 --- a/ydb/core/ymq/actor/ut/attributes_md5_ut.cpp +++ b/ydb/core/ymq/actor/ut/attributes_md5_ut.cpp @@ -1,36 +1,36 @@ #include <ydb/core/ymq/actor/attributes_md5.h> - + #include <library/cpp/testing/unittest/registar.h> - -namespace NKikimr::NSQS { - -void AddStringAttr(google::protobuf::RepeatedPtrField<TMessageAttribute>& 
attrs, const TString& name, const TString& value, const TString& type = "String") { - auto* a = attrs.Add(); - a->SetName(name); - a->SetStringValue(value); - a->SetDataType(type); -} - -void AddBinaryAttr(google::protobuf::RepeatedPtrField<TMessageAttribute>& attrs, const TString& name, const TString& value) { - auto* a = attrs.Add(); - a->SetName(name); - a->SetBinaryValue(value); - a->SetDataType("Binary"); -} - + +namespace NKikimr::NSQS { + +void AddStringAttr(google::protobuf::RepeatedPtrField<TMessageAttribute>& attrs, const TString& name, const TString& value, const TString& type = "String") { + auto* a = attrs.Add(); + a->SetName(name); + a->SetStringValue(value); + a->SetDataType(type); +} + +void AddBinaryAttr(google::protobuf::RepeatedPtrField<TMessageAttribute>& attrs, const TString& name, const TString& value) { + auto* a = attrs.Add(); + a->SetName(name); + a->SetBinaryValue(value); + a->SetDataType("Binary"); +} + Y_UNIT_TEST_SUITE(AttributesMD5Test) { Y_UNIT_TEST(AmazonSampleWithString) { - google::protobuf::RepeatedPtrField<TMessageAttribute> attrs; - AddStringAttr(attrs, "test_attribute_name_2", "test_attribute_value_2"); - AddStringAttr(attrs, "test_attribute_name_1", "test_attribute_value_1"); - UNIT_ASSERT_STRINGS_EQUAL(CalcMD5OfMessageAttributes(attrs), "d53f3b558fe951154770f25cb63dbba9"); - } - + google::protobuf::RepeatedPtrField<TMessageAttribute> attrs; + AddStringAttr(attrs, "test_attribute_name_2", "test_attribute_value_2"); + AddStringAttr(attrs, "test_attribute_name_1", "test_attribute_value_1"); + UNIT_ASSERT_STRINGS_EQUAL(CalcMD5OfMessageAttributes(attrs), "d53f3b558fe951154770f25cb63dbba9"); + } + Y_UNIT_TEST(AmazonSampleWithBinary) { - google::protobuf::RepeatedPtrField<TMessageAttribute> attrs; - AddBinaryAttr(attrs, "test_attribute_name", "test_attribute_value"); - UNIT_ASSERT_STRINGS_EQUAL(CalcMD5OfMessageAttributes(attrs), "23f6bd27ea87aab7dfeadcc9aebee495"); - } -} - -} // namespace NKikimr::NSQS + google::protobuf::RepeatedPtrField<TMessageAttribute> attrs; + AddBinaryAttr(attrs, "test_attribute_name", "test_attribute_value"); + UNIT_ASSERT_STRINGS_EQUAL(CalcMD5OfMessageAttributes(attrs), "23f6bd27ea87aab7dfeadcc9aebee495"); + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/ut/infly_ut.cpp b/ydb/core/ymq/actor/ut/infly_ut.cpp index f6fed85aff1..4c1bed5ca67 100644 --- a/ydb/core/ymq/actor/ut/infly_ut.cpp +++ b/ydb/core/ymq/actor/ut/infly_ut.cpp @@ -1,146 +1,146 @@ #include <ydb/core/ymq/actor/infly.h> - + #include <library/cpp/testing/unittest/registar.h> - -namespace NKikimr::NSQS { - -Y_UNIT_TEST_SUITE(InflyTest) { - Y_UNIT_TEST(AddMessage) { - TIntrusivePtr<TInflyMessages> infly = MakeIntrusive<TInflyMessages>(); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(42)), 0); + +namespace NKikimr::NSQS { + +Y_UNIT_TEST_SUITE(InflyTest) { + Y_UNIT_TEST(AddMessage) { + TIntrusivePtr<TInflyMessages> infly = MakeIntrusive<TInflyMessages>(); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(42)), 0); infly->Add(MakeHolder<TInflyMessage>(1ull, 0ull, TInstant::Seconds(42), 0)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 1); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(42)), 1); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(43)), 0); - + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 1); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(42)), 1); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(43)), 0); + 
infly->Add(MakeHolder<TInflyMessage>(2ull, 0ull, TInstant::Seconds(12), 0)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(1)), 2); - } - - Y_UNIT_TEST(DeleteMessage) { - TIntrusivePtr<TInflyMessages> infly = MakeIntrusive<TInflyMessages>(); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(1)), 2); + } + + Y_UNIT_TEST(DeleteMessage) { + TIntrusivePtr<TInflyMessages> infly = MakeIntrusive<TInflyMessages>(); infly->Add(MakeHolder<TInflyMessage>(1ull, 0ull, TInstant::Seconds(42), 0)); infly->Add(MakeHolder<TInflyMessage>(2ull, 0ull, TInstant::Seconds(12), 0)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); - UNIT_ASSERT(!infly->Delete(5)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); - UNIT_ASSERT(infly->Delete(2)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 1); - UNIT_ASSERT(!infly->Delete(2)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 1); - UNIT_ASSERT(infly->Delete(1)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 0); - } - - Y_UNIT_TEST(ChangeMesageVisibility) { - TIntrusivePtr<TInflyMessages> infly = MakeIntrusive<TInflyMessages>(); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); + UNIT_ASSERT(!infly->Delete(5)); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); + UNIT_ASSERT(infly->Delete(2)); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 1); + UNIT_ASSERT(!infly->Delete(2)); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 1); + UNIT_ASSERT(infly->Delete(1)); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 0); + } + + Y_UNIT_TEST(ChangeMesageVisibility) { + TIntrusivePtr<TInflyMessages> infly = MakeIntrusive<TInflyMessages>(); infly->Add(MakeHolder<TInflyMessage>(1ull, 0ull, TInstant::Seconds(42), 0)); infly->Add(MakeHolder<TInflyMessage>(2ull, 0ull, TInstant::Seconds(12), 0)); infly->Add(MakeHolder<TInflyMessage>(5ull, 0ull, TInstant::Seconds(150), 0)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 1); - - { - TInflyMessages::TChangeVisibilityCandidates changeVisibilityCandidates(infly); - UNIT_ASSERT(!changeVisibilityCandidates.Add(3)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 1); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); - - UNIT_ASSERT(changeVisibilityCandidates.Add(2)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); - - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 1); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(40)), 2); - - changeVisibilityCandidates.SetVisibilityDeadline(2, TInstant::Seconds(100)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); - - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 2); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(40)), 3); - - UNIT_ASSERT(changeVisibilityCandidates.Add(5)); - UNIT_ASSERT(changeVisibilityCandidates.Delete(5)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); - - // test offsets that doesn't exist - UNIT_ASSERT(!changeVisibilityCandidates.Has(42)); - changeVisibilityCandidates.SetVisibilityDeadline(42, TInstant::Seconds(50)); - UNIT_ASSERT(!changeVisibilityCandidates.Delete(42)); - } - { - TInflyMessages::TChangeVisibilityCandidates changeVisibilityCandidates(infly); - // test empty candidates - UNIT_ASSERT(!changeVisibilityCandidates.Has(42)); - UNIT_ASSERT(!changeVisibilityCandidates.Delete(10)); - 
changeVisibilityCandidates.SetVisibilityDeadline(100500, TInstant::Seconds(100500)); - } - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 1); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(40)), 2); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); - } - - Y_UNIT_TEST(ReceiveMessages) { - TIntrusivePtr<TInflyMessages> infly = MakeIntrusive<TInflyMessages>(); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 1); + + { + TInflyMessages::TChangeVisibilityCandidates changeVisibilityCandidates(infly); + UNIT_ASSERT(!changeVisibilityCandidates.Add(3)); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 1); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); + + UNIT_ASSERT(changeVisibilityCandidates.Add(2)); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); + + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 1); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(40)), 2); + + changeVisibilityCandidates.SetVisibilityDeadline(2, TInstant::Seconds(100)); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); + + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 2); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(40)), 3); + + UNIT_ASSERT(changeVisibilityCandidates.Add(5)); + UNIT_ASSERT(changeVisibilityCandidates.Delete(5)); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); + + // test offsets that doesn't exist + UNIT_ASSERT(!changeVisibilityCandidates.Has(42)); + changeVisibilityCandidates.SetVisibilityDeadline(42, TInstant::Seconds(50)); + UNIT_ASSERT(!changeVisibilityCandidates.Delete(42)); + } + { + TInflyMessages::TChangeVisibilityCandidates changeVisibilityCandidates(infly); + // test empty candidates + UNIT_ASSERT(!changeVisibilityCandidates.Has(42)); + UNIT_ASSERT(!changeVisibilityCandidates.Delete(10)); + changeVisibilityCandidates.SetVisibilityDeadline(100500, TInstant::Seconds(100500)); + } + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 1); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(40)), 2); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); + } + + Y_UNIT_TEST(ReceiveMessages) { + TIntrusivePtr<TInflyMessages> infly = MakeIntrusive<TInflyMessages>(); infly->Add(MakeHolder<TInflyMessage>(1ull, 0ull, TInstant::Seconds(1), 0)); infly->Add(MakeHolder<TInflyMessage>(2ull, 0ull, TInstant::Seconds(2), 0)); infly->Add(MakeHolder<TInflyMessage>(3ull, 0ull, TInstant::Seconds(3), 0)); infly->Add(MakeHolder<TInflyMessage>(4ull, 0ull, TInstant::Seconds(4), 0)); infly->Add(MakeHolder<TInflyMessage>(5ull, 0ull, TInstant::Seconds(5), 0)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 5); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 0); - { - auto messages = infly->Receive(10, TInstant::Seconds(5)); - UNIT_ASSERT(messages); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 5); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(0)), 1); - UNIT_ASSERT(!infly->Receive(10, TInstant::Seconds(5))); - UNIT_ASSERT(!messages.Has(5)); - UNIT_ASSERT(messages.Has(1)); - UNIT_ASSERT(messages.Has(2)); - UNIT_ASSERT(messages.Has(3)); - UNIT_ASSERT(messages.Has(4)); - auto i = messages.Begin(); - ++i; - UNIT_ASSERT(i != messages.End()); - i->Message().SetVisibilityDeadline(TInstant::Seconds(100)); - - ++i; - UNIT_ASSERT(i != messages.End()); - i->Message().SetVisibilityDeadline(TInstant::Seconds(100)); - - 
++i; - UNIT_ASSERT(i != messages.End()); - i->Message().SetVisibilityDeadline(TInstant::Seconds(100)); - - ++i; - UNIT_ASSERT(i == messages.End()); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 0); - } - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 3); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 5); - } - - Y_UNIT_TEST(DeleteReceivedMessage) { - TIntrusivePtr<TInflyMessages> infly = MakeIntrusive<TInflyMessages>(); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 5); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 0); + { + auto messages = infly->Receive(10, TInstant::Seconds(5)); + UNIT_ASSERT(messages); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 5); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(0)), 1); + UNIT_ASSERT(!infly->Receive(10, TInstant::Seconds(5))); + UNIT_ASSERT(!messages.Has(5)); + UNIT_ASSERT(messages.Has(1)); + UNIT_ASSERT(messages.Has(2)); + UNIT_ASSERT(messages.Has(3)); + UNIT_ASSERT(messages.Has(4)); + auto i = messages.Begin(); + ++i; + UNIT_ASSERT(i != messages.End()); + i->Message().SetVisibilityDeadline(TInstant::Seconds(100)); + + ++i; + UNIT_ASSERT(i != messages.End()); + i->Message().SetVisibilityDeadline(TInstant::Seconds(100)); + + ++i; + UNIT_ASSERT(i != messages.End()); + i->Message().SetVisibilityDeadline(TInstant::Seconds(100)); + + ++i; + UNIT_ASSERT(i == messages.End()); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 0); + } + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(50)), 3); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 5); + } + + Y_UNIT_TEST(DeleteReceivedMessage) { + TIntrusivePtr<TInflyMessages> infly = MakeIntrusive<TInflyMessages>(); infly->Add(MakeHolder<TInflyMessage>(1ull, 0ull, TInstant::Seconds(1), 0)); infly->Add(MakeHolder<TInflyMessage>(2ull, 0ull, TInstant::Seconds(2), 0)); infly->Add(MakeHolder<TInflyMessage>(3ull, 0ull, TInstant::Seconds(3), 0)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(2)), 2); - { - auto messages = infly->Receive(10, TInstant::Seconds(2)); - UNIT_ASSERT(messages); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); - UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(2)), 2); - auto i = messages.Begin(); - UNIT_ASSERT(i != messages.End()); - UNIT_ASSERT(messages.Delete(1)); - UNIT_ASSERT(!messages.Delete(1)); - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); - } - UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); - } -} - -} // namespace NKikimr::NSQS + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(2)), 2); + { + auto messages = infly->Receive(10, TInstant::Seconds(2)); + UNIT_ASSERT(messages); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 3); + UNIT_ASSERT_VALUES_EQUAL(infly->GetInflyCount(TInstant::Seconds(2)), 2); + auto i = messages.Begin(); + UNIT_ASSERT(i != messages.End()); + UNIT_ASSERT(messages.Delete(1)); + UNIT_ASSERT(!messages.Delete(1)); + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); + } + UNIT_ASSERT_VALUES_EQUAL(infly->GetCapacity(), 2); + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/ut/message_delay_stats_ut.cpp b/ydb/core/ymq/actor/ut/message_delay_stats_ut.cpp index 0baffb3ef7e..1ab7d6046d3 100644 --- a/ydb/core/ymq/actor/ut/message_delay_stats_ut.cpp +++ b/ydb/core/ymq/actor/ut/message_delay_stats_ut.cpp @@ -1,57 +1,57 @@ #include 
<ydb/core/ymq/actor/message_delay_stats.h> - + #include <library/cpp/testing/unittest/registar.h> - -namespace NKikimr::NSQS { - -Y_UNIT_TEST_SUITE(MessageDelayStatsTest) { - Y_UNIT_TEST(All) { - TMessageDelayStatistics s; - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(42)), 0); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(40)), 0); - - s.AddDelayedMessage(TInstant::MilliSeconds(150), TInstant::MilliSeconds(100)); // start == 100 - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(100)), 1); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(120)), 1); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(200)), 1); - - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(1099)), 1); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(1100)), 0); - s.AddDelayedMessage(TInstant::MilliSeconds(5000), TInstant::MilliSeconds(2000)); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(2000)), 1); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(2200)), 1); - - s.AddDelayedMessage(TInstant::MilliSeconds(5000), TInstant::MilliSeconds(3000)); - s.AddDelayedMessage(TInstant::MilliSeconds(5000), TInstant::MilliSeconds(2900)); // decrease "now" is OK - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(3000)), 3); - - s.AddDelayedMessage(TInstant::MilliSeconds(6000), TInstant::MilliSeconds(3000)); - s.AddDelayedMessage(TInstant::MilliSeconds(7000), TInstant::MilliSeconds(3000)); - s.AddDelayedMessage(TInstant::MilliSeconds(8000), TInstant::MilliSeconds(3000)); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(3000)), 6); - - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(5500)), 3); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(6500)), 2); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(7500)), 1); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(8500)), 0); - } - - Y_UNIT_TEST(BigTimeDiff) { - TMessageDelayStatistics s; - s.AddDelayedMessage(TInstant::Seconds(1000), TInstant::Seconds(500)); // start from 500 - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::Seconds(500)), 1); - - s.AddDelayedMessage(TInstant::Seconds(100000), TInstant::Seconds(99990)); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::Seconds(99990)), 1); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::Seconds(101000)), 0); - } - - Y_UNIT_TEST(MaxMessageDelay) { - TMessageDelayStatistics s; - s.AddDelayedMessage(TInstant::Seconds(1900), TInstant::Seconds(1000)); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::Seconds(1000)), 1); - UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::Seconds(1001)), 1); - } -} - -} // namespace NKikimr::NSQS + +namespace NKikimr::NSQS { + +Y_UNIT_TEST_SUITE(MessageDelayStatsTest) { + Y_UNIT_TEST(All) { + TMessageDelayStatistics s; + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(42)), 0); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(40)), 0); + + s.AddDelayedMessage(TInstant::MilliSeconds(150), TInstant::MilliSeconds(100)); // start == 100 + 
UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(100)), 1); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(120)), 1); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(200)), 1); + + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(1099)), 1); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(1100)), 0); + s.AddDelayedMessage(TInstant::MilliSeconds(5000), TInstant::MilliSeconds(2000)); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(2000)), 1); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(2200)), 1); + + s.AddDelayedMessage(TInstant::MilliSeconds(5000), TInstant::MilliSeconds(3000)); + s.AddDelayedMessage(TInstant::MilliSeconds(5000), TInstant::MilliSeconds(2900)); // decrease "now" is OK + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(3000)), 3); + + s.AddDelayedMessage(TInstant::MilliSeconds(6000), TInstant::MilliSeconds(3000)); + s.AddDelayedMessage(TInstant::MilliSeconds(7000), TInstant::MilliSeconds(3000)); + s.AddDelayedMessage(TInstant::MilliSeconds(8000), TInstant::MilliSeconds(3000)); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(3000)), 6); + + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(5500)), 3); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(6500)), 2); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(7500)), 1); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::MilliSeconds(8500)), 0); + } + + Y_UNIT_TEST(BigTimeDiff) { + TMessageDelayStatistics s; + s.AddDelayedMessage(TInstant::Seconds(1000), TInstant::Seconds(500)); // start from 500 + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::Seconds(500)), 1); + + s.AddDelayedMessage(TInstant::Seconds(100000), TInstant::Seconds(99990)); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::Seconds(99990)), 1); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::Seconds(101000)), 0); + } + + Y_UNIT_TEST(MaxMessageDelay) { + TMessageDelayStatistics s; + s.AddDelayedMessage(TInstant::Seconds(1900), TInstant::Seconds(1000)); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::Seconds(1000)), 1); + UNIT_ASSERT_VALUES_EQUAL(s.UpdateAndGetMessagesDelayed(TInstant::Seconds(1001)), 1); + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/ut/sha256_ut.cpp b/ydb/core/ymq/actor/ut/sha256_ut.cpp index a0f2b5286b2..95cff12bd3d 100644 --- a/ydb/core/ymq/actor/ut/sha256_ut.cpp +++ b/ydb/core/ymq/actor/ut/sha256_ut.cpp @@ -1,18 +1,18 @@ #include <ydb/core/ymq/actor/sha256.h> - + #include <ydb/core/ymq/base/helpers.h> - + #include <library/cpp/testing/unittest/registar.h> - -namespace NKikimr::NSQS { - -Y_UNIT_TEST_SUITE(SHA256Test) { - Y_UNIT_TEST(SHA256Test) { - UNIT_ASSERT_STRINGS_EQUAL(CalcSHA256(""), "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855"); - UNIT_ASSERT_STRINGS_EQUAL(CalcSHA256("123"), "A665A45920422F9D417E4867EFDC4FB8A04A1F3FFF1FA07E998E86F7F7A27AE3"); - - UNIT_ASSERT_STRINGS_UNEQUAL(CalcSHA256("1"), CalcSHA256("2")); - } -} - -} // namespace NKikimr::NSQS + +namespace NKikimr::NSQS { + +Y_UNIT_TEST_SUITE(SHA256Test) { + Y_UNIT_TEST(SHA256Test) { + UNIT_ASSERT_STRINGS_EQUAL(CalcSHA256(""), 
"E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855"); + UNIT_ASSERT_STRINGS_EQUAL(CalcSHA256("123"), "A665A45920422F9D417E4867EFDC4FB8A04A1F3FFF1FA07E998E86F7F7A27AE3"); + + UNIT_ASSERT_STRINGS_UNEQUAL(CalcSHA256("1"), CalcSHA256("2")); + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/actor/ut/ya.make b/ydb/core/ymq/actor/ut/ya.make index a3b4f59f372..b0a46da6529 100644 --- a/ydb/core/ymq/actor/ut/ya.make +++ b/ydb/core/ymq/actor/ut/ya.make @@ -1,28 +1,28 @@ -OWNER( - galaxycrab +OWNER( + galaxycrab g:kikimr - g:sqs -) - -UNITTEST() - -PEERDIR( + g:sqs +) + +UNITTEST() + +PEERDIR( contrib/libs/yaml-cpp ydb/core/mind/address_classification ydb/core/testlib ydb/core/ymq/actor ydb/core/ymq/base ydb/core/ymq/http -) +) -SRCS( - attributes_md5_ut.cpp - infly_ut.cpp - message_delay_stats_ut.cpp - sha256_ut.cpp +SRCS( + attributes_md5_ut.cpp + infly_ut.cpp + message_delay_stats_ut.cpp + sha256_ut.cpp metering_ut.cpp -) - +) + YQL_LAST_ABI_VERSION() -END() +END() diff --git a/ydb/core/ymq/actor/ya.make b/ydb/core/ymq/actor/ya.make index 070653bb577..589531aae74 100644 --- a/ydb/core/ymq/actor/ya.make +++ b/ydb/core/ymq/actor/ya.make @@ -1,15 +1,15 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + LIBRARY() SRCS( actor.cpp auth_factory.cpp - attributes_md5.cpp - cfg.cpp + attributes_md5.cpp + cfg.cpp change_visibility.cpp count_queues.cpp create_queue.cpp @@ -17,44 +17,44 @@ SRCS( delete_message.cpp delete_queue.cpp delete_user.cpp - error.cpp + error.cpp executor.cpp - fifo_cleanup.cpp + fifo_cleanup.cpp garbage_collector.cpp get_queue_attributes.cpp get_queue_url.cpp index_events_processor.cpp - infly.cpp - log.cpp + infly.cpp + log.cpp list_dead_letter_source_queues.cpp list_permissions.cpp list_queues.cpp list_users.cpp - local_rate_limiter_allocator.cpp - message_delay_stats.cpp + local_rate_limiter_allocator.cpp + message_delay_stats.cpp metering.cpp - migration.cpp + migration.cpp modify_permissions.cpp - proxy_actor.cpp + proxy_actor.cpp purge.cpp purge_queue.cpp queue_leader.cpp receive_message.cpp - retention.cpp + retention.cpp schema.cpp - sha256.cpp + sha256.cpp send_message.cpp service.cpp set_queue_attributes.cpp - proxy_service.cpp - queues_list_reader.cpp + proxy_service.cpp + queues_list_reader.cpp queue_schema.cpp - user_settings_names.cpp - user_settings_reader.cpp + user_settings_names.cpp + user_settings_reader.cpp ) PEERDIR( - contrib/libs/openssl + contrib/libs/openssl contrib/libs/protobuf library/cpp/actors/core library/cpp/containers/intrusive_rb_tree @@ -94,8 +94,8 @@ PEERDIR( YQL_LAST_ABI_VERSION() -GENERATE_ENUM_SERIALIZATION(events.h) +GENERATE_ENUM_SERIALIZATION(events.h) GENERATE_ENUM_SERIALIZATION(metering.h) - + END() diff --git a/ydb/core/ymq/base/acl.cpp b/ydb/core/ymq/base/acl.cpp index 683611e88ce..3b71a9750f9 100644 --- a/ydb/core/ymq/base/acl.cpp +++ b/ydb/core/ymq/base/acl.cpp @@ -6,8 +6,8 @@ #include <map> -namespace NKikimr::NSQS { - +namespace NKikimr::NSQS { + class TSQSACLMappings { public: TSQSACLMappings() { @@ -137,5 +137,5 @@ TString GetActionMatchingACE(const TString& actionName) { TVector<TStringBuf> GetAccessMatchingACE(const ui32 access) { return Mappings().GetAccessMatchingACEImpl(access); } - -} // namespace NKikimr::NSQS + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/acl.h b/ydb/core/ymq/base/acl.h index 18c3f07a39e..e18a1849517 100644 --- a/ydb/core/ymq/base/acl.h +++ b/ydb/core/ymq/base/acl.h @@ -2,8 +2,8 @@ #include <ydb/library/aclib/aclib.h> -namespace NKikimr::NSQS { - +namespace 
NKikimr::NSQS { + enum class EACLSourceType : ui32 { Unknown, RootDir, @@ -17,5 +17,5 @@ ui32 GetActionRequiredAccess(const TString& actionName); ui32 GetACERequiredAccess(const TString& aceName); TString GetActionMatchingACE(const TString& actionName); TVector<TStringBuf> GetAccessMatchingACE(const ui32 access); - -} // namespace NKikimr::NSQS + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/action.cpp b/ydb/core/ymq/base/action.cpp index b601d72d879..86e91deb00c 100644 --- a/ydb/core/ymq/base/action.cpp +++ b/ydb/core/ymq/base/action.cpp @@ -1,31 +1,31 @@ -#include "action.h" +#include "action.h" -#include <util/generic/is_in.h> +#include <util/generic/is_in.h> #include <util/generic/hash.h> -#include <util/generic/hash_set.h> - -namespace NKikimr::NSQS { - -namespace { - -constexpr ui32 FOR_QUEUE = 1; -constexpr ui32 FOR_USER = 2; -constexpr ui32 FOR_MESSAGE = 4; -constexpr ui32 BATCH = 8; -constexpr ui32 FAST = 16; -constexpr ui32 PRIVATE = 32; +#include <util/generic/hash_set.h> + +namespace NKikimr::NSQS { + +namespace { + +constexpr ui32 FOR_QUEUE = 1; +constexpr ui32 FOR_USER = 2; +constexpr ui32 FOR_MESSAGE = 4; +constexpr ui32 BATCH = 8; +constexpr ui32 FAST = 16; +constexpr ui32 PRIVATE = 32; constexpr ui32 YMQ_FOR_QUEUE = 64; constexpr ui32 YMQ_FOR_USER = 128; - -struct TActionProps { - TString StringName; + +struct TActionProps { + TString StringName; TString ConvMethodName; - EAction Action; - ui32 Flags; - EAction NonBatchAction; -}; - -static const TActionProps ActionProps[] = { + EAction Action; + ui32 Flags; + EAction NonBatchAction; +}; + +static const TActionProps ActionProps[] = { {"Unknown", "unknown", EAction::Unknown, 0, EAction::Unknown}, {"ChangeMessageVisibility", "change_message_visibility", EAction::ChangeMessageVisibility, FOR_QUEUE | FOR_MESSAGE | FAST, EAction::ChangeMessageVisibility}, {"ChangeMessageVisibilityBatch", "change_message_visibility_batch", EAction::ChangeMessageVisibilityBatch, FOR_QUEUE | FOR_MESSAGE | BATCH | FAST, EAction::ChangeMessageVisibility}, @@ -51,99 +51,99 @@ static const TActionProps ActionProps[] = { {"ListPermissions", "list_permissions", EAction::ListPermissions, FOR_USER | FAST, EAction::ListPermissions}, {"ListDeadLetterSourceQueues", "list_dead_letter_source_queues", EAction::ListDeadLetterSourceQueues, FOR_QUEUE | FAST, EAction::ListDeadLetterSourceQueues}, {"CountQueues", "count_queues", EAction::CountQueues, FOR_USER | FAST | PRIVATE, EAction::CountQueues}, -}; - -static_assert(Y_ARRAY_SIZE(ActionProps) == EAction::ActionsArraySize); - -THashMap<TString, EAction> GetStringToAction() { - THashMap<TString, EAction> ret; - for (int action = EAction::Unknown + 1; action < EAction::ActionsArraySize; ++action) { - const TActionProps& props = ActionProps[action]; - ret[props.StringName] = props.Action; - } - return ret; -} - -const TActionProps& GetProps(EAction action) { - int index = static_cast<int>(action); - if (index < 0 || index >= EAction::ActionsArraySize) { - index = EAction::Unknown; - } - return ActionProps[index]; -} - -const THashMap<TString, EAction> StringToAction = GetStringToAction(); - -} // namespace - +}; + +static_assert(Y_ARRAY_SIZE(ActionProps) == EAction::ActionsArraySize); + +THashMap<TString, EAction> GetStringToAction() { + THashMap<TString, EAction> ret; + for (int action = EAction::Unknown + 1; action < EAction::ActionsArraySize; ++action) { + const TActionProps& props = ActionProps[action]; + ret[props.StringName] = props.Action; + } + return ret; +} + +const TActionProps& 
GetProps(EAction action) { + int index = static_cast<int>(action); + if (index < 0 || index >= EAction::ActionsArraySize) { + index = EAction::Unknown; + } + return ActionProps[index]; +} + +const THashMap<TString, EAction> StringToAction = GetStringToAction(); + +} // namespace + EAction ActionFromString(const TString& name) { - auto ai = StringToAction.find(name); - if (ai == StringToAction.end()) { + auto ai = StringToAction.find(name); + if (ai == StringToAction.end()) { return EAction::Unknown; } return ai->second; } -const TString& ActionToString(EAction action) { - return GetProps(action).StringName; -} - +const TString& ActionToString(EAction action) { + return GetProps(action).StringName; +} + const TString& ActionToCloudConvMethod(EAction action) { return GetProps(action).ConvMethodName; } -bool IsBatchAction(EAction action) { - return GetProps(action).Flags & BATCH; -} - -EAction GetNonBatchAction(EAction action) { - return GetProps(action).NonBatchAction; -} - -bool IsActionForQueue(EAction action) { - return GetProps(action).Flags & FOR_QUEUE; -} - +bool IsBatchAction(EAction action) { + return GetProps(action).Flags & BATCH; +} + +EAction GetNonBatchAction(EAction action) { + return GetProps(action).NonBatchAction; +} + +bool IsActionForQueue(EAction action) { + return GetProps(action).Flags & FOR_QUEUE; +} + bool IsActionForQueueYMQ(EAction action) { return GetProps(action).Flags & YMQ_FOR_QUEUE; } -bool IsActionForUser(EAction action) { - return GetProps(action).Flags & FOR_USER; -} - +bool IsActionForUser(EAction action) { + return GetProps(action).Flags & FOR_USER; +} + bool IsActionForUserYMQ(EAction action) { return GetProps(action).Flags & YMQ_FOR_USER; } -bool IsActionForMessage(EAction action) { - return GetProps(action).Flags & FOR_MESSAGE; -} - -bool IsFastAction(EAction action) { - return GetProps(action).Flags & FAST; -} - -bool IsPrivateAction(EAction action) { - return GetProps(action).Flags & PRIVATE; -} - -bool IsProxyAction(EAction action) { -#define ACTION_CASE(a) case EAction::a: \ - return true; - - switch (action) { - ENUMERATE_PROXY_ACTIONS(ACTION_CASE) - default: - return false; - } - -#undef ACTION_CASE -} - -} // namespace NKikimr::NSQS - -template<> -void Out<NKikimr::NSQS::EAction>(IOutputStream& out, typename TTypeTraits<NKikimr::NSQS::EAction>::TFuncParam action) { - out << ActionToString(action); -} +bool IsActionForMessage(EAction action) { + return GetProps(action).Flags & FOR_MESSAGE; +} + +bool IsFastAction(EAction action) { + return GetProps(action).Flags & FAST; +} + +bool IsPrivateAction(EAction action) { + return GetProps(action).Flags & PRIVATE; +} + +bool IsProxyAction(EAction action) { +#define ACTION_CASE(a) case EAction::a: \ + return true; + + switch (action) { + ENUMERATE_PROXY_ACTIONS(ACTION_CASE) + default: + return false; + } + +#undef ACTION_CASE +} + +} // namespace NKikimr::NSQS + +template<> +void Out<NKikimr::NSQS::EAction>(IOutputStream& out, typename TTypeTraits<NKikimr::NSQS::EAction>::TFuncParam action) { + out << ActionToString(action); +} diff --git a/ydb/core/ymq/base/action.h b/ydb/core/ymq/base/action.h index ede7922dd16..5d9657d3518 100644 --- a/ydb/core/ymq/base/action.h +++ b/ydb/core/ymq/base/action.h @@ -2,68 +2,68 @@ #include <util/generic/string.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { -enum EAction { +enum EAction { Unknown = 0, ChangeMessageVisibility, ChangeMessageVisibilityBatch, CreateQueue, CreateUser, GetQueueAttributes, - GetQueueAttributesBatch, + GetQueueAttributesBatch, 
GetQueueUrl, DeleteMessage, DeleteMessageBatch, DeleteQueue, - DeleteQueueBatch, + DeleteQueueBatch, DeleteUser, ListQueues, ListUsers, PurgeQueue, - PurgeQueueBatch, + PurgeQueueBatch, ReceiveMessage, SendMessage, SendMessageBatch, SetQueueAttributes, - ModifyPermissions, + ModifyPermissions, ListPermissions, ListDeadLetterSourceQueues, CountQueues, - ActionsArraySize, + ActionsArraySize, }; EAction ActionFromString(const TString& name); -const TString& ActionToString(EAction action); +const TString& ActionToString(EAction action); const TString& ActionToCloudConvMethod(EAction action); -bool IsBatchAction(EAction action); -bool IsActionForQueue(EAction action); +bool IsBatchAction(EAction action); +bool IsActionForQueue(EAction action); bool IsActionForQueueYMQ(EAction action); -bool IsActionForUser(EAction action); +bool IsActionForUser(EAction action); bool IsActionForUserYMQ(EAction action); -bool IsProxyAction(EAction action); -bool IsActionForMessage(EAction action); -bool IsFastAction(EAction action); -bool IsPrivateAction(EAction action); - -// get nonbatch action variant for given action -EAction GetNonBatchAction(EAction action); - -// Actions with proxy -#define ENUMERATE_PROXY_ACTIONS(macro) \ - macro(ChangeMessageVisibility) \ - macro(ChangeMessageVisibilityBatch) \ - macro(DeleteMessage) \ - macro(DeleteMessageBatch) \ - macro(DeleteQueue) \ - macro(GetQueueAttributes) \ - macro(PurgeQueue) \ - macro(ReceiveMessage) \ - macro(SendMessage) \ - macro(SendMessageBatch) \ +bool IsProxyAction(EAction action); +bool IsActionForMessage(EAction action); +bool IsFastAction(EAction action); +bool IsPrivateAction(EAction action); + +// get nonbatch action variant for given action +EAction GetNonBatchAction(EAction action); + +// Actions with proxy +#define ENUMERATE_PROXY_ACTIONS(macro) \ + macro(ChangeMessageVisibility) \ + macro(ChangeMessageVisibilityBatch) \ + macro(DeleteMessage) \ + macro(DeleteMessageBatch) \ + macro(DeleteQueue) \ + macro(GetQueueAttributes) \ + macro(PurgeQueue) \ + macro(ReceiveMessage) \ + macro(SendMessage) \ + macro(SendMessageBatch) \ macro(ListDeadLetterSourceQueues) \ - macro(SetQueueAttributes) - -} // namespace NKikimr::NSQS + macro(SetQueueAttributes) + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/constants.h b/ydb/core/ymq/base/constants.h index 670e427ff82..2219926e25c 100644 --- a/ydb/core/ymq/base/constants.h +++ b/ydb/core/ymq/base/constants.h @@ -1,16 +1,16 @@ -#pragma once -#include <cstddef> - +#pragma once +#include <cstddef> + #include <util/generic/string.h> -#define INFLY_LIMIT 120000 - -namespace NKikimr::NSQS { - -constexpr size_t MAX_SHARDS_COUNT = 32; -constexpr size_t MAX_PARTITIONS_COUNT = 128; - -static const TString yaSqsArnPrefix = "yrn:ya:sqs"; -static const TString cloudArnPrefix = "yrn:yc:ymq"; +#define INFLY_LIMIT 120000 + +namespace NKikimr::NSQS { + +constexpr size_t MAX_SHARDS_COUNT = 32; +constexpr size_t MAX_PARTITIONS_COUNT = 128; + +static const TString yaSqsArnPrefix = "yrn:ya:sqs"; +static const TString cloudArnPrefix = "yrn:yc:ymq"; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/counters.cpp b/ydb/core/ymq/base/counters.cpp index 90a9f5000f8..6ae8222c922 100644 --- a/ydb/core/ymq/base/counters.cpp +++ b/ydb/core/ymq/base/counters.cpp @@ -1,45 +1,45 @@ -#include "counters.h" - +#include "counters.h" + #include <ydb/core/base/appdata.h> #include <ydb/core/base/counters.h> #include <ydb/library/http_proxy/error/error.h> - -#include <util/datetime/base.h> 
-#include <util/string/builder.h> -#include <util/system/defaults.h> - -#include <vector> -#include <type_traits> - -namespace NKikimr::NSQS { - -extern const TString DEFAULT_COUNTER_NAME = "sensor"; + +#include <util/datetime/base.h> +#include <util/string/builder.h> +#include <util/system/defaults.h> + +#include <vector> +#include <type_traits> + +namespace NKikimr::NSQS { + +extern const TString DEFAULT_COUNTER_NAME = "sensor"; extern const TString DEFAULT_YMQ_COUNTER_NAME = "name"; extern const TString ACTION_CNTR_PREFIX = "api.http."; extern const TString QUEUE_CNTR_PREFIX = "queue.messages."; extern const TString METHOD_LABLE = "method"; -static const TString USER_LABEL = "user"; +static const TString USER_LABEL = "user"; static const TString CLOUD_LABEL = "cloud"; -static const TString FOLDER_LABEL = "folder"; -static const TString QUEUE_LABEL = "queue"; -extern const TString TOTAL_COUNTER_LABEL = "total"; -static const TString QUERY_TYPE = "query_type"; -static const TString STATUS_CODE = "status_code"; - -static const NMonitoring::TBucketBounds FastActionsDurationBucketsMs = { - 5, - 10, - 25, - 50, - 75, - 100, - 150, - 300, - 500, - 1000, - 5000, -}; - +static const TString FOLDER_LABEL = "folder"; +static const TString QUEUE_LABEL = "queue"; +extern const TString TOTAL_COUNTER_LABEL = "total"; +static const TString QUERY_TYPE = "query_type"; +static const TString STATUS_CODE = "status_code"; + +static const NMonitoring::TBucketBounds FastActionsDurationBucketsMs = { + 5, + 10, + 25, + 50, + 75, + 100, + 150, + 300, + 500, + 1000, + 5000, +}; + static const NMonitoring::TBucketBounds YmqFastActionsDurationBucketsMs = { 5, 10, @@ -55,15 +55,15 @@ static const NMonitoring::TBucketBounds YmqFastActionsDurationBucketsMs = { }; -static const NMonitoring::TBucketBounds SlowActionsDurationBucketsMs = { - 100, - 150, - 300, - 500, - 1000, - 5000, -}; - +static const NMonitoring::TBucketBounds SlowActionsDurationBucketsMs = { + 100, + 150, + 300, + 500, + 1000, + 5000, +}; + static const NMonitoring::TBucketBounds YmqSlowActionsDurationBucketsMs = { 100, 150, @@ -76,48 +76,48 @@ static const NMonitoring::TBucketBounds YmqSlowActionsDurationBucketsMs = { 50'000 }; -static const NMonitoring::TBucketBounds DurationBucketsMs = { - 5, - 10, - 25, - 50, - 75, - 100, - 125, - 150, - 250, - 500, - 750, - 1'000, - 2'500, - 5'000, - 10'000, - 30'000, - 50'000, -}; - -static const NMonitoring::TBucketBounds ClientDurationBucketsMs = { - 100, - 250, - 500, - 750, - 1'000, - 2'500, - 5'000, - 7'500, - 10'000, - 25'000, - 50'000, - 100'000, - 250'000, - 500'000, - 1'000'000, - 2'500'000, - 5'000'000, - 10'000'000, - 25'000'000, - 50'000'000, -}; +static const NMonitoring::TBucketBounds DurationBucketsMs = { + 5, + 10, + 25, + 50, + 75, + 100, + 125, + 150, + 250, + 500, + 750, + 1'000, + 2'500, + 5'000, + 10'000, + 30'000, + 50'000, +}; + +static const NMonitoring::TBucketBounds ClientDurationBucketsMs = { + 100, + 250, + 500, + 750, + 1'000, + 2'500, + 5'000, + 7'500, + 10'000, + 25'000, + 50'000, + 100'000, + 250'000, + 500'000, + 1'000'000, + 2'500'000, + 5'000'000, + 10'000'000, + 25'000'000, + 50'000'000, +}; static const NMonitoring::TBucketBounds YmqClientDurationBucketsMs = { 100, 200, @@ -134,178 +134,178 @@ static const NMonitoring::TBucketBounds YmqClientDurationBucketsMs = { 3'600'000, 7'200'000, }; - -static const NMonitoring::TBucketBounds GetQuotaDurationBucketsMs = { - 1, - 2, - 5, - 10, - 25, - 50, - 75, - 100, - 125, - 150, - 250, - 500, - 750, - 1'000, - 2'500, -}; - -static const 
NMonitoring::TBucketBounds MessageReceiveAttemptsBuckets = { - 1, - 2, - 5, -}; - -template <class T> -struct TSizeOfMemberType; - -template <class T, class TPack> -struct TSizeOfMemberType<T TPack::*> { - static constexpr size_t Value = sizeof(T); -}; - -template <class T> -constexpr size_t SizeOfMember = TSizeOfMemberType<T>::Value; - -template <class... T> -struct TMemberCountersDescriptor { - TMemberCountersDescriptor(T... memberCounterPointers) - : MemberCounterPointers(memberCounterPointers...) - { - } - - template <class TCountersPack, size_t i = 0> - void SetAggregatedParent(TCountersPack* pack, TCountersPack* parent) const { - auto pointerToMemberCounter = std::get<i>(MemberCounterPointers); - (pack->*pointerToMemberCounter).SetAggregatedParent(parent ? &(parent->*pointerToMemberCounter) : nullptr); - if constexpr (i + 1 < MembersCount) { - SetAggregatedParent<TCountersPack, i + 1>(pack, parent); - } - } - - template <size_t i = 0> - constexpr size_t SizeOfCounters() const { - constexpr size_t currentElemSize = SizeOfMember<std::tuple_element_t<i, decltype(MemberCounterPointers)>>; - if constexpr (i + 1 < MembersCount) { - return currentElemSize + SizeOfCounters<i + 1>(); - } else { - return currentElemSize; - } - } - - std::tuple<T...> MemberCounterPointers; - static constexpr size_t MembersCount = sizeof...(T); -}; - -static constexpr size_t AbsDiff(size_t a, size_t b) { - return a < b ? b - a : a - b; -} - -static constexpr bool AbsDiffLessThanCounter(size_t a, size_t b) { - return AbsDiff(a, b) < sizeof(TLazyCachedCounter); -} - -static const auto ActionCountersDescriptor = - TMemberCountersDescriptor(&TActionCounters::Success, - &TActionCounters::Errors, - &TActionCounters::Infly, - &TActionCounters::Duration, - &TActionCounters::WorkingDuration); - -static_assert(AbsDiffLessThanCounter(ActionCountersDescriptor.SizeOfCounters(), sizeof(TActionCounters))); - -static const auto QueryTypeCountersDescriptor = - TMemberCountersDescriptor(&TQueryTypeCounters::TransactionsCount, - &TQueryTypeCounters::TransactionsFailed, - &TQueryTypeCounters::TransactionDuration); - -static_assert(AbsDiffLessThanCounter(QueryTypeCountersDescriptor.SizeOfCounters(), sizeof(TQueryTypeCounters))); - -static const auto TransactionCountersDescriptor = - TMemberCountersDescriptor(&TTransactionCounters::CompileQueryCount, - &TTransactionCounters::TransactionsCount, - &TTransactionCounters::TransactionsInfly, - &TTransactionCounters::TransactionRetryTimeouts, - &TTransactionCounters::TransactionRetries, - &TTransactionCounters::TransactionsFailed); - -static_assert(AbsDiffLessThanCounter(TransactionCountersDescriptor.SizeOfCounters() + - SizeOfMember<decltype(&TTransactionCounters::AllocPoolCounters)> + - SizeOfMember<decltype(&TTransactionCounters::AggregatedParent)> + - SizeOfMember<decltype(&TTransactionCounters::QueryTypeCounters)>, - sizeof(TTransactionCounters))); - -static const auto UserDetailedCountersDescriptor = - TMemberCountersDescriptor(&TUserCounters::TDetailedCounters::APIStatuses, - &TUserCounters::TDetailedCounters::GetConfiguration_Duration, - &TUserCounters::TDetailedCounters::GetQuota_Duration, - &TUserCounters::TDetailedCounters::CreateAccountOnTheFly_Success, - &TUserCounters::TDetailedCounters::CreateAccountOnTheFly_Errors); - -static_assert(AbsDiffLessThanCounter(UserDetailedCountersDescriptor.SizeOfCounters() + - SizeOfMember<decltype(&TUserCounters::TDetailedCounters::TransactionCounters)>, - sizeof(TUserCounters::TDetailedCounters))); - -static const auto UserCountersDescriptor 
= - TMemberCountersDescriptor(&TUserCounters::RequestTimeouts, - &TUserCounters::UnauthenticatedAccess); - -static const auto QueueDetailedCountersDescriptor = - TMemberCountersDescriptor(&TQueueCounters::TDetailedCounters::GetConfiguration_Duration, - &TQueueCounters::TDetailedCounters::ReceiveMessage_KeysInvalidated, - &TQueueCounters::TDetailedCounters::ReceiveMessageImmediate_Duration); - -static_assert(AbsDiffLessThanCounter(QueueDetailedCountersDescriptor.SizeOfCounters() + - SizeOfMember<decltype(&TQueueCounters::TDetailedCounters::TransactionCounters)>, - sizeof(TQueueCounters::TDetailedCounters))); - -static const auto QueueCountersDescriptor = - TMemberCountersDescriptor(&TQueueCounters::RequestTimeouts, - &TQueueCounters::RequestsThrottled, - &TQueueCounters::QueueMasterStartProblems, + +static const NMonitoring::TBucketBounds GetQuotaDurationBucketsMs = { + 1, + 2, + 5, + 10, + 25, + 50, + 75, + 100, + 125, + 150, + 250, + 500, + 750, + 1'000, + 2'500, +}; + +static const NMonitoring::TBucketBounds MessageReceiveAttemptsBuckets = { + 1, + 2, + 5, +}; + +template <class T> +struct TSizeOfMemberType; + +template <class T, class TPack> +struct TSizeOfMemberType<T TPack::*> { + static constexpr size_t Value = sizeof(T); +}; + +template <class T> +constexpr size_t SizeOfMember = TSizeOfMemberType<T>::Value; + +template <class... T> +struct TMemberCountersDescriptor { + TMemberCountersDescriptor(T... memberCounterPointers) + : MemberCounterPointers(memberCounterPointers...) + { + } + + template <class TCountersPack, size_t i = 0> + void SetAggregatedParent(TCountersPack* pack, TCountersPack* parent) const { + auto pointerToMemberCounter = std::get<i>(MemberCounterPointers); + (pack->*pointerToMemberCounter).SetAggregatedParent(parent ? &(parent->*pointerToMemberCounter) : nullptr); + if constexpr (i + 1 < MembersCount) { + SetAggregatedParent<TCountersPack, i + 1>(pack, parent); + } + } + + template <size_t i = 0> + constexpr size_t SizeOfCounters() const { + constexpr size_t currentElemSize = SizeOfMember<std::tuple_element_t<i, decltype(MemberCounterPointers)>>; + if constexpr (i + 1 < MembersCount) { + return currentElemSize + SizeOfCounters<i + 1>(); + } else { + return currentElemSize; + } + } + + std::tuple<T...> MemberCounterPointers; + static constexpr size_t MembersCount = sizeof...(T); +}; + +static constexpr size_t AbsDiff(size_t a, size_t b) { + return a < b ? 
b - a : a - b; +} + +static constexpr bool AbsDiffLessThanCounter(size_t a, size_t b) { + return AbsDiff(a, b) < sizeof(TLazyCachedCounter); +} + +static const auto ActionCountersDescriptor = + TMemberCountersDescriptor(&TActionCounters::Success, + &TActionCounters::Errors, + &TActionCounters::Infly, + &TActionCounters::Duration, + &TActionCounters::WorkingDuration); + +static_assert(AbsDiffLessThanCounter(ActionCountersDescriptor.SizeOfCounters(), sizeof(TActionCounters))); + +static const auto QueryTypeCountersDescriptor = + TMemberCountersDescriptor(&TQueryTypeCounters::TransactionsCount, + &TQueryTypeCounters::TransactionsFailed, + &TQueryTypeCounters::TransactionDuration); + +static_assert(AbsDiffLessThanCounter(QueryTypeCountersDescriptor.SizeOfCounters(), sizeof(TQueryTypeCounters))); + +static const auto TransactionCountersDescriptor = + TMemberCountersDescriptor(&TTransactionCounters::CompileQueryCount, + &TTransactionCounters::TransactionsCount, + &TTransactionCounters::TransactionsInfly, + &TTransactionCounters::TransactionRetryTimeouts, + &TTransactionCounters::TransactionRetries, + &TTransactionCounters::TransactionsFailed); + +static_assert(AbsDiffLessThanCounter(TransactionCountersDescriptor.SizeOfCounters() + + SizeOfMember<decltype(&TTransactionCounters::AllocPoolCounters)> + + SizeOfMember<decltype(&TTransactionCounters::AggregatedParent)> + + SizeOfMember<decltype(&TTransactionCounters::QueryTypeCounters)>, + sizeof(TTransactionCounters))); + +static const auto UserDetailedCountersDescriptor = + TMemberCountersDescriptor(&TUserCounters::TDetailedCounters::APIStatuses, + &TUserCounters::TDetailedCounters::GetConfiguration_Duration, + &TUserCounters::TDetailedCounters::GetQuota_Duration, + &TUserCounters::TDetailedCounters::CreateAccountOnTheFly_Success, + &TUserCounters::TDetailedCounters::CreateAccountOnTheFly_Errors); + +static_assert(AbsDiffLessThanCounter(UserDetailedCountersDescriptor.SizeOfCounters() + + SizeOfMember<decltype(&TUserCounters::TDetailedCounters::TransactionCounters)>, + sizeof(TUserCounters::TDetailedCounters))); + +static const auto UserCountersDescriptor = + TMemberCountersDescriptor(&TUserCounters::RequestTimeouts, + &TUserCounters::UnauthenticatedAccess); + +static const auto QueueDetailedCountersDescriptor = + TMemberCountersDescriptor(&TQueueCounters::TDetailedCounters::GetConfiguration_Duration, + &TQueueCounters::TDetailedCounters::ReceiveMessage_KeysInvalidated, + &TQueueCounters::TDetailedCounters::ReceiveMessageImmediate_Duration); + +static_assert(AbsDiffLessThanCounter(QueueDetailedCountersDescriptor.SizeOfCounters() + + SizeOfMember<decltype(&TQueueCounters::TDetailedCounters::TransactionCounters)>, + sizeof(TQueueCounters::TDetailedCounters))); + +static const auto QueueCountersDescriptor = + TMemberCountersDescriptor(&TQueueCounters::RequestTimeouts, + &TQueueCounters::RequestsThrottled, + &TQueueCounters::QueueMasterStartProblems, &TQueueCounters::QueueLeaderStartProblems, - &TQueueCounters::MessagesPurged, - &TQueueCounters::MessageReceiveAttempts, - &TQueueCounters::ClientMessageProcessing_Duration, - &TQueueCounters::MessageReside_Duration, - &TQueueCounters::DeleteMessage_Count, - &TQueueCounters::ReceiveMessage_EmptyCount, - &TQueueCounters::ReceiveMessage_Count, - &TQueueCounters::ReceiveMessage_BytesRead, - &TQueueCounters::MessagesMovedToDLQ, - &TQueueCounters::SendMessage_DeduplicationCount, - &TQueueCounters::SendMessage_Count, - &TQueueCounters::SendMessage_BytesWritten, - //&TQueueCounters::OldestMessageAgeSeconds, // not 
aggregated - &TQueueCounters::MessagesCount, - &TQueueCounters::InflyMessagesCount); - -static const auto HttpActionCountersDescriptor = - TMemberCountersDescriptor(&THttpActionCounters::Requests); - -static_assert(AbsDiffLessThanCounter(HttpActionCountersDescriptor.SizeOfCounters(), sizeof(THttpActionCounters))); - -static const auto HttpUserCountersDescriptor = - TMemberCountersDescriptor(&THttpUserCounters::RequestExceptions); - -static_assert(AbsDiffLessThanCounter(HttpUserCountersDescriptor.SizeOfCounters() + - SizeOfMember<decltype(&THttpUserCounters::ActionCounters)> + - SizeOfMember<decltype(&THttpUserCounters::SqsHttpCounters)> + - SizeOfMember<decltype(&THttpUserCounters::UserCounters)> + - sizeof(NKikimrConfig::TSqsConfig*) + - sizeof(TIntrusivePtr<THttpUserCounters>), sizeof(THttpUserCounters))); - + &TQueueCounters::MessagesPurged, + &TQueueCounters::MessageReceiveAttempts, + &TQueueCounters::ClientMessageProcessing_Duration, + &TQueueCounters::MessageReside_Duration, + &TQueueCounters::DeleteMessage_Count, + &TQueueCounters::ReceiveMessage_EmptyCount, + &TQueueCounters::ReceiveMessage_Count, + &TQueueCounters::ReceiveMessage_BytesRead, + &TQueueCounters::MessagesMovedToDLQ, + &TQueueCounters::SendMessage_DeduplicationCount, + &TQueueCounters::SendMessage_Count, + &TQueueCounters::SendMessage_BytesWritten, + //&TQueueCounters::OldestMessageAgeSeconds, // not aggregated + &TQueueCounters::MessagesCount, + &TQueueCounters::InflyMessagesCount); + +static const auto HttpActionCountersDescriptor = + TMemberCountersDescriptor(&THttpActionCounters::Requests); + +static_assert(AbsDiffLessThanCounter(HttpActionCountersDescriptor.SizeOfCounters(), sizeof(THttpActionCounters))); + +static const auto HttpUserCountersDescriptor = + TMemberCountersDescriptor(&THttpUserCounters::RequestExceptions); + +static_assert(AbsDiffLessThanCounter(HttpUserCountersDescriptor.SizeOfCounters() + + SizeOfMember<decltype(&THttpUserCounters::ActionCounters)> + + SizeOfMember<decltype(&THttpUserCounters::SqsHttpCounters)> + + SizeOfMember<decltype(&THttpUserCounters::UserCounters)> + + sizeof(NKikimrConfig::TSqsConfig*) + + sizeof(TIntrusivePtr<THttpUserCounters>), sizeof(THttpUserCounters))); + TIntrusivePtr<NMonitoring::TDynamicCounters> GetSqsServiceCounters(const TIntrusivePtr<NMonitoring::TDynamicCounters>& countersRoot, const TString& subgroup) { return GetServiceCounters(countersRoot, "sqs")->GetSubgroup("subsystem", subgroup); -} +} TIntrusivePtr<NMonitoring::TDynamicCounters> GetYmqPublicCounters(const TIntrusivePtr<NMonitoring::TDynamicCounters>& countersRoot) { // Remove subgroup and don't have subsystem (is this correct - ?) return GetServiceCounters(countersRoot, "ymq_public"); } - + static TIntrusivePtrCntrCouple GetUserCounters( const TIntrusivePtrCntrCouple & sqsCoreCounters, const TString& userName ) { @@ -313,15 +313,15 @@ static TIntrusivePtrCntrCouple GetUserCounters( sqsCoreCounters.SqsCounters->GetSubgroup(USER_LABEL, userName), sqsCoreCounters.YmqCounters ? sqsCoreCounters.YmqCounters->GetSubgroup(CLOUD_LABEL, userName) : nullptr }; -} - +} + TIntrusivePtrCntrCouple GetFolderCounters(const TIntrusivePtrCntrCouple& userCounters, const TString& folderId) { return { userCounters.SqsCounters->GetSubgroup(FOLDER_LABEL, folderId), userCounters.YmqCounters ? 
userCounters.YmqCounters->GetSubgroup(FOLDER_LABEL, folderId) : nullptr }; -} - +} + void RemoveFolderCounters(const TIntrusivePtrCntrCouple& userCounters, const TString& folderId) { if (userCounters.YmqCounters) { userCounters.YmqCounters->RemoveSubgroup(FOLDER_LABEL, folderId); @@ -333,44 +333,44 @@ static TIntrusivePtrCntrCouple GetQueueCounters(const TIntrusivePtrCntrCouple & userOrFolderCounters.SqsCounters->GetSubgroup(QUEUE_LABEL, queueName), userOrFolderCounters.YmqCounters ? userOrFolderCounters.YmqCounters->GetSubgroup(QUEUE_LABEL, queueName) : nullptr }; -} - +} + std::pair<TIntrusivePtrCntrCouple, TIntrusivePtrCntrCouple> GetUserAndQueueCounters( const TIntrusivePtrCntrCouple& sqsCounters, const TQueuePath& queuePath ) { TIntrusivePtrCntrCouple userCounters; TIntrusivePtrCntrCouple queueCounters; if (queuePath.UserName && sqsCounters.SqsCounters) { - userCounters = GetUserCounters(sqsCounters, queuePath.UserName); - if (queuePath.QueueName) { - queueCounters = GetQueueCounters(userCounters, queuePath.QueueName); - } - } - return { std::move(userCounters), std::move(queueCounters) }; -} - + userCounters = GetUserCounters(sqsCounters, queuePath.UserName); + if (queuePath.QueueName) { + queueCounters = GetQueueCounters(userCounters, queuePath.QueueName); + } + } + return { std::move(userCounters), std::move(queueCounters) }; +} + TIntrusivePtr<NMonitoring::TDynamicCounters> GetAggregatedCountersFromSqsCoreCounters( const TIntrusivePtrCntrCouple& rootCounters, const NKikimrConfig::TSqsConfig& cfg ) { return GetAggregatedCountersFromUserCounters(GetUserCounters(rootCounters, TOTAL_COUNTER_LABEL), cfg); -} - +} + TIntrusivePtr<NMonitoring::TDynamicCounters> GetAggregatedCountersFromUserCounters( const TIntrusivePtrCntrCouple& userCounters, const NKikimrConfig::TSqsConfig& cfg ) { - if (cfg.GetYandexCloudMode()) { + if (cfg.GetYandexCloudMode()) { return GetQueueCounters(GetFolderCounters(userCounters, TOTAL_COUNTER_LABEL), TOTAL_COUNTER_LABEL).SqsCounters; - } else { + } else { return GetQueueCounters(userCounters, TOTAL_COUNTER_LABEL).SqsCounters; - } -} - -ELaziness Lazy(const NKikimrConfig::TSqsConfig& cfg) { - return cfg.GetCreateLazyCounters() ? ELaziness::OnDemand : ELaziness::OnStart; -} - -#define INIT_COUNTER_WITH_NAME(rootCounters, variable, name, expiring, valueType, lazy) \ - variable.Init(rootCounters, expiring, valueType, name, lazy) + } +} + +ELaziness Lazy(const NKikimrConfig::TSqsConfig& cfg) { + return cfg.GetCreateLazyCounters() ? 
ELaziness::OnDemand : ELaziness::OnStart; +} + +#define INIT_COUNTER_WITH_NAME(rootCounters, variable, name, expiring, valueType, lazy) \ + variable.Init(rootCounters, expiring, valueType, name, lazy) #define INIT_COUNTER_WITH_NAME_AND_LABEL(rootCounters, variable, labelName, name, expiring, valueType, lazy) \ variable.Init(rootCounters, expiring, valueType, labelName, name, lazy) @@ -380,12 +380,12 @@ ELaziness Lazy(const NKikimrConfig::TSqsConfig& cfg) { ymqCounter.Init(rootCounters.YmqCounters, expiring, valueType, DEFAULT_YMQ_COUNTER_NAME, ymqName, ELaziness::OnStart); \ } -#define INIT_COUNTER(rootCounters, variable, expiring, valueType, lazy) \ - INIT_COUNTER_WITH_NAME(rootCounters, variable, Y_STRINGIZE(variable), expiring, valueType, lazy) +#define INIT_COUNTER(rootCounters, variable, expiring, valueType, lazy) \ + INIT_COUNTER_WITH_NAME(rootCounters, variable, Y_STRINGIZE(variable), expiring, valueType, lazy) #define INIT_COUNTERS_COUPLE(rootCounters, sqsCounter, ymqCounter, expiring, valueType, lazy, aggr) \ INIT_COUNTERS_COUPLE_WITH_NAMES(rootCounters, sqsCounter, ymqCounter, Y_STRINGIZE(sqsCounter), TString(QUEUE_CNTR_PREFIX) + Y_STRINGIZE(ymqCounter), expiring, valueType, lazy, aggr) - + #define INIT_HISTOGRAMS_COUPLE_WITH_NAMES(rootCounters, sqsHistogram, ymqHistogram, sqsName, ymqName, expiring, sqsBuckets, ymqBuckets, lazy, aggr) \ sqsHistogram.Init(rootCounters.SqsCounters, expiring, sqsBuckets, sqsName, lazy); \ if (rootCounters.YmqCounters && !aggr) { \ @@ -398,25 +398,25 @@ ELaziness Lazy(const NKikimrConfig::TSqsConfig& cfg) { #define INIT_HISTOGRAMS_COUPLE(rootCounters, sqsHistogram, ymqHistogram, expiring, buckets, lazy, aggr) \ INIT_HISTOGRAMS_COUPLE_WITH_BUCKETS(rootCounters, sqsHistogram, ymqHistogram, expiring, buckets, Y_CAT(Ymq, buckets), lazy, aggr) -#define INIT_HISTOGRAM_COUNTER_WITH_NAME(rootCounters, variable, name, expiring, buckets, lazy) \ - variable.Init(rootCounters, expiring, buckets, name, lazy) -#define INIT_HISTOGRAM_COUNTER(rootCounters, variable, expiring, buckets, lazy) \ - INIT_HISTOGRAM_COUNTER_WITH_NAME(rootCounters, variable, Y_STRINGIZE(variable), expiring, buckets, lazy) - -void TActionCounters::Init(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, EAction action, ELifetime lifetime) { - const ELaziness laziness = IsActionForMessage(action) ? Lazy(cfg) : ELaziness::OnDemand; - const EAction nonBatch = GetNonBatchAction(action); - INIT_COUNTER_WITH_NAME(rootCounters, Success, TStringBuilder() << nonBatch << "_Success", lifetime, EValueType::Derivative, laziness); - INIT_COUNTER_WITH_NAME(rootCounters, Errors, TStringBuilder() << nonBatch << "_Errors", lifetime, EValueType::Derivative, IsActionForMessage(action) ? ELaziness::OnStart : laziness); - INIT_COUNTER_WITH_NAME(rootCounters, Infly, TStringBuilder() << nonBatch << "_Infly", lifetime, EValueType::Absolute, ELaziness::OnDemand); - - Duration.Init(rootCounters, lifetime, IsFastAction(action) ? 
FastActionsDurationBucketsMs : SlowActionsDurationBucketsMs, TStringBuilder() << nonBatch << "_Duration", laziness); - - if (action == EAction::ReceiveMessage) { - INIT_HISTOGRAM_COUNTER_WITH_NAME(rootCounters, WorkingDuration, TStringBuilder() << action << "_WorkingDuration", lifetime, DurationBucketsMs, Lazy(cfg)); - } -} - +#define INIT_HISTOGRAM_COUNTER_WITH_NAME(rootCounters, variable, name, expiring, buckets, lazy) \ + variable.Init(rootCounters, expiring, buckets, name, lazy) +#define INIT_HISTOGRAM_COUNTER(rootCounters, variable, expiring, buckets, lazy) \ + INIT_HISTOGRAM_COUNTER_WITH_NAME(rootCounters, variable, Y_STRINGIZE(variable), expiring, buckets, lazy) + +void TActionCounters::Init(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, EAction action, ELifetime lifetime) { + const ELaziness laziness = IsActionForMessage(action) ? Lazy(cfg) : ELaziness::OnDemand; + const EAction nonBatch = GetNonBatchAction(action); + INIT_COUNTER_WITH_NAME(rootCounters, Success, TStringBuilder() << nonBatch << "_Success", lifetime, EValueType::Derivative, laziness); + INIT_COUNTER_WITH_NAME(rootCounters, Errors, TStringBuilder() << nonBatch << "_Errors", lifetime, EValueType::Derivative, IsActionForMessage(action) ? ELaziness::OnStart : laziness); + INIT_COUNTER_WITH_NAME(rootCounters, Infly, TStringBuilder() << nonBatch << "_Infly", lifetime, EValueType::Absolute, ELaziness::OnDemand); + + Duration.Init(rootCounters, lifetime, IsFastAction(action) ? FastActionsDurationBucketsMs : SlowActionsDurationBucketsMs, TStringBuilder() << nonBatch << "_Duration", laziness); + + if (action == EAction::ReceiveMessage) { + INIT_HISTOGRAM_COUNTER_WITH_NAME(rootCounters, WorkingDuration, TStringBuilder() << action << "_WorkingDuration", lifetime, DurationBucketsMs, Lazy(cfg)); + } +} + void TYmqActionCounters::Init( const NKikimrConfig::TSqsConfig&, const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, EAction action, const TString& labelName, const TString& namePrefix, ELifetime lifetime @@ -439,128 +439,128 @@ void TYmqActionCounters::Init( ); } -void TActionCounters::SetAggregatedParent(TActionCounters* parent) { - ActionCountersDescriptor.SetAggregatedParent(this, parent); -} - -void TQueryTypeCounters::SetAggregatedParent(TQueryTypeCounters* parent) { - QueryTypeCountersDescriptor.SetAggregatedParent(this, parent); -} - +void TActionCounters::SetAggregatedParent(TActionCounters* parent) { + ActionCountersDescriptor.SetAggregatedParent(this, parent); +} + +void TQueryTypeCounters::SetAggregatedParent(TQueryTypeCounters* parent) { + QueryTypeCountersDescriptor.SetAggregatedParent(this, parent); +} + void TTransactionCounters::Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, std::shared_ptr<TAlignedPagePoolCounters> poolCounters, bool forQueue) { - AllocPoolCounters = std::move(poolCounters); - - const ELifetime lifetime = forQueue ? 
ELifetime::Expiring : ELifetime::Persistent; - - auto transactionsByType = rootCounters->GetSubgroup(DEFAULT_COUNTER_NAME, "TransactionsByType"); - auto transactionsDurationByType = rootCounters->GetSubgroup(DEFAULT_COUNTER_NAME, "TransactionsDurationsByType"); - auto transactionsFailedByType = rootCounters->GetSubgroup(DEFAULT_COUNTER_NAME, "TransactionsFailedByType"); - for (size_t i = 0; i < EQueryId::QUERY_VECTOR_SIZE; ++i) { - const auto& typeStr = ToString(EQueryId(i)); - QueryTypeCounters[i].TransactionsCount.Init(transactionsByType, lifetime, EValueType::Derivative, QUERY_TYPE, typeStr, ELaziness::OnDemand); - QueryTypeCounters[i].TransactionsFailed.Init(transactionsFailedByType, lifetime, EValueType::Derivative, QUERY_TYPE, typeStr, ELaziness::OnDemand); - QueryTypeCounters[i].TransactionDuration.Init(transactionsDurationByType, lifetime, DurationBucketsMs, QUERY_TYPE, typeStr, ELaziness::OnDemand); - } - - INIT_COUNTER(rootCounters, CompileQueryCount, lifetime, EValueType::Derivative, ELaziness::OnDemand); - INIT_COUNTER(rootCounters, TransactionsCount, lifetime, EValueType::Derivative, ELaziness::OnDemand); - INIT_COUNTER(rootCounters, TransactionsInfly, lifetime, EValueType::Absolute, ELaziness::OnDemand); - INIT_COUNTER(rootCounters, TransactionRetryTimeouts, lifetime, EValueType::Derivative, ELaziness::OnDemand); - INIT_COUNTER(rootCounters, TransactionRetries, lifetime, EValueType::Derivative, ELaziness::OnDemand); - INIT_COUNTER(rootCounters, TransactionsFailed, lifetime, EValueType::Derivative, ELaziness::OnDemand); -} - -void TTransactionCounters::SetAggregatedParent(const TIntrusivePtr<TTransactionCounters>& parent) { - AggregatedParent = parent; - TransactionCountersDescriptor.SetAggregatedParent(this, parent.Get()); - for (size_t i = 0; i < EQueryId::QUERY_VECTOR_SIZE; ++i) { - QueryTypeCounters[i].SetAggregatedParent(parent ? &parent->QueryTypeCounters[i] : nullptr); - } -} - -void TAPIStatusesCounters::Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& root) { - auto statusesByType = root->GetSubgroup(DEFAULT_COUNTER_NAME, "StatusesByType"); - for (const TString& errorCode : TErrorClass::GetAvailableErrorCodes()) { - ErrorToCounter[errorCode].Init(statusesByType, ELifetime::Persistent, EValueType::Derivative, STATUS_CODE, errorCode, ELaziness::OnDemand); - } - OkCounter.Init(statusesByType, ELifetime::Persistent, EValueType::Derivative, STATUS_CODE, "OK", ELaziness::OnDemand); - UnknownCounter.Init(statusesByType, ELifetime::Persistent, EValueType::Derivative, STATUS_CODE, "Unknown", ELaziness::OnDemand); -} - -void TAPIStatusesCounters::AddError(const TString& errorCode, size_t count) { - const auto counter = ErrorToCounter.find(errorCode); - if (counter != ErrorToCounter.end()) { - *counter->second += count; - } else { - *UnknownCounter += count; - } -} - -void TAPIStatusesCounters::AddOk(size_t count) { - *OkCounter += count; -} - -void TAPIStatusesCounters::SetAggregatedParent(TAPIStatusesCounters* parent) { - for (auto& [err, counter] : ErrorToCounter) { - if (parent) { - counter.SetAggregatedParent(&parent->ErrorToCounter[err]); - } else { - counter.SetAggregatedParent(nullptr); - } - } - OkCounter.SetAggregatedParent(parent ? &parent->OkCounter : nullptr); - UnknownCounter.SetAggregatedParent(parent ? &parent->UnknownCounter : nullptr); -} - -TQueueCounters::TQueueCounters(const NKikimrConfig::TSqsConfig& cfg, + AllocPoolCounters = std::move(poolCounters); + + const ELifetime lifetime = forQueue ? 
ELifetime::Expiring : ELifetime::Persistent; + + auto transactionsByType = rootCounters->GetSubgroup(DEFAULT_COUNTER_NAME, "TransactionsByType"); + auto transactionsDurationByType = rootCounters->GetSubgroup(DEFAULT_COUNTER_NAME, "TransactionsDurationsByType"); + auto transactionsFailedByType = rootCounters->GetSubgroup(DEFAULT_COUNTER_NAME, "TransactionsFailedByType"); + for (size_t i = 0; i < EQueryId::QUERY_VECTOR_SIZE; ++i) { + const auto& typeStr = ToString(EQueryId(i)); + QueryTypeCounters[i].TransactionsCount.Init(transactionsByType, lifetime, EValueType::Derivative, QUERY_TYPE, typeStr, ELaziness::OnDemand); + QueryTypeCounters[i].TransactionsFailed.Init(transactionsFailedByType, lifetime, EValueType::Derivative, QUERY_TYPE, typeStr, ELaziness::OnDemand); + QueryTypeCounters[i].TransactionDuration.Init(transactionsDurationByType, lifetime, DurationBucketsMs, QUERY_TYPE, typeStr, ELaziness::OnDemand); + } + + INIT_COUNTER(rootCounters, CompileQueryCount, lifetime, EValueType::Derivative, ELaziness::OnDemand); + INIT_COUNTER(rootCounters, TransactionsCount, lifetime, EValueType::Derivative, ELaziness::OnDemand); + INIT_COUNTER(rootCounters, TransactionsInfly, lifetime, EValueType::Absolute, ELaziness::OnDemand); + INIT_COUNTER(rootCounters, TransactionRetryTimeouts, lifetime, EValueType::Derivative, ELaziness::OnDemand); + INIT_COUNTER(rootCounters, TransactionRetries, lifetime, EValueType::Derivative, ELaziness::OnDemand); + INIT_COUNTER(rootCounters, TransactionsFailed, lifetime, EValueType::Derivative, ELaziness::OnDemand); +} + +void TTransactionCounters::SetAggregatedParent(const TIntrusivePtr<TTransactionCounters>& parent) { + AggregatedParent = parent; + TransactionCountersDescriptor.SetAggregatedParent(this, parent.Get()); + for (size_t i = 0; i < EQueryId::QUERY_VECTOR_SIZE; ++i) { + QueryTypeCounters[i].SetAggregatedParent(parent ? &parent->QueryTypeCounters[i] : nullptr); + } +} + +void TAPIStatusesCounters::Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& root) { + auto statusesByType = root->GetSubgroup(DEFAULT_COUNTER_NAME, "StatusesByType"); + for (const TString& errorCode : TErrorClass::GetAvailableErrorCodes()) { + ErrorToCounter[errorCode].Init(statusesByType, ELifetime::Persistent, EValueType::Derivative, STATUS_CODE, errorCode, ELaziness::OnDemand); + } + OkCounter.Init(statusesByType, ELifetime::Persistent, EValueType::Derivative, STATUS_CODE, "OK", ELaziness::OnDemand); + UnknownCounter.Init(statusesByType, ELifetime::Persistent, EValueType::Derivative, STATUS_CODE, "Unknown", ELaziness::OnDemand); +} + +void TAPIStatusesCounters::AddError(const TString& errorCode, size_t count) { + const auto counter = ErrorToCounter.find(errorCode); + if (counter != ErrorToCounter.end()) { + *counter->second += count; + } else { + *UnknownCounter += count; + } +} + +void TAPIStatusesCounters::AddOk(size_t count) { + *OkCounter += count; +} + +void TAPIStatusesCounters::SetAggregatedParent(TAPIStatusesCounters* parent) { + for (auto& [err, counter] : ErrorToCounter) { + if (parent) { + counter.SetAggregatedParent(&parent->ErrorToCounter[err]); + } else { + counter.SetAggregatedParent(nullptr); + } + } + OkCounter.SetAggregatedParent(parent ? &parent->OkCounter : nullptr); + UnknownCounter.SetAggregatedParent(parent ? 
&parent->UnknownCounter : nullptr); +} + +TQueueCounters::TQueueCounters(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtrCntrCouple& rootCounters, - const TUserCounters* userCounters, - const TString& queueName, - const TString& folderId, - bool insertCounters, - bool aggregated) + const TUserCounters* userCounters, + const TString& queueName, + const TString& folderId, + bool insertCounters, + bool aggregated) : RootCounters(rootCounters) - , UserCounters(userCounters->UserCounters) + , UserCounters(userCounters->UserCounters) , FolderCounters(folderId ? GetFolderCounters(UserCounters, folderId) : TIntrusivePtrCntrCouple{}) - , Cfg(&cfg) - , QueueName(queueName) - , AggregatedCounters(aggregated) - , UserShowDetailedCountersDeadline(userCounters->ShowDetailedCountersDeadline) - , AllocPoolCounters(userCounters->DetailedCounters.TransactionCounters->AllocPoolCounters) -{ - if (insertCounters) { + , Cfg(&cfg) + , QueueName(queueName) + , AggregatedCounters(aggregated) + , UserShowDetailedCountersDeadline(userCounters->ShowDetailedCountersDeadline) + , AllocPoolCounters(userCounters->DetailedCounters.TransactionCounters->AllocPoolCounters) +{ + if (insertCounters) { QueueCounters = GetQueueCounters(FolderCounters.Defined() ? FolderCounters : UserCounters, queueName); - } else { + } else { QueueCounters = {new NMonitoring::TDynamicCounters(), new NMonitoring::TDynamicCounters()}; - } - InitCounters(); -} - + } + InitCounters(); +} + void TQueueCounters::InitCounters(bool forLeaderNode) { - if (!RequestTimeouts) { + if (!RequestTimeouts) { INIT_COUNTERS_COUPLE( QueueCounters, RequestTimeouts, request_timeouts_count_per_second, ELifetime::Persistent, EValueType::Derivative, Lazy(*Cfg), AggregatedCounters ); - } - + } + if (forLeaderNode) { INIT_COUNTER(QueueCounters.SqsCounters, RequestsThrottled, ELifetime::Expiring, EValueType::Derivative, ELaziness::OnStart); - + INIT_COUNTER(QueueCounters.SqsCounters, QueueMasterStartProblems, ELifetime::Persistent, EValueType::Derivative, ELaziness::OnStart); INIT_COUNTER(QueueCounters.SqsCounters, QueueLeaderStartProblems, ELifetime::Persistent, EValueType::Derivative, ELaziness::OnStart); - + INIT_COUNTERS_COUPLE( QueueCounters, MessagesPurged, purged_count_per_second, ELifetime::Expiring, EValueType::Derivative, ELaziness::OnStart, AggregatedCounters ); - + INIT_HISTOGRAMS_COUPLE_WITH_BUCKETS( QueueCounters, MessageReceiveAttempts, receive_attempts_count_rate, ELifetime::Expiring, MessageReceiveAttemptsBuckets, MessageReceiveAttemptsBuckets, @@ -584,7 +584,7 @@ void TQueueCounters::InitCounters(bool forLeaderNode) { ELifetime::Expiring, EValueType::Derivative, Lazy(*Cfg), AggregatedCounters ); - + INIT_COUNTERS_COUPLE( QueueCounters, ReceiveMessage_EmptyCount, empty_receive_attempts_count_per_second, @@ -603,7 +603,7 @@ void TQueueCounters::InitCounters(bool forLeaderNode) { ELifetime::Expiring, EValueType::Derivative, Lazy(*Cfg), AggregatedCounters ); - + INIT_COUNTER(QueueCounters.SqsCounters, MessagesMovedToDLQ, ELifetime::Expiring, EValueType::Derivative, ELaziness::OnStart); INIT_COUNTERS_COUPLE( @@ -624,7 +624,7 @@ void TQueueCounters::InitCounters(bool forLeaderNode) { ELifetime::Expiring, EValueType::Derivative, Lazy(*Cfg), AggregatedCounters ); - + INIT_COUNTERS_COUPLE( QueueCounters, MessagesCount, stored_count, @@ -637,22 +637,22 @@ void TQueueCounters::InitCounters(bool forLeaderNode) { ELifetime::Expiring, EValueType::Absolute, Lazy(*Cfg), AggregatedCounters ); - if (!AggregatedCounters) { // OldestMessageAgeSeconds will not be 
aggregated properly. + if (!AggregatedCounters) { // OldestMessageAgeSeconds will not be aggregated properly. INIT_COUNTERS_COUPLE( QueueCounters, OldestMessageAgeSeconds, oldest_age_milliseconds, ELifetime::Expiring, EValueType::Absolute, ELaziness::OnStart, false ); - } - } - - for (EAction action = static_cast<EAction>(EAction::Unknown + 1); action < EAction::ActionsArraySize; action = static_cast<EAction>(action + 1)) { - if (IsActionForQueue(action)) { + } + } + + for (EAction action = static_cast<EAction>(EAction::Unknown + 1); action < EAction::ActionsArraySize; action = static_cast<EAction>(action + 1)) { + if (IsActionForQueue(action)) { if (forLeaderNode && IsProxyAction(action) || !forLeaderNode && !IsProxyAction(action)) { SqsActionCounters[action].Init(*Cfg, QueueCounters.SqsCounters, action, forLeaderNode ? ELifetime::Expiring : ELifetime::Persistent); - } - } + } + } if (IsActionForQueueYMQ(action) && QueueCounters.YmqCounters && !AggregatedCounters) { if (forLeaderNode && IsProxyAction(action) || !forLeaderNode && !IsProxyAction(action)) { YmqActionCounters[action].Init( @@ -661,226 +661,226 @@ void TQueueCounters::InitCounters(bool forLeaderNode) { ); } } - } - + } + DetailedCounters.Init(QueueCounters.SqsCounters, AllocPoolCounters, forLeaderNode); -} - +} + void TQueueCounters::TDetailedCounters::Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& queueCounters, const std::shared_ptr<TAlignedPagePoolCounters>& allocPoolCounters, bool forLeaderNode) { - if (!GetConfiguration_Duration) { - INIT_HISTOGRAM_COUNTER(queueCounters, GetConfiguration_Duration, ELifetime::Expiring, DurationBucketsMs, ELaziness::OnDemand); - } - + if (!GetConfiguration_Duration) { + INIT_HISTOGRAM_COUNTER(queueCounters, GetConfiguration_Duration, ELifetime::Expiring, DurationBucketsMs, ELaziness::OnDemand); + } + if (forLeaderNode) { - TransactionCounters = new TTransactionCounters(); - TransactionCounters->Init(queueCounters, allocPoolCounters, true); - - INIT_COUNTER(queueCounters, ReceiveMessage_KeysInvalidated, ELifetime::Expiring, EValueType::Derivative, ELaziness::OnDemand); - - INIT_HISTOGRAM_COUNTER(queueCounters, ReceiveMessageImmediate_Duration, ELifetime::Expiring, DurationBucketsMs, ELaziness::OnDemand); - } -} - -void TQueueCounters::TDetailedCounters::SetAggregatedParent(TQueueCounters::TDetailedCounters* parent) { - if (TransactionCounters) { - TransactionCounters->SetAggregatedParent(parent ? parent->TransactionCounters : nullptr); - } - QueueDetailedCountersDescriptor.SetAggregatedParent(this, parent); -} - -void TQueueCounters::InsertCounters() { - auto insert = [&](auto& parent) { + TransactionCounters = new TTransactionCounters(); + TransactionCounters->Init(queueCounters, allocPoolCounters, true); + + INIT_COUNTER(queueCounters, ReceiveMessage_KeysInvalidated, ELifetime::Expiring, EValueType::Derivative, ELaziness::OnDemand); + + INIT_HISTOGRAM_COUNTER(queueCounters, ReceiveMessageImmediate_Duration, ELifetime::Expiring, DurationBucketsMs, ELaziness::OnDemand); + } +} + +void TQueueCounters::TDetailedCounters::SetAggregatedParent(TQueueCounters::TDetailedCounters* parent) { + if (TransactionCounters) { + TransactionCounters->SetAggregatedParent(parent ? 
parent->TransactionCounters : nullptr); + } + QueueDetailedCountersDescriptor.SetAggregatedParent(this, parent); +} + +void TQueueCounters::InsertCounters() { + auto insert = [&](auto& parent) { if (!parent.SqsCounters->FindSubgroup(QUEUE_LABEL, QueueName)) { QueueCounters.SqsCounters->ResetCounters(); parent.SqsCounters->RegisterSubgroup(QUEUE_LABEL, QueueName, QueueCounters.SqsCounters); - } + } if (parent.YmqCounters && !parent.YmqCounters->FindSubgroup(QUEUE_LABEL, QueueName)) { QueueCounters.YmqCounters->ResetCounters(); parent.YmqCounters->RegisterSubgroup(QUEUE_LABEL, QueueName, QueueCounters.YmqCounters); } - }; - + }; + if (FolderCounters.Defined()) { - insert(FolderCounters); - } else { - insert(UserCounters); - } -} - -void TQueueCounters::SetAggregatedParent(const TIntrusivePtr<TQueueCounters>& parent) { - AggregatedParent = parent; - QueueCountersDescriptor.SetAggregatedParent(this, parent.Get()); - DetailedCounters.SetAggregatedParent(parent ? &parent->DetailedCounters : nullptr); - for (size_t i = 0; i < EAction::ActionsArraySize; ++i) { + insert(FolderCounters); + } else { + insert(UserCounters); + } +} + +void TQueueCounters::SetAggregatedParent(const TIntrusivePtr<TQueueCounters>& parent) { + AggregatedParent = parent; + QueueCountersDescriptor.SetAggregatedParent(this, parent.Get()); + DetailedCounters.SetAggregatedParent(parent ? &parent->DetailedCounters : nullptr); + for (size_t i = 0; i < EAction::ActionsArraySize; ++i) { SqsActionCounters[i].SetAggregatedParent(parent ? &parent->SqsActionCounters[i] : nullptr); - } -} - -void TQueueCounters::RemoveCounters() { + } +} + +void TQueueCounters::RemoveCounters() { auto couple = FolderCounters.Defined() ? FolderCounters : UserCounters; couple.SqsCounters->RemoveSubgroup(QUEUE_LABEL, QueueName); if (couple.YmqCounters) couple.YmqCounters->RemoveSubgroup(QUEUE_LABEL, QueueName); -} - +} + TIntrusivePtr<TQueueCounters> TQueueCounters::GetCountersForLeaderNode() { - TIntrusivePtr<TQueueCounters> counters = new TQueueCounters(*this); + TIntrusivePtr<TQueueCounters> counters = new TQueueCounters(*this); counters->NotLeaderNodeCounters = this; - counters->InitCounters(true); - if (AggregatedParent) { - counters->SetAggregatedParent(AggregatedParent); - } - return counters; -} - + counters->InitCounters(true); + if (AggregatedParent) { + counters->SetAggregatedParent(AggregatedParent); + } + return counters; +} + TIntrusivePtr<TQueueCounters> TQueueCounters::GetCountersForNotLeaderNode() { return NotLeaderNodeCounters; -} - +} + void TUserCounters::InitCounters(const TString& userName, const std::shared_ptr<TAlignedPagePoolCounters>& allocPoolCounters) { - UserCounters = GetUserCounters(SqsCoreCounters, userName); - + UserCounters = GetUserCounters(SqsCoreCounters, userName); + INIT_COUNTER(UserCounters.SqsCounters, RequestTimeouts, ELifetime::Persistent, EValueType::Derivative, Lazy(*Cfg)); - - if (Cfg->GetForceAccessControl() && Cfg->AccountsWithoutMandatoryAuthSize() && (userName == TOTAL_COUNTER_LABEL || IsIn(Cfg->GetAccountsWithoutMandatoryAuth(), userName))) { + + if (Cfg->GetForceAccessControl() && Cfg->AccountsWithoutMandatoryAuthSize() && (userName == TOTAL_COUNTER_LABEL || IsIn(Cfg->GetAccountsWithoutMandatoryAuth(), userName))) { INIT_COUNTER(UserCounters.SqsCounters, UnauthenticatedAccess, ELifetime::Persistent, EValueType::Derivative, ELaziness::OnStart); - } - - for (EAction action = static_cast<EAction>(EAction::Unknown + 1); action < EAction::ActionsArraySize; action = static_cast<EAction>(action + 1)) { - if 
(IsActionForUser(action)) { + } + + for (EAction action = static_cast<EAction>(EAction::Unknown + 1); action < EAction::ActionsArraySize; action = static_cast<EAction>(action + 1)) { + if (IsActionForUser(action)) { SqsActionCounters[action].Init(*Cfg, UserCounters.SqsCounters, action); - } + } if (IsActionForUserYMQ(action) && UserCounters.YmqCounters && !IsAggregatedCounters) { YmqActionCounters[action].Init(*Cfg, UserCounters.YmqCounters, action, METHOD_LABLE, ACTION_CNTR_PREFIX); } - } - + } + // ToDo. Errors codes here. Will probably need this in Ymq counters further - DetailedCounters.Init(UserCounters, allocPoolCounters, *Cfg); - + DetailedCounters.Init(UserCounters, allocPoolCounters, *Cfg); + AggregatedQueueCounters = CreateQueueCountersImpl(TOTAL_COUNTER_LABEL, Cfg->GetYandexCloudMode() ? TOTAL_COUNTER_LABEL : TString(), true, true)->GetCountersForLeaderNode(); - - if (AggregatedParent) { - AggregatedQueueCounters->SetAggregatedParent(AggregatedParent->AggregatedQueueCounters); - UserCountersDescriptor.SetAggregatedParent(this, AggregatedParent.Get()); - DetailedCounters.SetAggregatedParent(&AggregatedParent->DetailedCounters); - for (size_t i = 0; i < EAction::ActionsArraySize; ++i) { + + if (AggregatedParent) { + AggregatedQueueCounters->SetAggregatedParent(AggregatedParent->AggregatedQueueCounters); + UserCountersDescriptor.SetAggregatedParent(this, AggregatedParent.Get()); + DetailedCounters.SetAggregatedParent(&AggregatedParent->DetailedCounters); + for (size_t i = 0; i < EAction::ActionsArraySize; ++i) { SqsActionCounters[i].SetAggregatedParent(&AggregatedParent->SqsActionCounters[i]); - } - } -} - + } + } +} + void TUserCounters::TDetailedCounters::Init(const TIntrusivePtrCntrCouple& userCounters, const std::shared_ptr<TAlignedPagePoolCounters>& allocPoolCounters, const NKikimrConfig::TSqsConfig& cfg) { - TransactionCounters = new TTransactionCounters(); - TransactionCounters->Init(GetAggregatedCountersFromUserCounters(userCounters, cfg), allocPoolCounters, false); - + TransactionCounters = new TTransactionCounters(); + TransactionCounters->Init(GetAggregatedCountersFromUserCounters(userCounters, cfg), allocPoolCounters, false); + APIStatuses.Init(userCounters.SqsCounters); - + INIT_HISTOGRAM_COUNTER(userCounters.SqsCounters, GetConfiguration_Duration, ELifetime::Persistent, DurationBucketsMs, ELaziness::OnDemand); INIT_HISTOGRAM_COUNTER(userCounters.SqsCounters, GetQuota_Duration, ELifetime::Persistent, GetQuotaDurationBucketsMs, ELaziness::OnDemand); - + INIT_COUNTER(userCounters.SqsCounters, CreateAccountOnTheFly_Success, ELifetime::Persistent, EValueType::Derivative, ELaziness::OnDemand); INIT_COUNTER(userCounters.SqsCounters, CreateAccountOnTheFly_Errors, ELifetime::Persistent, EValueType::Derivative, ELaziness::OnDemand); -} - -void TUserCounters::TDetailedCounters::SetAggregatedParent(TUserCounters::TDetailedCounters* parent) { - TransactionCounters->SetAggregatedParent(parent ? parent->TransactionCounters : nullptr); - UserDetailedCountersDescriptor.SetAggregatedParent(this, parent); -} - -TIntrusivePtr<TQueueCounters> TUserCounters::CreateQueueCounters(const TString& queueName, const TString& folderId, bool insertCounters) { +} + +void TUserCounters::TDetailedCounters::SetAggregatedParent(TUserCounters::TDetailedCounters* parent) { + TransactionCounters->SetAggregatedParent(parent ? 
parent->TransactionCounters : nullptr); + UserDetailedCountersDescriptor.SetAggregatedParent(this, parent); +} + +TIntrusivePtr<TQueueCounters> TUserCounters::CreateQueueCounters(const TString& queueName, const TString& folderId, bool insertCounters) { auto counters = CreateQueueCountersImpl(queueName, folderId, insertCounters, IsAggregatedCounters); - counters->SetAggregatedParent(AggregatedQueueCounters); - return counters; -} - -TIntrusivePtr<TQueueCounters> TUserCounters::CreateQueueCountersImpl(const TString& queueName, const TString& folderId, bool insertCounters, bool aggregated) { - return new TQueueCounters(*Cfg, SqsCoreCounters, this, queueName, folderId, insertCounters, aggregated); -} - -void TUserCounters::RemoveCounters() { + counters->SetAggregatedParent(AggregatedQueueCounters); + return counters; +} + +TIntrusivePtr<TQueueCounters> TUserCounters::CreateQueueCountersImpl(const TString& queueName, const TString& folderId, bool insertCounters, bool aggregated) { + return new TQueueCounters(*Cfg, SqsCoreCounters, this, queueName, folderId, insertCounters, aggregated); +} + +void TUserCounters::RemoveCounters() { SqsCoreCounters.SqsCounters->RemoveSubgroup(USER_LABEL, UserName); if (SqsCoreCounters.YmqCounters) SqsCoreCounters.YmqCounters->RemoveSubgroup(CLOUD_LABEL, UserName); -} - -void TUserCounters::DisableCounters(bool disable) { - if (disable) { +} + +void TUserCounters::DisableCounters(bool disable) { + if (disable) { SqsCoreCounters.SqsCounters->RemoveSubgroup(USER_LABEL, UserName); if (SqsCoreCounters.YmqCounters) SqsCoreCounters.YmqCounters->RemoveSubgroup(CLOUD_LABEL, UserName); - } else { + } else { if (!SqsCoreCounters.SqsCounters->FindSubgroup(USER_LABEL, UserName)) { UserCounters.SqsCounters->ResetCounters(); SqsCoreCounters.SqsCounters->RegisterSubgroup(USER_LABEL, UserName, UserCounters.SqsCounters); - } + } if (SqsCoreCounters.YmqCounters && !SqsCoreCounters.YmqCounters->FindSubgroup(CLOUD_LABEL, UserName)) { UserCounters.YmqCounters->ResetCounters(); SqsCoreCounters.YmqCounters->RegisterSubgroup(CLOUD_LABEL, UserName, UserCounters.YmqCounters); } - } -} - -TIntrusivePtr<THttpUserCounters> THttpCounters::GetUserCountersImpl(const TString& userName, const TIntrusivePtr<THttpUserCounters>& aggregatedUserCounters) { - { - auto guard = Guard(Lock); - auto countersIt = UserCounters.find(userName); - if (countersIt != UserCounters.end()) { - return countersIt->second; - } - } - - TIntrusivePtr<THttpUserCounters> userCounters = new THttpUserCounters(*Cfg, SqsHttpCounters, userName); - if (aggregatedUserCounters) { - userCounters->SetAggregatedParent(aggregatedUserCounters); - } - - auto guard = Guard(Lock); - auto [iter, inserted] = UserCounters.emplace(userName, std::move(userCounters)); - return iter->second; -} - -TIntrusivePtr<THttpUserCounters> THttpCounters::GetUserCounters(const TString& userName) { - return GetUserCountersImpl(userName, AggregatedUserCounters); -} - -void THttpCounters::InitCounters() { - INIT_COUNTER(SqsHttpCounters, RequestExceptions, ELifetime::Persistent, EValueType::Derivative, ELaziness::OnStart); - INIT_COUNTER(SqsHttpCounters, InternalExceptions, ELifetime::Persistent, EValueType::Derivative, ELaziness::OnStart); - INIT_COUNTER(SqsHttpCounters, ConnectionsCount, ELifetime::Persistent, EValueType::Absolute, ELaziness::OnStart); - - AggregatedUserCounters = GetUserCountersImpl(TOTAL_COUNTER_LABEL, nullptr); -} - -void THttpUserCounters::InitCounters(const TString& userName) { - UserCounters = SqsHttpCounters->GetSubgroup(USER_LABEL, 
userName); - - for (EAction action = static_cast<EAction>(EAction::Unknown + 1); action < EAction::ActionsArraySize; action = static_cast<EAction>(action + 1)) { - ActionCounters[action].Init(*Cfg, UserCounters, action); - } - - INIT_COUNTER(UserCounters, RequestExceptions, ELifetime::Persistent, EValueType::Derivative, Lazy(*Cfg)); -} - -void THttpUserCounters::SetAggregatedParent(const TIntrusivePtr<THttpUserCounters>& parent) { - AggregatedParent = parent; - HttpUserCountersDescriptor.SetAggregatedParent(this, parent.Get()); - for (size_t i = 0; i < EAction::ActionsArraySize; ++i) { - ActionCounters[i].SetAggregatedParent(parent ? &parent->ActionCounters[i] : nullptr); - } -} - -void THttpActionCounters::Init(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, EAction action) { - Cfg = &cfg; - Requests.Init(rootCounters, ELifetime::Persistent, EValueType::Derivative, TStringBuilder() << action << "Request", Lazy(*Cfg)); -} - -void THttpActionCounters::SetAggregatedParent(THttpActionCounters* parent) { - HttpActionCountersDescriptor.SetAggregatedParent(this, parent); -} - + } +} + +TIntrusivePtr<THttpUserCounters> THttpCounters::GetUserCountersImpl(const TString& userName, const TIntrusivePtr<THttpUserCounters>& aggregatedUserCounters) { + { + auto guard = Guard(Lock); + auto countersIt = UserCounters.find(userName); + if (countersIt != UserCounters.end()) { + return countersIt->second; + } + } + + TIntrusivePtr<THttpUserCounters> userCounters = new THttpUserCounters(*Cfg, SqsHttpCounters, userName); + if (aggregatedUserCounters) { + userCounters->SetAggregatedParent(aggregatedUserCounters); + } + + auto guard = Guard(Lock); + auto [iter, inserted] = UserCounters.emplace(userName, std::move(userCounters)); + return iter->second; +} + +TIntrusivePtr<THttpUserCounters> THttpCounters::GetUserCounters(const TString& userName) { + return GetUserCountersImpl(userName, AggregatedUserCounters); +} + +void THttpCounters::InitCounters() { + INIT_COUNTER(SqsHttpCounters, RequestExceptions, ELifetime::Persistent, EValueType::Derivative, ELaziness::OnStart); + INIT_COUNTER(SqsHttpCounters, InternalExceptions, ELifetime::Persistent, EValueType::Derivative, ELaziness::OnStart); + INIT_COUNTER(SqsHttpCounters, ConnectionsCount, ELifetime::Persistent, EValueType::Absolute, ELaziness::OnStart); + + AggregatedUserCounters = GetUserCountersImpl(TOTAL_COUNTER_LABEL, nullptr); +} + +void THttpUserCounters::InitCounters(const TString& userName) { + UserCounters = SqsHttpCounters->GetSubgroup(USER_LABEL, userName); + + for (EAction action = static_cast<EAction>(EAction::Unknown + 1); action < EAction::ActionsArraySize; action = static_cast<EAction>(action + 1)) { + ActionCounters[action].Init(*Cfg, UserCounters, action); + } + + INIT_COUNTER(UserCounters, RequestExceptions, ELifetime::Persistent, EValueType::Derivative, Lazy(*Cfg)); +} + +void THttpUserCounters::SetAggregatedParent(const TIntrusivePtr<THttpUserCounters>& parent) { + AggregatedParent = parent; + HttpUserCountersDescriptor.SetAggregatedParent(this, parent.Get()); + for (size_t i = 0; i < EAction::ActionsArraySize; ++i) { + ActionCounters[i].SetAggregatedParent(parent ? 
&parent->ActionCounters[i] : nullptr); + } +} + +void THttpActionCounters::Init(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, EAction action) { + Cfg = &cfg; + Requests.Init(rootCounters, ELifetime::Persistent, EValueType::Derivative, TStringBuilder() << action << "Request", Lazy(*Cfg)); +} + +void THttpActionCounters::SetAggregatedParent(THttpActionCounters* parent) { + HttpActionCountersDescriptor.SetAggregatedParent(this, parent); +} + static const TString& StringifyGrpcStatus(int grpcStatus) { if (grpcStatus < 0 || grpcStatus > TCloudAuthCounters::GRPC_STATUSES_COUNT - 2) { grpcStatus = TCloudAuthCounters::GRPC_STATUSES_COUNT - 1; @@ -928,14 +928,14 @@ void TCloudAuthCounters::InitCounters(TIntrusivePtr<NMonitoring::TDynamicCounter const auto credentialTypeStr = ToString(static_cast<NCloudAuth::ECredentialType>(credentialType)); const auto actionAndCredentialCounters = actionCounters->GetSubgroup("credential_type", credentialTypeStr); for (size_t grpcStatus = 0; grpcStatus < GRPC_STATUSES_COUNT; ++grpcStatus) { - INIT_COUNTER_WITH_NAME(actionAndCredentialCounters, CloudAuthCounters[actionType][credentialType][grpcStatus], StringifyGrpcStatus(grpcStatus), ELifetime::Persistent, EValueType::Derivative, Lazy(*Cfg)); + INIT_COUNTER_WITH_NAME(actionAndCredentialCounters, CloudAuthCounters[actionType][credentialType][grpcStatus], StringifyGrpcStatus(grpcStatus), ELifetime::Persistent, EValueType::Derivative, Lazy(*Cfg)); } } } - - INIT_HISTOGRAM_COUNTER(cloudAuthCounters, AuthenticateDuration, ELifetime::Persistent, DurationBucketsMs, Lazy(*Cfg)); - INIT_HISTOGRAM_COUNTER(cloudAuthCounters, AuthorizeDuration, ELifetime::Persistent, DurationBucketsMs, Lazy(*Cfg)); - INIT_HISTOGRAM_COUNTER(cloudAuthCounters, GetFolderIdDuration, ELifetime::Persistent, DurationBucketsMs, Lazy(*Cfg)); + + INIT_HISTOGRAM_COUNTER(cloudAuthCounters, AuthenticateDuration, ELifetime::Persistent, DurationBucketsMs, Lazy(*Cfg)); + INIT_HISTOGRAM_COUNTER(cloudAuthCounters, AuthorizeDuration, ELifetime::Persistent, DurationBucketsMs, Lazy(*Cfg)); + INIT_HISTOGRAM_COUNTER(cloudAuthCounters, GetFolderIdDuration, ELifetime::Persistent, DurationBucketsMs, Lazy(*Cfg)); } void TMeteringCounters::InitCounters(const TVector<TString>& classifierLabels) { @@ -947,4 +947,4 @@ void TMeteringCounters::InitCounters(const TVector<TString>& classifierLabels) { } } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/counters.h b/ydb/core/ymq/base/counters.h index 3c2667b2410..8390bea2aa0 100644 --- a/ydb/core/ymq/base/counters.h +++ b/ydb/core/ymq/base/counters.h @@ -1,50 +1,50 @@ -#pragma once +#pragma once #include <ydb/core/protos/config.pb.h> - + #include <ydb/core/ymq/base/action.h> #include <ydb/core/ymq/base/cloud_enums.h> #include <ydb/core/ymq/base/query_id.h> #include <ydb/core/ymq/base/queue_path.h> - + #include <ydb/library/yql/minikql/aligned_page_pool.h> - + #include <library/cpp/monlib/dynamic_counters/counters.h> - -#include <util/system/spinlock.h> + +#include <util/system/spinlock.h> #include <util/generic/hash.h> - -#include <atomic> -#include <utility> - -namespace NKikimr::NSQS { - -struct TUserCounters; -struct TQueueCounters; -struct THttpCounters; + +#include <atomic> +#include <utility> + +namespace NKikimr::NSQS { + +struct TUserCounters; +struct TQueueCounters; +struct THttpCounters; struct TCloudAuthCounters; - -#define INC_COUNTER(countersPack, counter) \ - if (countersPack) { \ - ++*countersPack->counter; \ - } - 
+ +#define INC_COUNTER(countersPack, counter) \ + if (countersPack) { \ + ++*countersPack->counter; \ + } + #define INC_COUNTER_COUPLE(countersPack, sqsCounter, ymqCounter) \ if (countersPack) { \ ++*countersPack->sqsCounter; \ ++*countersPack->ymqCounter; \ } -#define DEC_COUNTER(countersPack, counter) \ - if (countersPack) { \ - --*countersPack->counter; \ - } - -#define ADD_COUNTER(countersPack, counter, count) \ - if (countersPack) { \ - *countersPack->counter += (count); \ - } - +#define DEC_COUNTER(countersPack, counter) \ + if (countersPack) { \ + --*countersPack->counter; \ + } + +#define ADD_COUNTER(countersPack, counter, count) \ + if (countersPack) { \ + *countersPack->counter += (count); \ + } + #define ADD_COUNTER_COUPLE(countersPack, sqsCounter, ymqCounter, count) \ if (countersPack) { \ *countersPack->sqsCounter += (count); \ @@ -57,11 +57,11 @@ struct TCloudAuthCounters; *countersPack->ymqCounter = (count); \ } -#define COLLECT_HISTOGRAM_COUNTER(countersPack, counter, count) \ - if (countersPack) { \ - countersPack->counter->Collect(count); \ - } - +#define COLLECT_HISTOGRAM_COUNTER(countersPack, counter, count) \ + if (countersPack) { \ + countersPack->counter->Collect(count); \ + } + #define COLLECT_HISTOGRAM_COUNTER_COUPLE(countersCouple, counter, count) \ if (countersCouple.SqsCounters) { \ countersCouple.SqsCounters->counter->Collect(count); \ @@ -70,25 +70,25 @@ struct TCloudAuthCounters; countersCouple.YmqCounters->counter->Collect(count); \ } -// Enums for code readability and for static types guarantee that all parameters passed correctly. -enum class ELaziness { - OnStart, - OnDemand, -}; - -enum class ELifetime { - Persistent, - Expiring, -}; - -enum class EValueType { - Absolute, - Derivative, -}; - -extern const TString TOTAL_COUNTER_LABEL; +// Enums for code readability and for static types guarantee that all parameters passed correctly. +enum class ELaziness { + OnStart, + OnDemand, +}; + +enum class ELifetime { + Persistent, + Expiring, +}; + +enum class EValueType { + Absolute, + Derivative, +}; + +extern const TString TOTAL_COUNTER_LABEL; //constexpr static std::array<int, 10> RESPONSE_CODES = {200, 400, 403, 404, 500, 503, 504}; - + template<typename TCounterPtrType> struct TCountersCouple { TCounterPtrType SqsCounters = nullptr; @@ -97,7 +97,7 @@ struct TCountersCouple { return SqsCounters != nullptr; } }; - + using TIntrusivePtrCntrCouple = TCountersCouple<TIntrusivePtr<NMonitoring::TDynamicCounters>>; TIntrusivePtr<NMonitoring::TDynamicCounters> GetSqsServiceCounters( @@ -114,278 +114,278 @@ TIntrusivePtr<NMonitoring::TDynamicCounters> GetAggregatedCountersFromSqsCoreCou TIntrusivePtr<NMonitoring::TDynamicCounters> GetAggregatedCountersFromUserCounters( const TIntrusivePtrCntrCouple& sqsCoreCounters, const NKikimrConfig::TSqsConfig& cfg); -extern const TString DEFAULT_COUNTER_NAME; +extern const TString DEFAULT_COUNTER_NAME; extern const TString DEFAULT_YMQ_COUNTER_NAME; //extern const TString ACTION_CNTR_PREFIX; - -namespace NDetails { - -template <class TTargetCounterType, class TDerived> -struct TLazyCachedCounterBase { - // Facade for counters aggregation. 
- class TAggregatingCounterFacade { - friend struct TLazyCachedCounterBase; - - TAggregatingCounterFacade(TLazyCachedCounterBase* parent) - : Parent(parent) - { - } - - TAggregatingCounterFacade(const TAggregatingCounterFacade&) = delete; - TAggregatingCounterFacade(TAggregatingCounterFacade&&) = delete; - - public: - ~TAggregatingCounterFacade() = default; - -#define FOR_EACH_AGGREGATED_COUNTER(action) \ - auto* lazyCounter = Parent; \ - while (lazyCounter) { \ - auto& counter = *lazyCounter->EnsureCreated(); \ - action; \ - lazyCounter = lazyCounter->AggregatedParent; \ - } - - TAggregatingCounterFacade& operator++() { - FOR_EACH_AGGREGATED_COUNTER(++counter); - return *this; - } - - TAggregatingCounterFacade& operator--() { - FOR_EACH_AGGREGATED_COUNTER(--counter); - return *this; - } - - TAggregatingCounterFacade& operator+=(i64 value) { - FOR_EACH_AGGREGATED_COUNTER(counter += value); - return *this; - } - - TAggregatingCounterFacade& operator=(i64 value) { - auto* lazyCounter = Parent; - i64 diff = 0; - if (lazyCounter) { - auto& counter = *lazyCounter->EnsureCreated(); - auto prevValue = AtomicSwap(&counter.GetAtomic(), value); - diff = value - prevValue; - lazyCounter = lazyCounter->AggregatedParent; - } - - // Aggregation - while (lazyCounter) { - auto& counter = *lazyCounter->EnsureCreated(); - counter += diff; - lazyCounter = lazyCounter->AggregatedParent; - } - return *this; - } - - void Collect(i64 value) { - FOR_EACH_AGGREGATED_COUNTER(counter.Collect(value)); - } - - void Add(i64 value) { - FOR_EACH_AGGREGATED_COUNTER(counter += value); - } - - void Inc() { - FOR_EACH_AGGREGATED_COUNTER(++counter); - } - -#undef FOR_EACH_AGGREGATED_COUNTER - - TAggregatingCounterFacade* operator->() { - return this; - } - - private: - TLazyCachedCounterBase* const Parent = nullptr; - }; - - TLazyCachedCounterBase() - : Counter(nullptr) - { - } - - TLazyCachedCounterBase(const TLazyCachedCounterBase& other) { - TTargetCounterType* counter = other.Counter; - std::atomic_init(&Counter, counter); - if (counter) { - counter->Ref(); - } else { - RootCounters = other.RootCounters; - Value = other.Value; - Lifetime = other.Lifetime; - } - } - - ~TLazyCachedCounterBase() { - TTargetCounterType* const counter = Counter; - if (counter) { - counter->UnRef(); - } - } - - void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, const TString& value, ELaziness laziness) { - Init(rootCounters, lifetime, DEFAULT_COUNTER_NAME, value, laziness); - } - - void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, const TString& name, const TString& value, ELaziness laziness) { - RootCounters = rootCounters; - Name = name; - Value = value; - Lifetime = lifetime; - if (laziness == ELaziness::OnStart) { - EnsureCreated(); - } - } - - bool operator!() const { - TTargetCounterType* const counter = Counter; - return counter == nullptr; - } - - TAggregatingCounterFacade operator*() { - return this; - } - - TAggregatingCounterFacade operator->() { - return this; - } - - void SetAggregatedParent(TLazyCachedCounterBase* parent) { - AggregatedParent = parent; - Y_ASSERT(!HasCycle()); - } - -protected: - TTargetCounterType* EnsureCreated() { - TTargetCounterType* const counter = Counter; - if (counter) { - return counter; - } - TIntrusivePtr<TTargetCounterType> newCounter = static_cast<TDerived*>(this)->Create(); - if (!newCounter) { - newCounter = TDerived::Default(); - Y_ASSERT(newCounter); - } - TTargetCounterType* expected = nullptr; - if 
(Counter.compare_exchange_strong(expected, newCounter.Get())) { - newCounter->Ref(); - return newCounter.Get(); - } - Y_ASSERT(Counter.load() != nullptr); - return Counter; - } - - bool HasCycle() const { - TLazyCachedCounterBase* item = AggregatedParent; - while (item) { - if (item == this) { - return true; - } - item = item->AggregatedParent; - } - return false; - } - -protected: - TLazyCachedCounterBase* AggregatedParent = nullptr; - TIntrusivePtr<NMonitoring::TDynamicCounters> RootCounters; - TString Name; - TString Value; - ELifetime Lifetime = ELifetime::Persistent; - std::atomic<TTargetCounterType*> Counter; -}; - -} // namespace NDetails - -struct TLazyCachedCounter : public NDetails::TLazyCachedCounterBase<NMonitoring::TCounterForPtr, TLazyCachedCounter> { - TLazyCachedCounter() = default; - TLazyCachedCounter(const TLazyCachedCounter&) = default; - - void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, EValueType valueType, const TString& value, ELaziness laziness) { - Init(rootCounters, lifetime, valueType, DEFAULT_COUNTER_NAME, value, laziness); - } - - void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, EValueType valueType, const TString& name, const TString& value, ELaziness laziness) { - TLazyCachedCounterBase::Init(rootCounters, lifetime, name, value, ELaziness::OnDemand); - ValueType = valueType; - if (laziness == ELaziness::OnStart) { - EnsureCreated(); - } - } - - TIntrusivePtr<NMonitoring::TCounterForPtr> Create() { - if (RootCounters && Name && Value) { - const bool derivative = ValueType == EValueType::Derivative; - return Lifetime == ELifetime::Expiring ? RootCounters->GetExpiringNamedCounter(Name, Value, derivative) : RootCounters->GetNamedCounter(Name, Value, derivative); - } else { - return nullptr; - } - } - - static TIntrusivePtr<NMonitoring::TCounterForPtr> Default() { - static TIntrusivePtr<NMonitoring::TCounterForPtr> counter = new NMonitoring::TCounterForPtr(); - return counter; - } - -private: - EValueType ValueType = EValueType::Absolute; -}; - -struct TLazyCachedHistogram : public NDetails::TLazyCachedCounterBase<NMonitoring::THistogramCounter, TLazyCachedHistogram> { - TLazyCachedHistogram() = default; - TLazyCachedHistogram(const TLazyCachedHistogram&) = default; - - void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, const NMonitoring::TBucketBounds& buckets, const TString& value, ELaziness laziness) { - Init(rootCounters, lifetime, buckets, DEFAULT_COUNTER_NAME, value, laziness); - } - - void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, const NMonitoring::TBucketBounds& buckets, const TString& name, const TString& value, ELaziness laziness) { + +namespace NDetails { + +template <class TTargetCounterType, class TDerived> +struct TLazyCachedCounterBase { + // Facade for counters aggregation. 
+ class TAggregatingCounterFacade { + friend struct TLazyCachedCounterBase; + + TAggregatingCounterFacade(TLazyCachedCounterBase* parent) + : Parent(parent) + { + } + + TAggregatingCounterFacade(const TAggregatingCounterFacade&) = delete; + TAggregatingCounterFacade(TAggregatingCounterFacade&&) = delete; + + public: + ~TAggregatingCounterFacade() = default; + +#define FOR_EACH_AGGREGATED_COUNTER(action) \ + auto* lazyCounter = Parent; \ + while (lazyCounter) { \ + auto& counter = *lazyCounter->EnsureCreated(); \ + action; \ + lazyCounter = lazyCounter->AggregatedParent; \ + } + + TAggregatingCounterFacade& operator++() { + FOR_EACH_AGGREGATED_COUNTER(++counter); + return *this; + } + + TAggregatingCounterFacade& operator--() { + FOR_EACH_AGGREGATED_COUNTER(--counter); + return *this; + } + + TAggregatingCounterFacade& operator+=(i64 value) { + FOR_EACH_AGGREGATED_COUNTER(counter += value); + return *this; + } + + TAggregatingCounterFacade& operator=(i64 value) { + auto* lazyCounter = Parent; + i64 diff = 0; + if (lazyCounter) { + auto& counter = *lazyCounter->EnsureCreated(); + auto prevValue = AtomicSwap(&counter.GetAtomic(), value); + diff = value - prevValue; + lazyCounter = lazyCounter->AggregatedParent; + } + + // Aggregation + while (lazyCounter) { + auto& counter = *lazyCounter->EnsureCreated(); + counter += diff; + lazyCounter = lazyCounter->AggregatedParent; + } + return *this; + } + + void Collect(i64 value) { + FOR_EACH_AGGREGATED_COUNTER(counter.Collect(value)); + } + + void Add(i64 value) { + FOR_EACH_AGGREGATED_COUNTER(counter += value); + } + + void Inc() { + FOR_EACH_AGGREGATED_COUNTER(++counter); + } + +#undef FOR_EACH_AGGREGATED_COUNTER + + TAggregatingCounterFacade* operator->() { + return this; + } + + private: + TLazyCachedCounterBase* const Parent = nullptr; + }; + + TLazyCachedCounterBase() + : Counter(nullptr) + { + } + + TLazyCachedCounterBase(const TLazyCachedCounterBase& other) { + TTargetCounterType* counter = other.Counter; + std::atomic_init(&Counter, counter); + if (counter) { + counter->Ref(); + } else { + RootCounters = other.RootCounters; + Value = other.Value; + Lifetime = other.Lifetime; + } + } + + ~TLazyCachedCounterBase() { + TTargetCounterType* const counter = Counter; + if (counter) { + counter->UnRef(); + } + } + + void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, const TString& value, ELaziness laziness) { + Init(rootCounters, lifetime, DEFAULT_COUNTER_NAME, value, laziness); + } + + void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, const TString& name, const TString& value, ELaziness laziness) { + RootCounters = rootCounters; + Name = name; + Value = value; + Lifetime = lifetime; + if (laziness == ELaziness::OnStart) { + EnsureCreated(); + } + } + + bool operator!() const { + TTargetCounterType* const counter = Counter; + return counter == nullptr; + } + + TAggregatingCounterFacade operator*() { + return this; + } + + TAggregatingCounterFacade operator->() { + return this; + } + + void SetAggregatedParent(TLazyCachedCounterBase* parent) { + AggregatedParent = parent; + Y_ASSERT(!HasCycle()); + } + +protected: + TTargetCounterType* EnsureCreated() { + TTargetCounterType* const counter = Counter; + if (counter) { + return counter; + } + TIntrusivePtr<TTargetCounterType> newCounter = static_cast<TDerived*>(this)->Create(); + if (!newCounter) { + newCounter = TDerived::Default(); + Y_ASSERT(newCounter); + } + TTargetCounterType* expected = nullptr; + if 
(Counter.compare_exchange_strong(expected, newCounter.Get())) { + newCounter->Ref(); + return newCounter.Get(); + } + Y_ASSERT(Counter.load() != nullptr); + return Counter; + } + + bool HasCycle() const { + TLazyCachedCounterBase* item = AggregatedParent; + while (item) { + if (item == this) { + return true; + } + item = item->AggregatedParent; + } + return false; + } + +protected: + TLazyCachedCounterBase* AggregatedParent = nullptr; + TIntrusivePtr<NMonitoring::TDynamicCounters> RootCounters; + TString Name; + TString Value; + ELifetime Lifetime = ELifetime::Persistent; + std::atomic<TTargetCounterType*> Counter; +}; + +} // namespace NDetails + +struct TLazyCachedCounter : public NDetails::TLazyCachedCounterBase<NMonitoring::TCounterForPtr, TLazyCachedCounter> { + TLazyCachedCounter() = default; + TLazyCachedCounter(const TLazyCachedCounter&) = default; + + void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, EValueType valueType, const TString& value, ELaziness laziness) { + Init(rootCounters, lifetime, valueType, DEFAULT_COUNTER_NAME, value, laziness); + } + + void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, EValueType valueType, const TString& name, const TString& value, ELaziness laziness) { + TLazyCachedCounterBase::Init(rootCounters, lifetime, name, value, ELaziness::OnDemand); + ValueType = valueType; + if (laziness == ELaziness::OnStart) { + EnsureCreated(); + } + } + + TIntrusivePtr<NMonitoring::TCounterForPtr> Create() { + if (RootCounters && Name && Value) { + const bool derivative = ValueType == EValueType::Derivative; + return Lifetime == ELifetime::Expiring ? RootCounters->GetExpiringNamedCounter(Name, Value, derivative) : RootCounters->GetNamedCounter(Name, Value, derivative); + } else { + return nullptr; + } + } + + static TIntrusivePtr<NMonitoring::TCounterForPtr> Default() { + static TIntrusivePtr<NMonitoring::TCounterForPtr> counter = new NMonitoring::TCounterForPtr(); + return counter; + } + +private: + EValueType ValueType = EValueType::Absolute; +}; + +struct TLazyCachedHistogram : public NDetails::TLazyCachedCounterBase<NMonitoring::THistogramCounter, TLazyCachedHistogram> { + TLazyCachedHistogram() = default; + TLazyCachedHistogram(const TLazyCachedHistogram&) = default; + + void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, const NMonitoring::TBucketBounds& buckets, const TString& value, ELaziness laziness) { + Init(rootCounters, lifetime, buckets, DEFAULT_COUNTER_NAME, value, laziness); + } + + void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, ELifetime lifetime, const NMonitoring::TBucketBounds& buckets, const TString& name, const TString& value, ELaziness laziness) { Buckets = &buckets; - TLazyCachedCounterBase::Init(rootCounters, lifetime, name, value, ELaziness::OnDemand); - if (laziness == ELaziness::OnStart) { - EnsureCreated(); - } - } - - TIntrusivePtr<NMonitoring::THistogramCounter> Create() { - if (RootCounters && Name && Value && Buckets) { - return Lifetime == ELifetime::Expiring ? 
RootCounters->GetExpiringNamedHistogram(Name, Value, NMonitoring::ExplicitHistogram(*Buckets)) : RootCounters->GetNamedHistogram(Name, Value, NMonitoring::ExplicitHistogram(*Buckets)); - } else { - return nullptr; - } - } - - static TIntrusivePtr<NMonitoring::THistogramCounter> Default() { - static TIntrusivePtr<NMonitoring::THistogramCounter> counter = new NMonitoring::THistogramCounter(NMonitoring::ExplicitHistogram({1})); - return counter; - } - -private: - const NMonitoring::TBucketBounds* Buckets = nullptr; -}; - -// Counters for actions (like SendMessage, CreateQueue or GetQueueUrl). -struct TActionCounters { -public: - TLazyCachedCounter Success; - TLazyCachedCounter Errors; // User metric for cloud console (SendMessage/ReceiveMessage/DeleteMessage). - TLazyCachedCounter Infly; - - TLazyCachedHistogram Duration; // Histogram with buckets for durations (== 18 counters). // User metric for cloud console (SendMessage/DeleteMessage). - - // only for receive message action - TLazyCachedHistogram WorkingDuration; // Special duration except wait time for ReceiveMessage action (== 18 counters). // User metric for cloud console (ReceiveMessage). - + TLazyCachedCounterBase::Init(rootCounters, lifetime, name, value, ELaziness::OnDemand); + if (laziness == ELaziness::OnStart) { + EnsureCreated(); + } + } + + TIntrusivePtr<NMonitoring::THistogramCounter> Create() { + if (RootCounters && Name && Value && Buckets) { + return Lifetime == ELifetime::Expiring ? RootCounters->GetExpiringNamedHistogram(Name, Value, NMonitoring::ExplicitHistogram(*Buckets)) : RootCounters->GetNamedHistogram(Name, Value, NMonitoring::ExplicitHistogram(*Buckets)); + } else { + return nullptr; + } + } + + static TIntrusivePtr<NMonitoring::THistogramCounter> Default() { + static TIntrusivePtr<NMonitoring::THistogramCounter> counter = new NMonitoring::THistogramCounter(NMonitoring::ExplicitHistogram({1})); + return counter; + } + +private: + const NMonitoring::TBucketBounds* Buckets = nullptr; +}; + +// Counters for actions (like SendMessage, CreateQueue or GetQueueUrl). +struct TActionCounters { public: + TLazyCachedCounter Success; + TLazyCachedCounter Errors; // User metric for cloud console (SendMessage/ReceiveMessage/DeleteMessage). + TLazyCachedCounter Infly; + + TLazyCachedHistogram Duration; // Histogram with buckets for durations (== 18 counters). // User metric for cloud console (SendMessage/DeleteMessage). + + // only for receive message action + TLazyCachedHistogram WorkingDuration; // Special duration except wait time for ReceiveMessage action (== 18 counters). // User metric for cloud console (ReceiveMessage). + +public: void Init(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, EAction action, ELifetime lifetime = ELifetime::Persistent); - + virtual void SetAggregatedParent(TActionCounters* parent); virtual ~TActionCounters() {} -}; - +}; + struct TYmqActionCounters : public TActionCounters { void Init(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, EAction action, const TString& labelName, const TString& namePrefix, @@ -400,97 +400,97 @@ private: NMonitoring::TDynamicCounterPtr SubGroup; }; -// Counters for typed queries (WRITE_MESSAGE_ID, PURGE_QUEUE_ID and etc). -struct TQueryTypeCounters { - TLazyCachedCounter TransactionsCount; - TLazyCachedCounter TransactionsFailed; - TLazyCachedHistogram TransactionDuration; // Histogram with buckets for durations (== 18 counters). 
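The TLazyCachedCounterBase / TAggregatingCounterFacade pair above combines two ideas: the underlying NMonitoring counter is only created on first use (a compare-exchange on an atomic pointer), and every update is replayed up the AggregatedParent chain, which is what the FOR_EACH_AGGREGATED_COUNTER loop does. Below is a minimal standalone sketch of that mechanism using plain std::atomic instead of the NMonitoring types; the names (LazyCounter, Add, EnsureCreated) are illustrative only, not the production API.

#include <atomic>
#include <cstdint>
#include <iostream>
#include <memory>

// Simplified stand-in for NMonitoring::TCounterForPtr.
struct Counter {
    std::atomic<int64_t> Value{0};
};

class LazyCounter {
public:
    void SetAggregatedParent(LazyCounter* parent) { Parent = parent; }

    // Replays the update on this counter and on every aggregated ancestor,
    // mirroring the FOR_EACH_AGGREGATED_COUNTER loop.
    void Add(int64_t delta) {
        for (LazyCounter* c = this; c != nullptr; c = c->Parent) {
            c->EnsureCreated()->Value += delta;
        }
    }

    int64_t Get() { return EnsureCreated()->Value.load(); }

private:
    // Creates the counter on first use; losers of the race reuse the winner's object.
    Counter* EnsureCreated() {
        Counter* existing = Holder.load();
        if (existing) {
            return existing;
        }
        auto fresh = std::make_unique<Counter>();
        Counter* expected = nullptr;
        if (Holder.compare_exchange_strong(expected, fresh.get())) {
            return fresh.release();  // ownership moves to Holder; never freed in this sketch
        }
        return expected;  // another thread installed its counter first
    }

    std::atomic<Counter*> Holder{nullptr};
    LazyCounter* Parent = nullptr;
};

int main() {
    LazyCounter total, perQueue;
    perQueue.SetAggregatedParent(&total);
    perQueue.Add(3);
    perQueue.Add(2);
    std::cout << perQueue.Get() << " " << total.Get() << "\n";  // prints: 5 5
}

The production class additionally ref-counts the created counter, falls back to a shared Default() instance when creation is impossible, and special-cases operator= so that only the delta is propagated to the parents.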
- - void SetAggregatedParent(TQueryTypeCounters* parent); -}; - -// Counters for transactions processing. -// These counters are present in queue counters and in user counters. -struct TTransactionCounters : public TAtomicRefCount<TTransactionCounters> { +// Counters for typed queries (WRITE_MESSAGE_ID, PURGE_QUEUE_ID and etc). +struct TQueryTypeCounters { + TLazyCachedCounter TransactionsCount; + TLazyCachedCounter TransactionsFailed; + TLazyCachedHistogram TransactionDuration; // Histogram with buckets for durations (== 18 counters). + + void SetAggregatedParent(TQueryTypeCounters* parent); +}; + +// Counters for transactions processing. +// These counters are present in queue counters and in user counters. +struct TTransactionCounters : public TAtomicRefCount<TTransactionCounters> { // Query types are declared in ydb/core/ymq/base/query_id.h. - TQueryTypeCounters QueryTypeCounters[EQueryId::QUERY_VECTOR_SIZE]; // 23 types at all. 15 for std. 16 for fifo. 2 for user. - + TQueryTypeCounters QueryTypeCounters[EQueryId::QUERY_VECTOR_SIZE]; // 23 types at all. 15 for std. 16 for fifo. 2 for user. + std::shared_ptr<TAlignedPagePoolCounters> AllocPoolCounters; // counters for kikimr core. - - TLazyCachedCounter CompileQueryCount; // Compiles count. - TLazyCachedCounter TransactionsCount; // Transactions processed. - TLazyCachedCounter TransactionsInfly; // Current transactions count inflight. - TLazyCachedCounter TransactionRetryTimeouts; // Count of times when we got temporary error from transaction, but can't retry, because there is no time left. - TLazyCachedCounter TransactionRetries; // Transactions retries due to temporary errors. - TLazyCachedCounter TransactionsFailed; // Transactions that failed. - - TIntrusivePtr<TTransactionCounters> AggregatedParent; // Less detailed transaction counters aggregated by queue/user. - - void SetAggregatedParent(const TIntrusivePtr<TTransactionCounters>& parent); - -public: + + TLazyCachedCounter CompileQueryCount; // Compiles count. + TLazyCachedCounter TransactionsCount; // Transactions processed. + TLazyCachedCounter TransactionsInfly; // Current transactions count inflight. + TLazyCachedCounter TransactionRetryTimeouts; // Count of times when we got temporary error from transaction, but can't retry, because there is no time left. + TLazyCachedCounter TransactionRetries; // Transactions retries due to temporary errors. + TLazyCachedCounter TransactionsFailed; // Transactions that failed. + + TIntrusivePtr<TTransactionCounters> AggregatedParent; // Less detailed transaction counters aggregated by queue/user. + + void SetAggregatedParent(const TIntrusivePtr<TTransactionCounters>& parent); + +public: void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, std::shared_ptr<TAlignedPagePoolCounters> poolCounters, bool forQueue); -}; - -// Amazon status codes. -struct TAPIStatusesCounters { -public: - void AddError(const TString& errorCode, size_t count = 1); - void AddOk(size_t count = 1); - - void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& root); - - void SetAggregatedParent(TAPIStatusesCounters* parent); - -private: +}; + +// Amazon status codes. +struct TAPIStatusesCounters { +public: + void AddError(const TString& errorCode, size_t count = 1); + void AddOk(size_t count = 1); + + void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& root); + + void SetAggregatedParent(TAPIStatusesCounters* parent); + +private: THashMap<TString, TLazyCachedCounter> ErrorToCounter; // Map with different status codes. 
See ydb/core/ymq/base/error.cpp. - TLazyCachedCounter OkCounter; // Special status for successful requests. - TLazyCachedCounter UnknownCounter; // Special status for statuses that are not in map. -}; - -// User counters in SQS core subsystem. -struct TUserCounters : public TAtomicRefCount<TUserCounters> { + TLazyCachedCounter OkCounter; // Special status for successful requests. + TLazyCachedCounter UnknownCounter; // Special status for statuses that are not in map. +}; + +// User counters in SQS core subsystem. +struct TUserCounters : public TAtomicRefCount<TUserCounters> { // Action types are declared in ydb/core/ymq/base/action.h. TActionCounters SqsActionCounters[EAction::ActionsArraySize]; // 11 actions. See IsActionForUser() function in ydb/core/ymq/base/action.cpp. TYmqActionCounters YmqActionCounters[EAction::ActionsArraySize]; // see IsActionForUserYMQ() function. - - TLazyCachedCounter RequestTimeouts; // Requests that weren't processed in 10 minutes. They are almost sure hanged. - - TLazyCachedCounter UnauthenticatedAccess; // For users in exception list and total (aggregated) - unauthenticated accesses count. - - struct TDetailedCounters { - TIntrusivePtr<TTransactionCounters> TransactionCounters; - - TAPIStatusesCounters APIStatuses; - - TLazyCachedHistogram GetConfiguration_Duration; // Part of request initialization. Histogram with buckets for durations (== 18 counters). - TLazyCachedHistogram GetQuota_Duration; // Histogram with buckets for quota durations (== 16 counters). - - TLazyCachedCounter CreateAccountOnTheFly_Success; // Account created on the fly (Yandex Cloud mode). - TLazyCachedCounter CreateAccountOnTheFly_Errors; // Account that were failed to create on the fly (Yandex Cloud mode). - + + TLazyCachedCounter RequestTimeouts; // Requests that weren't processed in 10 minutes. They are almost sure hanged. + + TLazyCachedCounter UnauthenticatedAccess; // For users in exception list and total (aggregated) - unauthenticated accesses count. + + struct TDetailedCounters { + TIntrusivePtr<TTransactionCounters> TransactionCounters; + + TAPIStatusesCounters APIStatuses; + + TLazyCachedHistogram GetConfiguration_Duration; // Part of request initialization. Histogram with buckets for durations (== 18 counters). + TLazyCachedHistogram GetQuota_Duration; // Histogram with buckets for quota durations (== 16 counters). + + TLazyCachedCounter CreateAccountOnTheFly_Success; // Account created on the fly (Yandex Cloud mode). + TLazyCachedCounter CreateAccountOnTheFly_Errors; // Account that were failed to create on the fly (Yandex Cloud mode). + void Init(const TIntrusivePtrCntrCouple& userCounters, const std::shared_ptr<TAlignedPagePoolCounters>& allocPoolCounters, const NKikimrConfig::TSqsConfig& cfg); - void SetAggregatedParent(TDetailedCounters* parent); - }; - - TDetailedCounters* GetDetailedCounters() { - if (NeedToShowDetailedCounters()) { - return &DetailedCounters; - } else if (AggregatedParent) { - return AggregatedParent->GetDetailedCounters(); - } else { - return nullptr; - } - } - - // Raw counters interface - // Don't use counters by name! + void SetAggregatedParent(TDetailedCounters* parent); + }; + + TDetailedCounters* GetDetailedCounters() { + if (NeedToShowDetailedCounters()) { + return &DetailedCounters; + } else if (AggregatedParent) { + return AggregatedParent->GetDetailedCounters(); + } else { + return nullptr; + } + } + + // Raw counters interface + // Don't use counters by name! 
TIntrusivePtrCntrCouple SqsCoreCounters; // Sqs core subsystem TIntrusivePtrCntrCouple UserCounters; // User tree in core subsystem - + TUserCounters( const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& sqsCoreCounters, const TIntrusivePtr<NMonitoring::TDynamicCounters>& ymqRootCounters, @@ -499,293 +499,293 @@ struct TUserCounters : public TAtomicRefCount<TUserCounters> { bool isAggregatedCounters = false ) : SqsCoreCounters{sqsCoreCounters, ymqRootCounters} - , Cfg(&cfg) - , UserName(userName) - , AggregatedParent(aggregatedParent) + , Cfg(&cfg) + , UserName(userName) + , AggregatedParent(aggregatedParent) , IsAggregatedCounters(isAggregatedCounters) - { - InitCounters(userName, allocPoolCounters); - } - - TIntrusivePtr<TQueueCounters> CreateQueueCounters(const TString& queueName, const TString& folderId, bool insertCounters); - - void RemoveCounters(); - - void ShowDetailedCounters(TInstant deadline) { - Y_ASSERT(ShowDetailedCountersDeadline); - *ShowDetailedCountersDeadline = deadline.GetValue(); - } - - bool NeedToShowDetailedCounters() const { - Y_ASSERT(ShowDetailedCountersDeadline); - return TInstant::Now().GetValue() < *ShowDetailedCountersDeadline; - } - - void DisableCounters(bool disable); - - void ExportTransactionCounters(bool needExport) { - NeedExportTransactionCounters = needExport; - } - - TIntrusivePtr<TTransactionCounters> GetTransactionCounters() const { - if (NeedExportTransactionCounters || NeedToShowDetailedCounters()) { - return DetailedCounters.TransactionCounters; - } else if (AggregatedParent) { - return AggregatedParent->GetTransactionCounters(); - } else { - return nullptr; - } - } - -private: + { + InitCounters(userName, allocPoolCounters); + } + + TIntrusivePtr<TQueueCounters> CreateQueueCounters(const TString& queueName, const TString& folderId, bool insertCounters); + + void RemoveCounters(); + + void ShowDetailedCounters(TInstant deadline) { + Y_ASSERT(ShowDetailedCountersDeadline); + *ShowDetailedCountersDeadline = deadline.GetValue(); + } + + bool NeedToShowDetailedCounters() const { + Y_ASSERT(ShowDetailedCountersDeadline); + return TInstant::Now().GetValue() < *ShowDetailedCountersDeadline; + } + + void DisableCounters(bool disable); + + void ExportTransactionCounters(bool needExport) { + NeedExportTransactionCounters = needExport; + } + + TIntrusivePtr<TTransactionCounters> GetTransactionCounters() const { + if (NeedExportTransactionCounters || NeedToShowDetailedCounters()) { + return DetailedCounters.TransactionCounters; + } else if (AggregatedParent) { + return AggregatedParent->GetTransactionCounters(); + } else { + return nullptr; + } + } + +private: void InitCounters(const TString& userName, const std::shared_ptr<TAlignedPagePoolCounters>& allocPoolCounters); - TIntrusivePtr<TQueueCounters> CreateQueueCountersImpl(const TString& queueName, const TString& folderId, bool insertCounters, bool aggregated = false); - - friend struct TQueueCounters; - -private: - const NKikimrConfig::TSqsConfig* Cfg = nullptr; - const TString UserName; + TIntrusivePtr<TQueueCounters> CreateQueueCountersImpl(const TString& queueName, const TString& folderId, bool insertCounters, bool aggregated = false); + + friend struct TQueueCounters; + +private: + const NKikimrConfig::TSqsConfig* Cfg = nullptr; + const TString UserName; std::shared_ptr<std::atomic<ui64>> ShowDetailedCountersDeadline = std::make_shared<std::atomic<ui64>>(0ul); // TInstant value - std::atomic<bool> NeedExportTransactionCounters = false; - TDetailedCounters 
DetailedCounters; - TIntrusivePtr<TUserCounters> AggregatedParent; - TIntrusivePtr<TQueueCounters> AggregatedQueueCounters; + std::atomic<bool> NeedExportTransactionCounters = false; + TDetailedCounters DetailedCounters; + TIntrusivePtr<TUserCounters> AggregatedParent; + TIntrusivePtr<TQueueCounters> AggregatedQueueCounters; bool IsAggregatedCounters; -}; - -// Queue counters in SQS core subsystem. -struct TQueueCounters : public TAtomicRefCount<TQueueCounters> { +}; + +// Queue counters in SQS core subsystem. +struct TQueueCounters : public TAtomicRefCount<TQueueCounters> { // Action types are declared in ydb/core/ymq/base/action.h. TActionCounters SqsActionCounters[EAction::ActionsArraySize]; // 12 actions. See IsActionForQueue() function in ydb/core/ymq/base/action.cpp. TYmqActionCounters YmqActionCounters[EAction::ActionsArraySize]; // See IsActionForQueueYMQ() function in ydb/core/ymq/sqs/base/action.cpp. - - TLazyCachedCounter RequestTimeouts; // Requests that weren't processed in 10 minutes. They are almost sure hanged. + + TLazyCachedCounter RequestTimeouts; // Requests that weren't processed in 10 minutes. They are almost sure hanged. TLazyCachedCounter request_timeouts_count_per_second; // Requests that weren't processed in 10 minutes. They are almost sure hanged. - TLazyCachedCounter RequestsThrottled; // Request that ended with ThrottlingException + TLazyCachedCounter RequestsThrottled; // Request that ended with ThrottlingException TLazyCachedCounter QueueMasterStartProblems; // TODO: remove after migration TLazyCachedCounter QueueLeaderStartProblems; // Critical problems during leader start. - - TLazyCachedCounter MessagesPurged; + + TLazyCachedCounter MessagesPurged; TLazyCachedCounter purged_count_per_second; - - TLazyCachedHistogram MessageReceiveAttempts; // User attempts for receive messages. Histogram with buckets for receive attempts (== 4 counters). // User metric for cloud console. + + TLazyCachedHistogram MessageReceiveAttempts; // User attempts for receive messages. Histogram with buckets for receive attempts (== 4 counters). // User metric for cloud console. TLazyCachedHistogram receive_attempts_count_rate; - TLazyCachedHistogram ClientMessageProcessing_Duration; // Time between receive and delete for deleted message. Histogram with buckets for client processing (== 21 counters). // User metric for cloud console. + TLazyCachedHistogram ClientMessageProcessing_Duration; // Time between receive and delete for deleted message. Histogram with buckets for client processing (== 21 counters). // User metric for cloud console. TLazyCachedHistogram client_processing_duration_milliseconds; - TLazyCachedHistogram MessageReside_Duration; // Time between send and receive for received messages. Histogram with buckets for client processing (== 21 counters). // User metric for cloud console. + TLazyCachedHistogram MessageReside_Duration; // Time between send and receive for received messages. Histogram with buckets for client processing (== 21 counters). // User metric for cloud console. TLazyCachedHistogram reside_duration_milliseconds; - - TLazyCachedCounter DeleteMessage_Count; // Messages count that were deleted. // User metric for cloud console. + + TLazyCachedCounter DeleteMessage_Count; // Messages count that were deleted. // User metric for cloud console. TLazyCachedCounter deleted_count_per_second; - - TLazyCachedCounter ReceiveMessage_EmptyCount; // Receive message requests count that returned empty results. 
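When detailed counters are disabled for a user or queue, the GetDetailedCounters() and GetTransactionCounters() accessors above do not immediately return nullptr: they delegate to the AggregatedParent, so the event is still accounted at a coarser level. A simplified sketch of that fallback chain follows; the types and field names are illustrative, not the actual SQS classes.

#include <iostream>

struct DetailedCounters { /* transaction counters, histograms, ... */ };

struct CountersNode {
    bool DetailedEnabled = false;
    DetailedCounters Detailed;
    CountersNode* AggregatedParent = nullptr;

    // Walk up until a level with detailed counters enabled; may return nullptr.
    DetailedCounters* GetDetailedCounters() {
        if (DetailedEnabled) {
            return &Detailed;
        }
        return AggregatedParent ? AggregatedParent->GetDetailedCounters() : nullptr;
    }
};

int main() {
    CountersNode total, user, queue;
    user.AggregatedParent = &total;
    queue.AggregatedParent = &user;
    total.DetailedEnabled = true;  // only the aggregate keeps detailed counters

    // A queue without its own detailed counters reports into the aggregate.
    std::cout << (queue.GetDetailedCounters() == &total.Detailed) << "\n";  // prints: 1
}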
+ + TLazyCachedCounter ReceiveMessage_EmptyCount; // Receive message requests count that returned empty results. TLazyCachedCounter empty_receive_attempts_count_per_second; - TLazyCachedCounter ReceiveMessage_Count; // Messages count that were received. // User metric for cloud console. + TLazyCachedCounter ReceiveMessage_Count; // Messages count that were received. // User metric for cloud console. TLazyCachedCounter received_count_per_second; - TLazyCachedCounter ReceiveMessage_BytesRead; // Bytes of message bodies that were received. // User metric for cloud console. + TLazyCachedCounter ReceiveMessage_BytesRead; // Bytes of message bodies that were received. // User metric for cloud console. TLazyCachedCounter received_bytes_per_second; + + TLazyCachedCounter MessagesMovedToDLQ; // Count of messages that were moved to DLQ. - TLazyCachedCounter MessagesMovedToDLQ; // Count of messages that were moved to DLQ. - - TLazyCachedCounter SendMessage_DeduplicationCount; // Count of messages that were deduplicated (for fifo only). + TLazyCachedCounter SendMessage_DeduplicationCount; // Count of messages that were deduplicated (for fifo only). TLazyCachedCounter deduplicated_count_per_second; - TLazyCachedCounter SendMessage_Count; // Count of messages that were sent. // User metric for cloud console. + TLazyCachedCounter SendMessage_Count; // Count of messages that were sent. // User metric for cloud console. TLazyCachedCounter sent_count_per_second; - TLazyCachedCounter SendMessage_BytesWritten; // Bytes of message bodies that were sent. + TLazyCachedCounter SendMessage_BytesWritten; // Bytes of message bodies that were sent. TLazyCachedCounter sent_bytes_per_second; - - TLazyCachedCounter MessagesCount; // Messages count in queue. // User metric for cloud console. + + TLazyCachedCounter MessagesCount; // Messages count in queue. // User metric for cloud console. TLazyCachedCounter stored_count; - TLazyCachedCounter InflyMessagesCount; // Messages count in queue that are inflight. // User metric for cloud console. + TLazyCachedCounter InflyMessagesCount; // Messages count in queue that are inflight. // User metric for cloud console. TLazyCachedCounter inflight_count; - TLazyCachedCounter OldestMessageAgeSeconds; // Age of the oldest message in queue. // User metric for cloud console. + TLazyCachedCounter OldestMessageAgeSeconds; // Age of the oldest message in queue. // User metric for cloud console. TLazyCachedCounter oldest_age_milliseconds; - - struct TDetailedCounters { - TIntrusivePtr<TTransactionCounters> TransactionCounters; - - TLazyCachedHistogram GetConfiguration_Duration; // Part of request initialization. Histogram with buckets for durations (== 18 counters). - - TLazyCachedCounter ReceiveMessage_KeysInvalidated; // Count of attempts to receive a message, but race occured. - - TLazyCachedHistogram ReceiveMessageImmediate_Duration; // Time for receive message request that was processed with only one attempt (without wait or try many shards). Histogram with buckets for durations (== 18 counters). - + + struct TDetailedCounters { + TIntrusivePtr<TTransactionCounters> TransactionCounters; + + TLazyCachedHistogram GetConfiguration_Duration; // Part of request initialization. Histogram with buckets for durations (== 18 counters). + + TLazyCachedCounter ReceiveMessage_KeysInvalidated; // Count of attempts to receive a message, but race occured. 
+ + TLazyCachedHistogram ReceiveMessageImmediate_Duration; // Time for receive message request that was processed with only one attempt (without wait or try many shards). Histogram with buckets for durations (== 18 counters). + void Init(const TIntrusivePtr<NMonitoring::TDynamicCounters>& queueCounters, const std::shared_ptr<TAlignedPagePoolCounters>& allocPoolCounters, bool forLeaderNode); - void SetAggregatedParent(TDetailedCounters* parent); - }; - - TDetailedCounters* GetDetailedCounters() { - if (NeedToShowDetailedCounters()) { - return &DetailedCounters; - } else if (AggregatedParent) { - return AggregatedParent->GetDetailedCounters(); - } else { - return nullptr; - } - } - + void SetAggregatedParent(TDetailedCounters* parent); + }; + + TDetailedCounters* GetDetailedCounters() { + if (NeedToShowDetailedCounters()) { + return &DetailedCounters; + } else if (AggregatedParent) { + return AggregatedParent->GetDetailedCounters(); + } else { + return nullptr; + } + } + // Raw counters interface.RootCounters - // Don't use counters by name! + // Don't use counters by name! TIntrusivePtrCntrCouple RootCounters; // Sqs core subsystem. TIntrusivePtrCntrCouple UserCounters; // User tree in core subsystem TIntrusivePtrCntrCouple FolderCounters; // Folder subtree in user tree (only for Yandex Cloud). TIntrusivePtrCntrCouple QueueCounters; // Queue subtree in user (or folder) tree. - + // Creates counters for not leader node. - TQueueCounters(const NKikimrConfig::TSqsConfig& cfg, + TQueueCounters(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtrCntrCouple& sqsCoreCounters, - const TUserCounters* userCounters, - const TString& queueName, - const TString& folderId, - bool insertCounters, - bool aggregated); - - TQueueCounters(const TQueueCounters&) = default; - - void InsertCounters(); - void RemoveCounters(); - + const TUserCounters* userCounters, + const TString& queueName, + const TString& folderId, + bool insertCounters, + bool aggregated); + + TQueueCounters(const TQueueCounters&) = default; + + void InsertCounters(); + void RemoveCounters(); + TIntrusivePtr<TQueueCounters> GetCountersForLeaderNode(); TIntrusivePtr<TQueueCounters> GetCountersForNotLeaderNode(); - - void ShowDetailedCounters(TInstant deadline) { - Y_ASSERT(ShowDetailedCountersDeadline); - *ShowDetailedCountersDeadline = deadline.GetValue(); - } - - bool NeedToShowDetailedCounters() const { - Y_ASSERT(ShowDetailedCountersDeadline); - Y_ASSERT(UserShowDetailedCountersDeadline); - const TInstant now = TInstant::Now(); - return now.GetValue() < *ShowDetailedCountersDeadline || now.GetValue() < *UserShowDetailedCountersDeadline; - } - - TIntrusivePtr<TTransactionCounters> GetTransactionCounters() const { - if (NeedToShowDetailedCounters()) { - return DetailedCounters.TransactionCounters; - } else if (AggregatedParent) { - return AggregatedParent->GetTransactionCounters(); - } else { - return nullptr; - } - } - - void SetAggregatedParent(const TIntrusivePtr<TQueueCounters>& parent); - -private: + + void ShowDetailedCounters(TInstant deadline) { + Y_ASSERT(ShowDetailedCountersDeadline); + *ShowDetailedCountersDeadline = deadline.GetValue(); + } + + bool NeedToShowDetailedCounters() const { + Y_ASSERT(ShowDetailedCountersDeadline); + Y_ASSERT(UserShowDetailedCountersDeadline); + const TInstant now = TInstant::Now(); + return now.GetValue() < *ShowDetailedCountersDeadline || now.GetValue() < *UserShowDetailedCountersDeadline; + } + + TIntrusivePtr<TTransactionCounters> GetTransactionCounters() const { + if 
(NeedToShowDetailedCounters()) { + return DetailedCounters.TransactionCounters; + } else if (AggregatedParent) { + return AggregatedParent->GetTransactionCounters(); + } else { + return nullptr; + } + } + + void SetAggregatedParent(const TIntrusivePtr<TQueueCounters>& parent); + +private: void InitCounters(bool forLeaderNode = false); - -private: - const NKikimrConfig::TSqsConfig* Cfg = nullptr; - const TString QueueName; - bool AggregatedCounters = false; + +private: + const NKikimrConfig::TSqsConfig* Cfg = nullptr; + const TString QueueName; + bool AggregatedCounters = false; TIntrusivePtr<TQueueCounters> NotLeaderNodeCounters; std::shared_ptr<std::atomic<ui64>> ShowDetailedCountersDeadline = std::make_shared<std::atomic<ui64>>(0ul); // TInstant value std::shared_ptr<std::atomic<ui64>> UserShowDetailedCountersDeadline; - TDetailedCounters DetailedCounters; + TDetailedCounters DetailedCounters; std::shared_ptr<TAlignedPagePoolCounters> AllocPoolCounters; // Transaction counters for kikimr core. - TIntrusivePtr<TQueueCounters> AggregatedParent; -}; - -// -// Http subsystem counters -// - -// Http counters for actions (like SendMessage, CreateQueue or GetQueueUrl). -struct THttpActionCounters { - TLazyCachedCounter Requests; // Requests count of given type. - - void Init(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, EAction action); - void SetAggregatedParent(THttpActionCounters* parent); - -private: - const NKikimrConfig::TSqsConfig* Cfg = nullptr; -}; - -struct THttpUserCounters : public TAtomicRefCount<THttpUserCounters> { + TIntrusivePtr<TQueueCounters> AggregatedParent; +}; + +// +// Http subsystem counters +// + +// Http counters for actions (like SendMessage, CreateQueue or GetQueueUrl). +struct THttpActionCounters { + TLazyCachedCounter Requests; // Requests count of given type. + + void Init(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& rootCounters, EAction action); + void SetAggregatedParent(THttpActionCounters* parent); + +private: + const NKikimrConfig::TSqsConfig* Cfg = nullptr; +}; + +struct THttpUserCounters : public TAtomicRefCount<THttpUserCounters> { // Action types are declared in ydb/core/ymq/base/action.h. - THttpActionCounters ActionCounters[EAction::ActionsArraySize]; // 23 actions. - - TLazyCachedCounter RequestExceptions; // Exceptions count during http request processing. - - // Raw counters interface - // Don't use counters by name! - TIntrusivePtr<NMonitoring::TDynamicCounters> SqsHttpCounters; // Sqs http subsystem - TIntrusivePtr<NMonitoring::TDynamicCounters> UserCounters; // User tree in core subsystem - - THttpUserCounters(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& sqsHttpCounters, const TString& userName) - : SqsHttpCounters(sqsHttpCounters) - , Cfg(&cfg) - { - InitCounters(userName); - } - - void SetAggregatedParent(const TIntrusivePtr<THttpUserCounters>& parent); - -private: - void InitCounters(const TString& userName); - -private: - const NKikimrConfig::TSqsConfig* Cfg = nullptr; - TIntrusivePtr<THttpUserCounters> AggregatedParent; -}; - -struct THttpCounters : public TAtomicRefCount<THttpCounters> { - TLazyCachedCounter RequestExceptions; // Exceptions count during http request processing. - TLazyCachedCounter InternalExceptions; // Internal exceptions count. - TLazyCachedCounter ConnectionsCount; // Connections count that kikimr http server sees. 
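NeedToShowDetailedCounters() above makes the detailed counters a time-limited switch: ShowDetailedCounters(deadline) stores the TInstant value into a shared std::atomic<ui64>, and every check compares the current time against it; the queue variant also honours the owning user's deadline through a second shared pointer. Below is a reduced sketch with std::chrono in place of TInstant, sharing a single deadline between user and queue for brevity; names are illustrative.

#include <atomic>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <memory>

using Clock = std::chrono::steady_clock;

static uint64_t NowTicks() {
    return static_cast<uint64_t>(Clock::now().time_since_epoch().count());
}

struct DetailedSwitch {
    // Shared so that a queue can also observe its user's deadline.
    std::shared_ptr<std::atomic<uint64_t>> Deadline =
        std::make_shared<std::atomic<uint64_t>>(0);

    void ShowDetailedCounters(Clock::duration forHowLong) {
        Deadline->store(NowTicks() + static_cast<uint64_t>(forHowLong.count()));
    }

    bool NeedToShowDetailedCounters() const {
        return NowTicks() < Deadline->load();
    }
};

int main() {
    DetailedSwitch user;
    DetailedSwitch queue;
    queue.Deadline = user.Deadline;  // queue checks the user's deadline too

    std::cout << queue.NeedToShowDetailedCounters() << "\n";  // prints: 0
    user.ShowDetailedCounters(std::chrono::minutes(10));
    std::cout << queue.NeedToShowDetailedCounters() << "\n";  // prints: 1
}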
- - // Raw counters interface - // Don't use counters by name! - TIntrusivePtr<NMonitoring::TDynamicCounters> SqsHttpCounters; // Sqs http subsystem - - THttpCounters(const NKikimrConfig::TSqsConfig& cfg, TIntrusivePtr<NMonitoring::TDynamicCounters> sqsHttpCounters) - : SqsHttpCounters(std::move(sqsHttpCounters)) - , Cfg(&cfg) - { - InitCounters(); - } - - TIntrusivePtr<THttpUserCounters> GetUserCounters(const TString& userName); - -private: - void InitCounters(); - TIntrusivePtr<THttpUserCounters> GetUserCountersImpl(const TString& userName, const TIntrusivePtr<THttpUserCounters>& aggregatedUserCounters); - -private: - const NKikimrConfig::TSqsConfig* Cfg = nullptr; - TAdaptiveLock Lock; - THashMap<TString, TIntrusivePtr<THttpUserCounters>> UserCounters; - TIntrusivePtr<THttpUserCounters> AggregatedUserCounters; -}; - -// Cloud specific counters. + THttpActionCounters ActionCounters[EAction::ActionsArraySize]; // 23 actions. + + TLazyCachedCounter RequestExceptions; // Exceptions count during http request processing. + + // Raw counters interface + // Don't use counters by name! + TIntrusivePtr<NMonitoring::TDynamicCounters> SqsHttpCounters; // Sqs http subsystem + TIntrusivePtr<NMonitoring::TDynamicCounters> UserCounters; // User tree in core subsystem + + THttpUserCounters(const NKikimrConfig::TSqsConfig& cfg, const TIntrusivePtr<NMonitoring::TDynamicCounters>& sqsHttpCounters, const TString& userName) + : SqsHttpCounters(sqsHttpCounters) + , Cfg(&cfg) + { + InitCounters(userName); + } + + void SetAggregatedParent(const TIntrusivePtr<THttpUserCounters>& parent); + +private: + void InitCounters(const TString& userName); + +private: + const NKikimrConfig::TSqsConfig* Cfg = nullptr; + TIntrusivePtr<THttpUserCounters> AggregatedParent; +}; + +struct THttpCounters : public TAtomicRefCount<THttpCounters> { + TLazyCachedCounter RequestExceptions; // Exceptions count during http request processing. + TLazyCachedCounter InternalExceptions; // Internal exceptions count. + TLazyCachedCounter ConnectionsCount; // Connections count that kikimr http server sees. + + // Raw counters interface + // Don't use counters by name! + TIntrusivePtr<NMonitoring::TDynamicCounters> SqsHttpCounters; // Sqs http subsystem + + THttpCounters(const NKikimrConfig::TSqsConfig& cfg, TIntrusivePtr<NMonitoring::TDynamicCounters> sqsHttpCounters) + : SqsHttpCounters(std::move(sqsHttpCounters)) + , Cfg(&cfg) + { + InitCounters(); + } + + TIntrusivePtr<THttpUserCounters> GetUserCounters(const TString& userName); + +private: + void InitCounters(); + TIntrusivePtr<THttpUserCounters> GetUserCountersImpl(const TString& userName, const TIntrusivePtr<THttpUserCounters>& aggregatedUserCounters); + +private: + const NKikimrConfig::TSqsConfig* Cfg = nullptr; + TAdaptiveLock Lock; + THashMap<TString, TIntrusivePtr<THttpUserCounters>> UserCounters; + TIntrusivePtr<THttpUserCounters> AggregatedUserCounters; +}; + +// Cloud specific counters. struct TCloudAuthCounters { - // Durations for different security actions types. - TLazyCachedHistogram AuthenticateDuration; // Histogram with buckets for durations (== 18 counters). - TLazyCachedHistogram AuthorizeDuration; // Histogram with buckets for durations (== 18 counters). - TLazyCachedHistogram GetFolderIdDuration; // Histogram with buckets for durations (== 18 counters). - - explicit TCloudAuthCounters(const NKikimrConfig::TSqsConfig& cfg, TIntrusivePtr<NMonitoring::TDynamicCounters> cloudAuthCountersRoot) - : Cfg(&cfg) - { + // Durations for different security actions types. 
+ TLazyCachedHistogram AuthenticateDuration; // Histogram with buckets for durations (== 18 counters). + TLazyCachedHistogram AuthorizeDuration; // Histogram with buckets for durations (== 18 counters). + TLazyCachedHistogram GetFolderIdDuration; // Histogram with buckets for durations (== 18 counters). + + explicit TCloudAuthCounters(const NKikimrConfig::TSqsConfig& cfg, TIntrusivePtr<NMonitoring::TDynamicCounters> cloudAuthCountersRoot) + : Cfg(&cfg) + { InitCounters(std::move(cloudAuthCountersRoot)); } void IncCounter(const NCloudAuth::EActionType actionType, const NCloudAuth::ECredentialType credentialType, int grpcStatus); static constexpr int GRPC_STATUSES_COUNT = 18; - + private: void InitCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> cloudAuthCounters); private: - const NKikimrConfig::TSqsConfig* Cfg = nullptr; - TLazyCachedCounter CloudAuthCounters[NCloudAuth::EActionType::ActionTypesCount] // 3 types. - [NCloudAuth::ECredentialType::CredentialTypesCount] // 2 types. - [GRPC_STATUSES_COUNT]; // 18 types. + const NKikimrConfig::TSqsConfig* Cfg = nullptr; + TLazyCachedCounter CloudAuthCounters[NCloudAuth::EActionType::ActionTypesCount] // 3 types. + [NCloudAuth::ECredentialType::CredentialTypesCount] // 2 types. + [GRPC_STATUSES_COUNT]; // 18 types. }; // Metering counters in SQS core subsystem. @@ -808,4 +808,4 @@ private: const NKikimrConfig::TSqsConfig& Config; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/debug_info.cpp b/ydb/core/ymq/base/debug_info.cpp index b80896b6fbd..7ab5e3b484f 100644 --- a/ydb/core/ymq/base/debug_info.cpp +++ b/ydb/core/ymq/base/debug_info.cpp @@ -1,22 +1,22 @@ -#include "debug_info.h" - -namespace NKikimr::NSQS { - -TDebugInfoHolder DebugInfo = {}; - -TDebugInfo::TDebugInfo() { - UnparsedHttpRequests.reserve(1000); -} - -void TDebugInfo::MoveToParsedHttpRequests(const TString& requestId, class THttpRequest* request) { - ParsedHttpRequests.emplace(requestId, request); - UnparsedHttpRequests.erase(request); -} - -void TDebugInfo::EraseHttpRequest(const TString& requestId, class THttpRequest* request) { - if (!ParsedHttpRequests.EraseKeyValue(requestId, request)) { - UnparsedHttpRequests.erase(request); - } -} - -} // namespace NKikimr::NSQS +#include "debug_info.h" + +namespace NKikimr::NSQS { + +TDebugInfoHolder DebugInfo = {}; + +TDebugInfo::TDebugInfo() { + UnparsedHttpRequests.reserve(1000); +} + +void TDebugInfo::MoveToParsedHttpRequests(const TString& requestId, class THttpRequest* request) { + ParsedHttpRequests.emplace(requestId, request); + UnparsedHttpRequests.erase(request); +} + +void TDebugInfo::EraseHttpRequest(const TString& requestId, class THttpRequest* request) { + if (!ParsedHttpRequests.EraseKeyValue(requestId, request)) { + UnparsedHttpRequests.erase(request); + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/debug_info.h b/ydb/core/ymq/base/debug_info.h index 8ead82ebed5..11c30ed0a6f 100644 --- a/ydb/core/ymq/base/debug_info.h +++ b/ydb/core/ymq/base/debug_info.h @@ -1,102 +1,102 @@ -#pragma once -#include <util/generic/hash.h> -#include <util/generic/hash_set.h> -#include <util/generic/string.h> -#include <util/system/spinlock.h> - -namespace NActors { -class IActor; -} // namespace NActors - -namespace NKikimr::NSQS { -namespace NDebug { - -// Multimap for per request objects. -// Provides simple deletion by key and value functionality. 
-template <class TKey, class T> -class TMultimapWithKeyValueErase : public THashMultiMap<TKey, T> { -public: - TMultimapWithKeyValueErase() { - this->reserve(1000); // reserve to not reallocate at runtime - } - - using THashMultiMap<TKey, T>::THashMultiMap; - - bool EraseKeyValue(const TKey& key, const T& value) { - auto [i, end] = this->equal_range(key); - for (; i != end; ++i) { - if (i->second == value) { - this->erase(i); - return true; - } - } - return false; - } -}; - -} // namespace NDebug - -// Structure for using in debug purpuses with gdb. -// Can show all main SQS objects in coredump. -struct TDebugInfo { - TDebugInfo(); - - // Actors - class TSqsService* SqsServiceActorPtr = nullptr; - class TSqsProxyService* SqsProxyServiceActorPtr = nullptr; - - // Requests by request id - NDebug::TMultimapWithKeyValueErase<TString, class TProxyActor*> ProxyActors; - NDebug::TMultimapWithKeyValueErase<TString, NActors::IActor*> ActionActors; - NDebug::TMultimapWithKeyValueErase<TString, class TMiniKqlExecutionActor*> ExecutorActors; - - // Queue activities by [username/queuename] +#pragma once +#include <util/generic/hash.h> +#include <util/generic/hash_set.h> +#include <util/generic/string.h> +#include <util/system/spinlock.h> + +namespace NActors { +class IActor; +} // namespace NActors + +namespace NKikimr::NSQS { +namespace NDebug { + +// Multimap for per request objects. +// Provides simple deletion by key and value functionality. +template <class TKey, class T> +class TMultimapWithKeyValueErase : public THashMultiMap<TKey, T> { +public: + TMultimapWithKeyValueErase() { + this->reserve(1000); // reserve to not reallocate at runtime + } + + using THashMultiMap<TKey, T>::THashMultiMap; + + bool EraseKeyValue(const TKey& key, const T& value) { + auto [i, end] = this->equal_range(key); + for (; i != end; ++i) { + if (i->second == value) { + this->erase(i); + return true; + } + } + return false; + } +}; + +} // namespace NDebug + +// Structure for using in debug purpuses with gdb. +// Can show all main SQS objects in coredump. +struct TDebugInfo { + TDebugInfo(); + + // Actors + class TSqsService* SqsServiceActorPtr = nullptr; + class TSqsProxyService* SqsProxyServiceActorPtr = nullptr; + + // Requests by request id + NDebug::TMultimapWithKeyValueErase<TString, class TProxyActor*> ProxyActors; + NDebug::TMultimapWithKeyValueErase<TString, NActors::IActor*> ActionActors; + NDebug::TMultimapWithKeyValueErase<TString, class TMiniKqlExecutionActor*> ExecutorActors; + + // Queue activities by [username/queuename] NDebug::TMultimapWithKeyValueErase<TString, class TQueueLeader*> QueueLeaders; - NDebug::TMultimapWithKeyValueErase<TString, class TPurgeActor*> QueuePurgeActors; - NDebug::TMultimapWithKeyValueErase<TString, class TRetentionActor*> QueueRetentionActors; - NDebug::TMultimapWithKeyValueErase<TString, class TCleanupActor*> QueueCleanupActors; - NDebug::TMultimapWithKeyValueErase<TString, class TQueueMigrationActor*> QueueMigrationActors; - - // Http - class TAsyncHttpServer* HttpServer = nullptr; - THashSet<class THttpRequest*> UnparsedHttpRequests; // requests without assigned request id - NDebug::TMultimapWithKeyValueErase<TString, class THttpRequest*> ParsedHttpRequests; // http requests with request id - void MoveToParsedHttpRequests(const TString& requestId, class THttpRequest* request); - void EraseHttpRequest(const TString& requestId, class THttpRequest* request); -}; - -// Helper for safe access to debug info. 
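TMultimapWithKeyValueErase above adds a single convenience to THashMultiMap: erasing one specific (key, value) pair, which is what EraseHttpRequest and the other debug maps need when several objects share a request id. The same idea with std::unordered_multimap is sketched below; it is an illustration, not the production container.

#include <iostream>
#include <string>
#include <unordered_map>

// Erase exactly one element that matches both the key and the value.
// Returns true if something was removed.
template <class Key, class Value>
bool EraseKeyValue(std::unordered_multimap<Key, Value>& map,
                   const Key& key, const Value& value) {
    auto [it, end] = map.equal_range(key);
    for (; it != end; ++it) {
        if (it->second == value) {
            map.erase(it);
            return true;
        }
    }
    return false;
}

int main() {
    std::unordered_multimap<std::string, int> requests;
    requests.emplace("req-1", 10);
    requests.emplace("req-1", 20);  // same request id, different object

    EraseKeyValue(requests, std::string("req-1"), 20);
    std::cout << requests.count("req-1") << "\n";  // prints: 1
}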
-class TDebugInfoHolder { -public: - class TAutoGuarder { - friend class TDebugInfoHolder; - - TAutoGuarder(TDebugInfoHolder& parent) - : Parent(parent) - , Guard(Parent.Lock) - { - } - - public: - TDebugInfo* operator->() { - return &Parent.DebugInfo; - } - - private: - TDebugInfoHolder& Parent; - TGuard<TAdaptiveLock> Guard; - }; - - // Returns safe (guarded) debug info to write to. - TAutoGuarder operator->() { - return { *this }; - } - -private: - TDebugInfo DebugInfo; - TAdaptiveLock Lock; -}; - -extern TDebugInfoHolder DebugInfo; - -} // namespace NKikimr::NSQS + NDebug::TMultimapWithKeyValueErase<TString, class TPurgeActor*> QueuePurgeActors; + NDebug::TMultimapWithKeyValueErase<TString, class TRetentionActor*> QueueRetentionActors; + NDebug::TMultimapWithKeyValueErase<TString, class TCleanupActor*> QueueCleanupActors; + NDebug::TMultimapWithKeyValueErase<TString, class TQueueMigrationActor*> QueueMigrationActors; + + // Http + class TAsyncHttpServer* HttpServer = nullptr; + THashSet<class THttpRequest*> UnparsedHttpRequests; // requests without assigned request id + NDebug::TMultimapWithKeyValueErase<TString, class THttpRequest*> ParsedHttpRequests; // http requests with request id + void MoveToParsedHttpRequests(const TString& requestId, class THttpRequest* request); + void EraseHttpRequest(const TString& requestId, class THttpRequest* request); +}; + +// Helper for safe access to debug info. +class TDebugInfoHolder { +public: + class TAutoGuarder { + friend class TDebugInfoHolder; + + TAutoGuarder(TDebugInfoHolder& parent) + : Parent(parent) + , Guard(Parent.Lock) + { + } + + public: + TDebugInfo* operator->() { + return &Parent.DebugInfo; + } + + private: + TDebugInfoHolder& Parent; + TGuard<TAdaptiveLock> Guard; + }; + + // Returns safe (guarded) debug info to write to. 
+ TAutoGuarder operator->() { + return { *this }; + } + +private: + TDebugInfo DebugInfo; + TAdaptiveLock Lock; +}; + +extern TDebugInfoHolder DebugInfo; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/dlq_helpers.cpp b/ydb/core/ymq/base/dlq_helpers.cpp index a2ba9fac425..610a53165f4 100644 --- a/ydb/core/ymq/base/dlq_helpers.cpp +++ b/ydb/core/ymq/base/dlq_helpers.cpp @@ -44,11 +44,11 @@ TRedrivePolicy TRedrivePolicy::FromJson(const TString& json, const NKikimrConfig size_t maxReceiveCountValue = 0; if (TryFromString(validatedRedrivePolicy[maxReceiveCount].ForceString(), maxReceiveCountValue)) { if (maxReceiveCountValue < TLimits::MinMaxReceiveCount || maxReceiveCountValue > TLimits::MaxMaxReceiveCount) { - policy.ErrorText = "maxReceiveCount must be greater than 0 and less than 1001."; + policy.ErrorText = "maxReceiveCount must be greater than 0 and less than 1001."; return policy; } } else { - policy.ErrorText = "Failed to parse maxReceiveCount as an integer."; + policy.ErrorText = "Failed to parse maxReceiveCount as an integer."; return policy; } @@ -64,7 +64,7 @@ TRedrivePolicy TRedrivePolicy::FromJson(const TString& json, const NKikimrConfig } if (!queueName) { - policy.ErrorText = "Empty dead letter target queue name."; + policy.ErrorText = "Empty dead letter target queue name."; return policy; } @@ -77,7 +77,7 @@ TRedrivePolicy TRedrivePolicy::FromJson(const TString& json, const NKikimrConfig policy.TargetQueueName = ""; policy.TargetArn = ""; } else { - policy.ErrorText = "RedrivePolicy attribute is ill-constructed."; + policy.ErrorText = "RedrivePolicy attribute is ill-constructed."; } return policy; diff --git a/ydb/core/ymq/base/helpers.cpp b/ydb/core/ymq/base/helpers.cpp index 0d6ad36b135..ef900ee463b 100644 --- a/ydb/core/ymq/base/helpers.cpp +++ b/ydb/core/ymq/base/helpers.cpp @@ -1,189 +1,189 @@ -#include "helpers.h" - +#include "helpers.h" + #include <library/cpp/string_utils/base64/base64.h> - -#include <util/charset/utf8.h> -#include <util/generic/array_size.h> -#include <util/generic/yexception.h> -#include <util/stream/format.h> -#include <util/string/ascii.h> -#include <util/string/builder.h> - -namespace NKikimr::NSQS { - -static bool AlphaNumAndPunctuation[256] = {}; - -static bool MakeAlphaNumAndPunctuation() { - char src[] = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"; - for (size_t i = 0; i < Y_ARRAY_SIZE(src) - 1; ++i) { - AlphaNumAndPunctuation[static_cast<unsigned char>(src[i])] = true; - } - return true; -} - -static const bool AlphaNumAndPunctuationMade = MakeAlphaNumAndPunctuation(); - -bool IsAlphaNumAndPunctuation(TStringBuf str) { - for (char c : str) { - if (!AlphaNumAndPunctuation[static_cast<unsigned char>(c)]) { - return false; - } - } - return true; -} - - -static bool MessageAttributesCharacters[256] = {}; + +#include <util/charset/utf8.h> +#include <util/generic/array_size.h> +#include <util/generic/yexception.h> +#include <util/stream/format.h> +#include <util/string/ascii.h> +#include <util/string/builder.h> + +namespace NKikimr::NSQS { + +static bool AlphaNumAndPunctuation[256] = {}; + +static bool MakeAlphaNumAndPunctuation() { + char src[] = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"; + for (size_t i = 0; i < Y_ARRAY_SIZE(src) - 1; ++i) { + AlphaNumAndPunctuation[static_cast<unsigned char>(src[i])] = true; + } + return true; +} + +static const bool AlphaNumAndPunctuationMade = MakeAlphaNumAndPunctuation(); + +bool 
IsAlphaNumAndPunctuation(TStringBuf str) { + for (char c : str) { + if (!AlphaNumAndPunctuation[static_cast<unsigned char>(c)]) { + return false; + } + } + return true; +} + + +static bool MessageAttributesCharacters[256] = {}; constexpr TStringBuf AWS_RESERVED_PREFIX = "AWS."; constexpr TStringBuf AMAZON_RESERVED_PREFIX = "Amazon."; constexpr TStringBuf YA_RESERVED_PREFIX = "Ya."; constexpr TStringBuf YC_RESERVED_PREFIX = "YC."; constexpr TStringBuf YANDEX_RESERVED_PREFIX = "Yandex."; constexpr TStringBuf FIFO_SUFFIX = ".fifo"; - -static bool MakeMessageAttributesCharacters() { - char src[] = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-."; - for (size_t i = 0; i < Y_ARRAY_SIZE(src) - 1; ++i) { - MessageAttributesCharacters[static_cast<unsigned char>(src[i])] = true; - } - return true; -} - -static const bool MessageAttributesCharactersAreMade = MakeMessageAttributesCharacters(); - -// https://docs.aws.amazon.com/en_us/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html -bool ValidateMessageAttributeName(TStringBuf str, bool& hasYandexPrefix, bool allowYandexPrefix) { + +static bool MakeMessageAttributesCharacters() { + char src[] = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-."; + for (size_t i = 0; i < Y_ARRAY_SIZE(src) - 1; ++i) { + MessageAttributesCharacters[static_cast<unsigned char>(src[i])] = true; + } + return true; +} + +static const bool MessageAttributesCharactersAreMade = MakeMessageAttributesCharacters(); + +// https://docs.aws.amazon.com/en_us/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html +bool ValidateMessageAttributeName(TStringBuf str, bool& hasYandexPrefix, bool allowYandexPrefix) { if (!str || str.size() > 256) { - return false; - } - + return false; + } + if (str[0] == '.' || str[str.size() - 1] == '.') { - return false; - } - + return false; + } + for (size_t i = 0; i < str.size() - 1; ++i) { - if (!MessageAttributesCharacters[static_cast<unsigned char>(str[i])]) { - return false; - } - if (str[i] == '.' && str[i + 1] == '.') { - return false; - } - } - + if (!MessageAttributesCharacters[static_cast<unsigned char>(str[i])]) { + return false; + } + if (str[i] == '.' 
&& str[i + 1] == '.') { + return false; + } + } + if (!MessageAttributesCharacters[static_cast<unsigned char>(str[str.size() - 1])]) { - return false; - } - - // AWS reserved prefixes: - if (AsciiHasPrefixIgnoreCase(str, AWS_RESERVED_PREFIX) || AsciiHasPrefixIgnoreCase(str, AMAZON_RESERVED_PREFIX)) { - return false; - } - - // Yandex reserved prefixes: - if (AsciiHasPrefixIgnoreCase(str, YA_RESERVED_PREFIX) || AsciiHasPrefixIgnoreCase(str, YC_RESERVED_PREFIX) || AsciiHasPrefixIgnoreCase(str, YANDEX_RESERVED_PREFIX)) { - hasYandexPrefix = true; - if (!allowYandexPrefix) { - return false; - } - } - - return true; -} - -static bool QueueNameCharacters[256] = {}; - -static bool MakeQueueNameCharacters() { - char src[] = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_"; - for (size_t i = 0; i < Y_ARRAY_SIZE(src) - 1; ++i) { - QueueNameCharacters[static_cast<unsigned char>(src[i])] = true; - } - return true; -} - -static const bool QueueNameCharactersAreMade = MakeQueueNameCharacters(); - -bool ValidateQueueNameOrUserName(TStringBuf name) { + return false; + } + + // AWS reserved prefixes: + if (AsciiHasPrefixIgnoreCase(str, AWS_RESERVED_PREFIX) || AsciiHasPrefixIgnoreCase(str, AMAZON_RESERVED_PREFIX)) { + return false; + } + + // Yandex reserved prefixes: + if (AsciiHasPrefixIgnoreCase(str, YA_RESERVED_PREFIX) || AsciiHasPrefixIgnoreCase(str, YC_RESERVED_PREFIX) || AsciiHasPrefixIgnoreCase(str, YANDEX_RESERVED_PREFIX)) { + hasYandexPrefix = true; + if (!allowYandexPrefix) { + return false; + } + } + + return true; +} + +static bool QueueNameCharacters[256] = {}; + +static bool MakeQueueNameCharacters() { + char src[] = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_"; + for (size_t i = 0; i < Y_ARRAY_SIZE(src) - 1; ++i) { + QueueNameCharacters[static_cast<unsigned char>(src[i])] = true; + } + return true; +} + +static const bool QueueNameCharactersAreMade = MakeQueueNameCharacters(); + +bool ValidateQueueNameOrUserName(TStringBuf name) { if (name.size() > 80) { - return false; - } - if (AsciiHasSuffixIgnoreCase(name, FIFO_SUFFIX)) { + return false; + } + if (AsciiHasSuffixIgnoreCase(name, FIFO_SUFFIX)) { name = name.SubStr(0, name.size() - FIFO_SUFFIX.size()); - } + } if (name.empty()) { - return false; - } + return false; + } for (size_t i = 0; i < name.size(); ++i) { - if (!QueueNameCharacters[static_cast<unsigned char>(name[i])]) { - return false; - } - } - return true; -} - -static TString ProtobufToString(const NProtoBuf::Message& proto) { - TString ret; + if (!QueueNameCharacters[static_cast<unsigned char>(name[i])]) { + return false; + } + } + return true; +} + +static TString ProtobufToString(const NProtoBuf::Message& proto) { + TString ret; Y_PROTOBUF_SUPPRESS_NODISCARD proto.SerializeToString(&ret); - return ret; -} - -static TString EncodeString(const TString& value) { - TString result = Base64EncodeUrl(value); + return ret; +} + +static TString EncodeString(const TString& value) { + TString result = Base64EncodeUrl(value); // Remove these symbols from the end of the string to avoid problems // with cgi escaping. 
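ValidateQueueNameOrUserName() above caps names at 80 characters, strips an optional ".fifo" suffix (case-insensitively, via AsciiHasSuffixIgnoreCase), and then accepts only characters from a precomputed [A-Za-z0-9_-] table. A compact re-implementation of the same rules follows for illustration only; the suffix check here is case-sensitive for simplicity, unlike the original.

#include <cctype>
#include <iostream>
#include <string_view>

static bool IsAllowedNameChar(unsigned char c) {
    return std::isalnum(c) || c == '-' || c == '_';
}

bool ValidateQueueNameOrUserName(std::string_view name) {
    if (name.size() > 80) {
        return false;
    }
    constexpr std::string_view kFifoSuffix = ".fifo";
    if (name.size() >= kFifoSuffix.size() &&
        name.substr(name.size() - kFifoSuffix.size()) == kFifoSuffix) {
        name.remove_suffix(kFifoSuffix.size());
    }
    if (name.empty()) {
        return false;
    }
    for (unsigned char c : name) {
        if (!IsAllowedNameChar(c)) {
            return false;
        }
    }
    return true;
}

int main() {
    std::cout << ValidateQueueNameOrUserName("my-queue_01.fifo") << "\n";  // prints: 1
    std::cout << ValidateQueueNameOrUserName("bad name!") << "\n";         // prints: 0
    std::cout << ValidateQueueNameOrUserName(".fifo") << "\n";             // prints: 0
}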
- while (!result.empty() && (result.back() == ',' || result.back() == '=')) { - result.pop_back(); - } - - return result; -} - -TString EncodeReceiptHandle(const TReceipt& receipt) { - return EncodeString(ProtobufToString(receipt)); -} - -TReceipt DecodeReceiptHandle(const TString& receipt) { - TString decoded = Base64DecodeUneven(receipt); - TReceipt ret; - Y_ENSURE(!decoded.empty()); - Y_ENSURE(ret.ParseFromString(decoded)); - return ret; -} - -// https://docs.aws.amazon.com/en_us/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html -static bool IsValidMessageBodyCharacter(wchar32 c) { - if (c < 0x20u) { - return c == 0x9u || c == 0xAu || c == 0xDu; - } else { - if (c <= 0xD7FFu) { - return true; - } else if (c >= 0xE000u && c <= 0xFFFDu) { - return true; - } else if (c >= 0x10000u && c <= 0x10FFFFu) { - return true; - } - } - return false; -} - -bool ValidateMessageBody(TStringBuf body, TString& errorDescription) { - const unsigned char* s = reinterpret_cast<const unsigned char*>(body.data()); - const unsigned char* const end = s + body.size(); - while (s != end) { - wchar32 c; - size_t clen; - const RECODE_RESULT result = SafeReadUTF8Char(c, clen, s, end); - if (result != RECODE_OK) { - errorDescription = TStringBuilder() << "nonunicode characters are not allowed"; - return false; - } - if (!IsValidMessageBodyCharacter(c)) { - errorDescription = TStringBuilder() << "character " << Hex(c) << " is not allowed"; - return false; - } - - s += clen; - } - return true; -} - -} // namespace NKikimr::NSQS + while (!result.empty() && (result.back() == ',' || result.back() == '=')) { + result.pop_back(); + } + + return result; +} + +TString EncodeReceiptHandle(const TReceipt& receipt) { + return EncodeString(ProtobufToString(receipt)); +} + +TReceipt DecodeReceiptHandle(const TString& receipt) { + TString decoded = Base64DecodeUneven(receipt); + TReceipt ret; + Y_ENSURE(!decoded.empty()); + Y_ENSURE(ret.ParseFromString(decoded)); + return ret; +} + +// https://docs.aws.amazon.com/en_us/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html +static bool IsValidMessageBodyCharacter(wchar32 c) { + if (c < 0x20u) { + return c == 0x9u || c == 0xAu || c == 0xDu; + } else { + if (c <= 0xD7FFu) { + return true; + } else if (c >= 0xE000u && c <= 0xFFFDu) { + return true; + } else if (c >= 0x10000u && c <= 0x10FFFFu) { + return true; + } + } + return false; +} + +bool ValidateMessageBody(TStringBuf body, TString& errorDescription) { + const unsigned char* s = reinterpret_cast<const unsigned char*>(body.data()); + const unsigned char* const end = s + body.size(); + while (s != end) { + wchar32 c; + size_t clen; + const RECODE_RESULT result = SafeReadUTF8Char(c, clen, s, end); + if (result != RECODE_OK) { + errorDescription = TStringBuilder() << "nonunicode characters are not allowed"; + return false; + } + if (!IsValidMessageBodyCharacter(c)) { + errorDescription = TStringBuilder() << "character " << Hex(c) << " is not allowed"; + return false; + } + + s += clen; + } + return true; +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/helpers.h b/ydb/core/ymq/base/helpers.h index a8779c08bdb..42b27b83de7 100644 --- a/ydb/core/ymq/base/helpers.h +++ b/ydb/core/ymq/base/helpers.h @@ -1,22 +1,22 @@ -#pragma once +#pragma once #include <ydb/core/ymq/proto/records.pb.h> - -#include <util/generic/strbuf.h> - -namespace NKikimr::NSQS { - -// Validation of deduplication id, group id, receive request attempt id -bool IsAlphaNumAndPunctuation(TStringBuf str); - -// 
https://docs.aws.amazon.com/en_us/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html -bool ValidateMessageAttributeName(TStringBuf str, bool& hasYandexPrefix, bool allowYandexPrefix = false); - -bool ValidateQueueNameOrUserName(TStringBuf name); - -// Validation function for message body or string message attributes -bool ValidateMessageBody(TStringBuf body, TString& errorDescription); - -TString EncodeReceiptHandle(const TReceipt& receipt); -TReceipt DecodeReceiptHandle(const TString& receipt); - -} // namespace NKikimr::NSQS + +#include <util/generic/strbuf.h> + +namespace NKikimr::NSQS { + +// Validation of deduplication id, group id, receive request attempt id +bool IsAlphaNumAndPunctuation(TStringBuf str); + +// https://docs.aws.amazon.com/en_us/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html +bool ValidateMessageAttributeName(TStringBuf str, bool& hasYandexPrefix, bool allowYandexPrefix = false); + +bool ValidateQueueNameOrUserName(TStringBuf name); + +// Validation function for message body or string message attributes +bool ValidateMessageBody(TStringBuf body, TString& errorDescription); + +TString EncodeReceiptHandle(const TReceipt& receipt); +TReceipt DecodeReceiptHandle(const TString& receipt); + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/limits.h b/ydb/core/ymq/base/limits.h index b251b3ff62a..7c85b7a683b 100644 --- a/ydb/core/ymq/base/limits.h +++ b/ydb/core/ymq/base/limits.h @@ -3,11 +3,11 @@ #include <util/datetime/base.h> #include <util/system/defaults.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { namespace TLimits { - static constexpr size_t MinBatchSize = 1; - + static constexpr size_t MinBatchSize = 1; + static constexpr size_t MaxBatchSize = 10; static constexpr size_t MaxDelaySeconds = 900; @@ -18,17 +18,17 @@ namespace TLimits { static constexpr size_t MaxMessageSize = (256 * 1024); - static constexpr TDuration MaxMessageRetentionPeriod = TDuration::Days(14); + static constexpr TDuration MaxMessageRetentionPeriod = TDuration::Days(14); static constexpr size_t DelaySeconds = 0; static constexpr size_t VisibilityTimeout = 30; - - static constexpr TDuration MaxVisibilityTimeout = TDuration::Hours(12); + + static constexpr TDuration MaxVisibilityTimeout = TDuration::Hours(12); static constexpr size_t MinMaxReceiveCount = 1; static constexpr size_t MaxMaxReceiveCount = 1000; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/probes.cpp b/ydb/core/ymq/base/probes.cpp index 25bab250752..ed19adbb49b 100644 --- a/ydb/core/ymq/base/probes.cpp +++ b/ydb/core/ymq/base/probes.cpp @@ -1,3 +1,3 @@ -#include "probes.h" - -LWTRACE_DEFINE_PROVIDER(SQS_PROVIDER); +#include "probes.h" + +LWTRACE_DEFINE_PROVIDER(SQS_PROVIDER); diff --git a/ydb/core/ymq/base/probes.h b/ydb/core/ymq/base/probes.h index 0fa0a46ffc2..f0143d8f291 100644 --- a/ydb/core/ymq/base/probes.h +++ b/ydb/core/ymq/base/probes.h @@ -1,42 +1,42 @@ -#pragma once - +#pragma once + #include <library/cpp/lwtrace/all.h> - -#define SQS_PROVIDER(PROBE, EVENT, GROUPS, TYPES, NAMES) \ + +#define SQS_PROVIDER(PROBE, EVENT, GROUPS, TYPES, NAMES) \ PROBE(CreateLeader, GROUPS("SqsLeadersLifeTime"), \ - TYPES(TString, TString, TString), \ - NAMES("user", "queue", "reason")) \ + TYPES(TString, TString, TString), \ + NAMES("user", "queue", "reason")) \ PROBE(DestroyLeader, GROUPS("SqsLeadersLifeTime"), \ - TYPES(TString, TString, TString), \ - NAMES("user", "queue", "reason")) \ + TYPES(TString, TString, TString), \ 
+ NAMES("user", "queue", "reason")) \ PROBE(IncLeaderRef, GROUPS("SqsLeadersLifeTime"), \ - TYPES(TString, TString, TString), \ - NAMES("user", "queue", "referer")) \ + TYPES(TString, TString, TString), \ + NAMES("user", "queue", "referer")) \ PROBE(DecLeaderRef, GROUPS("SqsLeadersLifeTime"), \ - TYPES(TString), \ - NAMES("rerefer")) \ + TYPES(TString), \ + NAMES("rerefer")) \ PROBE(IncLeaderRefAlreadyHasRef, GROUPS("SqsLeadersLifeTime"), \ - TYPES(TString, TString, TString), \ - NAMES("user", "queue", "referer")) \ + TYPES(TString, TString, TString), \ + NAMES("user", "queue", "referer")) \ PROBE(DecLeaderRefNotInRefSet, GROUPS("SqsLeadersLifeTime"), \ - TYPES(TString), \ - NAMES("rerefer")) \ - \ - PROBE(QueueAttributesCacheMiss, GROUPS("SqsPerformance"), \ - TYPES(TString, TString, TString), \ - NAMES("user", "queue", "requestId")) \ - PROBE(QueueRequestCacheMiss, GROUPS("SqsPerformance"), \ - TYPES(TString, TString, TString, TString), \ - NAMES("user", "queue", "requestId", "requestType")) \ - PROBE(LoadInfly, GROUPS("SqsPerformance"), \ - TYPES(TString, TString, ui64, ui64), \ - NAMES("user", "queue", "shard", "count")) \ - PROBE(AddMessagesToInfly, GROUPS("SqsPerformance"), \ - TYPES(TString, TString, ui64, ui64), \ - NAMES("user", "queue", "shard", "count")) \ - PROBE(InflyInvalidation, GROUPS("SqsPerformance"), \ - TYPES(TString, TString, ui64, ui64, TString), \ - NAMES("user", "queue", "shard", "count", "reason")) \ - /**/ - -LWTRACE_DECLARE_PROVIDER(SQS_PROVIDER) + TYPES(TString), \ + NAMES("rerefer")) \ + \ + PROBE(QueueAttributesCacheMiss, GROUPS("SqsPerformance"), \ + TYPES(TString, TString, TString), \ + NAMES("user", "queue", "requestId")) \ + PROBE(QueueRequestCacheMiss, GROUPS("SqsPerformance"), \ + TYPES(TString, TString, TString, TString), \ + NAMES("user", "queue", "requestId", "requestType")) \ + PROBE(LoadInfly, GROUPS("SqsPerformance"), \ + TYPES(TString, TString, ui64, ui64), \ + NAMES("user", "queue", "shard", "count")) \ + PROBE(AddMessagesToInfly, GROUPS("SqsPerformance"), \ + TYPES(TString, TString, ui64, ui64), \ + NAMES("user", "queue", "shard", "count")) \ + PROBE(InflyInvalidation, GROUPS("SqsPerformance"), \ + TYPES(TString, TString, ui64, ui64, TString), \ + NAMES("user", "queue", "shard", "count", "reason")) \ + /**/ + +LWTRACE_DECLARE_PROVIDER(SQS_PROVIDER) diff --git a/ydb/core/ymq/base/query_id.h b/ydb/core/ymq/base/query_id.h index ad54d0c70d6..44c4ab9c33f 100644 --- a/ydb/core/ymq/base/query_id.h +++ b/ydb/core/ymq/base/query_id.h @@ -2,36 +2,36 @@ #include <util/system/defaults.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { -enum EQueryId { - DELETE_MESSAGE_ID = 0, - LOCK_GROUP_ID = 1, - READ_MESSAGE_ID = 2, - WRITE_MESSAGE_ID = 3, - PURGE_QUEUE_ID = 4, - CHANGE_VISIBILITY_ID = 5, - CLEANUP_DEDUPLICATION_ID = 6, - CLEANUP_READS_ID = 7, - LIST_QUEUES_ID = 8, - SET_QUEUE_ATTRIBUTES_ID = 9, - SET_RETENTION_ID = 10, - LOAD_MESSAGES_ID = 11, - INTERNAL_GET_QUEUE_ATTRIBUTES_ID = 12, - PURGE_QUEUE_STAGE2_ID = 13, - GET_MESSAGE_COUNT_METRIC_ID = 14, - GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID = 15, - GET_RETENTION_OFFSET_ID = 16, - LOAD_INFLY_ID = 17, - ADD_MESSAGES_TO_INFLY_ID = 18, - GET_STATE_ID = 19, +enum EQueryId { + DELETE_MESSAGE_ID = 0, + LOCK_GROUP_ID = 1, + READ_MESSAGE_ID = 2, + WRITE_MESSAGE_ID = 3, + PURGE_QUEUE_ID = 4, + CHANGE_VISIBILITY_ID = 5, + CLEANUP_DEDUPLICATION_ID = 6, + CLEANUP_READS_ID = 7, + LIST_QUEUES_ID = 8, + SET_QUEUE_ATTRIBUTES_ID = 9, + SET_RETENTION_ID = 10, + LOAD_MESSAGES_ID = 11, + 
INTERNAL_GET_QUEUE_ATTRIBUTES_ID = 12, + PURGE_QUEUE_STAGE2_ID = 13, + GET_MESSAGE_COUNT_METRIC_ID = 14, + GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID = 15, + GET_RETENTION_OFFSET_ID = 16, + LOAD_INFLY_ID = 17, + ADD_MESSAGES_TO_INFLY_ID = 18, + GET_STATE_ID = 19, LIST_DEAD_LETTER_SOURCE_QUEUES_ID = 20, LOAD_OR_REDRIVE_MESSAGE_ID = 21, READ_OR_REDRIVE_MESSAGE_ID = 22, - GET_USER_SETTINGS_ID = 23, - GET_QUEUES_LIST_ID = 24, + GET_USER_SETTINGS_ID = 23, + GET_QUEUES_LIST_ID = 24, - QUERY_VECTOR_SIZE, -}; - -} // namespace NKikimr::NSQS + QUERY_VECTOR_SIZE, +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/queue_attributes.cpp b/ydb/core/ymq/base/queue_attributes.cpp index 8c6436aba93..d3535c4286f 100644 --- a/ydb/core/ymq/base/queue_attributes.cpp +++ b/ydb/core/ymq/base/queue_attributes.cpp @@ -48,7 +48,7 @@ TQueueAttributes TQueueAttributes::FromAttributesAndConfig(const THashMap<TStrin result.ContentBasedDeduplication = false; } else { result.Error = &NErrors::INVALID_ATTRIBUTE_VALUE; - result.ErrorText = Sprintf(INVALID_PARAM_MSG " Valid values: true, false.", name.c_str()); + result.ErrorText = Sprintf(INVALID_PARAM_MSG " Valid values: true, false.", name.c_str()); break; } } else if (name == "FifoQueue" && isFifoQueue) { @@ -57,9 +57,9 @@ TQueueAttributes TQueueAttributes::FromAttributesAndConfig(const THashMap<TStrin } else { result.Error = &NErrors::INVALID_ATTRIBUTE_VALUE; if (value == "false") { - result.ErrorText = Sprintf(INVALID_PARAM_MSG " Reason: Modifying queue type is not supported.", name.c_str()); + result.ErrorText = Sprintf(INVALID_PARAM_MSG " Reason: Modifying queue type is not supported.", name.c_str()); } else { - result.ErrorText = Sprintf(INVALID_PARAM_MSG " Valid values: true, false.", name.c_str()); + result.ErrorText = Sprintf(INVALID_PARAM_MSG " Valid values: true, false.", name.c_str()); } break; } @@ -67,7 +67,7 @@ TQueueAttributes TQueueAttributes::FromAttributesAndConfig(const THashMap<TStrin result.RedrivePolicy = TRedrivePolicy::FromJson(value, config); if (result.RedrivePolicy.IsValid()) { if (*result.RedrivePolicy.TargetQueueName && isFifoQueue != result.RedrivePolicy.TargetQueueName->EndsWith(".fifo")) { - result.ErrorText = "Target dead letter queue should have the same type as source queue."; + result.ErrorText = "Target dead letter queue should have the same type as source queue."; } else { continue; } @@ -78,7 +78,7 @@ TQueueAttributes TQueueAttributes::FromAttributesAndConfig(const THashMap<TStrin break; } else { result.Error = &NErrors::INVALID_ATTRIBUTE_NAME; - result.ErrorText = Sprintf("Unknown Attribute %s.", name.c_str()); + result.ErrorText = Sprintf("Unknown Attribute %s.", name.c_str()); break; } } @@ -107,7 +107,7 @@ bool TQueueAttributes::TryParseLimitedValue(const TString& attrName, const TStri result = Nothing(); Error = &NErrors::INVALID_ATTRIBUTE_VALUE; - ErrorText = Sprintf(INVALID_PARAM_MSG " Valid values are from %" PRIu64 " to %" PRIu64 " both inclusive.", attrName.c_str(), allowedMinValue, allowedMaxValue); + ErrorText = Sprintf(INVALID_PARAM_MSG " Valid values are from %" PRIu64 " to %" PRIu64 " both inclusive.", attrName.c_str(), allowedMinValue, allowedMaxValue); return false; } diff --git a/ydb/core/ymq/base/queue_id.cpp b/ydb/core/ymq/base/queue_id.cpp index 0b93440a39f..5c68141fee0 100644 --- a/ydb/core/ymq/base/queue_id.cpp +++ b/ydb/core/ymq/base/queue_id.cpp @@ -2,7 +2,7 @@ #include <util/generic/string.h> #include <util/string/cast.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { // Queue id 
breakdown in bits [15][1][64][7][13]: // [15] - service id @@ -71,4 +71,4 @@ TString MakeQueueId(const ui16 serviceId, const ui64 uniqueNum, const TString& a return TQueueId(serviceId, uniqueNum, accountName).AsString(); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/queue_id.h b/ydb/core/ymq/base/queue_id.h index a2cac100e06..b9af0eee20b 100644 --- a/ydb/core/ymq/base/queue_id.h +++ b/ydb/core/ymq/base/queue_id.h @@ -2,8 +2,8 @@ #include <util/generic/string.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { TString MakeQueueId(const ui16 serviceId, const ui64 uniqueNum, const TString& accountName); -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/queue_path.h b/ydb/core/ymq/base/queue_path.h index baaa781b009..90f9442ab45 100644 --- a/ydb/core/ymq/base/queue_path.h +++ b/ydb/core/ymq/base/queue_path.h @@ -1,40 +1,40 @@ -#pragma once -#include <util/generic/string.h> -#include <util/string/join.h> - -namespace NKikimr::NSQS { - -struct TQueuePath { - TString Root; - TString UserName; - TString QueueName; +#pragma once +#include <util/generic/string.h> +#include <util/string/join.h> + +namespace NKikimr::NSQS { + +struct TQueuePath { + TString Root; + TString UserName; + TString QueueName; TString VersionSuffix; - - TQueuePath() - { } - - TQueuePath(const TString& root, - const TString& userName, - const TString& queueName, - const ui64 version = 0) - : Root(root) - , UserName(userName) - , QueueName(queueName) + + TQueuePath() + { } + + TQueuePath(const TString& root, + const TString& userName, + const TString& queueName, + const ui64 version = 0) + : Root(root) + , UserName(userName) + , QueueName(queueName) { if (version) { VersionSuffix = TString::Join("v", ToString(version)); } } - - operator TString() const { - return GetQueuePath(); - } - - TString GetQueuePath() const { - return Join("/", Root, UserName, QueueName); - } - - TString GetVersionedQueuePath() const { + + operator TString() const { + return GetQueuePath(); + } + + TString GetQueuePath() const { + return Join("/", Root, UserName, QueueName); + } + + TString GetVersionedQueuePath() const { auto tmp = GetQueuePath(); if (!VersionSuffix) { return tmp; @@ -43,13 +43,13 @@ struct TQueuePath { } } - TString GetUserPath() const { - return Join("/", Root, UserName); - } - - TString GetRootPath() const { - return Root; - } -}; - -} // namespace NKikimr::NSQS + TString GetUserPath() const { + return Join("/", Root, UserName); + } + + TString GetRootPath() const { + return Root; + } +}; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/secure_protobuf_printer.cpp b/ydb/core/ymq/base/secure_protobuf_printer.cpp index 55898466fb5..a5788e2c64c 100644 --- a/ydb/core/ymq/base/secure_protobuf_printer.cpp +++ b/ydb/core/ymq/base/secure_protobuf_printer.cpp @@ -1,59 +1,59 @@ -#include "secure_protobuf_printer.h" - +#include "secure_protobuf_printer.h" + #include <ydb/library/protobuf_printer/hide_field_printer.h> #include <ydb/library/protobuf_printer/protobuf_printer.h> #include <ydb/library/protobuf_printer/token_field_printer.h> - -#include <util/generic/strbuf.h> -#include <util/generic/singleton.h> - + +#include <util/generic/strbuf.h> +#include <util/generic/singleton.h> + #include <google/protobuf/message.h> #include <google/protobuf/text_format.h> - -namespace NKikimr::NSQS { - -namespace { - -class TSecureTextFormatPrinter : public TCustomizableTextFormatPrinter { - void RegisterSecureFieldPrinters() { - // User data - 
RegisterFieldValuePrinters<TSendMessageRequest, THideFieldValuePrinter>("MessageBody"); - RegisterFieldValuePrinters<TReceiveMessageResponse::TMessage, THideFieldValuePrinter>("Data"); - RegisterFieldValuePrinters<TMessageAttribute, THideFieldValuePrinter>("StringValue", "BinaryValue"); - - // Security - RegisterFieldValuePrinters<TCredentials, TTokenFieldValuePrinter>("OAuthToken", "TvmTicket", "StaticCreds"); - } - -public: - TSecureTextFormatPrinter() { - SetSingleLineMode(true); - SetUseUtf8StringEscaping(true); - RegisterSecureFieldPrinters(); - } - - TString Print(const ::google::protobuf::Message& msg) const { - TProtoStringType string; - PrintToString(msg, &string); - - // Copied from ShortUtf8DebugString() implementation. - // Single line mode currently might have an extra space at the end. - if (string.size() > 0 && string[string.size() - 1] == ' ') { - string.resize(string.size() - 1); - } - - return string; - } -}; - -} // namespace - -TString SecureShortUtf8DebugString(const NKikimrClient::TSqsRequest& msg) { - return Default<TSecureTextFormatPrinter>().Print(msg); -} - -TString SecureShortUtf8DebugString(const NKikimrClient::TSqsResponse& msg) { - return Default<TSecureTextFormatPrinter>().Print(msg); -} - -} // namespace NKikimr::NSQS + +namespace NKikimr::NSQS { + +namespace { + +class TSecureTextFormatPrinter : public TCustomizableTextFormatPrinter { + void RegisterSecureFieldPrinters() { + // User data + RegisterFieldValuePrinters<TSendMessageRequest, THideFieldValuePrinter>("MessageBody"); + RegisterFieldValuePrinters<TReceiveMessageResponse::TMessage, THideFieldValuePrinter>("Data"); + RegisterFieldValuePrinters<TMessageAttribute, THideFieldValuePrinter>("StringValue", "BinaryValue"); + + // Security + RegisterFieldValuePrinters<TCredentials, TTokenFieldValuePrinter>("OAuthToken", "TvmTicket", "StaticCreds"); + } + +public: + TSecureTextFormatPrinter() { + SetSingleLineMode(true); + SetUseUtf8StringEscaping(true); + RegisterSecureFieldPrinters(); + } + + TString Print(const ::google::protobuf::Message& msg) const { + TProtoStringType string; + PrintToString(msg, &string); + + // Copied from ShortUtf8DebugString() implementation. + // Single line mode currently might have an extra space at the end. 
+ if (string.size() > 0 && string[string.size() - 1] == ' ') { + string.resize(string.size() - 1); + } + + return string; + } +}; + +} // namespace + +TString SecureShortUtf8DebugString(const NKikimrClient::TSqsRequest& msg) { + return Default<TSecureTextFormatPrinter>().Print(msg); +} + +TString SecureShortUtf8DebugString(const NKikimrClient::TSqsResponse& msg) { + return Default<TSecureTextFormatPrinter>().Print(msg); +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/secure_protobuf_printer.h b/ydb/core/ymq/base/secure_protobuf_printer.h index 39a1b4787aa..07642d2b682 100644 --- a/ydb/core/ymq/base/secure_protobuf_printer.h +++ b/ydb/core/ymq/base/secure_protobuf_printer.h @@ -1,11 +1,11 @@ -#pragma once -#include <util/generic/string.h> - +#pragma once +#include <util/generic/string.h> + #include <ydb/core/protos/msgbus.pb.h> - -namespace NKikimr::NSQS { - -TString SecureShortUtf8DebugString(const NKikimrClient::TSqsRequest& msg); -TString SecureShortUtf8DebugString(const NKikimrClient::TSqsResponse& msg); - -} // namespace NKikimr::NSQS + +namespace NKikimr::NSQS { + +TString SecureShortUtf8DebugString(const NKikimrClient::TSqsRequest& msg); +TString SecureShortUtf8DebugString(const NKikimrClient::TSqsResponse& msg); + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/security.h b/ydb/core/ymq/base/security.h index 764be96265c..a2136c70a6a 100644 --- a/ydb/core/ymq/base/security.h +++ b/ydb/core/ymq/base/security.h @@ -3,7 +3,7 @@ #include <util/generic/string.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { template <typename TReq, typename TCreds> TString ExtractSecurityToken(const TReq& request) { @@ -27,4 +27,4 @@ TString ExtractSecurityToken(const TReq& request) { return {}; } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/table_info.h b/ydb/core/ymq/base/table_info.h index 2ea37f8f0ed..a1d193feb8b 100644 --- a/ydb/core/ymq/base/table_info.h +++ b/ydb/core/ymq/base/table_info.h @@ -4,7 +4,7 @@ #include <util/generic/vector.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { struct TColumn { TString Name; @@ -12,10 +12,10 @@ struct TColumn { bool Key; size_t Partitions; - TColumn(const TString& name, - const NScheme::TTypeId type, - const bool key = false, - const size_t partitions = 0) + TColumn(const TString& name, + const NScheme::TTypeId type, + const bool key = false, + const size_t partitions = 0) : Name(name) , TypeId(type) , Key(key) @@ -26,61 +26,61 @@ struct TColumn { struct TTable { TString Name; TVector<TColumn> Columns; - bool InMemory = false; - bool Sequential = false; - bool Small = false; - bool OnePartitionPerShard = false; // <queue_name>/State table - has one datashard per SQS-shard - i64 Shard = 0; + bool InMemory = false; + bool Sequential = false; + bool Small = false; + bool OnePartitionPerShard = false; // <queue_name>/State table - has one datashard per SQS-shard + i64 Shard = 0; bool HasLeaderTablet = false; - bool EnableAutosplit = false; - ui64 SizeToSplit = 0; + bool EnableAutosplit = false; + ui64 SizeToSplit = 0; - TTable() = default; + TTable() = default; - TTable(const TString& name) + TTable(const TString& name) : Name(name) { } - TTable& SetColumns(const TVector<TColumn>& columns) { + TTable& SetColumns(const TVector<TColumn>& columns) { Columns = columns; return *this; } - TTable& SetInMemory(bool value) { + TTable& SetInMemory(bool value) { InMemory = value; return *this; } - TTable& SetSequential(bool value) { + TTable& SetSequential(bool value) { Sequential = 
value; return *this; } - TTable& SetSmall(bool value) { + TTable& SetSmall(bool value) { Small = value; return *this; } - TTable& SetOnePartitionPerShard(bool value) { - OnePartitionPerShard = value; - return *this; - } - - TTable& SetShard(i64 value) { + TTable& SetOnePartitionPerShard(bool value) { + OnePartitionPerShard = value; + return *this; + } + + TTable& SetShard(i64 value) { Shard = value; return *this; } - + TTable& SetHasLeaderTablet(bool value = true) { HasLeaderTablet = value; - return *this; - } - - TTable& SetAutosplit(bool enableAutosplit, ui64 sizeToSplit) { - EnableAutosplit = enableAutosplit; - SizeToSplit = sizeToSplit; - return *this; - } + return *this; + } + + TTable& SetAutosplit(bool enableAutosplit, ui64 sizeToSplit) { + EnableAutosplit = enableAutosplit; + SizeToSplit = sizeToSplit; + return *this; + } }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/ut/action_ut.cpp b/ydb/core/ymq/base/ut/action_ut.cpp index 04298436fe5..6aeab136c51 100644 --- a/ydb/core/ymq/base/ut/action_ut.cpp +++ b/ydb/core/ymq/base/ut/action_ut.cpp @@ -1,74 +1,74 @@ #include <ydb/core/ymq/base/action.h> - + #include <library/cpp/testing/unittest/registar.h> - -namespace NKikimr::NSQS { - -Y_UNIT_TEST_SUITE(ActionParsingTest) { - Y_UNIT_TEST(ToAndFromStringAreConsistent) { - EAction first = static_cast<EAction>(EAction::Unknown + 1); - for (EAction i = first; i < EAction::ActionsArraySize; i = static_cast<EAction>(i + 1)) { - UNIT_ASSERT_STRINGS_EQUAL(ActionToString(i), ActionToString(ActionFromString(ActionToString(i)))); - } - } - - Y_UNIT_TEST(ActionsForQueueTest) { - EAction first = static_cast<EAction>(EAction::Unknown + 1); - for (EAction i = first; i < EAction::ActionsArraySize; i = static_cast<EAction>(i + 1)) { - UNIT_ASSERT_C(IsActionForQueue(i) || IsActionForUser(i), i); - UNIT_ASSERT_C(!(IsActionForQueue(i) && IsActionForUser(i)), i); - } - } - - Y_UNIT_TEST(BatchActionTest) { - EAction first = static_cast<EAction>(EAction::Unknown + 1); - for (EAction i = first; i < EAction::ActionsArraySize; i = static_cast<EAction>(i + 1)) { - const TString& name = ActionToString(i); - const EAction nonBatch = GetNonBatchAction(i); - if (TStringBuf(name).EndsWith("Batch")) { - UNIT_ASSERT_C(IsBatchAction(i), i); - UNIT_ASSERT_UNEQUAL_C(i, nonBatch, i); - UNIT_ASSERT_C(TStringBuf(name).StartsWith(ActionToString(nonBatch)), i); - } else { - UNIT_ASSERT_C(!IsBatchAction(i), i); - UNIT_ASSERT_EQUAL_C(i, nonBatch, i); - } - } - } - - Y_UNIT_TEST(ActionsForMessageTest) { - UNIT_ASSERT(IsActionForMessage(EAction::SendMessage)); - UNIT_ASSERT(IsActionForMessage(EAction::SendMessageBatch)); - UNIT_ASSERT(IsActionForMessage(EAction::DeleteMessage)); - UNIT_ASSERT(IsActionForMessage(EAction::DeleteMessageBatch)); - UNIT_ASSERT(IsActionForMessage(EAction::ReceiveMessage)); - UNIT_ASSERT(IsActionForMessage(EAction::ChangeMessageVisibility)); - UNIT_ASSERT(IsActionForMessage(EAction::ChangeMessageVisibilityBatch)); - - UNIT_ASSERT(!IsActionForMessage(EAction::CreateQueue)); - UNIT_ASSERT(!IsActionForMessage(EAction::DeleteQueue)); - UNIT_ASSERT(!IsActionForMessage(EAction::PurgeQueue)); - UNIT_ASSERT(!IsActionForMessage(EAction::PurgeQueueBatch)); - } - - Y_UNIT_TEST(FastActionsTest) { - UNIT_ASSERT(IsFastAction(EAction::SendMessage)); - UNIT_ASSERT(IsFastAction(EAction::SendMessageBatch)); - UNIT_ASSERT(IsFastAction(EAction::DeleteMessage)); - UNIT_ASSERT(IsFastAction(EAction::DeleteMessageBatch)); - UNIT_ASSERT(IsFastAction(EAction::ReceiveMessage)); - 
UNIT_ASSERT(IsFastAction(EAction::ChangeMessageVisibility)); - UNIT_ASSERT(IsFastAction(EAction::ChangeMessageVisibilityBatch)); - UNIT_ASSERT(IsFastAction(EAction::GetQueueAttributes)); - UNIT_ASSERT(IsFastAction(EAction::GetQueueAttributesBatch)); - UNIT_ASSERT(IsFastAction(EAction::GetQueueUrl)); - UNIT_ASSERT(IsFastAction(EAction::SetQueueAttributes)); - - UNIT_ASSERT(!IsFastAction(EAction::CreateQueue)); - UNIT_ASSERT(!IsFastAction(EAction::DeleteQueue)); - UNIT_ASSERT(!IsFastAction(EAction::CreateUser)); - UNIT_ASSERT(!IsFastAction(EAction::DeleteUser)); - } -} - -} // namespace NKikimr::NSQS + +namespace NKikimr::NSQS { + +Y_UNIT_TEST_SUITE(ActionParsingTest) { + Y_UNIT_TEST(ToAndFromStringAreConsistent) { + EAction first = static_cast<EAction>(EAction::Unknown + 1); + for (EAction i = first; i < EAction::ActionsArraySize; i = static_cast<EAction>(i + 1)) { + UNIT_ASSERT_STRINGS_EQUAL(ActionToString(i), ActionToString(ActionFromString(ActionToString(i)))); + } + } + + Y_UNIT_TEST(ActionsForQueueTest) { + EAction first = static_cast<EAction>(EAction::Unknown + 1); + for (EAction i = first; i < EAction::ActionsArraySize; i = static_cast<EAction>(i + 1)) { + UNIT_ASSERT_C(IsActionForQueue(i) || IsActionForUser(i), i); + UNIT_ASSERT_C(!(IsActionForQueue(i) && IsActionForUser(i)), i); + } + } + + Y_UNIT_TEST(BatchActionTest) { + EAction first = static_cast<EAction>(EAction::Unknown + 1); + for (EAction i = first; i < EAction::ActionsArraySize; i = static_cast<EAction>(i + 1)) { + const TString& name = ActionToString(i); + const EAction nonBatch = GetNonBatchAction(i); + if (TStringBuf(name).EndsWith("Batch")) { + UNIT_ASSERT_C(IsBatchAction(i), i); + UNIT_ASSERT_UNEQUAL_C(i, nonBatch, i); + UNIT_ASSERT_C(TStringBuf(name).StartsWith(ActionToString(nonBatch)), i); + } else { + UNIT_ASSERT_C(!IsBatchAction(i), i); + UNIT_ASSERT_EQUAL_C(i, nonBatch, i); + } + } + } + + Y_UNIT_TEST(ActionsForMessageTest) { + UNIT_ASSERT(IsActionForMessage(EAction::SendMessage)); + UNIT_ASSERT(IsActionForMessage(EAction::SendMessageBatch)); + UNIT_ASSERT(IsActionForMessage(EAction::DeleteMessage)); + UNIT_ASSERT(IsActionForMessage(EAction::DeleteMessageBatch)); + UNIT_ASSERT(IsActionForMessage(EAction::ReceiveMessage)); + UNIT_ASSERT(IsActionForMessage(EAction::ChangeMessageVisibility)); + UNIT_ASSERT(IsActionForMessage(EAction::ChangeMessageVisibilityBatch)); + + UNIT_ASSERT(!IsActionForMessage(EAction::CreateQueue)); + UNIT_ASSERT(!IsActionForMessage(EAction::DeleteQueue)); + UNIT_ASSERT(!IsActionForMessage(EAction::PurgeQueue)); + UNIT_ASSERT(!IsActionForMessage(EAction::PurgeQueueBatch)); + } + + Y_UNIT_TEST(FastActionsTest) { + UNIT_ASSERT(IsFastAction(EAction::SendMessage)); + UNIT_ASSERT(IsFastAction(EAction::SendMessageBatch)); + UNIT_ASSERT(IsFastAction(EAction::DeleteMessage)); + UNIT_ASSERT(IsFastAction(EAction::DeleteMessageBatch)); + UNIT_ASSERT(IsFastAction(EAction::ReceiveMessage)); + UNIT_ASSERT(IsFastAction(EAction::ChangeMessageVisibility)); + UNIT_ASSERT(IsFastAction(EAction::ChangeMessageVisibilityBatch)); + UNIT_ASSERT(IsFastAction(EAction::GetQueueAttributes)); + UNIT_ASSERT(IsFastAction(EAction::GetQueueAttributesBatch)); + UNIT_ASSERT(IsFastAction(EAction::GetQueueUrl)); + UNIT_ASSERT(IsFastAction(EAction::SetQueueAttributes)); + + UNIT_ASSERT(!IsFastAction(EAction::CreateQueue)); + UNIT_ASSERT(!IsFastAction(EAction::DeleteQueue)); + UNIT_ASSERT(!IsFastAction(EAction::CreateUser)); + UNIT_ASSERT(!IsFastAction(EAction::DeleteUser)); + } +} + +} // namespace NKikimr::NSQS diff --git 
a/ydb/core/ymq/base/ut/counters_ut.cpp b/ydb/core/ymq/base/ut/counters_ut.cpp index 5ff3d26e36e..a08b8c54765 100644 --- a/ydb/core/ymq/base/ut/counters_ut.cpp +++ b/ydb/core/ymq/base/ut/counters_ut.cpp @@ -1,200 +1,200 @@ #include <ydb/core/ymq/base/counters.h> - + #include <ydb/core/base/path.h> #include <ydb/core/protos/config.pb.h> - + #include <library/cpp/testing/unittest/registar.h> - -#include <util/string/subst.h> - -namespace NKikimr::NSQS { - -TString CountersString(const TIntrusivePtr<NMonitoring::TDynamicCounters>& root) { - TStringStream ss; - root->OutputPlainText(ss); - return ss.Str(); -} - -std::vector<std::pair<TString, TString>> ParseCounterPath(const TString& path) { - const auto pathComponents = SplitPath(path); - std::vector<std::pair<TString, TString>> ret(pathComponents.size()); - for (size_t i = 0; i < pathComponents.size(); ++i) { - const size_t pos = pathComponents[i].find('='); - if (pos == TString::npos) { - ret[i].first = DEFAULT_COUNTER_NAME; - ret[i].second = pathComponents[i]; - } else { - ret[i].first = pathComponents[i].substr(0, pos); - ret[i].second = pathComponents[i].substr(pos + 1); - } - } - return ret; -} - -// Gets counters by path. -// Path can be: -// "counters" - simple counter. -// "path/counter" - simple path. -// "user=my_user/queue=my_queue/TransactionCount" - with nondefault names. -void AssertCounterValue(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters, const TString& path, i64 expectedValue) { - const auto pathComponents = ParseCounterPath(path); - UNIT_ASSERT_GT(pathComponents.size(), 0); - TIntrusivePtr<NMonitoring::TDynamicCounters> parent = counters; - for (size_t i = 0; i < pathComponents.size() - 1; ++i) { - parent = parent->FindSubgroup(pathComponents[i].first, pathComponents[i].second); - UNIT_ASSERT_C(parent, "Subgroup \"" << pathComponents[i].first << "=" << pathComponents[i].second << "\" was not found. 
Level: " << i); - } - auto counter = parent->GetNamedCounter(pathComponents.back().first, pathComponents.back().second); - UNIT_ASSERT_VALUES_EQUAL_C(counter->Val(), expectedValue, - "Name: \"" << path - << "\", Expected value: " << expectedValue - << ", Actual value: " << counter->Val() - << "\nCounters string:\n" << CountersString(counters)); -} - -void AssertCounterValues(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters, const std::vector<std::pair<TString, i64>>& expectedValues) { - for (const auto& [path, expectedValue] : expectedValues) { - AssertCounterValue(counters, path, expectedValue); - } -} - -Y_UNIT_TEST_SUITE(LazyCounterTest) { - Y_UNIT_TEST(LazyCounterTest) { - TIntrusivePtr<NMonitoring::TDynamicCounters> root = new NMonitoring::TDynamicCounters(); - TLazyCachedCounter counter, counter2, counter3; - counter.Init(root, ELifetime::Persistent, EValueType::Absolute, "A", ELaziness::OnDemand); - counter3.Init(root, ELifetime::Persistent, EValueType::Absolute, "B", ELaziness::OnStart); - UNIT_ASSERT_STRINGS_EQUAL(CountersString(root), "sensor=B: 0\n"); - ++*counter; - ++*counter2; - UNIT_ASSERT_STRINGS_EQUAL(CountersString(root), "sensor=A: 1\nsensor=B: 0\n"); - } - - void AggregationTest(ELaziness lazy) { - TIntrusivePtr<NMonitoring::TDynamicCounters> counters = new NMonitoring::TDynamicCounters(); - TLazyCachedCounter parent; - parent.Init(counters, ELifetime::Persistent, EValueType::Absolute, "parent", lazy); - - TLazyCachedCounter child1; - child1.Init(counters, ELifetime::Persistent, EValueType::Absolute, "child1", lazy); - child1.SetAggregatedParent(&parent); - - TLazyCachedCounter child2; - child2.Init(counters, ELifetime::Persistent, EValueType::Absolute, "child2", lazy); - child2.SetAggregatedParent(&child1); - - TLazyCachedCounter child3; - child3.Init(counters, ELifetime::Persistent, EValueType::Absolute, "child3", lazy); - child3.SetAggregatedParent(&child1); - - ++*child3; - AssertCounterValues(counters, - { - { "parent", 1 }, - { "child1", 1 }, - { "child3", 1 }, - }); - if (lazy == ELaziness::OnDemand) { - UNIT_ASSERT(CountersString(counters).find("child2") == TString::npos); - } else { - UNIT_ASSERT_STRING_CONTAINS(CountersString(counters), "child2"); - AssertCounterValue(counters, "child2", 0); - } - - child3->Inc(); - AssertCounterValues(counters, - { - { "parent", 2 }, - { "child1", 2 }, - { "child3", 2 }, - }); - - *child2 = 10; - AssertCounterValues(counters, - { - { "parent", 12 }, - { "child1", 12 }, - { "child2", 10 }, - { "child3", 2 }, - }); - - --*child2; - AssertCounterValues(counters, - { - { "parent", 11 }, - { "child1", 11 }, - { "child2", 9 }, - { "child3", 2 }, - }); - - *child2 += -3; - AssertCounterValues(counters, - { - { "parent", 8 }, - { "child1", 8 }, - { "child2", 6 }, - { "child3", 2 }, - }); - - ++*child1; - AssertCounterValues(counters, - { - { "parent", 9 }, - { "child1", 9 }, - { "child2", 6 }, - { "child3", 2 }, - }); - - child3->Add(5); - AssertCounterValues(counters, - { - { "parent", 14 }, - { "child1", 14 }, - { "child2", 6 }, - { "child3", 7 }, - }); - } - - Y_UNIT_TEST(AggregationLazyTest) { - AggregationTest(ELaziness::OnDemand); - } - - Y_UNIT_TEST(AggregationNonLazyTest) { - AggregationTest(ELaziness::OnStart); - } - - Y_UNIT_TEST(HistogramAggregationTest) { - TIntrusivePtr<NMonitoring::TDynamicCounters> counters = new NMonitoring::TDynamicCounters(); - const NMonitoring::TBucketBounds buckets = { 1, 3, 5 }; - TLazyCachedHistogram parent; - parent.Init(counters, ELifetime::Persistent, buckets, "parent", 
ELaziness::OnStart); - - TLazyCachedHistogram child; - child.Init(counters, ELifetime::Persistent, buckets, "child", ELaziness::OnStart); - child.SetAggregatedParent(&parent); - - child->Collect(2); - parent->Collect(10); - - const auto parentSnapshot = counters->GetHistogram("parent", NMonitoring::ExplicitHistogram(buckets))->Snapshot(); - const auto childSnapshot = counters->GetHistogram("child", NMonitoring::ExplicitHistogram(buckets))->Snapshot(); - - UNIT_ASSERT_VALUES_EQUAL(parentSnapshot->Count(), 4); - UNIT_ASSERT_VALUES_EQUAL(childSnapshot->Count(), 4); - - UNIT_ASSERT_VALUES_EQUAL(parentSnapshot->Value(0), 0); - UNIT_ASSERT_VALUES_EQUAL(parentSnapshot->Value(1), 1); - UNIT_ASSERT_VALUES_EQUAL(parentSnapshot->Value(2), 0); - UNIT_ASSERT_VALUES_EQUAL(parentSnapshot->Value(3), 1); - - UNIT_ASSERT_VALUES_EQUAL(childSnapshot->Value(0), 0); - UNIT_ASSERT_VALUES_EQUAL(childSnapshot->Value(1), 1); - UNIT_ASSERT_VALUES_EQUAL(childSnapshot->Value(2), 0); - UNIT_ASSERT_VALUES_EQUAL(childSnapshot->Value(3), 0); - } -} - -Y_UNIT_TEST_SUITE(UserCountersTest) { + +#include <util/string/subst.h> + +namespace NKikimr::NSQS { + +TString CountersString(const TIntrusivePtr<NMonitoring::TDynamicCounters>& root) { + TStringStream ss; + root->OutputPlainText(ss); + return ss.Str(); +} + +std::vector<std::pair<TString, TString>> ParseCounterPath(const TString& path) { + const auto pathComponents = SplitPath(path); + std::vector<std::pair<TString, TString>> ret(pathComponents.size()); + for (size_t i = 0; i < pathComponents.size(); ++i) { + const size_t pos = pathComponents[i].find('='); + if (pos == TString::npos) { + ret[i].first = DEFAULT_COUNTER_NAME; + ret[i].second = pathComponents[i]; + } else { + ret[i].first = pathComponents[i].substr(0, pos); + ret[i].second = pathComponents[i].substr(pos + 1); + } + } + return ret; +} + +// Gets counters by path. +// Path can be: +// "counters" - simple counter. +// "path/counter" - simple path. +// "user=my_user/queue=my_queue/TransactionCount" - with nondefault names. +void AssertCounterValue(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters, const TString& path, i64 expectedValue) { + const auto pathComponents = ParseCounterPath(path); + UNIT_ASSERT_GT(pathComponents.size(), 0); + TIntrusivePtr<NMonitoring::TDynamicCounters> parent = counters; + for (size_t i = 0; i < pathComponents.size() - 1; ++i) { + parent = parent->FindSubgroup(pathComponents[i].first, pathComponents[i].second); + UNIT_ASSERT_C(parent, "Subgroup \"" << pathComponents[i].first << "=" << pathComponents[i].second << "\" was not found. 
Level: " << i); + } + auto counter = parent->GetNamedCounter(pathComponents.back().first, pathComponents.back().second); + UNIT_ASSERT_VALUES_EQUAL_C(counter->Val(), expectedValue, + "Name: \"" << path + << "\", Expected value: " << expectedValue + << ", Actual value: " << counter->Val() + << "\nCounters string:\n" << CountersString(counters)); +} + +void AssertCounterValues(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters, const std::vector<std::pair<TString, i64>>& expectedValues) { + for (const auto& [path, expectedValue] : expectedValues) { + AssertCounterValue(counters, path, expectedValue); + } +} + +Y_UNIT_TEST_SUITE(LazyCounterTest) { + Y_UNIT_TEST(LazyCounterTest) { + TIntrusivePtr<NMonitoring::TDynamicCounters> root = new NMonitoring::TDynamicCounters(); + TLazyCachedCounter counter, counter2, counter3; + counter.Init(root, ELifetime::Persistent, EValueType::Absolute, "A", ELaziness::OnDemand); + counter3.Init(root, ELifetime::Persistent, EValueType::Absolute, "B", ELaziness::OnStart); + UNIT_ASSERT_STRINGS_EQUAL(CountersString(root), "sensor=B: 0\n"); + ++*counter; + ++*counter2; + UNIT_ASSERT_STRINGS_EQUAL(CountersString(root), "sensor=A: 1\nsensor=B: 0\n"); + } + + void AggregationTest(ELaziness lazy) { + TIntrusivePtr<NMonitoring::TDynamicCounters> counters = new NMonitoring::TDynamicCounters(); + TLazyCachedCounter parent; + parent.Init(counters, ELifetime::Persistent, EValueType::Absolute, "parent", lazy); + + TLazyCachedCounter child1; + child1.Init(counters, ELifetime::Persistent, EValueType::Absolute, "child1", lazy); + child1.SetAggregatedParent(&parent); + + TLazyCachedCounter child2; + child2.Init(counters, ELifetime::Persistent, EValueType::Absolute, "child2", lazy); + child2.SetAggregatedParent(&child1); + + TLazyCachedCounter child3; + child3.Init(counters, ELifetime::Persistent, EValueType::Absolute, "child3", lazy); + child3.SetAggregatedParent(&child1); + + ++*child3; + AssertCounterValues(counters, + { + { "parent", 1 }, + { "child1", 1 }, + { "child3", 1 }, + }); + if (lazy == ELaziness::OnDemand) { + UNIT_ASSERT(CountersString(counters).find("child2") == TString::npos); + } else { + UNIT_ASSERT_STRING_CONTAINS(CountersString(counters), "child2"); + AssertCounterValue(counters, "child2", 0); + } + + child3->Inc(); + AssertCounterValues(counters, + { + { "parent", 2 }, + { "child1", 2 }, + { "child3", 2 }, + }); + + *child2 = 10; + AssertCounterValues(counters, + { + { "parent", 12 }, + { "child1", 12 }, + { "child2", 10 }, + { "child3", 2 }, + }); + + --*child2; + AssertCounterValues(counters, + { + { "parent", 11 }, + { "child1", 11 }, + { "child2", 9 }, + { "child3", 2 }, + }); + + *child2 += -3; + AssertCounterValues(counters, + { + { "parent", 8 }, + { "child1", 8 }, + { "child2", 6 }, + { "child3", 2 }, + }); + + ++*child1; + AssertCounterValues(counters, + { + { "parent", 9 }, + { "child1", 9 }, + { "child2", 6 }, + { "child3", 2 }, + }); + + child3->Add(5); + AssertCounterValues(counters, + { + { "parent", 14 }, + { "child1", 14 }, + { "child2", 6 }, + { "child3", 7 }, + }); + } + + Y_UNIT_TEST(AggregationLazyTest) { + AggregationTest(ELaziness::OnDemand); + } + + Y_UNIT_TEST(AggregationNonLazyTest) { + AggregationTest(ELaziness::OnStart); + } + + Y_UNIT_TEST(HistogramAggregationTest) { + TIntrusivePtr<NMonitoring::TDynamicCounters> counters = new NMonitoring::TDynamicCounters(); + const NMonitoring::TBucketBounds buckets = { 1, 3, 5 }; + TLazyCachedHistogram parent; + parent.Init(counters, ELifetime::Persistent, buckets, "parent", 
ELaziness::OnStart); + + TLazyCachedHistogram child; + child.Init(counters, ELifetime::Persistent, buckets, "child", ELaziness::OnStart); + child.SetAggregatedParent(&parent); + + child->Collect(2); + parent->Collect(10); + + const auto parentSnapshot = counters->GetHistogram("parent", NMonitoring::ExplicitHistogram(buckets))->Snapshot(); + const auto childSnapshot = counters->GetHistogram("child", NMonitoring::ExplicitHistogram(buckets))->Snapshot(); + + UNIT_ASSERT_VALUES_EQUAL(parentSnapshot->Count(), 4); + UNIT_ASSERT_VALUES_EQUAL(childSnapshot->Count(), 4); + + UNIT_ASSERT_VALUES_EQUAL(parentSnapshot->Value(0), 0); + UNIT_ASSERT_VALUES_EQUAL(parentSnapshot->Value(1), 1); + UNIT_ASSERT_VALUES_EQUAL(parentSnapshot->Value(2), 0); + UNIT_ASSERT_VALUES_EQUAL(parentSnapshot->Value(3), 1); + + UNIT_ASSERT_VALUES_EQUAL(childSnapshot->Value(0), 0); + UNIT_ASSERT_VALUES_EQUAL(childSnapshot->Value(1), 1); + UNIT_ASSERT_VALUES_EQUAL(childSnapshot->Value(2), 0); + UNIT_ASSERT_VALUES_EQUAL(childSnapshot->Value(3), 0); + } +} + +Y_UNIT_TEST_SUITE(UserCountersTest) { #define ASSERT_STR_COUPLE_CONTAINS(string1, string2, what) \ UNIT_ASSERT_STRING_CONTAINS(string1, what); \ UNIT_ASSERT_STRING_CONTAINS(string2, what); @@ -215,37 +215,37 @@ Y_UNIT_TEST_SUITE(UserCountersTest) { ASSERT_STR_COUPLE_DONT_CONTAIN(CountersString(core), CountersString(ymqCounters), TString("user=") + user); \ ASSERT_STR_COUPLE_DONT_CONTAIN(CountersString(core), CountersString(ymqCounters), TString("cloud=") + user); - Y_UNIT_TEST(DisableCountersTest) { - NKikimrConfig::TSqsConfig cfg; - cfg.SetCreateLazyCounters(false); - TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); + Y_UNIT_TEST(DisableCountersTest) { + NKikimrConfig::TSqsConfig cfg; + cfg.SetCreateLazyCounters(false); + TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); TIntrusivePtr<NMonitoring::TDynamicCounters> ymqCounters = new NMonitoring::TDynamicCounters(); TIntrusivePtr<TUserCounters> user1 = new TUserCounters(cfg, core, ymqCounters, nullptr, "user1", nullptr); TIntrusivePtr<TUserCounters> user2 = new TUserCounters(cfg, core, ymqCounters, nullptr, "user2", nullptr); ASSERT_USER_PRESENT("user1"); ASSERT_USER_PRESENT("user2"); - - user1->DisableCounters(false); + + user1->DisableCounters(false); ASSERT_USER_PRESENT("user1"); - - user1->DisableCounters(true); + + user1->DisableCounters(true); TString sqsCntrText = CountersString(core); TString ymqCntrText = CountersString(ymqCounters); ASSERT_USER_ABSENT("user1"); ASSERT_USER_PRESENT("user2"); - - // again - user1->DisableCounters(true); + + // again + user1->DisableCounters(true); sqsCntrText = CountersString(core); ymqCntrText = CountersString(ymqCounters); ASSERT_USER_ABSENT("user1"); ASSERT_USER_PRESENT("user2"); - - *user1->RequestTimeouts = 1; - *user2->RequestTimeouts = 2; - - // return back - user1->DisableCounters(false); + + *user1->RequestTimeouts = 1; + *user2->RequestTimeouts = 2; + + // return back + user1->DisableCounters(false); sqsCntrText = CountersString(core); ymqCntrText = CountersString(ymqCounters); ASSERT_USER_PRESENT("user1"); @@ -254,83 +254,83 @@ Y_UNIT_TEST_SUITE(UserCountersTest) { ASSERT_FIRST_OF_COUPLE_CONTAINS(sqsCntrText, ymqCntrText, "RequestTimeouts: 2"); ASSERT_FIRST_OF_COUPLE_CONTAINS(sqsCntrText, ymqCntrText, "RequestTimeouts: 2"); ASSERT_FIRST_OF_COUPLE_CONTAINS(sqsCntrText, ymqCntrText, "RequestTimeouts: 0"); - - // again - *user1->RequestTimeouts = 3; - - user1->DisableCounters(false); + + // again + 
*user1->RequestTimeouts = 3; + + user1->DisableCounters(false); sqsCntrText = CountersString(core); ymqCntrText = CountersString(ymqCounters); ASSERT_USER_PRESENT("user1"); ASSERT_USER_PRESENT("user2"); ASSERT_FIRST_OF_COUPLE_CONTAINS(sqsCntrText, ymqCntrText, "RequestTimeouts: 2"); ASSERT_FIRST_OF_COUPLE_CONTAINS(sqsCntrText, ymqCntrText, "RequestTimeouts: 3"); - } - - Y_UNIT_TEST(RemoveUserCountersTest) { - NKikimrConfig::TSqsConfig cfg; - cfg.SetCreateLazyCounters(false); - TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); + } + + Y_UNIT_TEST(RemoveUserCountersTest) { + NKikimrConfig::TSqsConfig cfg; + cfg.SetCreateLazyCounters(false); + TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); TIntrusivePtr<NMonitoring::TDynamicCounters> ymqCounters = new NMonitoring::TDynamicCounters(); - + TIntrusivePtr<TUserCounters> user = new TUserCounters(cfg, core, ymqCounters, nullptr, "my_user", nullptr); ASSERT_USER_PRESENT("my_user"); - user->RemoveCounters(); + user->RemoveCounters(); ASSERT_USER_ABSENT("my_user"); - } - - Y_UNIT_TEST(CountersAggregationTest) { - NKikimrConfig::TSqsConfig cfg; - cfg.SetCreateLazyCounters(false); - TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); + } + + Y_UNIT_TEST(CountersAggregationTest) { + NKikimrConfig::TSqsConfig cfg; + cfg.SetCreateLazyCounters(false); + TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); TIntrusivePtr<NMonitoring::TDynamicCounters> ymqCounters = new NMonitoring::TDynamicCounters(); TIntrusivePtr<TUserCounters> total = new TUserCounters(cfg, core, ymqCounters, nullptr, TOTAL_COUNTER_LABEL, nullptr); - total->ShowDetailedCounters(TInstant::Max()); + total->ShowDetailedCounters(TInstant::Max()); TIntrusivePtr<TUserCounters> user = new TUserCounters(cfg, core, ymqCounters, nullptr, "my_user", total); - UNIT_ASSERT_STRING_CONTAINS(CountersString(core), "user=my_user"); - UNIT_ASSERT_STRING_CONTAINS(CountersString(core), "user=total"); - - UNIT_ASSERT_VALUES_EQUAL(user->GetDetailedCounters(), total->GetDetailedCounters()); - UNIT_ASSERT_VALUES_EQUAL(user->GetTransactionCounters(), total->GetTransactionCounters()); - - user->ShowDetailedCounters(TInstant::Max()); - UNIT_ASSERT_VALUES_UNEQUAL(user->GetDetailedCounters(), total->GetDetailedCounters()); - UNIT_ASSERT_VALUES_UNEQUAL(user->GetTransactionCounters(), total->GetTransactionCounters()); - - // Try different types of member counters: detailed/usual/arrays/api statuses. - ++*user->RequestTimeouts; + UNIT_ASSERT_STRING_CONTAINS(CountersString(core), "user=my_user"); + UNIT_ASSERT_STRING_CONTAINS(CountersString(core), "user=total"); + + UNIT_ASSERT_VALUES_EQUAL(user->GetDetailedCounters(), total->GetDetailedCounters()); + UNIT_ASSERT_VALUES_EQUAL(user->GetTransactionCounters(), total->GetTransactionCounters()); + + user->ShowDetailedCounters(TInstant::Max()); + UNIT_ASSERT_VALUES_UNEQUAL(user->GetDetailedCounters(), total->GetDetailedCounters()); + UNIT_ASSERT_VALUES_UNEQUAL(user->GetTransactionCounters(), total->GetTransactionCounters()); + + // Try different types of member counters: detailed/usual/arrays/api statuses. 
+ ++*user->RequestTimeouts; ++*user->SqsActionCounters[EAction::CreateQueue].Errors; - ++*user->GetDetailedCounters()->CreateAccountOnTheFly_Success; - user->GetDetailedCounters()->APIStatuses.AddOk(3); - user->GetDetailedCounters()->APIStatuses.AddError("AccessDeniedException", 2); - user->GetDetailedCounters()->APIStatuses.AddError("AccessDeniedException1", 4); // unknown - ++*user->GetTransactionCounters()->TransactionsCount; - ++*user->GetTransactionCounters()->QueryTypeCounters[EQueryId::WRITE_MESSAGE_ID].TransactionsFailed; - + ++*user->GetDetailedCounters()->CreateAccountOnTheFly_Success; + user->GetDetailedCounters()->APIStatuses.AddOk(3); + user->GetDetailedCounters()->APIStatuses.AddError("AccessDeniedException", 2); + user->GetDetailedCounters()->APIStatuses.AddError("AccessDeniedException1", 4); // unknown + ++*user->GetTransactionCounters()->TransactionsCount; + ++*user->GetTransactionCounters()->QueryTypeCounters[EQueryId::WRITE_MESSAGE_ID].TransactionsFailed; + ++*user->YmqActionCounters[EAction::CreateQueue].Errors; - AssertCounterValues(core, - { - { "user=total/RequestTimeouts", 1 }, - { "user=my_user/RequestTimeouts", 1 }, - { "user=total/CreateQueue_Errors", 1 }, - { "user=my_user/CreateQueue_Errors", 1 }, - { "user=total/queue=total/SendMessage_Count", 0 }, - { "user=my_user/queue=total/SendMessage_Count", 0 }, - { "user=total/CreateAccountOnTheFly_Success", 1 }, - { "user=my_user/CreateAccountOnTheFly_Success", 1 }, - { "user=total/StatusesByType/status_code=OK", 3 }, - { "user=my_user/StatusesByType/status_code=OK", 3 }, - { "user=total/StatusesByType/status_code=AccessDeniedException", 2 }, - { "user=my_user/StatusesByType/status_code=AccessDeniedException", 2 }, - { "user=total/StatusesByType/status_code=Unknown", 4 }, - { "user=my_user/StatusesByType/status_code=Unknown", 4 }, - { "user=total/queue=total/TransactionsCount", 1 }, - { "user=my_user/queue=total/TransactionsCount", 1 }, - { "user=total/queue=total/TransactionsFailedByType/query_type=WRITE_MESSAGE_ID", 1 }, - { "user=my_user/queue=total/TransactionsFailedByType/query_type=WRITE_MESSAGE_ID", 1 }, - }); + AssertCounterValues(core, + { + { "user=total/RequestTimeouts", 1 }, + { "user=my_user/RequestTimeouts", 1 }, + { "user=total/CreateQueue_Errors", 1 }, + { "user=my_user/CreateQueue_Errors", 1 }, + { "user=total/queue=total/SendMessage_Count", 0 }, + { "user=my_user/queue=total/SendMessage_Count", 0 }, + { "user=total/CreateAccountOnTheFly_Success", 1 }, + { "user=my_user/CreateAccountOnTheFly_Success", 1 }, + { "user=total/StatusesByType/status_code=OK", 3 }, + { "user=my_user/StatusesByType/status_code=OK", 3 }, + { "user=total/StatusesByType/status_code=AccessDeniedException", 2 }, + { "user=my_user/StatusesByType/status_code=AccessDeniedException", 2 }, + { "user=total/StatusesByType/status_code=Unknown", 4 }, + { "user=my_user/StatusesByType/status_code=Unknown", 4 }, + { "user=total/queue=total/TransactionsCount", 1 }, + { "user=my_user/queue=total/TransactionsCount", 1 }, + { "user=total/queue=total/TransactionsFailedByType/query_type=WRITE_MESSAGE_ID", 1 }, + { "user=my_user/queue=total/TransactionsFailedByType/query_type=WRITE_MESSAGE_ID", 1 }, + }); // auto queue = user->CreateQueueCounters("my_queue", "folder", true); @@ -339,24 +339,24 @@ Y_UNIT_TEST_SUITE(UserCountersTest) { { "cloud=my_user/method=create_queue/name=api.http.errors_count_per_second", 1 }, } ); - } -} - -Y_UNIT_TEST_SUITE(QueueCountersTest) { - Y_UNIT_TEST(InsertCountersTest) { - NKikimrConfig::TSqsConfig cfg; - 
cfg.SetCreateLazyCounters(false); - TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); + } +} + +Y_UNIT_TEST_SUITE(QueueCountersTest) { + Y_UNIT_TEST(InsertCountersTest) { + NKikimrConfig::TSqsConfig cfg; + cfg.SetCreateLazyCounters(false); + TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); TIntrusivePtr<NMonitoring::TDynamicCounters> ymqCounters = new NMonitoring::TDynamicCounters(); TIntrusivePtr<TUserCounters> user = new TUserCounters(cfg, core, ymqCounters, nullptr, "my_user", nullptr); ASSERT_USER_PRESENT("my_user"); - + auto queue = user->CreateQueueCounters("my_queue", "folder", false); ASSERT_STR_COUPLE_DONT_CONTAIN(CountersString(core), CountersString(ymqCounters), "my_queue"); - - queue->InsertCounters(); + + queue->InsertCounters(); ASSERT_STR_COUPLE_CONTAINS(CountersString(core), CountersString(ymqCounters), "my_queue"); - + ++*queue->RequestTimeouts; auto sqsCntrText = CountersString(core); auto ymqCntrText = CountersString(ymqCounters); @@ -367,98 +367,98 @@ Y_UNIT_TEST_SUITE(QueueCountersTest) { ymqCntrText = CountersString(ymqCounters); ASSERT_FIRST_OF_COUPLE_CONTAINS(ymqCntrText, sqsCntrText, "request_timeouts_count_per_second: 1"); - // Second time: - queue->InsertCounters(); + // Second time: + queue->InsertCounters(); ASSERT_STR_COUPLE_CONTAINS(CountersString(core), CountersString(ymqCounters), "my_queue"); - } - + } + void RemoveQueueCountersTest(bool leader, const TString& folderId) { - NKikimrConfig::TSqsConfig cfg; - cfg.SetCreateLazyCounters(false); - TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); + NKikimrConfig::TSqsConfig cfg; + cfg.SetCreateLazyCounters(false); + TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); TIntrusivePtr<NMonitoring::TDynamicCounters> ymqCounters = new NMonitoring::TDynamicCounters(); TIntrusivePtr<TUserCounters> user = new TUserCounters(cfg, core, ymqCounters, nullptr, "my_user", nullptr); - TIntrusivePtr<TQueueCounters> queue = user->CreateQueueCounters("my_queue", folderId, true); + TIntrusivePtr<TQueueCounters> queue = user->CreateQueueCounters("my_queue", folderId, true); if (leader) { queue = queue->GetCountersForLeaderNode(); - } + } ASSERT_STR_COUPLE_CONTAINS(CountersString(core), CountersString(ymqCounters), "queue=my_queue"); - queue->RemoveCounters(); + queue->RemoveCounters(); ASSERT_STR_COUPLE_DONT_CONTAIN(CountersString(core), CountersString(ymqCounters), "queue=my_queue"); - } - + } + Y_UNIT_TEST(RemoveQueueCountersNonLeaderWithoutFolderTest) { - RemoveQueueCountersTest(false, ""); - } - + RemoveQueueCountersTest(false, ""); + } + Y_UNIT_TEST(RemoveQueueCountersLeaderWithoutFolderTest) { - RemoveQueueCountersTest(true, ""); - } - + RemoveQueueCountersTest(true, ""); + } + Y_UNIT_TEST(RemoveQueueCountersNonLeaderWithFolderTest) { - RemoveQueueCountersTest(false, "my_folder"); - } - + RemoveQueueCountersTest(false, "my_folder"); + } + Y_UNIT_TEST(RemoveQueueCountersLeaderWithFolderTest) { - RemoveQueueCountersTest(true, "my_folder"); - } - - void CountersAggregationTest(bool cloudMode = false) { - NKikimrConfig::TSqsConfig cfg; - cfg.SetYandexCloudMode(cloudMode); - cfg.SetCreateLazyCounters(false); - TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); + RemoveQueueCountersTest(true, "my_folder"); + } + + void CountersAggregationTest(bool cloudMode = false) { + NKikimrConfig::TSqsConfig cfg; + cfg.SetYandexCloudMode(cloudMode); + 
cfg.SetCreateLazyCounters(false); + TIntrusivePtr<NMonitoring::TDynamicCounters> core = new NMonitoring::TDynamicCounters(); TIntrusivePtr<NMonitoring::TDynamicCounters> ymqCounters = new NMonitoring::TDynamicCounters(); TIntrusivePtr<TUserCounters> total = new TUserCounters(cfg, core, ymqCounters, nullptr, TOTAL_COUNTER_LABEL, nullptr); - total->ShowDetailedCounters(TInstant::Max()); + total->ShowDetailedCounters(TInstant::Max()); TIntrusivePtr<TUserCounters> user = new TUserCounters(cfg, core, ymqCounters, nullptr, "my_user", total); TIntrusivePtr<TQueueCounters> queue = user->CreateQueueCounters("my_queue", cloudMode ? "my_folder" : "", true)->GetCountersForLeaderNode(); - UNIT_ASSERT_STRING_CONTAINS(CountersString(core), "queue=my_queue"); - - queue->ShowDetailedCounters(TInstant::Max()); - - // Try different types of member counters: detailed/usual/arrays/api statuses. - ++*queue->MessagesPurged; + UNIT_ASSERT_STRING_CONTAINS(CountersString(core), "queue=my_queue"); + + queue->ShowDetailedCounters(TInstant::Max()); + + // Try different types of member counters: detailed/usual/arrays/api statuses. + ++*queue->MessagesPurged; ++*queue->purged_count_per_second; ++*queue->SqsActionCounters[EAction::SendMessageBatch].Errors; ++*queue->YmqActionCounters[EAction::SendMessageBatch].Errors; - ++*queue->GetDetailedCounters()->ReceiveMessage_KeysInvalidated; - ++*queue->GetTransactionCounters()->TransactionRetryTimeouts; - ++*queue->GetTransactionCounters()->QueryTypeCounters[EQueryId::DELETE_MESSAGE_ID].TransactionsCount; - - // path fixer - auto x = [cloudMode](const TString& path) -> TString { - TString ret = path; - if (cloudMode) { - size_t replacements = SubstGlobal(ret, "/queue=total", "/folder=total/queue=total"); - replacements += SubstGlobal(ret, "/queue=my_queue", "/folder=my_folder/queue=my_queue"); - UNIT_ASSERT_LE(replacements, 1); - } - return ret; - }; - - AssertCounterValues(core, - { - { x("user=total/queue=total/MessagesPurged"), 1 }, - { x("user=my_user/queue=total/MessagesPurged"), 1 }, - { x("user=my_user/queue=my_queue/MessagesPurged"), 1 }, - - { x("user=total/queue=total/SendMessage_Errors"), 1 }, - { x("user=my_user/queue=total/SendMessage_Errors"), 1 }, - { x("user=my_user/queue=my_queue/SendMessage_Errors"), 1 }, - - { x("user=total/queue=total/ReceiveMessage_KeysInvalidated"), 1 }, - { x("user=my_user/queue=total/ReceiveMessage_KeysInvalidated"), 1 }, - { x("user=my_user/queue=my_queue/ReceiveMessage_KeysInvalidated"), 1 }, - - { x("user=total/queue=total/TransactionRetryTimeouts"), 1 }, - { x("user=my_user/queue=total/TransactionRetryTimeouts"), 1 }, - { x("user=my_user/queue=my_queue/TransactionRetryTimeouts"), 1 }, - - { x("user=total/queue=total/TransactionsByType/query_type=DELETE_MESSAGE_ID"), 1 }, - { x("user=my_user/queue=total/TransactionsByType/query_type=DELETE_MESSAGE_ID"), 1 }, - { x("user=my_user/queue=my_queue/TransactionsByType/query_type=DELETE_MESSAGE_ID"), 1 }, - }); + ++*queue->GetDetailedCounters()->ReceiveMessage_KeysInvalidated; + ++*queue->GetTransactionCounters()->TransactionRetryTimeouts; + ++*queue->GetTransactionCounters()->QueryTypeCounters[EQueryId::DELETE_MESSAGE_ID].TransactionsCount; + + // path fixer + auto x = [cloudMode](const TString& path) -> TString { + TString ret = path; + if (cloudMode) { + size_t replacements = SubstGlobal(ret, "/queue=total", "/folder=total/queue=total"); + replacements += SubstGlobal(ret, "/queue=my_queue", "/folder=my_folder/queue=my_queue"); + UNIT_ASSERT_LE(replacements, 1); + } + return ret; + }; + + 
AssertCounterValues(core, + { + { x("user=total/queue=total/MessagesPurged"), 1 }, + { x("user=my_user/queue=total/MessagesPurged"), 1 }, + { x("user=my_user/queue=my_queue/MessagesPurged"), 1 }, + + { x("user=total/queue=total/SendMessage_Errors"), 1 }, + { x("user=my_user/queue=total/SendMessage_Errors"), 1 }, + { x("user=my_user/queue=my_queue/SendMessage_Errors"), 1 }, + + { x("user=total/queue=total/ReceiveMessage_KeysInvalidated"), 1 }, + { x("user=my_user/queue=total/ReceiveMessage_KeysInvalidated"), 1 }, + { x("user=my_user/queue=my_queue/ReceiveMessage_KeysInvalidated"), 1 }, + + { x("user=total/queue=total/TransactionRetryTimeouts"), 1 }, + { x("user=my_user/queue=total/TransactionRetryTimeouts"), 1 }, + { x("user=my_user/queue=my_queue/TransactionRetryTimeouts"), 1 }, + + { x("user=total/queue=total/TransactionsByType/query_type=DELETE_MESSAGE_ID"), 1 }, + { x("user=my_user/queue=total/TransactionsByType/query_type=DELETE_MESSAGE_ID"), 1 }, + { x("user=my_user/queue=my_queue/TransactionsByType/query_type=DELETE_MESSAGE_ID"), 1 }, + }); AssertCounterValues(ymqCounters, { @@ -466,40 +466,40 @@ Y_UNIT_TEST_SUITE(QueueCountersTest) { { x("cloud=my_user/queue=my_queue/method=send_message_batch/name=api.http.errors_count_per_second"), 1 }, }); - } - - Y_UNIT_TEST(CountersAggregationTest) { - CountersAggregationTest(); - } - - Y_UNIT_TEST(CountersAggregationCloudTest) { - CountersAggregationTest(true); - } -} - -Y_UNIT_TEST_SUITE(HttpCountersTest) { - Y_UNIT_TEST(CountersAggregationTest) { - NKikimrConfig::TSqsConfig cfg; - cfg.SetCreateLazyCounters(false); - TIntrusivePtr<NMonitoring::TDynamicCounters> root = new NMonitoring::TDynamicCounters(); - TIntrusivePtr<THttpCounters> http = new THttpCounters(cfg, root); - TIntrusivePtr<THttpUserCounters> user = http->GetUserCounters("my_user"); - - *http->ConnectionsCount = 33; - ++*http->GetUserCounters("my_user")->RequestExceptions; - ++*http->GetUserCounters("your_user")->ActionCounters[EAction::SetQueueAttributes].Requests; - - AssertCounterValues(root, - { - { "ConnectionsCount", 33 }, - { "user=total/RequestExceptions", 1 }, - { "user=my_user/RequestExceptions", 1 }, - { "user=total/SetQueueAttributesRequest", 1 }, - { "user=your_user/SetQueueAttributesRequest", 1 }, - }); - } -} - + } + + Y_UNIT_TEST(CountersAggregationTest) { + CountersAggregationTest(); + } + + Y_UNIT_TEST(CountersAggregationCloudTest) { + CountersAggregationTest(true); + } +} + +Y_UNIT_TEST_SUITE(HttpCountersTest) { + Y_UNIT_TEST(CountersAggregationTest) { + NKikimrConfig::TSqsConfig cfg; + cfg.SetCreateLazyCounters(false); + TIntrusivePtr<NMonitoring::TDynamicCounters> root = new NMonitoring::TDynamicCounters(); + TIntrusivePtr<THttpCounters> http = new THttpCounters(cfg, root); + TIntrusivePtr<THttpUserCounters> user = http->GetUserCounters("my_user"); + + *http->ConnectionsCount = 33; + ++*http->GetUserCounters("my_user")->RequestExceptions; + ++*http->GetUserCounters("your_user")->ActionCounters[EAction::SetQueueAttributes].Requests; + + AssertCounterValues(root, + { + { "ConnectionsCount", 33 }, + { "user=total/RequestExceptions", 1 }, + { "user=my_user/RequestExceptions", 1 }, + { "user=total/SetQueueAttributesRequest", 1 }, + { "user=your_user/SetQueueAttributesRequest", 1 }, + }); + } +} + Y_UNIT_TEST_SUITE(MeteringCountersTest) { Y_UNIT_TEST(CountersAggregationTest) { NKikimrConfig::TSqsConfig config; @@ -527,4 +527,4 @@ Y_UNIT_TEST_SUITE(MeteringCountersTest) { } } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git 
a/ydb/core/ymq/base/ut/dlq_helpers_ut.cpp b/ydb/core/ymq/base/ut/dlq_helpers_ut.cpp index 3254de751c8..1819a583dab 100644 --- a/ydb/core/ymq/base/ut/dlq_helpers_ut.cpp +++ b/ydb/core/ymq/base/ut/dlq_helpers_ut.cpp @@ -9,27 +9,27 @@ Y_UNIT_TEST_SUITE(RedrivePolicy) { NKikimrConfig::TSqsConfig config; { // basic sanity - TRedrivePolicy basicPolicy = TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config); + TRedrivePolicy basicPolicy = TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config); UNIT_ASSERT(basicPolicy.IsValid()); - UNIT_ASSERT(basicPolicy.TargetArn && *basicPolicy.TargetArn == "yrn:ya:sqs:ru-central1:radix:mymegadlq"); + UNIT_ASSERT(basicPolicy.TargetArn && *basicPolicy.TargetArn == "yrn:ya:sqs:ru-central1:radix:mymegadlq"); UNIT_ASSERT(basicPolicy.TargetQueueName && *basicPolicy.TargetQueueName == "mymegadlq"); UNIT_ASSERT(basicPolicy.MaxReceiveCount && *basicPolicy.MaxReceiveCount == 3); } { // count as str - TRedrivePolicy basicPolicy = TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:ultradlq","maxReceiveCount":"42"})__", config); + TRedrivePolicy basicPolicy = TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:ultradlq","maxReceiveCount":"42"})__", config); UNIT_ASSERT(basicPolicy.IsValid()); - UNIT_ASSERT(basicPolicy.TargetArn && *basicPolicy.TargetArn == "yrn:ya:sqs:ru-central1:radix:ultradlq"); + UNIT_ASSERT(basicPolicy.TargetArn && *basicPolicy.TargetArn == "yrn:ya:sqs:ru-central1:radix:ultradlq"); UNIT_ASSERT(basicPolicy.TargetQueueName && *basicPolicy.TargetQueueName == "ultradlq"); UNIT_ASSERT(basicPolicy.MaxReceiveCount && *basicPolicy.MaxReceiveCount == 42); } - UNIT_ASSERT(TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); + UNIT_ASSERT(TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); // maxReceiveCount should be from 1 to 1000 - UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":0})__", config).IsValid()); - UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":1001})__", config).IsValid()); + UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":0})__", config).IsValid()); + UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":1001})__", config).IsValid()); // maxReceiveCount should be integer - UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":"omglol"})__", config).IsValid()); + UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":"omglol"})__", config).IsValid()); // target arn shouldn't be empty UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"","maxReceiveCount":3})__", config).IsValid()); // both fields are expected @@ -49,26 +49,26 @@ Y_UNIT_TEST_SUITE(RedrivePolicy) { Y_UNIT_TEST(RedrivePolicyToJsonTest) { NKikimrConfig::TSqsConfig config; - const TString json = 
R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":3})__"; + const TString json = R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":3})__"; UNIT_ASSERT_EQUAL(TRedrivePolicy::FromJson(json, config).ToJson(), json); } Y_UNIT_TEST(RedrivePolicyArnValidationTest) { NKikimrConfig::TSqsConfig config; - UNIT_ASSERT(TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); + UNIT_ASSERT(TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); // empty queue name - UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:","maxReceiveCount":3})__", config).IsValid()); + UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1:radix:","maxReceiveCount":3})__", config).IsValid()); // empty account - UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1::mymegadlq","maxReceiveCount":3})__", config).IsValid()); + UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central1::mymegadlq","maxReceiveCount":3})__", config).IsValid()); // bad region - UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central42:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); + UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:ya:sqs:ru-central42:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); // bad prefix - UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrrrr:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); + UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrrrr:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); // broken prefix - UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrr:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); + UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrr:ya:sqs:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); config.SetYandexCloudMode(true); // check cloud prefix - UNIT_ASSERT(TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:yc:ymq:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); + UNIT_ASSERT(TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yrn:yc:ymq:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); UNIT_ASSERT(!TRedrivePolicy::FromJson(R"__({"deadLetterTargetArn":"yarrr:yc:ymq:ru-central1:radix:mymegadlq","maxReceiveCount":3})__", config).IsValid()); } } diff --git a/ydb/core/ymq/base/ut/helpers_ut.cpp b/ydb/core/ymq/base/ut/helpers_ut.cpp index 9f1db59c722..b62c8d16017 100644 --- a/ydb/core/ymq/base/ut/helpers_ut.cpp +++ b/ydb/core/ymq/base/ut/helpers_ut.cpp @@ -1,106 +1,106 @@ #include <ydb/core/ymq/base/helpers.h> - + #include <library/cpp/testing/unittest/registar.h> - -namespace NKikimr::NSQS { - -Y_UNIT_TEST_SUITE(StringValidationTest) { - Y_UNIT_TEST(IsAlphaNumAndPunctuationTest) { - UNIT_ASSERT(IsAlphaNumAndPunctuation("123")); - UNIT_ASSERT(IsAlphaNumAndPunctuation("")); - UNIT_ASSERT(IsAlphaNumAndPunctuation("[abd]")); - UNIT_ASSERT(IsAlphaNumAndPunctuation("ABC-ZYX")); - UNIT_ASSERT(IsAlphaNumAndPunctuation("{_<=>_}")); - UNIT_ASSERT(IsAlphaNumAndPunctuation("**")); - 
UNIT_ASSERT(IsAlphaNumAndPunctuation("\"\\/~")); - UNIT_ASSERT(IsAlphaNumAndPunctuation("(!)")); - UNIT_ASSERT(IsAlphaNumAndPunctuation(":-)")); - - - UNIT_ASSERT(!IsAlphaNumAndPunctuation(" ")); - UNIT_ASSERT(!IsAlphaNumAndPunctuation(TStringBuf("\0", 1))); - UNIT_ASSERT(!IsAlphaNumAndPunctuation("\t\n")); - UNIT_ASSERT(!IsAlphaNumAndPunctuation("айди")); - UNIT_ASSERT(!IsAlphaNumAndPunctuation("§")); - } -} - -Y_UNIT_TEST_SUITE(MessageAttributeValidationTest) { - Y_UNIT_TEST(MessageAttributeValidationTest) { - auto AssertValidAttrImpl = [](TStringBuf name, bool valid, bool allowYandexPrefix = false, bool hasYandexPrefix = false) { - bool yandexPrefixFound = false; - UNIT_ASSERT_VALUES_EQUAL_C(ValidateMessageAttributeName(name, yandexPrefixFound, allowYandexPrefix), valid, - "Attribute name: \"" << name << "\", AllowYandexPrefix: " << allowYandexPrefix << ", YandexPrefixFound: " << yandexPrefixFound); - UNIT_ASSERT_VALUES_EQUAL_C(hasYandexPrefix, yandexPrefixFound, - "Attribute name: \"" << name << "\", AllowYandexPrefix: " << allowYandexPrefix << ", YandexPrefixFound: " << yandexPrefixFound); - }; - - auto AssertValidAttr = [&](TStringBuf name, bool allowYandexPrefix = false, bool hasYandexPrefix = false) { - AssertValidAttrImpl(name, true, allowYandexPrefix, hasYandexPrefix); - }; - - auto AssertInvalidAttr = [&](TStringBuf name, bool allowYandexPrefix = false, bool hasYandexPrefix = false) { - AssertValidAttrImpl(name, false, allowYandexPrefix, hasYandexPrefix); - }; - - AssertInvalidAttr(""); - AssertInvalidAttr("TooManyCharacters__skdhfkjhsfjdhfkshfkjsdhkfhsdkfhkshfkhskfhdskjfhsdkfhksdhfkjdshfksdhfgjhsdgf1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111122222222222222222222222222222222222222222222222222233333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333"); - AssertInvalidAttr("aWs.trololo"); - AssertInvalidAttr("aws."); - AssertInvalidAttr("amazon."); - AssertInvalidAttr("amazon.33"); - AssertInvalidAttr("invalid_characters!"); - AssertInvalidAttr("space "); - AssertInvalidAttr(".StartsWithPeriod"); - AssertInvalidAttr("EndsWithPeriod."); - AssertInvalidAttr("Two..Periods"); - - // Yandex reserved prefixes: - AssertInvalidAttr("ya.reserved", false, true); - AssertInvalidAttr("YC.reserved", false, true); - AssertInvalidAttr("YANDEX.reserved", false, true); - - AssertValidAttr("not.prefix.ya", false, false); - AssertValidAttr("ya.reserved", true, true); - AssertValidAttr("YC.reserved", true, true); - AssertValidAttr("Yandex.reserved", true, true); - - // Valid - AssertValidAttr("OK"); - AssertValidAttr("Name"); - AssertValidAttr("name"); - AssertValidAttr("alpha-num"); - AssertValidAttr("with.period"); - AssertValidAttr("with_underscore"); - } -} - -Y_UNIT_TEST_SUITE(NameValidationTest) { - Y_UNIT_TEST(NameValidationTest) { - UNIT_ASSERT(!ValidateQueueNameOrUserName("")); - UNIT_ASSERT(!ValidateQueueNameOrUserName(".fifo")); - UNIT_ASSERT(!ValidateQueueNameOrUserName("TooManyCharacters__skdhfkjhsfjdhfkshfkjsdhkfhsdkfhkshfkhskfhdskjfhsdkfhksdhfkjdshfksdhfgjhsdgf1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111122222222222222222222222222222222222222222222222222233333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333")); - UNIT_ASSERT(!ValidateQueueNameOrUserName("point.")); - 
UNIT_ASSERT(!ValidateQueueNameOrUserName("space ")); - UNIT_ASSERT(!ValidateQueueNameOrUserName("русские буквы")); - UNIT_ASSERT(!ValidateQueueNameOrUserName("*")); - UNIT_ASSERT(!ValidateQueueNameOrUserName("!")); - UNIT_ASSERT(!ValidateQueueNameOrUserName("/")); - - - UNIT_ASSERT(ValidateQueueNameOrUserName("OK")); - UNIT_ASSERT(ValidateQueueNameOrUserName("Name.fifo")); - UNIT_ASSERT(ValidateQueueNameOrUserName("name.FIFO")); - UNIT_ASSERT(ValidateQueueNameOrUserName("name")); - UNIT_ASSERT(ValidateQueueNameOrUserName("alpha-num")); - UNIT_ASSERT(ValidateQueueNameOrUserName("0123")); - UNIT_ASSERT(ValidateQueueNameOrUserName("with_underscore")); - } -} - -Y_UNIT_TEST_SUITE(MessageBodyValidationTest) { - Y_UNIT_TEST(MessageBodyValidationTest) { - TString desc; + +namespace NKikimr::NSQS { + +Y_UNIT_TEST_SUITE(StringValidationTest) { + Y_UNIT_TEST(IsAlphaNumAndPunctuationTest) { + UNIT_ASSERT(IsAlphaNumAndPunctuation("123")); + UNIT_ASSERT(IsAlphaNumAndPunctuation("")); + UNIT_ASSERT(IsAlphaNumAndPunctuation("[abd]")); + UNIT_ASSERT(IsAlphaNumAndPunctuation("ABC-ZYX")); + UNIT_ASSERT(IsAlphaNumAndPunctuation("{_<=>_}")); + UNIT_ASSERT(IsAlphaNumAndPunctuation("**")); + UNIT_ASSERT(IsAlphaNumAndPunctuation("\"\\/~")); + UNIT_ASSERT(IsAlphaNumAndPunctuation("(!)")); + UNIT_ASSERT(IsAlphaNumAndPunctuation(":-)")); + + + UNIT_ASSERT(!IsAlphaNumAndPunctuation(" ")); + UNIT_ASSERT(!IsAlphaNumAndPunctuation(TStringBuf("\0", 1))); + UNIT_ASSERT(!IsAlphaNumAndPunctuation("\t\n")); + UNIT_ASSERT(!IsAlphaNumAndPunctuation("айди")); + UNIT_ASSERT(!IsAlphaNumAndPunctuation("§")); + } +} + +Y_UNIT_TEST_SUITE(MessageAttributeValidationTest) { + Y_UNIT_TEST(MessageAttributeValidationTest) { + auto AssertValidAttrImpl = [](TStringBuf name, bool valid, bool allowYandexPrefix = false, bool hasYandexPrefix = false) { + bool yandexPrefixFound = false; + UNIT_ASSERT_VALUES_EQUAL_C(ValidateMessageAttributeName(name, yandexPrefixFound, allowYandexPrefix), valid, + "Attribute name: \"" << name << "\", AllowYandexPrefix: " << allowYandexPrefix << ", YandexPrefixFound: " << yandexPrefixFound); + UNIT_ASSERT_VALUES_EQUAL_C(hasYandexPrefix, yandexPrefixFound, + "Attribute name: \"" << name << "\", AllowYandexPrefix: " << allowYandexPrefix << ", YandexPrefixFound: " << yandexPrefixFound); + }; + + auto AssertValidAttr = [&](TStringBuf name, bool allowYandexPrefix = false, bool hasYandexPrefix = false) { + AssertValidAttrImpl(name, true, allowYandexPrefix, hasYandexPrefix); + }; + + auto AssertInvalidAttr = [&](TStringBuf name, bool allowYandexPrefix = false, bool hasYandexPrefix = false) { + AssertValidAttrImpl(name, false, allowYandexPrefix, hasYandexPrefix); + }; + + AssertInvalidAttr(""); + AssertInvalidAttr("TooManyCharacters__skdhfkjhsfjdhfkshfkjsdhkfhsdkfhkshfkhskfhdskjfhsdkfhksdhfkjdshfksdhfgjhsdgf1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111122222222222222222222222222222222222222222222222222233333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333"); + AssertInvalidAttr("aWs.trololo"); + AssertInvalidAttr("aws."); + AssertInvalidAttr("amazon."); + AssertInvalidAttr("amazon.33"); + AssertInvalidAttr("invalid_characters!"); + AssertInvalidAttr("space "); + AssertInvalidAttr(".StartsWithPeriod"); + AssertInvalidAttr("EndsWithPeriod."); + AssertInvalidAttr("Two..Periods"); + + // Yandex reserved prefixes: + AssertInvalidAttr("ya.reserved", false, true); + 
AssertInvalidAttr("YC.reserved", false, true); + AssertInvalidAttr("YANDEX.reserved", false, true); + + AssertValidAttr("not.prefix.ya", false, false); + AssertValidAttr("ya.reserved", true, true); + AssertValidAttr("YC.reserved", true, true); + AssertValidAttr("Yandex.reserved", true, true); + + // Valid + AssertValidAttr("OK"); + AssertValidAttr("Name"); + AssertValidAttr("name"); + AssertValidAttr("alpha-num"); + AssertValidAttr("with.period"); + AssertValidAttr("with_underscore"); + } +} + +Y_UNIT_TEST_SUITE(NameValidationTest) { + Y_UNIT_TEST(NameValidationTest) { + UNIT_ASSERT(!ValidateQueueNameOrUserName("")); + UNIT_ASSERT(!ValidateQueueNameOrUserName(".fifo")); + UNIT_ASSERT(!ValidateQueueNameOrUserName("TooManyCharacters__skdhfkjhsfjdhfkshfkjsdhkfhsdkfhkshfkhskfhdskjfhsdkfhksdhfkjdshfksdhfgjhsdgf1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111122222222222222222222222222222222222222222222222222233333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333")); + UNIT_ASSERT(!ValidateQueueNameOrUserName("point.")); + UNIT_ASSERT(!ValidateQueueNameOrUserName("space ")); + UNIT_ASSERT(!ValidateQueueNameOrUserName("русские буквы")); + UNIT_ASSERT(!ValidateQueueNameOrUserName("*")); + UNIT_ASSERT(!ValidateQueueNameOrUserName("!")); + UNIT_ASSERT(!ValidateQueueNameOrUserName("/")); + + + UNIT_ASSERT(ValidateQueueNameOrUserName("OK")); + UNIT_ASSERT(ValidateQueueNameOrUserName("Name.fifo")); + UNIT_ASSERT(ValidateQueueNameOrUserName("name.FIFO")); + UNIT_ASSERT(ValidateQueueNameOrUserName("name")); + UNIT_ASSERT(ValidateQueueNameOrUserName("alpha-num")); + UNIT_ASSERT(ValidateQueueNameOrUserName("0123")); + UNIT_ASSERT(ValidateQueueNameOrUserName("with_underscore")); + } +} + +Y_UNIT_TEST_SUITE(MessageBodyValidationTest) { + Y_UNIT_TEST(MessageBodyValidationTest) { + TString desc; UNIT_ASSERT(ValidateMessageBody("english text.", desc)); UNIT_ASSERT(ValidateMessageBody("русский текст.", desc)); UNIT_ASSERT(ValidateMessageBody("\n", desc)); @@ -112,12 +112,12 @@ Y_UNIT_TEST_SUITE(MessageBodyValidationTest) { UNIT_ASSERT(ValidateMessageBody("\uFFFD", desc)); UNIT_ASSERT(ValidateMessageBody("\uD7FF", desc)); UNIT_ASSERT(ValidateMessageBody("\u00FF", desc)); - + UNIT_ASSERT(!ValidateMessageBody(TStringBuf("\0", 1), desc)); UNIT_ASSERT(!ValidateMessageBody("\u0002", desc)); UNIT_ASSERT(!ValidateMessageBody("\u0019", desc)); UNIT_ASSERT(!ValidateMessageBody("\uFFFF", desc)); - } -} - -} // namespace NKikimr::NSQS + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/ut/secure_protobuf_printer_ut.cpp b/ydb/core/ymq/base/ut/secure_protobuf_printer_ut.cpp index 3ef6b6a956d..996bf183712 100644 --- a/ydb/core/ymq/base/ut/secure_protobuf_printer_ut.cpp +++ b/ydb/core/ymq/base/ut/secure_protobuf_printer_ut.cpp @@ -1,37 +1,37 @@ #include <ydb/core/ymq/base/secure_protobuf_printer.h> - + #include <library/cpp/testing/unittest/registar.h> - -namespace NKikimr::NSQS { - -Y_UNIT_TEST_SUITE(SecureProtobufPrinterTest) { - Y_UNIT_TEST(MessageBody) { - { - NKikimrClient::TSqsRequest msg; - msg.MutableSendMessage()->SetMessageBody("trololo"); + +namespace NKikimr::NSQS { + +Y_UNIT_TEST_SUITE(SecureProtobufPrinterTest) { + Y_UNIT_TEST(MessageBody) { + { + NKikimrClient::TSqsRequest msg; + msg.MutableSendMessage()->SetMessageBody("trololo"); UNIT_ASSERT_STRINGS_EQUAL(SecureShortUtf8DebugString(msg), "SendMessage { MessageBody: \"***\" }"); - } - - { - 
NKikimrClient::TSqsResponse msg; - msg.MutableReceiveMessage()->AddMessages()->SetData("trololo"); + } + + { + NKikimrClient::TSqsResponse msg; + msg.MutableReceiveMessage()->AddMessages()->SetData("trololo"); UNIT_ASSERT_STRINGS_EQUAL(SecureShortUtf8DebugString(msg), "ReceiveMessage { Messages { Data: \"***\" } }"); - } - } - - Y_UNIT_TEST(Tokens) { - { - NKikimrClient::TSqsRequest msg; - msg.MutableGetQueueUrl()->MutableCredentials()->SetOAuthToken("123456789012345678901234567890"); - UNIT_ASSERT_STRINGS_EQUAL(SecureShortUtf8DebugString(msg), "GetQueueUrl { Credentials { OAuthToken: \"1234****7890 (F229119D)\" } }"); - } - - { - NKikimrClient::TSqsRequest msg; - msg.MutableGetQueueUrl()->MutableCredentials()->SetTvmTicket("short"); - UNIT_ASSERT_STRINGS_EQUAL(SecureShortUtf8DebugString(msg), "GetQueueUrl { Credentials { TvmTicket: \"**** (60C3567B)\" } }"); - } - } -} - -} // namespace NKikimr::NSQS + } + } + + Y_UNIT_TEST(Tokens) { + { + NKikimrClient::TSqsRequest msg; + msg.MutableGetQueueUrl()->MutableCredentials()->SetOAuthToken("123456789012345678901234567890"); + UNIT_ASSERT_STRINGS_EQUAL(SecureShortUtf8DebugString(msg), "GetQueueUrl { Credentials { OAuthToken: \"1234****7890 (F229119D)\" } }"); + } + + { + NKikimrClient::TSqsRequest msg; + msg.MutableGetQueueUrl()->MutableCredentials()->SetTvmTicket("short"); + UNIT_ASSERT_STRINGS_EQUAL(SecureShortUtf8DebugString(msg), "GetQueueUrl { Credentials { TvmTicket: \"**** (60C3567B)\" } }"); + } + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/base/ut/ya.make b/ydb/core/ymq/base/ut/ya.make index f927641a90b..25080176fa4 100644 --- a/ydb/core/ymq/base/ut/ya.make +++ b/ydb/core/ymq/base/ut/ya.make @@ -1,23 +1,23 @@ -OWNER( - galaxycrab +OWNER( + galaxycrab g:kikimr - g:sqs -) - -UNITTEST() - -PEERDIR( + g:sqs +) + +UNITTEST() + +PEERDIR( ydb/core/base ydb/core/ymq/base -) - -SRCS( - action_ut.cpp - counters_ut.cpp +) + +SRCS( + action_ut.cpp + counters_ut.cpp dlq_helpers_ut.cpp - helpers_ut.cpp - secure_protobuf_printer_ut.cpp + helpers_ut.cpp + secure_protobuf_printer_ut.cpp queue_attributes_ut.cpp -) - -END() +) + +END() diff --git a/ydb/core/ymq/base/ya.make b/ydb/core/ymq/base/ya.make index 14cd4e72a10..a6cc6ccbce9 100644 --- a/ydb/core/ymq/base/ya.make +++ b/ydb/core/ymq/base/ya.make @@ -1,28 +1,28 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + LIBRARY() SRCS( acl.cpp - action.cpp - counters.cpp - debug_info.cpp + action.cpp + counters.cpp + debug_info.cpp dlq_helpers.cpp - helpers.cpp - probes.cpp + helpers.cpp + probes.cpp queue_attributes.cpp queue_id.cpp - secure_protobuf_printer.cpp + secure_protobuf_printer.cpp events_writer_iface.h ) -GENERATE_ENUM_SERIALIZATION(query_id.h) +GENERATE_ENUM_SERIALIZATION(query_id.h) GENERATE_ENUM_SERIALIZATION(cloud_enums.h) - + PEERDIR( contrib/libs/openssl library/cpp/cgiparam diff --git a/ydb/core/ymq/client/bin/main.cpp b/ydb/core/ymq/client/bin/main.cpp index cb7dbd2a2c6..04c68ea2663 100644 --- a/ydb/core/ymq/client/bin/main.cpp +++ b/ydb/core/ymq/client/bin/main.cpp @@ -16,88 +16,88 @@ using namespace NLastGetopt; using namespace NKikimr::NSQS; -class TSqsOptions : public TOpts { -public: - TSqsOptions(bool addUserOption = true) { - SetFreeArgsNum(0); - AddHelpOption('h'); - AddLongOption('s', "server", "sqs host name") - .Optional() - .StoreResult(&Host) - .DefaultValue("localhost") - .RequiredArgument("HOST"); - AddLongOption('p', "port", "sqs grpc port number") - .Optional() - .StoreResult(&Port) - .DefaultValue("2135") - .RequiredArgument("PORT"); - if 
(addUserOption) { - AddLongOption('u', "user", "name of user who performs an action") - .Required() - .StoreResult(&User) - .RequiredArgument("USER"); - } - AddLongOption('o', "oauth-token", "oauth token. Can also be set from SQS_OAUTH_TOKEN environment variable") - .Optional() - .StoreResult(&OAuthToken); - AddLongOption('t', "tvm-ticket", "tvm ticket. Can also be set from SQS_TVM_TICKET environment variable") - .Optional() - .StoreResult(&TVMTicket); - } - - template <class TRequestProto> - void SetCredentials(TRequestProto& req) { - if (!OAuthToken) { - OAuthToken = GetEnv("SQS_OAUTH_TOKEN"); - } - if (!TVMTicket) { - TVMTicket = GetEnv("SQS_TVM_TICKET"); - } - if (OAuthToken && TVMTicket) { - Cerr << "Please specify either OAuth token or TVM ticket, not both." << Endl; - exit(1); - } - if (OAuthToken) { - req.MutableCredentials()->SetOAuthToken(OAuthToken); - } - if (TVMTicket) { - req.MutableCredentials()->SetTvmTicket(TVMTicket); - } - - } - - TString Host; - ui16 Port = 0; - TString User; - TString OAuthToken; - TString TVMTicket; -}; - +class TSqsOptions : public TOpts { +public: + TSqsOptions(bool addUserOption = true) { + SetFreeArgsNum(0); + AddHelpOption('h'); + AddLongOption('s', "server", "sqs host name") + .Optional() + .StoreResult(&Host) + .DefaultValue("localhost") + .RequiredArgument("HOST"); + AddLongOption('p', "port", "sqs grpc port number") + .Optional() + .StoreResult(&Port) + .DefaultValue("2135") + .RequiredArgument("PORT"); + if (addUserOption) { + AddLongOption('u', "user", "name of user who performs an action") + .Required() + .StoreResult(&User) + .RequiredArgument("USER"); + } + AddLongOption('o', "oauth-token", "oauth token. Can also be set from SQS_OAUTH_TOKEN environment variable") + .Optional() + .StoreResult(&OAuthToken); + AddLongOption('t', "tvm-ticket", "tvm ticket. Can also be set from SQS_TVM_TICKET environment variable") + .Optional() + .StoreResult(&TVMTicket); + } + + template <class TRequestProto> + void SetCredentials(TRequestProto& req) { + if (!OAuthToken) { + OAuthToken = GetEnv("SQS_OAUTH_TOKEN"); + } + if (!TVMTicket) { + TVMTicket = GetEnv("SQS_TVM_TICKET"); + } + if (OAuthToken && TVMTicket) { + Cerr << "Please specify either OAuth token or TVM ticket, not both." 
<< Endl; + exit(1); + } + if (OAuthToken) { + req.MutableCredentials()->SetOAuthToken(OAuthToken); + } + if (TVMTicket) { + req.MutableCredentials()->SetTvmTicket(TVMTicket); + } + + } + + TString Host; + ui16 Port = 0; + TString User; + TString OAuthToken; + TString TVMTicket; +}; + static int HandleChange(int argc, const char* argv[]) { TString name; TString receipt; ui64 timeout; - TSqsOptions opts; - opts.AddLongOption('q', "queue-name", "name of creating queue") - .Required() - .StoreResult(&name); - opts.AddLongOption('r', "receipt", "receipt handle") - .Required() - .StoreResult(&receipt); - opts.AddLongOption('t', "timeout", "visibility timeout") - .Required() - .StoreResult(&timeout); + TSqsOptions opts; + opts.AddLongOption('q', "queue-name", "name of creating queue") + .Required() + .StoreResult(&name); + opts.AddLongOption('r', "receipt", "receipt handle") + .Required() + .StoreResult(&receipt); + opts.AddLongOption('t', "timeout", "visibility timeout") + .Required() + .StoreResult(&timeout); - TOptsParseResult res(&opts, argc, argv); + TOptsParseResult res(&opts, argc, argv); TChangeMessageVisibilityRequest req; - req.MutableAuth()->SetUserName(opts.User); + req.MutableAuth()->SetUserName(opts.User); req.SetQueueName(name); req.SetReceiptHandle(receipt); req.SetVisibilityTimeout(timeout); - opts.SetCredentials(req); - auto resp = TQueueClient(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)).ChangeMessageVisibility(req); + opts.SetCredentials(req); + auto resp = TQueueClient(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)).ChangeMessageVisibility(req); if (resp.HasError()) { Cerr << "Got error for queue : " @@ -112,80 +112,80 @@ static int HandleChange(int argc, const char* argv[]) { static int HandleCreate(int argc, const char* argv[]) { TString queue; - ui64 shards = 0; - ui64 partitions = 0; + ui64 shards = 0; + ui64 partitions = 0; ui64 retentionSeconds = 0; - bool enableOutOfOrderExecution = false; - bool disableOutOfOrderExecution = false; - bool contentBasedDeduplication = false; - bool enableAutosplit = false; - bool disableAutosplit = false; - ui64 sizeToSplit = 0; - - TCreateQueueRequest req; - - TSqsOptions opts; - opts.AddLongOption('q', "queue-name", "name of creating queue") - .Required() - .StoreResult(&queue); - opts.AddLongOption("shards", "number of shards") - .Optional() - .StoreResult(&shards) - .DefaultValue(ToString(req.GetShards())); - opts.AddLongOption("partitions", "number of data partitions") - .Optional() - .StoreResult(&partitions) - .DefaultValue(ToString(req.GetPartitions())); - opts.AddLongOption("enable-out-of-order-execution", "enable internal tables out of order execution") - .NoArgument() - .SetFlag(&enableOutOfOrderExecution) - .DefaultValue("false"); - opts.AddLongOption("disable-out-of-order-execution", "disable internal tables out of order execution") - .NoArgument() - .SetFlag(&disableOutOfOrderExecution) - .DefaultValue("false"); - opts.AddLongOption("retention-seconds", "retention time in seconds") - .Optional() - .StoreResult(&retentionSeconds); - opts.AddLongOption("content-based-deduplication", "enable content based deduplication") - .NoArgument() - .SetFlag(&contentBasedDeduplication) - .DefaultValue("false"); - opts.AddLongOption("enable-auto-split", "enable autosplit for tables with message data") - .NoArgument() - .SetFlag(&enableAutosplit) - .DefaultValue("false"); - opts.AddLongOption("disable-auto-split", "disable autosplit for tables with message data") - .NoArgument() - .SetFlag(&disableAutosplit) - 
.DefaultValue("false"); - opts.AddLongOption("size-to-split", "size of message data datashard to split (bytes)") - .Optional() - .StoreResult(&sizeToSplit) - .DefaultValue(ToString(req.GetSizeToSplit())); - - TOptsParseResult res(&opts, argc, argv); - - req.MutableAuth()->SetUserName(opts.User); + bool enableOutOfOrderExecution = false; + bool disableOutOfOrderExecution = false; + bool contentBasedDeduplication = false; + bool enableAutosplit = false; + bool disableAutosplit = false; + ui64 sizeToSplit = 0; + + TCreateQueueRequest req; + + TSqsOptions opts; + opts.AddLongOption('q', "queue-name", "name of creating queue") + .Required() + .StoreResult(&queue); + opts.AddLongOption("shards", "number of shards") + .Optional() + .StoreResult(&shards) + .DefaultValue(ToString(req.GetShards())); + opts.AddLongOption("partitions", "number of data partitions") + .Optional() + .StoreResult(&partitions) + .DefaultValue(ToString(req.GetPartitions())); + opts.AddLongOption("enable-out-of-order-execution", "enable internal tables out of order execution") + .NoArgument() + .SetFlag(&enableOutOfOrderExecution) + .DefaultValue("false"); + opts.AddLongOption("disable-out-of-order-execution", "disable internal tables out of order execution") + .NoArgument() + .SetFlag(&disableOutOfOrderExecution) + .DefaultValue("false"); + opts.AddLongOption("retention-seconds", "retention time in seconds") + .Optional() + .StoreResult(&retentionSeconds); + opts.AddLongOption("content-based-deduplication", "enable content based deduplication") + .NoArgument() + .SetFlag(&contentBasedDeduplication) + .DefaultValue("false"); + opts.AddLongOption("enable-auto-split", "enable autosplit for tables with message data") + .NoArgument() + .SetFlag(&enableAutosplit) + .DefaultValue("false"); + opts.AddLongOption("disable-auto-split", "disable autosplit for tables with message data") + .NoArgument() + .SetFlag(&disableAutosplit) + .DefaultValue("false"); + opts.AddLongOption("size-to-split", "size of message data datashard to split (bytes)") + .Optional() + .StoreResult(&sizeToSplit) + .DefaultValue(ToString(req.GetSizeToSplit())); + + TOptsParseResult res(&opts, argc, argv); + + req.MutableAuth()->SetUserName(opts.User); req.SetQueueName(queue); - if (queue.EndsWith(".fifo")) { - auto fifoAttr = req.AddAttributes(); - fifoAttr->SetName("FifoQueue"); - fifoAttr->SetValue("true"); - } - if (contentBasedDeduplication) { - auto dedupAttr = req.AddAttributes(); - dedupAttr->SetName("ContentBasedDeduplication"); - dedupAttr->SetValue("true"); - } + if (queue.EndsWith(".fifo")) { + auto fifoAttr = req.AddAttributes(); + fifoAttr->SetName("FifoQueue"); + fifoAttr->SetValue("true"); + } + if (contentBasedDeduplication) { + auto dedupAttr = req.AddAttributes(); + dedupAttr->SetName("ContentBasedDeduplication"); + dedupAttr->SetValue("true"); + } req.SetShards(shards); req.SetPartitions(partitions); - if (enableOutOfOrderExecution) { - req.SetEnableOutOfOrderTransactionsExecution(true); - } else if (disableOutOfOrderExecution) { - req.SetEnableOutOfOrderTransactionsExecution(false); - } - opts.SetCredentials(req); + if (enableOutOfOrderExecution) { + req.SetEnableOutOfOrderTransactionsExecution(true); + } else if (disableOutOfOrderExecution) { + req.SetEnableOutOfOrderTransactionsExecution(false); + } + opts.SetCredentials(req); if (retentionSeconds) { auto* newAttribute = req.mutable_attributes()->Add(); @@ -193,20 +193,20 @@ static int HandleCreate(int argc, const char* argv[]) { newAttribute->SetValue(ToString(retentionSeconds)); } - if 
(enableAutosplit) { - req.SetEnableAutosplit(true); // explicitly - } else if (disableAutosplit) { - req.SetEnableAutosplit(false); // explicitly - } - if (req.GetEnableAutosplit()) { - if (sizeToSplit == 0) { - Cerr << "SizeToSplit can't be zero." << Endl; - return 1; - } - req.SetSizeToSplit(sizeToSplit); - } - - auto resp = TQueueClient(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)).CreateQueue(req); + if (enableAutosplit) { + req.SetEnableAutosplit(true); // explicitly + } else if (disableAutosplit) { + req.SetEnableAutosplit(false); // explicitly + } + if (req.GetEnableAutosplit()) { + if (sizeToSplit == 0) { + Cerr << "SizeToSplit can't be zero." << Endl; + return 1; + } + req.SetSizeToSplit(sizeToSplit); + } + + auto resp = TQueueClient(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)).CreateQueue(req); if (resp.HasError()) { Cerr << "Got error for queue : " @@ -216,7 +216,7 @@ static int HandleCreate(int argc, const char* argv[]) { } else { Cerr << "New queue has been created : " << resp.GetQueueName() << Endl; - Cout << resp.GetQueueUrl() << Endl; + Cout << resp.GetQueueUrl() << Endl; } return 0; } @@ -224,19 +224,19 @@ static int HandleCreate(int argc, const char* argv[]) { static int HandleDelete(int argc, const char* argv[]) { TString queue; - TSqsOptions opts; - opts.AddLongOption('q', "queue-name", "name of deleting queue") - .Required() - .StoreResult(&queue); + TSqsOptions opts; + opts.AddLongOption('q', "queue-name", "name of deleting queue") + .Required() + .StoreResult(&queue); - TOptsParseResult res(&opts, argc, argv); + TOptsParseResult res(&opts, argc, argv); TDeleteQueueRequest req; - req.MutableAuth()->SetUserName(opts.User); + req.MutableAuth()->SetUserName(opts.User); req.SetQueueName(queue); - opts.SetCredentials(req); + opts.SetCredentials(req); - auto resp = TQueueClient(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)).DeleteQueue(req); + auto resp = TQueueClient(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)).DeleteQueue(req); if (resp.HasError()) { Cerr << "Got error for queue : " @@ -251,15 +251,15 @@ static int HandleDelete(int argc, const char* argv[]) { } static int HandleList(int argc, const char* argv[]) { - TSqsOptions opts; + TSqsOptions opts; - TOptsParseResult res(&opts, argc, argv); + TOptsParseResult res(&opts, argc, argv); TListQueuesRequest req; - req.MutableAuth()->SetUserName(opts.User); - opts.SetCredentials(req); + req.MutableAuth()->SetUserName(opts.User); + opts.SetCredentials(req); - auto resp = TQueueClient(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)).ListQueues(req); + auto resp = TQueueClient(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)).ListQueues(req); if (resp.HasError()) { Cerr << "Got error: " @@ -279,31 +279,31 @@ static int HandleSend(int argc, const char* argv[]) { TString groupId; TString dedup; - TSqsOptions opts; - opts.AddLongOption('q', "queue-name", "name of queue") - .Required() - .StoreResult(&queueName); - opts.AddLongOption('d', "data", "message body") - .Required() - .StoreResult(&data); - opts.AddLongOption('g', "group", "message group id") - .Optional() - .StoreResult(&groupId); - opts.AddLongOption("dedup", "deduplication token") - .Optional() - .StoreResult(&dedup); - - TOptsParseResult res(&opts, argc, argv); + TSqsOptions opts; + opts.AddLongOption('q', "queue-name", "name of queue") + .Required() + .StoreResult(&queueName); + opts.AddLongOption('d', "data", "message body") + .Required() + .StoreResult(&data); + opts.AddLongOption('g', "group", "message group 
id") + .Optional() + .StoreResult(&groupId); + opts.AddLongOption("dedup", "deduplication token") + .Optional() + .StoreResult(&dedup); + + TOptsParseResult res(&opts, argc, argv); TSendMessageRequest req; - req.MutableAuth()->SetUserName(opts.User); + req.MutableAuth()->SetUserName(opts.User); req.SetQueueName(queueName); req.SetMessageBody(data); req.SetMessageGroupId(groupId); req.SetMessageDeduplicationId(dedup); - opts.SetCredentials(req); + opts.SetCredentials(req); - auto resp = TQueueClient(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)).SendMessage(req); + auto resp = TQueueClient(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)).SendMessage(req); if (resp.HasError()) { Cerr << "Got error for queue : " @@ -326,38 +326,38 @@ static int HandleRead(int argc, const char* argv[]) { ui64 waitTime = 0; bool keep = false; - TSqsOptions opts; - opts.AddLongOption('q', "queue-name", "name of deleting queue") - .Required() - .StoreResult(&queueName); - opts.AddLongOption('a', "attempt-id", "attempt-id") - .Optional() - .StoreResult(&attemptId); - opts.AddLongOption("count", "read count") - .Optional() - .StoreResult(&count) - .DefaultValue("1"); - opts.AddLongOption("keep", "don't commit readed messages") - .Optional() - .NoArgument() - .SetFlag(&keep); - opts.AddLongOption("wait", "wait time in seconds") - .Optional() - .StoreResult(&waitTime) - .DefaultValue("0"); - - TOptsParseResult res(&opts, argc, argv); - - TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); + TSqsOptions opts; + opts.AddLongOption('q', "queue-name", "name of deleting queue") + .Required() + .StoreResult(&queueName); + opts.AddLongOption('a', "attempt-id", "attempt-id") + .Optional() + .StoreResult(&attemptId); + opts.AddLongOption("count", "read count") + .Optional() + .StoreResult(&count) + .DefaultValue("1"); + opts.AddLongOption("keep", "don't commit readed messages") + .Optional() + .NoArgument() + .SetFlag(&keep); + opts.AddLongOption("wait", "wait time in seconds") + .Optional() + .StoreResult(&waitTime) + .DefaultValue("0"); + + TOptsParseResult res(&opts, argc, argv); + + TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); TReceiveMessageRequest req; - req.MutableAuth()->SetUserName(opts.User); + req.MutableAuth()->SetUserName(opts.User); req.SetQueueName(queueName); req.SetMaxNumberOfMessages(count); req.SetVisibilityTimeout(60); req.SetReceiveRequestAttemptId(attemptId); req.SetWaitTimeSeconds(waitTime); - opts.SetCredentials(req); + opts.SetCredentials(req); auto resp = client.ReceiveMessage(req); if (resp.HasError()) { @@ -384,7 +384,7 @@ static int HandleRead(int argc, const char* argv[]) { for (auto ri = receipts.begin(); ri != receipts.end(); ++ri) { TDeleteMessageRequest d; - d.MutableAuth()->SetUserName(opts.User); + d.MutableAuth()->SetUserName(opts.User); d.SetQueueName(queueName); d.SetReceiptHandle(*ri); auto del = client.DeleteMessage(d); @@ -409,19 +409,19 @@ static int HandleRead(int argc, const char* argv[]) { static int HandlePurge(int argc, const char* argv[]) { TString queueName; - TSqsOptions opts; - opts.AddLongOption('q', "queue-name", "name of deleting queue") - .Required() - .StoreResult(&queueName); + TSqsOptions opts; + opts.AddLongOption('q', "queue-name", "name of deleting queue") + .Required() + .StoreResult(&queueName); - TOptsParseResult res(&opts, argc, argv); + TOptsParseResult res(&opts, argc, argv); - TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); + TQueueClient 
client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); TPurgeQueueRequest req; - req.MutableAuth()->SetUserName(opts.User); + req.MutableAuth()->SetUserName(opts.User); req.SetQueueName(queueName); - opts.SetCredentials(req); + opts.SetCredentials(req); auto resp = client.PurgeQueue(req); if (resp.HasError()) { @@ -440,35 +440,35 @@ static int HandlePurge(int argc, const char* argv[]) { static int HandleUser(int argc, const char* argv[]) { bool list = false; bool del = false; - TString name; - - TSqsOptions opts; - opts.AddLongOption("list", "list configured users") - .Optional() - .NoArgument() - .SetFlag(&list); - opts.AddLongOption("delete", "delete user") - .Optional() - .NoArgument() - .SetFlag(&del); - opts.AddLongOption('n', "name", "name of user to create/delete") - .Optional() - .RequiredArgument("NAME") - .StoreResult(&name); - - TOptsParseResult res(&opts, argc, argv); - - TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); - - if (!list && !name) { - Cerr << "Name parameter is required for creation/deletion." << Endl; - return 1; - } - + TString name; + + TSqsOptions opts; + opts.AddLongOption("list", "list configured users") + .Optional() + .NoArgument() + .SetFlag(&list); + opts.AddLongOption("delete", "delete user") + .Optional() + .NoArgument() + .SetFlag(&del); + opts.AddLongOption('n', "name", "name of user to create/delete") + .Optional() + .RequiredArgument("NAME") + .StoreResult(&name); + + TOptsParseResult res(&opts, argc, argv); + + TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); + + if (!list && !name) { + Cerr << "Name parameter is required for creation/deletion." << Endl; + return 1; + } + if (list) { TListUsersRequest req; req.MutableAuth()->SetUserName(GetUsername()); - opts.SetCredentials(req); + opts.SetCredentials(req); auto resp = client.ListUsers(req); for (size_t i = 0; i < resp.UserNamesSize(); ++i) { @@ -476,14 +476,14 @@ static int HandleUser(int argc, const char* argv[]) { } } else if (del) { TDeleteUserRequest req; - req.MutableAuth()->SetUserName(opts.User); - req.SetUserName(name); - opts.SetCredentials(req); + req.MutableAuth()->SetUserName(opts.User); + req.SetUserName(name); + opts.SetCredentials(req); auto resp = client.DeleteUser(req); if (resp.HasError()) { Cerr << "Got error for user : " - << opts.User << " : " + << opts.User << " : " << resp.GetError().GetMessage() << Endl; return 1; } else { @@ -492,14 +492,14 @@ static int HandleUser(int argc, const char* argv[]) { } } else { TCreateUserRequest req; - req.MutableAuth()->SetUserName(opts.User); - req.SetUserName(name); - opts.SetCredentials(req); + req.MutableAuth()->SetUserName(opts.User); + req.SetUserName(name); + opts.SetCredentials(req); auto resp = client.CreateUser(req); if (resp.HasError()) { Cerr << "Got error for user : " - << opts.User << " : " + << opts.User << " : " << resp.GetError().GetMessage() << Endl; return 1; } else { @@ -518,27 +518,27 @@ static int HandlePermissions(int argc, const char* argv[]) { TString setRequest; bool clearACL; - TSqsOptions opts(false); - opts.AddLongOption('q', "resource", "resource path") - .Required() - .StoreResult(&resource); - opts.AddLongOption('g', "grant", "grant permissions to specified user") - .Optional() - .StoreResult(&grantRequest); - opts.AddLongOption('r', "revoke", "revoke permissions from specified user") - .Optional() - .StoreResult(&revokeRequest); - opts.AddLongOption('x', "set", "set permissions for specified user") - .Optional() - .StoreResult(&setRequest); - 
opts.AddLongOption('c', "clear-acl", "clear all acl for node") - .Optional() - .NoArgument() - .SetFlag(&clearACL); - - TOptsParseResult res(&opts, argc, argv); - - TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); + TSqsOptions opts(false); + opts.AddLongOption('q', "resource", "resource path") + .Required() + .StoreResult(&resource); + opts.AddLongOption('g', "grant", "grant permissions to specified user") + .Optional() + .StoreResult(&grantRequest); + opts.AddLongOption('r', "revoke", "revoke permissions from specified user") + .Optional() + .StoreResult(&revokeRequest); + opts.AddLongOption('x', "set", "set permissions for specified user") + .Optional() + .StoreResult(&setRequest); + opts.AddLongOption('c', "clear-acl", "clear all acl for node") + .Optional() + .NoArgument() + .SetFlag(&clearACL); + + TOptsParseResult res(&opts, argc, argv); + + TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); TModifyPermissionsRequest req; @@ -572,7 +572,7 @@ static int HandlePermissions(int argc, const char* argv[]) { return 1; } - opts.SetCredentials(req); + opts.SetCredentials(req); if (clearACL) { req.SetClearACL(true); @@ -595,19 +595,19 @@ static int HandlePermissions(int argc, const char* argv[]) { static int HandleListPermissions(int argc, const char* argv[]) { TString path; - TSqsOptions opts(false); - opts.AddLongOption('P', "path", "path to node") - .Required() - .StoreResult(&path); + TSqsOptions opts(false); + opts.AddLongOption('P', "path", "path to node") + .Required() + .StoreResult(&path); - TOptsParseResult res(&opts, argc, argv); + TOptsParseResult res(&opts, argc, argv); - TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); + TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); TListPermissionsRequest req; req.SetPath(path); - opts.SetCredentials(req); + opts.SetCredentials(req); auto resp = client.ListPermissions(req); if (resp.HasError()) { @@ -624,96 +624,96 @@ static int HandleListPermissions(int argc, const char* argv[]) { -static int HandleGetQueueAttributes(int argc, const char* argv[]) { - TString queueName; - - TSqsOptions opts; - opts.AddLongOption('q', "queue-name", "name of deleting queue") - .Required() - .StoreResult(&queueName); - - TOptsParseResult res(&opts, argc, argv); - - TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); - - TGetQueueAttributesRequest req; - req.MutableAuth()->SetUserName(opts.User); - req.SetQueueName(queueName); - req.AddNames("All"); - opts.SetCredentials(req); - auto resp = client.GetQueueAttributes(req); - - if (resp.HasError()) { - Cerr << "Got error for queue : " - << queueName << " : " - << resp.GetError().GetMessage() << Endl; - return 1; - } else { - Cout << resp.Utf8DebugString() << Endl; - } - return 0; -} - -static int HandleSetQueueAttributes(int argc, const char* argv[]) { - TString queueName; - TString name, value; - - TSqsOptions opts; - opts.AddLongOption('q', "queue-name", "name of deleting queue") - .Required() - .StoreResult(&queueName); - opts.AddLongOption('n', "name", "name of queue attribute") - .Required() - .StoreResult(&name); - opts.AddLongOption('v', "value", "value of queue attribute to set") - .Required() - .StoreResult(&value); - - TOptsParseResult res(&opts, argc, argv); - - TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); - - TSetQueueAttributesRequest req; - req.MutableAuth()->SetUserName(opts.User); - req.SetQueueName(queueName); - 
opts.SetCredentials(req); - auto* attr = req.AddAttributes(); - attr->SetName(name); - attr->SetValue(value); - auto resp = client.SetQueueAttributes(req); - - if (resp.HasError()) { - Cerr << "Got error for queue : " - << queueName << " : " - << resp.GetError().GetMessage() << Endl; - return 1; - } else { - Cout << "OK" << Endl; - } - return 0; -} - +static int HandleGetQueueAttributes(int argc, const char* argv[]) { + TString queueName; + + TSqsOptions opts; + opts.AddLongOption('q', "queue-name", "name of deleting queue") + .Required() + .StoreResult(&queueName); + + TOptsParseResult res(&opts, argc, argv); + + TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); + + TGetQueueAttributesRequest req; + req.MutableAuth()->SetUserName(opts.User); + req.SetQueueName(queueName); + req.AddNames("All"); + opts.SetCredentials(req); + auto resp = client.GetQueueAttributes(req); + + if (resp.HasError()) { + Cerr << "Got error for queue : " + << queueName << " : " + << resp.GetError().GetMessage() << Endl; + return 1; + } else { + Cout << resp.Utf8DebugString() << Endl; + } + return 0; +} + +static int HandleSetQueueAttributes(int argc, const char* argv[]) { + TString queueName; + TString name, value; + + TSqsOptions opts; + opts.AddLongOption('q', "queue-name", "name of deleting queue") + .Required() + .StoreResult(&queueName); + opts.AddLongOption('n', "name", "name of queue attribute") + .Required() + .StoreResult(&name); + opts.AddLongOption('v', "value", "value of queue attribute to set") + .Required() + .StoreResult(&value); + + TOptsParseResult res(&opts, argc, argv); + + TQueueClient client(TClientOptions().SetHost(opts.Host).SetPort(opts.Port)); + + TSetQueueAttributesRequest req; + req.MutableAuth()->SetUserName(opts.User); + req.SetQueueName(queueName); + opts.SetCredentials(req); + auto* attr = req.AddAttributes(); + attr->SetName(name); + attr->SetValue(value); + auto resp = client.SetQueueAttributes(req); + + if (resp.HasError()) { + Cerr << "Got error for queue : " + << queueName << " : " + << resp.GetError().GetMessage() << Endl; + return 1; + } else { + Cout << "OK" << Endl; + } + return 0; +} + int main(int argc, const char* argv[]) { try { - TModChooser mods; - mods.SetDescription("SQS client tool"); - mods.AddMode("change", HandleChange, "change visibility timeout"); - mods.AddMode("create", HandleCreate, "create queue"); - mods.AddMode("delete", HandleDelete, "delete queue"); - mods.AddMode("list", HandleList, "list existing queues"); - mods.AddMode("read", HandleRead, "receive and delete message"); - mods.AddMode("send", HandleSend, "send message"); - mods.AddMode("purge", HandlePurge, "purge queue"); - mods.AddMode("user", HandleUser, "initialize user"); - mods.AddMode("permissions", HandlePermissions, "modify queue permissions"); - mods.AddMode("get-attributes", HandleGetQueueAttributes, "get queue attributes"); - mods.AddMode("set-attributes", HandleSetQueueAttributes, "set queue attributes"); - mods.AddMode("list-permissions", HandleListPermissions, "list permissions"); - return mods.Run(argc, argv); + TModChooser mods; + mods.SetDescription("SQS client tool"); + mods.AddMode("change", HandleChange, "change visibility timeout"); + mods.AddMode("create", HandleCreate, "create queue"); + mods.AddMode("delete", HandleDelete, "delete queue"); + mods.AddMode("list", HandleList, "list existing queues"); + mods.AddMode("read", HandleRead, "receive and delete message"); + mods.AddMode("send", HandleSend, "send message"); + mods.AddMode("purge", HandlePurge, 
"purge queue"); + mods.AddMode("user", HandleUser, "initialize user"); + mods.AddMode("permissions", HandlePermissions, "modify queue permissions"); + mods.AddMode("get-attributes", HandleGetQueueAttributes, "get queue attributes"); + mods.AddMode("set-attributes", HandleSetQueueAttributes, "set queue attributes"); + mods.AddMode("list-permissions", HandleListPermissions, "list permissions"); + return mods.Run(argc, argv); } catch (const TQueueException& e) { Cerr << "Queue Error: " - << e.Error().GetErrorCode() << " (" << e.Status() << "): " << e.Message() << Endl; - Cerr << "Request id: " << e.GetRequestId() << Endl; + << e.Error().GetErrorCode() << " (" << e.Status() << "): " << e.Message() << Endl; + Cerr << "Request id: " << e.GetRequestId() << Endl; return 1; } catch (...) { Cerr << CurrentExceptionMessage() << Endl; diff --git a/ydb/core/ymq/client/bin/ya.make b/ydb/core/ymq/client/bin/ya.make index d00ca35587e..baf6cfdec53 100644 --- a/ydb/core/ymq/client/bin/ya.make +++ b/ydb/core/ymq/client/bin/ya.make @@ -1,8 +1,8 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + PROGRAM(sqs) SRCS( diff --git a/ydb/core/ymq/client/cpp/client.cpp b/ydb/core/ymq/client/cpp/client.cpp index c96f2dcaab4..7d6827987bb 100644 --- a/ydb/core/ymq/client/cpp/client.cpp +++ b/ydb/core/ymq/client/cpp/client.cpp @@ -4,13 +4,13 @@ #include <util/generic/yexception.h> #include <util/string/join.h> -#include <util/system/defaults.h> +#include <util/system/defaults.h> #include <util/system/event.h> #include <util/system/user.h> using namespace NKikimr::NGRpcProxy; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TQueueClient::TImpl { public: @@ -21,10 +21,10 @@ public: } #define METHOD_IMPL(name, hint) \ - Y_CAT(Y_CAT(T, name), Response) name(const Y_CAT(Y_CAT(T, name), Request)& req) { \ + Y_CAT(Y_CAT(T, name), Response) name(const Y_CAT(Y_CAT(T, name), Request)& req) { \ NKikimrClient::TSqsRequest request; \ - Y_CAT(Y_CAT(T, name), Response) resp; \ - request.Y_CAT(Mutable, name)()->CopyFrom(req); \ + Y_CAT(Y_CAT(T, name), Response) resp; \ + request.Y_CAT(Mutable, name)()->CopyFrom(req); \ TAutoEvent e; \ Client_.SqsRequest(request, [e, &resp] (const NGRpcProxy::TGrpcError* error, const NKikimrClient::TSqsResponse& result) mutable \ { \ @@ -32,14 +32,14 @@ public: resp.MutableError()->SetStatus(502); \ resp.MutableError()->SetMessage(error->first); \ } else { \ - resp.CopyFrom(result.Y_CAT(Get, name)()); \ + resp.CopyFrom(result.Y_CAT(Get, name)()); \ } \ e.Signal(); \ } \ ); \ e.WaitI(); \ if (resp.HasError() && Options_.Throw) { \ - ythrow TQueueException(resp.GetError(), resp.GetRequestId()); \ + ythrow TQueueException(resp.GetError(), resp.GetRequestId()); \ } \ return resp; \ } @@ -57,8 +57,8 @@ public: METHOD_IMPL(ReceiveMessage, "can't receive a message"); METHOD_IMPL(SendMessage, "can't enqueue a message"); METHOD_IMPL(ModifyPermissions, "can't modify permissions"); - METHOD_IMPL(GetQueueAttributes, "can't get queue attributes"); - METHOD_IMPL(SetQueueAttributes, "can't set queue attributes"); + METHOD_IMPL(GetQueueAttributes, "can't get queue attributes"); + METHOD_IMPL(SetQueueAttributes, "can't set queue attributes"); METHOD_IMPL(ListPermissions, "can't list permissions"); #undef METHOD_IMPL @@ -134,16 +134,16 @@ TModifyPermissionsResponse TQueueClient::ModifyPermissions(const TModifyPermissi return Impl_->ModifyPermissions(req); } -TGetQueueAttributesResponse TQueueClient::GetQueueAttributes(const TGetQueueAttributesRequest& req) { - return Impl_->GetQueueAttributes(req); -} - 
-TSetQueueAttributesResponse TQueueClient::SetQueueAttributes(const TSetQueueAttributesRequest& req) { - return Impl_->SetQueueAttributes(req); -} - +TGetQueueAttributesResponse TQueueClient::GetQueueAttributes(const TGetQueueAttributesRequest& req) { + return Impl_->GetQueueAttributes(req); +} + +TSetQueueAttributesResponse TQueueClient::SetQueueAttributes(const TSetQueueAttributesRequest& req) { + return Impl_->SetQueueAttributes(req); +} + TListPermissionsResponse TQueueClient::ListPermissions(const TListPermissionsRequest& req) { return Impl_->ListPermissions(req); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/client/cpp/client.h b/ydb/core/ymq/client/cpp/client.h index d42ed843264..c0d87813064 100644 --- a/ydb/core/ymq/client/cpp/client.h +++ b/ydb/core/ymq/client/cpp/client.h @@ -4,14 +4,14 @@ #include <util/generic/yexception.h> #include <util/generic/vector.h> -#include <util/system/defaults.h> +#include <util/system/defaults.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { struct TClientOptions { #define DECLARE_FIELD(name, type, default) \ type name{default}; \ - TClientOptions& Y_CAT(Set, name)(const type& value) { \ + TClientOptions& Y_CAT(Set, name)(const type& value) { \ name = value; \ return *this; \ } @@ -31,32 +31,32 @@ public: TQueueException() { } - TQueueException(const TError& error, const TString& requestId) + TQueueException(const TError& error, const TString& requestId) : Error_(error) - , RequestId(requestId) + , RequestId(requestId) { Append(error.GetMessage()); } - TString Message() const { + TString Message() const { return Error_.GetMessage(); } - int Status() const { + int Status() const { return Error_.GetStatus(); } - const TError& Error() const { + const TError& Error() const { return Error_; } - const TString& GetRequestId() const { - return RequestId; - } - + const TString& GetRequestId() const { + return RequestId; + } + private: TError Error_; - TString RequestId; + TString RequestId; }; class TQueueClient { @@ -92,10 +92,10 @@ public: TModifyPermissionsResponse ModifyPermissions(const TModifyPermissionsRequest& req); - TGetQueueAttributesResponse GetQueueAttributes(const TGetQueueAttributesRequest& req); - - TSetQueueAttributesResponse SetQueueAttributes(const TSetQueueAttributesRequest& req); - + TGetQueueAttributesResponse GetQueueAttributes(const TGetQueueAttributesRequest& req); + + TSetQueueAttributesResponse SetQueueAttributes(const TSetQueueAttributesRequest& req); + TListPermissionsResponse ListPermissions(const TListPermissionsRequest& req); private: @@ -103,4 +103,4 @@ private: THolder<TImpl> Impl_; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/client/cpp/ya.make b/ydb/core/ymq/client/cpp/ya.make index 96d064b19c8..8d9c44670e2 100644 --- a/ydb/core/ymq/client/cpp/ya.make +++ b/ydb/core/ymq/client/cpp/ya.make @@ -1,8 +1,8 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + LIBRARY() SRCS( diff --git a/ydb/core/ymq/client/ya.make b/ydb/core/ymq/client/ya.make index fdb750c7183..19f199a8e8d 100644 --- a/ydb/core/ymq/client/ya.make +++ b/ydb/core/ymq/client/ya.make @@ -1,8 +1,8 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + RECURSE( bin cpp diff --git a/ydb/core/ymq/http/http.cpp b/ydb/core/ymq/http/http.cpp index 5d0963bfb2e..f27db247a08 100644 --- a/ydb/core/ymq/http/http.cpp +++ b/ydb/core/ymq/http/http.cpp @@ -18,61 +18,61 @@ #include <library/cpp/http/server/response.h> #include <library/cpp/http/misc/parsed_request.h> -#include 
<util/generic/guid.h> +#include <util/generic/guid.h> #include <util/generic/hash.h> #include <util/generic/set.h> -#include <util/generic/hash_set.h> -#include <util/network/init.h> +#include <util/generic/hash_set.h> +#include <util/network/init.h> #include <util/string/ascii.h> -#include <util/string/builder.h> +#include <util/string/builder.h> #include <library/cpp/string_utils/quote/quote.h> #include <util/string/split.h> #include <library/cpp/string_utils/url/url.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { using NKikimrClient::TSqsRequest; using NKikimrClient::TSqsResponse; -namespace { - +namespace { + constexpr TStringBuf AUTHORIZATION_HEADER = "authorization"; constexpr TStringBuf SECURITY_TOKEN_HEADER = "x-amz-security-token"; constexpr TStringBuf IAM_TOKEN_HEADER = "x-yacloud-subjecttoken"; constexpr TStringBuf FORWARDED_IP_HEADER = "x-forwarded-for"; constexpr TStringBuf REQUEST_ID_HEADER = "x-request-id"; - -const std::vector<TStringBuf> PRIVATE_TOKENS_HEADERS = { - SECURITY_TOKEN_HEADER, - IAM_TOKEN_HEADER, -}; - -const TString CREDENTIAL_PARAM = "credential"; - + +const std::vector<TStringBuf> PRIVATE_TOKENS_HEADERS = { + SECURITY_TOKEN_HEADER, + IAM_TOKEN_HEADER, +}; + +const TString CREDENTIAL_PARAM = "credential"; + constexpr TStringBuf PRIVATE_REQUEST_PATH_PREFIX = "/private"; - -const TSet<TString> ModifyPermissionsActions = {"GrantPermissions", "RevokePermissions", "SetPermissions"}; - -bool IsPrivateTokenHeader(TStringBuf headerName) { - for (const TStringBuf h : PRIVATE_TOKENS_HEADERS) { - if (AsciiEqualsIgnoreCase(h, headerName)) { - return true; - } - } - return false; -} - -class THttpCallback : public IReplyCallback { + +const TSet<TString> ModifyPermissionsActions = {"GrantPermissions", "RevokePermissions", "SetPermissions"}; + +bool IsPrivateTokenHeader(TStringBuf headerName) { + for (const TStringBuf h : PRIVATE_TOKENS_HEADERS) { + if (AsciiEqualsIgnoreCase(h, headerName)) { + return true; + } + } + return false; +} + +class THttpCallback : public IReplyCallback { public: - THttpCallback(THttpRequest* req, const TSqsRequest& requestParams) + THttpCallback(THttpRequest* req, const TSqsRequest& requestParams) : Request_(req) - , RequestParams_(requestParams) + , RequestParams_(requestParams) { } - void DoSendReply(const TSqsResponse& resp) override { + void DoSendReply(const TSqsResponse& resp) override { auto response = ResponseToAmazonXmlFormat(resp); - LogRequest(resp, response); + LogRequest(resp, response); response.FolderId = resp.GetFolderId(); response.IsFifo = resp.GetIsFifo(); @@ -81,78 +81,78 @@ public: Request_->SendResponse(response); } - -private: - TString LogString(const TSqsResponse& resp) const { - TStringBuilder rec; - rec << "Request: " << SecureShortUtf8DebugString(RequestParams_) - << ", Response: " << SecureShortUtf8DebugString(resp); - return rec; - } - - void LogRequest(const TSqsResponse& resp, const TSqsHttpResponse& xmlResp) const { - const int status = xmlResp.StatusCode; - const bool is500 = status >= 500 && status < 600; - auto priority = is500 ? 
NActors::NLog::PRI_WARN : NActors::NLog::PRI_DEBUG; - RLOG_SQS_REQ_BASE(*Request_->GetServer()->GetActorSystem(), priority, Request_->GetRequestId(), LogString(resp)); - } - -private: - THttpRequest* const Request_; - const TSqsRequest RequestParams_; -}; - -class TPingHttpCallback : public IPingReplyCallback { -public: - TPingHttpCallback(THttpRequest* req) - : Request_(req) - { - } - - void DoSendReply() override { - Request_->SendResponse(TSqsHttpResponse("pong", 200, PLAIN_TEXT_CONTENT_TYPE)); - } - + private: - THttpRequest* const Request_; + TString LogString(const TSqsResponse& resp) const { + TStringBuilder rec; + rec << "Request: " << SecureShortUtf8DebugString(RequestParams_) + << ", Response: " << SecureShortUtf8DebugString(resp); + return rec; + } + + void LogRequest(const TSqsResponse& resp, const TSqsHttpResponse& xmlResp) const { + const int status = xmlResp.StatusCode; + const bool is500 = status >= 500 && status < 600; + auto priority = is500 ? NActors::NLog::PRI_WARN : NActors::NLog::PRI_DEBUG; + RLOG_SQS_REQ_BASE(*Request_->GetServer()->GetActorSystem(), priority, Request_->GetRequestId(), LogString(resp)); + } + +private: + THttpRequest* const Request_; + const TSqsRequest RequestParams_; }; -} // namespace - -THttpRequest::THttpRequest(TAsyncHttpServer* p) +class TPingHttpCallback : public IPingReplyCallback { +public: + TPingHttpCallback(THttpRequest* req) + : Request_(req) + { + } + + void DoSendReply() override { + Request_->SendResponse(TSqsHttpResponse("pong", 200, PLAIN_TEXT_CONTENT_TYPE)); + } + +private: + THttpRequest* const Request_; +}; + +} // namespace + +THttpRequest::THttpRequest(TAsyncHttpServer* p) : Parent_(p) { - Parent_->UpdateConnectionsCountCounter(); - DebugInfo->UnparsedHttpRequests.emplace(this); + Parent_->UpdateConnectionsCountCounter(); + DebugInfo->UnparsedHttpRequests.emplace(this); } -THttpRequest::~THttpRequest() { - Parent_->UpdateConnectionsCountCounter(); - DebugInfo->EraseHttpRequest(RequestId_, this); -} - -void THttpRequest::SendResponse(const TSqsHttpResponse& r) { - auto* parent = Parent_; - auto& actorSystem = *Parent_->ActorSystem_; - const TString reqId = RequestId_; +THttpRequest::~THttpRequest() { + Parent_->UpdateConnectionsCountCounter(); + DebugInfo->EraseHttpRequest(RequestId_, this); +} + +void THttpRequest::SendResponse(const TSqsHttpResponse& r) { + auto* parent = Parent_; + auto& actorSystem = *Parent_->ActorSystem_; + const TString reqId = RequestId_; Response_ = r; try { - static_cast<IObjectInQueue*>(this)->Process(nullptr); // calls DoReply() + static_cast<IObjectInQueue*>(this)->Process(nullptr); // calls DoReply() } catch (...) { - // Note: The 'this' pointer has been destroyed inside Process. - RLOG_SQS_REQ_BASE_ERROR(actorSystem, reqId, "Error while sending response: " << CurrentExceptionMessage()); - INC_COUNTER(parent->HttpCounters_, InternalExceptions); + // Note: The 'this' pointer has been destroyed inside Process. 
+ RLOG_SQS_REQ_BASE_ERROR(actorSystem, reqId, "Error while sending response: " << CurrentExceptionMessage()); + INC_COUNTER(parent->HttpCounters_, InternalExceptions); } } -void THttpRequest::WriteResponse(const TReplyParams& replyParams, const TSqsHttpResponse& response) { - LogHttpRequestResponse(replyParams, response); - THttpResponse httpResponse(static_cast<HttpCodes>(response.StatusCode)); - if (response.ContentType) { - httpResponse.SetContent(response.Body, response.ContentType); - } - +void THttpRequest::WriteResponse(const TReplyParams& replyParams, const TSqsHttpResponse& response) { + LogHttpRequestResponse(replyParams, response); + THttpResponse httpResponse(static_cast<HttpCodes>(response.StatusCode)); + if (response.ContentType) { + httpResponse.SetContent(response.Body, response.ContentType); + } + if (Parent_->Config.GetYandexCloudMode() && !IsPrivateRequest_) { // Send request attributes to the metering actor auto reportRequestAttributes = MakeHolder<TSqsEvents::TEvReportProcessedRequestAttributes>(); @@ -171,77 +171,77 @@ void THttpRequest::WriteResponse(const TReplyParams& replyParams, const TSqsHttp Parent_->ActorSystem_->Send(MakeSqsMeteringServiceID(), reportRequestAttributes.Release()); } - httpResponse.OutTo(replyParams.Output); -} - -TString THttpRequest::LogHttpRequestResponseCommonInfoString() { - const TDuration duration = TInstant::Now() - StartTime_; - TStringBuilder logString; - logString << "Request done."; - if (UserName_) { - logString << " User [" << UserName_ << "]"; - } - if (QueueName_) { - logString << " Queue [" << QueueName_ << "]"; - } - if (Action_ != EAction::Unknown) { - logString << " Action [" << ActionToString(Action_) << "]"; - } - logString << " IP [" << SourceAddress_ << "] Duration [" << duration.MilliSeconds() << "ms]"; - return logString; -} - -TString THttpRequest::LogHttpRequestResponseDebugInfoString(const TReplyParams& replyParams, const TSqsHttpResponse& response) { - TStringBuilder rec; - // request - rec << "Http request: {user: " << UserName_ - << ", action: " << ActionToString(Action_) - << ", method=\"" << HttpMethod << "\", line=\"" << replyParams.Input.FirstLine() << "\"}"; - // response - rec << ", http response: {code=" << response.StatusCode; - if (response.StatusCode != 200) { // Write error description (it doesn't contain user fields that we can't write to log) - rec << ", response=\"" << response.Body << "\""; - } - rec << "}"; - return rec; + httpResponse.OutTo(replyParams.Output); } -void THttpRequest::LogHttpRequestResponse(const TReplyParams& replyParams, const TSqsHttpResponse& response) { - auto& actorSystem = *Parent_->ActorSystem_; - RLOG_SQS_BASE_INFO(actorSystem, LogHttpRequestResponseCommonInfoString()); - - const bool is500 = response.StatusCode >= 500 && response.StatusCode < 600; - auto priority = is500 ? 
NActors::NLog::PRI_WARN : NActors::NLog::PRI_DEBUG; - RLOG_SQS_BASE(actorSystem, priority, LogHttpRequestResponseDebugInfoString(replyParams, response)); -} - -bool THttpRequest::DoReply(const TReplyParams& p) { - // this function is called two times +TString THttpRequest::LogHttpRequestResponseCommonInfoString() { + const TDuration duration = TInstant::Now() - StartTime_; + TStringBuilder logString; + logString << "Request done."; + if (UserName_) { + logString << " User [" << UserName_ << "]"; + } + if (QueueName_) { + logString << " Queue [" << QueueName_ << "]"; + } + if (Action_ != EAction::Unknown) { + logString << " Action [" << ActionToString(Action_) << "]"; + } + logString << " IP [" << SourceAddress_ << "] Duration [" << duration.MilliSeconds() << "ms]"; + return logString; +} + +TString THttpRequest::LogHttpRequestResponseDebugInfoString(const TReplyParams& replyParams, const TSqsHttpResponse& response) { + TStringBuilder rec; + // request + rec << "Http request: {user: " << UserName_ + << ", action: " << ActionToString(Action_) + << ", method=\"" << HttpMethod << "\", line=\"" << replyParams.Input.FirstLine() << "\"}"; + // response + rec << ", http response: {code=" << response.StatusCode; + if (response.StatusCode != 200) { // Write error description (it doesn't contain user fields that we can't write to log) + rec << ", response=\"" << response.Body << "\""; + } + rec << "}"; + return rec; +} + +void THttpRequest::LogHttpRequestResponse(const TReplyParams& replyParams, const TSqsHttpResponse& response) { + auto& actorSystem = *Parent_->ActorSystem_; + RLOG_SQS_BASE_INFO(actorSystem, LogHttpRequestResponseCommonInfoString()); + + const bool is500 = response.StatusCode >= 500 && response.StatusCode < 600; + auto priority = is500 ? NActors::NLog::PRI_WARN : NActors::NLog::PRI_DEBUG; + RLOG_SQS_BASE(actorSystem, priority, LogHttpRequestResponseDebugInfoString(replyParams, response)); +} + +bool THttpRequest::DoReply(const TReplyParams& p) { + // this function is called two times if (Response_.Defined()) { - WriteResponse(p, *Response_); + WriteResponse(p, *Response_); return true; } try { ParseHeaders(p.Input); - + if (SetupPing(p)) { return false; } - + ParseRequest(p.Input); - const TDuration parseTime = TInstant::Now() - StartTime_; - RLOG_SQS_BASE_DEBUG(*Parent_->ActorSystem_, "Parse time: [" << parseTime.MilliSeconds() << "ms]"); + const TDuration parseTime = TInstant::Now() - StartTime_; + RLOG_SQS_BASE_DEBUG(*Parent_->ActorSystem_, "Parse time: [" << parseTime.MilliSeconds() << "ms]"); RLOG_SQS_BASE_INFO( *Parent_->ActorSystem_, "Start request. 
User [" << UserName_ << "] Queue [" << QueueName_ << "], Cloud [" << AccountName_ << "], Folder [" << FolderId_ << "] Action [" << ActionToString(Action_) << "] IP [" << SourceAddress_ << "]" ); - + if (!Parent_->Config.GetYandexCloudMode() && UserName_.empty()) { - WriteResponse(p, MakeErrorXmlResponse(NErrors::MISSING_PARAMETER, Parent_->AggregatedUserCounters_.Get(), "No user name was provided.")); + WriteResponse(p, MakeErrorXmlResponse(NErrors::MISSING_PARAMETER, Parent_->AggregatedUserCounters_.Get(), "No user name was provided.")); return true; } @@ -249,32 +249,32 @@ bool THttpRequest::DoReply(const TReplyParams& p) { return false; } else { if (Response_.Defined()) { - WriteResponse(p, *Response_); + WriteResponse(p, *Response_); } else { - WriteResponse(p, MakeErrorXmlResponse(NErrors::INTERNAL_FAILURE, Parent_->AggregatedUserCounters_.Get())); + WriteResponse(p, MakeErrorXmlResponse(NErrors::INTERNAL_FAILURE, Parent_->AggregatedUserCounters_.Get())); } return true; } } catch (...) { - if (UserCounters_) { - INC_COUNTER(UserCounters_, RequestExceptions); - } else if (Parent_->HttpCounters_) { - INC_COUNTER(Parent_->HttpCounters_, RequestExceptions); - } + if (UserCounters_) { + INC_COUNTER(UserCounters_, RequestExceptions); + } else if (Parent_->HttpCounters_) { + INC_COUNTER(Parent_->HttpCounters_, RequestExceptions); + } - RLOG_SQS_BASE_INFO(*Parent_->ActorSystem_, "http exception: " + RLOG_SQS_BASE_INFO(*Parent_->ActorSystem_, "http exception: " << "message=" << CurrentExceptionMessage()); - WriteResponse(p, MakeErrorXmlResponseFromCurrentException(Parent_->AggregatedUserCounters_.Get(), RequestId_)); + WriteResponse(p, MakeErrorXmlResponseFromCurrentException(Parent_->AggregatedUserCounters_.Get(), RequestId_)); return true; } } -TString THttpRequest::GetRequestPathPart(TStringBuf path, size_t partIdx) const { - if (IsPrivateRequest_) { - path.SkipPrefix(PRIVATE_REQUEST_PATH_PREFIX); - } - +TString THttpRequest::GetRequestPathPart(TStringBuf path, size_t partIdx) const { + if (IsPrivateRequest_) { + path.SkipPrefix(PRIVATE_REQUEST_PATH_PREFIX); + } + TVector<TStringBuf> items; StringSplitter(path).Split('/').AddTo(&items); if (items.size() > partIdx) { @@ -283,21 +283,21 @@ TString THttpRequest::GetRequestPathPart(TStringBuf path, size_t partIdx) const return TString(); } -TString THttpRequest::ExtractQueueNameFromPath(const TStringBuf path) { +TString THttpRequest::ExtractQueueNameFromPath(const TStringBuf path) { return GetRequestPathPart(path, 2); } -TString THttpRequest::ExtractAccountNameFromPath(const TStringBuf path) { +TString THttpRequest::ExtractAccountNameFromPath(const TStringBuf path) { return GetRequestPathPart(path, 1); } -void THttpRequest::ExtractQueueAndAccountNames(const TStringBuf path) { +void THttpRequest::ExtractQueueAndAccountNames(const TStringBuf path) { if (Action_ == EAction::ModifyPermissions) return; if (Action_ == EAction::GetQueueUrl || Action_ == EAction::CreateQueue) { if (!QueryParams_.QueueName) { - throw TSQSException(NErrors::MISSING_PARAMETER) << "No queue name was provided."; + throw TSQSException(NErrors::MISSING_PARAMETER) << "No queue name was provided."; } QueueName_ = *QueryParams_.QueueName; @@ -305,80 +305,80 @@ void THttpRequest::ExtractQueueAndAccountNames(const TStringBuf path) { const auto pathAndQuery = QueryParams_.QueueUrl ? 
GetPathAndQuery(*QueryParams_.QueueUrl) : GetPathAndQuery(path); QueueName_ = ExtractQueueNameFromPath(pathAndQuery); AccountName_ = ExtractAccountNameFromPath(pathAndQuery); - - if (IsProxyAction(Action_)) { - if (QueryParams_.QueueUrl && *QueryParams_.QueueUrl) { - if (!QueueName_) { - throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Invalid queue url."; - } - } else { - if (!pathAndQuery || pathAndQuery == "/") { - throw TSQSException(NErrors::MISSING_PARAMETER) << "No queue url was provided."; - } - } - } - } -} - -TString THttpRequest::HttpHeadersLogString(const THttpInput& input) { - TStringBuilder headersStr; + + if (IsProxyAction(Action_)) { + if (QueryParams_.QueueUrl && *QueryParams_.QueueUrl) { + if (!QueueName_) { + throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Invalid queue url."; + } + } else { + if (!pathAndQuery || pathAndQuery == "/") { + throw TSQSException(NErrors::MISSING_PARAMETER) << "No queue url was provided."; + } + } + } + } +} + +TString THttpRequest::HttpHeadersLogString(const THttpInput& input) { + TStringBuilder headersStr; + for (const auto& header : input.Headers()) { + if (!headersStr.empty()) { + headersStr << ", "; + } else { + headersStr << "Http headers: "; + } + headersStr << header.Name(); + if (IsPrivateTokenHeader(header.Name())) { + headersStr << "=" << header.Value().size() << " bytes"; + } else { + headersStr << "=\"" << header.Value() << "\""; + } + } + if (headersStr.empty()) { + headersStr << "No http headers"; + } + return headersStr; +} + +void THttpRequest::ParseHeaders(const THttpInput& input) { + TString sourceReqId; for (const auto& header : input.Headers()) { - if (!headersStr.empty()) { - headersStr << ", "; - } else { - headersStr << "Http headers: "; - } - headersStr << header.Name(); - if (IsPrivateTokenHeader(header.Name())) { - headersStr << "=" << header.Value().size() << " bytes"; - } else { - headersStr << "=\"" << header.Value() << "\""; - } - } - if (headersStr.empty()) { - headersStr << "No http headers"; - } - return headersStr; -} - -void THttpRequest::ParseHeaders(const THttpInput& input) { - TString sourceReqId; - for (const auto& header : input.Headers()) { - if (AsciiEqualsIgnoreCase(header.Name(), AUTHORIZATION_HEADER)) { + if (AsciiEqualsIgnoreCase(header.Name(), AUTHORIZATION_HEADER)) { ParseAuthorization(header.Value()); } else if (AsciiEqualsIgnoreCase(header.Name(), SECURITY_TOKEN_HEADER)) { SecurityToken_ = header.Value(); } else if (AsciiEqualsIgnoreCase(header.Name(), IAM_TOKEN_HEADER)) { IamToken_ = header.Value(); - } else if (AsciiEqualsIgnoreCase(header.Name(), FORWARDED_IP_HEADER)) { - SourceAddress_ = header.Value(); - } else if (AsciiEqualsIgnoreCase(header.Name(), REQUEST_ID_HEADER)) { - sourceReqId = header.Value(); + } else if (AsciiEqualsIgnoreCase(header.Name(), FORWARDED_IP_HEADER)) { + SourceAddress_ = header.Value(); + } else if (AsciiEqualsIgnoreCase(header.Name(), REQUEST_ID_HEADER)) { + sourceReqId = header.Value(); } } - - GenerateRequestId(sourceReqId); - - if (SourceAddress_.empty()) { - ExtractSourceAddressFromSocket(); - } - - RLOG_SQS_BASE_TRACE(*Parent_->ActorSystem_, HttpHeadersLogString(input)); + + GenerateRequestId(sourceReqId); + + if (SourceAddress_.empty()) { + ExtractSourceAddressFromSocket(); + } + + RLOG_SQS_BASE_TRACE(*Parent_->ActorSystem_, HttpHeadersLogString(input)); } -void THttpRequest::ParseAuthorization(const TString& value) { - TMap<TString, TString> params = ParseAuthorizationParams(value); +void THttpRequest::ParseAuthorization(const TString& 
value) { + TMap<TString, TString> params = ParseAuthorizationParams(value); - TString credential = params[CREDENTIAL_PARAM]; - const size_t slashPos = credential.find('/'); - if (slashPos == TString::npos) { - UserName_ = credential; - } else { - UserName_ = credential.substr(0, slashPos); + TString credential = params[CREDENTIAL_PARAM]; + const size_t slashPos = credential.find('/'); + if (slashPos == TString::npos) { + UserName_ = credential; + } else { + UserName_ = credential.substr(0, slashPos); } } -void THttpRequest::ParseCgiParameters(const TCgiParameters& params) { +void THttpRequest::ParseCgiParameters(const TCgiParameters& params) { TParametersParser parser(&QueryParams_); for (auto pi = params.begin(); pi != params.end(); ++pi) { @@ -386,12 +386,12 @@ void THttpRequest::ParseCgiParameters(const TCgiParameters& params) { } } -void THttpRequest::ParsePrivateRequestPathPrefix(const TStringBuf& path) { - if (path.StartsWith(PRIVATE_REQUEST_PATH_PREFIX)) { - IsPrivateRequest_ = true; - } -} - +void THttpRequest::ParsePrivateRequestPathPrefix(const TStringBuf& path) { + if (path.StartsWith(PRIVATE_REQUEST_PATH_PREFIX)) { + IsPrivateRequest_ = true; + } +} + ui64 THttpRequest::CalculateRequestSizeInBytes(const THttpInput& input, const ui64 contentLength) const { ui64 requestSize = input.FirstLine().size(); for (const auto& header : input.Headers()) { @@ -406,42 +406,42 @@ ui64 THttpRequest::CalculateRequestSizeInBytes(const THttpInput& input, const ui return requestSize; } -void THttpRequest::ParseRequest(THttpInput& input) { - if (Parent_->HttpCounters_ && UserName_) { - UserCounters_ = Parent_->HttpCounters_->GetUserCounters(UserName_); - } - +void THttpRequest::ParseRequest(THttpInput& input) { + if (Parent_->HttpCounters_ && UserName_) { + UserCounters_ = Parent_->HttpCounters_->GetUserCounters(UserName_); + } + TParsedHttpFull parsed(input.FirstLine()); - HttpMethod = TString(parsed.Method); - ui64 contentLength = 0; - if (HttpMethod == "POST") { - try { - if (input.GetContentLength(contentLength)) { - InputData.ConstructInPlace(); - InputData->Resize(contentLength); - if (input.Load(InputData->Data(), (size_t)contentLength) != contentLength) { - throw TSQSException(NErrors::MALFORMED_QUERY_STRING) << "Can't load request body."; - } - } else { - throw TSQSException(NErrors::MISSING_PARAMETER) << "No Content-Length."; + HttpMethod = TString(parsed.Method); + ui64 contentLength = 0; + if (HttpMethod == "POST") { + try { + if (input.GetContentLength(contentLength)) { + InputData.ConstructInPlace(); + InputData->Resize(contentLength); + if (input.Load(InputData->Data(), (size_t)contentLength) != contentLength) { + throw TSQSException(NErrors::MALFORMED_QUERY_STRING) << "Can't load request body."; + } + } else { + throw TSQSException(NErrors::MISSING_PARAMETER) << "No Content-Length."; } - } catch (...) { - RLOG_SQS_BASE_ERROR(*Parent_->ActorSystem_, "Failed to parse http request \"" << input.FirstLine() << "\": " << CurrentExceptionMessage()); - } - } - - RLOG_SQS_BASE_DEBUG(*Parent_->ActorSystem_, "Incoming http request: " << input.FirstLine()); + } catch (...) 
{ + RLOG_SQS_BASE_ERROR(*Parent_->ActorSystem_, "Failed to parse http request \"" << input.FirstLine() << "\": " << CurrentExceptionMessage()); + } + } - ParsePrivateRequestPathPrefix(parsed.Path); + RLOG_SQS_BASE_DEBUG(*Parent_->ActorSystem_, "Incoming http request: " << input.FirstLine()); + + ParsePrivateRequestPathPrefix(parsed.Path); RequestSizeInBytes_ = CalculateRequestSizeInBytes(input, contentLength); - if (HttpMethod == "POST") { - ParseCgiParameters(TCgiParameters(TStringBuf(InputData->Data(), contentLength))); - } else if (HttpMethod == "GET") { + if (HttpMethod == "POST") { + ParseCgiParameters(TCgiParameters(TStringBuf(InputData->Data(), contentLength))); + } else if (HttpMethod == "GET") { ParseCgiParameters(TCgiParameters(parsed.Cgi)); } else { - throw TSQSException(NErrors::MALFORMED_QUERY_STRING) << "Unsupported method: \"" << parsed.Method << "\"."; + throw TSQSException(NErrors::MALFORMED_QUERY_STRING) << "Unsupported method: \"" << parsed.Method << "\"."; } if (QueryParams_.Action) { @@ -451,10 +451,10 @@ void THttpRequest::ParseRequest(THttpInput& input) { Action_ = ActionFromString(*QueryParams_.Action); } - THttpActionCounters* counters = GetActionCounters(); - INC_COUNTER(counters, Requests); + THttpActionCounters* counters = GetActionCounters(); + INC_COUNTER(counters, Requests); } else { - throw TSQSException(NErrors::MISSING_ACTION) << "Action param was not found."; + throw TSQSException(NErrors::MISSING_ACTION) << "Action param was not found."; } if (QueryParams_.FolderId) { @@ -473,12 +473,12 @@ void THttpRequest::ParseRequest(THttpInput& input) { Y_CAT(Setup, NAME)(requestHolder->Y_CAT(Mutable, NAME)()); \ CopyCredentials(requestHolder->Y_CAT(Mutable, NAME)(), Parent_->Config); \ break; \ - } + } #define HANDLE_SETUP_PRIVATE_ACTION_CASE(NAME) \ case EAction::NAME: { \ if (!IsPrivateRequest_) { \ - RLOG_SQS_BASE_ERROR(*Parent_->ActorSystem_, \ + RLOG_SQS_BASE_ERROR(*Parent_->ActorSystem_, \ "Attempt to call " \ Y_STRINGIZE(NAME) \ " action without private url path"); \ @@ -487,32 +487,32 @@ void THttpRequest::ParseRequest(THttpInput& input) { Y_CAT(SetupPrivate, NAME)(requestHolder->Y_CAT(Mutable, NAME)()); \ CopyCredentials(requestHolder->Y_CAT(Mutable, NAME)(), Parent_->Config); \ break; \ - } - -bool THttpRequest::SetupRequest() { + } + +bool THttpRequest::SetupRequest() { auto requestHolder = MakeHolder<TSqsRequest>(); - requestHolder->SetRequestId(RequestId_); - - // Validate batches - if (IsBatchAction(Action_)) { - if (QueryParams_.BatchEntries.empty()) { - throw TSQSException(NErrors::EMPTY_BATCH_REQUEST); - } - if (!IsPrivateAction(Action_) && QueryParams_.BatchEntries.size() > TLimits::MaxBatchSize) { - throw TSQSException(NErrors::TOO_MANY_ENTRIES_IN_BATCH_REQUEST); - } - THashSet<TString> ids; - for (const auto& entry : QueryParams_.BatchEntries) { - if (!entry.second.Id || !*entry.second.Id) { - throw TSQSException(NErrors::MISSING_PARAMETER) << "No id in batch entry."; - } - - if (!ids.insert(*entry.second.Id).second) { - throw TSQSException(NErrors::BATCH_ENTRY_IDS_NOT_DISTINCT); - } - } - } - + requestHolder->SetRequestId(RequestId_); + + // Validate batches + if (IsBatchAction(Action_)) { + if (QueryParams_.BatchEntries.empty()) { + throw TSQSException(NErrors::EMPTY_BATCH_REQUEST); + } + if (!IsPrivateAction(Action_) && QueryParams_.BatchEntries.size() > TLimits::MaxBatchSize) { + throw TSQSException(NErrors::TOO_MANY_ENTRIES_IN_BATCH_REQUEST); + } + THashSet<TString> ids; + for (const auto& entry : QueryParams_.BatchEntries) { + if 
(!entry.second.Id || !*entry.second.Id) { + throw TSQSException(NErrors::MISSING_PARAMETER) << "No id in batch entry."; + } + + if (!ids.insert(*entry.second.Id).second) { + throw TSQSException(NErrors::BATCH_ENTRY_IDS_NOT_DISTINCT); + } + } + } + switch (Action_) { HANDLE_SETUP_ACTION_CASE(ChangeMessageVisibility); HANDLE_SETUP_ACTION_CASE(ChangeMessageVisibilityBatch); @@ -535,24 +535,24 @@ bool THttpRequest::SetupRequest() { HANDLE_SETUP_ACTION_CASE(SendMessageBatch); HANDLE_SETUP_ACTION_CASE(SetQueueAttributes); - HANDLE_SETUP_PRIVATE_ACTION_CASE(DeleteQueueBatch); + HANDLE_SETUP_PRIVATE_ACTION_CASE(DeleteQueueBatch); HANDLE_SETUP_PRIVATE_ACTION_CASE(CountQueues); - HANDLE_SETUP_PRIVATE_ACTION_CASE(PurgeQueueBatch); - HANDLE_SETUP_PRIVATE_ACTION_CASE(GetQueueAttributesBatch); + HANDLE_SETUP_PRIVATE_ACTION_CASE(PurgeQueueBatch); + HANDLE_SETUP_PRIVATE_ACTION_CASE(GetQueueAttributesBatch); - case EAction::Unknown: - case EAction::ActionsArraySize: // to avoid compiler warning - Response_ = MakeErrorXmlResponse(NErrors::MISSING_ACTION, Parent_->AggregatedUserCounters_.Get(), TStringBuilder() << "Unknown action: \"" + *QueryParams_.Action << "\"."); - return false; + case EAction::Unknown: + case EAction::ActionsArraySize: // to avoid compiler warning + Response_ = MakeErrorXmlResponse(NErrors::MISSING_ACTION, Parent_->AggregatedUserCounters_.Get(), TStringBuilder() << "Unknown action: \"" + *QueryParams_.Action << "\"."); + return false; } - RLOG_SQS_BASE_DEBUG(*Parent_->ActorSystem_, "Create proxy action actor for request " << SecureShortUtf8DebugString(*requestHolder)); + RLOG_SQS_BASE_DEBUG(*Parent_->ActorSystem_, "Create proxy action actor for request " << SecureShortUtf8DebugString(*requestHolder)); const bool enableQueueLeader = Parent_->Config.HasEnableQueueMaster() ? 
Parent_->Config.GetEnableQueueMaster() : Parent_->Config.GetEnableQueueLeader(); - auto httpCallback = MakeHolder<THttpCallback>(this, *requestHolder); + auto httpCallback = MakeHolder<THttpCallback>(this, *requestHolder); TAuthActorData data { .SQSRequest = std::move(requestHolder), @@ -575,7 +575,7 @@ bool THttpRequest::SetupRequest() { return true; } -void THttpRequest::SetupChangeMessageVisibility(TChangeMessageVisibilityRequest* const req) { +void THttpRequest::SetupChangeMessageVisibility(TChangeMessageVisibilityRequest* const req) { req->SetQueueName(QueueName_); req->MutableAuth()->SetUserName(UserName_); @@ -583,11 +583,11 @@ void THttpRequest::SetupChangeMessageVisibility(TChangeMessageVisibilityRequest* req->SetReceiptHandle(CGIEscapeRet(*QueryParams_.ReceiptHandle)); } if (QueryParams_.VisibilityTimeout) { - req->SetVisibilityTimeout(*QueryParams_.VisibilityTimeout); + req->SetVisibilityTimeout(*QueryParams_.VisibilityTimeout); } } -void THttpRequest::SetupChangeMessageVisibilityBatch(TChangeMessageVisibilityBatchRequest* const req) { +void THttpRequest::SetupChangeMessageVisibilityBatch(TChangeMessageVisibilityBatchRequest* const req) { req->MutableAuth()->SetUserName(UserName_); req->SetQueueName(QueueName_); @@ -602,12 +602,12 @@ void THttpRequest::SetupChangeMessageVisibilityBatch(TChangeMessageVisibilityBat entry->SetReceiptHandle(CGIEscapeRet(*params.ReceiptHandle)); } if (params.VisibilityTimeout) { - entry->SetVisibilityTimeout(*params.VisibilityTimeout); + entry->SetVisibilityTimeout(*params.VisibilityTimeout); } } } -void THttpRequest::SetupCreateQueue(TCreateQueueRequest* const req) { +void THttpRequest::SetupCreateQueue(TCreateQueueRequest* const req) { req->SetQueueName(QueueName_); req->MutableAuth()->SetUserName(UserName_); @@ -616,15 +616,15 @@ void THttpRequest::SetupCreateQueue(TCreateQueueRequest* const req) { } } -void THttpRequest::SetupCreateUser(TCreateUserRequest* const req) { +void THttpRequest::SetupCreateUser(TCreateUserRequest* const req) { req->MutableAuth()->SetUserName(UserName_); if (QueryParams_.UserName) { - req->SetUserName(*QueryParams_.UserName); + req->SetUserName(*QueryParams_.UserName); } } -void THttpRequest::SetupDeleteMessage(TDeleteMessageRequest* const req) { +void THttpRequest::SetupDeleteMessage(TDeleteMessageRequest* const req) { req->SetQueueName(QueueName_); req->MutableAuth()->SetUserName(UserName_); @@ -633,7 +633,7 @@ void THttpRequest::SetupDeleteMessage(TDeleteMessageRequest* const req) { } } -void THttpRequest::SetupDeleteMessageBatch(TDeleteMessageBatchRequest* const req) { +void THttpRequest::SetupDeleteMessageBatch(TDeleteMessageBatchRequest* const req) { req->MutableAuth()->SetUserName(UserName_); req->SetQueueName(QueueName_); @@ -650,12 +650,12 @@ void THttpRequest::SetupDeleteMessageBatch(TDeleteMessageBatchRequest* const req } } -void THttpRequest::SetupDeleteQueue(TDeleteQueueRequest* const req) { +void THttpRequest::SetupDeleteQueue(TDeleteQueueRequest* const req) { req->SetQueueName(QueueName_); req->MutableAuth()->SetUserName(UserName_); } -void THttpRequest::SetupListPermissions(TListPermissionsRequest* const req) { +void THttpRequest::SetupListPermissions(TListPermissionsRequest* const req) { if (QueryParams_.Path) { req->SetPath(*QueryParams_.Path); } @@ -666,64 +666,64 @@ void THttpRequest::SetupListDeadLetterSourceQueues(TListDeadLetterSourceQueuesRe req->MutableAuth()->SetUserName(UserName_); } -void THttpRequest::SetupPrivateDeleteQueueBatch(TDeleteQueueBatchRequest* const req) { - for (const auto& entry 
: QueryParams_.BatchEntries) { - auto* protoEntry = req->AddEntries(); - const TParameters& params = entry.second; - if (params.Id) { - protoEntry->SetId(*params.Id); - } - if (params.QueueUrl) { - protoEntry->SetQueueName(ExtractQueueNameFromPath(GetPathAndQuery(*params.QueueUrl))); - } - } - req->MutableAuth()->SetUserName(UserName_); -} - +void THttpRequest::SetupPrivateDeleteQueueBatch(TDeleteQueueBatchRequest* const req) { + for (const auto& entry : QueryParams_.BatchEntries) { + auto* protoEntry = req->AddEntries(); + const TParameters& params = entry.second; + if (params.Id) { + protoEntry->SetId(*params.Id); + } + if (params.QueueUrl) { + protoEntry->SetQueueName(ExtractQueueNameFromPath(GetPathAndQuery(*params.QueueUrl))); + } + } + req->MutableAuth()->SetUserName(UserName_); +} + void THttpRequest::SetupPrivateCountQueues(TCountQueuesRequest* const req) { req->MutableAuth()->SetUserName(UserName_); } -void THttpRequest::SetupPrivatePurgeQueueBatch(TPurgeQueueBatchRequest* const req) { - for (const auto& entry : QueryParams_.BatchEntries) { - auto* protoEntry = req->AddEntries(); - const TParameters& params = entry.second; - if (params.Id) { - protoEntry->SetId(*params.Id); - } - if (params.QueueUrl) { - protoEntry->SetQueueName(ExtractQueueNameFromPath(GetPathAndQuery(*params.QueueUrl))); - } - } - req->MutableAuth()->SetUserName(UserName_); -} - -void THttpRequest::SetupPrivateGetQueueAttributesBatch(TGetQueueAttributesBatchRequest* const req) { - for (const auto& entry : QueryParams_.BatchEntries) { - auto* protoEntry = req->AddEntries(); - const TParameters& params = entry.second; - if (params.Id) { - protoEntry->SetId(*params.Id); - } - if (params.QueueUrl) { - protoEntry->SetQueueName(ExtractQueueNameFromPath(GetPathAndQuery(*params.QueueUrl))); - } - } - for (const auto& name : QueryParams_.AttributeNames) { - req->AddNames(name.second); - } - req->MutableAuth()->SetUserName(UserName_); -} - -void THttpRequest::SetupDeleteUser(TDeleteUserRequest* const req) { - req->MutableAuth()->SetUserName(UserName_); +void THttpRequest::SetupPrivatePurgeQueueBatch(TPurgeQueueBatchRequest* const req) { + for (const auto& entry : QueryParams_.BatchEntries) { + auto* protoEntry = req->AddEntries(); + const TParameters& params = entry.second; + if (params.Id) { + protoEntry->SetId(*params.Id); + } + if (params.QueueUrl) { + protoEntry->SetQueueName(ExtractQueueNameFromPath(GetPathAndQuery(*params.QueueUrl))); + } + } + req->MutableAuth()->SetUserName(UserName_); +} + +void THttpRequest::SetupPrivateGetQueueAttributesBatch(TGetQueueAttributesBatchRequest* const req) { + for (const auto& entry : QueryParams_.BatchEntries) { + auto* protoEntry = req->AddEntries(); + const TParameters& params = entry.second; + if (params.Id) { + protoEntry->SetId(*params.Id); + } + if (params.QueueUrl) { + protoEntry->SetQueueName(ExtractQueueNameFromPath(GetPathAndQuery(*params.QueueUrl))); + } + } + for (const auto& name : QueryParams_.AttributeNames) { + req->AddNames(name.second); + } + req->MutableAuth()->SetUserName(UserName_); +} + +void THttpRequest::SetupDeleteUser(TDeleteUserRequest* const req) { + req->MutableAuth()->SetUserName(UserName_); if (QueryParams_.UserName) { - req->SetUserName(*QueryParams_.UserName); + req->SetUserName(*QueryParams_.UserName); } } -void THttpRequest::SetupGetQueueAttributes(TGetQueueAttributesRequest* const req) { +void THttpRequest::SetupGetQueueAttributes(TGetQueueAttributesRequest* const req) { req->SetQueueName(QueueName_); req->MutableAuth()->SetUserName(UserName_); 
@@ -732,12 +732,12 @@ void THttpRequest::SetupGetQueueAttributes(TGetQueueAttributesRequest* const req } } -void THttpRequest::SetupGetQueueUrl(TGetQueueUrlRequest* const req) { +void THttpRequest::SetupGetQueueUrl(TGetQueueUrlRequest* const req) { req->SetQueueName(QueueName_); req->MutableAuth()->SetUserName(UserName_); } -void THttpRequest::SetupListQueues(TListQueuesRequest* const req) { +void THttpRequest::SetupListQueues(TListQueuesRequest* const req) { req->MutableAuth()->SetUserName(UserName_); if (QueryParams_.QueueNamePrefix) { @@ -745,7 +745,7 @@ void THttpRequest::SetupListQueues(TListQueuesRequest* const req) { } } -void THttpRequest::SetupListUsers(TListUsersRequest* const req) { +void THttpRequest::SetupListUsers(TListUsersRequest* const req) { req->MutableAuth()->SetUserName(UserName_); if (QueryParams_.UserNamePrefix) { @@ -766,7 +766,7 @@ static void SetupModifyPermissionsAction(const TParameters& queryParams, TModify } } -void THttpRequest::SetupModifyPermissions(TModifyPermissionsRequest* const req) { +void THttpRequest::SetupModifyPermissions(TModifyPermissionsRequest* const req) { auto it = ModifyPermissionsActions.begin(); if (*QueryParams_.Action == *it++) { SetupModifyPermissionsAction(QueryParams_, *req->MutableActions()->Add()->MutableGrant()); @@ -790,17 +790,17 @@ void THttpRequest::SetupModifyPermissions(TModifyPermissionsRequest* const req) } } -void THttpRequest::SetupPurgeQueue(TPurgeQueueRequest* const req) { +void THttpRequest::SetupPurgeQueue(TPurgeQueueRequest* const req) { req->SetQueueName(QueueName_); req->MutableAuth()->SetUserName(UserName_); } -void THttpRequest::SetupReceiveMessage(TReceiveMessageRequest* const req) { +void THttpRequest::SetupReceiveMessage(TReceiveMessageRequest* const req) { req->SetQueueName(QueueName_); req->MutableAuth()->SetUserName(UserName_); if (QueryParams_.MaxNumberOfMessages) { - req->SetMaxNumberOfMessages(*QueryParams_.MaxNumberOfMessages); + req->SetMaxNumberOfMessages(*QueryParams_.MaxNumberOfMessages); } else { req->SetMaxNumberOfMessages(1); } @@ -808,10 +808,10 @@ void THttpRequest::SetupReceiveMessage(TReceiveMessageRequest* const req) { req->SetReceiveRequestAttemptId(*QueryParams_.ReceiveRequestAttemptId); } if (QueryParams_.VisibilityTimeout) { - req->SetVisibilityTimeout(*QueryParams_.VisibilityTimeout); + req->SetVisibilityTimeout(*QueryParams_.VisibilityTimeout); } if (QueryParams_.WaitTimeSeconds) { - req->SetWaitTimeSeconds(*QueryParams_.WaitTimeSeconds); + req->SetWaitTimeSeconds(*QueryParams_.WaitTimeSeconds); } for (const auto& name : QueryParams_.AttributeNames) { @@ -822,51 +822,51 @@ void THttpRequest::SetupReceiveMessage(TReceiveMessageRequest* const req) { } } -static void ValidateMessageAttribute(const TMessageAttribute& attr, bool allowYandexPrefix, bool& hasYandexPrefix) { - if (!ValidateMessageAttributeName(attr.GetName(), hasYandexPrefix, allowYandexPrefix)) { - throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Invalid message attribute name."; - } - if (attr.GetDataType().empty()) { - throw TSQSException(NErrors::INVALID_PARAMETER_COMBINATION) << "No message attribute data type provided."; - } - if (attr.GetDataType().size() > 256) { - throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Message attribute data type is too long."; - } - if (attr.GetStringValue().empty() && attr.GetBinaryValue().empty()) { - throw TSQSException(NErrors::INVALID_PARAMETER_COMBINATION) << "No message attribute value provided."; - } - if (!attr.GetStringValue().empty() && 
!attr.GetBinaryValue().empty()) { - throw TSQSException(NErrors::INVALID_PARAMETER_COMBINATION) << "Message attribute has both value and binary value."; - } -} - -static void ValidateMessageAttributes(const TMap<int, TMessageAttribute>& messageAttributes, bool allowYandexPrefix, bool& hasYandexPrefix) { - THashSet<TStringBuf> attributeNames; - for (const auto& item : messageAttributes) { - ValidateMessageAttribute(item.second, allowYandexPrefix, hasYandexPrefix); - if (!attributeNames.insert(item.second.GetName()).second) { - throw TSQSException(NErrors::INVALID_PARAMETER_COMBINATION) << "Duplicated message attribute name."; - } - } -} - -static TString FormatNames(const TMap<int, TMessageAttribute>& messageAttributes) { - TStringBuilder names; - for (const auto& item : messageAttributes) { - if (!names.empty()) { - names << ", "; - } - names << "\"" << item.second.GetName() << "\""; - } - return std::move(names); -} - -void THttpRequest::SetupSendMessage(TSendMessageRequest* const req) { +static void ValidateMessageAttribute(const TMessageAttribute& attr, bool allowYandexPrefix, bool& hasYandexPrefix) { + if (!ValidateMessageAttributeName(attr.GetName(), hasYandexPrefix, allowYandexPrefix)) { + throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Invalid message attribute name."; + } + if (attr.GetDataType().empty()) { + throw TSQSException(NErrors::INVALID_PARAMETER_COMBINATION) << "No message attribute data type provided."; + } + if (attr.GetDataType().size() > 256) { + throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Message attribute data type is too long."; + } + if (attr.GetStringValue().empty() && attr.GetBinaryValue().empty()) { + throw TSQSException(NErrors::INVALID_PARAMETER_COMBINATION) << "No message attribute value provided."; + } + if (!attr.GetStringValue().empty() && !attr.GetBinaryValue().empty()) { + throw TSQSException(NErrors::INVALID_PARAMETER_COMBINATION) << "Message attribute has both value and binary value."; + } +} + +static void ValidateMessageAttributes(const TMap<int, TMessageAttribute>& messageAttributes, bool allowYandexPrefix, bool& hasYandexPrefix) { + THashSet<TStringBuf> attributeNames; + for (const auto& item : messageAttributes) { + ValidateMessageAttribute(item.second, allowYandexPrefix, hasYandexPrefix); + if (!attributeNames.insert(item.second.GetName()).second) { + throw TSQSException(NErrors::INVALID_PARAMETER_COMBINATION) << "Duplicated message attribute name."; + } + } +} + +static TString FormatNames(const TMap<int, TMessageAttribute>& messageAttributes) { + TStringBuilder names; + for (const auto& item : messageAttributes) { + if (!names.empty()) { + names << ", "; + } + names << "\"" << item.second.GetName() << "\""; + } + return std::move(names); +} + +void THttpRequest::SetupSendMessage(TSendMessageRequest* const req) { req->SetQueueName(QueueName_); req->MutableAuth()->SetUserName(UserName_); if (QueryParams_.DelaySeconds) { - req->SetDelaySeconds(*QueryParams_.DelaySeconds); + req->SetDelaySeconds(*QueryParams_.DelaySeconds); } if (QueryParams_.MessageBody) { req->SetMessageBody(*QueryParams_.MessageBody); @@ -878,18 +878,18 @@ void THttpRequest::SetupSendMessage(TSendMessageRequest* const req) { req->SetMessageGroupId(*QueryParams_.MessageGroupId); } - bool hasYandexPrefix = false; - ValidateMessageAttributes(QueryParams_.MessageAttributes, Parent_->Config.GetAllowYandexAttributePrefix(), hasYandexPrefix); - if (hasYandexPrefix) { - RLOG_SQS_BASE_WARN(*Parent_->ActorSystem_, "Attribute names contain yandex reserved prefix: " 
<< FormatNames(QueryParams_.MessageAttributes)); - } - + bool hasYandexPrefix = false; + ValidateMessageAttributes(QueryParams_.MessageAttributes, Parent_->Config.GetAllowYandexAttributePrefix(), hasYandexPrefix); + if (hasYandexPrefix) { + RLOG_SQS_BASE_WARN(*Parent_->ActorSystem_, "Attribute names contain yandex reserved prefix: " << FormatNames(QueryParams_.MessageAttributes)); + } + for (const auto& item : QueryParams_.MessageAttributes) { req->AddMessageAttributes()->CopyFrom(item.second); } } -void THttpRequest::SetupSendMessageBatch(TSendMessageBatchRequest* const req) { +void THttpRequest::SetupSendMessageBatch(TSendMessageBatchRequest* const req) { req->MutableAuth()->SetUserName(UserName_); req->SetQueueName(QueueName_); @@ -898,7 +898,7 @@ void THttpRequest::SetupSendMessageBatch(TSendMessageBatchRequest* const req) { TSendMessageRequest* const entry = req->AddEntries(); if (params.DelaySeconds) { - entry->SetDelaySeconds(*params.DelaySeconds); + entry->SetDelaySeconds(*params.DelaySeconds); } if (params.Id) { entry->SetId(*params.Id); @@ -913,19 +913,19 @@ void THttpRequest::SetupSendMessageBatch(TSendMessageBatchRequest* const req) { entry->SetMessageGroupId(*params.MessageGroupId); } - bool hasYandexPrefix = false; - ValidateMessageAttributes(params.MessageAttributes, Parent_->Config.GetAllowYandexAttributePrefix(), hasYandexPrefix); - if (hasYandexPrefix) { - RLOG_SQS_BASE_WARN(*Parent_->ActorSystem_, "Attribute names contain yandex reserved prefix: " << FormatNames(params.MessageAttributes)); - } - + bool hasYandexPrefix = false; + ValidateMessageAttributes(params.MessageAttributes, Parent_->Config.GetAllowYandexAttributePrefix(), hasYandexPrefix); + if (hasYandexPrefix) { + RLOG_SQS_BASE_WARN(*Parent_->ActorSystem_, "Attribute names contain yandex reserved prefix: " << FormatNames(params.MessageAttributes)); + } + for (const auto& attr : params.MessageAttributes) { entry->AddMessageAttributes()->CopyFrom(attr.second); } } } -void THttpRequest::SetupSetQueueAttributes(TSetQueueAttributesRequest* const req) { +void THttpRequest::SetupSetQueueAttributes(TSetQueueAttributesRequest* const req) { req->SetQueueName(QueueName_); req->MutableAuth()->SetUserName(UserName_); @@ -934,67 +934,67 @@ void THttpRequest::SetupSetQueueAttributes(TSetQueueAttributesRequest* const req } } -void THttpRequest::ExtractSourceAddressFromSocket() { - struct sockaddr_in6 addr; - socklen_t addrSize = sizeof(struct sockaddr_in6); - if (getpeername(Socket(), (struct sockaddr*)&addr, &addrSize) != 0) { - SourceAddress_ = "unknown"; - } else { - char address[INET6_ADDRSTRLEN]; - if (inet_ntop(AF_INET6, &(addr.sin6_addr), address, INET6_ADDRSTRLEN) != nullptr) { - SourceAddress_ = address; - } else { - SourceAddress_ = "unknown"; - } - } -} - -void THttpRequest::GenerateRequestId(const TString& sourceReqId) { - TStringBuilder builder; - builder << CreateGuidAsString(); - if (!sourceReqId.empty()) { - builder << "-" << sourceReqId; - } - - RequestId_ = std::move(builder); - - DebugInfo->MoveToParsedHttpRequests(RequestId_, this); -} - -THttpActionCounters* THttpRequest::GetActionCounters() const { - if (Action_ <= EAction::Unknown || Action_ >= EAction::ActionsArraySize) { - return nullptr; - } - if (!UserCounters_) { - return nullptr; - } - return &UserCounters_->ActionCounters[Action_]; -} - -bool THttpRequest::SetupPing(const TReplyParams& params) { - TParsedHttpFull parsed(params.Input.FirstLine()); +void THttpRequest::ExtractSourceAddressFromSocket() { + struct sockaddr_in6 addr; + socklen_t addrSize 
= sizeof(struct sockaddr_in6); + if (getpeername(Socket(), (struct sockaddr*)&addr, &addrSize) != 0) { + SourceAddress_ = "unknown"; + } else { + char address[INET6_ADDRSTRLEN]; + if (inet_ntop(AF_INET6, &(addr.sin6_addr), address, INET6_ADDRSTRLEN) != nullptr) { + SourceAddress_ = address; + } else { + SourceAddress_ = "unknown"; + } + } +} + +void THttpRequest::GenerateRequestId(const TString& sourceReqId) { + TStringBuilder builder; + builder << CreateGuidAsString(); + if (!sourceReqId.empty()) { + builder << "-" << sourceReqId; + } + + RequestId_ = std::move(builder); + + DebugInfo->MoveToParsedHttpRequests(RequestId_, this); +} + +THttpActionCounters* THttpRequest::GetActionCounters() const { + if (Action_ <= EAction::Unknown || Action_ >= EAction::ActionsArraySize) { + return nullptr; + } + if (!UserCounters_) { + return nullptr; + } + return &UserCounters_->ActionCounters[Action_]; +} + +bool THttpRequest::SetupPing(const TReplyParams& params) { + TParsedHttpFull parsed(params.Input.FirstLine()); if (parsed.Method == "GET" && (parsed.Path == "/private/ping" || parsed.Path == "/private/ping/") && parsed.Cgi.empty()) { - HttpMethod = TString(parsed.Method); // for logging - Parent_->ActorSystem_->Register(CreatePingActor(MakeHolder<TPingHttpCallback>(this), RequestId_), - NActors::TMailboxType::HTSwap, Parent_->PoolId_); - return true; - } - return false; -} - + HttpMethod = TString(parsed.Method); // for logging + Parent_->ActorSystem_->Register(CreatePingActor(MakeHolder<TPingHttpCallback>(this), RequestId_), + NActors::TMailboxType::HTSwap, Parent_->PoolId_); + return true; + } + return false; +} + /////////////////////////////////////////////////////////////////////////////// -TAsyncHttpServer::TAsyncHttpServer(const NKikimrConfig::TSqsConfig& config) - : THttpServer(this, MakeHttpServerOptions(config)) - , Config(config) +TAsyncHttpServer::TAsyncHttpServer(const NKikimrConfig::TSqsConfig& config) + : THttpServer(this, MakeHttpServerOptions(config)) + , Config(config) { - DebugInfo->HttpServer = this; + DebugInfo->HttpServer = this; } -TAsyncHttpServer::~TAsyncHttpServer() { - Stop(); - DebugInfo->HttpServer = nullptr; -} +TAsyncHttpServer::~TAsyncHttpServer() { + Stop(); + DebugInfo->HttpServer = nullptr; +} void TAsyncHttpServer::Initialize( NActors::TActorSystem* as, TIntrusivePtr<NMonitoring::TDynamicCounters> sqsCounters, @@ -1002,14 +1002,14 @@ void TAsyncHttpServer::Initialize( ) { ActorSystem_ = as; HttpCounters_ = new THttpCounters(Config, sqsCounters->GetSubgroup("subsystem", "http")); - if (Config.GetYandexCloudMode()) { + if (Config.GetYandexCloudMode()) { CloudAuthCounters_ = MakeHolder<TCloudAuthCounters>(Config, sqsCounters->GetSubgroup("subsystem", "cloud_auth")); - } + } AggregatedUserCounters_ = MakeIntrusive<TUserCounters>( Config, sqsCounters->GetSubgroup("subsystem", "core"), ymqCounters, nullptr, TOTAL_COUNTER_LABEL, nullptr, true ); - AggregatedUserCounters_->ShowDetailedCounters(TInstant::Max()); + AggregatedUserCounters_->ShowDetailedCounters(TInstant::Max()); PoolId_ = poolId; } @@ -1019,30 +1019,30 @@ void TAsyncHttpServer::Start() { } } -TClientRequest* TAsyncHttpServer::CreateClient() { - return new THttpRequest(this); -} - -void TAsyncHttpServer::UpdateConnectionsCountCounter() { - if (HttpCounters_) { - *HttpCounters_->ConnectionsCount = GetClientCount(); - } -} - -void TAsyncHttpServer::OnException() { - LOG_SQS_BASE_ERROR(*ActorSystem_, "Exception in http server: " << CurrentExceptionMessage()); - INC_COUNTER(HttpCounters_, InternalExceptions); 
+TClientRequest* TAsyncHttpServer::CreateClient() { + return new THttpRequest(this); } -THttpServerOptions TAsyncHttpServer::MakeHttpServerOptions(const NKikimrConfig::TSqsConfig& config) { +void TAsyncHttpServer::UpdateConnectionsCountCounter() { + if (HttpCounters_) { + *HttpCounters_->ConnectionsCount = GetClientCount(); + } +} + +void TAsyncHttpServer::OnException() { + LOG_SQS_BASE_ERROR(*ActorSystem_, "Exception in http server: " << CurrentExceptionMessage()); + INC_COUNTER(HttpCounters_, InternalExceptions); +} + +THttpServerOptions TAsyncHttpServer::MakeHttpServerOptions(const NKikimrConfig::TSqsConfig& config) { const auto& cfg = config.GetHttpServerConfig(); - THttpServerOptions options; - options.SetThreads(cfg.GetThreads()); - options.SetPort(cfg.GetPort()); - options.SetMaxConnections(cfg.GetMaxConnections()); - options.SetMaxQueueSize(cfg.GetMaxQueueSize()); - options.EnableKeepAlive(cfg.GetEnableKeepAlive()); - return options; -} - -} // namespace NKikimr::NSQS + THttpServerOptions options; + options.SetThreads(cfg.GetThreads()); + options.SetPort(cfg.GetPort()); + options.SetMaxConnections(cfg.GetMaxConnections()); + options.SetMaxQueueSize(cfg.GetMaxQueueSize()); + options.EnableKeepAlive(cfg.GetEnableKeepAlive()); + return options; +} + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/http/http.h b/ydb/core/ymq/http/http.h index 01a2b0edc4f..4e281c8b0f3 100644 --- a/ydb/core/ymq/http/http.h +++ b/ydb/core/ymq/http/http.h @@ -2,7 +2,7 @@ #include "params.h" #include "types.h" - + #include <ydb/core/protos/config.pb.h> #include <ydb/core/protos/sqs.pb.h> @@ -12,138 +12,138 @@ #include <library/cpp/actors/core/actorsystem.h> #include <library/cpp/http/server/http.h> -#include <util/generic/buffer.h> +#include <util/generic/buffer.h> #include <util/generic/maybe.h> #include <library/cpp/cgiparam/cgiparam.h> -namespace NKikimr::NSQS { - -class TAsyncHttpServer; -class THttpRequest; - -class THttpRequest : public TRequestReplier { -public: - THttpRequest(TAsyncHttpServer* p); - ~THttpRequest(); - - void SendResponse(const TSqsHttpResponse& r); - - const TString& GetRequestId() const { - return RequestId_; - } - - const TAsyncHttpServer* GetServer() const { - return Parent_; - } - -private: - bool DoReply(const TReplyParams& p) override; - - void WriteResponse(const TReplyParams& replyParams, const TSqsHttpResponse& response); - - TString LogHttpRequestResponseCommonInfoString(); - TString LogHttpRequestResponseDebugInfoString(const TReplyParams& replyParams, const TSqsHttpResponse& response); - void LogHttpRequestResponse(const TReplyParams& replyParams, const TSqsHttpResponse& response); - -private: - template<typename T> - void CopyCredentials(T* const request, const NKikimrConfig::TSqsConfig& config) { - if (SecurityToken_ && !config.GetYandexCloudMode()) { - // it's also TVM-compatible due to universal TicketParser - request->MutableCredentials()->SetOAuthToken(SecurityToken_); +namespace NKikimr::NSQS { + +class TAsyncHttpServer; +class THttpRequest; + +class THttpRequest : public TRequestReplier { +public: + THttpRequest(TAsyncHttpServer* p); + ~THttpRequest(); + + void SendResponse(const TSqsHttpResponse& r); + + const TString& GetRequestId() const { + return RequestId_; + } + + const TAsyncHttpServer* GetServer() const { + return Parent_; + } + +private: + bool DoReply(const TReplyParams& p) override; + + void WriteResponse(const TReplyParams& replyParams, const TSqsHttpResponse& response); + + TString LogHttpRequestResponseCommonInfoString(); + TString 
LogHttpRequestResponseDebugInfoString(const TReplyParams& replyParams, const TSqsHttpResponse& response); + void LogHttpRequestResponse(const TReplyParams& replyParams, const TSqsHttpResponse& response); + +private: + template<typename T> + void CopyCredentials(T* const request, const NKikimrConfig::TSqsConfig& config) { + if (SecurityToken_ && !config.GetYandexCloudMode()) { + // it's also TVM-compatible due to universal TicketParser + request->MutableCredentials()->SetOAuthToken(SecurityToken_); } - } + } - TString GetRequestPathPart(TStringBuf path, size_t partIdx) const; - TString ExtractQueueNameFromPath(const TStringBuf path); - TString ExtractAccountNameFromPath(const TStringBuf path); + TString GetRequestPathPart(TStringBuf path, size_t partIdx) const; + TString ExtractQueueNameFromPath(const TStringBuf path); + TString ExtractAccountNameFromPath(const TStringBuf path); ui64 CalculateRequestSizeInBytes(const THttpInput& input, const ui64 contentLength) const; - void ExtractQueueAndAccountNames(const TStringBuf path); - - TString HttpHeadersLogString(const THttpInput& input); - void ParseHeaders(const THttpInput& input); - void ParseAuthorization(const TString& value); - void ParseRequest(THttpInput& input); - void ParseCgiParameters(const TCgiParameters& params); - void ParsePrivateRequestPathPrefix(const TStringBuf& path); - - bool SetupRequest(); - - void SetupChangeMessageVisibility(TChangeMessageVisibilityRequest* const req); - void SetupChangeMessageVisibilityBatch(TChangeMessageVisibilityBatchRequest* const req); - void SetupCreateQueue(TCreateQueueRequest* const req); - void SetupCreateUser(TCreateUserRequest* const req); - void SetupGetQueueAttributes(TGetQueueAttributesRequest* const req); - void SetupGetQueueUrl(TGetQueueUrlRequest* const req); - void SetupDeleteMessage(TDeleteMessageRequest* const req); - void SetupDeleteMessageBatch(TDeleteMessageBatchRequest* const req); - void SetupDeleteQueue(TDeleteQueueRequest* const req); - void SetupListPermissions(TListPermissionsRequest* const req); + void ExtractQueueAndAccountNames(const TStringBuf path); + + TString HttpHeadersLogString(const THttpInput& input); + void ParseHeaders(const THttpInput& input); + void ParseAuthorization(const TString& value); + void ParseRequest(THttpInput& input); + void ParseCgiParameters(const TCgiParameters& params); + void ParsePrivateRequestPathPrefix(const TStringBuf& path); + + bool SetupRequest(); + + void SetupChangeMessageVisibility(TChangeMessageVisibilityRequest* const req); + void SetupChangeMessageVisibilityBatch(TChangeMessageVisibilityBatchRequest* const req); + void SetupCreateQueue(TCreateQueueRequest* const req); + void SetupCreateUser(TCreateUserRequest* const req); + void SetupGetQueueAttributes(TGetQueueAttributesRequest* const req); + void SetupGetQueueUrl(TGetQueueUrlRequest* const req); + void SetupDeleteMessage(TDeleteMessageRequest* const req); + void SetupDeleteMessageBatch(TDeleteMessageBatchRequest* const req); + void SetupDeleteQueue(TDeleteQueueRequest* const req); + void SetupListPermissions(TListPermissionsRequest* const req); void SetupListDeadLetterSourceQueues(TListDeadLetterSourceQueuesRequest* const req); - void SetupPrivateDeleteQueueBatch(TDeleteQueueBatchRequest* const req); - void SetupPrivatePurgeQueueBatch(TPurgeQueueBatchRequest* const req); - void SetupPrivateGetQueueAttributesBatch(TGetQueueAttributesBatchRequest* const req); - void SetupDeleteUser(TDeleteUserRequest* const req); - void SetupListQueues(TListQueuesRequest* const req); + void 
SetupPrivateDeleteQueueBatch(TDeleteQueueBatchRequest* const req); + void SetupPrivatePurgeQueueBatch(TPurgeQueueBatchRequest* const req); + void SetupPrivateGetQueueAttributesBatch(TGetQueueAttributesBatchRequest* const req); + void SetupDeleteUser(TDeleteUserRequest* const req); + void SetupListQueues(TListQueuesRequest* const req); void SetupPrivateCountQueues(TCountQueuesRequest* const req); - void SetupListUsers(TListUsersRequest* const req); - void SetupModifyPermissions(TModifyPermissionsRequest* const req); - void SetupReceiveMessage(TReceiveMessageRequest* const req); - void SetupSendMessage(TSendMessageRequest* const req); - void SetupSendMessageBatch(TSendMessageBatchRequest* const req); - void SetupPurgeQueue(TPurgeQueueRequest* const req); - void SetupSetQueueAttributes(TSetQueueAttributesRequest* const req); - - void ExtractSourceAddressFromSocket(); - - void GenerateRequestId(const TString& sourceReqId); - - THttpActionCounters* GetActionCounters() const; - - // Checks whether request is ping and then starts ping actor. - // If request is ping, returns true, otherwise - false. - bool SetupPing(const TReplyParams& p); - -private: - TAsyncHttpServer* const Parent_; - TIntrusivePtr<THttpUserCounters> UserCounters_; - - TParameters QueryParams_; - EAction Action_ = EAction::Unknown; - TString UserName_; - TString AccountName_; - TString QueueName_; - TString SecurityToken_; - TString IamToken_; - TString FolderId_; + void SetupListUsers(TListUsersRequest* const req); + void SetupModifyPermissions(TModifyPermissionsRequest* const req); + void SetupReceiveMessage(TReceiveMessageRequest* const req); + void SetupSendMessage(TSendMessageRequest* const req); + void SetupSendMessageBatch(TSendMessageBatchRequest* const req); + void SetupPurgeQueue(TPurgeQueueRequest* const req); + void SetupSetQueueAttributes(TSetQueueAttributesRequest* const req); + + void ExtractSourceAddressFromSocket(); + + void GenerateRequestId(const TString& sourceReqId); + + THttpActionCounters* GetActionCounters() const; + + // Checks whether request is ping and then starts ping actor. + // If request is ping, returns true, otherwise - false. 
+ bool SetupPing(const TReplyParams& p); + +private: + TAsyncHttpServer* const Parent_; + TIntrusivePtr<THttpUserCounters> UserCounters_; + + TParameters QueryParams_; + EAction Action_ = EAction::Unknown; + TString UserName_; + TString AccountName_; + TString QueueName_; + TString SecurityToken_; + TString IamToken_; + TString FolderId_; TString ApiMethod_; - THolder<TAwsRequestSignV4> AwsSignature_; - - TMaybe<TBuffer> InputData; - TString HttpMethod; - TMaybe<TSqsHttpResponse> Response_; - TString RequestId_; - - // Source values parsed from headers - TString SourceAddress_; + THolder<TAwsRequestSignV4> AwsSignature_; + TMaybe<TBuffer> InputData; + TString HttpMethod; + TMaybe<TSqsHttpResponse> Response_; + TString RequestId_; + + // Source values parsed from headers + TString SourceAddress_; + ui64 RequestSizeInBytes_ = 0; - bool IsPrivateRequest_ = false; // Has "/private" path prefix - TInstant StartTime_ = TInstant::Now(); -}; - -class TAsyncHttpServer - : public THttpServer - , public THttpServer::ICallBack -{ - friend THttpRequest; + bool IsPrivateRequest_ = false; // Has "/private" path prefix + TInstant StartTime_ = TInstant::Now(); +}; +class TAsyncHttpServer + : public THttpServer + , public THttpServer::ICallBack +{ + friend THttpRequest; + public: - TAsyncHttpServer(const NKikimrConfig::TSqsConfig& config); - ~TAsyncHttpServer(); + TAsyncHttpServer(const NKikimrConfig::TSqsConfig& config); + ~TAsyncHttpServer(); void Initialize( NActors::TActorSystem* as, @@ -153,25 +153,25 @@ public: void Start(); - NActors::TActorSystem* GetActorSystem() const { - return ActorSystem_; - } - + NActors::TActorSystem* GetActorSystem() const { + return ActorSystem_; + } + private: - // THttpServer::ICallback + // THttpServer::ICallback TClientRequest* CreateClient() override; - void OnException() override; - static THttpServerOptions MakeHttpServerOptions(const NKikimrConfig::TSqsConfig& config); - - void UpdateConnectionsCountCounter(); + void OnException() override; + static THttpServerOptions MakeHttpServerOptions(const NKikimrConfig::TSqsConfig& config); + void UpdateConnectionsCountCounter(); + private: - const NKikimrConfig::TSqsConfig Config; - NActors::TActorSystem* ActorSystem_ = nullptr; - TIntrusivePtr<THttpCounters> HttpCounters_; // http subsystem counters + const NKikimrConfig::TSqsConfig Config; + NActors::TActorSystem* ActorSystem_ = nullptr; + TIntrusivePtr<THttpCounters> HttpCounters_; // http subsystem counters THolder<TCloudAuthCounters> CloudAuthCounters_; // cloud_auth subsystem counters - TIntrusivePtr<TUserCounters> AggregatedUserCounters_; // aggregated counters for user in core subsystem - ui32 PoolId_ = 0; + TIntrusivePtr<TUserCounters> AggregatedUserCounters_; // aggregated counters for user in core subsystem + ui32 PoolId_ = 0; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/http/params.h b/ydb/core/ymq/http/params.h index 7834fc4f0f7..f5c6bff80b6 100644 --- a/ydb/core/ymq/http/params.h +++ b/ydb/core/ymq/http/params.h @@ -6,15 +6,15 @@ #include <util/generic/maybe.h> #include <util/generic/string.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { struct TParameters { TMaybe<TString> Action; TMaybe<TString> Clear; - TMaybe<ui64> DelaySeconds; + TMaybe<ui64> DelaySeconds; TMaybe<TString> FolderId; TMaybe<TString> Id; - TMaybe<ui32> MaxNumberOfMessages; + TMaybe<ui32> MaxNumberOfMessages; TMaybe<TString> MessageBody; TMaybe<TString> MessageDeduplicationId; TMaybe<TString> MessageGroupId; @@ -28,8 +28,8 @@ struct 
TParameters { TMaybe<TString> UserName; TMaybe<TString> UserNamePrefix; TMaybe<TString> Version; - TMaybe<ui64> VisibilityTimeout; - TMaybe<ui64> WaitTimeSeconds; + TMaybe<ui64> VisibilityTimeout; + TMaybe<ui64> WaitTimeSeconds; TMap<int, TString> AttributeNames; TMap<int, TAttribute> Attributes; @@ -42,7 +42,7 @@ public: TParametersParser(TParameters* params); ~TParametersParser(); - // Throws TSQSException + // Throws TSQSException void Append(const TString& name, const TString& value); private: @@ -52,4 +52,4 @@ private: int Num_; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/http/parser.rl6 b/ydb/core/ymq/http/parser.rl6 index b617c334a31..0ecb1864701 100644 --- a/ydb/core/ymq/http/parser.rl6 +++ b/ydb/core/ymq/http/parser.rl6 @@ -3,56 +3,56 @@ #include <ydb/core/ymq/http/params.h> #include <library/cpp/string_utils/base64/base64.h> - -#include <util/string/cast.h> -#include <util/generic/strbuf.h> - -namespace NKikimr::NSQS { + +#include <util/string/cast.h> +#include <util/generic/strbuf.h> + +namespace NKikimr::NSQS { namespace { - -const TString& ValidateAlphaNumAndPunctuation128ForAssign(const TString& value, TStringBuf parameterName) { - if (value.size() > 128 || !IsAlphaNumAndPunctuation(value)) { - if (parameterName) { - throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) - << "Invalid parameter \"" << parameterName - << "\". It is expected to be shorter than 128 characters and consist of aplhanum and punctuation characters."; - } else { - throw TSQSException(NErrors::INVALID_PARAMETER_VALUE); - } - } - return value; -} - -class TParameterValidator { -public: - TParameterValidator(TStringBuf value, TStringBuf parameterName) - : Value(value) - , ParameterName(parameterName) - { - } - - template <class T> - operator TMaybe<T>() const { - T v; - if (!TryFromString(Value, v)) { - if (ParameterName) { - throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Invalid parameter \"" << ParameterName << "\"."; - } else { - throw TSQSException(NErrors::INVALID_PARAMETER_VALUE); - } - } - return v; - } - -private: - TStringBuf Value; - TStringBuf ParameterName; -}; - -TParameterValidator ParseAndValidate(TStringBuf value, TStringBuf parameterName) { - return TParameterValidator(value, parameterName); -} - + +const TString& ValidateAlphaNumAndPunctuation128ForAssign(const TString& value, TStringBuf parameterName) { + if (value.size() > 128 || !IsAlphaNumAndPunctuation(value)) { + if (parameterName) { + throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) + << "Invalid parameter \"" << parameterName + << "\". 
It is expected to be shorter than 128 characters and consist of aplhanum and punctuation characters."; + } else { + throw TSQSException(NErrors::INVALID_PARAMETER_VALUE); + } + } + return value; +} + +class TParameterValidator { +public: + TParameterValidator(TStringBuf value, TStringBuf parameterName) + : Value(value) + , ParameterName(parameterName) + { + } + + template <class T> + operator TMaybe<T>() const { + T v; + if (!TryFromString(Value, v)) { + if (ParameterName) { + throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Invalid parameter \"" << ParameterName << "\"."; + } else { + throw TSQSException(NErrors::INVALID_PARAMETER_VALUE); + } + } + return v; + } + +private: + TStringBuf Value; + TStringBuf ParameterName; +}; + +TParameterValidator ParseAndValidate(TStringBuf value, TStringBuf parameterName) { + return TParameterValidator(value, parameterName); +} + %%{ machine params_parser; @@ -74,8 +74,8 @@ message_attribute = (("Name" % { CurrentParams_->MessageAttributes[Num_].SetName(value); }) | ("Value" '.' (("StringValue" % { CurrentParams_->MessageAttributes[Num_].SetStringValue(value); }) | - ("DataType" % { CurrentParams_->MessageAttributes[Num_].SetDataType(value); }) | - ("BinaryValue" % { CurrentParams_->MessageAttributes[Num_].SetBinaryValue(Base64Decode(value)); })))); + ("DataType" % { CurrentParams_->MessageAttributes[Num_].SetDataType(value); }) | + ("BinaryValue" % { CurrentParams_->MessageAttributes[Num_].SetBinaryValue(Base64Decode(value)); })))); change_visibility_entry = "ChangeMessageVisibilityBatchRequestEntry" > { Id_ = 1; } ('.' int %{ Id_ = I; CurrentParams_ = &Params_->BatchEntries[Id_]; }) '.' @@ -97,21 +97,21 @@ send_message_entry = ("MessageGroupId" % { CurrentParams_->MessageGroupId = ValidateAlphaNumAndPunctuation128ForAssign(value, TStringBuf("MessageGroupId")); }) | message_attribute); -delete_queue_entry = - "DeleteQueueBatchRequestEntry" > { Id_ = 1; } ('.' int %{ Id_ = I; CurrentParams_ = &Params_->BatchEntries[Id_]; }) '.' - (("Id" % { CurrentParams_->Id = value; }) | - ("QueueUrl" % { CurrentParams_->QueueUrl = value; })); - -purge_queue_entry = - "PurgeQueueBatchRequestEntry" > { Id_ = 1; } ('.' int %{ Id_ = I; CurrentParams_ = &Params_->BatchEntries[Id_]; }) '.' - (("Id" % { CurrentParams_->Id = value; }) | - ("QueueUrl" % { CurrentParams_->QueueUrl = value; })); - -get_queue_attributes_entry = - "GetQueueAttributesBatchRequestEntry" > { Id_ = 1; } ('.' int %{ Id_ = I; CurrentParams_ = &Params_->BatchEntries[Id_]; }) '.' - (("Id" % { CurrentParams_->Id = value; }) | - ("QueueUrl" % { CurrentParams_->QueueUrl = value; })); - +delete_queue_entry = + "DeleteQueueBatchRequestEntry" > { Id_ = 1; } ('.' int %{ Id_ = I; CurrentParams_ = &Params_->BatchEntries[Id_]; }) '.' + (("Id" % { CurrentParams_->Id = value; }) | + ("QueueUrl" % { CurrentParams_->QueueUrl = value; })); + +purge_queue_entry = + "PurgeQueueBatchRequestEntry" > { Id_ = 1; } ('.' int %{ Id_ = I; CurrentParams_ = &Params_->BatchEntries[Id_]; }) '.' + (("Id" % { CurrentParams_->Id = value; }) | + ("QueueUrl" % { CurrentParams_->QueueUrl = value; })); + +get_queue_attributes_entry = + "GetQueueAttributesBatchRequestEntry" > { Id_ = 1; } ('.' int %{ Id_ = I; CurrentParams_ = &Params_->BatchEntries[Id_]; }) '.' + (("Id" % { CurrentParams_->Id = value; }) | + ("QueueUrl" % { CurrentParams_->QueueUrl = value; })); + permissions_entry = "Permission" > { Id_ = 1; } ('.' 
int %{ Id_ = I; CurrentParams_ = &Params_->BatchEntries[Id_]; CurrentParams_->Action = value; }); @@ -144,9 +144,9 @@ main := |* message_attribute; permissions_entry; send_message_entry; - delete_queue_entry; - purge_queue_entry; - get_queue_attributes_entry; + delete_queue_entry; + purge_queue_entry; + get_queue_attributes_entry; *|; }%% @@ -186,4 +186,4 @@ void TParametersParser::Append(const TString& name, const TString& value) { %% write exec; } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/http/types.cpp b/ydb/core/ymq/http/types.cpp index 36da2414ad2..cfcbc4709fb 100644 --- a/ydb/core/ymq/http/types.cpp +++ b/ydb/core/ymq/http/types.cpp @@ -1,19 +1,19 @@ #include "types.h" -#include <util/generic/is_in.h> +#include <util/generic/is_in.h> #include <util/generic/hash.h> -#include <util/generic/hash_set.h> +#include <util/generic/hash_set.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { -extern const TString XML_CONTENT_TYPE = "application/xml"; -extern const TString PLAIN_TEXT_CONTENT_TYPE = "text/plain"; - -TSqsHttpResponse::TSqsHttpResponse(const TString& body, int status, const TString& contentType) +extern const TString XML_CONTENT_TYPE = "application/xml"; +extern const TString PLAIN_TEXT_CONTENT_TYPE = "text/plain"; + +TSqsHttpResponse::TSqsHttpResponse(const TString& body, int status, const TString& contentType) : Body(body) - , ContentType(contentType) + , ContentType(contentType) , StatusCode(status) { } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/http/types.h b/ydb/core/ymq/http/types.h index c33574572e0..3f7259916b3 100644 --- a/ydb/core/ymq/http/types.h +++ b/ydb/core/ymq/http/types.h @@ -1,25 +1,25 @@ #pragma once #include <ydb/core/ymq/base/action.h> - + #include <util/generic/string.h> -namespace NKikimr::NSQS { - -extern const TString XML_CONTENT_TYPE; -extern const TString PLAIN_TEXT_CONTENT_TYPE; +namespace NKikimr::NSQS { -struct TSqsHttpResponse { +extern const TString XML_CONTENT_TYPE; +extern const TString PLAIN_TEXT_CONTENT_TYPE; + +struct TSqsHttpResponse { TString Body; - TString ContentType; - int StatusCode = 0; + TString ContentType; + int StatusCode = 0; TString FolderId; TString ResourceId; - bool IsFifo = false; + bool IsFifo = false; - TSqsHttpResponse() = default; - TSqsHttpResponse(const TString& body, int status, const TString& contentType = XML_CONTENT_TYPE); + TSqsHttpResponse() = default; + TSqsHttpResponse(const TString& body, int status, const TString& contentType = XML_CONTENT_TYPE); }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/http/ut/xml_builder_ut.cpp b/ydb/core/ymq/http/ut/xml_builder_ut.cpp index 2eeed3a977e..45f7153cad2 100644 --- a/ydb/core/ymq/http/ut/xml_builder_ut.cpp +++ b/ydb/core/ymq/http/ut/xml_builder_ut.cpp @@ -1,36 +1,36 @@ #include <ydb/core/ymq/http/xml_builder.h> - + #include <library/cpp/testing/unittest/registar.h> - -Y_UNIT_TEST_SUITE(XmlBuilderTest) { - Y_UNIT_TEST(WritesProperly) { - TXmlStringBuilder builder; - - { - TXmlDocument doc(builder); - { - TXmlRecursiveElement elem1(builder, "upper_elem"); - TXmlElement elem2(builder, "elem", "content"); - } - } - - UNIT_ASSERT_STRINGS_EQUAL(builder.GetResult(), R"&&&(<?xml version="1.0" encoding="UTF-8"?>)&&&" "\n" R"&&&(<upper_elem><elem>content</elem></upper_elem>)&&&" "\n"); - } - - Y_UNIT_TEST(MacroBuilder) { - XML_BUILDER() { - XML_DOC() { - XML_ELEM("El1") { - XML_ELEM("El2") { - XML_ELEM_CONT("ElC1", "content1"); - } - XML_ELEM("El3") { 
- XML_ELEM_CONT("ElC2", "content2"); - } - } - } - } - - UNIT_ASSERT_STRINGS_EQUAL(XML_RESULT(), R"&&&(<?xml version="1.0" encoding="UTF-8"?>)&&&" "\n" R"&&&(<El1><El2><ElC1>content1</ElC1></El2><El3><ElC2>content2</ElC2></El3></El1>)&&&" "\n"); - } -} + +Y_UNIT_TEST_SUITE(XmlBuilderTest) { + Y_UNIT_TEST(WritesProperly) { + TXmlStringBuilder builder; + + { + TXmlDocument doc(builder); + { + TXmlRecursiveElement elem1(builder, "upper_elem"); + TXmlElement elem2(builder, "elem", "content"); + } + } + + UNIT_ASSERT_STRINGS_EQUAL(builder.GetResult(), R"&&&(<?xml version="1.0" encoding="UTF-8"?>)&&&" "\n" R"&&&(<upper_elem><elem>content</elem></upper_elem>)&&&" "\n"); + } + + Y_UNIT_TEST(MacroBuilder) { + XML_BUILDER() { + XML_DOC() { + XML_ELEM("El1") { + XML_ELEM("El2") { + XML_ELEM_CONT("ElC1", "content1"); + } + XML_ELEM("El3") { + XML_ELEM_CONT("ElC2", "content2"); + } + } + } + } + + UNIT_ASSERT_STRINGS_EQUAL(XML_RESULT(), R"&&&(<?xml version="1.0" encoding="UTF-8"?>)&&&" "\n" R"&&&(<El1><El2><ElC1>content1</ElC1></El2><El3><ElC2>content2</ElC2></El3></El1>)&&&" "\n"); + } +} diff --git a/ydb/core/ymq/http/ut/ya.make b/ydb/core/ymq/http/ut/ya.make index bc3265b0e6b..35a5afc28bd 100644 --- a/ydb/core/ymq/http/ut/ya.make +++ b/ydb/core/ymq/http/ut/ya.make @@ -1,17 +1,17 @@ -OWNER( - galaxycrab +OWNER( + galaxycrab g:kikimr - g:sqs -) - -UNITTEST() - -PEERDIR( + g:sqs +) + +UNITTEST() + +PEERDIR( ydb/core/ymq/http -) - -SRCS( - xml_builder_ut.cpp -) - -END() +) + +SRCS( + xml_builder_ut.cpp +) + +END() diff --git a/ydb/core/ymq/http/xml.cpp b/ydb/core/ymq/http/xml.cpp index 0cb6f85274f..a8678f493a4 100644 --- a/ydb/core/ymq/http/xml.cpp +++ b/ydb/core/ymq/http/xml.cpp @@ -1,69 +1,69 @@ #include "xml.h" -#include "xml_builder.h" +#include "xml_builder.h" #include <library/cpp/protobuf/json/proto2json.h> #include <library/cpp/string_utils/base64/base64.h> - + #include <util/string/builder.h> -#include <util/string/cast.h> +#include <util/string/cast.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { using NKikimrClient::TSqsResponse; -// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - list of error codes -TString BuildErrorXmlString(const TString& message, const TString& errorCode, const TString& requestId) { - XML_BUILDER() { - XML_DOC() { - XML_ELEM("ErrorResponse") { - XML_ELEM("Error") { - XML_ELEM_CONT("Message", message); - XML_ELEM_CONT("Code", errorCode); - } - XML_ELEM_CONT("RequestId", requestId); - } - } - } - return XML_RESULT(); -} - -TString BuildErrorXmlString(const TError& error, const TString& requestId) { - return BuildErrorXmlString(error.GetMessage(), error.GetErrorCode(), requestId); -} - -static void AddError(const TString& errorCode, TUserCounters* userCounters) { - if (userCounters) { - if (auto* detailed = userCounters->GetDetailedCounters()) { - detailed->APIStatuses.AddError(errorCode); - } - } -} - -TSqsHttpResponse MakeErrorXmlResponse(const TErrorClass& errorClass, TUserCounters* userCounters, const TString& message, const TString& requestId) { - AddError(errorClass.ErrorCode, userCounters); - return TSqsHttpResponse(BuildErrorXmlString(message.empty() ? 
errorClass.DefaultMessage : message, errorClass.ErrorCode, requestId), errorClass.HttpStatusCode); -} - -TSqsHttpResponse MakeErrorXmlResponseFromCurrentException(TUserCounters* userCounters, const TString& requestId) { - try { - try { - throw; - } catch (const TSQSException& ex) { - return MakeErrorXmlResponse(ex.ErrorClass, userCounters, ex.what(), requestId); - } catch (const std::exception&) { - return MakeErrorXmlResponse(NErrors::INTERNAL_FAILURE, userCounters, TString(), requestId); - } - } catch (const TWriteXmlError& error) { - AddError("InternalFailure", userCounters); - return TSqsHttpResponse("InternalFailure", 500, PLAIN_TEXT_CONTENT_TYPE); - } +// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - list of error codes +TString BuildErrorXmlString(const TString& message, const TString& errorCode, const TString& requestId) { + XML_BUILDER() { + XML_DOC() { + XML_ELEM("ErrorResponse") { + XML_ELEM("Error") { + XML_ELEM_CONT("Message", message); + XML_ELEM_CONT("Code", errorCode); + } + XML_ELEM_CONT("RequestId", requestId); + } + } + } + return XML_RESULT(); } +TString BuildErrorXmlString(const TError& error, const TString& requestId) { + return BuildErrorXmlString(error.GetMessage(), error.GetErrorCode(), requestId); +} + +static void AddError(const TString& errorCode, TUserCounters* userCounters) { + if (userCounters) { + if (auto* detailed = userCounters->GetDetailedCounters()) { + detailed->APIStatuses.AddError(errorCode); + } + } +} + +TSqsHttpResponse MakeErrorXmlResponse(const TErrorClass& errorClass, TUserCounters* userCounters, const TString& message, const TString& requestId) { + AddError(errorClass.ErrorCode, userCounters); + return TSqsHttpResponse(BuildErrorXmlString(message.empty() ? errorClass.DefaultMessage : message, errorClass.ErrorCode, requestId), errorClass.HttpStatusCode); +} + +TSqsHttpResponse MakeErrorXmlResponseFromCurrentException(TUserCounters* userCounters, const TString& requestId) { + try { + try { + throw; + } catch (const TSQSException& ex) { + return MakeErrorXmlResponse(ex.ErrorClass, userCounters, ex.what(), requestId); + } catch (const std::exception&) { + return MakeErrorXmlResponse(NErrors::INTERNAL_FAILURE, userCounters, TString(), requestId); + } + } catch (const TWriteXmlError& error) { + AddError("InternalFailure", userCounters); + return TSqsHttpResponse("InternalFailure", 500, PLAIN_TEXT_CONTENT_TYPE); + } +} + template <typename T> static bool MaybeErrorResponse(const T& resp, TStringBuilder& builder) { if (resp.HasError()) { - builder << BuildErrorXmlString(resp.GetError(), resp.GetRequestId()); + builder << BuildErrorXmlString(resp.GetError(), resp.GetRequestId()); return true; } return false; @@ -73,73 +73,73 @@ static TString BoolToString(const bool b) { return TString(b ? 
"true" : "false"); } -void WriteQueueAttributesToXml(const TGetQueueAttributesResponse& rec, TXmlStringBuilder& xmlBuilder) { - if (rec.HasApproximateNumberOfMessages()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", "ApproximateNumberOfMessages"); - XML_ELEM_CONT("Value", ToString(rec.GetApproximateNumberOfMessages())); - } - } - if (rec.HasApproximateNumberOfMessagesDelayed()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", "ApproximateNumberOfMessagesDelayed"); - XML_ELEM_CONT("Value", ToString(rec.GetApproximateNumberOfMessagesDelayed())); - } - } - if (rec.HasApproximateNumberOfMessagesNotVisible()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", "ApproximateNumberOfMessagesNotVisible"); - XML_ELEM_CONT("Value", ToString(rec.GetApproximateNumberOfMessagesNotVisible())); - } - } - if (rec.HasContentBasedDeduplication()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", "ContentBasedDeduplication"); +void WriteQueueAttributesToXml(const TGetQueueAttributesResponse& rec, TXmlStringBuilder& xmlBuilder) { + if (rec.HasApproximateNumberOfMessages()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "ApproximateNumberOfMessages"); + XML_ELEM_CONT("Value", ToString(rec.GetApproximateNumberOfMessages())); + } + } + if (rec.HasApproximateNumberOfMessagesDelayed()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "ApproximateNumberOfMessagesDelayed"); + XML_ELEM_CONT("Value", ToString(rec.GetApproximateNumberOfMessagesDelayed())); + } + } + if (rec.HasApproximateNumberOfMessagesNotVisible()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "ApproximateNumberOfMessagesNotVisible"); + XML_ELEM_CONT("Value", ToString(rec.GetApproximateNumberOfMessagesNotVisible())); + } + } + if (rec.HasContentBasedDeduplication()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "ContentBasedDeduplication"); XML_ELEM_CONT("Value", BoolToString(rec.GetContentBasedDeduplication())); - } - } - if (rec.HasCreatedTimestamp()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", "CreatedTimestamp"); - XML_ELEM_CONT("Value", ToString(rec.GetCreatedTimestamp())); - } - } - if (rec.HasDelaySeconds()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", "DelaySeconds"); - XML_ELEM_CONT("Value", ToString(rec.GetDelaySeconds())); - } - } - if (rec.HasFifoQueue()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", "FifoQueue"); + } + } + if (rec.HasCreatedTimestamp()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "CreatedTimestamp"); + XML_ELEM_CONT("Value", ToString(rec.GetCreatedTimestamp())); + } + } + if (rec.HasDelaySeconds()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "DelaySeconds"); + XML_ELEM_CONT("Value", ToString(rec.GetDelaySeconds())); + } + } + if (rec.HasFifoQueue()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "FifoQueue"); XML_ELEM_CONT("Value", BoolToString(rec.GetFifoQueue())); - } - } - if (rec.HasMaximumMessageSize()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", "MaximumMessageSize"); - XML_ELEM_CONT("Value", ToString(rec.GetMaximumMessageSize())); - } - } - if (rec.HasMessageRetentionPeriod()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", "MessageRetentionPeriod"); - XML_ELEM_CONT("Value", ToString(rec.GetMessageRetentionPeriod())); - } - } - if (rec.HasReceiveMessageWaitTimeSeconds()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", "ReceiveMessageWaitTimeSeconds"); - XML_ELEM_CONT("Value", ToString(rec.GetReceiveMessageWaitTimeSeconds())); - } - } - if (rec.HasVisibilityTimeout()) { - XML_ELEM("Attribute") { - XML_ELEM_CONT("Name", 
"VisibilityTimeout"); - XML_ELEM_CONT("Value", ToString(rec.GetVisibilityTimeout())); - } - } + } + } + if (rec.HasMaximumMessageSize()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "MaximumMessageSize"); + XML_ELEM_CONT("Value", ToString(rec.GetMaximumMessageSize())); + } + } + if (rec.HasMessageRetentionPeriod()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "MessageRetentionPeriod"); + XML_ELEM_CONT("Value", ToString(rec.GetMessageRetentionPeriod())); + } + } + if (rec.HasReceiveMessageWaitTimeSeconds()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "ReceiveMessageWaitTimeSeconds"); + XML_ELEM_CONT("Value", ToString(rec.GetReceiveMessageWaitTimeSeconds())); + } + } + if (rec.HasVisibilityTimeout()) { + XML_ELEM("Attribute") { + XML_ELEM_CONT("Name", "VisibilityTimeout"); + XML_ELEM_CONT("Value", ToString(rec.GetVisibilityTimeout())); + } + } if (rec.HasRedrivePolicy()) { XML_ELEM("Attribute") { XML_ELEM_CONT("Name", "RedrivePolicy"); @@ -152,265 +152,265 @@ void WriteQueueAttributesToXml(const TGetQueueAttributesResponse& rec, TXmlStrin XML_ELEM_CONT("Value", ToString(rec.GetQueueArn())); } } -} - -TSqsHttpResponse ResponseToAmazonXmlFormat(const TSqsResponse& resp) { +} + +TSqsHttpResponse ResponseToAmazonXmlFormat(const TSqsResponse& resp) { TStringBuilder result; -#define HANDLE_ERROR(METHOD) \ - if (MaybeErrorResponse(resp.Y_CAT(Get, METHOD)(), result)) { \ - return TSqsHttpResponse(result, resp.Y_CAT(Get, METHOD)().GetError().GetStatus()); \ - } \ - /**/ - +#define HANDLE_ERROR(METHOD) \ + if (MaybeErrorResponse(resp.Y_CAT(Get, METHOD)(), result)) { \ + return TSqsHttpResponse(result, resp.Y_CAT(Get, METHOD)().GetError().GetStatus()); \ + } \ + /**/ + switch (resp.GetResponseCase()) { case TSqsResponse::kChangeMessageVisibility: { - HANDLE_ERROR(ChangeMessageVisibility); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("ChangeMessageVisibilityResponse") { - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetChangeMessageVisibility().GetRequestId()); - } - } - } - } - result << XML_RESULT(); + HANDLE_ERROR(ChangeMessageVisibility); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("ChangeMessageVisibilityResponse") { + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetChangeMessageVisibility().GetRequestId()); + } + } + } + } + result << XML_RESULT(); break; } case TSqsResponse::kChangeMessageVisibilityBatch: { - HANDLE_ERROR(ChangeMessageVisibilityBatch); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("ChangeMessageVisibilityBatchResponse") { - XML_ELEM("ChangeMessageVisibilityBatchResult") { - for (size_t i = 0; i < resp.GetChangeMessageVisibilityBatch().EntriesSize(); ++i) { - const auto& entry = resp.GetChangeMessageVisibilityBatch().GetEntries(i); - if (entry.HasError()) { - XML_ELEM("BatchResultErrorEntry") { - XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); - XML_ELEM_CONT("Id", entry.GetId()); - XML_ELEM_CONT("Message", entry.GetError().GetMessage()); - } - } else { - XML_ELEM("ChangeMessageVisibilityBatchResultEntry") { - XML_ELEM_CONT("Id", entry.GetId()); - } - } - } - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetChangeMessageVisibilityBatch().GetRequestId()); - } - } + HANDLE_ERROR(ChangeMessageVisibilityBatch); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("ChangeMessageVisibilityBatchResponse") { + XML_ELEM("ChangeMessageVisibilityBatchResult") { + for (size_t i = 0; i < resp.GetChangeMessageVisibilityBatch().EntriesSize(); ++i) { + const auto& entry = 
resp.GetChangeMessageVisibilityBatch().GetEntries(i); + if (entry.HasError()) { + XML_ELEM("BatchResultErrorEntry") { + XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); + XML_ELEM_CONT("Id", entry.GetId()); + XML_ELEM_CONT("Message", entry.GetError().GetMessage()); + } + } else { + XML_ELEM("ChangeMessageVisibilityBatchResultEntry") { + XML_ELEM_CONT("Id", entry.GetId()); + } + } + } + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetChangeMessageVisibilityBatch().GetRequestId()); + } + } } } - result << XML_RESULT(); + result << XML_RESULT(); break; } case TSqsResponse::kCreateQueue: { - HANDLE_ERROR(CreateQueue); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("CreateQueueResponse") { - XML_ELEM("CreateQueueResult") { - XML_ELEM_CONT("QueueUrl", resp.GetCreateQueue().GetQueueUrl()); - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetCreateQueue().GetRequestId()); - } - } - } - } - result << XML_RESULT(); + HANDLE_ERROR(CreateQueue); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("CreateQueueResponse") { + XML_ELEM("CreateQueueResult") { + XML_ELEM_CONT("QueueUrl", resp.GetCreateQueue().GetQueueUrl()); + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetCreateQueue().GetRequestId()); + } + } + } + } + result << XML_RESULT(); break; } case TSqsResponse::kDeleteMessage: { - HANDLE_ERROR(DeleteMessage); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("DeleteMessageResponse") { - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetDeleteMessage().GetRequestId()); - } - } - } - } - result << XML_RESULT(); + HANDLE_ERROR(DeleteMessage); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("DeleteMessageResponse") { + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetDeleteMessage().GetRequestId()); + } + } + } + } + result << XML_RESULT(); break; } case TSqsResponse::kDeleteMessageBatch: { - HANDLE_ERROR(DeleteMessageBatch); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("DeleteMessageBatchResponse") { - XML_ELEM("DeleteMessageBatchResult") { - for (size_t i = 0; i < resp.GetDeleteMessageBatch().EntriesSize(); ++i) { - const auto& entry = resp.GetDeleteMessageBatch().GetEntries(i); - if (entry.HasError()) { - XML_ELEM("BatchResultErrorEntry") { - XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); - XML_ELEM_CONT("Id", entry.GetId()); - XML_ELEM_CONT("Message", entry.GetError().GetMessage()); - } - } else { - XML_ELEM("DeleteMessageBatchResultEntry") { - XML_ELEM_CONT("Id", entry.GetId()); - } - } - } - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetDeleteMessageBatch().GetRequestId()); - } - } + HANDLE_ERROR(DeleteMessageBatch); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("DeleteMessageBatchResponse") { + XML_ELEM("DeleteMessageBatchResult") { + for (size_t i = 0; i < resp.GetDeleteMessageBatch().EntriesSize(); ++i) { + const auto& entry = resp.GetDeleteMessageBatch().GetEntries(i); + if (entry.HasError()) { + XML_ELEM("BatchResultErrorEntry") { + XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); + XML_ELEM_CONT("Id", entry.GetId()); + XML_ELEM_CONT("Message", entry.GetError().GetMessage()); + } + } else { + XML_ELEM("DeleteMessageBatchResultEntry") { + XML_ELEM_CONT("Id", entry.GetId()); + } + } + } + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetDeleteMessageBatch().GetRequestId()); + } + } } } - result << XML_RESULT(); + result << XML_RESULT(); break; } case TSqsResponse::kDeleteQueue: { - HANDLE_ERROR(DeleteQueue); - XML_BUILDER() { - XML_DOC() { - 
XML_ELEM("DeleteQueueResponse") { - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetDeleteQueue().GetRequestId()); - } - } - } - } - result << XML_RESULT(); - break; - } - - case TSqsResponse::kDeleteQueueBatch: { - HANDLE_ERROR(DeleteQueueBatch); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("DeleteQueueBatchResponse") { - XML_ELEM("DeleteQueueBatchResult") { - for (size_t i = 0; i < resp.GetDeleteQueueBatch().EntriesSize(); ++i) { - const auto& entry = resp.GetDeleteQueueBatch().GetEntries(i); - if (entry.HasError()) { - XML_ELEM("BatchResultErrorEntry") { - XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); - XML_ELEM_CONT("Id", entry.GetId()); - XML_ELEM_CONT("Message", entry.GetError().GetMessage()); - } - } else { - XML_ELEM("DeleteQueueBatchResultEntry") { - XML_ELEM_CONT("Id", entry.GetId()); - } - } - } - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetDeleteQueueBatch().GetRequestId()); - } - } - } - } - result << XML_RESULT(); + HANDLE_ERROR(DeleteQueue); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("DeleteQueueResponse") { + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetDeleteQueue().GetRequestId()); + } + } + } + } + result << XML_RESULT(); break; } - + + case TSqsResponse::kDeleteQueueBatch: { + HANDLE_ERROR(DeleteQueueBatch); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("DeleteQueueBatchResponse") { + XML_ELEM("DeleteQueueBatchResult") { + for (size_t i = 0; i < resp.GetDeleteQueueBatch().EntriesSize(); ++i) { + const auto& entry = resp.GetDeleteQueueBatch().GetEntries(i); + if (entry.HasError()) { + XML_ELEM("BatchResultErrorEntry") { + XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); + XML_ELEM_CONT("Id", entry.GetId()); + XML_ELEM_CONT("Message", entry.GetError().GetMessage()); + } + } else { + XML_ELEM("DeleteQueueBatchResultEntry") { + XML_ELEM_CONT("Id", entry.GetId()); + } + } + } + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetDeleteQueueBatch().GetRequestId()); + } + } + } + } + result << XML_RESULT(); + break; + } + case TSqsResponse::kGetQueueAttributes: { - HANDLE_ERROR(GetQueueAttributes); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("GetQueueAttributesResponse") { - XML_ELEM("GetQueueAttributesResult") { - const auto& rec = resp.GetGetQueueAttributes(); - WriteQueueAttributesToXml(rec, xmlBuilder); - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetGetQueueAttributes().GetRequestId()); - } - } - } - } - result << XML_RESULT(); - break; - } - - case TSqsResponse::kGetQueueAttributesBatch: { - HANDLE_ERROR(GetQueueAttributesBatch); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("GetQueueAttributesBatchResponse") { - XML_ELEM("GetQueueAttributesBatchResult") { - const auto& result = resp.GetGetQueueAttributesBatch(); - for (const auto& entry : result.GetEntries()) { - if (entry.HasError()) { - XML_ELEM("BatchResultErrorEntry") { - XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); - XML_ELEM_CONT("Id", entry.GetId()); - XML_ELEM_CONT("Message", entry.GetError().GetMessage()); - } - } else { - XML_ELEM("GetQueueAttributesBatchResultEntry") { - XML_ELEM_CONT("Id", entry.GetId()); - WriteQueueAttributesToXml(entry, xmlBuilder); - } - } - } - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetGetQueueAttributesBatch().GetRequestId()); - } - } + HANDLE_ERROR(GetQueueAttributes); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("GetQueueAttributesResponse") { + XML_ELEM("GetQueueAttributesResult") { + const auto& rec = 
resp.GetGetQueueAttributes(); + WriteQueueAttributesToXml(rec, xmlBuilder); + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetGetQueueAttributes().GetRequestId()); + } + } + } + } + result << XML_RESULT(); + break; + } + + case TSqsResponse::kGetQueueAttributesBatch: { + HANDLE_ERROR(GetQueueAttributesBatch); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("GetQueueAttributesBatchResponse") { + XML_ELEM("GetQueueAttributesBatchResult") { + const auto& result = resp.GetGetQueueAttributesBatch(); + for (const auto& entry : result.GetEntries()) { + if (entry.HasError()) { + XML_ELEM("BatchResultErrorEntry") { + XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); + XML_ELEM_CONT("Id", entry.GetId()); + XML_ELEM_CONT("Message", entry.GetError().GetMessage()); + } + } else { + XML_ELEM("GetQueueAttributesBatchResultEntry") { + XML_ELEM_CONT("Id", entry.GetId()); + WriteQueueAttributesToXml(entry, xmlBuilder); + } + } + } + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetGetQueueAttributesBatch().GetRequestId()); + } + } } } - result << XML_RESULT(); + result << XML_RESULT(); break; } case TSqsResponse::kGetQueueUrl: { - HANDLE_ERROR(GetQueueUrl); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("GetQueueUrlResponse") { - XML_ELEM("GetQueueUrlResult") { - XML_ELEM_CONT("QueueUrl", resp.GetGetQueueUrl().GetQueueUrl()); - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetGetQueueUrl().GetRequestId()); - } - } - } - } - result << XML_RESULT(); + HANDLE_ERROR(GetQueueUrl); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("GetQueueUrlResponse") { + XML_ELEM("GetQueueUrlResult") { + XML_ELEM_CONT("QueueUrl", resp.GetGetQueueUrl().GetQueueUrl()); + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetGetQueueUrl().GetRequestId()); + } + } + } + } + result << XML_RESULT(); break; } case TSqsResponse::kListQueues: { - HANDLE_ERROR(ListQueues); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("ListQueuesResponse") { - XML_ELEM("ListQueuesResult") { - for (const auto& item : resp.GetListQueues().queues()) { - XML_ELEM_CONT("QueueUrl", item.GetQueueUrl()); - } - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetListQueues().GetRequestId()); - } - } - } + HANDLE_ERROR(ListQueues); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("ListQueuesResponse") { + XML_ELEM("ListQueuesResult") { + for (const auto& item : resp.GetListQueues().queues()) { + XML_ELEM_CONT("QueueUrl", item.GetQueueUrl()); + } + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetListQueues().GetRequestId()); + } + } + } } - result << XML_RESULT(); + result << XML_RESULT(); break; } case TSqsResponse::kCountQueues: { - HANDLE_ERROR(CountQueues); + HANDLE_ERROR(CountQueues); XML_BUILDER() { XML_DOC() { XML_ELEM("CountQueuesResponse") { @@ -428,259 +428,259 @@ TSqsHttpResponse ResponseToAmazonXmlFormat(const TSqsResponse& resp) { } case TSqsResponse::kListUsers: { - HANDLE_ERROR(ListUsers); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("ListUsersResponse") { - XML_ELEM("ListUsersResult") { - for (const auto& item : resp.GetListUsers().usernames()) { - XML_ELEM_CONT("UserName", item); - } - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetListUsers().GetRequestId()); - } - } - } + HANDLE_ERROR(ListUsers); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("ListUsersResponse") { + XML_ELEM("ListUsersResult") { + for (const auto& item : resp.GetListUsers().usernames()) { + XML_ELEM_CONT("UserName", item); + } + } + 
XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetListUsers().GetRequestId()); + } + } + } } - result << XML_RESULT(); + result << XML_RESULT(); break; } case TSqsResponse::kPurgeQueue: { - HANDLE_ERROR(PurgeQueue); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("PurgeQueueResponse") { - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetPurgeQueue().GetRequestId()); - } - } - } - } - result << XML_RESULT(); - break; - } - - case TSqsResponse::kPurgeQueueBatch: { - HANDLE_ERROR(PurgeQueueBatch); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("PurgeQueueBatchResponse") { - XML_ELEM("PurgeQueueBatchResult") { - for (size_t i = 0; i < resp.GetPurgeQueueBatch().EntriesSize(); ++i) { - const auto& entry = resp.GetPurgeQueueBatch().GetEntries(i); - if (entry.HasError()) { - XML_ELEM("BatchResultErrorEntry") { - XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); - XML_ELEM_CONT("Id", entry.GetId()); - XML_ELEM_CONT("Message", entry.GetError().GetMessage()); - } - } else { - XML_ELEM("PurgeQueueBatchResultEntry") { - XML_ELEM_CONT("Id", entry.GetId()); - } - } - } - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetPurgeQueueBatch().GetRequestId()); - } - } - } - } - result << XML_RESULT(); + HANDLE_ERROR(PurgeQueue); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("PurgeQueueResponse") { + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetPurgeQueue().GetRequestId()); + } + } + } + } + result << XML_RESULT(); break; } + case TSqsResponse::kPurgeQueueBatch: { + HANDLE_ERROR(PurgeQueueBatch); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("PurgeQueueBatchResponse") { + XML_ELEM("PurgeQueueBatchResult") { + for (size_t i = 0; i < resp.GetPurgeQueueBatch().EntriesSize(); ++i) { + const auto& entry = resp.GetPurgeQueueBatch().GetEntries(i); + if (entry.HasError()) { + XML_ELEM("BatchResultErrorEntry") { + XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); + XML_ELEM_CONT("Id", entry.GetId()); + XML_ELEM_CONT("Message", entry.GetError().GetMessage()); + } + } else { + XML_ELEM("PurgeQueueBatchResultEntry") { + XML_ELEM_CONT("Id", entry.GetId()); + } + } + } + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetPurgeQueueBatch().GetRequestId()); + } + } + } + } + result << XML_RESULT(); + break; + } + case TSqsResponse::kReceiveMessage: { - HANDLE_ERROR(ReceiveMessage); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("ReceiveMessageResponse") { - XML_ELEM("ReceiveMessageResult") { - for (size_t i = 0; i < resp.GetReceiveMessage().MessagesSize(); ++i) { - const auto& message = resp.GetReceiveMessage().GetMessages(i); -#define ATTRIBUTE(name, value) \ - XML_ELEM_IMPL("Attribute", Y_CAT(__LINE__, 1)) { \ - XML_ELEM_CONT_IMPL("Name", name, Y_CAT(__LINE__, 2)); \ - XML_ELEM_CONT_IMPL("Value", ToString(value), Y_CAT(__LINE__, 3)); \ - } - - XML_ELEM("Message") { - XML_ELEM_CONT("MessageId", message.GetMessageId()); - XML_ELEM_CONT("ReceiptHandle", message.GetReceiptHandle()); - XML_ELEM_CONT("MD5OfBody", message.GetMD5OfMessageBody()); - if (message.HasMD5OfMessageAttributes()) { - XML_ELEM_CONT("MD5OfMessageAttributes", message.GetMD5OfMessageAttributes()); - } - XML_ELEM_CONT("Body", message.GetData()); - - // attributes - if (message.HasSequenceNumber()) { - ATTRIBUTE("SequenceNumber", message.GetSequenceNumber()); - } - if (message.HasMessageDeduplicationId()) { - ATTRIBUTE("MessageDeduplicationId", message.GetMessageDeduplicationId()); - } - if (message.HasMessageGroupId()) { - ATTRIBUTE("MessageGroupId", 
message.GetMessageGroupId()); - } - if (message.HasApproximateFirstReceiveTimestamp()) { - ATTRIBUTE("ApproximateFirstReceiveTimestamp", message.GetApproximateFirstReceiveTimestamp()); - } - if (message.HasApproximateReceiveCount()) { - ATTRIBUTE("ApproximateReceiveCount", message.GetApproximateReceiveCount()); - } - if (message.HasSentTimestamp()) { - ATTRIBUTE("SentTimestamp", message.GetSentTimestamp()); - } + HANDLE_ERROR(ReceiveMessage); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("ReceiveMessageResponse") { + XML_ELEM("ReceiveMessageResult") { + for (size_t i = 0; i < resp.GetReceiveMessage().MessagesSize(); ++i) { + const auto& message = resp.GetReceiveMessage().GetMessages(i); +#define ATTRIBUTE(name, value) \ + XML_ELEM_IMPL("Attribute", Y_CAT(__LINE__, 1)) { \ + XML_ELEM_CONT_IMPL("Name", name, Y_CAT(__LINE__, 2)); \ + XML_ELEM_CONT_IMPL("Value", ToString(value), Y_CAT(__LINE__, 3)); \ + } + + XML_ELEM("Message") { + XML_ELEM_CONT("MessageId", message.GetMessageId()); + XML_ELEM_CONT("ReceiptHandle", message.GetReceiptHandle()); + XML_ELEM_CONT("MD5OfBody", message.GetMD5OfMessageBody()); + if (message.HasMD5OfMessageAttributes()) { + XML_ELEM_CONT("MD5OfMessageAttributes", message.GetMD5OfMessageAttributes()); + } + XML_ELEM_CONT("Body", message.GetData()); + + // attributes + if (message.HasSequenceNumber()) { + ATTRIBUTE("SequenceNumber", message.GetSequenceNumber()); + } + if (message.HasMessageDeduplicationId()) { + ATTRIBUTE("MessageDeduplicationId", message.GetMessageDeduplicationId()); + } + if (message.HasMessageGroupId()) { + ATTRIBUTE("MessageGroupId", message.GetMessageGroupId()); + } + if (message.HasApproximateFirstReceiveTimestamp()) { + ATTRIBUTE("ApproximateFirstReceiveTimestamp", message.GetApproximateFirstReceiveTimestamp()); + } + if (message.HasApproximateReceiveCount()) { + ATTRIBUTE("ApproximateReceiveCount", message.GetApproximateReceiveCount()); + } + if (message.HasSentTimestamp()) { + ATTRIBUTE("SentTimestamp", message.GetSentTimestamp()); + } if (message.HasSenderId()) { ATTRIBUTE("SenderId", message.GetSenderId()); } - // message attributes - for (const auto& attr : message.messageattributes()) { - XML_ELEM("MessageAttribute") { - XML_ELEM_CONT("Name", attr.GetName()); - XML_ELEM("Value") { - if (attr.HasDataType()) { - XML_ELEM_CONT("DataType", attr.GetDataType()); - } - if (attr.HasStringValue()) { - XML_ELEM_CONT("StringValue", attr.GetStringValue()); - } - if (attr.HasBinaryValue()) { - XML_ELEM_CONT("BinaryValue", Base64Encode(attr.GetBinaryValue())); - } - } - } - } - } -#undef ATTRIBUTE - } - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetReceiveMessage().GetRequestId()); - } + // message attributes + for (const auto& attr : message.messageattributes()) { + XML_ELEM("MessageAttribute") { + XML_ELEM_CONT("Name", attr.GetName()); + XML_ELEM("Value") { + if (attr.HasDataType()) { + XML_ELEM_CONT("DataType", attr.GetDataType()); + } + if (attr.HasStringValue()) { + XML_ELEM_CONT("StringValue", attr.GetStringValue()); + } + if (attr.HasBinaryValue()) { + XML_ELEM_CONT("BinaryValue", Base64Encode(attr.GetBinaryValue())); + } + } + } + } + } +#undef ATTRIBUTE + } + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetReceiveMessage().GetRequestId()); + } } } } - result << XML_RESULT(); + result << XML_RESULT(); break; } case TSqsResponse::kSendMessage: { - HANDLE_ERROR(SendMessage); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("SendMessageResponse") { - XML_ELEM("SendMessageResult") { - 
XML_ELEM_CONT("MD5OfMessageBody", resp.GetSendMessage().GetMD5OfMessageBody()); - if (resp.GetSendMessage().HasMD5OfMessageAttributes()) { - XML_ELEM_CONT("MD5OfMessageAttributes", resp.GetSendMessage().GetMD5OfMessageAttributes()); - } - XML_ELEM_CONT("MessageId", resp.GetSendMessage().GetMessageId()); - if (resp.GetSendMessage().HasSequenceNumber()) { - XML_ELEM_CONT("SequenceNumber", ToString(resp.GetSendMessage().GetSequenceNumber())); - } - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetSendMessage().GetRequestId()); - } - } - } - } - result << XML_RESULT(); + HANDLE_ERROR(SendMessage); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("SendMessageResponse") { + XML_ELEM("SendMessageResult") { + XML_ELEM_CONT("MD5OfMessageBody", resp.GetSendMessage().GetMD5OfMessageBody()); + if (resp.GetSendMessage().HasMD5OfMessageAttributes()) { + XML_ELEM_CONT("MD5OfMessageAttributes", resp.GetSendMessage().GetMD5OfMessageAttributes()); + } + XML_ELEM_CONT("MessageId", resp.GetSendMessage().GetMessageId()); + if (resp.GetSendMessage().HasSequenceNumber()) { + XML_ELEM_CONT("SequenceNumber", ToString(resp.GetSendMessage().GetSequenceNumber())); + } + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetSendMessage().GetRequestId()); + } + } + } + } + result << XML_RESULT(); break; } case TSqsResponse::kSendMessageBatch: { - HANDLE_ERROR(SendMessageBatch); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("SendMessageBatchResponse") { - XML_ELEM("SendMessageBatchResult") { - for (size_t i = 0; i < resp.GetSendMessageBatch().EntriesSize(); ++i) { - const auto& entry = resp.GetSendMessageBatch().GetEntries(i); - if (entry.HasError()) { - XML_ELEM("BatchResultErrorEntry") { - XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); - XML_ELEM_CONT("Id", entry.GetId()); - XML_ELEM_CONT("Message", entry.GetError().GetMessage()); - } - } else { - XML_ELEM("SendMessageBatchResultEntry") { - XML_ELEM_CONT("Id", entry.GetId()); - XML_ELEM_CONT("MD5OfMessageBody", entry.GetMD5OfMessageBody()); - XML_ELEM_CONT("MessageId", entry.GetMessageId()); - if (entry.HasSequenceNumber()) { - XML_ELEM_CONT("SequenceNumber", ToString(entry.GetSequenceNumber())); - } - if (entry.HasMD5OfMessageAttributes()) { - XML_ELEM_CONT("MD5OfMessageAttributes", entry.GetMD5OfMessageAttributes()); - } - } - } - } - } - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetSendMessageBatch().GetRequestId()); - } - } + HANDLE_ERROR(SendMessageBatch); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("SendMessageBatchResponse") { + XML_ELEM("SendMessageBatchResult") { + for (size_t i = 0; i < resp.GetSendMessageBatch().EntriesSize(); ++i) { + const auto& entry = resp.GetSendMessageBatch().GetEntries(i); + if (entry.HasError()) { + XML_ELEM("BatchResultErrorEntry") { + XML_ELEM_CONT("Code", entry.GetError().GetErrorCode()); + XML_ELEM_CONT("Id", entry.GetId()); + XML_ELEM_CONT("Message", entry.GetError().GetMessage()); + } + } else { + XML_ELEM("SendMessageBatchResultEntry") { + XML_ELEM_CONT("Id", entry.GetId()); + XML_ELEM_CONT("MD5OfMessageBody", entry.GetMD5OfMessageBody()); + XML_ELEM_CONT("MessageId", entry.GetMessageId()); + if (entry.HasSequenceNumber()) { + XML_ELEM_CONT("SequenceNumber", ToString(entry.GetSequenceNumber())); + } + if (entry.HasMD5OfMessageAttributes()) { + XML_ELEM_CONT("MD5OfMessageAttributes", entry.GetMD5OfMessageAttributes()); + } + } + } + } + } + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetSendMessageBatch().GetRequestId()); + } + } } } - result 
<< XML_RESULT(); + result << XML_RESULT(); break; } case TSqsResponse::kSetQueueAttributes: { - HANDLE_ERROR(SetQueueAttributes); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("SetQueueAttributesResponse") { - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetSetQueueAttributes().GetRequestId()); - } - } - } - } - result << XML_RESULT(); + HANDLE_ERROR(SetQueueAttributes); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("SetQueueAttributesResponse") { + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetSetQueueAttributes().GetRequestId()); + } + } + } + } + result << XML_RESULT(); break; } case TSqsResponse::kCreateUser: { - HANDLE_ERROR(CreateUser); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("CreateUserResponse") { - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetCreateUser().GetRequestId()); - } - } - } - } - result << XML_RESULT(); + HANDLE_ERROR(CreateUser); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("CreateUserResponse") { + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetCreateUser().GetRequestId()); + } + } + } + } + result << XML_RESULT(); break; } case TSqsResponse::kDeleteUser: { - HANDLE_ERROR(DeleteUser); - XML_BUILDER() { - XML_DOC() { - XML_ELEM("DeleteUserResponse") { - XML_ELEM("ResponseMetadata") { - XML_ELEM_CONT("RequestId", resp.GetCreateUser().GetRequestId()); - } - } - } - } - result << XML_RESULT(); + HANDLE_ERROR(DeleteUser); + XML_BUILDER() { + XML_DOC() { + XML_ELEM("DeleteUserResponse") { + XML_ELEM("ResponseMetadata") { + XML_ELEM_CONT("RequestId", resp.GetCreateUser().GetRequestId()); + } + } + } + } + result << XML_RESULT(); break; } case TSqsResponse::kModifyPermissions: { - HANDLE_ERROR(ModifyPermissions); + HANDLE_ERROR(ModifyPermissions); XML_BUILDER() { XML_DOC() { XML_ELEM("ModifyPermissionsResponse") { @@ -695,11 +695,11 @@ TSqsHttpResponse ResponseToAmazonXmlFormat(const TSqsResponse& resp) { } case TSqsResponse::kListPermissions: { - HANDLE_ERROR(ListPermissions); - + HANDLE_ERROR(ListPermissions); + const auto& listPermissions = resp.GetListPermissions(); -#define SERIALIZE_PERMISSIONS(resource, permission) \ +#define SERIALIZE_PERMISSIONS(resource, permission) \ for (size_t i = 0; i < listPermissions.Y_CAT(Get, Y_CAT(resource, Permissions))().Y_CAT(Y_CAT(permission, s), Size)(); ++i) { \ XML_ELEM_IMPL("Ya" Y_STRINGIZE(permission), Y_CAT(__LINE__, a)) { \ const auto& permissions = listPermissions.Y_CAT(Get, Y_CAT(resource, Permissions))().Y_CAT(Get, Y_CAT(permission, s))(i); \ @@ -737,7 +737,7 @@ TSqsHttpResponse ResponseToAmazonXmlFormat(const TSqsResponse& resp) { #undef SERIALIZE_PERMISSIONS_FOR_RESOURCE #undef SERIALIZE_PERMISSIONS case TSqsResponse::kListDeadLetterSourceQueues: { - HANDLE_ERROR(ListDeadLetterSourceQueues); + HANDLE_ERROR(ListDeadLetterSourceQueues); XML_BUILDER() { XML_DOC() { XML_ELEM("ListDeadLetterSourceQueuesResponse") { @@ -757,13 +757,13 @@ TSqsHttpResponse ResponseToAmazonXmlFormat(const TSqsResponse& resp) { } case TSqsResponse::RESPONSE_NOT_SET: { - return MakeErrorXmlResponse(NErrors::INTERNAL_FAILURE, nullptr, "Not implemented."); + return MakeErrorXmlResponse(NErrors::INTERNAL_FAILURE, nullptr, "Not implemented."); } } - return TSqsHttpResponse(result, 200); - -#undef HANDLE_ERROR + return TSqsHttpResponse(result, 200); + +#undef HANDLE_ERROR } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/http/xml.h b/ydb/core/ymq/http/xml.h index 4b0ce5758bc..008a5ad186a 100644 --- a/ydb/core/ymq/http/xml.h +++ 
b/ydb/core/ymq/http/xml.h @@ -5,11 +5,11 @@ #include <ydb/core/protos/msgbus.pb.h> #include <ydb/core/ymq/base/counters.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { -TSqsHttpResponse MakeErrorXmlResponse(const TErrorClass& errorClass, TUserCounters* userCounters, const TString& message = TString(), const TString& requestId = TString()); -TSqsHttpResponse MakeErrorXmlResponseFromCurrentException(TUserCounters* userCounters, const TString& requestId); +TSqsHttpResponse MakeErrorXmlResponse(const TErrorClass& errorClass, TUserCounters* userCounters, const TString& message = TString(), const TString& requestId = TString()); +TSqsHttpResponse MakeErrorXmlResponseFromCurrentException(TUserCounters* userCounters, const TString& requestId); -TSqsHttpResponse ResponseToAmazonXmlFormat(const NKikimrClient::TSqsResponse& resp); +TSqsHttpResponse ResponseToAmazonXmlFormat(const NKikimrClient::TSqsResponse& resp); -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/http/xml_builder.cpp b/ydb/core/ymq/http/xml_builder.cpp index cd26311dd93..41017bec10b 100644 --- a/ydb/core/ymq/http/xml_builder.cpp +++ b/ydb/core/ymq/http/xml_builder.cpp @@ -1,86 +1,86 @@ #include <ydb/core/ymq/http/xml_builder.h> - -#include <libxml/threads.h> -#include <libxml/xmlwriter.h> - -#include <exception> -#include <mutex> - -// SQS-284: LeakSanitizer: detected memory leaks: xmlTextWriterStartDocument -// Libxml2 has a small problem in this place. -// Make ASAN happy: use libxml calls in one thread first time. In this case there will be no leak. -// TSAN also complains in several other places in libxml. -// They all appear during initialization of global structures. -// So, if we call functions, that initialize them, once in one thread, bug won't appear. 
-static std::once_flag LibXmlBugsWorkaroundFlag; -static void LibXmlBugsWorkAround() { - xmlInitCharEncodingHandlers(); // Leak Sanitizer - xmlGetGlobalState(); // Thread Sanitizer - xmlInitThreads(); // Thread Sanitizer -} - -TXmlStringBuilder::TXmlStringBuilder() { - std::call_once(LibXmlBugsWorkaroundFlag, LibXmlBugsWorkAround); - - MemoryBuffer = xmlBufferCreate(); - if (MemoryBuffer == nullptr) { - ythrow TWriteXmlError() << "Failed to create memory buffer"; - } - xmlBufferPtr buffer = (xmlBufferPtr)MemoryBuffer; - - TextWriter = xmlNewTextWriterMemory(buffer, 0); - if (TextWriter == nullptr) { - xmlBufferFree(buffer); - ythrow TWriteXmlError() << "Failed to create xml text writer"; - } -} - -TXmlStringBuilder::~TXmlStringBuilder() { - xmlFreeTextWriter((xmlTextWriterPtr)TextWriter); - xmlBufferFree((xmlBufferPtr)MemoryBuffer); -} - -TString TXmlStringBuilder::GetResult() { - xmlBufferPtr buf = (xmlBufferPtr)MemoryBuffer; - return (const char*)buf->content; -} - -TXmlDocument::TXmlDocument(TXmlStringBuilder& builder) - : Builder(builder) -{ - if (xmlTextWriterStartDocument((xmlTextWriterPtr)Builder.TextWriter, nullptr, "UTF-8", nullptr) < 0) { - ythrow TWriteXmlError() << "Failed to start xml document"; - } -} - -TXmlDocument::~TXmlDocument() noexcept(false) { - const bool unwinding = std::uncaught_exception(); // TODO: use C++'17 std::uncaught_exceptions() func - if (!unwinding) { - if (xmlTextWriterEndDocument((xmlTextWriterPtr)Builder.TextWriter) < 0) { - ythrow TWriteXmlError() << "Failed to end xml document"; - } - } -} - -TXmlElement::TXmlElement(TXmlStringBuilder& builder, const char* name, const char* content) { - if (xmlTextWriterWriteElement((xmlTextWriterPtr)builder.TextWriter, (const xmlChar*)name, (const xmlChar*)content) < 0) { - ythrow TWriteXmlError() << "Failed to write xml element"; - } -} - -TXmlRecursiveElement::TXmlRecursiveElement(TXmlStringBuilder& builder, const char* name) - : Builder(builder) -{ - if (xmlTextWriterStartElement((xmlTextWriterPtr)Builder.TextWriter, (const xmlChar*)name) < 0) { - ythrow TWriteXmlError() << "Failed to write xml element"; - } -} - -TXmlRecursiveElement::~TXmlRecursiveElement() noexcept(false) { - const bool unwinding = std::uncaught_exception(); // TODO: use C++'17 std::uncaught_exceptions() func - if (!unwinding) { - if (xmlTextWriterEndElement((xmlTextWriterPtr)Builder.TextWriter) < 0) { - ythrow TWriteXmlError() << "Failed to end xml element"; - } - } -} + +#include <libxml/threads.h> +#include <libxml/xmlwriter.h> + +#include <exception> +#include <mutex> + +// SQS-284: LeakSanitizer: detected memory leaks: xmlTextWriterStartDocument +// Libxml2 has a small problem in this place. +// Make ASAN happy: use libxml calls in one thread first time. In this case there will be no leak. +// TSAN also complains in several other places in libxml. +// They all appear during initialization of global structures. +// So, if we call functions, that initialize them, once in one thread, bug won't appear. 
+static std::once_flag LibXmlBugsWorkaroundFlag; +static void LibXmlBugsWorkAround() { + xmlInitCharEncodingHandlers(); // Leak Sanitizer + xmlGetGlobalState(); // Thread Sanitizer + xmlInitThreads(); // Thread Sanitizer +} + +TXmlStringBuilder::TXmlStringBuilder() { + std::call_once(LibXmlBugsWorkaroundFlag, LibXmlBugsWorkAround); + + MemoryBuffer = xmlBufferCreate(); + if (MemoryBuffer == nullptr) { + ythrow TWriteXmlError() << "Failed to create memory buffer"; + } + xmlBufferPtr buffer = (xmlBufferPtr)MemoryBuffer; + + TextWriter = xmlNewTextWriterMemory(buffer, 0); + if (TextWriter == nullptr) { + xmlBufferFree(buffer); + ythrow TWriteXmlError() << "Failed to create xml text writer"; + } +} + +TXmlStringBuilder::~TXmlStringBuilder() { + xmlFreeTextWriter((xmlTextWriterPtr)TextWriter); + xmlBufferFree((xmlBufferPtr)MemoryBuffer); +} + +TString TXmlStringBuilder::GetResult() { + xmlBufferPtr buf = (xmlBufferPtr)MemoryBuffer; + return (const char*)buf->content; +} + +TXmlDocument::TXmlDocument(TXmlStringBuilder& builder) + : Builder(builder) +{ + if (xmlTextWriterStartDocument((xmlTextWriterPtr)Builder.TextWriter, nullptr, "UTF-8", nullptr) < 0) { + ythrow TWriteXmlError() << "Failed to start xml document"; + } +} + +TXmlDocument::~TXmlDocument() noexcept(false) { + const bool unwinding = std::uncaught_exception(); // TODO: use C++'17 std::uncaught_exceptions() func + if (!unwinding) { + if (xmlTextWriterEndDocument((xmlTextWriterPtr)Builder.TextWriter) < 0) { + ythrow TWriteXmlError() << "Failed to end xml document"; + } + } +} + +TXmlElement::TXmlElement(TXmlStringBuilder& builder, const char* name, const char* content) { + if (xmlTextWriterWriteElement((xmlTextWriterPtr)builder.TextWriter, (const xmlChar*)name, (const xmlChar*)content) < 0) { + ythrow TWriteXmlError() << "Failed to write xml element"; + } +} + +TXmlRecursiveElement::TXmlRecursiveElement(TXmlStringBuilder& builder, const char* name) + : Builder(builder) +{ + if (xmlTextWriterStartElement((xmlTextWriterPtr)Builder.TextWriter, (const xmlChar*)name) < 0) { + ythrow TWriteXmlError() << "Failed to write xml element"; + } +} + +TXmlRecursiveElement::~TXmlRecursiveElement() noexcept(false) { + const bool unwinding = std::uncaught_exception(); // TODO: use C++'17 std::uncaught_exceptions() func + if (!unwinding) { + if (xmlTextWriterEndElement((xmlTextWriterPtr)Builder.TextWriter) < 0) { + ythrow TWriteXmlError() << "Failed to end xml element"; + } + } +} diff --git a/ydb/core/ymq/http/xml_builder.h b/ydb/core/ymq/http/xml_builder.h index d9e87a4b2da..ff36c2c80a7 100644 --- a/ydb/core/ymq/http/xml_builder.h +++ b/ydb/core/ymq/http/xml_builder.h @@ -1,87 +1,87 @@ -#pragma once -#include <util/generic/string.h> -#include <util/generic/yexception.h> -#include <util/system/defaults.h> - -class TXmlDocument; -class TXmlElement; -class TXmlRecursiveElement; - -class TXmlStringBuilder { -public: - TXmlStringBuilder(); - ~TXmlStringBuilder(); - - TString GetResult(); - -private: - friend class TXmlDocument; - friend class TXmlElement; - friend class TXmlRecursiveElement; - void* TextWriter = nullptr; // xmlTextWriterPtr // void* to avoid including libxml headers - void* MemoryBuffer = nullptr; // xmlBufferPtr // void* to avoid including libxml headers -}; - -// RAII document wrapper -class TXmlDocument { -public: - TXmlDocument(TXmlStringBuilder& builder); - ~TXmlDocument() noexcept(false); - - operator bool() const { - return true; - } - -private: - TXmlStringBuilder& Builder; -}; - -class TXmlElement { -public: - 
TXmlElement(TXmlStringBuilder& builder, const char* name, const char* content); - - template <class T1, class T2> - TXmlElement(TXmlStringBuilder& builder, const T1& name, const T2& content) - : TXmlElement(builder, ToChars(name), ToChars(content)) - { - } - -private: - static const char* ToChars(const char* str) { - return str; - } - - static const char* ToChars(const TString& str) { - return str.c_str(); - } -}; - -// RAII element wrapper -class TXmlRecursiveElement { -public: - TXmlRecursiveElement(TXmlStringBuilder& builder, const char* name); - TXmlRecursiveElement(TXmlStringBuilder& builder, const TString& name) - : TXmlRecursiveElement(builder, name.c_str()) - { - } - ~TXmlRecursiveElement() noexcept(false); - - operator bool() const { - return true; - } - -private: - TXmlStringBuilder& Builder; -}; - -class TWriteXmlError: public yexception { -}; - -// Simplified usage -#define XML_BUILDER() TXmlStringBuilder xmlBuilder; -#define XML_RESULT() xmlBuilder.GetResult() -#define XML_DOC() if (TXmlDocument xmlDocument = TXmlDocument(xmlBuilder)) -#define XML_ELEM_IMPL(name, suffix) if (TXmlRecursiveElement Y_CAT(xmlElement, suffix) = TXmlRecursiveElement(xmlBuilder, name)) -#define XML_ELEM_CONT_IMPL(name, content, suffix) TXmlElement Y_CAT(xmlElement, suffix)(xmlBuilder, name, content) -#define XML_ELEM(name) XML_ELEM_IMPL(name, __LINE__) -#define XML_ELEM_CONT(name, content) XML_ELEM_CONT_IMPL(name, content, __LINE__) +#pragma once +#include <util/generic/string.h> +#include <util/generic/yexception.h> +#include <util/system/defaults.h> + +class TXmlDocument; +class TXmlElement; +class TXmlRecursiveElement; + +class TXmlStringBuilder { +public: + TXmlStringBuilder(); + ~TXmlStringBuilder(); + + TString GetResult(); + +private: + friend class TXmlDocument; + friend class TXmlElement; + friend class TXmlRecursiveElement; + void* TextWriter = nullptr; // xmlTextWriterPtr // void* to avoid including libxml headers + void* MemoryBuffer = nullptr; // xmlBufferPtr // void* to avoid including libxml headers +}; + +// RAII document wrapper +class TXmlDocument { +public: + TXmlDocument(TXmlStringBuilder& builder); + ~TXmlDocument() noexcept(false); + + operator bool() const { + return true; + } + +private: + TXmlStringBuilder& Builder; +}; + +class TXmlElement { +public: + TXmlElement(TXmlStringBuilder& builder, const char* name, const char* content); + + template <class T1, class T2> + TXmlElement(TXmlStringBuilder& builder, const T1& name, const T2& content) + : TXmlElement(builder, ToChars(name), ToChars(content)) + { + } + +private: + static const char* ToChars(const char* str) { + return str; + } + + static const char* ToChars(const TString& str) { + return str.c_str(); + } +}; + +// RAII element wrapper +class TXmlRecursiveElement { +public: + TXmlRecursiveElement(TXmlStringBuilder& builder, const char* name); + TXmlRecursiveElement(TXmlStringBuilder& builder, const TString& name) + : TXmlRecursiveElement(builder, name.c_str()) + { + } + ~TXmlRecursiveElement() noexcept(false); + + operator bool() const { + return true; + } + +private: + TXmlStringBuilder& Builder; +}; + +class TWriteXmlError: public yexception { +}; + +// Simplified usage +#define XML_BUILDER() TXmlStringBuilder xmlBuilder; +#define XML_RESULT() xmlBuilder.GetResult() +#define XML_DOC() if (TXmlDocument xmlDocument = TXmlDocument(xmlBuilder)) +#define XML_ELEM_IMPL(name, suffix) if (TXmlRecursiveElement Y_CAT(xmlElement, suffix) = TXmlRecursiveElement(xmlBuilder, name)) +#define XML_ELEM_CONT_IMPL(name, content, suffix) 
TXmlElement Y_CAT(xmlElement, suffix)(xmlBuilder, name, content) +#define XML_ELEM(name) XML_ELEM_IMPL(name, __LINE__) +#define XML_ELEM_CONT(name, content) XML_ELEM_CONT_IMPL(name, content, __LINE__) diff --git a/ydb/core/ymq/http/ya.make b/ydb/core/ymq/http/ya.make index a8c5f62386b..637725a7f3e 100644 --- a/ydb/core/ymq/http/ya.make +++ b/ydb/core/ymq/http/ya.make @@ -1,8 +1,8 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + LIBRARY() SRCS( @@ -10,11 +10,11 @@ SRCS( http.cpp types.cpp xml.cpp - xml_builder.cpp + xml_builder.cpp ) PEERDIR( - contrib/libs/libxml + contrib/libs/libxml library/cpp/actors/core library/cpp/cgiparam library/cpp/http/misc diff --git a/ydb/core/ymq/proto/ya.make b/ydb/core/ymq/proto/ya.make index 6a2e1949e26..d30159e5b1d 100644 --- a/ydb/core/ymq/proto/ya.make +++ b/ydb/core/ymq/proto/ya.make @@ -1,8 +1,8 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + PROTO_LIBRARY() SRCS( diff --git a/ydb/core/ymq/queues/common/queries.cpp b/ydb/core/ymq/queues/common/queries.cpp index ee4d78b5807..6c956920f48 100644 --- a/ydb/core/ymq/queues/common/queries.cpp +++ b/ydb/core/ymq/queues/common/queries.cpp @@ -1,34 +1,34 @@ -#include "queries.h" - -namespace NKikimr::NSQS { - -extern const char* const GetQueueParamsQuery = R"__( - ( - (let name (Parameter 'NAME (DataType 'Utf8String))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) - - (let queuesTable '%1$s/.Queues) - - (let queuesRow '( - '('Account userName) - '('QueueName name))) - (let queuesSelect '( - 'QueueId - 'QueueState - 'FifoQueue - 'Shards - 'Version - 'Partitions)) - (let queuesRead - (SelectRow queuesTable queuesRow queuesSelect)) - (let exists - (Exists queuesRead)) - - (return (Extend - (AsList (SetResult 'exists exists)) - (ListIf exists (SetResult 'queue queuesRead)) - )) - ) -)__"; - -} // namespace NKikimr::NSQS +#include "queries.h" + +namespace NKikimr::NSQS { + +extern const char* const GetQueueParamsQuery = R"__( + ( + (let name (Parameter 'NAME (DataType 'Utf8String))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) + + (let queuesTable '%1$s/.Queues) + + (let queuesRow '( + '('Account userName) + '('QueueName name))) + (let queuesSelect '( + 'QueueId + 'QueueState + 'FifoQueue + 'Shards + 'Version + 'Partitions)) + (let queuesRead + (SelectRow queuesTable queuesRow queuesSelect)) + (let exists + (Exists queuesRead)) + + (return (Extend + (AsList (SetResult 'exists exists)) + (ListIf exists (SetResult 'queue queuesRead)) + )) + ) +)__"; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/queues/common/queries.h b/ydb/core/ymq/queues/common/queries.h index 4f455213fe4..0a689631e7a 100644 --- a/ydb/core/ymq/queues/common/queries.h +++ b/ydb/core/ymq/queues/common/queries.h @@ -1,7 +1,7 @@ -#pragma once - -namespace NKikimr::NSQS { - -extern const char* const GetQueueParamsQuery; - -} // namespace NKikimr::NSQS +#pragma once + +namespace NKikimr::NSQS { + +extern const char* const GetQueueParamsQuery; + +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/queues/common/ya.make b/ydb/core/ymq/queues/common/ya.make index 98d17fd89fb..5c88545eb26 100644 --- a/ydb/core/ymq/queues/common/ya.make +++ b/ydb/core/ymq/queues/common/ya.make @@ -1,13 +1,13 @@ OWNER(g:sqs) - -LIBRARY() - -SRCS( - queries.cpp -) - -PEERDIR( + +LIBRARY() + +SRCS( + queries.cpp +) + +PEERDIR( ydb/core/ymq/base -) - -END() +) + +END() diff --git a/ydb/core/ymq/queues/fifo/queries.cpp b/ydb/core/ymq/queues/fifo/queries.cpp index 5876be81d67..3a28176124c 100644 --- 
a/ydb/core/ymq/queues/fifo/queries.cpp +++ b/ydb/core/ymq/queues/fifo/queries.cpp @@ -1,271 +1,271 @@ #include "queries.h" #include <ydb/core/ymq/base/constants.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { namespace { const char* const ChangeMessageVisibilityQuery = R"__( ( - (let now (Parameter 'NOW (DataType 'Uint64))) - (let groupsReadAttemptIdsPeriod (Parameter 'GROUPS_READ_ATTEMPT_IDS_PERIOD (DataType 'Uint64))) - (let keys (Parameter 'KEYS - (ListType (StructType - '('Offset (DataType 'Uint64)) - '('GroupId (DataType 'String)) - '('ReceiveAttemptId (DataType 'Utf8String)) - '('LockTimestamp (DataType 'Uint64)) - '('NewVisibilityDeadline (DataType 'Uint64)))))) + (let now (Parameter 'NOW (DataType 'Uint64))) + (let groupsReadAttemptIdsPeriod (Parameter 'GROUPS_READ_ATTEMPT_IDS_PERIOD (DataType 'Uint64))) + (let keys (Parameter 'KEYS + (ListType (StructType + '('Offset (DataType 'Uint64)) + '('GroupId (DataType 'String)) + '('ReceiveAttemptId (DataType 'Utf8String)) + '('LockTimestamp (DataType 'Uint64)) + '('NewVisibilityDeadline (DataType 'Uint64)))))) (let groupTable '%1$s/Groups) - (let readsTable '%1$s/Reads) - - (let records - (MapParameter keys (lambda '(item) (block '( - (let groupRow '( - '('GroupId (Member item 'GroupId)))) - (let groupSelect '( - 'Head - 'LockTimestamp - 'VisibilityDeadline - 'ReceiveAttemptId)) - (let groupRead (SelectRow groupTable groupRow groupSelect)) - - (let readsRow '( - '('ReceiveAttemptId (Member item 'ReceiveAttemptId)))) - (let readsSelect '( - 'Deadline)) - (let readsRead (SelectRow readsTable readsRow readsSelect)) - - (let exists (Exists groupRead)) - - (let changeCond - (IfPresent groupRead - (lambda '(x) - (Coalesce - (And - (Equal (Member x 'Head) (Member item 'Offset)) - (LessOrEqual now (Member groupRead 'VisibilityDeadline))) - (Bool 'false))) - (Bool 'false))) - - (let lockTimestamp (Member item 'LockTimestamp)) - (let readDeadline (Member readsRead 'Deadline)) - (let readCreateTimestamp (Sub readDeadline groupsReadAttemptIdsPeriod)) - (let sameReceiveAttempt - (Coalesce - (And - (Less lockTimestamp readDeadline) - (GreaterOrEqual lockTimestamp readCreateTimestamp) - ) - (Bool 'false) - ) - ) - - (return (AsStruct - '('GroupId (Member item 'GroupId)) - '('Exists exists) - '('ChangeCond changeCond) - '('NewVisibilityDeadline (Member item 'NewVisibilityDeadline)) - '('ReceiveAttemptId (Member item 'ReceiveAttemptId)) - '('SameReceiveAttempt sameReceiveAttempt)))))))) - - (let recordsToChange - (Filter records (lambda '(item) (block '( - (return (And (Member item 'Exists) (Member item 'ChangeCond))) - ))))) - - (let recordsToEraseReceiveAttempt - (Filter records (lambda '(item) (block '( - (return (Member item 'SameReceiveAttempt)) - ))))) - - (return (Extend - (AsList (SetResult 'result records)) - (AsList (SetResult 'result records)) - (AsList (SetResult 'recordsToEraseReceiveAttempt recordsToEraseReceiveAttempt)) - - (Map recordsToChange (lambda '(item) (block '( - (let groupRow '( - '('GroupId (Member item 'GroupId)))) - (let visibilityUpdate '( - '('VisibilityDeadline (Member item 'NewVisibilityDeadline)))) - (return (UpdateRow groupTable groupRow visibilityUpdate)) - )))) - - (Map recordsToEraseReceiveAttempt (lambda '(item) (block '( - (let readsRow '( - '('ReceiveAttemptId (Member item 'ReceiveAttemptId)))) - (return (EraseRow readsTable readsRow)) - )))) - )) + (let readsTable '%1$s/Reads) + + (let records + (MapParameter keys (lambda '(item) (block '( + (let groupRow '( + '('GroupId (Member item 'GroupId)))) + (let 
groupSelect '( + 'Head + 'LockTimestamp + 'VisibilityDeadline + 'ReceiveAttemptId)) + (let groupRead (SelectRow groupTable groupRow groupSelect)) + + (let readsRow '( + '('ReceiveAttemptId (Member item 'ReceiveAttemptId)))) + (let readsSelect '( + 'Deadline)) + (let readsRead (SelectRow readsTable readsRow readsSelect)) + + (let exists (Exists groupRead)) + + (let changeCond + (IfPresent groupRead + (lambda '(x) + (Coalesce + (And + (Equal (Member x 'Head) (Member item 'Offset)) + (LessOrEqual now (Member groupRead 'VisibilityDeadline))) + (Bool 'false))) + (Bool 'false))) + + (let lockTimestamp (Member item 'LockTimestamp)) + (let readDeadline (Member readsRead 'Deadline)) + (let readCreateTimestamp (Sub readDeadline groupsReadAttemptIdsPeriod)) + (let sameReceiveAttempt + (Coalesce + (And + (Less lockTimestamp readDeadline) + (GreaterOrEqual lockTimestamp readCreateTimestamp) + ) + (Bool 'false) + ) + ) + + (return (AsStruct + '('GroupId (Member item 'GroupId)) + '('Exists exists) + '('ChangeCond changeCond) + '('NewVisibilityDeadline (Member item 'NewVisibilityDeadline)) + '('ReceiveAttemptId (Member item 'ReceiveAttemptId)) + '('SameReceiveAttempt sameReceiveAttempt)))))))) + + (let recordsToChange + (Filter records (lambda '(item) (block '( + (return (And (Member item 'Exists) (Member item 'ChangeCond))) + ))))) + + (let recordsToEraseReceiveAttempt + (Filter records (lambda '(item) (block '( + (return (Member item 'SameReceiveAttempt)) + ))))) + + (return (Extend + (AsList (SetResult 'result records)) + (AsList (SetResult 'result records)) + (AsList (SetResult 'recordsToEraseReceiveAttempt recordsToEraseReceiveAttempt)) + + (Map recordsToChange (lambda '(item) (block '( + (let groupRow '( + '('GroupId (Member item 'GroupId)))) + (let visibilityUpdate '( + '('VisibilityDeadline (Member item 'NewVisibilityDeadline)))) + (return (UpdateRow groupTable groupRow visibilityUpdate)) + )))) + + (Map recordsToEraseReceiveAttempt (lambda '(item) (block '( + (let readsRow '( + '('ReceiveAttemptId (Member item 'ReceiveAttemptId)))) + (return (EraseRow readsTable readsRow)) + )))) + )) ) )__"; const char* const PurgeQueueQuery = R"__( ( - (let offsetFrom (Parameter 'OFFSET_FROM (DataType 'Uint64))) - (let offsetTo (Parameter 'OFFSET_TO (DataType 'Uint64))) - (let now (Parameter 'NOW (DataType 'Uint64))) - (let shard (Parameter 'SHARD (DataType 'Uint64))) - (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) - + (let offsetFrom (Parameter 'OFFSET_FROM (DataType 'Uint64))) + (let offsetTo (Parameter 'OFFSET_TO (DataType 'Uint64))) + (let now (Parameter 'NOW (DataType 'Uint64))) + (let shard (Parameter 'SHARD (DataType 'Uint64))) + (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) + (let messageTable '%1$s/Messages) (let stateTable '%1$s/State) - (let stateRow '( - '('State shard))) - (let stateSelect '( - 'CleanupVersion - 'LastModifiedTimestamp)) - (let stateRead - (SelectRow stateTable stateRow stateSelect)) - - (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) + (let stateRow '( + '('State shard))) + (let stateSelect '( + 'CleanupVersion + 'LastModifiedTimestamp)) + (let stateRead + (SelectRow stateTable stateRow stateSelect)) + (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) + (let messageRange '( - '('Offset offsetFrom offsetTo))) + '('Offset offsetFrom offsetTo))) (let messageSelect '( - 'SentTimestamp + 'SentTimestamp 'Offset - 'RandomId)) - - (let selectResult (SelectRange messageTable messageRange messageSelect 
'('('"ItemsLimit" batchSize)))) - - (let messages (Member selectResult 'List)) - (let truncated (Member selectResult 'Truncated)) - (let newCleanupVersion (Add (Member stateRead 'CleanupVersion) (Uint64 '1))) - - (let stateUpdate '( - '('LastModifiedTimestamp modifiedTimestamp) - '('CleanupVersion newCleanupVersion) - )) - - (return (Extend - (AsList (SetResult 'messages messages)) - (AsList (SetResult 'truncated truncated)) - (AsList (SetResult 'cleanupVersion newCleanupVersion)) - (AsList (UpdateRow stateTable stateRow stateUpdate)) - )) - ) -)__"; - -const char* const PurgeQueueStage2Query = R"__( - ( - (let cleanupVersion (Parameter 'CLEANUP_VERSION (DataType 'Uint64))) - (let now (Parameter 'NOW (DataType 'Uint64))) - (let messages (Parameter 'MESSAGES - (ListType (StructType - '('Offset (DataType 'Uint64)) - '('RandomId (DataType 'Uint64)) - '('SentTimestamp (DataType 'Uint64)) - )))) - - (let dataTable '%1$s/Data) - (let groupTable '%1$s/Groups) - (let messageTable '%1$s/Messages) - (let sentTsIdx '%1$s/SentTimestampIdx) - (let stateTable '%1$s/State) - - (let records - (MapParameter messages (lambda '(item) (block '( - (let messageRow '( - '('Offset (Member item 'Offset)))) - (let messageSelect '( - 'Offset - 'RandomId - 'GroupId - 'SentTimestamp - 'NextOffset - 'NextRandomId)) - - (let msg (SelectRow messageTable messageRow messageSelect)) - (return msg)))))) - - (let recordsExisted - (Filter records (lambda '(item) (block '( - (return (Exists item)) - ))))) - + 'RandomId)) + + (let selectResult (SelectRange messageTable messageRange messageSelect '('('"ItemsLimit" batchSize)))) + + (let messages (Member selectResult 'List)) + (let truncated (Member selectResult 'Truncated)) + (let newCleanupVersion (Add (Member stateRead 'CleanupVersion) (Uint64 '1))) + + (let stateUpdate '( + '('LastModifiedTimestamp modifiedTimestamp) + '('CleanupVersion newCleanupVersion) + )) + + (return (Extend + (AsList (SetResult 'messages messages)) + (AsList (SetResult 'truncated truncated)) + (AsList (SetResult 'cleanupVersion newCleanupVersion)) + (AsList (UpdateRow stateTable stateRow stateUpdate)) + )) + ) +)__"; + +const char* const PurgeQueueStage2Query = R"__( + ( + (let cleanupVersion (Parameter 'CLEANUP_VERSION (DataType 'Uint64))) + (let now (Parameter 'NOW (DataType 'Uint64))) + (let messages (Parameter 'MESSAGES + (ListType (StructType + '('Offset (DataType 'Uint64)) + '('RandomId (DataType 'Uint64)) + '('SentTimestamp (DataType 'Uint64)) + )))) + + (let dataTable '%1$s/Data) + (let groupTable '%1$s/Groups) + (let messageTable '%1$s/Messages) + (let sentTsIdx '%1$s/SentTimestampIdx) + (let stateTable '%1$s/State) + + (let records + (MapParameter messages (lambda '(item) (block '( + (let messageRow '( + '('Offset (Member item 'Offset)))) + (let messageSelect '( + 'Offset + 'RandomId + 'GroupId + 'SentTimestamp + 'NextOffset + 'NextRandomId)) + + (let msg (SelectRow messageTable messageRow messageSelect)) + (return msg)))))) + + (let recordsExisted + (Filter records (lambda '(item) (block '( + (return (Exists item)) + ))))) + (let stateRow '( '('State (Uint64 '0)))) - (let stateSelect '( - 'MessageCount - 'CleanupVersion - 'LastModifiedTimestamp)) - - (let stateRead (SelectRow stateTable stateRow stateSelect)) - (let count (Sub (Member stateRead 'MessageCount) (Length recordsExisted))) - (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) - + (let stateSelect '( + 'MessageCount + 'CleanupVersion + 'LastModifiedTimestamp)) + + (let stateRead (SelectRow stateTable stateRow 
stateSelect)) + (let count (Sub (Member stateRead 'MessageCount) (Length recordsExisted))) + (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) + (let stateUpdate '( - '('LastModifiedTimestamp modifiedTimestamp) - '('MessageCount count))) - - (let versionIsSame - (Coalesce - (Equal (Member stateRead 'CleanupVersion) cleanupVersion) - (Bool 'false) - ) - ) - + '('LastModifiedTimestamp modifiedTimestamp) + '('MessageCount count))) + + (let versionIsSame + (Coalesce + (Equal (Member stateRead 'CleanupVersion) cleanupVersion) + (Bool 'false) + ) + ) + (return (Extend - (AsList (SetResult 'versionIsSame versionIsSame)) - (AsList (SetResult 'messagesDeleted - (If versionIsSame - (Length recordsExisted) - (Uint64 '0)) - )) - - (If versionIsSame - (AsList (UpdateRow stateTable stateRow stateUpdate)) - (AsList (Void))) - - (If versionIsSame - (Map recordsExisted (lambda '(item) (block '( - (let groupRow '( - '('GroupId (Member item 'GroupId)))) - (let update '( - '('RandomId (Member item 'NextRandomId)) - '('Head (Member item 'NextOffset)))) - - # If we delete the last message, we need to delete the empty group - (let groupIsEmpty - (Coalesce - (Equal (Member item 'NextOffset) (Uint64 '0)) - (Bool 'false) - ) - ) - - (return - (If groupIsEmpty - (EraseRow groupTable groupRow) - (UpdateRow groupTable groupRow update) - ) - ))))) - (AsList (Void))) - - (If versionIsSame - (Map recordsExisted (lambda '(item) (block '( - (let row '( - '('RandomId (Member item 'RandomId)) - '('Offset (Member item 'Offset)))) - (return (EraseRow dataTable row)))))) - (AsList (Void))) - - (If versionIsSame - (Map recordsExisted (lambda '(item) (block '( - (let row '( - '('Offset (Member item 'Offset)))) - (return (EraseRow messageTable row)))))) - (AsList (Void))) - - (If versionIsSame - (Map recordsExisted (lambda '(item) (block '( - (let row '( - '('SentTimestamp (Member item 'SentTimestamp)) - '('Offset (Member item 'Offset)))) - (return (EraseRow sentTsIdx row)))))) - (AsList (Void))) + (AsList (SetResult 'versionIsSame versionIsSame)) + (AsList (SetResult 'messagesDeleted + (If versionIsSame + (Length recordsExisted) + (Uint64 '0)) + )) + + (If versionIsSame + (AsList (UpdateRow stateTable stateRow stateUpdate)) + (AsList (Void))) + + (If versionIsSame + (Map recordsExisted (lambda '(item) (block '( + (let groupRow '( + '('GroupId (Member item 'GroupId)))) + (let update '( + '('RandomId (Member item 'NextRandomId)) + '('Head (Member item 'NextOffset)))) + + # If we delete the last message, we need to delete the empty group + (let groupIsEmpty + (Coalesce + (Equal (Member item 'NextOffset) (Uint64 '0)) + (Bool 'false) + ) + ) + + (return + (If groupIsEmpty + (EraseRow groupTable groupRow) + (UpdateRow groupTable groupRow update) + ) + ))))) + (AsList (Void))) + + (If versionIsSame + (Map recordsExisted (lambda '(item) (block '( + (let row '( + '('RandomId (Member item 'RandomId)) + '('Offset (Member item 'Offset)))) + (return (EraseRow dataTable row)))))) + (AsList (Void))) + + (If versionIsSame + (Map recordsExisted (lambda '(item) (block '( + (let row '( + '('Offset (Member item 'Offset)))) + (return (EraseRow messageTable row)))))) + (AsList (Void))) + + (If versionIsSame + (Map recordsExisted (lambda '(item) (block '( + (let row '( + '('SentTimestamp (Member item 'SentTimestamp)) + '('Offset (Member item 'Offset)))) + (return (EraseRow sentTsIdx row)))))) + (AsList (Void))) )) ) )__"; @@ -276,17 +276,17 @@ const char* const DeleteMessageQuery = R"__( (ListType (StructType '('GroupId 
(DataType 'String)) '('Offset (DataType 'Uint64)) - '('LockTimestamp (DataType 'Uint64)) - '('ReceiveAttemptId (DataType 'Utf8String)))))) + '('LockTimestamp (DataType 'Uint64)) + '('ReceiveAttemptId (DataType 'Utf8String)))))) (let now (Parameter 'NOW (DataType 'Uint64))) - (let groupsReadAttemptIdsPeriod (Parameter 'GROUPS_READ_ATTEMPT_IDS_PERIOD (DataType 'Uint64))) + (let groupsReadAttemptIdsPeriod (Parameter 'GROUPS_READ_ATTEMPT_IDS_PERIOD (DataType 'Uint64))) (let dataTable '%1$s/Data) (let groupTable '%1$s/Groups) (let messageTable '%1$s/Messages) (let sentTsIdx '%1$s/SentTimestampIdx) (let stateTable '%1$s/State) - (let readsTable '%1$s/Reads) + (let readsTable '%1$s/Reads) (let records (MapParameter keys (lambda '(item) (block '( @@ -304,34 +304,34 @@ const char* const DeleteMessageQuery = R"__( (let groupSelect '( 'GroupId 'Head - 'LockTimestamp - 'ReceiveAttemptId)) - - (let readsRow '( - '('ReceiveAttemptId (Member item 'ReceiveAttemptId)))) - (let readsSelect '( - 'Deadline)) - (let readsRead (SelectRow readsTable readsRow readsSelect)) - - (let lockTimestamp (Member item 'LockTimestamp)) - (let readDeadline (Member readsRead 'Deadline)) - (let readCreateTimestamp (Sub readDeadline groupsReadAttemptIdsPeriod)) - (let sameReceiveAttempt - (Coalesce - (And - (Less lockTimestamp readDeadline) - (GreaterOrEqual lockTimestamp readCreateTimestamp) - ) - (Bool 'false) - ) - ) - + 'LockTimestamp + 'ReceiveAttemptId)) + + (let readsRow '( + '('ReceiveAttemptId (Member item 'ReceiveAttemptId)))) + (let readsSelect '( + 'Deadline)) + (let readsRead (SelectRow readsTable readsRow readsSelect)) + + (let lockTimestamp (Member item 'LockTimestamp)) + (let readDeadline (Member readsRead 'Deadline)) + (let readCreateTimestamp (Sub readDeadline groupsReadAttemptIdsPeriod)) + (let sameReceiveAttempt + (Coalesce + (And + (Less lockTimestamp readDeadline) + (GreaterOrEqual lockTimestamp readCreateTimestamp) + ) + (Bool 'false) + ) + ) + (return '( (SelectRow groupTable groupRow groupSelect) (SelectRow messageTable messageRow messageSelect) - (Member item 'LockTimestamp) - (Member item 'ReceiveAttemptId) - sameReceiveAttempt))))))) + (Member item 'LockTimestamp) + (Member item 'ReceiveAttemptId) + sameReceiveAttempt))))))) (let valid (Filter records (lambda '(item) (block '( @@ -346,11 +346,11 @@ const char* const DeleteMessageQuery = R"__( (Equal (Member group 'LockTimestamp) lockTimestamp)) (Bool 'false)))))))) - (let validWithReceiveAttemptToDelete - (Filter records (lambda '(item) (block '( - (let sameReceiveAttempt (Nth item '4)) - (return sameReceiveAttempt)))))) - + (let validWithReceiveAttemptToDelete + (Filter records (lambda '(item) (block '( + (let sameReceiveAttempt (Nth item '4)) + (return sameReceiveAttempt)))))) + (let result (Map valid (lambda '(item) (block '( (let msg (Nth item '1)) @@ -361,15 +361,15 @@ const char* const DeleteMessageQuery = R"__( (let stateRow '( '('State (Uint64 '0)))) (let stateSelect '( - 'MessageCount - 'LastModifiedTimestamp)) + 'MessageCount + 'LastModifiedTimestamp)) (let stateRead (SelectRow stateTable stateRow stateSelect)) - (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) + (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) (let count (Sub (Member stateRead 'MessageCount) (Length valid))) - + (let stateUpdate '( - '('LastModifiedTimestamp modifiedTimestamp) + '('LastModifiedTimestamp modifiedTimestamp) '('MessageCount count))) (return (Extend @@ -409,11 +409,11 @@ const char* const DeleteMessageQuery 
= R"__( '('SentTimestamp (Member (Nth item '1) 'SentTimestamp)) '('Offset (Member (Nth item '1) 'Offset)))) (return (EraseRow sentTsIdx row)))))) - - (Map validWithReceiveAttemptToDelete (lambda '(item) (block '( - (let row '( - '('ReceiveAttemptId (Nth item '3)))) - (return (EraseRow readsTable row)))))) + + (Map validWithReceiveAttemptToDelete (lambda '(item) (block '( + (let row '( + '('ReceiveAttemptId (Nth item '3)))) + (return (EraseRow readsTable row)))))) )) ) )__"; @@ -425,11 +425,11 @@ const char* const SetQueueAttributesQuery = R"__( (let visibility (Parameter 'VISIBILITY (OptionalType (DataType 'Uint64)))) (let wait (Parameter 'WAIT (OptionalType (DataType 'Uint64)))) (let maxMessageSize (Parameter 'MAX_MESSAGE_SIZE (OptionalType (DataType 'Uint64)))) - (let contentBasedDeduplication (Parameter 'CONTENT_BASED_DEDUPLICATION (OptionalType (DataType 'Bool)))) + (let contentBasedDeduplication (Parameter 'CONTENT_BASED_DEDUPLICATION (OptionalType (DataType 'Bool)))) (let maxReceiveCount (Parameter 'MAX_RECEIVE_COUNT (OptionalType (DataType 'Uint64)))) (let dlqArn (Parameter 'DLQ_TARGET_ARN (OptionalType (DataType 'Utf8String)))) (let dlqName (Parameter 'DLQ_TARGET_NAME (OptionalType (DataType 'Utf8String)))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) (let attrsTable '%1$s/Attributes) @@ -439,82 +439,82 @@ const char* const SetQueueAttributesQuery = R"__( 'DelaySeconds 'MessageRetentionPeriod 'ReceiveMessageWaitTime - 'VisibilityTimeout - 'MaximumMessageSize + 'VisibilityTimeout + 'MaximumMessageSize 'DlqName 'DlqArn 'MaxReceiveCount - 'ContentBasedDeduplication)) + 'ContentBasedDeduplication)) (let attrsRead (SelectRow attrsTable attrsRow attrsSelect)) (let attrsUpdate '( '('DelaySeconds (Coalesce delay (Member attrsRead 'DelaySeconds))) '('MessageRetentionPeriod (Coalesce retention (Member attrsRead 'MessageRetentionPeriod))) '('ReceiveMessageWaitTime (Coalesce wait (Member attrsRead 'ReceiveMessageWaitTime))) - '('VisibilityTimeout (Coalesce visibility (Member attrsRead 'VisibilityTimeout))) - '('MaximumMessageSize (Coalesce maxMessageSize (Member attrsRead 'MaximumMessageSize))) + '('VisibilityTimeout (Coalesce visibility (Member attrsRead 'VisibilityTimeout))) + '('MaximumMessageSize (Coalesce maxMessageSize (Member attrsRead 'MaximumMessageSize))) '('MaxReceiveCount (Coalesce maxReceiveCount (Member attrsRead 'MaxReceiveCount))) '('DlqName (Coalesce dlqName (Member attrsRead 'DlqName))) '('DlqArn (Coalesce dlqArn (Member attrsRead 'DlqArn))) - '('ContentBasedDeduplication (Coalesce contentBasedDeduplication (Member attrsRead 'ContentBasedDeduplication))))) - - (let queuesTable '%5$s/.Queues) - (let queuesRow '( - '('Account userName) - '('QueueName (Utf8String '"%4$s")))) - - (let queuesRowSelect '( - 'DlqName)) - - (let queuesRowRead (SelectRow queuesTable queuesRow queuesRowSelect)) - - (let queuesRowUpdate '( - '('DlqName (Coalesce dlqName (Member queuesRowRead 'DlqName))))) - + '('ContentBasedDeduplication (Coalesce contentBasedDeduplication (Member attrsRead 'ContentBasedDeduplication))))) + + (let queuesTable '%5$s/.Queues) + (let queuesRow '( + '('Account userName) + '('QueueName (Utf8String '"%4$s")))) + + (let queuesRowSelect '( + 'DlqName)) + + (let queuesRowRead (SelectRow queuesTable queuesRow queuesRowSelect)) + + (let queuesRowUpdate '( + '('DlqName (Coalesce dlqName (Member queuesRowRead 'DlqName))))) + (return (AsList (UpdateRow attrsTable attrsRow attrsUpdate) - (UpdateRow 
queuesTable queuesRow queuesRowUpdate))) + (UpdateRow queuesTable queuesRow queuesRowUpdate))) ) )__"; -const char* const InternalGetQueueAttributesQuery = R"__( - ( - (let attrsTable '%1$s/Attributes) - - (let attrsRow '( - '('State (Uint64 '0)))) - (let attrsSelect '( - 'ContentBasedDeduplication - 'DelaySeconds - 'FifoQueue - 'MaximumMessageSize - 'MessageRetentionPeriod - 'ReceiveMessageWaitTime +const char* const InternalGetQueueAttributesQuery = R"__( + ( + (let attrsTable '%1$s/Attributes) + + (let attrsRow '( + '('State (Uint64 '0)))) + (let attrsSelect '( + 'ContentBasedDeduplication + 'DelaySeconds + 'FifoQueue + 'MaximumMessageSize + 'MessageRetentionPeriod + 'ReceiveMessageWaitTime 'MaxReceiveCount 'DlqName 'DlqArn - 'VisibilityTimeout - 'ShowDetailedCountersDeadline)) - - (return (AsList - (SetResult 'attrs (SelectRow attrsTable attrsRow attrsSelect)))) - ) -)__"; - + 'VisibilityTimeout + 'ShowDetailedCountersDeadline)) + + (return (AsList + (SetResult 'attrs (SelectRow attrsTable attrsRow attrsSelect)))) + ) +)__"; + const char* const ListQueuesQuery = R"__( ( - (let folderId (Parameter 'FOLDERID (DataType 'Utf8String))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) - - (let queuesTable '%5$s/.Queues) + (let folderId (Parameter 'FOLDERID (DataType 'Utf8String))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) + (let queuesTable '%5$s/.Queues) + (let skipFolderIdFilter (Equal folderId (Utf8String '""))) - (let queuesRange '( - '('Account userName userName) + (let queuesRange '( + '('Account userName userName) '('QueueName (Utf8String '"") (Void)))) (let queueSelect '('QueueName 'QueueId 'QueueState 'FifoQueue 'CreatedTimestamp 'CustomQueueName 'FolderId 'MasterTabletId 'Version 'Shards)) - (let queues (Member (SelectRange queuesTable queuesRange queueSelect '()) 'List)) + (let queues (Member (SelectRange queuesTable queuesRange queueSelect '()) 'List)) (let filtered (Filter queues (lambda '(item) (block '( (return (Coalesce @@ -538,10 +538,10 @@ const char* const LockGroupsQuery = R"__( (let attemptId (Parameter 'ATTEMPT_ID (DataType 'Utf8String))) (let now (Parameter 'NOW (DataType 'Uint64))) (let count (Parameter 'COUNT (DataType 'Uint64))) - (let visibilityTimeout (Parameter 'VISIBILITY_TIMEOUT (DataType 'Uint64))) - (let groupsReadAttemptIdsPeriod (Parameter 'GROUPS_READ_ATTEMPT_IDS_PERIOD (DataType 'Uint64))) - (let fromGroup (Parameter 'FROM_GROUP (DataType 'String))) - (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) + (let visibilityTimeout (Parameter 'VISIBILITY_TIMEOUT (DataType 'Uint64))) + (let groupsReadAttemptIdsPeriod (Parameter 'GROUPS_READ_ATTEMPT_IDS_PERIOD (DataType 'Uint64))) + (let fromGroup (Parameter 'FROM_GROUP (DataType 'String))) + (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) (let groupTable '%1$s/Groups) (let readsTable '%1$s/Reads) @@ -551,22 +551,22 @@ const char* const LockGroupsQuery = R"__( '('ReceiveAttemptId attemptId))) (let readsSelect (SelectRow readsTable readsRow '('Deadline))) (let readsUpdate '( - '('Deadline (Add now groupsReadAttemptIdsPeriod)))) + '('Deadline (Add now groupsReadAttemptIdsPeriod)))) (let sameCond (IfPresent readsSelect (lambda '(x) (Coalesce (Less now (Member x 'Deadline)) (Bool 'false))) (Bool 'false))) (let groupRange '( - '('GroupId fromGroup (Void)))) + '('GroupId fromGroup (Void)))) (let groupSelect '( 'GroupId 'RandomId 'Head 'ReceiveAttemptId 'VisibilityDeadline)) - (let groupsRead (SelectRange groupTable groupRange groupSelect '('('"ItemsLimit" 
batchSize)))) - (let groups (Member groupsRead 'List)) - (let truncated (Member groupsRead 'Truncated)) - (let lastProcessedGroup (ToOptional (Skip groups (Sub (Length groups) (Uint64 '1))))) + (let groupsRead (SelectRange groupTable groupRange groupSelect '('('"ItemsLimit" batchSize)))) + (let groups (Member groupsRead 'List)) + (let truncated (Member groupsRead 'Truncated)) + (let lastProcessedGroup (ToOptional (Skip groups (Sub (Length groups) (Uint64 '1))))) (let previous (Take (Filter groups (lambda '(item) (block '( (return (Coalesce (Equal (Member item 'ReceiveAttemptId) attemptId) (Bool 'false))) @@ -588,14 +588,14 @@ const char* const LockGroupsQuery = R"__( (let groupUpdate '( '('ReceiveAttemptId attemptId) '('LockTimestamp now) - '('VisibilityDeadline (Add now visibilityTimeout)))) + '('VisibilityDeadline (Add now visibilityTimeout)))) (return (UpdateRow groupTable groupRow groupUpdate)) )))) (return (Extend (AsList (SetResult 'sameCond sameCond)) - (AsList (SetResult 'truncated truncated)) - (AsList (SetResult 'lastProcessedGroup lastProcessedGroup)) + (AsList (SetResult 'truncated truncated)) + (AsList (SetResult 'lastProcessedGroup lastProcessedGroup)) (AsList (If sameCond (SetResult 'offsets previous) (SetResult 'offsets filtered))) (ListIf (And (Not sameCond) (HasItems filtered)) (UpdateRow readsTable readsRow readsUpdate)) (If sameCond (Map previous update) (Map filtered update)))) @@ -805,7 +805,7 @@ const char* const ReadOrRedriveMessageQuery = R"__( (return (Extend (AsList (SetResult 'result messagesToReturnAsStruct)) (AsList (SetResult 'movedMessagesCount (Length messagesToMoveAsStruct))) - (AsList (SetResult 'newMessagesCount newSourceMsgCount)) + (AsList (SetResult 'newMessagesCount newSourceMsgCount)) (ListIf (HasItems messagesToMoveAsStruct) (UpdateRow dlqStateTable dlqStateRow dlqStateUpdate)) (ListIf (HasItems messagesToMoveAsStruct) (UpdateRow sourceStateTable sourceStateRow sourceStateUpdate)) @@ -933,19 +933,19 @@ const char* const WriteMessageQuery = R"__( ( (let randomId (Parameter 'RANDOM_ID (DataType 'Uint64))) (let timestamp (Parameter 'TIMESTAMP (DataType 'Uint64))) - (let deduplicationPeriod (Parameter 'DEDUPLICATION_PERIOD (DataType 'Uint64))) - (let messages (Parameter 'MESSAGES - (ListType (StructType - '('Attributes (DataType 'String)) - '('Data (DataType 'String)) - '('MessageId (DataType 'String)) - '('SenderId (DataType 'String)) - '('GroupId (DataType 'String)) - '('DeduplicationId (DataType 'String)) - '('Delay (DataType 'Uint64)) - '('Index (DataType 'Uint64)) - )) - )) + (let deduplicationPeriod (Parameter 'DEDUPLICATION_PERIOD (DataType 'Uint64))) + (let messages (Parameter 'MESSAGES + (ListType (StructType + '('Attributes (DataType 'String)) + '('Data (DataType 'String)) + '('MessageId (DataType 'String)) + '('SenderId (DataType 'String)) + '('GroupId (DataType 'String)) + '('DeduplicationId (DataType 'String)) + '('Delay (DataType 'Uint64)) + '('Index (DataType 'Uint64)) + )) + )) (let dataTable '%1$s/Data) (let dedupTable '%1$s/Deduplication) @@ -958,252 +958,252 @@ const char* const WriteMessageQuery = R"__( '('State (Uint64 '0)))) (let stateSelect '( 'MessageCount - 'WriteOffset - 'LastModifiedTimestamp)) + 'WriteOffset + 'LastModifiedTimestamp)) (let stateRead (SelectRow stateTable stateRow stateSelect)) - (let sentTimestamp (Max timestamp (Member stateRead 'LastModifiedTimestamp))) - (let startOffset (Add (Member stateRead 'WriteOffset) (Uint64 '1))) - - (let messagesInfo - (MapParameter messages (lambda '(item) (block '( - (let 
dedupRow '( - '('DedupId (Member item 'DeduplicationId)))) - (let dedupSelect '( - 'Deadline - 'MessageId - 'Offset)) - (let dedupRead (SelectRow dedupTable dedupRow dedupSelect)) - - (let dedupCond (IfPresent dedupRead (lambda '(x) (Coalesce (Less (Member x 'Deadline) sentTimestamp) (Bool 'false))) (Bool 'true))) - - (let groupRow '( - '('GroupId (Member item 'GroupId)))) - (let groupSelect '( - 'Head - 'Tail)) - - (let groupRead (SelectRow groupTable groupRow groupSelect)) - - (let tail (IfPresent groupRead (lambda '(x) (Coalesce (Member x 'Tail) (Uint64 '0))) (Uint64 '0))) - - (let offset (Add startOffset (Member item 'Index))) - (return - (AsStruct - '('dedupCond dedupCond) - '('dedupSelect dedupRead) - '('groupRead groupRead) - '('tail tail) - - '('Attributes (Member item 'Attributes)) - '('Data (Member item 'Data)) - '('MessageId (Member item 'MessageId)) - '('SenderId (Member item 'SenderId)) - '('GroupId (Member item 'GroupId)) - '('DeduplicationId (Member item 'DeduplicationId)) - '('Delay (Member item 'Delay)) - '('Index (Member item 'Index)) - )) - ))))) - - (let messagesAdded - (Filter messagesInfo (lambda '(item) (block '( - (return (Member item 'dedupCond)) - ))))) - - (let messagesInfoFirstNotDuplicated - (Sort messagesInfo (Bool 'true) (lambda '(item) (block '( - (return (If (Member item 'dedupCond) (Uint64 '0) (Uint64 '1))) - )))) - ) - - (let newMessagesCount (Add (Member stateRead 'MessageCount) (Length messagesAdded))) - (let newWriteOffset (Add (Member stateRead 'WriteOffset) (Length messagesAdded))) - - (let messagesInfoWithProperIndexes - (Enumerate messagesInfoFirstNotDuplicated (Coalesce startOffset (Uint64 '0)))) - - (let messagesInfoWithProperIndexesSorted - (Sort messagesInfoWithProperIndexes (Bool 'true) (lambda '(item) (block '( - (return (Member (Nth item '1) 'Index)) - )))) - ) - - (let result - (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( - (return - (AsStruct - '('dedupCond (Member (Nth item '1) 'dedupCond)) - '('dedupSelect (Member (Nth item '1) 'dedupSelect)) - '('offset (Nth item '0)) - )) - ))))) - + (let sentTimestamp (Max timestamp (Member stateRead 'LastModifiedTimestamp))) + (let startOffset (Add (Member stateRead 'WriteOffset) (Uint64 '1))) + + (let messagesInfo + (MapParameter messages (lambda '(item) (block '( + (let dedupRow '( + '('DedupId (Member item 'DeduplicationId)))) + (let dedupSelect '( + 'Deadline + 'MessageId + 'Offset)) + (let dedupRead (SelectRow dedupTable dedupRow dedupSelect)) + + (let dedupCond (IfPresent dedupRead (lambda '(x) (Coalesce (Less (Member x 'Deadline) sentTimestamp) (Bool 'false))) (Bool 'true))) + + (let groupRow '( + '('GroupId (Member item 'GroupId)))) + (let groupSelect '( + 'Head + 'Tail)) + + (let groupRead (SelectRow groupTable groupRow groupSelect)) + + (let tail (IfPresent groupRead (lambda '(x) (Coalesce (Member x 'Tail) (Uint64 '0))) (Uint64 '0))) + + (let offset (Add startOffset (Member item 'Index))) + (return + (AsStruct + '('dedupCond dedupCond) + '('dedupSelect dedupRead) + '('groupRead groupRead) + '('tail tail) + + '('Attributes (Member item 'Attributes)) + '('Data (Member item 'Data)) + '('MessageId (Member item 'MessageId)) + '('SenderId (Member item 'SenderId)) + '('GroupId (Member item 'GroupId)) + '('DeduplicationId (Member item 'DeduplicationId)) + '('Delay (Member item 'Delay)) + '('Index (Member item 'Index)) + )) + ))))) + + (let messagesAdded + (Filter messagesInfo (lambda '(item) (block '( + (return (Member item 'dedupCond)) + ))))) + + (let 
messagesInfoFirstNotDuplicated + (Sort messagesInfo (Bool 'true) (lambda '(item) (block '( + (return (If (Member item 'dedupCond) (Uint64 '0) (Uint64 '1))) + )))) + ) + + (let newMessagesCount (Add (Member stateRead 'MessageCount) (Length messagesAdded))) + (let newWriteOffset (Add (Member stateRead 'WriteOffset) (Length messagesAdded))) + + (let messagesInfoWithProperIndexes + (Enumerate messagesInfoFirstNotDuplicated (Coalesce startOffset (Uint64 '0)))) + + (let messagesInfoWithProperIndexesSorted + (Sort messagesInfoWithProperIndexes (Bool 'true) (lambda '(item) (block '( + (return (Member (Nth item '1) 'Index)) + )))) + ) + + (let result + (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( + (return + (AsStruct + '('dedupCond (Member (Nth item '1) 'dedupCond)) + '('dedupSelect (Member (Nth item '1) 'dedupSelect)) + '('offset (Nth item '0)) + )) + ))))) + (let stateUpdate '( - '('LastModifiedTimestamp sentTimestamp) - '('MessageCount newMessagesCount) - '('WriteOffset newWriteOffset))) - - (return (Extend - (AsList (SetResult 'result result)) - - (AsList (If (Greater (Length messagesAdded) (Uint64 '0)) (UpdateRow stateTable stateRow stateUpdate) (Void))) - - (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( - (let dedupCond (Member (Nth item '1) 'dedupCond)) - (let dedupRow '( - '('DedupId (Member (Nth item '1) 'DeduplicationId)))) - (let dedupUpdate '( - '('Deadline (Add sentTimestamp deduplicationPeriod)) - '('Offset (Nth item '0)) - '('MessageId (Member (Nth item '1) 'MessageId)))) - (return (If dedupCond (UpdateRow dedupTable dedupRow dedupUpdate) (Void))))))) - - (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( - (let dedupCond (Member (Nth item '1) 'dedupCond)) - (let dataRow '( - '('RandomId randomId) - '('Offset (Nth item '0)))) - (let dataUpdate '( - '('Data (Member (Nth item '1) 'Data)) - '('DedupId (Member (Nth item '1) 'DeduplicationId)) - '('Attributes (Member (Nth item '1) 'Attributes)) - '('SenderId (Member (Nth item '1) 'SenderId)) - '('MessageId (Member (Nth item '1) 'MessageId)))) - (return (If dedupCond (UpdateRow dataTable dataRow dataUpdate) (Void))))))) - - (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( - (let dedupCond (Member (Nth item '1) 'dedupCond)) - (let msgRow '( - '('Offset (Nth item '0)))) - (let messageUpdate '( - '('RandomId randomId) - '('GroupId (Member (Nth item '1) 'GroupId)) - '('NextOffset (Uint64 '0)) - '('NextRandomId (Uint64 '0)) - '('ReceiveCount (Uint32 '0)) - '('FirstReceiveTimestamp (Uint64 '0)) - '('SentTimestamp sentTimestamp))) - (return (If dedupCond (UpdateRow msgTable msgRow messageUpdate) (Void))))))) - - (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( - (let dedupCond (Member (Nth item '1) 'dedupCond)) - (let sentTsRow '( - '('SentTimestamp sentTimestamp) - '('Offset (Nth item '0)))) - (let delay (Member (Nth item '1) 'Delay)) - (let delayDeadline (If (Equal delay (Uint64 '0)) (Uint64 '0) (Add sentTimestamp delay))) - (let sentTsUpdate '( - '('RandomId randomId) - '('DelayDeadline delayDeadline) - '('GroupId (Member (Nth item '1) 'GroupId)))) - (return (If dedupCond (UpdateRow sentTsIdx sentTsRow sentTsUpdate) (Void))))))) - - (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( - (let dedupCond (Member (Nth item '1) 'dedupCond)) - (let groupRow '( - '('GroupId (Member (Nth item '1) 'GroupId)))) - (let delay (Member (Nth item '1) 'Delay)) - (let delayDeadline (If (Equal delay (Uint64 '0)) (Uint64 '0) (Add sentTimestamp delay))) - 
(let groupInsert '( - '('RandomId randomId) - '('Head (Nth item '0)) - '('Tail (Nth item '0)) - '('LockTimestamp (Uint64 '0)) - '('VisibilityDeadline delayDeadline))) - (let groupRead (Member (Nth item '1) 'groupRead)) - (let groupUpdate '( - '('Head (Member groupRead 'Head)) - '('Tail (Nth item '0)))) - (let tail (Member (Nth item '1) 'tail)) - (return - (If dedupCond - (If (Equal tail (Uint64 '0)) - (UpdateRow groupTable groupRow groupInsert) - (UpdateRow groupTable groupRow groupUpdate) - ) - (Void) - )))))) - - (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( - (let dedupCond (Member (Nth item '1) 'dedupCond)) - (let tail (Member (Nth item '1) 'tail)) - (let prevMessageRow '( - '('Offset tail))) - (let prevMessageUpdate '( - '('NextOffset (Nth item '0)) - '('NextRandomId randomId))) - (return - (If (And dedupCond (NotEqual tail (Uint64 '0))) - (UpdateRow msgTable prevMessageRow prevMessageUpdate) - (Void)) - ))))) - )) + '('LastModifiedTimestamp sentTimestamp) + '('MessageCount newMessagesCount) + '('WriteOffset newWriteOffset))) + + (return (Extend + (AsList (SetResult 'result result)) + + (AsList (If (Greater (Length messagesAdded) (Uint64 '0)) (UpdateRow stateTable stateRow stateUpdate) (Void))) + + (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( + (let dedupCond (Member (Nth item '1) 'dedupCond)) + (let dedupRow '( + '('DedupId (Member (Nth item '1) 'DeduplicationId)))) + (let dedupUpdate '( + '('Deadline (Add sentTimestamp deduplicationPeriod)) + '('Offset (Nth item '0)) + '('MessageId (Member (Nth item '1) 'MessageId)))) + (return (If dedupCond (UpdateRow dedupTable dedupRow dedupUpdate) (Void))))))) + + (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( + (let dedupCond (Member (Nth item '1) 'dedupCond)) + (let dataRow '( + '('RandomId randomId) + '('Offset (Nth item '0)))) + (let dataUpdate '( + '('Data (Member (Nth item '1) 'Data)) + '('DedupId (Member (Nth item '1) 'DeduplicationId)) + '('Attributes (Member (Nth item '1) 'Attributes)) + '('SenderId (Member (Nth item '1) 'SenderId)) + '('MessageId (Member (Nth item '1) 'MessageId)))) + (return (If dedupCond (UpdateRow dataTable dataRow dataUpdate) (Void))))))) + + (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( + (let dedupCond (Member (Nth item '1) 'dedupCond)) + (let msgRow '( + '('Offset (Nth item '0)))) + (let messageUpdate '( + '('RandomId randomId) + '('GroupId (Member (Nth item '1) 'GroupId)) + '('NextOffset (Uint64 '0)) + '('NextRandomId (Uint64 '0)) + '('ReceiveCount (Uint32 '0)) + '('FirstReceiveTimestamp (Uint64 '0)) + '('SentTimestamp sentTimestamp))) + (return (If dedupCond (UpdateRow msgTable msgRow messageUpdate) (Void))))))) + + (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( + (let dedupCond (Member (Nth item '1) 'dedupCond)) + (let sentTsRow '( + '('SentTimestamp sentTimestamp) + '('Offset (Nth item '0)))) + (let delay (Member (Nth item '1) 'Delay)) + (let delayDeadline (If (Equal delay (Uint64 '0)) (Uint64 '0) (Add sentTimestamp delay))) + (let sentTsUpdate '( + '('RandomId randomId) + '('DelayDeadline delayDeadline) + '('GroupId (Member (Nth item '1) 'GroupId)))) + (return (If dedupCond (UpdateRow sentTsIdx sentTsRow sentTsUpdate) (Void))))))) + + (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( + (let dedupCond (Member (Nth item '1) 'dedupCond)) + (let groupRow '( + '('GroupId (Member (Nth item '1) 'GroupId)))) + (let delay (Member (Nth item '1) 'Delay)) + (let delayDeadline (If (Equal delay (Uint64 
'0)) (Uint64 '0) (Add sentTimestamp delay))) + (let groupInsert '( + '('RandomId randomId) + '('Head (Nth item '0)) + '('Tail (Nth item '0)) + '('LockTimestamp (Uint64 '0)) + '('VisibilityDeadline delayDeadline))) + (let groupRead (Member (Nth item '1) 'groupRead)) + (let groupUpdate '( + '('Head (Member groupRead 'Head)) + '('Tail (Nth item '0)))) + (let tail (Member (Nth item '1) 'tail)) + (return + (If dedupCond + (If (Equal tail (Uint64 '0)) + (UpdateRow groupTable groupRow groupInsert) + (UpdateRow groupTable groupRow groupUpdate) + ) + (Void) + )))))) + + (Map messagesInfoWithProperIndexesSorted (lambda '(item) (block '( + (let dedupCond (Member (Nth item '1) 'dedupCond)) + (let tail (Member (Nth item '1) 'tail)) + (let prevMessageRow '( + '('Offset tail))) + (let prevMessageUpdate '( + '('NextOffset (Nth item '0)) + '('NextRandomId randomId))) + (return + (If (And dedupCond (NotEqual tail (Uint64 '0))) + (UpdateRow msgTable prevMessageRow prevMessageUpdate) + (Void)) + ))))) + )) ) )__"; -static const char* const DeduplicationCleanupQuery = R"__( +static const char* const DeduplicationCleanupQuery = R"__( ( - (let now (Parameter 'NOW (DataType 'Uint64))) - (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) - (let keyRangeStart (Parameter 'KEY_RANGE_START (DataType 'String))) + (let now (Parameter 'NOW (DataType 'Uint64))) + (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) + (let keyRangeStart (Parameter 'KEY_RANGE_START (DataType 'String))) (let dedupTable '%1$s/Deduplication) (let dedupRange '( - '('DedupId keyRangeStart (Void)))) + '('DedupId keyRangeStart (Void)))) (let dedupSelect '( 'DedupId 'Deadline)) - (let dedups (SelectRange dedupTable dedupRange dedupSelect '('('"ItemsLimit" batchSize)))) - - (let dedupsList (Member dedups 'List)) - (let dedupToErase (Filter dedupsList (lambda '(item) (block '( - (return (Coalesce (Less (Member item 'Deadline) now) (Bool 'false))) - ))))) - - (let dedupsCount (Length dedupsList)) - (let lastSelectedRow (ToOptional (Skip dedupsList (Max (Sub dedupsCount (Uint64 '1)) (Uint64 '0))))) - - (return (Extend - (AsList (SetResult 'moreData (Member dedups 'Truncated))) - (AsList (SetResult 'lastProcessedKey (Member lastSelectedRow 'DedupId))) - - (Map dedupToErase (lambda '(item) (block '( - (return (EraseRow dedupTable '( - '('DedupId (Member item 'DedupId))))))))) - )) - ) -)__"; - -static const char* const ReadsCleanupQuery = R"__( - ( - (let now (Parameter 'NOW (DataType 'Uint64))) - (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) - (let keyRangeStart (Parameter 'KEY_RANGE_START (DataType 'Utf8String))) - - (let readsTable '%1$s/Reads) - + (let dedups (SelectRange dedupTable dedupRange dedupSelect '('('"ItemsLimit" batchSize)))) + + (let dedupsList (Member dedups 'List)) + (let dedupToErase (Filter dedupsList (lambda '(item) (block '( + (return (Coalesce (Less (Member item 'Deadline) now) (Bool 'false))) + ))))) + + (let dedupsCount (Length dedupsList)) + (let lastSelectedRow (ToOptional (Skip dedupsList (Max (Sub dedupsCount (Uint64 '1)) (Uint64 '0))))) + + (return (Extend + (AsList (SetResult 'moreData (Member dedups 'Truncated))) + (AsList (SetResult 'lastProcessedKey (Member lastSelectedRow 'DedupId))) + + (Map dedupToErase (lambda '(item) (block '( + (return (EraseRow dedupTable '( + '('DedupId (Member item 'DedupId))))))))) + )) + ) +)__"; + +static const char* const ReadsCleanupQuery = R"__( + ( + (let now (Parameter 'NOW (DataType 'Uint64))) + (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) + (let 
keyRangeStart (Parameter 'KEY_RANGE_START (DataType 'Utf8String))) + + (let readsTable '%1$s/Reads) + (let readRange '( - '('ReceiveAttemptId keyRangeStart (Void)))) + '('ReceiveAttemptId keyRangeStart (Void)))) (let readSelect '( 'ReceiveAttemptId 'Deadline)) - (let reads (SelectRange readsTable readRange readSelect '('('"ItemsLimit" batchSize)))) + (let reads (SelectRange readsTable readRange readSelect '('('"ItemsLimit" batchSize)))) - (let readsList (Member reads 'List)) - (let readsToErase (Filter readsList (lambda '(item) (block '( + (let readsList (Member reads 'List)) + (let readsToErase (Filter readsList (lambda '(item) (block '( (return (Coalesce (Less (Member item 'Deadline) now) (Bool 'false))) ))))) - (let readsCount (Length readsList)) - (let lastSelectedRow (ToOptional (Skip readsList (Max (Sub readsCount (Uint64 '1)) (Uint64 '0))))) + (let readsCount (Length readsList)) + (let lastSelectedRow (ToOptional (Skip readsList (Max (Sub readsCount (Uint64 '1)) (Uint64 '0))))) (return (Extend - (AsList (SetResult 'moreData (Member reads 'Truncated))) - (AsList (SetResult 'lastProcessedKey (Member lastSelectedRow 'ReceiveAttemptId))) - + (AsList (SetResult 'moreData (Member reads 'Truncated))) + (AsList (SetResult 'lastProcessedKey (Member lastSelectedRow 'ReceiveAttemptId))) + (Map readsToErase (lambda '(item) (block '( (return (EraseRow readsTable '( '('ReceiveAttemptId (Member item 'ReceiveAttemptId))))))))) @@ -1229,33 +1229,33 @@ const char* const SetRetentionQuery = R"__( (let boundary (If purge now (Coalesce (Sub now (Member attrs 'MessageRetentionPeriod)) (Uint64 '0)))) - (let range '( - '('State (Uint64 '0) (Uint64 '18446744073709551615)))) - (let fields '( - 'State - 'RetentionBoundary)) - (let records (Member (SelectRange stateTable range fields '()) 'List)) + (let range '( + '('State (Uint64 '0) (Uint64 '18446744073709551615)))) + (let fields '( + 'State + 'RetentionBoundary)) + (let records (Member (SelectRange stateTable range fields '()) 'List)) - (let result - (Map records (lambda '(item) (block '( - (let updated - (Coalesce - (Less (Member item 'RetentionBoundary) boundary) - (Bool 'false))) + (let result + (Map records (lambda '(item) (block '( + (let updated + (Coalesce + (Less (Member item 'RetentionBoundary) boundary) + (Bool 'false))) - (return (AsStruct - '('Shard (Member item 'State)) - '('RetentionBoundary (Max boundary (Member item 'RetentionBoundary))) - '('Updated updated)))))))) + (return (AsStruct + '('Shard (Member item 'State)) + '('RetentionBoundary (Max boundary (Member item 'RetentionBoundary))) + '('Updated updated)))))))) - (let updated (Filter result (lambda '(item) (block '( - (return (Coalesce (Equal (Member item 'Updated) (Bool 'true)) (Bool 'false)))))))) + (let updated (Filter result (lambda '(item) (block '( + (return (Coalesce (Equal (Member item 'Updated) (Bool 'true)) (Bool 'false)))))))) (return (Extend (AsList (SetResult 'result result)) (AsList (SetResult 'retention (Member attrs 'MessageRetentionPeriod))) - (Map updated (lambda '(item) (block '( + (Map updated (lambda '(item) (block '( (let row '( '('State (Member item 'Shard)))) (let update '( @@ -1265,101 +1265,101 @@ const char* const SetRetentionQuery = R"__( ) )__"; -const char* const GetMessageCountMetricsQuery = R"__( - ( - (let shard (Parameter 'SHARD (DataType 'Uint64))) - - (let stateTable '%1$s/State) - - (let stateRow '( - '('State shard))) - (let stateSelect '( - 'MessageCount - 'InflyCount - 'CreatedTimestamp)) - - (let stateRead - (SelectRow stateTable stateRow 
stateSelect)) - - (return (AsList - (SetResult 'messagesCount (Member stateRead 'MessageCount)) - (SetResult 'inflyMessagesCount (Member stateRead 'InflyCount)) - (SetResult 'createdTimestamp (Member stateRead 'CreatedTimestamp)))) - ) -)__"; - -const char* const GetOldestMessageTimestampMetricsQuery = R"__( - ( - (let timeFrom (Parameter 'TIME_FROM (DataType 'Uint64))) - - (let sentTsIdx '%1$s/SentTimestampIdx) - - (let sentIdxRange '( - '('SentTimestamp timeFrom (Uint64 '18446744073709551615)) - '('Offset (Uint64 '0) (Uint64 '18446744073709551615)))) - (let sentIdxSelect '( - 'SentTimestamp)) - (let selectResult (SelectRange sentTsIdx sentIdxRange sentIdxSelect '('('"ItemsLimit" (Uint64 '1))))) - (let messages (Member selectResult 'List)) - - (return (Extend - (AsList (SetResult 'messages messages)) - )) - ) -)__"; - -const char* const GetRetentionOffsetQuery = R"__( - ( - (let offsetFrom (Parameter 'OFFSET_FROM (DataType 'Uint64))) - (let timeFrom (Parameter 'TIME_FROM (DataType 'Uint64))) - (let timeTo (Parameter 'TIME_TO (DataType 'Uint64))) - (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) - - (let sentTsIdx '%1$s/SentTimestampIdx) - - (let sentIdxRange '( - '('SentTimestamp timeFrom timeTo) - '('Offset offsetFrom (Uint64 '18446744073709551615)))) - (let sentIdxSelect '( - 'Offset)) - (let selectResult (SelectRange sentTsIdx sentIdxRange sentIdxSelect '('('"ItemsLimit" batchSize)))) - (let messages (Member selectResult 'List)) - (let truncated (Member selectResult 'Truncated)) - - (return (Extend - (AsList (SetResult 'messages messages)) - (AsList (SetResult 'truncated truncated)) - )) - ) -)__"; - +const char* const GetMessageCountMetricsQuery = R"__( + ( + (let shard (Parameter 'SHARD (DataType 'Uint64))) + + (let stateTable '%1$s/State) + + (let stateRow '( + '('State shard))) + (let stateSelect '( + 'MessageCount + 'InflyCount + 'CreatedTimestamp)) + + (let stateRead + (SelectRow stateTable stateRow stateSelect)) + + (return (AsList + (SetResult 'messagesCount (Member stateRead 'MessageCount)) + (SetResult 'inflyMessagesCount (Member stateRead 'InflyCount)) + (SetResult 'createdTimestamp (Member stateRead 'CreatedTimestamp)))) + ) +)__"; + +const char* const GetOldestMessageTimestampMetricsQuery = R"__( + ( + (let timeFrom (Parameter 'TIME_FROM (DataType 'Uint64))) + + (let sentTsIdx '%1$s/SentTimestampIdx) + + (let sentIdxRange '( + '('SentTimestamp timeFrom (Uint64 '18446744073709551615)) + '('Offset (Uint64 '0) (Uint64 '18446744073709551615)))) + (let sentIdxSelect '( + 'SentTimestamp)) + (let selectResult (SelectRange sentTsIdx sentIdxRange sentIdxSelect '('('"ItemsLimit" (Uint64 '1))))) + (let messages (Member selectResult 'List)) + + (return (Extend + (AsList (SetResult 'messages messages)) + )) + ) +)__"; + +const char* const GetRetentionOffsetQuery = R"__( + ( + (let offsetFrom (Parameter 'OFFSET_FROM (DataType 'Uint64))) + (let timeFrom (Parameter 'TIME_FROM (DataType 'Uint64))) + (let timeTo (Parameter 'TIME_TO (DataType 'Uint64))) + (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) + + (let sentTsIdx '%1$s/SentTimestampIdx) + + (let sentIdxRange '( + '('SentTimestamp timeFrom timeTo) + '('Offset offsetFrom (Uint64 '18446744073709551615)))) + (let sentIdxSelect '( + 'Offset)) + (let selectResult (SelectRange sentTsIdx sentIdxRange sentIdxSelect '('('"ItemsLimit" batchSize)))) + (let messages (Member selectResult 'List)) + (let truncated (Member selectResult 'Truncated)) + + (return (Extend + (AsList (SetResult 'messages messages)) + (AsList 
(SetResult 'truncated truncated)) + )) + ) +)__"; + const char* const ListDeadLetterSourceQueuesQuery = R"__( ( - (let folderId (Parameter 'FOLDERID (DataType 'Utf8String))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) - - (let queuesTable '%5$s/.Queues) + (let folderId (Parameter 'FOLDERID (DataType 'Utf8String))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) - (let queuesRow '( - '('Account userName) + (let queuesTable '%5$s/.Queues) + + (let queuesRow '( + '('Account userName) '('QueueName (Utf8String '"%4$s")))) - (let queuesRowSelect '( + (let queuesRowSelect '( 'QueueName 'CustomQueueName)) - (let queuesRowRead (SelectRow queuesTable queuesRow queuesRowSelect)) + (let queuesRowRead (SelectRow queuesTable queuesRow queuesRowSelect)) (let skipFolderIdFilter (Equal folderId (Utf8String '""))) (let dlqName - (If skipFolderIdFilter (Member queuesRowRead 'QueueName) (Coalesce (Member queuesRowRead 'CustomQueueName) (Utf8String '"")))) + (If skipFolderIdFilter (Member queuesRowRead 'QueueName) (Coalesce (Member queuesRowRead 'CustomQueueName) (Utf8String '"")))) - (let queuesRange '( - '('Account userName userName) + (let queuesRange '( + '('Account userName userName) '('QueueName (Utf8String '"") (Void)))) - (let queuesSelect '('QueueName 'QueueState 'FolderId 'DlqName 'CustomQueueName)) - (let queues (Member (SelectRange queuesTable queuesRange queuesSelect '()) 'List)) + (let queuesSelect '('QueueName 'QueueState 'FolderId 'DlqName 'CustomQueueName)) + (let queues (Member (SelectRange queuesTable queuesRange queuesSelect '()) 'List)) (let filtered (Filter queues (lambda '(item) (block '( (return (Coalesce @@ -1384,38 +1384,38 @@ const char* const ListDeadLetterSourceQueuesQuery = R"__( const char* GetFifoQueryById(size_t id) { switch (id) { - case DELETE_MESSAGE_ID: // 0 - return DeleteMessageQuery; - case LOCK_GROUP_ID: // 1 - return LockGroupsQuery; - case READ_MESSAGE_ID: // 2 - return ReadMessageQuery; - case WRITE_MESSAGE_ID: // 3 - return WriteMessageQuery; - case PURGE_QUEUE_ID: // 4 - return PurgeQueueQuery; - case CHANGE_VISIBILITY_ID: // 5 - return ChangeMessageVisibilityQuery; - case CLEANUP_DEDUPLICATION_ID: // 6 - return DeduplicationCleanupQuery; - case CLEANUP_READS_ID: // 7 - return ReadsCleanupQuery; - case LIST_QUEUES_ID: // 8 - return ListQueuesQuery; - case SET_QUEUE_ATTRIBUTES_ID: // 9 - return SetQueueAttributesQuery; - case SET_RETENTION_ID: // 10 - return SetRetentionQuery; - case INTERNAL_GET_QUEUE_ATTRIBUTES_ID: // 13 - return InternalGetQueueAttributesQuery; - case PURGE_QUEUE_STAGE2_ID: // 14 - return PurgeQueueStage2Query; - case GET_MESSAGE_COUNT_METRIC_ID: // 15 - return GetMessageCountMetricsQuery; - case GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID: // 16 - return GetOldestMessageTimestampMetricsQuery; - case GET_RETENTION_OFFSET_ID: // 17 - return GetRetentionOffsetQuery; + case DELETE_MESSAGE_ID: // 0 + return DeleteMessageQuery; + case LOCK_GROUP_ID: // 1 + return LockGroupsQuery; + case READ_MESSAGE_ID: // 2 + return ReadMessageQuery; + case WRITE_MESSAGE_ID: // 3 + return WriteMessageQuery; + case PURGE_QUEUE_ID: // 4 + return PurgeQueueQuery; + case CHANGE_VISIBILITY_ID: // 5 + return ChangeMessageVisibilityQuery; + case CLEANUP_DEDUPLICATION_ID: // 6 + return DeduplicationCleanupQuery; + case CLEANUP_READS_ID: // 7 + return ReadsCleanupQuery; + case LIST_QUEUES_ID: // 8 + return ListQueuesQuery; + case SET_QUEUE_ATTRIBUTES_ID: // 9 + return SetQueueAttributesQuery; + case SET_RETENTION_ID: // 10 + return 
SetRetentionQuery; + case INTERNAL_GET_QUEUE_ATTRIBUTES_ID: // 13 + return InternalGetQueueAttributesQuery; + case PURGE_QUEUE_STAGE2_ID: // 14 + return PurgeQueueStage2Query; + case GET_MESSAGE_COUNT_METRIC_ID: // 15 + return GetMessageCountMetricsQuery; + case GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID: // 16 + return GetOldestMessageTimestampMetricsQuery; + case GET_RETENTION_OFFSET_ID: // 17 + return GetRetentionOffsetQuery; case LIST_DEAD_LETTER_SOURCE_QUEUES_ID: // 18 return ListDeadLetterSourceQueuesQuery; case READ_OR_REDRIVE_MESSAGE_ID: // 22 @@ -1425,4 +1425,4 @@ const char* GetFifoQueryById(size_t id) { return nullptr; } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/queues/fifo/queries.h b/ydb/core/ymq/queues/fifo/queries.h index 6886062974a..c3387e6ab15 100644 --- a/ydb/core/ymq/queues/fifo/queries.h +++ b/ydb/core/ymq/queues/fifo/queries.h @@ -2,8 +2,8 @@ #include <ydb/core/ymq/base/query_id.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { const char* GetFifoQueryById(size_t id); -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/queues/fifo/schema.cpp b/ydb/core/ymq/queues/fifo/schema.cpp index 49e04d773b1..bd42521e1bb 100644 --- a/ydb/core/ymq/queues/fifo/schema.cpp +++ b/ydb/core/ymq/queues/fifo/schema.cpp @@ -1,6 +1,6 @@ #include "schema.h" -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { static const TVector<TColumn> AttributesColumns = { TColumn("State", NScheme::NTypeIds::Uint64, true), @@ -13,8 +13,8 @@ static const TVector<TColumn> AttributesColumns = { TColumn("VisibilityTimeout", NScheme::NTypeIds::Uint64), TColumn("DlqName", NScheme::NTypeIds::Utf8), TColumn("DlqArn", NScheme::NTypeIds::Utf8), - TColumn("MaxReceiveCount", NScheme::NTypeIds::Uint64), - TColumn("ShowDetailedCountersDeadline", NScheme::NTypeIds::Uint64)}; + TColumn("MaxReceiveCount", NScheme::NTypeIds::Uint64), + TColumn("ShowDetailedCountersDeadline", NScheme::NTypeIds::Uint64)}; static const TVector<TColumn> StateColumns = { TColumn("State", NScheme::NTypeIds::Uint64, true), @@ -25,9 +25,9 @@ static const TVector<TColumn> StateColumns = { TColumn("MessageCount", NScheme::NTypeIds::Int64), TColumn("InflyCount", NScheme::NTypeIds::Int64), TColumn("ReadOffset", NScheme::NTypeIds::Uint64), - TColumn("WriteOffset", NScheme::NTypeIds::Uint64), - TColumn("CleanupVersion", NScheme::NTypeIds::Uint64), - TColumn("InflyVersion", NScheme::NTypeIds::Uint64)}; + TColumn("WriteOffset", NScheme::NTypeIds::Uint64), + TColumn("CleanupVersion", NScheme::NTypeIds::Uint64), + TColumn("InflyVersion", NScheme::NTypeIds::Uint64)}; static const TVector<TColumn> DataColumns = { TColumn("RandomId", NScheme::NTypeIds::Uint64, true, 20), @@ -81,7 +81,7 @@ TVector<TTable> GetFifoTables() { list.push_back(TTable("Attributes") .SetColumns(AttributesColumns) - .SetShard(-1) + .SetShard(-1) .SetHasLeaderTablet()); list.push_back(TTable("Data") .SetColumns(DataColumns) @@ -110,4 +110,4 @@ TVector<TTable> GetFifoTables() { return list; } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/queues/fifo/schema.h b/ydb/core/ymq/queues/fifo/schema.h index 788fab0ba7f..846b98ec10e 100644 --- a/ydb/core/ymq/queues/fifo/schema.h +++ b/ydb/core/ymq/queues/fifo/schema.h @@ -2,8 +2,8 @@ #include <ydb/core/ymq/base/table_info.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { TVector<TTable> GetFifoTables(); -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/queues/fifo/ya.make 
b/ydb/core/ymq/queues/fifo/ya.make index 770b3147a92..a1b7ade4332 100644 --- a/ydb/core/ymq/queues/fifo/ya.make +++ b/ydb/core/ymq/queues/fifo/ya.make @@ -1,8 +1,8 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + LIBRARY() SRCS( diff --git a/ydb/core/ymq/queues/std/queries.cpp b/ydb/core/ymq/queues/std/queries.cpp index 202d9c1489f..198bf94d85e 100644 --- a/ydb/core/ymq/queues/std/queries.cpp +++ b/ydb/core/ymq/queues/std/queries.cpp @@ -1,99 +1,99 @@ #include "queries.h" #include <ydb/core/ymq/base/constants.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { namespace { -static const char* const AddMessagesToInflyQuery = R"__( +static const char* const AddMessagesToInflyQuery = R"__( ( - (let inflyLimit (Parameter 'INFLY_LIMIT (DataType 'Uint64))) - (let shard (Parameter 'SHARD (DataType 'Uint64))) - (let from (Parameter 'FROM (DataType 'Uint64))) - (let expectedMaxCount (Parameter 'EXPECTED_MAX_COUNT (DataType 'Uint64))) + (let inflyLimit (Parameter 'INFLY_LIMIT (DataType 'Uint64))) + (let shard (Parameter 'SHARD (DataType 'Uint64))) + (let from (Parameter 'FROM (DataType 'Uint64))) + (let expectedMaxCount (Parameter 'EXPECTED_MAX_COUNT (DataType 'Uint64))) (let inflyTable '%1$s/%2$i/Infly) (let msgTable '%1$s/%2$i/Messages) (let stateTable '%1$s/State) - (let stateRow '( - '('State shard))) - (let stateFields '( - 'InflyCount - 'MessageCount - 'ReadOffset - 'InflyVersion)) - (let state (SelectRow stateTable stateRow stateFields)) - - (let inflyMaxCountToAdd - (Convert - (Coalesce - (Max - (Sub inflyLimit (Member state 'InflyCount)) - (Uint64 '0) - ) - (Uint64 '0) - ) - 'Uint64 - ) - ) - - (let msgRange '( - '('Offset from (Uint64 '18446744073709551615)))) - (let msgFields '( - 'Offset - 'RandomId - 'SentTimestamp - 'DelayDeadline)) - (let messages (Take (Member (SelectRange msgTable msgRange msgFields '('('"ItemsLimit" expectedMaxCount))) 'List) inflyMaxCountToAdd)) - - (let inflyCount (Add (Member state 'InflyCount) (Length messages))) - (let lastElement (ToOptional (Skip messages (Sub (Length messages) (Uint64 '1))))) - (let nextReadOffset - (If (HasItems messages) - (Add - (Member lastElement 'Offset) - (Uint64 '1)) - (Member state 'ReadOffset))) - (let currentInflyVersion - (Coalesce - (Member state 'InflyVersion) - (Uint64 '0) - ) - ) - (let newInflyVersion (If (HasItems messages) (Add currentInflyVersion (Uint64 '1)) currentInflyVersion)) + (let stateRow '( + '('State shard))) + (let stateFields '( + 'InflyCount + 'MessageCount + 'ReadOffset + 'InflyVersion)) + (let state (SelectRow stateTable stateRow stateFields)) + + (let inflyMaxCountToAdd + (Convert + (Coalesce + (Max + (Sub inflyLimit (Member state 'InflyCount)) + (Uint64 '0) + ) + (Uint64 '0) + ) + 'Uint64 + ) + ) + + (let msgRange '( + '('Offset from (Uint64 '18446744073709551615)))) + (let msgFields '( + 'Offset + 'RandomId + 'SentTimestamp + 'DelayDeadline)) + (let messages (Take (Member (SelectRange msgTable msgRange msgFields '('('"ItemsLimit" expectedMaxCount))) 'List) inflyMaxCountToAdd)) + + (let inflyCount (Add (Member state 'InflyCount) (Length messages))) + (let lastElement (ToOptional (Skip messages (Sub (Length messages) (Uint64 '1))))) + (let nextReadOffset + (If (HasItems messages) + (Add + (Member lastElement 'Offset) + (Uint64 '1)) + (Member state 'ReadOffset))) + (let currentInflyVersion + (Coalesce + (Member state 'InflyVersion) + (Uint64 '0) + ) + ) + (let newInflyVersion (If (HasItems messages) (Add currentInflyVersion (Uint64 '1)) currentInflyVersion)) (return (Extend - (AsList (SetResult 
'messages messages)) - (AsList (SetResult 'inflyCount inflyCount)) - (AsList (SetResult 'messagesCount (Member state 'MessageCount))) - (AsList (SetResult 'readOffset nextReadOffset)) - (AsList (SetResult 'currentInflyVersion currentInflyVersion)) - (AsList (SetResult 'newInflyVersion newInflyVersion)) - - (ListIf (HasItems messages) (block '( + (AsList (SetResult 'messages messages)) + (AsList (SetResult 'inflyCount inflyCount)) + (AsList (SetResult 'messagesCount (Member state 'MessageCount))) + (AsList (SetResult 'readOffset nextReadOffset)) + (AsList (SetResult 'currentInflyVersion currentInflyVersion)) + (AsList (SetResult 'newInflyVersion newInflyVersion)) + + (ListIf (HasItems messages) (block '( (let row '( '('State shard))) (let update '( - '('ReadOffset nextReadOffset) - '('InflyCount inflyCount) - '('InflyVersion newInflyVersion))) + '('ReadOffset nextReadOffset) + '('InflyCount inflyCount) + '('InflyVersion newInflyVersion))) (return (UpdateRow stateTable row update))))) - (Map messages (lambda '(item) (block '( + (Map messages (lambda '(item) (block '( (let row '( '('Offset (Member item 'Offset)))) (let update '( '('RandomId (Member item 'RandomId)) - '('LoadId (Uint64 '0)) + '('LoadId (Uint64 '0)) '('FirstReceiveTimestamp (Uint64 '0)) '('LockTimestamp (Uint64 '0)) '('ReceiveCount (Uint32 '0)) '('SentTimestamp (Member item 'SentTimestamp)) - '('DelayDeadline (Member item 'DelayDeadline)) + '('DelayDeadline (Member item 'DelayDeadline)) '('VisibilityDeadline (Uint64 '0)))) (return (UpdateRow inflyTable row update)))))) - (Map messages (lambda '(item) (block '( + (Map messages (lambda '(item) (block '( (let row '( '('Offset (Member item 'Offset)))) (return (EraseRow msgTable row)))))))) @@ -102,231 +102,231 @@ static const char* const AddMessagesToInflyQuery = R"__( const char* const ChangeMessageVisibilityQuery = R"__( ( - (let now (Parameter 'NOW (DataType 'Uint64))) - (let keys (Parameter 'KEYS - (ListType (StructType - '('Offset (DataType 'Uint64)) - '('LockTimestamp (DataType 'Uint64)) - '('NewVisibilityDeadline (DataType 'Uint64)))))) + (let now (Parameter 'NOW (DataType 'Uint64))) + (let keys (Parameter 'KEYS + (ListType (StructType + '('Offset (DataType 'Uint64)) + '('LockTimestamp (DataType 'Uint64)) + '('NewVisibilityDeadline (DataType 'Uint64)))))) (let inflyTable '%1$s/%2$i/Infly) - (let records - (MapParameter keys (lambda '(item) (block '( - (let messageRow '( - '('Offset (Member item 'Offset)))) - (let inflySelect '( - 'VisibilityDeadline - 'LockTimestamp)) - (let inflyRead (SelectRow inflyTable messageRow inflySelect)) - - (let exists (Exists inflyRead)) - - (let changeCond - (Coalesce - (And - (LessOrEqual now (Member inflyRead 'VisibilityDeadline)) - (Equal (Member item 'LockTimestamp) (Member inflyRead 'LockTimestamp)) - ) - (Bool 'false))) - - (return (AsStruct - '('Offset (Member item 'Offset)) - '('Exists exists) - '('ChangeCond changeCond) - '('CurrentVisibilityDeadline (Member inflyRead 'VisibilityDeadline)) - '('NewVisibilityDeadline (Member item 'NewVisibilityDeadline))))))))) - - (let recordsToChange - (Filter records (lambda '(item) (block '( - (return (And (Member item 'Exists) (Member item 'ChangeCond))) - ))))) - - (return (Extend - (AsList (SetResult 'result records)) - - (Map recordsToChange (lambda '(item) (block '( - (let messageRow '( - '('Offset (Member item 'Offset)))) - (let visibilityUpdate '( - '('VisibilityDeadline (Member item 'NewVisibilityDeadline)))) - (return (UpdateRow inflyTable messageRow visibilityUpdate)) - )))) + (let records 
+ (MapParameter keys (lambda '(item) (block '( + (let messageRow '( + '('Offset (Member item 'Offset)))) + (let inflySelect '( + 'VisibilityDeadline + 'LockTimestamp)) + (let inflyRead (SelectRow inflyTable messageRow inflySelect)) + + (let exists (Exists inflyRead)) + + (let changeCond + (Coalesce + (And + (LessOrEqual now (Member inflyRead 'VisibilityDeadline)) + (Equal (Member item 'LockTimestamp) (Member inflyRead 'LockTimestamp)) + ) + (Bool 'false))) + + (return (AsStruct + '('Offset (Member item 'Offset)) + '('Exists exists) + '('ChangeCond changeCond) + '('CurrentVisibilityDeadline (Member inflyRead 'VisibilityDeadline)) + '('NewVisibilityDeadline (Member item 'NewVisibilityDeadline))))))))) + + (let recordsToChange + (Filter records (lambda '(item) (block '( + (return (And (Member item 'Exists) (Member item 'ChangeCond))) + ))))) + + (return (Extend + (AsList (SetResult 'result records)) + + (Map recordsToChange (lambda '(item) (block '( + (let messageRow '( + '('Offset (Member item 'Offset)))) + (let visibilityUpdate '( + '('VisibilityDeadline (Member item 'NewVisibilityDeadline)))) + (return (UpdateRow inflyTable messageRow visibilityUpdate)) + )))) )) ) )__"; const char* const PurgeQueueQuery = R"__( ( - (let offsetFrom (Parameter 'OFFSET_FROM (DataType 'Uint64))) - (let offsetTo (Parameter 'OFFSET_TO (DataType 'Uint64))) - (let now (Parameter 'NOW (DataType 'Uint64))) - (let shard (Parameter 'SHARD (DataType 'Uint64))) - (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) + (let offsetFrom (Parameter 'OFFSET_FROM (DataType 'Uint64))) + (let offsetTo (Parameter 'OFFSET_TO (DataType 'Uint64))) + (let now (Parameter 'NOW (DataType 'Uint64))) + (let shard (Parameter 'SHARD (DataType 'Uint64))) + (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) - (let msgTable '%1$s/%2$i/Messages) - (let inflyTable '%1$s/%2$i/Infly) - (let stateTable '%1$s/State) + (let msgTable '%1$s/%2$i/Messages) + (let inflyTable '%1$s/%2$i/Infly) + (let stateTable '%1$s/State) (let stateRow '( '('State shard))) (let stateSelect '( - 'CleanupVersion - 'LastModifiedTimestamp)) + 'CleanupVersion + 'LastModifiedTimestamp)) (let stateRead (SelectRow stateTable stateRow stateSelect)) - (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) - - (let messageRange '( - '('Offset offsetFrom offsetTo))) - (let inflyRange '( - '('Offset offsetFrom offsetTo))) - (let messageSelect '( + (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) + + (let messageRange '( + '('Offset offsetFrom offsetTo))) + (let inflyRange '( + '('Offset offsetFrom offsetTo))) + (let messageSelect '( 'SentTimestamp 'Offset 'RandomId)) - - (let selectResult (SelectRange msgTable messageRange messageSelect '('('"ItemsLimit" batchSize)))) - (let selectInflyResult (SelectRange inflyTable inflyRange messageSelect '('('"ItemsLimit" batchSize)))) - - (let messages (Member selectResult 'List)) - (let inflyMessages (Member selectInflyResult 'List)) - (let truncated (Coalesce (Member selectResult 'Truncated) (Member selectInflyResult 'Truncated) (Bool 'false))) - (let newCleanupVersion (Add (Member stateRead 'CleanupVersion) (Uint64 '1))) + + (let selectResult (SelectRange msgTable messageRange messageSelect '('('"ItemsLimit" batchSize)))) + (let selectInflyResult (SelectRange inflyTable inflyRange messageSelect '('('"ItemsLimit" batchSize)))) + + (let messages (Member selectResult 'List)) + (let inflyMessages (Member selectInflyResult 'List)) + (let truncated (Coalesce (Member selectResult 
'Truncated) (Member selectInflyResult 'Truncated) (Bool 'false))) + (let newCleanupVersion (Add (Member stateRead 'CleanupVersion) (Uint64 '1))) (let stateUpdate '( - '('LastModifiedTimestamp modifiedTimestamp) - '('CleanupVersion newCleanupVersion) - )) + '('LastModifiedTimestamp modifiedTimestamp) + '('CleanupVersion newCleanupVersion) + )) (return (Extend - (AsList (SetResult 'messages messages)) - (AsList (SetResult 'inflyMessages inflyMessages)) - (AsList (SetResult 'truncated truncated)) - (AsList (SetResult 'cleanupVersion newCleanupVersion)) + (AsList (SetResult 'messages messages)) + (AsList (SetResult 'inflyMessages inflyMessages)) + (AsList (SetResult 'truncated truncated)) + (AsList (SetResult 'cleanupVersion newCleanupVersion)) (AsList (UpdateRow stateTable stateRow stateUpdate)) + )) + ) +)__"; + +const char* const PurgeQueueStage2Query = R"__( + ( + (let shard (Parameter 'SHARD (DataType 'Uint64))) + (let cleanupVersion (Parameter 'CLEANUP_VERSION (DataType 'Uint64))) + (let now (Parameter 'NOW (DataType 'Uint64))) + (let messages (Parameter 'MESSAGES + (ListType (StructType + '('Offset (DataType 'Uint64)) + '('RandomId (DataType 'Uint64)) + '('SentTimestamp (DataType 'Uint64)) + )))) + + (let dataTable '%1$s/%2$i/MessageData) + (let inflyTable '%1$s/%2$i/Infly) + (let msgTable '%1$s/%2$i/Messages) + (let sentTsIdx '%1$s/%2$i/SentTimestampIdx) + (let stateTable '%1$s/State) + + (let stateRow '( + '('State shard))) + (let stateSelect '( + 'MessageCount + 'InflyCount + 'CleanupVersion + 'LastModifiedTimestamp)) + (let stateRead + (SelectRow stateTable stateRow stateSelect)) + + (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) + + (let inflyRecords + (MapParameter messages (lambda '(item) (block '( + (let row '( + '('Offset (Member item 'Offset)))) + (let fields '( + 'Offset + 'SentTimestamp)) + (return (SelectRow inflyTable row fields))))))) + + (let inflyRecordsExisted + (Filter inflyRecords (lambda '(item) (block '( + (return (Exists item)) + ))))) + + (let messageRecords + (MapParameter messages (lambda '(item) (block '( + (let row '( + '('Offset (Member item 'Offset)))) + (let fields '( + 'Offset + 'SentTimestamp)) + (return (SelectRow msgTable row fields))))))) + + (let messageRecordsExisted + (Filter messageRecords (lambda '(item) (block '( + (return (Exists item)) + ))))) + + (let messagesDeleted + (Add (Length messageRecordsExisted) (Length inflyRecordsExisted)) + ) + + (let newMessagesCount (Sub (Member stateRead 'MessageCount) messagesDeleted)) + + (let stateUpdate '( + '('LastModifiedTimestamp modifiedTimestamp) + '('MessageCount newMessagesCount) + '('InflyCount (Sub (Member stateRead 'InflyCount) (Length inflyRecordsExisted))) )) - ) -)__"; - -const char* const PurgeQueueStage2Query = R"__( - ( - (let shard (Parameter 'SHARD (DataType 'Uint64))) - (let cleanupVersion (Parameter 'CLEANUP_VERSION (DataType 'Uint64))) - (let now (Parameter 'NOW (DataType 'Uint64))) - (let messages (Parameter 'MESSAGES - (ListType (StructType - '('Offset (DataType 'Uint64)) - '('RandomId (DataType 'Uint64)) - '('SentTimestamp (DataType 'Uint64)) - )))) - - (let dataTable '%1$s/%2$i/MessageData) - (let inflyTable '%1$s/%2$i/Infly) - (let msgTable '%1$s/%2$i/Messages) - (let sentTsIdx '%1$s/%2$i/SentTimestampIdx) - (let stateTable '%1$s/State) - - (let stateRow '( - '('State shard))) - (let stateSelect '( - 'MessageCount - 'InflyCount - 'CleanupVersion - 'LastModifiedTimestamp)) - (let stateRead - (SelectRow stateTable stateRow stateSelect)) - - (let 
modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) - - (let inflyRecords - (MapParameter messages (lambda '(item) (block '( - (let row '( - '('Offset (Member item 'Offset)))) - (let fields '( - 'Offset - 'SentTimestamp)) - (return (SelectRow inflyTable row fields))))))) - - (let inflyRecordsExisted - (Filter inflyRecords (lambda '(item) (block '( - (return (Exists item)) - ))))) - - (let messageRecords - (MapParameter messages (lambda '(item) (block '( - (let row '( - '('Offset (Member item 'Offset)))) - (let fields '( - 'Offset - 'SentTimestamp)) - (return (SelectRow msgTable row fields))))))) - - (let messageRecordsExisted - (Filter messageRecords (lambda '(item) (block '( - (return (Exists item)) - ))))) - - (let messagesDeleted - (Add (Length messageRecordsExisted) (Length inflyRecordsExisted)) - ) - - (let newMessagesCount (Sub (Member stateRead 'MessageCount) messagesDeleted)) - - (let stateUpdate '( - '('LastModifiedTimestamp modifiedTimestamp) - '('MessageCount newMessagesCount) - '('InflyCount (Sub (Member stateRead 'InflyCount) (Length inflyRecordsExisted))) - )) - - (let versionIsSame - (Coalesce - (Equal (Member stateRead 'CleanupVersion) cleanupVersion) - (Bool 'false) - ) - ) - - (return (Extend - (AsList (SetResult 'versionIsSame versionIsSame)) - (AsList (SetResult 'newMessagesCount (If versionIsSame newMessagesCount (Member stateRead 'MessageCount)))) - - (AsList (SetResult 'messagesDeleted - (If versionIsSame - messagesDeleted - (Uint64 '0)) - )) - - (If versionIsSame - (AsList (UpdateRow stateTable stateRow stateUpdate)) - (AsList (Void))) - - (If versionIsSame - (Map inflyRecordsExisted (lambda '(item) (block '( - (return (EraseRow inflyTable '( - '('Offset (Member item 'Offset))))))))) - (AsList (Void))) - - (If versionIsSame - (Map messages (lambda '(item) (block '( - (return (EraseRow dataTable '( - '('RandomId (Member item 'RandomId)) - '('Offset (Member item 'Offset))))))))) - (AsList (Void))) - - (If versionIsSame - (Map messageRecordsExisted (lambda '(item) (block '( - (return (EraseRow msgTable '( - '('Offset (Member item 'Offset))))))))) - (AsList (Void))) - - (If versionIsSame - (Map messages (lambda '(item) (block '( - (return (EraseRow sentTsIdx '( - '('SentTimestamp (Member item 'SentTimestamp)) - '('Offset (Member item 'Offset))))))))) - (AsList (Void))) - )) + + (let versionIsSame + (Coalesce + (Equal (Member stateRead 'CleanupVersion) cleanupVersion) + (Bool 'false) + ) + ) + + (return (Extend + (AsList (SetResult 'versionIsSame versionIsSame)) + (AsList (SetResult 'newMessagesCount (If versionIsSame newMessagesCount (Member stateRead 'MessageCount)))) + + (AsList (SetResult 'messagesDeleted + (If versionIsSame + messagesDeleted + (Uint64 '0)) + )) + + (If versionIsSame + (AsList (UpdateRow stateTable stateRow stateUpdate)) + (AsList (Void))) + + (If versionIsSame + (Map inflyRecordsExisted (lambda '(item) (block '( + (return (EraseRow inflyTable '( + '('Offset (Member item 'Offset))))))))) + (AsList (Void))) + + (If versionIsSame + (Map messages (lambda '(item) (block '( + (return (EraseRow dataTable '( + '('RandomId (Member item 'RandomId)) + '('Offset (Member item 'Offset))))))))) + (AsList (Void))) + + (If versionIsSame + (Map messageRecordsExisted (lambda '(item) (block '( + (return (EraseRow msgTable '( + '('Offset (Member item 'Offset))))))))) + (AsList (Void))) + + (If versionIsSame + (Map messages (lambda '(item) (block '( + (return (EraseRow sentTsIdx '( + '('SentTimestamp (Member item 'SentTimestamp)) + '('Offset (Member 
item 'Offset))))))))) + (AsList (Void))) + )) ) )__"; @@ -368,23 +368,23 @@ const char* const DeleteMessageQuery = R"__( '('State shard))) (let stateSelect '( 'InflyCount - 'MessageCount - 'LastModifiedTimestamp)) + 'MessageCount + 'LastModifiedTimestamp)) (let stateRead (SelectRow stateTable stateRow stateSelect)) - (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) - - (let newMessagesCount (Sub (Member stateRead 'MessageCount) (Length existed))) + (let modifiedTimestamp (Max now (Member stateRead 'LastModifiedTimestamp))) + + (let newMessagesCount (Sub (Member stateRead 'MessageCount) (Length existed))) (let stateUpdate '( - '('LastModifiedTimestamp modifiedTimestamp) + '('LastModifiedTimestamp modifiedTimestamp) '('InflyCount (Sub (Member stateRead 'InflyCount) (Length existed))) - '('MessageCount newMessagesCount))) + '('MessageCount newMessagesCount))) (let deleteCond (HasItems existed)) (return (Extend (AsList (SetResult 'deleted result)) - (AsList (SetResult 'newMessagesCount newMessagesCount)) + (AsList (SetResult 'newMessagesCount newMessagesCount)) (ListIf deleteCond (UpdateRow stateTable stateRow stateUpdate)) (If deleteCond @@ -423,7 +423,7 @@ const char* const SetQueueAttributesQuery = R"__( (let maxReceiveCount (Parameter 'MAX_RECEIVE_COUNT (OptionalType (DataType 'Uint64)))) (let dlqArn (Parameter 'DLQ_TARGET_ARN (OptionalType (DataType 'Utf8String)))) (let dlqName (Parameter 'DLQ_TARGET_NAME (OptionalType (DataType 'Utf8String)))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) (let attrsTable '%1$s/Attributes) @@ -433,7 +433,7 @@ const char* const SetQueueAttributesQuery = R"__( 'DelaySeconds 'MessageRetentionPeriod 'ReceiveMessageWaitTime - 'VisibilityTimeout + 'VisibilityTimeout 'MaximumMessageSize 'DlqName 'DlqArn @@ -444,69 +444,69 @@ const char* const SetQueueAttributesQuery = R"__( '('DelaySeconds (Coalesce delay (Member attrsRead 'DelaySeconds))) '('MessageRetentionPeriod (Coalesce retention (Member attrsRead 'MessageRetentionPeriod))) '('ReceiveMessageWaitTime (Coalesce wait (Member attrsRead 'ReceiveMessageWaitTime))) - '('VisibilityTimeout (Coalesce visibility (Member attrsRead 'VisibilityTimeout))) + '('VisibilityTimeout (Coalesce visibility (Member attrsRead 'VisibilityTimeout))) '('MaxReceiveCount (Coalesce maxReceiveCount (Member attrsRead 'MaxReceiveCount))) '('DlqName (Coalesce dlqName (Member attrsRead 'DlqName))) '('DlqArn (Coalesce dlqArn (Member attrsRead 'DlqArn))) - '('MaximumMessageSize (Coalesce maxMessageSize (Member attrsRead 'MaximumMessageSize))))) - - (let queuesTable '%5$s/.Queues) - (let queuesRow '( - '('Account userName) - '('QueueName (Utf8String '"%4$s")))) - - (let queuesRowSelect '( - 'DlqName)) - - (let queuesRowRead (SelectRow queuesTable queuesRow queuesRowSelect)) - - (let queuesRowUpdate '( - '('DlqName (Coalesce dlqName (Member queuesRowRead 'DlqName))))) - + '('MaximumMessageSize (Coalesce maxMessageSize (Member attrsRead 'MaximumMessageSize))))) + + (let queuesTable '%5$s/.Queues) + (let queuesRow '( + '('Account userName) + '('QueueName (Utf8String '"%4$s")))) + + (let queuesRowSelect '( + 'DlqName)) + + (let queuesRowRead (SelectRow queuesTable queuesRow queuesRowSelect)) + + (let queuesRowUpdate '( + '('DlqName (Coalesce dlqName (Member queuesRowRead 'DlqName))))) + (return (AsList (UpdateRow attrsTable attrsRow attrsUpdate) - (UpdateRow queuesTable queuesRow queuesRowUpdate))) + (UpdateRow queuesTable queuesRow 
queuesRowUpdate))) ) )__"; -const char* const InternalGetQueueAttributesQuery = R"__( - ( - (let attrsTable '%1$s/Attributes) - - (let attrsRow '( - '('State (Uint64 '0)))) - (let attrsSelect '( - 'ContentBasedDeduplication - 'DelaySeconds - 'FifoQueue - 'MaximumMessageSize - 'MessageRetentionPeriod - 'ReceiveMessageWaitTime +const char* const InternalGetQueueAttributesQuery = R"__( + ( + (let attrsTable '%1$s/Attributes) + + (let attrsRow '( + '('State (Uint64 '0)))) + (let attrsSelect '( + 'ContentBasedDeduplication + 'DelaySeconds + 'FifoQueue + 'MaximumMessageSize + 'MessageRetentionPeriod + 'ReceiveMessageWaitTime 'MaxReceiveCount 'DlqName 'DlqArn - 'VisibilityTimeout - 'ShowDetailedCountersDeadline)) - - (return (AsList - (SetResult 'attrs (SelectRow attrsTable attrsRow attrsSelect)))) - ) -)__"; - + 'VisibilityTimeout + 'ShowDetailedCountersDeadline)) + + (return (AsList + (SetResult 'attrs (SelectRow attrsTable attrsRow attrsSelect)))) + ) +)__"; + const char* const ListQueuesQuery = R"__( ( - (let folderId (Parameter 'FOLDERID (DataType 'Utf8String))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) - - (let queuesTable '%5$s/.Queues) + (let folderId (Parameter 'FOLDERID (DataType 'Utf8String))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) + (let queuesTable '%5$s/.Queues) + (let skipFolderIdFilter (Equal folderId (Utf8String '""))) - (let queuesRange '( - '('Account userName userName) + (let queuesRange '( + '('Account userName userName) '('QueueName (Utf8String '"") (Void)))) (let queueSelect '('QueueName 'QueueId 'QueueState 'FifoQueue 'CreatedTimestamp 'CustomQueueName 'FolderId 'MasterTabletId 'Version 'Shards)) - (let queues (Member (SelectRange queuesTable queuesRange queueSelect '()) 'List)) + (let queues (Member (SelectRange queuesTable queuesRange queueSelect '()) 'List)) (let filtered (Filter queues (lambda '(item) (block '( (return (Coalesce @@ -531,10 +531,10 @@ const char* const LoadMessageQuery = R"__( (ListType (StructType '('RandomId (DataType 'Uint64)) '('Offset (DataType 'Uint64)) - '('CurrentVisibilityDeadline (DataType 'Uint64)) + '('CurrentVisibilityDeadline (DataType 'Uint64)) '('DlqIndex (DataType 'Uint64)) '('IsDeadLetter (DataType 'Bool)) - '('VisibilityDeadline (DataType 'Uint64)))))) + '('VisibilityDeadline (DataType 'Uint64)))))) (let now (Parameter 'NOW (DataType 'Uint64))) (let readId (Parameter 'READ_ID (DataType 'Uint64))) (let shard (Parameter 'SHARD (DataType 'Uint64))) @@ -545,15 +545,15 @@ const char* const LoadMessageQuery = R"__( (let records (MapParameter keys (lambda '(item) (block '( (let read (block '( - (let row '( + (let row '( '('Offset (Member item 'Offset)))) (let fields '( 'LoadId 'FirstReceiveTimestamp 'ReceiveCount 'SentTimestamp - 'VisibilityDeadline - 'DelayDeadline)) + 'VisibilityDeadline + 'DelayDeadline)) (return (SelectRow inflyTable row fields))))) (let data (block '( @@ -570,23 +570,23 @@ const char* const LoadMessageQuery = R"__( (let receiveTimestamp (If (Coalesce (Equal (Member read 'FirstReceiveTimestamp) (Uint64 '0)) (Bool 'false)) now (Member read 'FirstReceiveTimestamp))) - (let visibilityDeadlineInDb - (Max - (Member read 'VisibilityDeadline) - (Coalesce - (Member read 'DelayDeadline) - (Uint64 '0) - ) - ) - ) - + (let visibilityDeadlineInDb + (Max + (Member read 'VisibilityDeadline) + (Coalesce + (Member read 'DelayDeadline) + (Uint64 '0) + ) + ) + ) + (let valid (Coalesce - (Or - (Equal visibilityDeadlineInDb (Member item 'CurrentVisibilityDeadline)) - (And - (Equal (Member read 
'LoadId) readId) - (Equal (Member read 'VisibilityDeadline) (Member item 'VisibilityDeadline)))) + (Or + (Equal visibilityDeadlineInDb (Member item 'CurrentVisibilityDeadline)) + (And + (Equal (Member read 'LoadId) readId) + (Equal (Member read 'VisibilityDeadline) (Member item 'VisibilityDeadline)))) (Bool 'false))) (return @@ -604,9 +604,9 @@ const char* const LoadMessageQuery = R"__( '('DlqIndex (Member item 'DlqIndex)) '('ReceiveCount (Add (Member read 'ReceiveCount) (Uint32 '1))) '('SentTimestamp (Member read 'SentTimestamp)) - '('VisibilityDeadline (If valid (Member item 'VisibilityDeadline) (Member read 'VisibilityDeadline))) - '('Valid valid) - '('Exists (Exists read)))) + '('VisibilityDeadline (If valid (Member item 'VisibilityDeadline) (Member read 'VisibilityDeadline))) + '('Valid valid) + '('Exists (Exists read)))) ))))) (let result @@ -614,18 +614,18 @@ const char* const LoadMessageQuery = R"__( (return (Coalesce (Member item 'Valid) (Bool 'false)))))))) (return (Extend - (AsList (SetResult 'result records)) + (AsList (SetResult 'result records)) (AsList (SetResult 'movedMessagesCount (Uint64 '0))) (Map result (lambda '(item) (block '( (let row '( '('Offset (Member item 'Offset)))) (let update '( - '('LoadId readId) + '('LoadId readId) '('FirstReceiveTimestamp (Member item 'FirstReceiveTimestamp)) '('LockTimestamp (Member item 'LockTimestamp)) '('ReceiveCount (Member item 'ReceiveCount)) - '('VisibilityDeadline (Member item 'VisibilityDeadline)))) + '('VisibilityDeadline (Member item 'VisibilityDeadline)))) (return (UpdateRow inflyTable row update)))))))) ) )__"; @@ -751,16 +751,16 @@ const char* const LoadOrRedriveMessageQuery = R"__( '('MessageCount newDlqMessagesCount) '('WriteOffset newDlqWriteOffset))) - (let newSourceMessagesCount (Sub (Member sourceStateRead 'MessageCount) (Length messagesToMove))) + (let newSourceMessagesCount (Sub (Member sourceStateRead 'MessageCount) (Length messagesToMove))) (let sourceStateUpdate '( '('LastModifiedTimestamp (Max (Member sourceStateRead 'LastModifiedTimestamp) now)) - '('MessageCount newSourceMessagesCount) + '('MessageCount newSourceMessagesCount) '('InflyCount (Sub (Member sourceStateRead 'InflyCount) (Length messagesToMove))))) (return (Extend - (AsList (SetResult 'result records)) + (AsList (SetResult 'result records)) (AsList (SetResult 'movedMessagesCount (Length messagesToMove))) - (AsList (SetResult 'newMessagesCount newSourceMessagesCount)) + (AsList (SetResult 'newMessagesCount newSourceMessagesCount)) (AsList (UpdateRow deadLetterStateTable deadLetterStateRow deadLetterStateUpdate)) (AsList (UpdateRow sourceStateTable sourceStateRow sourceStateUpdate)) @@ -830,19 +830,19 @@ const char* const LoadOrRedriveMessageQuery = R"__( const char* const WriteMessageQuery = R"__( ( - (let randomId (Parameter 'RANDOM_ID (DataType 'Uint64))) - (let timestamp (Parameter 'TIMESTAMP (DataType 'Uint64))) - (let shard (Parameter 'SHARD (DataType 'Uint64))) - (let messages (Parameter 'MESSAGES - (ListType (StructType - '('Attributes (DataType 'String)) - '('Data (DataType 'String)) - '('MessageId (DataType 'String)) - '('SenderId (DataType 'String)) - '('Delay (DataType 'Uint64)) - '('Index (DataType 'Uint64)) - )) - )) + (let randomId (Parameter 'RANDOM_ID (DataType 'Uint64))) + (let timestamp (Parameter 'TIMESTAMP (DataType 'Uint64))) + (let shard (Parameter 'SHARD (DataType 'Uint64))) + (let messages (Parameter 'MESSAGES + (ListType (StructType + '('Attributes (DataType 'String)) + '('Data (DataType 'String)) + '('MessageId (DataType 
'String)) + '('SenderId (DataType 'String)) + '('Delay (DataType 'Uint64)) + '('Index (DataType 'Uint64)) + )) + )) (let dataTable '%1$s/%2$i/MessageData) (let msgTable '%1$s/%2$i/Messages) @@ -853,68 +853,68 @@ const char* const WriteMessageQuery = R"__( '('State shard))) (let stateSelect '( 'MessageCount - 'WriteOffset - 'LastModifiedTimestamp)) + 'WriteOffset + 'LastModifiedTimestamp)) (let stateRead (SelectRow stateTable stateRow stateSelect)) - (let sentTimestamp (Max timestamp (Member stateRead 'LastModifiedTimestamp))) - - (let newMessagesCount (Add (Member stateRead 'MessageCount) (Length messages))) - (let newWriteOffset (Add (Member stateRead 'WriteOffset) (Length messages))) - (let startOffset (Add (Member stateRead 'WriteOffset) (Uint64 '1))) - - (let stateUpdate '( - '('LastModifiedTimestamp sentTimestamp) - '('MessageCount newMessagesCount) - '('WriteOffset newWriteOffset))) - - (let result - (MapParameter messages (lambda '(item) (block '( - (return - (AsStruct - '('dedupCond (Bool 'true)) - )) - ))))) - - (return (Extend - (AsList (SetResult 'newMessagesCount newMessagesCount)) - (AsList (SetResult 'result result)) - - (AsList (UpdateRow stateTable stateRow stateUpdate)) - - (MapParameter messages (lambda '(item) (block '( - (let msgRow '( - '('Offset (Add startOffset (Member item 'Index))))) - (let delay (Member item 'Delay)) - (let delayDeadline (If (Equal delay (Uint64 '0)) (Uint64 '0) (Add sentTimestamp delay))) - (let messageUpdate '( - '('RandomId randomId) - '('SentTimestamp sentTimestamp) - '('DelayDeadline delayDeadline))) - (return (UpdateRow msgTable msgRow messageUpdate)))))) - - (MapParameter messages (lambda '(item) (block '( - (let sentTsRow '( - '('SentTimestamp sentTimestamp) - '('Offset (Add startOffset (Member item 'Index))))) - (let delay (Member item 'Delay)) - (let delayDeadline (If (Equal delay (Uint64 '0)) (Uint64 '0) (Add sentTimestamp delay))) - (let sentTsUpdate '( - '('RandomId randomId) - '('DelayDeadline delayDeadline))) - (return (UpdateRow sentTsIdx sentTsRow sentTsUpdate)))))) - - (MapParameter messages (lambda '(item) (block '( - (let dataRow '( - '('RandomId randomId) - '('Offset (Add startOffset (Member item 'Index))))) - - (let dataUpdate '( - '('Data (Member item 'Data)) - '('Attributes (Member item 'Attributes)) - '('SenderId (Member item 'SenderId)) - '('MessageId (Member item 'MessageId)))) - (return (UpdateRow dataTable dataRow dataUpdate)))))) + (let sentTimestamp (Max timestamp (Member stateRead 'LastModifiedTimestamp))) + + (let newMessagesCount (Add (Member stateRead 'MessageCount) (Length messages))) + (let newWriteOffset (Add (Member stateRead 'WriteOffset) (Length messages))) + (let startOffset (Add (Member stateRead 'WriteOffset) (Uint64 '1))) + + (let stateUpdate '( + '('LastModifiedTimestamp sentTimestamp) + '('MessageCount newMessagesCount) + '('WriteOffset newWriteOffset))) + + (let result + (MapParameter messages (lambda '(item) (block '( + (return + (AsStruct + '('dedupCond (Bool 'true)) + )) + ))))) + + (return (Extend + (AsList (SetResult 'newMessagesCount newMessagesCount)) + (AsList (SetResult 'result result)) + + (AsList (UpdateRow stateTable stateRow stateUpdate)) + + (MapParameter messages (lambda '(item) (block '( + (let msgRow '( + '('Offset (Add startOffset (Member item 'Index))))) + (let delay (Member item 'Delay)) + (let delayDeadline (If (Equal delay (Uint64 '0)) (Uint64 '0) (Add sentTimestamp delay))) + (let messageUpdate '( + '('RandomId randomId) + '('SentTimestamp sentTimestamp) + '('DelayDeadline 
delayDeadline))) + (return (UpdateRow msgTable msgRow messageUpdate)))))) + + (MapParameter messages (lambda '(item) (block '( + (let sentTsRow '( + '('SentTimestamp sentTimestamp) + '('Offset (Add startOffset (Member item 'Index))))) + (let delay (Member item 'Delay)) + (let delayDeadline (If (Equal delay (Uint64 '0)) (Uint64 '0) (Add sentTimestamp delay))) + (let sentTsUpdate '( + '('RandomId randomId) + '('DelayDeadline delayDeadline))) + (return (UpdateRow sentTsIdx sentTsRow sentTsUpdate)))))) + + (MapParameter messages (lambda '(item) (block '( + (let dataRow '( + '('RandomId randomId) + '('Offset (Add startOffset (Member item 'Index))))) + + (let dataUpdate '( + '('Data (Member item 'Data)) + '('Attributes (Member item 'Attributes)) + '('SenderId (Member item 'SenderId)) + '('MessageId (Member item 'MessageId)))) + (return (UpdateRow dataTable dataRow dataUpdate)))))) )) ) )__"; @@ -937,33 +937,33 @@ const char* const SetRetentionQuery = R"__( (let boundary (If purge now (Coalesce (Sub now (Member attrs 'MessageRetentionPeriod)) (Uint64 '0)))) - (let range '( - '('State (Uint64 '0) (Uint64 '18446744073709551615)))) - (let fields '( - 'State - 'RetentionBoundary)) - (let records (Member (SelectRange stateTable range fields '()) 'List)) + (let range '( + '('State (Uint64 '0) (Uint64 '18446744073709551615)))) + (let fields '( + 'State + 'RetentionBoundary)) + (let records (Member (SelectRange stateTable range fields '()) 'List)) - (let result - (Map records (lambda '(item) (block '( - (let updated - (Coalesce - (Less (Member item 'RetentionBoundary) boundary) - (Bool 'false))) + (let result + (Map records (lambda '(item) (block '( + (let updated + (Coalesce + (Less (Member item 'RetentionBoundary) boundary) + (Bool 'false))) - (return (AsStruct - '('Shard (Member item 'State)) - '('RetentionBoundary (Max boundary (Member item 'RetentionBoundary))) - '('Updated updated)))))))) + (return (AsStruct + '('Shard (Member item 'State)) + '('RetentionBoundary (Max boundary (Member item 'RetentionBoundary))) + '('Updated updated)))))))) - (let updated (Filter result (lambda '(item) (block '( - (return (Coalesce (Equal (Member item 'Updated) (Bool 'true)) (Bool 'false)))))))) + (let updated (Filter result (lambda '(item) (block '( + (return (Coalesce (Equal (Member item 'Updated) (Bool 'true)) (Bool 'false)))))))) (return (Extend (AsList (SetResult 'result result)) (AsList (SetResult 'retention (Member attrs 'MessageRetentionPeriod))) - (Map updated (lambda '(item) (block '( + (Map updated (lambda '(item) (block '( (let row '( '('State (Member item 'Shard)))) (let update '( @@ -973,129 +973,129 @@ const char* const SetRetentionQuery = R"__( ) )__"; -const char* const GetOldestMessageTimestampMetricsQuery = R"__( - ( - (let timeFrom (Parameter 'TIME_FROM (DataType 'Uint64))) - - (let sentTsIdx '%1$s/%2$i/SentTimestampIdx) - - (let sentIdxRange '( - '('SentTimestamp timeFrom (Uint64 '18446744073709551615)) - '('Offset (Uint64 '0) (Uint64 '18446744073709551615)))) - (let sentIdxSelect '( - 'SentTimestamp)) - (let selectResult (SelectRange sentTsIdx sentIdxRange sentIdxSelect '('('"ItemsLimit" (Uint64 '1))))) - (let messages (Member selectResult 'List)) - - (return (Extend - (AsList (SetResult 'messages messages)) - )) - ) -)__"; - -const char* const GetRetentionOffsetQuery = R"__( - ( - (let offsetFrom (Parameter 'OFFSET_FROM (DataType 'Uint64))) - (let timeFrom (Parameter 'TIME_FROM (DataType 'Uint64))) - (let timeTo (Parameter 'TIME_TO (DataType 'Uint64))) - (let batchSize (Parameter 'BATCH_SIZE 
(DataType 'Uint64))) - - (let sentTsIdx '%1$s/%2$i/SentTimestampIdx) - - (let sentIdxRange '( - '('SentTimestamp timeFrom timeTo) - '('Offset offsetFrom (Uint64 '18446744073709551615)))) - (let sentIdxSelect '( - 'Offset)) - (let selectResult (SelectRange sentTsIdx sentIdxRange sentIdxSelect '('('"ItemsLimit" batchSize)))) - (let messages (Member selectResult 'List)) - (let truncated (Member selectResult 'Truncated)) - - (return (Extend - (AsList (SetResult 'messages messages)) - (AsList (SetResult 'truncated truncated)) - )) - ) -)__"; - -const char* const LoadInflyQuery = R"__( - ( - (let shard (Parameter 'SHARD (DataType 'Uint64))) - - (let inflyTable '%1$s/%2$i/Infly) - (let stateTable '%1$s/State) - - (let range '( - '('Offset (Uint64 '0) (Uint64 '18446744073709551615)))) - (let fields '( - 'Offset - 'RandomId - 'DelayDeadline +const char* const GetOldestMessageTimestampMetricsQuery = R"__( + ( + (let timeFrom (Parameter 'TIME_FROM (DataType 'Uint64))) + + (let sentTsIdx '%1$s/%2$i/SentTimestampIdx) + + (let sentIdxRange '( + '('SentTimestamp timeFrom (Uint64 '18446744073709551615)) + '('Offset (Uint64 '0) (Uint64 '18446744073709551615)))) + (let sentIdxSelect '( + 'SentTimestamp)) + (let selectResult (SelectRange sentTsIdx sentIdxRange sentIdxSelect '('('"ItemsLimit" (Uint64 '1))))) + (let messages (Member selectResult 'List)) + + (return (Extend + (AsList (SetResult 'messages messages)) + )) + ) +)__"; + +const char* const GetRetentionOffsetQuery = R"__( + ( + (let offsetFrom (Parameter 'OFFSET_FROM (DataType 'Uint64))) + (let timeFrom (Parameter 'TIME_FROM (DataType 'Uint64))) + (let timeTo (Parameter 'TIME_TO (DataType 'Uint64))) + (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) + + (let sentTsIdx '%1$s/%2$i/SentTimestampIdx) + + (let sentIdxRange '( + '('SentTimestamp timeFrom timeTo) + '('Offset offsetFrom (Uint64 '18446744073709551615)))) + (let sentIdxSelect '( + 'Offset)) + (let selectResult (SelectRange sentTsIdx sentIdxRange sentIdxSelect '('('"ItemsLimit" batchSize)))) + (let messages (Member selectResult 'List)) + (let truncated (Member selectResult 'Truncated)) + + (return (Extend + (AsList (SetResult 'messages messages)) + (AsList (SetResult 'truncated truncated)) + )) + ) +)__"; + +const char* const LoadInflyQuery = R"__( + ( + (let shard (Parameter 'SHARD (DataType 'Uint64))) + + (let inflyTable '%1$s/%2$i/Infly) + (let stateTable '%1$s/State) + + (let range '( + '('Offset (Uint64 '0) (Uint64 '18446744073709551615)))) + (let fields '( + 'Offset + 'RandomId + 'DelayDeadline 'ReceiveCount - 'VisibilityDeadline)) - (let infly (Member (SelectRange inflyTable range fields '()) 'List)) - - (let stateRow '( - '('State shard))) - (let stateFields '( - 'InflyVersion)) - (let state (SelectRow stateTable stateRow stateFields)) - - (return (Extend - (AsList (SetResult 'inflyVersion (Coalesce (Member state 'InflyVersion) (Uint64 '0)))) - (AsList (SetResult 'infly infly)))) - ) -)__"; - -const char* const GetStateQuery = R"__( - ( - (let shard (Parameter 'SHARD (DataType 'Uint64))) - - (let stateTable '%1$s/State) - - (let state (block '( - (let row '( - '('State shard))) - (let fields '( - 'InflyCount - 'MessageCount - 'ReadOffset - 'WriteOffset - 'CreatedTimestamp)) - (return (SelectRow stateTable row fields))))) - - (return (AsList - (SetResult 'state state))) - ) -)__"; - + 'VisibilityDeadline)) + (let infly (Member (SelectRange inflyTable range fields '()) 'List)) + + (let stateRow '( + '('State shard))) + (let stateFields '( + 'InflyVersion)) + (let state 
(SelectRow stateTable stateRow stateFields)) + + (return (Extend + (AsList (SetResult 'inflyVersion (Coalesce (Member state 'InflyVersion) (Uint64 '0)))) + (AsList (SetResult 'infly infly)))) + ) +)__"; + +const char* const GetStateQuery = R"__( + ( + (let shard (Parameter 'SHARD (DataType 'Uint64))) + + (let stateTable '%1$s/State) + + (let state (block '( + (let row '( + '('State shard))) + (let fields '( + 'InflyCount + 'MessageCount + 'ReadOffset + 'WriteOffset + 'CreatedTimestamp)) + (return (SelectRow stateTable row fields))))) + + (return (AsList + (SetResult 'state state))) + ) +)__"; + const char* const ListDeadLetterSourceQueuesQuery = R"__( ( - (let folderId (Parameter 'FOLDERID (DataType 'Utf8String))) - (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) - - (let queuesTable '%5$s/.Queues) + (let folderId (Parameter 'FOLDERID (DataType 'Utf8String))) + (let userName (Parameter 'USER_NAME (DataType 'Utf8String))) - (let queuesRow '( - '('Account userName) + (let queuesTable '%5$s/.Queues) + + (let queuesRow '( + '('Account userName) '('QueueName (Utf8String '"%4$s")))) - (let queuesRowSelect '( + (let queuesRowSelect '( 'QueueName 'CustomQueueName)) - (let queuesRowRead (SelectRow queuesTable queuesRow queuesRowSelect)) + (let queuesRowRead (SelectRow queuesTable queuesRow queuesRowSelect)) (let skipFolderIdFilter (Equal folderId (Utf8String '""))) (let dlqName - (If skipFolderIdFilter (Member queuesRowRead 'QueueName) (Coalesce (Member queuesRowRead 'CustomQueueName) (Utf8String '"")))) + (If skipFolderIdFilter (Member queuesRowRead 'QueueName) (Coalesce (Member queuesRowRead 'CustomQueueName) (Utf8String '"")))) - (let queuesRange '( - '('Account userName userName) + (let queuesRange '( + '('Account userName userName) '('QueueName (Utf8String '"") (Void)))) (let queueSelect '('QueueName 'QueueState 'FolderId 'DlqName 'CustomQueueName)) - (let queues (Member (SelectRange queuesTable queuesRange queueSelect '()) 'List)) + (let queues (Member (SelectRange queuesTable queuesRange queueSelect '()) 'List)) (let filtered (Filter queues (lambda '(item) (block '( (return (Coalesce @@ -1116,32 +1116,32 @@ const char* const ListDeadLetterSourceQueuesQuery = R"__( ) )__"; -const char* const GetUserSettingsQuery = R"__( - ( - (let fromUser (Parameter 'FROM_USER (DataType 'Utf8String))) - (let fromName (Parameter 'FROM_NAME (DataType 'Utf8String))) - (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) - - (let settingsTable '%5$s/.Settings) - - (let range '( - '('Account fromUser (Void)) - '('Name fromName (Void)))) - (let settingsSelect '( - 'Account - 'Name - 'Value)) - (let settingsResult (SelectRange settingsTable range settingsSelect '('('"ItemsLimit" batchSize)))) - (let settings (Member settingsResult 'List)) - (let truncated (Coalesce (Member settingsResult 'Truncated) (Bool 'false))) - - (return (Extend - (AsList (SetResult 'settings settings)) - (AsList (SetResult 'truncated truncated)) - )) - ) -)__"; - +const char* const GetUserSettingsQuery = R"__( + ( + (let fromUser (Parameter 'FROM_USER (DataType 'Utf8String))) + (let fromName (Parameter 'FROM_NAME (DataType 'Utf8String))) + (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) + + (let settingsTable '%5$s/.Settings) + + (let range '( + '('Account fromUser (Void)) + '('Name fromName (Void)))) + (let settingsSelect '( + 'Account + 'Name + 'Value)) + (let settingsResult (SelectRange settingsTable range settingsSelect '('('"ItemsLimit" batchSize)))) + (let settings (Member settingsResult 'List)) + (let 
truncated (Coalesce (Member settingsResult 'Truncated) (Bool 'false))) + + (return (Extend + (AsList (SetResult 'settings settings)) + (AsList (SetResult 'truncated truncated)) + )) + ) +)__"; + const char* const GetMessageCountMetricsQuery = R"__( ( (let shard (Parameter 'SHARD (DataType 'Uint64))) @@ -1165,85 +1165,85 @@ const char* const GetMessageCountMetricsQuery = R"__( ) )__"; -const char* const GetQueuesListQuery = R"__( - ( - (let fromUser (Parameter 'FROM_USER (DataType 'Utf8String))) - (let fromQueue (Parameter 'FROM_QUEUE (DataType 'Utf8String))) - (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) - - (let queuesTable '%5$s/.Queues) - - (let range '( - '('Account fromUser (Void)) - '('QueueName fromQueue (Void)))) - (let queuesSelect '( - 'Account - 'QueueName - 'QueueState - 'CreatedTimestamp - 'CustomQueueName +const char* const GetQueuesListQuery = R"__( + ( + (let fromUser (Parameter 'FROM_USER (DataType 'Utf8String))) + (let fromQueue (Parameter 'FROM_QUEUE (DataType 'Utf8String))) + (let batchSize (Parameter 'BATCH_SIZE (DataType 'Uint64))) + + (let queuesTable '%5$s/.Queues) + + (let range '( + '('Account fromUser (Void)) + '('QueueName fromQueue (Void)))) + (let queuesSelect '( + 'Account + 'QueueName + 'QueueState + 'CreatedTimestamp + 'CustomQueueName 'DlqName - 'FolderId - 'MasterTabletId - 'Version - 'Shards)) - (let queuesResult (SelectRange queuesTable range queuesSelect '('('"ItemsLimit" batchSize)))) - (let queues (Member queuesResult 'List)) - (let truncated (Coalesce (Member queuesResult 'Truncated) (Bool 'false))) - - (return (Extend - (AsList (SetResult 'queues queues)) - (AsList (SetResult 'truncated truncated)) - )) - ) -)__"; - + 'FolderId + 'MasterTabletId + 'Version + 'Shards)) + (let queuesResult (SelectRange queuesTable range queuesSelect '('('"ItemsLimit" batchSize)))) + (let queues (Member queuesResult 'List)) + (let truncated (Coalesce (Member queuesResult 'Truncated) (Bool 'false))) + + (return (Extend + (AsList (SetResult 'queues queues)) + (AsList (SetResult 'truncated truncated)) + )) + ) +)__"; + } // namespace const char* GetStdQueryById(size_t id) { switch (id) { - case DELETE_MESSAGE_ID: // 0 - return DeleteMessageQuery; - case WRITE_MESSAGE_ID: // 3 - return WriteMessageQuery; - case PURGE_QUEUE_ID: // 4 - return PurgeQueueQuery; - case CHANGE_VISIBILITY_ID: // 5 - return ChangeMessageVisibilityQuery; - case LIST_QUEUES_ID: // 8 - return ListQueuesQuery; - case SET_QUEUE_ATTRIBUTES_ID: // 9 - return SetQueueAttributesQuery; - case SET_RETENTION_ID: // 10 - return SetRetentionQuery; - case LOAD_MESSAGES_ID: // 11 - return LoadMessageQuery; - case INTERNAL_GET_QUEUE_ATTRIBUTES_ID: // 12 - return InternalGetQueueAttributesQuery; - case PURGE_QUEUE_STAGE2_ID: // 13 - return PurgeQueueStage2Query; - case GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID: // 15 - return GetOldestMessageTimestampMetricsQuery; - case GET_RETENTION_OFFSET_ID: // 16 - return GetRetentionOffsetQuery; - case LOAD_INFLY_ID: // 17 - return LoadInflyQuery; - case ADD_MESSAGES_TO_INFLY_ID: // 18 - return AddMessagesToInflyQuery; - case GET_STATE_ID: // 19 - return GetStateQuery; + case DELETE_MESSAGE_ID: // 0 + return DeleteMessageQuery; + case WRITE_MESSAGE_ID: // 3 + return WriteMessageQuery; + case PURGE_QUEUE_ID: // 4 + return PurgeQueueQuery; + case CHANGE_VISIBILITY_ID: // 5 + return ChangeMessageVisibilityQuery; + case LIST_QUEUES_ID: // 8 + return ListQueuesQuery; + case SET_QUEUE_ATTRIBUTES_ID: // 9 + return SetQueueAttributesQuery; + case SET_RETENTION_ID: // 10 + 
return SetRetentionQuery; + case LOAD_MESSAGES_ID: // 11 + return LoadMessageQuery; + case INTERNAL_GET_QUEUE_ATTRIBUTES_ID: // 12 + return InternalGetQueueAttributesQuery; + case PURGE_QUEUE_STAGE2_ID: // 13 + return PurgeQueueStage2Query; + case GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID: // 15 + return GetOldestMessageTimestampMetricsQuery; + case GET_RETENTION_OFFSET_ID: // 16 + return GetRetentionOffsetQuery; + case LOAD_INFLY_ID: // 17 + return LoadInflyQuery; + case ADD_MESSAGES_TO_INFLY_ID: // 18 + return AddMessagesToInflyQuery; + case GET_STATE_ID: // 19 + return GetStateQuery; case LIST_DEAD_LETTER_SOURCE_QUEUES_ID: // 20 return ListDeadLetterSourceQueuesQuery; case LOAD_OR_REDRIVE_MESSAGE_ID: // 21 return LoadOrRedriveMessageQuery; - case GET_USER_SETTINGS_ID: // 23 - return GetUserSettingsQuery; - case GET_QUEUES_LIST_ID: // 24 - return GetQueuesListQuery; + case GET_USER_SETTINGS_ID: // 23 + return GetUserSettingsQuery; + case GET_QUEUES_LIST_ID: // 24 + return GetQueuesListQuery; case GET_MESSAGE_COUNT_METRIC_ID: // 14 return GetMessageCountMetricsQuery; } return nullptr; } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/queues/std/queries.h b/ydb/core/ymq/queues/std/queries.h index 00cdf8b4fae..62d1b20b59b 100644 --- a/ydb/core/ymq/queues/std/queries.h +++ b/ydb/core/ymq/queues/std/queries.h @@ -2,8 +2,8 @@ #include <ydb/core/ymq/base/query_id.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { const char* GetStdQueryById(size_t id); -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/queues/std/schema.cpp b/ydb/core/ymq/queues/std/schema.cpp index 4ba75d17a23..038b289d2dd 100644 --- a/ydb/core/ymq/queues/std/schema.cpp +++ b/ydb/core/ymq/queues/std/schema.cpp @@ -1,8 +1,8 @@ #include "schema.h" -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { -TVector<TTable> GetStandardTables(ui64 shards, ui64 partitions, bool enableAutosplit, ui64 sizeToSplit) { +TVector<TTable> GetStandardTables(ui64 shards, ui64 partitions, bool enableAutosplit, ui64 sizeToSplit) { const TVector<TColumn> AttributesColumns = { TColumn("State", NScheme::NTypeIds::Uint64, true), TColumn("ContentBasedDeduplication", NScheme::NTypeIds::Bool), @@ -14,8 +14,8 @@ TVector<TTable> GetStandardTables(ui64 shards, ui64 partitions, bool enableAutos TColumn("VisibilityTimeout", NScheme::NTypeIds::Uint64), TColumn("DlqName", NScheme::NTypeIds::Utf8), TColumn("DlqArn", NScheme::NTypeIds::Utf8), - TColumn("MaxReceiveCount", NScheme::NTypeIds::Uint64), - TColumn("ShowDetailedCountersDeadline", NScheme::NTypeIds::Uint64)}; + TColumn("MaxReceiveCount", NScheme::NTypeIds::Uint64), + TColumn("ShowDetailedCountersDeadline", NScheme::NTypeIds::Uint64)}; const TVector<TColumn> StateColumns = { TColumn("State", NScheme::NTypeIds::Uint64, true), @@ -26,9 +26,9 @@ TVector<TTable> GetStandardTables(ui64 shards, ui64 partitions, bool enableAutos TColumn("InflyCount", NScheme::NTypeIds::Int64), TColumn("MessageCount", NScheme::NTypeIds::Int64), TColumn("ReadOffset", NScheme::NTypeIds::Uint64), - TColumn("WriteOffset", NScheme::NTypeIds::Uint64), - TColumn("CleanupVersion", NScheme::NTypeIds::Uint64), - TColumn("InflyVersion", NScheme::NTypeIds::Uint64)}; + TColumn("WriteOffset", NScheme::NTypeIds::Uint64), + TColumn("CleanupVersion", NScheme::NTypeIds::Uint64), + TColumn("InflyVersion", NScheme::NTypeIds::Uint64)}; const TVector<TColumn> MessagesDataColumns = { TColumn("RandomId", NScheme::NTypeIds::Uint64, true, partitions), @@ -38,13 +38,13 @@ 
TVector<TTable> GetStandardTables(ui64 shards, ui64 partitions, bool enableAutos TColumn("MessageId", NScheme::NTypeIds::String), TColumn("SenderId", NScheme::NTypeIds::String)}; - const TVector<TColumn> MessagesColumns = { + const TVector<TColumn> MessagesColumns = { TColumn("Offset", NScheme::NTypeIds::Uint64, true), TColumn("RandomId", NScheme::NTypeIds::Uint64), TColumn("SentTimestamp", NScheme::NTypeIds::Uint64), TColumn("DelayDeadline", NScheme::NTypeIds::Uint64)}; - const TVector<TColumn> InflyColumns = { + const TVector<TColumn> InflyColumns = { TColumn("Offset", NScheme::NTypeIds::Uint64, true), TColumn("RandomId", NScheme::NTypeIds::Uint64), TColumn("LoadId", NScheme::NTypeIds::Uint64), @@ -52,8 +52,8 @@ TVector<TTable> GetStandardTables(ui64 shards, ui64 partitions, bool enableAutos TColumn("LockTimestamp", NScheme::NTypeIds::Uint64), TColumn("ReceiveCount", NScheme::NTypeIds::Uint32), TColumn("SentTimestamp", NScheme::NTypeIds::Uint64), - TColumn("VisibilityDeadline", NScheme::NTypeIds::Uint64), - TColumn("DelayDeadline", NScheme::NTypeIds::Uint64)}; + TColumn("VisibilityDeadline", NScheme::NTypeIds::Uint64), + TColumn("DelayDeadline", NScheme::NTypeIds::Uint64)}; const TVector<TColumn> SentTimestampIdxColumns = { TColumn("SentTimestamp", NScheme::NTypeIds::Uint64, true), @@ -68,34 +68,34 @@ TVector<TTable> GetStandardTables(ui64 shards, ui64 partitions, bool enableAutos .SetColumns(AttributesColumns) .SetSmall(true) .SetInMemory(true) - .SetShard(-1) + .SetShard(-1) .SetHasLeaderTablet()); list.push_back(TTable("State") .SetColumns(StateColumns) .SetSmall(true) - .SetOnePartitionPerShard(true) + .SetOnePartitionPerShard(true) .SetInMemory(true) .SetShard(-1)); for (ui64 i = 0; i < shards; ++i) { list.push_back(TTable("MessageData") - .SetColumns(MessagesDataColumns) - .SetSequential(true) - .SetShard(i) - .SetAutosplit(enableAutosplit, sizeToSplit)); + .SetColumns(MessagesDataColumns) + .SetSequential(true) + .SetShard(i) + .SetAutosplit(enableAutosplit, sizeToSplit)); list.push_back(TTable("Messages") - .SetColumns(MessagesColumns) - .SetSequential(true) - .SetShard(i)); + .SetColumns(MessagesColumns) + .SetSequential(true) + .SetShard(i)); list.push_back(TTable("Infly") - .SetColumns(InflyColumns) - .SetSmall(true) - .SetInMemory(true) - .SetShard(i)); + .SetColumns(InflyColumns) + .SetSmall(true) + .SetInMemory(true) + .SetShard(i)); list.push_back(TTable("SentTimestampIdx") - .SetColumns(SentTimestampIdxColumns) - .SetSequential(true) - .SetShard(i)); + .SetColumns(SentTimestampIdxColumns) + .SetSequential(true) + .SetShard(i)); } return list; @@ -118,4 +118,4 @@ TVector<TTable> GetStandardTableNames(ui64 shards) { return list; } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/queues/std/schema.h b/ydb/core/ymq/queues/std/schema.h index 7358490ca88..f11ca7693b7 100644 --- a/ydb/core/ymq/queues/std/schema.h +++ b/ydb/core/ymq/queues/std/schema.h @@ -2,10 +2,10 @@ #include <ydb/core/ymq/base/table_info.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { -TVector<TTable> GetStandardTables(ui64 shards, ui64 partitions, bool enableAutosplit, ui64 sizeToSplit); +TVector<TTable> GetStandardTables(ui64 shards, ui64 partitions, bool enableAutosplit, ui64 sizeToSplit); TVector<TTable> GetStandardTableNames(ui64 shards); -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/core/ymq/queues/std/ya.make b/ydb/core/ymq/queues/std/ya.make index 770b3147a92..a1b7ade4332 100644 --- a/ydb/core/ymq/queues/std/ya.make +++ 
b/ydb/core/ymq/queues/std/ya.make @@ -1,8 +1,8 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + LIBRARY() SRCS( diff --git a/ydb/core/ymq/queues/ya.make b/ydb/core/ymq/queues/ya.make index fe960b5c983..d2bac0d90df 100644 --- a/ydb/core/ymq/queues/ya.make +++ b/ydb/core/ymq/queues/ya.make @@ -1,10 +1,10 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + RECURSE( - common + common fifo std ) diff --git a/ydb/core/ymq/ut/params_ut.cpp b/ydb/core/ymq/ut/params_ut.cpp index 8cc3aae9efe..e2275fb58e2 100644 --- a/ydb/core/ymq/ut/params_ut.cpp +++ b/ydb/core/ymq/ut/params_ut.cpp @@ -32,8 +32,8 @@ Y_UNIT_TEST_SUITE(TParseParamsTests) { UNIT_ASSERT_EQUAL(params.Action, "ChangeMessageVisibilityBatch"); UNIT_ASSERT_EQUAL(params.BatchEntries.size(), 2); - UNIT_ASSERT_EQUAL(*params.BatchEntries[1].VisibilityTimeout, 10); - UNIT_ASSERT_EQUAL(*params.BatchEntries[2].VisibilityTimeout, 20); + UNIT_ASSERT_EQUAL(*params.BatchEntries[1].VisibilityTimeout, 10); + UNIT_ASSERT_EQUAL(*params.BatchEntries[2].VisibilityTimeout, 20); UNIT_ASSERT_EQUAL(params.BatchEntries[1].ReceiptHandle, "batch message 1"); UNIT_ASSERT_EQUAL(params.BatchEntries[2].ReceiptHandle, "batch message 2"); UNIT_ASSERT_EQUAL(params.BatchEntries[2].Id, "Y"); @@ -64,9 +64,9 @@ Y_UNIT_TEST_SUITE(TParseParamsTests) { parser.Append("MessageAttribute.3.Name", "MyAttr"); parser.Append("MessageAttribute.3.Value.StringValue", "test attr"); parser.Append("MessageAttribute.3.Value.DataType", "string"); - parser.Append("MessageAttribute.5.Name", "MyBinaryAttr"); - parser.Append("MessageAttribute.5.Value.BinaryValue", "YmluYXJ5X2RhdGE="); - parser.Append("MessageAttribute.5.Value.DataType", "Binary"); + parser.Append("MessageAttribute.5.Name", "MyBinaryAttr"); + parser.Append("MessageAttribute.5.Value.BinaryValue", "YmluYXJ5X2RhdGE="); + parser.Append("MessageAttribute.5.Value.DataType", "Binary"); UNIT_ASSERT_EQUAL(params.Action, "SendMessage"); UNIT_ASSERT_EQUAL(params.MessageBody, "test"); @@ -74,13 +74,13 @@ Y_UNIT_TEST_SUITE(TParseParamsTests) { UNIT_ASSERT_EQUAL(params.Attributes[1].GetName(), "DelaySeconds"); UNIT_ASSERT_EQUAL(params.Attributes[1].GetValue(), "1"); - UNIT_ASSERT_VALUES_EQUAL(params.MessageAttributes.size(), 2); - UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[3].GetName(), "MyAttr"); - UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[3].GetStringValue(), "test attr"); - UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[3].GetDataType(), "string"); - UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[5].GetName(), "MyBinaryAttr"); - UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[5].GetBinaryValue(), "binary_data"); - UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[5].GetDataType(), "Binary"); + UNIT_ASSERT_VALUES_EQUAL(params.MessageAttributes.size(), 2); + UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[3].GetName(), "MyAttr"); + UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[3].GetStringValue(), "test attr"); + UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[3].GetDataType(), "string"); + UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[5].GetName(), "MyBinaryAttr"); + UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[5].GetBinaryValue(), "binary_data"); + UNIT_ASSERT_STRINGS_EQUAL(params.MessageAttributes[5].GetDataType(), "Binary"); } Y_UNIT_TEST(SendMessageBatchRequest) { @@ -107,39 +107,39 @@ Y_UNIT_TEST_SUITE(TParseParamsTests) { UNIT_ASSERT_EQUAL(params.BatchEntries[2].MessageAttributes[1].GetDataType(), "string"); } - Y_UNIT_TEST(DeleteQueueBatchRequest) { - TParameters params; - 
TParametersParser parser(&params); - parser.Append("DeleteQueueBatchRequestEntry.2.QueueUrl", "url2"); - parser.Append("DeleteQueueBatchRequestEntry.1.Id", "id1"); - - UNIT_ASSERT_VALUES_EQUAL(params.BatchEntries.size(), 2); - UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[1].Id, "id1"); - UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[2].QueueUrl, "url2"); - } - - Y_UNIT_TEST(PurgeQueueBatchRequest) { - TParameters params; - TParametersParser parser(&params); - parser.Append("PurgeQueueBatchRequestEntry.1.QueueUrl", "my/url"); - parser.Append("PurgeQueueBatchRequestEntry.1.Id", "id"); - - UNIT_ASSERT_VALUES_EQUAL(params.BatchEntries.size(), 1); - UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[1].Id, "id"); - UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[1].QueueUrl, "my/url"); - } - - Y_UNIT_TEST(GetQueueAttributesBatchRequest) { - TParameters params; - TParametersParser parser(&params); - parser.Append("GetQueueAttributesBatchRequestEntry.2.QueueUrl", "url"); - parser.Append("GetQueueAttributesBatchRequestEntry.2.Id", "id"); - - UNIT_ASSERT_VALUES_EQUAL(params.BatchEntries.size(), 1); - UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[2].Id, "id"); - UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[2].QueueUrl, "url"); - } - + Y_UNIT_TEST(DeleteQueueBatchRequest) { + TParameters params; + TParametersParser parser(&params); + parser.Append("DeleteQueueBatchRequestEntry.2.QueueUrl", "url2"); + parser.Append("DeleteQueueBatchRequestEntry.1.Id", "id1"); + + UNIT_ASSERT_VALUES_EQUAL(params.BatchEntries.size(), 2); + UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[1].Id, "id1"); + UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[2].QueueUrl, "url2"); + } + + Y_UNIT_TEST(PurgeQueueBatchRequest) { + TParameters params; + TParametersParser parser(&params); + parser.Append("PurgeQueueBatchRequestEntry.1.QueueUrl", "my/url"); + parser.Append("PurgeQueueBatchRequestEntry.1.Id", "id"); + + UNIT_ASSERT_VALUES_EQUAL(params.BatchEntries.size(), 1); + UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[1].Id, "id"); + UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[1].QueueUrl, "my/url"); + } + + Y_UNIT_TEST(GetQueueAttributesBatchRequest) { + TParameters params; + TParametersParser parser(&params); + parser.Append("GetQueueAttributesBatchRequestEntry.2.QueueUrl", "url"); + parser.Append("GetQueueAttributesBatchRequestEntry.2.Id", "id"); + + UNIT_ASSERT_VALUES_EQUAL(params.BatchEntries.size(), 1); + UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[2].Id, "id"); + UNIT_ASSERT_STRINGS_EQUAL(*params.BatchEntries[2].QueueUrl, "url"); + } + Y_UNIT_TEST(UnnumberedAttribute) { TParameters params; TParametersParser parser(&params); @@ -159,46 +159,46 @@ Y_UNIT_TEST_SUITE(TParseParamsTests) { UNIT_ASSERT_EQUAL(params.AttributeNames.size(), 1); UNIT_ASSERT_EQUAL(params.AttributeNames[1], "All"); } - - Y_UNIT_TEST(FailsOnInvalidDeduplicationId) { - TParameters params; - TParametersParser parser(&params); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MessageDeduplicationId", "±"), TSQSException); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MessageDeduplicationId", "very_big_0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999000000000011111111112222222222"), TSQSException); - } - - Y_UNIT_TEST(FailsOnInvalidGroupId) { - TParameters params; - TParametersParser parser(&params); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MessageGroupId", "§"), TSQSException); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MessageGroupId", "very_big_0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999000000000011111111112222222222"), TSQSException); - } - - Y_UNIT_TEST(FailsOnInvalidReceiveRequestAttemptId) { - TParameters params; - TParametersParser parser(&params); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("ReceiveRequestAttemptId", "§"), TSQSException); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("ReceiveRequestAttemptId", "very_big_0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999000000000011111111112222222222"), TSQSException); - } - - Y_UNIT_TEST(FailsOnInvalidMaxNumberOfMessages) { - TParameters params; - TParametersParser parser(&params); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MaxNumberOfMessages", "-1"), TSQSException); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MaxNumberOfMessages", "a"), TSQSException); - } - - Y_UNIT_TEST(FailsOnInvalidWaitTime) { - TParameters params; - TParametersParser parser(&params); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("WaitTimeSeconds", "123456789012345678901234567890"), TSQSException); // too big for uint64 - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("WaitTimeSeconds", "trololo"), TSQSException); - } - - Y_UNIT_TEST(FailsOnInvalidDelaySeconds) { - TParameters params; - TParametersParser parser(&params); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("SendMessageBatchRequestEntry.2.DelaySeconds", "1.0"), TSQSException); - UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("DelaySeconds", "1+3"), TSQSException); - } + + Y_UNIT_TEST(FailsOnInvalidDeduplicationId) { + TParameters params; + TParametersParser parser(&params); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MessageDeduplicationId", "±"), TSQSException); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MessageDeduplicationId", "very_big_0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999000000000011111111112222222222"), TSQSException); + } + + Y_UNIT_TEST(FailsOnInvalidGroupId) { + TParameters params; + TParametersParser parser(&params); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MessageGroupId", "§"), TSQSException); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MessageGroupId", "very_big_0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999000000000011111111112222222222"), TSQSException); + } + + Y_UNIT_TEST(FailsOnInvalidReceiveRequestAttemptId) { + TParameters params; + TParametersParser parser(&params); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("ReceiveRequestAttemptId", "§"), TSQSException); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("ReceiveRequestAttemptId", "very_big_0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999000000000011111111112222222222"), TSQSException); + } + + Y_UNIT_TEST(FailsOnInvalidMaxNumberOfMessages) { + TParameters params; + TParametersParser parser(&params); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MaxNumberOfMessages", "-1"), TSQSException); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("MaxNumberOfMessages", "a"), TSQSException); + } + + Y_UNIT_TEST(FailsOnInvalidWaitTime) { + TParameters params; + TParametersParser parser(&params); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("WaitTimeSeconds", "123456789012345678901234567890"), TSQSException); // too big for uint64 + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("WaitTimeSeconds", "trololo"), TSQSException); + } + + Y_UNIT_TEST(FailsOnInvalidDelaySeconds) { + TParameters params; +
TParametersParser parser(&params); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("SendMessageBatchRequestEntry.2.DelaySeconds", "1.0"), TSQSException); + UNIT_CHECK_GENERATED_EXCEPTION(parser.Append("DelaySeconds", "1+3"), TSQSException); + } } diff --git a/ydb/core/ymq/ut/ya.make b/ydb/core/ymq/ut/ya.make index 27db5cd8222..19360d44081 100644 --- a/ydb/core/ymq/ut/ya.make +++ b/ydb/core/ymq/ut/ya.make @@ -1,8 +1,8 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + UNITTEST() SRCS( diff --git a/ydb/core/ymq/ya.make b/ydb/core/ymq/ya.make index 341606be22b..c73f7f3d749 100644 --- a/ydb/core/ymq/ya.make +++ b/ydb/core/ymq/ya.make @@ -1,8 +1,8 @@ -OWNER( +OWNER( g:kikimr - g:sqs -) - + g:sqs +) + RECURSE( actor base diff --git a/ydb/core/yq/libs/actors/clusters_from_connections.cpp b/ydb/core/yq/libs/actors/clusters_from_connections.cpp index 93e04718927..a76678bfc0f 100644 --- a/ydb/core/yq/libs/actors/clusters_from_connections.cpp +++ b/ydb/core/yq/libs/actors/clusters_from_connections.cpp @@ -92,7 +92,7 @@ void AddClustersFromConnections(const THashMap<TString, YandexQuery::Connection> if (ds.endpoint()) clusterCfg->SetEndpoint(ds.endpoint()); clusterCfg->SetDatabase(ds.database()); - clusterCfg->SetDatabaseId(ds.database_id()); + clusterCfg->SetDatabaseId(ds.database_id()); clusterCfg->SetUseSsl(ds.secure()); clusterCfg->SetAddBearerToToken(useBearerForYdb); clusterCfg->SetClusterType(TPqClusterConfig::CT_DATA_STREAMS); diff --git a/ydb/core/yq/libs/actors/database_resolver.cpp b/ydb/core/yq/libs/actors/database_resolver.cpp index c784d80c00a..305869cf808 100644 --- a/ydb/core/yq/libs/actors/database_resolver.cpp +++ b/ydb/core/yq/libs/actors/database_resolver.cpp @@ -2,18 +2,18 @@ #include <ydb/core/yq/libs/events/events.h> #include <ydb/core/yq/libs/common/cache.h> - + #include <library/cpp/actors/core/actor_bootstrapped.h> #include <library/cpp/actors/http/http.h> #include <library/cpp/actors/http/http_proxy.h> #include <library/cpp/json/json_reader.h> #include <ydb/core/protos/services.pb.h> -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "DatabaseResolver - TraceId: " << TraceId << ": " << stream) +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "DatabaseResolver - TraceId: " << TraceId << ": " << stream) -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "DatabaseResolver - TraceId: " << TraceId << ": " << stream) +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "DatabaseResolver - TraceId: " << TraceId << ": " << stream) namespace NYq { @@ -115,7 +115,7 @@ private: NJson::TJsonValue databaseInfo; auto it = Requests.find(ev->Get()->Request); - LOG_D("Got databaseId response " << ev->Get()->Response->Body); + LOG_D("Got databaseId response " << ev->Get()->Response->Body); if (it == Requests.end()) { errorMessage = "Unknown databaseId"; } else { @@ -125,7 +125,7 @@ private: if (parseJsonOk && (parserIt = Parsers.find(databaseType)) != Parsers.end()) { try { auto res = parserIt->second(databaseInfo, MdbTransformHost); - LOG_D("Got " << databaseId << " " << databaseType << " endpoint " << res.Endpoint << " " << res.Database); + LOG_D("Got " << databaseId << " " << databaseType << " endpoint " << res.Endpoint << " " << res.Database); DatabaseId2Endpoint[std::make_pair(databaseId, databaseType)] = res; result.ConstructInPlace(res); } catch (...) 
{ @@ -151,7 +151,7 @@ private: } if (errorMessage) { - LOG_E("Error on response parsing: " << errorMessage); + LOG_E("Error on response parsing: " << errorMessage); Success = false; } @@ -259,7 +259,7 @@ private: void Handle(TEvents::TEvEndpointRequest::TPtr ev, const TActorContext& ctx) { TraceId = ev->Get()->TraceId; - LOG_D("Start databaseId resolver for " << ev->Get()->DatabaseIds.size() << " ids"); + LOG_D("Start databaseId resolver for " << ev->Get()->DatabaseIds.size() << " ids"); THashMap<NHttp::THttpOutgoingRequestPtr, std::tuple<TString, DatabaseType, TEvents::TDatabaseAuth>> requests; // request, (dbId, type, info) THashMap<std::pair<TString, DatabaseType>, TEndpoint> ready; for (const auto& [p, info] : ev->Get()->DatabaseIds) { diff --git a/ydb/core/yq/libs/actors/logging/log.h b/ydb/core/yq/libs/actors/logging/log.h index a5a938f0149..d2cee7d4cfc 100644 --- a/ydb/core/yq/libs/actors/logging/log.h +++ b/ydb/core/yq/libs/actors/logging/log.h @@ -1,59 +1,59 @@ -#pragma once +#pragma once #include <ydb/core/protos/services.pb.h> - -#include <library/cpp/actors/core/log.h> - -#define LOG_STREAMS_IMPL(level, component, logRecordStream) \ - LOG_LOG_S(::NActors::TActivationContext::AsActorContext(), ::NActors::NLog:: Y_CAT(PRI_, level), ::NKikimrServices::component, logRecordStream); - + +#include <library/cpp/actors/core/log.h> + +#define LOG_STREAMS_IMPL(level, component, logRecordStream) \ + LOG_LOG_S(::NActors::TActivationContext::AsActorContext(), ::NActors::NLog:: Y_CAT(PRI_, level), ::NKikimrServices::component, logRecordStream); + #define LOG_STREAMS_IMPL_AS(actorSystem, level, component, logRecordStream) \ LOG_LOG_S(actorSystem, ::NActors::NLog:: Y_CAT(PRI_, level), ::NKikimrServices::component, logRecordStream); -// Component: STREAMS. -#define LOG_STREAMS_EMERG(logRecordStream) LOG_STREAMS_IMPL(EMERG, STREAMS, logRecordStream) -#define LOG_STREAMS_ALERT(logRecordStream) LOG_STREAMS_IMPL(ALERT, STREAMS, logRecordStream) -#define LOG_STREAMS_CRIT(logRecordStream) LOG_STREAMS_IMPL(CRIT, STREAMS, logRecordStream) -#define LOG_STREAMS_ERROR(logRecordStream) LOG_STREAMS_IMPL(ERROR, STREAMS, logRecordStream) -#define LOG_STREAMS_WARN(logRecordStream) LOG_STREAMS_IMPL(WARN, STREAMS, logRecordStream) -#define LOG_STREAMS_NOTICE(logRecordStream) LOG_STREAMS_IMPL(NOTICE, STREAMS, logRecordStream) -#define LOG_STREAMS_INFO(logRecordStream) LOG_STREAMS_IMPL(INFO, STREAMS, logRecordStream) -#define LOG_STREAMS_DEBUG(logRecordStream) LOG_STREAMS_IMPL(DEBUG, STREAMS, logRecordStream) -#define LOG_STREAMS_TRACE(logRecordStream) LOG_STREAMS_IMPL(TRACE, STREAMS, logRecordStream) - -// Component: STREAMS_SERVICE. 
-#define LOG_STREAMS_SERVICE_EMERG(logRecordStream) LOG_STREAMS_IMPL(EMERG, STREAMS_SERVICE, logRecordStream) -#define LOG_STREAMS_SERVICE_ALERT(logRecordStream) LOG_STREAMS_IMPL(ALERT, STREAMS_SERVICE, logRecordStream) -#define LOG_STREAMS_SERVICE_CRIT(logRecordStream) LOG_STREAMS_IMPL(CRIT, STREAMS_SERVICE, logRecordStream) -#define LOG_STREAMS_SERVICE_ERROR(logRecordStream) LOG_STREAMS_IMPL(ERROR, STREAMS_SERVICE, logRecordStream) -#define LOG_STREAMS_SERVICE_WARN(logRecordStream) LOG_STREAMS_IMPL(WARN, STREAMS_SERVICE, logRecordStream) -#define LOG_STREAMS_SERVICE_NOTICE(logRecordStream) LOG_STREAMS_IMPL(NOTICE, STREAMS_SERVICE, logRecordStream) -#define LOG_STREAMS_SERVICE_INFO(logRecordStream) LOG_STREAMS_IMPL(INFO, STREAMS_SERVICE, logRecordStream) -#define LOG_STREAMS_SERVICE_DEBUG(logRecordStream) LOG_STREAMS_IMPL(DEBUG, STREAMS_SERVICE, logRecordStream) -#define LOG_STREAMS_SERVICE_TRACE(logRecordStream) LOG_STREAMS_IMPL(TRACE, STREAMS_SERVICE, logRecordStream) - -// Component: STREAMS_STORAGE_SERVICE. -#define LOG_STREAMS_STORAGE_SERVICE_EMERG(logRecordStream) LOG_STREAMS_IMPL(EMERG, STREAMS_STORAGE_SERVICE, logRecordStream) -#define LOG_STREAMS_STORAGE_SERVICE_ALERT(logRecordStream) LOG_STREAMS_IMPL(ALERT, STREAMS_STORAGE_SERVICE, logRecordStream) -#define LOG_STREAMS_STORAGE_SERVICE_CRIT(logRecordStream) LOG_STREAMS_IMPL(CRIT, STREAMS_STORAGE_SERVICE, logRecordStream) -#define LOG_STREAMS_STORAGE_SERVICE_ERROR(logRecordStream) LOG_STREAMS_IMPL(ERROR, STREAMS_STORAGE_SERVICE, logRecordStream) -#define LOG_STREAMS_STORAGE_SERVICE_WARN(logRecordStream) LOG_STREAMS_IMPL(WARN, STREAMS_STORAGE_SERVICE, logRecordStream) -#define LOG_STREAMS_STORAGE_SERVICE_NOTICE(logRecordStream) LOG_STREAMS_IMPL(NOTICE, STREAMS_STORAGE_SERVICE, logRecordStream) -#define LOG_STREAMS_STORAGE_SERVICE_INFO(logRecordStream) LOG_STREAMS_IMPL(INFO, STREAMS_STORAGE_SERVICE, logRecordStream) -#define LOG_STREAMS_STORAGE_SERVICE_DEBUG(logRecordStream) LOG_STREAMS_IMPL(DEBUG, STREAMS_STORAGE_SERVICE, logRecordStream) -#define LOG_STREAMS_STORAGE_SERVICE_TRACE(logRecordStream) LOG_STREAMS_IMPL(TRACE, STREAMS_STORAGE_SERVICE, logRecordStream) - +// Component: STREAMS. +#define LOG_STREAMS_EMERG(logRecordStream) LOG_STREAMS_IMPL(EMERG, STREAMS, logRecordStream) +#define LOG_STREAMS_ALERT(logRecordStream) LOG_STREAMS_IMPL(ALERT, STREAMS, logRecordStream) +#define LOG_STREAMS_CRIT(logRecordStream) LOG_STREAMS_IMPL(CRIT, STREAMS, logRecordStream) +#define LOG_STREAMS_ERROR(logRecordStream) LOG_STREAMS_IMPL(ERROR, STREAMS, logRecordStream) +#define LOG_STREAMS_WARN(logRecordStream) LOG_STREAMS_IMPL(WARN, STREAMS, logRecordStream) +#define LOG_STREAMS_NOTICE(logRecordStream) LOG_STREAMS_IMPL(NOTICE, STREAMS, logRecordStream) +#define LOG_STREAMS_INFO(logRecordStream) LOG_STREAMS_IMPL(INFO, STREAMS, logRecordStream) +#define LOG_STREAMS_DEBUG(logRecordStream) LOG_STREAMS_IMPL(DEBUG, STREAMS, logRecordStream) +#define LOG_STREAMS_TRACE(logRecordStream) LOG_STREAMS_IMPL(TRACE, STREAMS, logRecordStream) + +// Component: STREAMS_SERVICE. 
+#define LOG_STREAMS_SERVICE_EMERG(logRecordStream) LOG_STREAMS_IMPL(EMERG, STREAMS_SERVICE, logRecordStream) +#define LOG_STREAMS_SERVICE_ALERT(logRecordStream) LOG_STREAMS_IMPL(ALERT, STREAMS_SERVICE, logRecordStream) +#define LOG_STREAMS_SERVICE_CRIT(logRecordStream) LOG_STREAMS_IMPL(CRIT, STREAMS_SERVICE, logRecordStream) +#define LOG_STREAMS_SERVICE_ERROR(logRecordStream) LOG_STREAMS_IMPL(ERROR, STREAMS_SERVICE, logRecordStream) +#define LOG_STREAMS_SERVICE_WARN(logRecordStream) LOG_STREAMS_IMPL(WARN, STREAMS_SERVICE, logRecordStream) +#define LOG_STREAMS_SERVICE_NOTICE(logRecordStream) LOG_STREAMS_IMPL(NOTICE, STREAMS_SERVICE, logRecordStream) +#define LOG_STREAMS_SERVICE_INFO(logRecordStream) LOG_STREAMS_IMPL(INFO, STREAMS_SERVICE, logRecordStream) +#define LOG_STREAMS_SERVICE_DEBUG(logRecordStream) LOG_STREAMS_IMPL(DEBUG, STREAMS_SERVICE, logRecordStream) +#define LOG_STREAMS_SERVICE_TRACE(logRecordStream) LOG_STREAMS_IMPL(TRACE, STREAMS_SERVICE, logRecordStream) + +// Component: STREAMS_STORAGE_SERVICE. +#define LOG_STREAMS_STORAGE_SERVICE_EMERG(logRecordStream) LOG_STREAMS_IMPL(EMERG, STREAMS_STORAGE_SERVICE, logRecordStream) +#define LOG_STREAMS_STORAGE_SERVICE_ALERT(logRecordStream) LOG_STREAMS_IMPL(ALERT, STREAMS_STORAGE_SERVICE, logRecordStream) +#define LOG_STREAMS_STORAGE_SERVICE_CRIT(logRecordStream) LOG_STREAMS_IMPL(CRIT, STREAMS_STORAGE_SERVICE, logRecordStream) +#define LOG_STREAMS_STORAGE_SERVICE_ERROR(logRecordStream) LOG_STREAMS_IMPL(ERROR, STREAMS_STORAGE_SERVICE, logRecordStream) +#define LOG_STREAMS_STORAGE_SERVICE_WARN(logRecordStream) LOG_STREAMS_IMPL(WARN, STREAMS_STORAGE_SERVICE, logRecordStream) +#define LOG_STREAMS_STORAGE_SERVICE_NOTICE(logRecordStream) LOG_STREAMS_IMPL(NOTICE, STREAMS_STORAGE_SERVICE, logRecordStream) +#define LOG_STREAMS_STORAGE_SERVICE_INFO(logRecordStream) LOG_STREAMS_IMPL(INFO, STREAMS_STORAGE_SERVICE, logRecordStream) +#define LOG_STREAMS_STORAGE_SERVICE_DEBUG(logRecordStream) LOG_STREAMS_IMPL(DEBUG, STREAMS_STORAGE_SERVICE, logRecordStream) +#define LOG_STREAMS_STORAGE_SERVICE_TRACE(logRecordStream) LOG_STREAMS_IMPL(TRACE, STREAMS_STORAGE_SERVICE, logRecordStream) + #define LOG_STREAMS_STORAGE_SERVICE_AS_DEBUG(actorSystem, logRecordStream) LOG_STREAMS_IMPL_AS(actorSystem, DEBUG, STREAMS_STORAGE_SERVICE, logRecordStream) -// Component: STREAMS_SCHEDULER_SERVICE. 
-#define LOG_STREAMS_SCHEDULER_SERVICE_EMERG(logRecordStream) LOG_STREAMS_IMPL(EMERG, STREAMS_SCHEDULER_SERVICE, logRecordStream) -#define LOG_STREAMS_SCHEDULER_SERVICE_ALERT(logRecordStream) LOG_STREAMS_IMPL(ALERT, STREAMS_SCHEDULER_SERVICE, logRecordStream) -#define LOG_STREAMS_SCHEDULER_SERVICE_CRIT(logRecordStream) LOG_STREAMS_IMPL(CRIT, STREAMS_SCHEDULER_SERVICE, logRecordStream) -#define LOG_STREAMS_SCHEDULER_SERVICE_ERROR(logRecordStream) LOG_STREAMS_IMPL(ERROR, STREAMS_SCHEDULER_SERVICE, logRecordStream) -#define LOG_STREAMS_SCHEDULER_SERVICE_WARN(logRecordStream) LOG_STREAMS_IMPL(WARN, STREAMS_SCHEDULER_SERVICE, logRecordStream) -#define LOG_STREAMS_SCHEDULER_SERVICE_NOTICE(logRecordStream) LOG_STREAMS_IMPL(NOTICE, STREAMS_SCHEDULER_SERVICE, logRecordStream) -#define LOG_STREAMS_SCHEDULER_SERVICE_INFO(logRecordStream) LOG_STREAMS_IMPL(INFO, STREAMS_SCHEDULER_SERVICE, logRecordStream) -#define LOG_STREAMS_SCHEDULER_SERVICE_DEBUG(logRecordStream) LOG_STREAMS_IMPL(DEBUG, STREAMS_SCHEDULER_SERVICE, logRecordStream) -#define LOG_STREAMS_SCHEDULER_SERVICE_TRACE(logRecordStream) LOG_STREAMS_IMPL(TRACE, STREAMS_SCHEDULER_SERVICE, logRecordStream) +// Component: STREAMS_SCHEDULER_SERVICE. +#define LOG_STREAMS_SCHEDULER_SERVICE_EMERG(logRecordStream) LOG_STREAMS_IMPL(EMERG, STREAMS_SCHEDULER_SERVICE, logRecordStream) +#define LOG_STREAMS_SCHEDULER_SERVICE_ALERT(logRecordStream) LOG_STREAMS_IMPL(ALERT, STREAMS_SCHEDULER_SERVICE, logRecordStream) +#define LOG_STREAMS_SCHEDULER_SERVICE_CRIT(logRecordStream) LOG_STREAMS_IMPL(CRIT, STREAMS_SCHEDULER_SERVICE, logRecordStream) +#define LOG_STREAMS_SCHEDULER_SERVICE_ERROR(logRecordStream) LOG_STREAMS_IMPL(ERROR, STREAMS_SCHEDULER_SERVICE, logRecordStream) +#define LOG_STREAMS_SCHEDULER_SERVICE_WARN(logRecordStream) LOG_STREAMS_IMPL(WARN, STREAMS_SCHEDULER_SERVICE, logRecordStream) +#define LOG_STREAMS_SCHEDULER_SERVICE_NOTICE(logRecordStream) LOG_STREAMS_IMPL(NOTICE, STREAMS_SCHEDULER_SERVICE, logRecordStream) +#define LOG_STREAMS_SCHEDULER_SERVICE_INFO(logRecordStream) LOG_STREAMS_IMPL(INFO, STREAMS_SCHEDULER_SERVICE, logRecordStream) +#define LOG_STREAMS_SCHEDULER_SERVICE_DEBUG(logRecordStream) LOG_STREAMS_IMPL(DEBUG, STREAMS_SCHEDULER_SERVICE, logRecordStream) +#define LOG_STREAMS_SCHEDULER_SERVICE_TRACE(logRecordStream) LOG_STREAMS_IMPL(TRACE, STREAMS_SCHEDULER_SERVICE, logRecordStream) // Component: STREAMS_RESOURCE_SERVICE. 
#define LOG_STREAMS_RESOURCE_SERVICE_EMERG(logRecordStream) LOG_STREAMS_IMPL(EMERG, STREAMS_RESOURCE_SERVICE, logRecordStream) diff --git a/ydb/core/yq/libs/actors/logging/ya.make b/ydb/core/yq/libs/actors/logging/ya.make index e2c2f53379a..ab795ec60c8 100644 --- a/ydb/core/yq/libs/actors/logging/ya.make +++ b/ydb/core/yq/libs/actors/logging/ya.make @@ -1,16 +1,16 @@ -OWNER(g:yq) - -LIBRARY() - -SRCS( - log.h -) - -PEERDIR( - library/cpp/actors/core - ydb/core/protos -) - -YQL_LAST_ABI_VERSION() - -END() +OWNER(g:yq) + +LIBRARY() + +SRCS( + log.h +) + +PEERDIR( + library/cpp/actors/core + ydb/core/protos +) + +YQL_LAST_ABI_VERSION() + +END() diff --git a/ydb/core/yq/libs/actors/nodes_health_check.cpp b/ydb/core/yq/libs/actors/nodes_health_check.cpp index 96e7c1c1cd9..541eac729a8 100644 --- a/ydb/core/yq/libs/actors/nodes_health_check.cpp +++ b/ydb/core/yq/libs/actors/nodes_health_check.cpp @@ -13,10 +13,10 @@ #include <ydb/core/yq/libs/control_plane_storage/events/events.h> #include <google/protobuf/util/time_util.h> -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_NODES_MANAGER, stream) -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_NODES_MANAGER, stream) +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_NODES_MANAGER, stream) +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_NODES_MANAGER, stream) namespace NYq { diff --git a/ydb/core/yq/libs/actors/nodes_manager.cpp b/ydb/core/yq/libs/actors/nodes_manager.cpp index 62d92aec0fd..f381a722f71 100644 --- a/ydb/core/yq/libs/actors/nodes_manager.cpp +++ b/ydb/core/yq/libs/actors/nodes_manager.cpp @@ -15,12 +15,12 @@ #include <ydb/core/protos/services.pb.h> -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_NODES_MANAGER, stream) -#define LOG_I(stream) \ - LOG_INFO_S(*TlsActivationContext, NKikimrServices::YQL_NODES_MANAGER, stream) -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_NODES_MANAGER, stream) +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_NODES_MANAGER, stream) +#define LOG_I(stream) \ + LOG_INFO_S(*TlsActivationContext, NKikimrServices::YQL_NODES_MANAGER, stream) +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_NODES_MANAGER, stream) namespace NYq { @@ -53,7 +53,7 @@ public: }; TYqlNodesManagerActor( - const NYq::TYqSharedResources::TPtr& yqSharedResources, + const NYq::TYqSharedResources::TPtr& yqSharedResources, const NDqs::TWorkerManagerCounters& workerManagerCounters, TIntrusivePtr<ITimeProvider> timeProvider, TIntrusivePtr<IRandomProvider> randomProvider, @@ -71,16 +71,16 @@ public: , PrivateApiConfig(privateApiConfig) , Tenant(tenant) , MkqlInitialMemoryLimit(mkqlInitialMemoryLimit) - , YqSharedResources(yqSharedResources) + , YqSharedResources(yqSharedResources) , IcPort(icPort) , Address(address) , Client( - YqSharedResources->YdbDriver, + YqSharedResources->YdbDriver, NYdb::TCommonClientSettings() .DiscoveryEndpoint(PrivateApiConfig.GetTaskServiceEndpoint()) .Database(PrivateApiConfig.GetTaskServiceDatabase() ? 
PrivateApiConfig.GetTaskServiceDatabase() : TMaybe<TString>()), clientCounters) - + { InstanceId = GetGuidAsString(RandomProvider->GenUuid4()); } @@ -261,8 +261,8 @@ private: TString Tenant; ui64 MkqlInitialMemoryLimit; - NYq::TYqSharedResources::TPtr YqSharedResources; - + NYq::TYqSharedResources::TPtr YqSharedResources; + const ui32 IcPort; // Interconnect Port const TString Address; @@ -297,13 +297,13 @@ IActor* CreateYqlNodesManager( TIntrusivePtr<IRandomProvider> randomProvider, const ::NYq::NCommon::TServiceCounters& serviceCounters, const NConfig::TPrivateApiConfig& privateApiConfig, - const NYq::TYqSharedResources::TPtr& yqSharedResources, + const NYq::TYqSharedResources::TPtr& yqSharedResources, const ui32& icPort, const TString& address, const TString& tenant, ui64 mkqlInitialMemoryLimit, const NMonitoring::TDynamicCounterPtr& clientCounters) { - return new TYqlNodesManagerActor(yqSharedResources, workerManagerCounters, + return new TYqlNodesManagerActor(yqSharedResources, workerManagerCounters, timeProvider, randomProvider, serviceCounters, privateApiConfig, icPort, address, tenant, mkqlInitialMemoryLimit, clientCounters); } diff --git a/ydb/core/yq/libs/actors/nodes_manager.h b/ydb/core/yq/libs/actors/nodes_manager.h index 7301219b589..bd99f4d254c 100644 --- a/ydb/core/yq/libs/actors/nodes_manager.h +++ b/ydb/core/yq/libs/actors/nodes_manager.h @@ -3,7 +3,7 @@ #include <ydb/core/yq/libs/common/service_counters.h> #include <ydb/core/yq/libs/events/events.h> #include <ydb/core/yq/libs/shared_resources/shared_resources.h> - + #include <ydb/library/yql/minikql/computation/mkql_computation_node.h> #include <ydb/library/yql/providers/dq/provider/yql_dq_gateway.h> #include <ydb/library/yql/providers/dq/worker_manager/interface/counters.h> @@ -27,7 +27,7 @@ IActor* CreateYqlNodesManager( TIntrusivePtr<IRandomProvider> randomProvider, const ::NYq::NCommon::TServiceCounters& serviceCounters, const NConfig::TPrivateApiConfig& privateApiConfig, - const NYq::TYqSharedResources::TPtr& yqSharedResources, + const NYq::TYqSharedResources::TPtr& yqSharedResources, const ui32& icPort, const TString& address, const TString& tenant = "", diff --git a/ydb/core/yq/libs/actors/pending_fetcher.cpp b/ydb/core/yq/libs/actors/pending_fetcher.cpp index 0095963c8f1..30e33acfaf8 100644 --- a/ydb/core/yq/libs/actors/pending_fetcher.cpp +++ b/ydb/core/yq/libs/actors/pending_fetcher.cpp @@ -57,19 +57,19 @@ #include <util/generic/guid.h> #include <util/system/hostname.h> -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Fetcher: " << stream) -#define LOG_I(stream) \ - LOG_INFO_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Fetcher: " << stream) -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Fetcher: " << stream) +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Fetcher: " << stream) +#define LOG_I(stream) \ + LOG_INFO_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Fetcher: " << stream) +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Fetcher: " << stream) namespace NYq { using namespace NActors; -namespace { - +namespace { + struct TEvGetTaskInternalResponse : public NActors::TEventLocal<TEvGetTaskInternalResponse, NActors::TEvents::TSystem::Completed> { bool Success = false; const TIssues Issues; @@ -85,17 +85,17 @@ struct TEvGetTaskInternalResponse : public NActors::TEventLocal<TEvGetTaskIntern { } }; -template <class TElement> 
-TVector<TElement> VectorFromProto(const ::google::protobuf::RepeatedPtrField<TElement>& field) { - return { field.begin(), field.end() }; -} - -} // namespace - +template <class TElement> +TVector<TElement> VectorFromProto(const ::google::protobuf::RepeatedPtrField<TElement>& field) { + return { field.begin(), field.end() }; +} + +} // namespace + class TYqlPendingFetcher : public NActors::TActorBootstrapped<TYqlPendingFetcher> { public: TYqlPendingFetcher( - const NYq::TYqSharedResources::TPtr& yqSharedResources, + const NYq::TYqSharedResources::TPtr& yqSharedResources, const ::NYq::NConfig::TCommonConfig& commonConfig, const ::NYq::NConfig::TCheckpointCoordinatorConfig& checkpointCoordinatorConfig, const ::NYq::NConfig::TPrivateApiConfig& privateApiConfig, @@ -105,13 +105,13 @@ public: TIntrusivePtr<ITimeProvider> timeProvider, TIntrusivePtr<IRandomProvider> randomProvider, NKikimr::NMiniKQL::TComputationNodeFactory dqCompFactory, - const ::NYq::NCommon::TServiceCounters& serviceCounters, + const ::NYq::NCommon::TServiceCounters& serviceCounters, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory, IHTTPGateway::TPtr s3Gateway, ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, const NMonitoring::TDynamicCounterPtr& clientCounters ) - : YqSharedResources(yqSharedResources) + : YqSharedResources(yqSharedResources) , CommonConfig(commonConfig) , CheckpointCoordinatorConfig(checkpointCoordinatorConfig) , PrivateApiConfig(privateApiConfig) @@ -124,11 +124,11 @@ public: , ServiceCounters(serviceCounters, "pending_fetcher") , CredentialsFactory(credentialsFactory) , S3Gateway(s3Gateway) - , PqCmConnections(std::move(pqCmConnections)) + , PqCmConnections(std::move(pqCmConnections)) , Guid(CreateGuidAsString()) , ClientCounters(clientCounters) , Client( - YqSharedResources->YdbDriver, + YqSharedResources->YdbDriver, NYdb::TCommonClientSettings() .DiscoveryEndpoint(PrivateApiConfig.GetTaskServiceEndpoint()) .Database(PrivateApiConfig.GetTaskServiceDatabase() ? 
PrivateApiConfig.GetTaskServiceDatabase() : TMaybe<TString>()), @@ -177,7 +177,7 @@ private: HasRunningRequest = false; LOG_D("Got GetTask response from PrivateApi"); if (!ev->Get()->Success) { - LOG_E("Error with GetTask: "<< ev->Get()->Issues.ToString()); + LOG_E("Error with GetTask: "<< ev->Get()->Issues.ToString()); return; } @@ -192,7 +192,7 @@ private: } void GetPendingTask() { - LOG_D("Request Private::GetTask" << ", Owner: " << Guid << ", Host: " << HostName()); + LOG_D("Request Private::GetTask" << ", Owner: " << Guid << ", Host: " << HostName()); Yq::Private::GetTaskRequest request; request.set_owner_id(Guid); request.set_host(HostName()); @@ -222,7 +222,7 @@ private: } void RunTask(const Yq::Private::GetTaskResult::Task& task) { - LOG_D("NewTask:" + LOG_D("NewTask:" << " Scope: " << task.scope() << " Id: " << task.query_id().value() << " UserId: " << task.user_id() @@ -234,7 +234,7 @@ private: } TRunActorParams params( - YqSharedResources->YdbDriver, S3Gateway, + YqSharedResources->YdbDriver, S3Gateway, FunctionRegistry, RandomProvider, ModuleResolver, ModuleResolver->GetNextUniqueId(), DqCompFactory, PqCmConnections, @@ -251,7 +251,7 @@ private: task.execute_mode(), GetEntityIdAsString(CommonConfig.GetIdsPrefix(), EEntityType::RESULT), task.state_load_mode(), - task.disposition(), + task.disposition(), task.status(), task.sensor_labels().at("cloud_id"), VectorFromProto(task.result_set_meta()), @@ -275,7 +275,7 @@ private: hFunc(TEvGetTaskInternalResponse, HandleResponse) ); - NYq::TYqSharedResources::TPtr YqSharedResources; + NYq::TYqSharedResources::TPtr YqSharedResources; NYq::NConfig::TCommonConfig CommonConfig; NYq::NConfig::TCheckpointCoordinatorConfig CheckpointCoordinatorConfig; NYq::NConfig::TPrivateApiConfig PrivateApiConfig; @@ -287,7 +287,7 @@ private: TIntrusivePtr<IRandomProvider> RandomProvider; NKikimr::NMiniKQL::TComputationNodeFactory DqCompFactory; TIntrusivePtr<IDqGateway> DqGateway; - ::NYq::NCommon::TServiceCounters ServiceCounters; + ::NYq::NCommon::TServiceCounters ServiceCounters; IModuleResolver::TPtr ModuleResolver; @@ -298,7 +298,7 @@ private: ISecuredServiceAccountCredentialsFactory::TPtr CredentialsFactory; const IHTTPGateway::TPtr S3Gateway; - const ::NPq::NConfigurationManager::IConnections::TPtr PqCmConnections; + const ::NPq::NConfigurationManager::IConnections::TPtr PqCmConnections; const TString Guid; //OwnerId const NMonitoring::TDynamicCounterPtr ClientCounters; @@ -309,7 +309,7 @@ private: NActors::IActor* CreatePendingFetcher( - const NYq::TYqSharedResources::TPtr& yqSharedResources, + const NYq::TYqSharedResources::TPtr& yqSharedResources, const ::NYq::NConfig::TCommonConfig& commonConfig, const ::NYq::NConfig::TCheckpointCoordinatorConfig& checkpointCoordinatorConfig, const ::NYq::NConfig::TPrivateApiConfig& privateApiConfig, @@ -319,14 +319,14 @@ NActors::IActor* CreatePendingFetcher( TIntrusivePtr<ITimeProvider> timeProvider, TIntrusivePtr<IRandomProvider> randomProvider, NKikimr::NMiniKQL::TComputationNodeFactory dqCompFactory, - const ::NYq::NCommon::TServiceCounters& serviceCounters, + const ::NYq::NCommon::TServiceCounters& serviceCounters, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory, IHTTPGateway::TPtr s3Gateway, ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, const NMonitoring::TDynamicCounterPtr& clientCounters) { return new TYqlPendingFetcher( - yqSharedResources, + yqSharedResources, commonConfig, checkpointCoordinatorConfig, privateApiConfig, @@ -338,7 +338,7 @@ NActors::IActor* 
CreatePendingFetcher( dqCompFactory, serviceCounters, credentialsFactory, - s3Gateway, + s3Gateway, std::move(pqCmConnections), clientCounters); } diff --git a/ydb/core/yq/libs/actors/pinger.cpp b/ydb/core/yq/libs/actors/pinger.cpp index 55fc80d6a0b..d49650022f5 100644 --- a/ydb/core/yq/libs/actors/pinger.cpp +++ b/ydb/core/yq/libs/actors/pinger.cpp @@ -5,128 +5,128 @@ #include <ydb/core/yq/libs/control_plane_storage/control_plane_storage.h> #include <ydb/core/yq/libs/control_plane_storage/events/events.h> #include <ydb/core/yq/libs/events/events.h> - + #include <library/cpp/actors/core/actor_bootstrapped.h> #include <library/cpp/actors/core/hfunc.h> #include <library/cpp/actors/core/events.h> #include <library/cpp/actors/core/log.h> #include <library/cpp/protobuf/interop/cast.h> -#include <util/generic/utility.h> - -#include <deque> - -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Pinger - " << "QueryId: " << Id << ", Owner: " << OwnerId << " " << stream) - -#define LOG_W(stream) \ - LOG_WARN_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Pinger - " << "QueryId: " << Id << ", Owner: " << OwnerId << " " << stream) - -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Pinger - " << "QueryId: " << Id << ", Owner: " << OwnerId << " " << stream) - -#define LOG_T(stream) \ - LOG_TRACE_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Pinger - " << "QueryId: " << Id << ", Owner: " << OwnerId << " " << stream) - +#include <util/generic/utility.h> + +#include <deque> + +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Pinger - " << "QueryId: " << Id << ", Owner: " << OwnerId << " " << stream) + +#define LOG_W(stream) \ + LOG_WARN_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Pinger - " << "QueryId: " << Id << ", Owner: " << OwnerId << " " << stream) + +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Pinger - " << "QueryId: " << Id << ", Owner: " << OwnerId << " " << stream) + +#define LOG_T(stream) \ + LOG_TRACE_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Pinger - " << "QueryId: " << Id << ", Owner: " << OwnerId << " " << stream) + namespace NYq { using namespace NActors; using namespace NYql; -struct TEvPingResponse : public NActors::TEventLocal<TEvPingResponse, NActors::TEvents::TSystem::CallbackCompletion> { - TPingTaskResult Result; - YandexQuery::QueryAction Action = YandexQuery::QUERY_ACTION_UNSPECIFIED; - - explicit TEvPingResponse(TPingTaskResult&& result) - : Result(std::move(result)) - , Action(Result.IsResultSet() ? Result.GetResult().action() : YandexQuery::QUERY_ACTION_UNSPECIFIED) - { - } +struct TEvPingResponse : public NActors::TEventLocal<TEvPingResponse, NActors::TEvents::TSystem::CallbackCompletion> { + TPingTaskResult Result; + YandexQuery::QueryAction Action = YandexQuery::QUERY_ACTION_UNSPECIFIED; - explicit TEvPingResponse(const TString& errorMessage) - : TEvPingResponse(MakeResultFromErrorMessage(errorMessage)) + explicit TEvPingResponse(TPingTaskResult&& result) + : Result(std::move(result)) + , Action(Result.IsResultSet() ? 
Result.GetResult().action() : YandexQuery::QUERY_ACTION_UNSPECIFIED) { } -private: - static TPingTaskResult MakeResultFromErrorMessage(const TString& errorMessage) { - NYql::TIssues issues; - issues.AddIssue(errorMessage); - return TPingTaskResult(NYdb::TStatus(NYdb::EStatus::INTERNAL_ERROR, std::move(issues)), nullptr); - } + explicit TEvPingResponse(const TString& errorMessage) + : TEvPingResponse(MakeResultFromErrorMessage(errorMessage)) + { + } + +private: + static TPingTaskResult MakeResultFromErrorMessage(const TString& errorMessage) { + NYql::TIssues issues; + issues.AddIssue(errorMessage); + return TPingTaskResult(NYdb::TStatus(NYdb::EStatus::INTERNAL_ERROR, std::move(issues)), nullptr); + } }; class TPingerActor : public NActors::TActorBootstrapped<TPingerActor> { - class TRetryState { - public: + class TRetryState { + public: void Init(const TInstant& now, const TInstant& startLeaseTime, const TDuration& maxRetryTime) { - StartRequestTime = now; - StartLeaseTime = startLeaseTime; - Delay = TDuration::Zero(); - RetriesCount = 0; + StartRequestTime = now; + StartLeaseTime = startLeaseTime; + Delay = TDuration::Zero(); + RetriesCount = 0; MaxRetryTime = maxRetryTime; - } - - void UpdateStartLeaseTime(TInstant startLeaseTime) { - StartLeaseTime = startLeaseTime; - } - - TMaybe<TDuration> GetNextDelay(TInstant now) { - if (now >= StartLeaseTime + MaxRetryTime) { - return Nothing(); - } - - const TDuration nextDelay = Delay; // The first delay is zero - Delay = ClampVal(Delay * 2, MinDelay, MaxDelay); - - const TDuration randomizedNextDelay = nextDelay ? RandomizeDelay(nextDelay) : nextDelay; - if (now + randomizedNextDelay < StartLeaseTime + MaxRetryTime) { - ++RetriesCount; - return randomizedNextDelay; - } - return Nothing(); - } - - TDuration GetRetryTime(TInstant now) const { - return now - StartRequestTime; - } - - size_t GetRetriesCount() const { - return RetriesCount; - } - - operator bool() const { - return StartRequestTime != TInstant::Zero(); // State has been initialized. - } - - private: - static TDuration RandomizeDelay(TDuration baseDelay) { - const TDuration::TValue half = baseDelay.GetValue() / 2; - return TDuration::FromValue(half + RandomNumber<TDuration::TValue>(half)); - } - - private: - TDuration Delay; // The first retry will be done instantly. - TInstant StartLeaseTime; - TInstant StartRequestTime; - size_t RetriesCount = 0; - + } + + void UpdateStartLeaseTime(TInstant startLeaseTime) { + StartLeaseTime = startLeaseTime; + } + + TMaybe<TDuration> GetNextDelay(TInstant now) { + if (now >= StartLeaseTime + MaxRetryTime) { + return Nothing(); + } + + const TDuration nextDelay = Delay; // The first delay is zero + Delay = ClampVal(Delay * 2, MinDelay, MaxDelay); + + const TDuration randomizedNextDelay = nextDelay ? RandomizeDelay(nextDelay) : nextDelay; + if (now + randomizedNextDelay < StartLeaseTime + MaxRetryTime) { + ++RetriesCount; + return randomizedNextDelay; + } + return Nothing(); + } + + TDuration GetRetryTime(TInstant now) const { + return now - StartRequestTime; + } + + size_t GetRetriesCount() const { + return RetriesCount; + } + + operator bool() const { + return StartRequestTime != TInstant::Zero(); // State has been initialized. + } + + private: + static TDuration RandomizeDelay(TDuration baseDelay) { + const TDuration::TValue half = baseDelay.GetValue() / 2; + return TDuration::FromValue(half + RandomNumber<TDuration::TValue>(half)); + } + + private: + TDuration Delay; // The first retry will be done instantly. 
+ TInstant StartLeaseTime; + TInstant StartRequestTime; + size_t RetriesCount = 0; + TDuration MaxRetryTime; - static constexpr TDuration MaxDelay = TDuration::Seconds(5); - static constexpr TDuration MinDelay = TDuration::MilliSeconds(100); // from second retry - }; - - struct TForwardPingReqInfo { - TForwardPingReqInfo(TEvents::TEvForwardPingRequest::TPtr&& ev) - : Request(std::move(ev)) - { - } - - TEvents::TEvForwardPingRequest::TPtr Request; - bool Requested = false; - TRetryState RetryState; - }; - + static constexpr TDuration MaxDelay = TDuration::Seconds(5); + static constexpr TDuration MinDelay = TDuration::MilliSeconds(100); // from second retry + }; + + struct TForwardPingReqInfo { + TForwardPingReqInfo(TEvents::TEvForwardPingRequest::TPtr&& ev) + : Request(std::move(ev)) + { + } + + TEvents::TEvForwardPingRequest::TPtr Request; + bool Requested = false; + TRetryState RetryState; + }; + struct TConfig { NConfig::TPingerConfig Proto; TDuration PingPeriod = TDuration::Seconds(15); @@ -158,260 +158,260 @@ public: , Client(client) , Parent(parent) , Deadline(deadline) - { - } + { + } static constexpr char ActorName[] = "YQ_PINGER"; - void Bootstrap() { + void Bootstrap() { LOG_D("Start Pinger"); - StartLeaseTime = TActivationContext::Now(); // Not accurate value, but it allows us to retry the first unsuccessful ping request. - ScheduleNextPing(); + StartLeaseTime = TActivationContext::Now(); // Not accurate value, but it allows us to retry the first unsuccessful ping request. + ScheduleNextPing(); Become(&TPingerActor::StateFunc); } private: STRICT_STFUNC( StateFunc, - cFunc(NActors::TEvents::TEvPoison::EventType, PassAway) - hFunc(NActors::TEvents::TEvWakeup, Wakeup) + cFunc(NActors::TEvents::TEvPoison::EventType, PassAway) + hFunc(NActors::TEvents::TEvWakeup, Wakeup) hFunc(TEvPingResponse, Handle) - hFunc(TEvents::TEvForwardPingRequest, Handle) + hFunc(TEvents::TEvForwardPingRequest, Handle) ) void PassAway() override { LOG_D("Stop Pinger"); - NActors::TActorBootstrapped<TPingerActor>::PassAway(); + NActors::TActorBootstrapped<TPingerActor>::PassAway(); } - void ScheduleNextPing() { - if (!Finishing) { - SchedulerCookieHolder.Reset(ISchedulerCookie::Make2Way()); + void ScheduleNextPing() { + if (!Finishing) { + SchedulerCookieHolder.Reset(ISchedulerCookie::Make2Way()); Schedule(Config.PingPeriod, new NActors::TEvents::TEvWakeup(ContinueLeaseWakeupTag), SchedulerCookieHolder.Get()); - } - } - - void Wakeup(NActors::TEvents::TEvWakeup::TPtr& ev) { - if (FatalError) { - LOG_D("Got wakeup after fatal error. Ignore"); - return; - } - - switch (ev->Get()->Tag) { - case ContinueLeaseWakeupTag: - WakeupContinueLease(); - break; - case RetryContinueLeaseWakeupTag: - WakeupRetryContinueLease(); - break; - case RetryForwardPingRequestWakeupTag: - WakeupRetryForwardPingRequest(); - break; - default: - Y_FAIL("Unknow wakeup tag: %lu", ev->Get()->Tag); - } - } - - void WakeupContinueLease() { + } + } + + void Wakeup(NActors::TEvents::TEvWakeup::TPtr& ev) { + if (FatalError) { + LOG_D("Got wakeup after fatal error. 
Ignore"); + return; + } + + switch (ev->Get()->Tag) { + case ContinueLeaseWakeupTag: + WakeupContinueLease(); + break; + case RetryContinueLeaseWakeupTag: + WakeupRetryContinueLease(); + break; + case RetryForwardPingRequestWakeupTag: + WakeupRetryForwardPingRequest(); + break; + default: + Y_FAIL("Unknow wakeup tag: %lu", ev->Get()->Tag); + } + } + + void WakeupContinueLease() { SchedulerCookieHolder.Reset(nullptr); - if (!Finishing) { - Ping(); - } - } - - void WakeupRetryContinueLease() { - Ping(true); - } - - void WakeupRetryForwardPingRequest() { - Y_VERIFY(!ForwardRequests.empty()); - auto& reqInfo = ForwardRequests.front(); - Y_VERIFY(!reqInfo.Requested); - ForwardPing(true); - } - - void Handle(TEvents::TEvForwardPingRequest::TPtr& ev) { - Y_VERIFY(ev->Cookie != ContinueLeaseRequestCookie); - Y_VERIFY(!Finishing); - if (ev->Get()->Final) { - Finishing = true; - SchedulerCookieHolder.Reset(nullptr); - } - - LOG_D("Forward ping request: " << ev->Get()->Request); - if (FatalError) { - if (Finishing) { - LOG_D("Got final ping request after fatal error"); - PassAway(); - } - } else { - ForwardRequests.emplace_back(std::move(ev)); - ForwardPing(); - } + if (!Finishing) { + Ping(); + } + } + + void WakeupRetryContinueLease() { + Ping(true); + } + + void WakeupRetryForwardPingRequest() { + Y_VERIFY(!ForwardRequests.empty()); + auto& reqInfo = ForwardRequests.front(); + Y_VERIFY(!reqInfo.Requested); + ForwardPing(true); + } + + void Handle(TEvents::TEvForwardPingRequest::TPtr& ev) { + Y_VERIFY(ev->Cookie != ContinueLeaseRequestCookie); + Y_VERIFY(!Finishing); + if (ev->Get()->Final) { + Finishing = true; + SchedulerCookieHolder.Reset(nullptr); + } + + LOG_D("Forward ping request: " << ev->Get()->Request); + if (FatalError) { + if (Finishing) { + LOG_D("Got final ping request after fatal error"); + PassAway(); + } + } else { + ForwardRequests.emplace_back(std::move(ev)); + ForwardPing(); + } } - void SendQueryAction(YandexQuery::QueryAction action) { - if (!Finishing) { - Send(Parent, new TEvents::TEvQueryActionResult(action)); + void SendQueryAction(YandexQuery::QueryAction action) { + if (!Finishing) { + Send(Parent, new TEvents::TEvQueryActionResult(action)); } } - static bool Retryable(TEvPingResponse::TPtr& ev) { - if (ev->Get()->Result.IsTransportError()) { - return true; - } - - const NYdb::EStatus status = ev->Get()->Result.GetStatus(); - if (status == NYdb::EStatus::INTERNAL_ERROR - || status == NYdb::EStatus::UNAVAILABLE - || status == NYdb::EStatus::OVERLOADED - || status == NYdb::EStatus::TIMEOUT - || status == NYdb::EStatus::BAD_SESSION - || status == NYdb::EStatus::SESSION_EXPIRED - || status == NYdb::EStatus::SESSION_BUSY) { - return true; - } - - return false; - } - + static bool Retryable(TEvPingResponse::TPtr& ev) { + if (ev->Get()->Result.IsTransportError()) { + return true; + } + + const NYdb::EStatus status = ev->Get()->Result.GetStatus(); + if (status == NYdb::EStatus::INTERNAL_ERROR + || status == NYdb::EStatus::UNAVAILABLE + || status == NYdb::EStatus::OVERLOADED + || status == NYdb::EStatus::TIMEOUT + || status == NYdb::EStatus::BAD_SESSION + || status == NYdb::EStatus::SESSION_EXPIRED + || status == NYdb::EStatus::SESSION_BUSY) { + return true; + } + + return false; + } + void Handle(TEvPingResponse::TPtr& ev) { - if (FatalError) { - LOG_D("Got ping response after fatal error. 
Ignore"); - return; - } - - const TInstant now = TActivationContext::Now(); - const bool success = ev->Get()->Result.IsSuccess(); - const bool retryable = !success && Retryable(ev); - const bool continueLeaseRequest = ev->Cookie == ContinueLeaseRequestCookie; - TRetryState* retryState = nullptr; - Y_VERIFY(continueLeaseRequest || !ForwardRequests.empty()); - if (retryable) { - if (continueLeaseRequest) { - retryState = &RetryState; - } else { - retryState = &ForwardRequests.front().RetryState; - } - Y_VERIFY(*retryState); // Initialized - } - - if (continueLeaseRequest) { - Y_VERIFY(Requested); - Requested = false; - } else { - Y_VERIFY(ForwardRequests.front().Requested); - ForwardRequests.front().Requested = false; - } - - TMaybe<TDuration> retryAfter; - if (retryable) { - retryState->UpdateStartLeaseTime(StartLeaseTime); - retryAfter = retryState->GetNextDelay(now); - } - - if (success) { - LOG_D("Ping response success: " << ev->Get()->Result.GetResult()); - StartLeaseTime = now; + if (FatalError) { + LOG_D("Got ping response after fatal error. Ignore"); + return; + } + + const TInstant now = TActivationContext::Now(); + const bool success = ev->Get()->Result.IsSuccess(); + const bool retryable = !success && Retryable(ev); + const bool continueLeaseRequest = ev->Cookie == ContinueLeaseRequestCookie; + TRetryState* retryState = nullptr; + Y_VERIFY(continueLeaseRequest || !ForwardRequests.empty()); + if (retryable) { + if (continueLeaseRequest) { + retryState = &RetryState; + } else { + retryState = &ForwardRequests.front().RetryState; + } + Y_VERIFY(*retryState); // Initialized + } + + if (continueLeaseRequest) { + Y_VERIFY(Requested); + Requested = false; + } else { + Y_VERIFY(ForwardRequests.front().Requested); + ForwardRequests.front().Requested = false; + } + + TMaybe<TDuration> retryAfter; + if (retryable) { + retryState->UpdateStartLeaseTime(StartLeaseTime); + retryAfter = retryState->GetNextDelay(now); + } + + if (success) { + LOG_D("Ping response success: " << ev->Get()->Result.GetResult()); + StartLeaseTime = now; auto action = ev->Get()->Action; if (action != YandexQuery::QUERY_ACTION_UNSPECIFIED && !Finishing) { - LOG_D("Query action: " << YandexQuery::QueryAction_Name(action)); - SendQueryAction(action); - } - - if (continueLeaseRequest) { - ScheduleNextPing(); - } else { - Send(Parent, new TEvents::TEvForwardPingResponse(true, ev->Get()->Action), 0, ev->Cookie); - ForwardRequests.pop_front(); - - // Process next forward ping request. - if (!ForwardRequests.empty()) { - ForwardPing(); - } - } - } else if (retryAfter) { - LOG_W("Ping response error: " << ev->Get()->Result.GetIssues().ToOneLineString() << ". Retry after: " << *retryAfter); - Schedule(*retryAfter, new NActors::TEvents::TEvWakeup(continueLeaseRequest ? RetryContinueLeaseWakeupTag : RetryForwardPingRequestWakeupTag)); - } else { - TRetryState* retryStateForLogging = retryState; - if (!retryStateForLogging) { - retryStateForLogging = continueLeaseRequest ? &RetryState : &ForwardRequests.front().RetryState; + LOG_D("Query action: " << YandexQuery::QueryAction_Name(action)); + SendQueryAction(action); } - LOG_E("Ping response error: " << ev->Get()->Result.GetIssues().ToOneLineString() << ". 
Retried " << retryStateForLogging->GetRetriesCount() << " times during " << retryStateForLogging->GetRetryTime(now)); - Send(Parent, new TEvents::TEvForwardPingResponse(false, ev->Get()->Action), 0, ev->Cookie); - FatalError = true; - ForwardRequests.clear(); + + if (continueLeaseRequest) { + ScheduleNextPing(); + } else { + Send(Parent, new TEvents::TEvForwardPingResponse(true, ev->Get()->Action), 0, ev->Cookie); + ForwardRequests.pop_front(); + + // Process next forward ping request. + if (!ForwardRequests.empty()) { + ForwardPing(); + } + } + } else if (retryAfter) { + LOG_W("Ping response error: " << ev->Get()->Result.GetIssues().ToOneLineString() << ". Retry after: " << *retryAfter); + Schedule(*retryAfter, new NActors::TEvents::TEvWakeup(continueLeaseRequest ? RetryContinueLeaseWakeupTag : RetryForwardPingRequestWakeupTag)); + } else { + TRetryState* retryStateForLogging = retryState; + if (!retryStateForLogging) { + retryStateForLogging = continueLeaseRequest ? &RetryState : &ForwardRequests.front().RetryState; + } + LOG_E("Ping response error: " << ev->Get()->Result.GetIssues().ToOneLineString() << ". Retried " << retryStateForLogging->GetRetriesCount() << " times during " << retryStateForLogging->GetRetryTime(now)); + Send(Parent, new TEvents::TEvForwardPingResponse(false, ev->Get()->Action), 0, ev->Cookie); + FatalError = true; + ForwardRequests.clear(); } - if (Finishing && ForwardRequests.empty() && !Requested) { - LOG_D("Query finished"); - PassAway(); + if (Finishing && ForwardRequests.empty() && !Requested) { + LOG_D("Query finished"); + PassAway(); } } - void ForwardPing(bool retry = false) { - Y_VERIFY(!ForwardRequests.empty()); - auto& reqInfo = ForwardRequests.front(); - if (!reqInfo.Requested && (retry || !reqInfo.RetryState)) { - reqInfo.Requested = true; - Y_VERIFY(!retry || reqInfo.RetryState); - if (!retry && !reqInfo.RetryState) { + void ForwardPing(bool retry = false) { + Y_VERIFY(!ForwardRequests.empty()); + auto& reqInfo = ForwardRequests.front(); + if (!reqInfo.Requested && (retry || !reqInfo.RetryState)) { + reqInfo.Requested = true; + Y_VERIFY(!retry || reqInfo.RetryState); + if (!retry && !reqInfo.RetryState) { reqInfo.RetryState.Init(TActivationContext::Now(), StartLeaseTime, Config.PingPeriod); - } - LOG_D((retry ? "Retry forward" : "Forward") << " request Private::PingTask"); - - Ping(reqInfo.Request->Get()->Request, reqInfo.Request->Cookie); - } - } - - void Ping(bool retry = false) { - LOG_D((retry ? "Retry request" : "Request") << " Private::PingTask"); - - Y_VERIFY(!Requested); - Requested = true; - - if (!retry) { + } + LOG_D((retry ? "Retry forward" : "Forward") << " request Private::PingTask"); + + Ping(reqInfo.Request->Get()->Request, reqInfo.Request->Cookie); + } + } + + void Ping(bool retry = false) { + LOG_D((retry ? 
"Retry request" : "Request") << " Private::PingTask"); + + Y_VERIFY(!Requested); + Requested = true; + + if (!retry) { RetryState.Init(TActivationContext::Now(), StartLeaseTime, Config.PingPeriod); - } + } Ping(Yq::Private::PingTaskRequest(), ContinueLeaseRequestCookie); - } - + } + void Ping(Yq::Private::PingTaskRequest request, ui64 cookie) { - // Fill ids + // Fill ids request.set_scope(Scope.ToString()); request.set_owner_id(OwnerId); request.mutable_query_id()->set_value(Id); *request.mutable_deadline() = NProtoInterop::CastToProto(Deadline); - - const auto* actorSystem = NActors::TActivationContext::ActorSystem(); - const auto selfId = SelfId(); - LOG_T("Send ping task request: " << request); - auto future = Client.PingTask(std::move(request)); - future.Subscribe( - [actorSystem, selfId, cookie, future](const NThreading::TFuture<TPingTaskResult>&) mutable { - std::unique_ptr<TEvPingResponse> ev; + + const auto* actorSystem = NActors::TActivationContext::ActorSystem(); + const auto selfId = SelfId(); + LOG_T("Send ping task request: " << request); + auto future = Client.PingTask(std::move(request)); + future.Subscribe( + [actorSystem, selfId, cookie, future](const NThreading::TFuture<TPingTaskResult>&) mutable { + std::unique_ptr<TEvPingResponse> ev; try { - auto result = future.ExtractValue(); - ev = std::make_unique<TEvPingResponse>(std::move(result)); + auto result = future.ExtractValue(); + ev = std::make_unique<TEvPingResponse>(std::move(result)); } catch (...) { - ev = std::make_unique<TEvPingResponse>(TStringBuilder() - << "Exception on ping response: " - << CurrentExceptionMessage()); + ev = std::make_unique<TEvPingResponse>(TStringBuilder() + << "Exception on ping response: " + << CurrentExceptionMessage()); } - actorSystem->Send(new IEventHandle(selfId, selfId, ev.release(), 0, cookie)); + actorSystem->Send(new IEventHandle(selfId, selfId, ev.release(), 0, cookie)); } - ); + ); } - static constexpr ui64 ContinueLeaseRequestCookie = Max(); - - enum : ui64 { - ContinueLeaseWakeupTag, - RetryContinueLeaseWakeupTag, - RetryForwardPingRequestWakeupTag, - }; - + static constexpr ui64 ContinueLeaseRequestCookie = Max(); + + enum : ui64 { + ContinueLeaseWakeupTag, + RetryContinueLeaseWakeupTag, + RetryForwardPingRequestWakeupTag, + }; + TConfig Config; const TScope Scope; @@ -421,15 +421,15 @@ private: TPrivateClient Client; bool Requested = false; - TInstant StartLeaseTime; - TRetryState RetryState; + TInstant StartLeaseTime; + TRetryState RetryState; const TActorId Parent; const TInstant Deadline; - std::deque<TForwardPingReqInfo> ForwardRequests; - bool Finishing = false; - bool FatalError = false; // Nonretryable error from PingTask or all retries finished. - + std::deque<TForwardPingReqInfo> ForwardRequests; + bool Finishing = false; + bool FatalError = false; // Nonretryable error from PingTask or all retries finished. 
+ TSchedulerCookieHolder SchedulerCookieHolder; }; diff --git a/ydb/core/yq/libs/actors/proxy.h b/ydb/core/yq/libs/actors/proxy.h index 442d29ac444..584ec741300 100644 --- a/ydb/core/yq/libs/actors/proxy.h +++ b/ydb/core/yq/libs/actors/proxy.h @@ -34,7 +34,7 @@ NActors::TActorId MakeYqlAnalyticsHttpProxyId(); NActors::TActorId MakeYqlAnalyticsFetcherId(ui32 nodeId); NActors::IActor* CreatePendingFetcher( - const NYq::TYqSharedResources::TPtr& yqSharedResources, + const NYq::TYqSharedResources::TPtr& yqSharedResources, const ::NYq::NConfig::TCommonConfig& commonConfig, const ::NYq::NConfig::TCheckpointCoordinatorConfig& checkpointCoordinatorConfig, const ::NYq::NConfig::TPrivateApiConfig& privateApiConfig, @@ -44,7 +44,7 @@ NActors::IActor* CreatePendingFetcher( TIntrusivePtr<ITimeProvider> timeProvider, TIntrusivePtr<IRandomProvider> randomProvider, NKikimr::NMiniKQL::TComputationNodeFactory dqCompFactory, - const ::NYq::NCommon::TServiceCounters& serviceCounters, + const ::NYq::NCommon::TServiceCounters& serviceCounters, NYql::ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory, NYql::IHTTPGateway::TPtr s3Gateway, ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, diff --git a/ydb/core/yq/libs/actors/proxy_private.cpp b/ydb/core/yq/libs/actors/proxy_private.cpp index f8528945786..44ec0967661 100644 --- a/ydb/core/yq/libs/actors/proxy_private.cpp +++ b/ydb/core/yq/libs/actors/proxy_private.cpp @@ -3,7 +3,7 @@ #include <ydb/core/yq/libs/events/events.h> #include <ydb/core/yq/libs/shared_resources/db_pool.h> - + #include <library/cpp/actors/core/events.h> #include <library/cpp/actors/core/hfunc.h> #include <library/cpp/actors/core/actor_bootstrapped.h> @@ -15,12 +15,12 @@ #include <util/generic/guid.h> #include <util/system/hostname.h> -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, stream) -#define LOG_I(stream) \ - LOG_INFO_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, stream) -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, stream) +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, stream) +#define LOG_I(stream) \ + LOG_INFO_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, stream) +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, stream) namespace NYq { diff --git a/ydb/core/yq/libs/actors/proxy_private.h b/ydb/core/yq/libs/actors/proxy_private.h index f5378ebab80..9ac661e713b 100644 --- a/ydb/core/yq/libs/actors/proxy_private.h +++ b/ydb/core/yq/libs/actors/proxy_private.h @@ -1,17 +1,17 @@ #pragma once #include <ydb/core/yq/libs/shared_resources/db_pool.h> #include <ydb/core/yq/libs/events/events.h> - + #include <ydb/library/yql/minikql/computation/mkql_computation_node.h> #include <ydb/library/yql/providers/dq/provider/yql_dq_gateway.h> #include <ydb/library/yql/providers/dq/worker_manager/interface/counters.h> #include <ydb/library/yql/providers/dq/actors/proto_builder.h> -#include <library/cpp/actors/core/actorsystem.h> -#include <library/cpp/time_provider/time_provider.h> -#include <library/cpp/random_provider/random_provider.h> +#include <library/cpp/actors/core/actorsystem.h> +#include <library/cpp/time_provider/time_provider.h> +#include <library/cpp/random_provider/random_provider.h> #include <library/cpp/monlib/metrics/histogram_collector.h> - + namespace NKikimr { namespace NMiniKQL { class IFunctionRegistry; diff --git 
a/ydb/core/yq/libs/actors/result_writer.cpp b/ydb/core/yq/libs/actors/result_writer.cpp index d51b8aae9ff..26016c0c0ad 100644 --- a/ydb/core/yq/libs/actors/result_writer.cpp +++ b/ydb/core/yq/libs/actors/result_writer.cpp @@ -20,12 +20,12 @@ #include <ydb/core/yq/libs/control_plane_storage/events/events.h> #include <ydb/core/yq/libs/private_client/private_client.h> -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Writer: " << TraceId << ": " << stream) -#define LOG_I(stream) \ - LOG_INFO_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Writer: " << TraceId << ": " << stream) -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Writer: " << TraceId << ": " << stream) +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Writer: " << TraceId << ": " << stream) +#define LOG_I(stream) \ + LOG_INFO_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Writer: " << TraceId << ": " << stream) +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, "Writer: " << TraceId << ": " << stream) namespace NYq { @@ -79,7 +79,7 @@ private: void PassAway() { auto duration = (TInstant::Now()-StartTime); - LOG_I("FinishWrite, Records: " << RowIndex << " HasError: " << HasError << " Size: " << Size << " Rows: " << Rows << " FreeSpace: " << FreeSpace << " Duration: " << duration << " AvgSpeed: " << Size/(duration.Seconds()+1)/1024/1024); + LOG_I("FinishWrite, Records: " << RowIndex << " HasError: " << HasError << " Size: " << Size << " Rows: " << Rows << " FreeSpace: " << FreeSpace << " Duration: " << duration << " AvgSpeed: " << Size/(duration.Seconds()+1)/1024/1024); NActors::IActor::PassAway(); } diff --git a/ydb/core/yq/libs/actors/run_actor.cpp b/ydb/core/yq/libs/actors/run_actor.cpp index 5549f8f254e..3e427fa802b 100644 --- a/ydb/core/yq/libs/actors/run_actor.cpp +++ b/ydb/core/yq/libs/actors/run_actor.cpp @@ -67,15 +67,15 @@ #include <util/string/split.h> #include <ydb/core/yq/libs/checkpointing/checkpoint_coordinator.h> #include <ydb/core/yq/libs/checkpointing_common/defs.h> -#include <ydb/core/yq/libs/checkpoint_storage/storage_service.h> +#include <ydb/core/yq/libs/checkpoint_storage/storage_service.h> #include <ydb/core/yq/libs/db_resolver/db_async_resolver_impl.h> #include <ydb/core/yq/libs/common/database_token_builder.h> #include <ydb/core/yq/libs/private_client/private_client.h> -#define LOG_E(stream) \ +#define LOG_E(stream) \ LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, Params.QueryId << " RunActor : " << stream) -#define LOG_D(stream) \ +#define LOG_D(stream) \ LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, Params.QueryId << " RunActor : " << stream) namespace NYq { @@ -87,10 +87,10 @@ using namespace NDqs; class TDeferredCountersCleanupActor : public NActors::TActorBootstrapped<TDeferredCountersCleanupActor> { public: TDeferredCountersCleanupActor( - const NMonitoring::TDynamicCounterPtr& rootCountersParent, - const NMonitoring::TDynamicCounterPtr& publicCountersParent, - const TString& queryId) - : RootCountersParent(rootCountersParent) + const NMonitoring::TDynamicCounterPtr& rootCountersParent, + const NMonitoring::TDynamicCounterPtr& publicCountersParent, + const TString& queryId) + : RootCountersParent(rootCountersParent) , PublicCountersParent(publicCountersParent) , QueryId(queryId) { @@ -98,8 +98,8 @@ public: static constexpr char ActorName[] = "YQ_DEFERRED_COUNTERS_CLEANUP"; - void Bootstrap() { - 
Become(&TDeferredCountersCleanupActor::StateFunc, TDuration::Seconds(60), new NActors::TEvents::TEvWakeup()); + void Bootstrap() { + Become(&TDeferredCountersCleanupActor::StateFunc, TDuration::Seconds(60), new NActors::TEvents::TEvWakeup()); } STRICT_STFUNC(StateFunc, @@ -115,7 +115,7 @@ public: } PassAway(); } - + private: const NMonitoring::TDynamicCounterPtr RootCountersParent; const NMonitoring::TDynamicCounterPtr PublicCountersParent; @@ -138,9 +138,9 @@ public: static constexpr char ActorName[] = "YQ_RUN_ACTOR"; - void Bootstrap() { - LOG_D("Start run actor. Compute state: " << YandexQuery::QueryMeta::ComputeStatus_Name(Params.Status)); - LogReceivedParams(); + void Bootstrap() { + LOG_D("Start run actor. Compute state: " << YandexQuery::QueryMeta::ComputeStatus_Name(Params.Status)); + LogReceivedParams(); Pinger = Register( CreatePingerActor( Params.Scope, @@ -159,7 +159,7 @@ public: Params.PingerConfig, Params.Deadline )); - Become(&TRunActor::StateFuncWrapper<&TRunActor::StateFunc>); + Become(&TRunActor::StateFuncWrapper<&TRunActor::StateFunc>); try { Run(); } catch (const std::exception&) { @@ -168,96 +168,96 @@ public: } private: - template <void (TRunActor::* DelegatedStateFunc)(STFUNC_SIG)> + template <void (TRunActor::* DelegatedStateFunc)(STFUNC_SIG)> STFUNC(StateFuncWrapper) { try { - (this->*DelegatedStateFunc)(ev, ctx); - } catch (...) { + (this->*DelegatedStateFunc)(ev, ctx); + } catch (...) { FailOnException(); } } STRICT_STFUNC(StateFunc, HFunc(TEvents::TEvAsyncContinue, Handle); - hFunc(NActors::TEvents::TEvUndelivered, Handle); - hFunc(NYq::TEvents::TEvGraphParams, Handle); + hFunc(NActors::TEvents::TEvUndelivered, Handle); + hFunc(NYq::TEvents::TEvGraphParams, Handle); hFunc(NYq::TEvents::TEvDataStreamsReadRulesCreationResult, Handle); hFunc(NYql::NDqs::TEvQueryResponse, Handle); - hFunc(TEvents::TEvQueryActionResult, Handle); - hFunc(TEvents::TEvForwardPingResponse, Handle); - hFunc(TEvCheckpointCoordinator::TEvZeroCheckpointDone, Handle); + hFunc(TEvents::TEvQueryActionResult, Handle); + hFunc(TEvents::TEvForwardPingResponse, Handle); + hFunc(TEvCheckpointCoordinator::TEvZeroCheckpointDone, Handle); ) - STRICT_STFUNC(FinishStateFunc, + STRICT_STFUNC(FinishStateFunc, hFunc(NYq::TEvents::TEvDataStreamsReadRulesCreationResult, HandleFinish); - hFunc(TEvents::TEvDataStreamsReadRulesDeletionResult, HandleFinish); - hFunc(NYql::NDqs::TEvQueryResponse, HandleFinish); - hFunc(TEvents::TEvForwardPingResponse, HandleFinish); - - // Ignore tail of action events after normal work. 
- IgnoreFunc(TEvents::TEvAsyncContinue); - IgnoreFunc(NActors::TEvents::TEvUndelivered); - IgnoreFunc(NYq::TEvents::TEvGraphParams); - IgnoreFunc(TEvents::TEvQueryActionResult); - IgnoreFunc(TEvCheckpointCoordinator::TEvZeroCheckpointDone); - ) - - void KillExecuter() { - if (ExecuterId) { - Send(ExecuterId, new NActors::TEvents::TEvPoison()); - - // Clear finished actors ids - ExecuterId = {}; - CheckpointCoordinatorId = {}; - ControlId = {}; - } - } - - void KillChildrenActors() { - if (ReadRulesCreatorId) { - Send(ReadRulesCreatorId, new NActors::TEvents::TEvPoison()); - } - - KillExecuter(); - } - - void CancelRunningQuery() { - if (ReadRulesCreatorId) { - LOG_D("Cancel read rules creation"); - Send(ReadRulesCreatorId, new NActors::TEvents::TEvPoison()); - } - - if (ControlId) { - LOG_D("Cancel running query"); - Send(ControlId, new NDq::TEvDq::TEvAbortExecution(Ydb::StatusIds::ABORTED, YandexQuery::QueryMeta::ComputeStatus_Name(FinalQueryStatus))); - } else { - QueryResponseArrived = true; - } - } - - void PassAway() override { + hFunc(TEvents::TEvDataStreamsReadRulesDeletionResult, HandleFinish); + hFunc(NYql::NDqs::TEvQueryResponse, HandleFinish); + hFunc(TEvents::TEvForwardPingResponse, HandleFinish); + + // Ignore tail of action events after normal work. + IgnoreFunc(TEvents::TEvAsyncContinue); + IgnoreFunc(NActors::TEvents::TEvUndelivered); + IgnoreFunc(NYq::TEvents::TEvGraphParams); + IgnoreFunc(TEvents::TEvQueryActionResult); + IgnoreFunc(TEvCheckpointCoordinator::TEvZeroCheckpointDone); + ) + + void KillExecuter() { + if (ExecuterId) { + Send(ExecuterId, new NActors::TEvents::TEvPoison()); + + // Clear finished actors ids + ExecuterId = {}; + CheckpointCoordinatorId = {}; + ControlId = {}; + } + } + + void KillChildrenActors() { + if (ReadRulesCreatorId) { + Send(ReadRulesCreatorId, new NActors::TEvents::TEvPoison()); + } + + KillExecuter(); + } + + void CancelRunningQuery() { + if (ReadRulesCreatorId) { + LOG_D("Cancel read rules creation"); + Send(ReadRulesCreatorId, new NActors::TEvents::TEvPoison()); + } + + if (ControlId) { + LOG_D("Cancel running query"); + Send(ControlId, new NDq::TEvDq::TEvAbortExecution(Ydb::StatusIds::ABORTED, YandexQuery::QueryMeta::ComputeStatus_Name(FinalQueryStatus))); + } else { + QueryResponseArrived = true; + } + } + + void PassAway() override { if (!Params.Automatic) { // Cleanup non-automatic counters only Register(new TDeferredCountersCleanupActor(RootCountersParent, PublicCountersParent, Params.QueryId)); } - - KillChildrenActors(); - - NActors::TActorBootstrapped<TRunActor>::PassAway(); - } - + + KillChildrenActors(); + + NActors::TActorBootstrapped<TRunActor>::PassAway(); + } + void Run() { - if (!Params.DqGraphs.empty() && Params.Status != YandexQuery::QueryMeta::STARTING) { - FillDqGraphParams(); - } - + if (!Params.DqGraphs.empty() && Params.Status != YandexQuery::QueryMeta::STARTING) { + FillDqGraphParams(); + } + switch (Params.Status) { case YandexQuery::QueryMeta::ABORTING_BY_USER: case YandexQuery::QueryMeta::ABORTING_BY_SYSTEM: - case YandexQuery::QueryMeta::FAILING: - case YandexQuery::QueryMeta::COMPLETING: - FinalizingStatusIsWritten = true; - Finish(GetFinalStatusFromFinalizingStatus(Params.Status)); + case YandexQuery::QueryMeta::FAILING: + case YandexQuery::QueryMeta::COMPLETING: + FinalizingStatusIsWritten = true; + Finish(GetFinalStatusFromFinalizingStatus(Params.Status)); break; case YandexQuery::QueryMeta::STARTING: HandleConnections(); @@ -268,7 +268,7 @@ private: ReRunQuery(); break; default: - Abort("Fail to start 
query from unexpected status " + YandexQuery::QueryMeta::ComputeStatus_Name(Params.Status), YandexQuery::QueryMeta::FAILED); + Abort("Fail to start query from unexpected status " + YandexQuery::QueryMeta::ComputeStatus_Name(Params.Status), YandexQuery::QueryMeta::FAILED); break; } } @@ -279,7 +279,7 @@ private: THashMap<std::pair<TString, DatabaseType>, TEvents::TDatabaseAuth> databaseIds; for (const auto& connection : Params.Connections) { if (!connection.content().name()) { - LOG_D("Connection with empty name " << connection.meta().id()); + LOG_D("Connection with empty name " << connection.meta().id()); continue; } Connections[connection.content().name()] = connection; // Necessary for TDatabaseAsyncResolverWithMeta @@ -290,7 +290,7 @@ private: void RunProgram() { LOG_D("RunProgram"); if (!CompileQuery()) { - Abort("Failed to compile query", YandexQuery::QueryMeta::FAILED); + Abort("Failed to compile query", YandexQuery::QueryMeta::FAILED); } } @@ -300,104 +300,104 @@ private: void Fail(const TString& errorMessage) { LOG_E("Fail for query " << Params.QueryId << ", finishing: " << Finishing << ", details: " << errorMessage); - + if (YqConnections.empty()) { Issues.AddIssue("YqConnections array is empty"); } - - if (!Finishing) { - Abort("Internal Error", YandexQuery::QueryMeta::FAILED); - return; + + if (!Finishing) { + Abort("Internal Error", YandexQuery::QueryMeta::FAILED); + return; } - // Already finishing. Fail instantly. - Issues.AddIssue("Internal Error"); - - if (!ConsumersAreDeleted) { + // Already finishing. Fail instantly. + Issues.AddIssue("Internal Error"); + + if (!ConsumersAreDeleted) { for (const Yq::Private::TopicConsumer& c : Params.CreatedTopicConsumers) { - TransientIssues.AddIssue(TStringBuilder() << "Created read rule `" << c.consumer_name() << "` for topic `" << c.topic_path() << "` (database id " << c.database_id() << ") maybe was left undeleted: internal error occurred"); - TransientIssues.back().Severity = NYql::TSeverityIds::S_WARNING; - } - } - - // If target status was successful, change it to failed because we are in internal error handler. - if (QueryStateUpdateRequest.status() == YandexQuery::QueryMeta::COMPLETED || QueryStateUpdateRequest.status() == YandexQuery::QueryMeta::PAUSED) { - QueryStateUpdateRequest.set_status(YandexQuery::QueryMeta::FAILED); - } - - SendPingAndPassAway(); + TransientIssues.AddIssue(TStringBuilder() << "Created read rule `" << c.consumer_name() << "` for topic `" << c.topic_path() << "` (database id " << c.database_id() << ") maybe was left undeleted: internal error occurred"); + TransientIssues.back().Severity = NYql::TSeverityIds::S_WARNING; + } + } + + // If target status was successful, change it to failed because we are in internal error handler. 
+ if (QueryStateUpdateRequest.status() == YandexQuery::QueryMeta::COMPLETED || QueryStateUpdateRequest.status() == YandexQuery::QueryMeta::PAUSED) { + QueryStateUpdateRequest.set_status(YandexQuery::QueryMeta::FAILED); + } + + SendPingAndPassAway(); } - void Handle(TEvents::TEvQueryActionResult::TPtr& ev) { + void Handle(TEvents::TEvQueryActionResult::TPtr& ev) { Action = ev->Get()->Action; - LOG_D("New query action received: " << YandexQuery::QueryAction_Name(Action)); + LOG_D("New query action received: " << YandexQuery::QueryAction_Name(Action)); switch (Action) { case YandexQuery::ABORT: - case YandexQuery::ABORT_GRACEFULLY: // not fully implemented - Abort("Aborted by user", YandexQuery::QueryMeta::ABORTED_BY_USER); - break; - case YandexQuery::PAUSE: // not implemented - case YandexQuery::PAUSE_GRACEFULLY: // not implemented - case YandexQuery::RESUME: // not implemented - Abort(TStringBuilder() << "Unsupported query action: " << YandexQuery::QueryAction_Name(Action), YandexQuery::QueryMeta::FAILED); + case YandexQuery::ABORT_GRACEFULLY: // not fully implemented + Abort("Aborted by user", YandexQuery::QueryMeta::ABORTED_BY_USER); break; + case YandexQuery::PAUSE: // not implemented + case YandexQuery::PAUSE_GRACEFULLY: // not implemented + case YandexQuery::RESUME: // not implemented + Abort(TStringBuilder() << "Unsupported query action: " << YandexQuery::QueryAction_Name(Action), YandexQuery::QueryMeta::FAILED); + break; default: - Abort(TStringBuilder() << "Unknown query action: " << YandexQuery::QueryAction_Name(Action), YandexQuery::QueryMeta::FAILED); + Abort(TStringBuilder() << "Unknown query action: " << YandexQuery::QueryAction_Name(Action), YandexQuery::QueryMeta::FAILED); break; } } void CheckForConsumers() { - struct TTopicIndependentConsumers { - struct TTopicIndependentConsumer { - TString ConsumerName; - std::vector<NYql::NPq::TTopicPartitionsSet> PartitionsSets; - }; - - std::pair<TString, bool> AddPartitionsSet(const TMaybe<NYql::NPq::TTopicPartitionsSet>& set, const TString& consumerNamePrefix) { - if (!ConsumerNamePrefix) { // Init - ConsumerNamePrefix = consumerNamePrefix; - } - - if (!set) { - return {AddNewConsumer(set), true}; - } - - for (TTopicIndependentConsumer& consumer : IndependentConsumers) { - if (!consumer.PartitionsSets.empty()) { - bool intersects = false; - for (const NYql::NPq::TTopicPartitionsSet& consumerSet : consumer.PartitionsSets) { - if (consumerSet.Intersects(*set)) { - intersects = true; - break; - } - } - if (!intersects) { - consumer.PartitionsSets.push_back(*set); - return {consumer.ConsumerName, false}; - } - } - } - return {AddNewConsumer(set), true}; - } - - TString AddNewConsumer(const TMaybe<NYql::NPq::TTopicPartitionsSet>& set) { - TTopicIndependentConsumer& c = IndependentConsumers.emplace_back(); - c.ConsumerName = IndependentConsumers.size() == 1 ? 
ConsumerNamePrefix : TStringBuilder() << ConsumerNamePrefix << '-' << IndependentConsumers.size(); - if (set) { - c.PartitionsSets.push_back(*set); - } - return c.ConsumerName; - } - - TString ConsumerNamePrefix; - std::vector<TTopicIndependentConsumer> IndependentConsumers; - }; - - THashMap<TString, TTopicIndependentConsumers> topicToIndependentConsumers; + struct TTopicIndependentConsumers { + struct TTopicIndependentConsumer { + TString ConsumerName; + std::vector<NYql::NPq::TTopicPartitionsSet> PartitionsSets; + }; + + std::pair<TString, bool> AddPartitionsSet(const TMaybe<NYql::NPq::TTopicPartitionsSet>& set, const TString& consumerNamePrefix) { + if (!ConsumerNamePrefix) { // Init + ConsumerNamePrefix = consumerNamePrefix; + } + + if (!set) { + return {AddNewConsumer(set), true}; + } + + for (TTopicIndependentConsumer& consumer : IndependentConsumers) { + if (!consumer.PartitionsSets.empty()) { + bool intersects = false; + for (const NYql::NPq::TTopicPartitionsSet& consumerSet : consumer.PartitionsSets) { + if (consumerSet.Intersects(*set)) { + intersects = true; + break; + } + } + if (!intersects) { + consumer.PartitionsSets.push_back(*set); + return {consumer.ConsumerName, false}; + } + } + } + return {AddNewConsumer(set), true}; + } + + TString AddNewConsumer(const TMaybe<NYql::NPq::TTopicPartitionsSet>& set) { + TTopicIndependentConsumer& c = IndependentConsumers.emplace_back(); + c.ConsumerName = IndependentConsumers.size() == 1 ? ConsumerNamePrefix : TStringBuilder() << ConsumerNamePrefix << '-' << IndependentConsumers.size(); + if (set) { + c.PartitionsSets.push_back(*set); + } + return c.ConsumerName; + } + + TString ConsumerNamePrefix; + std::vector<TTopicIndependentConsumer> IndependentConsumers; + }; + + THashMap<TString, TTopicIndependentConsumers> topicToIndependentConsumers; ui32 graphIndex = 0; for (auto& graphParams : DqGraphParams) { - LOG_D("Graph " << graphIndex); + LOG_D("Graph " << graphIndex); graphIndex++; const TString consumerNamePrefix = graphIndex == 1 ? 
Params.QueryId : TStringBuilder() << Params.QueryId << '-' << graphIndex; // Simple name in simple case const auto& secureParams = graphParams.GetSecureParams(); @@ -408,48 +408,48 @@ private: YQL_ENSURE(settingsAny.Is<NYql::NPq::NProto::TDqPqTopicSource>()); NYql::NPq::NProto::TDqPqTopicSource srcDesc; YQL_ENSURE(settingsAny.UnpackTo(&srcDesc)); - + if (!srcDesc.GetConsumerName()) { const auto [consumerName, isNewConsumer] = topicToIndependentConsumers[srcDesc.GetTopicPath()] - .AddPartitionsSet(NYql::NPq::GetTopicPartitionsSet(task.GetMeta()), consumerNamePrefix); + .AddPartitionsSet(NYql::NPq::GetTopicPartitionsSet(task.GetMeta()), consumerNamePrefix); srcDesc.SetConsumerName(consumerName); settingsAny.PackFrom(srcDesc); - if (isNewConsumer) { - auto s = consumerName; - LOG_D("Create consumer \"" << s << "\" for topic \"" << srcDesc.GetTopicPath() << "\""); - if (const TString& tokenName = srcDesc.GetToken().GetName()) { - const auto token = secureParams.find(tokenName); - YQL_ENSURE(token != secureParams.end(), "Token " << tokenName << " was not found in secure params"); - CredentialsForConsumersCreation.emplace_back( - CreateCredentialsProviderFactoryForStructuredToken(Params.CredentialsFactory, token->second, srcDesc.GetAddBearerToToken())); - } else { - CredentialsForConsumersCreation.emplace_back(NYdb::CreateInsecureCredentialsProviderFactory()); - } + if (isNewConsumer) { + auto s = consumerName; + LOG_D("Create consumer \"" << s << "\" for topic \"" << srcDesc.GetTopicPath() << "\""); + if (const TString& tokenName = srcDesc.GetToken().GetName()) { + const auto token = secureParams.find(tokenName); + YQL_ENSURE(token != secureParams.end(), "Token " << tokenName << " was not found in secure params"); + CredentialsForConsumersCreation.emplace_back( + CreateCredentialsProviderFactoryForStructuredToken(Params.CredentialsFactory, token->second, srcDesc.GetAddBearerToToken())); + } else { + CredentialsForConsumersCreation.emplace_back(NYdb::CreateInsecureCredentialsProviderFactory()); + } TopicsForConsumersCreation.emplace_back(std::move(srcDesc)); } - } - } - } - } - } - } - - void Handle(TEvents::TEvForwardPingResponse::TPtr& ev) { - LOG_D("Forward ping response. Success: " << ev->Get()->Success << ". Cookie: " << ev->Cookie); - if (!ev->Get()->Success) { // Failed setting new status or lease was lost - ResignQuery(); - return; - } - + } + } + } + } + } + } + + void Handle(TEvents::TEvForwardPingResponse::TPtr& ev) { + LOG_D("Forward ping response. Success: " << ev->Get()->Success << ". Cookie: " << ev->Cookie); + if (!ev->Get()->Success) { // Failed setting new status or lease was lost + ResignQuery(); + return; + } + if (ev->Cookie == SaveQueryInfoCookie) { if (TopicsForConsumersCreation.size()) { ReadRulesCreatorId = Register( ::NYq::MakeReadRuleCreatorActor( SelfId(), Params.QueryId, - Params.Driver, + Params.Driver, std::move(TopicsForConsumersCreation), std::move(CredentialsForConsumersCreation) ) @@ -457,24 +457,24 @@ private: } else { RunDqGraphs(); } - } else if (ev->Cookie == SetLoadFromCheckpointModeCookie) { - Send(CheckpointCoordinatorId, new TEvCheckpointCoordinator::TEvRunGraph()); - } - } - - void HandleFinish(TEvents::TEvForwardPingResponse::TPtr& ev) { - LOG_D("Forward ping response. Success: " << ev->Get()->Success << ". 
Cookie: " << ev->Cookie); - if (!ev->Get()->Success) { // Failed setting new status or lease was lost - Fail("Failed to write finalizing status"); - return; - } - - if (ev->Cookie == SaveFinalizingStatusCookie) { - FinalizingStatusIsWritten = true; - ContinueFinish(); - } - } - + } else if (ev->Cookie == SetLoadFromCheckpointModeCookie) { + Send(CheckpointCoordinatorId, new TEvCheckpointCoordinator::TEvRunGraph()); + } + } + + void HandleFinish(TEvents::TEvForwardPingResponse::TPtr& ev) { + LOG_D("Forward ping response. Success: " << ev->Get()->Success << ". Cookie: " << ev->Cookie); + if (!ev->Get()->Success) { // Failed setting new status or lease was lost + Fail("Failed to write finalizing status"); + return; + } + + if (ev->Cookie == SaveFinalizingStatusCookie) { + FinalizingStatusIsWritten = true; + ContinueFinish(); + } + } + TString CheckLimitsOfDqGraphs() { size_t dqTasks = 0; for (const auto& dqGraph : DqGraphParams) { @@ -496,17 +496,17 @@ private: return false; } - void Handle(NYq::TEvents::TEvGraphParams::TPtr& ev) { + void Handle(NYq::TEvents::TEvGraphParams::TPtr& ev) { LOG_D("Graph params with tasks: " << ev->Get()->GraphParams.TasksSize()); - DqGraphParams.push_back(ev->Get()->GraphParams); - } - - void Handle(TEvCheckpointCoordinator::TEvZeroCheckpointDone::TPtr&) { - LOG_D("Coordinator saved zero checkpoint"); - Y_VERIFY(CheckpointCoordinatorId); - SetLoadFromCheckpointMode(); + DqGraphParams.push_back(ev->Get()->GraphParams); } + void Handle(TEvCheckpointCoordinator::TEvZeroCheckpointDone::TPtr&) { + LOG_D("Coordinator saved zero checkpoint"); + Y_VERIFY(CheckpointCoordinatorId); + SetLoadFromCheckpointMode(); + } + i32 UpdateResultIndices() { i32 count = 0; for (const auto& graphParams : DqGraphParams) { @@ -542,21 +542,21 @@ private: CheckForConsumers(); - Params.CreatedTopicConsumers.clear(); - Params.CreatedTopicConsumers.reserve(TopicsForConsumersCreation.size()); + Params.CreatedTopicConsumers.clear(); + Params.CreatedTopicConsumers.reserve(TopicsForConsumersCreation.size()); for (const NYql::NPq::NProto::TDqPqTopicSource& src : TopicsForConsumersCreation) { auto& consumer = *request.add_created_topic_consumers(); consumer.set_database_id(src.GetDatabaseId()); consumer.set_database(src.GetDatabase()); consumer.set_topic_path(src.GetTopicPath()); consumer.set_consumer_name(src.GetConsumerName()); - consumer.set_cluster_endpoint(src.GetEndpoint()); - consumer.set_use_ssl(src.GetUseSsl()); - consumer.set_token_name(src.GetToken().GetName()); - consumer.set_add_bearer_to_token(src.GetAddBearerToToken()); - - // Save for deletion - Params.CreatedTopicConsumers.push_back(consumer); + consumer.set_cluster_endpoint(src.GetEndpoint()); + consumer.set_use_ssl(src.GetUseSsl()); + consumer.set_token_name(src.GetToken().GetName()); + consumer.set_add_bearer_to_token(src.GetAddBearerToToken()); + + // Save for deletion + Params.CreatedTopicConsumers.push_back(consumer); } for (const auto& graphParams : DqGraphParams) { @@ -566,14 +566,14 @@ private: Send(Pinger, new TEvents::TEvForwardPingRequest(request), 0, SaveQueryInfoCookie); } - void SetLoadFromCheckpointMode() { - Yq::Private::PingTaskRequest request; - request.set_state_load_mode(YandexQuery::FROM_LAST_CHECKPOINT); - request.mutable_disposition()->mutable_from_last_checkpoint(); - - Send(Pinger, new TEvents::TEvForwardPingRequest(request), 0, SetLoadFromCheckpointModeCookie); - } - + void SetLoadFromCheckpointMode() { + Yq::Private::PingTaskRequest request; + 
request.set_state_load_mode(YandexQuery::FROM_LAST_CHECKPOINT); + request.mutable_disposition()->mutable_from_last_checkpoint(); + + Send(Pinger, new TEvents::TEvForwardPingRequest(request), 0, SetLoadFromCheckpointModeCookie); + } + TString BuildNormalizedStatistics(const NDqProto::TQueryResponse& response) { struct TStatisticsNode { @@ -645,7 +645,7 @@ private: return out.Str(); } - void SaveStatistics(const NYql::NDqProto::TQueryResponse& result) { + void SaveStatistics(const NYql::NDqProto::TQueryResponse& result) { // Yson routines are very strict, so it's better to try-catch them try { Statistics.push_back(BuildNormalizedStatistics(result)); @@ -662,149 +662,149 @@ private: } catch (NYson::TYsonException& ex) { LOG_E(ex.what()); } + } + + void AddIssues(const google::protobuf::RepeatedPtrField<Ydb::Issue::IssueMessage>& issuesProto) { + TIssues issues; + IssuesFromMessage(issuesProto, issues); + Issues.AddIssues(issues); + } + + void SaveQueryResponse(NYql::NDqs::TEvQueryResponse::TPtr& ev) { + auto& result = ev->Get()->Record; + LOG_D("Query response. Retryable: " << result.GetRetriable() + << ". Result set index: " << DqGraphIndex + << ". Issues count: " << result.IssuesSize() + << ". Rows count: " << result.GetRowsCount()); + + AddIssues(result.issues()); + RetryNeeded |= result.GetRetriable(); + + if (Finishing && !result.issues_size()) { // Race between abort and successful finishing. Override with success and provide results to user. + FinalQueryStatus = YandexQuery::QueryMeta::COMPLETED; + Issues.Clear(); + } + + auto resultSetIndex = DqGrapResultIndices.at(DqGraphIndex); + if (resultSetIndex >= 0) { + auto& header = *QueryStateUpdateRequest.mutable_result_set_meta(resultSetIndex); + header.set_truncated(result.GetTruncated()); + header.set_rows_count(result.GetRowsCount()); + } + + QueryStateUpdateRequest.mutable_result_id()->set_value(Params.ResultId); + + SaveStatistics(result); + + KillExecuter(); + } + + void Handle(NYql::NDqs::TEvQueryResponse::TPtr& ev) { + SaveQueryResponse(ev); + + const bool failure = Issues.Size() > 0; + const bool finalize = failure || DqGraphIndex + 1 >= static_cast<i32>(DqGraphParams.size()); + if (finalize) { + if (RetryNeeded) { + ResignQuery(); + return; + } + + Finish(GetFinishStatus(!failure)); + return; + } + + // Continue with the next graph + QueryStateUpdateRequest.set_dq_graph_index(++DqGraphIndex); + RunNextDqGraph(); + LOG_D("Send save query response request to pinger"); + Send(Pinger, new TEvents::TEvForwardPingRequest(QueryStateUpdateRequest)); } - void AddIssues(const google::protobuf::RepeatedPtrField<Ydb::Issue::IssueMessage>& issuesProto) { - TIssues issues; - IssuesFromMessage(issuesProto, issues); - Issues.AddIssues(issues); - } - - void SaveQueryResponse(NYql::NDqs::TEvQueryResponse::TPtr& ev) { - auto& result = ev->Get()->Record; - LOG_D("Query response. Retryable: " << result.GetRetriable() - << ". Result set index: " << DqGraphIndex - << ". Issues count: " << result.IssuesSize() - << ". Rows count: " << result.GetRowsCount()); - - AddIssues(result.issues()); - RetryNeeded |= result.GetRetriable(); - - if (Finishing && !result.issues_size()) { // Race between abort and successful finishing. Override with success and provide results to user. 
- FinalQueryStatus = YandexQuery::QueryMeta::COMPLETED; - Issues.Clear(); - } - - auto resultSetIndex = DqGrapResultIndices.at(DqGraphIndex); - if (resultSetIndex >= 0) { - auto& header = *QueryStateUpdateRequest.mutable_result_set_meta(resultSetIndex); - header.set_truncated(result.GetTruncated()); - header.set_rows_count(result.GetRowsCount()); - } - - QueryStateUpdateRequest.mutable_result_id()->set_value(Params.ResultId); - - SaveStatistics(result); - - KillExecuter(); - } - - void Handle(NYql::NDqs::TEvQueryResponse::TPtr& ev) { - SaveQueryResponse(ev); - - const bool failure = Issues.Size() > 0; - const bool finalize = failure || DqGraphIndex + 1 >= static_cast<i32>(DqGraphParams.size()); - if (finalize) { - if (RetryNeeded) { - ResignQuery(); - return; - } - - Finish(GetFinishStatus(!failure)); - return; - } - - // Continue with the next graph - QueryStateUpdateRequest.set_dq_graph_index(++DqGraphIndex); - RunNextDqGraph(); - LOG_D("Send save query response request to pinger"); - Send(Pinger, new TEvents::TEvForwardPingRequest(QueryStateUpdateRequest)); - } - - void HandleFinish(NYql::NDqs::TEvQueryResponse::TPtr& ev) { - // In this case we can have race between normal finishing of running query and aborting it. - // If query is finished with success error code or failure != abort, we override abort with this result. - // This logic is located in SaveQueryResponse() method. - ev->Get()->Record.SetRetriable(false); // User aborted => don't retry, only show issues - - QueryResponseArrived = true; - SaveQueryResponse(ev); - - ContinueFinish(); - } - + void HandleFinish(NYql::NDqs::TEvQueryResponse::TPtr& ev) { + // In this case we can have race between normal finishing of running query and aborting it. + // If query is finished with success error code or failure != abort, we override abort with this result. + // This logic is located in SaveQueryResponse() method. + ev->Get()->Record.SetRetriable(false); // User aborted => don't retry, only show issues + + QueryResponseArrived = true; + SaveQueryResponse(ev); + + ContinueFinish(); + } + void Handle(NYq::TEvents::TEvDataStreamsReadRulesCreationResult::TPtr& ev) { - LOG_D("Read rules creation finished. Issues: " << ev->Get()->Issues.Size()); - ReadRulesCreatorId = {}; - if (ev->Get()->Issues) { - AddIssueWithSubIssues("Problems with read rules creation", ev->Get()->Issues); - LOG_D(Issues.ToOneLineString()); - Finish(YandexQuery::QueryMeta::FAILED); + LOG_D("Read rules creation finished. Issues: " << ev->Get()->Issues.Size()); + ReadRulesCreatorId = {}; + if (ev->Get()->Issues) { + AddIssueWithSubIssues("Problems with read rules creation", ev->Get()->Issues); + LOG_D(Issues.ToOneLineString()); + Finish(YandexQuery::QueryMeta::FAILED); } else { RunDqGraphs(); } } void HandleFinish(NYq::TEvents::TEvDataStreamsReadRulesCreationResult::TPtr& ev) { - ReadRulesCreatorId = {}; - if (ev->Get()->Issues) { - TransientIssues.AddIssues(ev->Get()->Issues); - LOG_D(TransientIssues.ToOneLineString()); - } - if (CanRunReadRulesDeletionActor()) { - RunReadRulesDeletionActor(); - } - } - - void HandleFinish(TEvents::TEvDataStreamsReadRulesDeletionResult::TPtr& ev) { - ConsumersAreDeleted = true; // Don't print extra warnings. 
- - if (ev->Get()->TransientIssues) { - for (const auto& issue : ev->Get()->TransientIssues) { - TransientIssues.AddIssue(issue); - } - } - - ContinueFinish(); - } - - bool NeedDeleteReadRules() const { - return !Params.CreatedTopicConsumers.empty(); - } - - bool CanRunReadRulesDeletionActor() const { - return !ReadRulesCreatorId && FinalizingStatusIsWritten && QueryResponseArrived; - } - - void RunReadRulesDeletionActor() { - TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials; + ReadRulesCreatorId = {}; + if (ev->Get()->Issues) { + TransientIssues.AddIssues(ev->Get()->Issues); + LOG_D(TransientIssues.ToOneLineString()); + } + if (CanRunReadRulesDeletionActor()) { + RunReadRulesDeletionActor(); + } + } + + void HandleFinish(TEvents::TEvDataStreamsReadRulesDeletionResult::TPtr& ev) { + ConsumersAreDeleted = true; // Don't print extra warnings. + + if (ev->Get()->TransientIssues) { + for (const auto& issue : ev->Get()->TransientIssues) { + TransientIssues.AddIssue(issue); + } + } + + ContinueFinish(); + } + + bool NeedDeleteReadRules() const { + return !Params.CreatedTopicConsumers.empty(); + } + + bool CanRunReadRulesDeletionActor() const { + return !ReadRulesCreatorId && FinalizingStatusIsWritten && QueryResponseArrived; + } + + void RunReadRulesDeletionActor() { + TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials; credentials.reserve(Params.CreatedTopicConsumers.size()); for (const Yq::Private::TopicConsumer& c : Params.CreatedTopicConsumers) { - if (const TString& tokenName = c.token_name()) { - credentials.emplace_back( - CreateCredentialsProviderFactoryForStructuredToken(Params.CredentialsFactory, FindTokenByName(tokenName), c.add_bearer_to_token())); - } else { - credentials.emplace_back(NYdb::CreateInsecureCredentialsProviderFactory()); - } - } - - Register( - ::NYq::MakeReadRuleDeleterActor( - SelfId(), + if (const TString& tokenName = c.token_name()) { + credentials.emplace_back( + CreateCredentialsProviderFactoryForStructuredToken(Params.CredentialsFactory, FindTokenByName(tokenName), c.add_bearer_to_token())); + } else { + credentials.emplace_back(NYdb::CreateInsecureCredentialsProviderFactory()); + } + } + + Register( + ::NYq::MakeReadRuleDeleterActor( + SelfId(), Params.QueryId, - Params.Driver, - Params.CreatedTopicConsumers, - std::move(credentials) - ) - ); - } - + Params.Driver, + Params.CreatedTopicConsumers, + std::move(credentials) + ) + ); + } + void RunDqGraphs() { if (DqGraphParams.empty()) { *QueryStateUpdateRequest.mutable_started_at() = google::protobuf::util::TimeUtil::MillisecondsToTimestamp(CreatedAt.MilliSeconds()); QueryStateUpdateRequest.set_resign_query(false); const bool isOk = Issues.Size() == 0; - Finish(GetFinishStatus(isOk)); + Finish(GetFinishStatus(isOk)); return; } @@ -884,16 +884,16 @@ private: ControlId = NActors::TActivationContext::Register(NYql::MakeTaskController(SessionId, ExecuterId, resultId, dqConfiguration, QueryCounters, TDuration::Seconds(3)).Release()); if (EnableCheckpointCoordinator) { - CheckpointCoordinatorId = NActors::TActivationContext::Register(MakeCheckpointCoordinator( - ::NYq::TCoordinatorId(Params.QueryId + "-" + ToString(DqGraphIndex), Params.PreviousQueryRevision), - ControlId, - NYql::NDq::MakeCheckpointStorageID(), - SelfId(), - Params.CheckpointCoordinatorConfig, - QueryCounters.Counters, - dqGraphParams, - Params.StateLoadMode, - Params.StreamingDisposition).Release()); + CheckpointCoordinatorId = NActors::TActivationContext::Register(MakeCheckpointCoordinator( + 
::NYq::TCoordinatorId(Params.QueryId + "-" + ToString(DqGraphIndex), Params.PreviousQueryRevision), + ControlId, + NYql::NDq::MakeCheckpointStorageID(), + SelfId(), + Params.CheckpointCoordinatorConfig, + QueryCounters.Counters, + dqGraphParams, + Params.StateLoadMode, + Params.StreamingDisposition).Release()); } Yql::DqsProto::ExecuteGraphRequest request; @@ -904,8 +904,8 @@ private: *request.MutableSecureParams() = dqGraphParams.GetSecureParams(); *request.MutableColumns() = dqGraphParams.GetColumns(); NTasksPacker::UnPack(*request.MutableTask(), dqGraphParams.GetTasks(), dqGraphParams.GetStageProgram()); - NActors::TActivationContext::Send(new IEventHandle(ExecuterId, SelfId(), new NYql::NDqs::TEvGraphRequest(request, ControlId, resultId, CheckpointCoordinatorId))); - LOG_D("Executer: " << ExecuterId << ", Controller: " << ControlId << ", ResultIdActor: " << resultId << ", CheckPointCoordinatior " << CheckpointCoordinatorId); + NActors::TActivationContext::Send(new IEventHandle(ExecuterId, SelfId(), new NYql::NDqs::TEvGraphRequest(request, ControlId, resultId, CheckpointCoordinatorId))); + LOG_D("Executer: " << ExecuterId << ", Controller: " << ControlId << ", ResultIdActor: " << resultId << ", CheckPointCoordinatior " << CheckpointCoordinatorId); } void SetupDqSettings(::google::protobuf::RepeatedPtrField< ::NYql::TAttr>& dqSettings) const { @@ -959,7 +959,7 @@ private: } } - YandexQuery::QueryMeta::ComputeStatus GetFinishStatus(bool isOk) const { + YandexQuery::QueryMeta::ComputeStatus GetFinishStatus(bool isOk) const { if (isOk) { return YandexQuery::QueryMeta::COMPLETED; } @@ -979,115 +979,115 @@ private: } } - YandexQuery::QueryMeta::ComputeStatus GetFinalizingStatus() { // Status before final. "*ING" one. - switch (FinalQueryStatus) { - case YandexQuery::QueryMeta_ComputeStatus_QueryMeta_ComputeStatus_INT_MIN_SENTINEL_DO_NOT_USE_: - case YandexQuery::QueryMeta_ComputeStatus_QueryMeta_ComputeStatus_INT_MAX_SENTINEL_DO_NOT_USE_: - case YandexQuery::QueryMeta::COMPUTE_STATUS_UNSPECIFIED: - case YandexQuery::QueryMeta::STARTING: - case YandexQuery::QueryMeta::ABORTING_BY_USER: - case YandexQuery::QueryMeta::ABORTING_BY_SYSTEM: - case YandexQuery::QueryMeta::RESUMING: - case YandexQuery::QueryMeta::RUNNING: - case YandexQuery::QueryMeta::COMPLETING: - case YandexQuery::QueryMeta::FAILING: - case YandexQuery::QueryMeta::PAUSING: { - TStringBuilder msg; - msg << "\"" << YandexQuery::QueryMeta::ComputeStatus_Name(FinalQueryStatus) << "\" is not a final status for query"; - Issues.AddIssue(msg); - throw yexception() << msg; - } - - case YandexQuery::QueryMeta::ABORTED_BY_USER: - return YandexQuery::QueryMeta::ABORTING_BY_USER; - case YandexQuery::QueryMeta::ABORTED_BY_SYSTEM: - return YandexQuery::QueryMeta::ABORTING_BY_SYSTEM; - case YandexQuery::QueryMeta::COMPLETED: - return YandexQuery::QueryMeta::COMPLETING; - case YandexQuery::QueryMeta::FAILED: - return YandexQuery::QueryMeta::FAILING; - case YandexQuery::QueryMeta::PAUSED: - return YandexQuery::QueryMeta::PAUSING; - } - } - - static YandexQuery::QueryMeta::ComputeStatus GetFinalStatusFromFinalizingStatus(YandexQuery::QueryMeta::ComputeStatus status) { - switch (status) { - case YandexQuery::QueryMeta::ABORTING_BY_USER: - return YandexQuery::QueryMeta::ABORTED_BY_USER; - case YandexQuery::QueryMeta::ABORTING_BY_SYSTEM: - return YandexQuery::QueryMeta::ABORTED_BY_SYSTEM; - case YandexQuery::QueryMeta::COMPLETING: - return YandexQuery::QueryMeta::COMPLETED; - case YandexQuery::QueryMeta::FAILING: - return 
YandexQuery::QueryMeta::FAILED; - default: - return YandexQuery::QueryMeta::COMPUTE_STATUS_UNSPECIFIED; - } - } - - void WriteFinalizingStatus() { - const YandexQuery::QueryMeta::ComputeStatus finalizingStatus = GetFinalizingStatus(); - Params.Status = finalizingStatus; - LOG_D("Write finalizing status: " << YandexQuery::QueryMeta::ComputeStatus_Name(finalizingStatus)); - Yq::Private::PingTaskRequest request; - request.set_status(finalizingStatus); - Send(Pinger, new TEvents::TEvForwardPingRequest(request), 0, SaveFinalizingStatusCookie); - } - - void Finish(YandexQuery::QueryMeta::ComputeStatus status) { + YandexQuery::QueryMeta::ComputeStatus GetFinalizingStatus() { // Status before final. "*ING" one. + switch (FinalQueryStatus) { + case YandexQuery::QueryMeta_ComputeStatus_QueryMeta_ComputeStatus_INT_MIN_SENTINEL_DO_NOT_USE_: + case YandexQuery::QueryMeta_ComputeStatus_QueryMeta_ComputeStatus_INT_MAX_SENTINEL_DO_NOT_USE_: + case YandexQuery::QueryMeta::COMPUTE_STATUS_UNSPECIFIED: + case YandexQuery::QueryMeta::STARTING: + case YandexQuery::QueryMeta::ABORTING_BY_USER: + case YandexQuery::QueryMeta::ABORTING_BY_SYSTEM: + case YandexQuery::QueryMeta::RESUMING: + case YandexQuery::QueryMeta::RUNNING: + case YandexQuery::QueryMeta::COMPLETING: + case YandexQuery::QueryMeta::FAILING: + case YandexQuery::QueryMeta::PAUSING: { + TStringBuilder msg; + msg << "\"" << YandexQuery::QueryMeta::ComputeStatus_Name(FinalQueryStatus) << "\" is not a final status for query"; + Issues.AddIssue(msg); + throw yexception() << msg; + } + + case YandexQuery::QueryMeta::ABORTED_BY_USER: + return YandexQuery::QueryMeta::ABORTING_BY_USER; + case YandexQuery::QueryMeta::ABORTED_BY_SYSTEM: + return YandexQuery::QueryMeta::ABORTING_BY_SYSTEM; + case YandexQuery::QueryMeta::COMPLETED: + return YandexQuery::QueryMeta::COMPLETING; + case YandexQuery::QueryMeta::FAILED: + return YandexQuery::QueryMeta::FAILING; + case YandexQuery::QueryMeta::PAUSED: + return YandexQuery::QueryMeta::PAUSING; + } + } + + static YandexQuery::QueryMeta::ComputeStatus GetFinalStatusFromFinalizingStatus(YandexQuery::QueryMeta::ComputeStatus status) { + switch (status) { + case YandexQuery::QueryMeta::ABORTING_BY_USER: + return YandexQuery::QueryMeta::ABORTED_BY_USER; + case YandexQuery::QueryMeta::ABORTING_BY_SYSTEM: + return YandexQuery::QueryMeta::ABORTED_BY_SYSTEM; + case YandexQuery::QueryMeta::COMPLETING: + return YandexQuery::QueryMeta::COMPLETED; + case YandexQuery::QueryMeta::FAILING: + return YandexQuery::QueryMeta::FAILED; + default: + return YandexQuery::QueryMeta::COMPUTE_STATUS_UNSPECIFIED; + } + } + + void WriteFinalizingStatus() { + const YandexQuery::QueryMeta::ComputeStatus finalizingStatus = GetFinalizingStatus(); + Params.Status = finalizingStatus; + LOG_D("Write finalizing status: " << YandexQuery::QueryMeta::ComputeStatus_Name(finalizingStatus)); + Yq::Private::PingTaskRequest request; + request.set_status(finalizingStatus); + Send(Pinger, new TEvents::TEvForwardPingRequest(request), 0, SaveFinalizingStatusCookie); + } + + void Finish(YandexQuery::QueryMeta::ComputeStatus status) { LOG_D("Is about to finish query with status " << YandexQuery::QueryMeta::ComputeStatus_Name(status));; - Finishing = true; - RetryNeeded = false; - FinalQueryStatus = status; - QueryStateUpdateRequest.set_status(FinalQueryStatus); // Can be changed later. + Finishing = true; + RetryNeeded = false; + FinalQueryStatus = status; + QueryStateUpdateRequest.set_status(FinalQueryStatus); // Can be changed later. 
*QueryStateUpdateRequest.mutable_finished_at() = google::protobuf::util::TimeUtil::MillisecondsToTimestamp(TInstant::Now().MilliSeconds()); - Become(&TRunActor::StateFuncWrapper<&TRunActor::FinishStateFunc>); + Become(&TRunActor::StateFuncWrapper<&TRunActor::FinishStateFunc>); - if (!FinalizingStatusIsWritten) { - WriteFinalizingStatus(); - } + if (!FinalizingStatusIsWritten) { + WriteFinalizingStatus(); + } - CancelRunningQuery(); - ContinueFinish(); - } + CancelRunningQuery(); + ContinueFinish(); + } - void ContinueFinish() { - if (NeedDeleteReadRules() && !ConsumersAreDeleted) { - if (CanRunReadRulesDeletionActor()) { - RunReadRulesDeletionActor(); + void ContinueFinish() { + if (NeedDeleteReadRules() && !ConsumersAreDeleted) { + if (CanRunReadRulesDeletionActor()) { + RunReadRulesDeletionActor(); } - return; - } - - SendPingAndPassAway(); - } - - void ResignQuery() { - QueryStateUpdateRequest.set_resign_query(true); - SendPingAndPassAway(); - } - - void SendPingAndPassAway() { - // Run ping. - if (QueryStateUpdateRequest.resign_query()) { // Retry state => all issues are not fatal. - TransientIssues.AddIssues(Issues); - Issues.Clear(); - } - - NYql::IssuesToMessage(TransientIssues, QueryStateUpdateRequest.mutable_transient_issues()); - NYql::IssuesToMessage(Issues, QueryStateUpdateRequest.mutable_issues()); - - Send(Pinger, new TEvents::TEvForwardPingRequest(QueryStateUpdateRequest, true)); - + return; + } + + SendPingAndPassAway(); + } + + void ResignQuery() { + QueryStateUpdateRequest.set_resign_query(true); + SendPingAndPassAway(); + } + + void SendPingAndPassAway() { + // Run ping. + if (QueryStateUpdateRequest.resign_query()) { // Retry state => all issues are not fatal. + TransientIssues.AddIssues(Issues); + Issues.Clear(); + } + + NYql::IssuesToMessage(TransientIssues, QueryStateUpdateRequest.mutable_transient_issues()); + NYql::IssuesToMessage(Issues, QueryStateUpdateRequest.mutable_issues()); + + Send(Pinger, new TEvents::TEvForwardPingRequest(QueryStateUpdateRequest, true)); + PassAway(); } - void Abort(const TString& message, YandexQuery::QueryMeta::ComputeStatus status, const NYql::TIssues& issues = {}) { - AddIssueWithSubIssues(message, issues); - Finish(status); - } - + void Abort(const TString& message, YandexQuery::QueryMeta::ComputeStatus status, const NYql::TIssues& issues = {}) { + AddIssueWithSubIssues(message, issues); + Finish(status); + } + void FillDqGraphParams() { for (const auto& s : Params.DqGraphs) { NYq::NProto::TGraphParams dqGraphParams; @@ -1152,13 +1152,13 @@ private: } { - NYql::TPqGatewayServices pqServices( - Params.Driver, - Params.PqCmConnections, - Params.CredentialsFactory, - std::make_shared<NYql::TPqGatewayConfig>(gatewaysConfig.GetPq()), - Params.FunctionRegistry - ); + NYql::TPqGatewayServices pqServices( + Params.Driver, + Params.PqCmConnections, + Params.CredentialsFactory, + std::make_shared<NYql::TPqGatewayConfig>(gatewaysConfig.GetPq()), + Params.FunctionRegistry + ); const auto pqGateway = NYql::CreatePqNativeGateway(pqServices); dataProvidersInit.push_back(GetPqDataProviderInitializer(pqGateway, false, dbResolver)); } @@ -1267,61 +1267,61 @@ private: if (status == TProgram::TStatus::Ok || (DqGraphParams.size() > 0 && !DqGraphParams[0].GetResultType())) { PrepareGraphs(); } else { - Abort(TStringBuilder() << "Run query failed: " << ToString(status), YandexQuery::QueryMeta::FAILED, Program->Issues()); + Abort(TStringBuilder() << "Run query failed: " << ToString(status), YandexQuery::QueryMeta::FAILED, Program->Issues()); } } - void 
Handle(NActors::TEvents::TEvUndelivered::TPtr&) { + void Handle(NActors::TEvents::TEvUndelivered::TPtr&) { Fail("TRunActor::OnUndelivered"); } - TString FindTokenByName(const TString& tokenName) const { - for (auto& graphParams : DqGraphParams) { - const auto& secureParams = graphParams.GetSecureParams(); - const auto token = secureParams.find(tokenName); - if (token != secureParams.end()) { - return token->second; - } - } + TString FindTokenByName(const TString& tokenName) const { + for (auto& graphParams : DqGraphParams) { + const auto& secureParams = graphParams.GetSecureParams(); + const auto token = secureParams.find(tokenName); + if (token != secureParams.end()) { + return token->second; + } + } throw yexception() << "Token " << tokenName << " was not found in secure params"; - } - - void AddIssueWithSubIssues(const TString& message, const NYql::TIssues& issues) { - NYql::TIssue issue(message); - for (const NYql::TIssue& i : issues) { - issue.AddSubIssue(MakeIntrusive<NYql::TIssue>(i)); - } - Issues.AddIssue(std::move(issue)); - } - - void LogReceivedParams() { - LOG_D("Run actors params: { QueryId: " << Params.QueryId - << " CloudId: " << Params.CloudId - << " UserId: " << Params.UserId - << " Owner: " << Params.Owner - << " PreviousQueryRevision: " << Params.PreviousQueryRevision - << " Connections: " << Params.Connections.size() - << " Bindings: " << Params.Bindings.size() - << " AccountIdSignatures: " << Params.AccountIdSignatures.size() - << " QueryType: " << YandexQuery::QueryContent::QueryType_Name(Params.QueryType) - << " ExecuteMode: " << YandexQuery::ExecuteMode_Name(Params.ExecuteMode) - << " ResultId: " << Params.ResultId - << " StateLoadMode: " << YandexQuery::StateLoadMode_Name(Params.StateLoadMode) - << " StreamingDisposition: " << Params.StreamingDisposition - << " Status: " << YandexQuery::QueryMeta::ComputeStatus_Name(Params.Status) - << " DqGraphs: " << Params.DqGraphs.size() - << " DqGraphIndex: " << Params.DqGraphIndex - << " CreatedTopicConsumers: " << Params.CreatedTopicConsumers.size() - << " }"); - } - + } + + void AddIssueWithSubIssues(const TString& message, const NYql::TIssues& issues) { + NYql::TIssue issue(message); + for (const NYql::TIssue& i : issues) { + issue.AddSubIssue(MakeIntrusive<NYql::TIssue>(i)); + } + Issues.AddIssue(std::move(issue)); + } + + void LogReceivedParams() { + LOG_D("Run actors params: { QueryId: " << Params.QueryId + << " CloudId: " << Params.CloudId + << " UserId: " << Params.UserId + << " Owner: " << Params.Owner + << " PreviousQueryRevision: " << Params.PreviousQueryRevision + << " Connections: " << Params.Connections.size() + << " Bindings: " << Params.Bindings.size() + << " AccountIdSignatures: " << Params.AccountIdSignatures.size() + << " QueryType: " << YandexQuery::QueryContent::QueryType_Name(Params.QueryType) + << " ExecuteMode: " << YandexQuery::ExecuteMode_Name(Params.ExecuteMode) + << " ResultId: " << Params.ResultId + << " StateLoadMode: " << YandexQuery::StateLoadMode_Name(Params.StateLoadMode) + << " StreamingDisposition: " << Params.StreamingDisposition + << " Status: " << YandexQuery::QueryMeta::ComputeStatus_Name(Params.Status) + << " DqGraphs: " << Params.DqGraphs.size() + << " DqGraphIndex: " << Params.DqGraphIndex + << " CreatedTopicConsumers: " << Params.CreatedTopicConsumers.size() + << " }"); + } + private: - TRunActorParams Params; + TRunActorParams Params; THashMap<TString, YandexQuery::Connection> YqConnections; TProgramPtr Program; TIssues Issues; - TIssues TransientIssues; + TIssues TransientIssues; 
TQueryResult QueryResult; TInstant Deadline; TActorId Pinger; @@ -1334,37 +1334,37 @@ private: NMonitoring::TDynamicCounterPtr PublicCountersParent; NActors::TActorId ExecuterId; NActors::TActorId ControlId; - NActors::TActorId CheckpointCoordinatorId; + NActors::TActorId CheckpointCoordinatorId; TString SessionId; ::NYq::NCommon::TServiceCounters ServiceCounters; ::NYq::NCommon::TServiceCounters QueryCounters; bool EnableCheckpointCoordinator = false; bool RetryNeeded = false; Yq::Private::PingTaskRequest QueryStateUpdateRequest; - THashMap<TString, YandexQuery::Connection> Connections; // Necessary for DbAsyncResolver - + THashMap<TString, YandexQuery::Connection> Connections; // Necessary for DbAsyncResolver + const ui64 MaxTasksPerOperation = 100; - // Consumers creation - TVector<NYql::NPq::NProto::TDqPqTopicSource> TopicsForConsumersCreation; - TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> CredentialsForConsumersCreation; + // Consumers creation + TVector<NYql::NPq::NProto::TDqPqTopicSource> TopicsForConsumersCreation; + TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> CredentialsForConsumersCreation; TVector<TString> Statistics; - NActors::TActorId ReadRulesCreatorId; - - // Finish - bool Finishing = false; - bool ConsumersAreDeleted = false; - bool FinalizingStatusIsWritten = false; - bool QueryResponseArrived = false; - YandexQuery::QueryMeta::ComputeStatus FinalQueryStatus = YandexQuery::QueryMeta::COMPUTE_STATUS_UNSPECIFIED; // Status that will be assigned to query after it finishes. - - // Cookies for pings - enum : ui64 { + NActors::TActorId ReadRulesCreatorId; + + // Finish + bool Finishing = false; + bool ConsumersAreDeleted = false; + bool FinalizingStatusIsWritten = false; + bool QueryResponseArrived = false; + YandexQuery::QueryMeta::ComputeStatus FinalQueryStatus = YandexQuery::QueryMeta::COMPUTE_STATUS_UNSPECIFIED; // Status that will be assigned to query after it finishes. 
+ + // Cookies for pings + enum : ui64 { SaveQueryInfoCookie = 1, - UpdateQueryInfoCookie, - SaveFinalizingStatusCookie, - SetLoadFromCheckpointModeCookie, - }; + UpdateQueryInfoCookie, + SaveFinalizingStatusCookie, + SetLoadFromCheckpointModeCookie, + }; }; diff --git a/ydb/core/yq/libs/actors/run_actor_params.cpp b/ydb/core/yq/libs/actors/run_actor_params.cpp index 1446cae1bbe..538e9242ced 100644 --- a/ydb/core/yq/libs/actors/run_actor_params.cpp +++ b/ydb/core/yq/libs/actors/run_actor_params.cpp @@ -12,9 +12,9 @@ TRunActorParams::TRunActorParams( NYql::IModuleResolver::TPtr& moduleResolver, ui64 nextUniqueId, NKikimr::NMiniKQL::TComputationNodeFactory dqCompFactory, - ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, + ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, const ::NYq::NConfig::TCommonConfig& commonConfig, - const ::NYq::NConfig::TCheckpointCoordinatorConfig& checkpointCoordinatorConfig, + const ::NYq::NConfig::TCheckpointCoordinatorConfig& checkpointCoordinatorConfig, const ::NYq::NConfig::TPrivateApiConfig& privateApiConfig, const ::NYq::NConfig::TGatewaysConfig& gatewaysConfig, const ::NYq::NConfig::TPingerConfig& pingerConfig, @@ -34,12 +34,12 @@ TRunActorParams::TRunActorParams( YandexQuery::ExecuteMode executeMode, const TString& resultId, const YandexQuery::StateLoadMode stateLoadMode, - const YandexQuery::StreamingDisposition& streamingDisposition, + const YandexQuery::StreamingDisposition& streamingDisposition, YandexQuery::QueryMeta::ComputeStatus status, const TString& cloudId, TVector<YandexQuery::ResultSetMeta> resultSetMetas, TVector<TString> dqGraphs, - int32_t dqGraphIndex, + int32_t dqGraphIndex, TVector<Yq::Private::TopicConsumer> createdTopicConsumers, bool automatic, const TString& queryName, @@ -53,9 +53,9 @@ TRunActorParams::TRunActorParams( , ModuleResolver(moduleResolver) , NextUniqueId(nextUniqueId) , DqCompFactory(dqCompFactory) - , PqCmConnections(std::move(pqCmConnections)) + , PqCmConnections(std::move(pqCmConnections)) , CommonConfig(commonConfig) - , CheckpointCoordinatorConfig(checkpointCoordinatorConfig) + , CheckpointCoordinatorConfig(checkpointCoordinatorConfig) , PrivateApiConfig(privateApiConfig) , GatewaysConfig(gatewaysConfig) , PingerConfig(pingerConfig) @@ -75,13 +75,13 @@ TRunActorParams::TRunActorParams( , ExecuteMode(executeMode) , ResultId(resultId) , StateLoadMode(stateLoadMode) - , StreamingDisposition(streamingDisposition) + , StreamingDisposition(streamingDisposition) , Status(status) , CloudId(cloudId) , ResultSetMetas(std::move(resultSetMetas)) , DqGraphs(std::move(dqGraphs)) , DqGraphIndex(dqGraphIndex) - , CreatedTopicConsumers(std::move(createdTopicConsumers)) + , CreatedTopicConsumers(std::move(createdTopicConsumers)) , Automatic(automatic) , QueryName(queryName) , Deadline(deadline) diff --git a/ydb/core/yq/libs/actors/run_actor_params.h b/ydb/core/yq/libs/actors/run_actor_params.h index bad5c3c32b4..970254a0df8 100644 --- a/ydb/core/yq/libs/actors/run_actor_params.h +++ b/ydb/core/yq/libs/actors/run_actor_params.h @@ -3,7 +3,7 @@ #include <ydb/core/yq/libs/config/protos/pinger.pb.h> #include <ydb/core/yq/libs/config/protos/yq_config.pb.h> #include <ydb/core/yq/libs/events/events.h> - + #include <ydb/library/yql/providers/common/token_accessor/client/factory.h> #include <ydb/library/yql/minikql/computation/mkql_computation_node.h> #include <ydb/library/yql/providers/dq/provider/yql_dq_gateway.h> @@ -11,10 +11,10 @@ #include 
<ydb/library/yql/providers/solomon/provider/yql_solomon_gateway.h> #include <ydb/library/yql/providers/pq/cm_client/interface/client.h> -#include <library/cpp/actors/core/actorsystem.h> -#include <library/cpp/time_provider/time_provider.h> -#include <library/cpp/random_provider/random_provider.h> - +#include <library/cpp/actors/core/actorsystem.h> +#include <library/cpp/time_provider/time_provider.h> +#include <library/cpp/random_provider/random_provider.h> + namespace NYq { struct TRunActorParams { // TODO2 : Change name @@ -26,9 +26,9 @@ struct TRunActorParams { // TODO2 : Change name NYql::IModuleResolver::TPtr& moduleResolver, ui64 nextUniqueId, NKikimr::NMiniKQL::TComputationNodeFactory dqCompFactory, - ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, + ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, const ::NYq::NConfig::TCommonConfig& commonConfig, - const ::NYq::NConfig::TCheckpointCoordinatorConfig& checkpointCoordinatorConfig, + const ::NYq::NConfig::TCheckpointCoordinatorConfig& checkpointCoordinatorConfig, const ::NYq::NConfig::TPrivateApiConfig& privateApiConfig, const ::NYq::NConfig::TGatewaysConfig& gatewaysConfig, const ::NYq::NConfig::TPingerConfig& pingerConfig, @@ -48,12 +48,12 @@ struct TRunActorParams { // TODO2 : Change name YandexQuery::ExecuteMode executeMode, const TString& resultId, const YandexQuery::StateLoadMode stateLoadMode, - const YandexQuery::StreamingDisposition& streamingDisposition, + const YandexQuery::StreamingDisposition& streamingDisposition, YandexQuery::QueryMeta::ComputeStatus status, const TString& cloudId, TVector<YandexQuery::ResultSetMeta> resultSetMetas, - TVector<TString> dqGraphs, - int32_t dqGraphIndex, + TVector<TString> dqGraphs, + int32_t dqGraphIndex, TVector<Yq::Private::TopicConsumer> createdTopicConsumers, bool automatic, const TString& queryName, @@ -72,9 +72,9 @@ struct TRunActorParams { // TODO2 : Change name ui64 NextUniqueId; NKikimr::NMiniKQL::TComputationNodeFactory DqCompFactory; - ::NPq::NConfigurationManager::IConnections::TPtr PqCmConnections; + ::NPq::NConfigurationManager::IConnections::TPtr PqCmConnections; const ::NYq::NConfig::TCommonConfig CommonConfig; - const ::NYq::NConfig::TCheckpointCoordinatorConfig CheckpointCoordinatorConfig; + const ::NYq::NConfig::TCheckpointCoordinatorConfig CheckpointCoordinatorConfig; const ::NYq::NConfig::TPrivateApiConfig PrivateApiConfig; const ::NYq::NConfig::TGatewaysConfig GatewaysConfig; const ::NYq::NConfig::TPingerConfig PingerConfig; @@ -94,8 +94,8 @@ struct TRunActorParams { // TODO2 : Change name const YandexQuery::ExecuteMode ExecuteMode; const TString ResultId; const YandexQuery::StateLoadMode StateLoadMode; - const YandexQuery::StreamingDisposition StreamingDisposition; - YandexQuery::QueryMeta::ComputeStatus Status; + const YandexQuery::StreamingDisposition StreamingDisposition; + YandexQuery::QueryMeta::ComputeStatus Status; const TString CloudId; const TVector<YandexQuery::ResultSetMeta> ResultSetMetas; const TVector<TString> DqGraphs; diff --git a/ydb/core/yq/libs/actors/task_get.cpp b/ydb/core/yq/libs/actors/task_get.cpp index d31517aa9fe..e7505f30b02 100644 --- a/ydb/core/yq/libs/actors/task_get.cpp +++ b/ydb/core/yq/libs/actors/task_get.cpp @@ -18,10 +18,10 @@ #include <ydb/core/yq/libs/control_plane_storage/events/events.h> #include <ydb/library/security/util.h> -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivateGetTask - Owner: " << OwnerId << ", " << "Host: " << Host << ", 
"<< stream) -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivateGetTask - Owner: " << OwnerId << ", " << "Host: " << Host << ", " << stream) +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivateGetTask - Owner: " << OwnerId << ", " << "Host: " << Host << ", "<< stream) +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivateGetTask - Owner: " << OwnerId << ", " << "Host: " << Host << ", " << stream) namespace NYq { @@ -162,8 +162,8 @@ private: newTask->mutable_sensor_labels()->insert({"cloud_id", task.Internal.cloud_id()}); newTask->set_automatic(task.Query.content().automatic()); newTask->set_query_name(task.Query.content().name()); - *newTask->mutable_deadline() = NProtoInterop::CastToProto(task.Deadline); - newTask->mutable_disposition()->CopyFrom(task.Internal.disposition()); + *newTask->mutable_deadline() = NProtoInterop::CastToProto(task.Deadline); + newTask->mutable_disposition()->CopyFrom(task.Internal.disposition()); THashMap<TString, TString> accountIdSignatures; for (const auto& connection: task.Internal.connection()) { @@ -212,7 +212,7 @@ private: const THistogramPtr RequestedMBytes; const TInstant StartTime; - ::NYq::TSigner::TPtr Signer; + ::NYq::TSigner::TPtr Signer; NYql::TIssues Issues; TString OwnerId; diff --git a/ydb/core/yq/libs/actors/task_ping.cpp b/ydb/core/yq/libs/actors/task_ping.cpp index 29410dccd14..8e1e5895bec 100644 --- a/ydb/core/yq/libs/actors/task_ping.cpp +++ b/ydb/core/yq/libs/actors/task_ping.cpp @@ -15,10 +15,10 @@ #include <ydb/core/yq/libs/control_plane_storage/events/events.h> #include <google/protobuf/util/time_util.h> -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivatePingTask - QueryId: " << OperationId << ", Owner: " << OwnerId << ", " << stream) -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivatePingTask - QueryId: " << OperationId << ", Owner: " << OwnerId << ", "<< stream) +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivatePingTask - QueryId: " << OperationId << ", Owner: " << OwnerId << ", " << stream) +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivatePingTask - QueryId: " << OperationId << ", Owner: " << OwnerId << ", "<< stream) namespace NYq { @@ -153,19 +153,19 @@ private: event->FinishedAt = TInstant::FromValue(google::protobuf::util::TimeUtil::TimestampToMicroseconds(req.finished_at())); } event->ResignQuery = req.resign_query(); - - event->CreatedTopicConsumers.reserve(req.created_topic_consumers_size()); - for (const auto& topicConsumerProto : req.created_topic_consumers()) { - auto& topicConsumer = event->CreatedTopicConsumers.emplace_back(); - topicConsumer.DatabaseId = topicConsumerProto.database_id(); - topicConsumer.Database = topicConsumerProto.database(); - topicConsumer.TopicPath = topicConsumerProto.topic_path(); - topicConsumer.ConsumerName = topicConsumerProto.consumer_name(); - topicConsumer.ClusterEndpoint = topicConsumerProto.cluster_endpoint(); - topicConsumer.UseSsl = topicConsumerProto.use_ssl(); - topicConsumer.TokenName = topicConsumerProto.token_name(); - topicConsumer.AddBearerToToken = topicConsumerProto.add_bearer_to_token(); - } + + event->CreatedTopicConsumers.reserve(req.created_topic_consumers_size()); + for (const auto& 
topicConsumerProto : req.created_topic_consumers()) { + auto& topicConsumer = event->CreatedTopicConsumers.emplace_back(); + topicConsumer.DatabaseId = topicConsumerProto.database_id(); + topicConsumer.Database = topicConsumerProto.database(); + topicConsumer.TopicPath = topicConsumerProto.topic_path(); + topicConsumer.ConsumerName = topicConsumerProto.consumer_name(); + topicConsumer.ClusterEndpoint = topicConsumerProto.cluster_endpoint(); + topicConsumer.UseSsl = topicConsumerProto.use_ssl(); + topicConsumer.TokenName = topicConsumerProto.token_name(); + topicConsumer.AddBearerToToken = topicConsumerProto.add_bearer_to_token(); + } event->DqGraphs.reserve(req.dq_graph_size()); for (const auto& g : req.dq_graph()) { @@ -173,15 +173,15 @@ private: event->DqGraphs.emplace_back(g); } - if (req.state_load_mode()) { - event->StateLoadMode = req.state_load_mode(); - } - - if (req.has_disposition()) { - event->StreamingDisposition = req.disposition(); - } - + if (req.state_load_mode()) { + event->StateLoadMode = req.state_load_mode(); + } + if (req.has_disposition()) { + event->StreamingDisposition = req.disposition(); + } + + LOG_D("Statistics length: " << req.statistics().size() << ", " << "Ast length: " << req.ast().size() << " bytes, " << "Plan length: " << req.plan().size() << " bytes, " diff --git a/ydb/core/yq/libs/actors/task_result_write.cpp b/ydb/core/yq/libs/actors/task_result_write.cpp index 5919ddbaaba..f9098749c88 100644 --- a/ydb/core/yq/libs/actors/task_result_write.cpp +++ b/ydb/core/yq/libs/actors/task_result_write.cpp @@ -14,10 +14,10 @@ #include <ydb/core/yq/libs/control_plane_storage/events/events.h> #include <ydb/core/yq/libs/control_plane_storage/control_plane_storage.h> -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivateWriteTask - ResultId: " << ResultId << ", RequestId: " << RequestId << ", " << stream) -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivateWriteTask - ResultId: " << ResultId << ", RequestId: " << RequestId << ", " << stream) +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivateWriteTask - ResultId: " << ResultId << ", RequestId: " << RequestId << ", " << stream) +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::YQL_PRIVATE_PROXY, "PrivateWriteTask - ResultId: " << ResultId << ", RequestId: " << RequestId << ", " << stream) namespace NYq { diff --git a/ydb/core/yq/libs/actors/ya.make b/ydb/core/yq/libs/actors/ya.make index 36385938142..d268c418fda 100644 --- a/ydb/core/yq/libs/actors/ya.make +++ b/ydb/core/yq/libs/actors/ya.make @@ -33,12 +33,12 @@ PEERDIR( library/cpp/yson/node ydb/core/base ydb/core/protos - ydb/core/yq/libs/actors/logging - ydb/core/yq/libs/checkpointing - ydb/core/yq/libs/checkpointing_common + ydb/core/yq/libs/actors/logging + ydb/core/yq/libs/checkpointing + ydb/core/yq/libs/checkpointing_common ydb/core/yq/libs/common ydb/core/yq/libs/control_plane_storage - ydb/core/yq/libs/control_plane_storage/events + ydb/core/yq/libs/control_plane_storage/events ydb/core/yq/libs/db_resolver ydb/core/yq/libs/db_schema ydb/core/yq/libs/events @@ -84,7 +84,7 @@ PEERDIR( YQL_LAST_ABI_VERSION() END() - -RECURSE( - logging -) + +RECURSE( + logging +) diff --git a/ydb/core/yq/libs/audit/events/events.h b/ydb/core/yq/libs/audit/events/events.h index 15026dc082a..362f119c19f 100644 --- a/ydb/core/yq/libs/audit/events/events.h +++ 
b/ydb/core/yq/libs/audit/events/events.h @@ -28,7 +28,7 @@ struct TEvAuditService { // Event ids. enum EEv : ui32 { - EvCreateBindingReport = YqEventSubspaceBegin(NYq::TYqEventSubspace::AuditService), + EvCreateBindingReport = YqEventSubspaceBegin(NYq::TYqEventSubspace::AuditService), EvModifyBindingReport, EvDeleteBindingReport, EvCreateConnectionReport, @@ -41,7 +41,7 @@ struct TEvAuditService { EvEnd, }; - static_assert(EvEnd <= YqEventSubspaceEnd(NYq::TYqEventSubspace::AuditService), "All events must be in their subspace"); + static_assert(EvEnd <= YqEventSubspaceEnd(NYq::TYqEventSubspace::AuditService), "All events must be in their subspace"); private: template <class TRequest, class TAuditDetailsObj> diff --git a/ydb/core/yq/libs/audit/events/ya.make b/ydb/core/yq/libs/audit/events/ya.make index cb36076615b..28bf12561f3 100644 --- a/ydb/core/yq/libs/audit/events/ya.make +++ b/ydb/core/yq/libs/audit/events/ya.make @@ -9,7 +9,7 @@ SRCS( PEERDIR( library/cpp/actors/core library/cpp/actors/interconnect - ydb/core/yq/libs/checkpointing_common + ydb/core/yq/libs/checkpointing_common ydb/core/yq/libs/control_plane_storage/events ydb/public/api/protos ydb/library/yql/public/issue diff --git a/ydb/core/yq/libs/audit/mock/yq_mock_audit_service.cpp b/ydb/core/yq/libs/audit/mock/yq_mock_audit_service.cpp index 74f994e0df9..d7c6e298b38 100644 --- a/ydb/core/yq/libs/audit/mock/yq_mock_audit_service.cpp +++ b/ydb/core/yq/libs/audit/mock/yq_mock_audit_service.cpp @@ -10,8 +10,8 @@ class TYqMockAuditServiceActor : public NActors::TActor<TYqMockAuditServiceActor public: TYqMockAuditServiceActor() : TActor<TYqMockAuditServiceActor>(&TYqMockAuditServiceActor::StateFunc) {} - static constexpr char ActorName[] = "YQ_MOCK_AUDIT_SERVICE"; - + static constexpr char ActorName[] = "YQ_MOCK_AUDIT_SERVICE"; + private: STRICT_STFUNC(StateFunc, hFunc(TEvAuditService::CreateBindingAuditReport, Handle); diff --git a/ydb/core/yq/libs/checkpoint_storage/checkpoint_storage.h b/ydb/core/yq/libs/checkpoint_storage/checkpoint_storage.h index 74923fc8169..9fe588fbc40 100644 --- a/ydb/core/yq/libs/checkpoint_storage/checkpoint_storage.h +++ b/ydb/core/yq/libs/checkpoint_storage/checkpoint_storage.h @@ -1,7 +1,7 @@ #pragma once #include <ydb/core/yq/libs/checkpointing_common/defs.h> -#include <ydb/core/yq/libs/checkpoint_storage/proto/graph_description.pb.h> +#include <ydb/core/yq/libs/checkpoint_storage/proto/graph_description.pb.h> #include <ydb/library/yql/public/issue/yql_issue.h> @@ -9,7 +9,7 @@ #include <util/generic/ptr.h> -namespace NYq { +namespace NYq { //////////////////////////////////////////////////////////////////////////////// @@ -19,7 +19,7 @@ public: using TGetCoordinatorsResult = std::pair<TCoordinators, NYql::TIssues>; using TAddToStateSizeResult = std::pair<ui64, NYql::TIssues>; using TGetTotalCheckpointsStateSizeResult = std::pair<ui64, NYql::TIssues>; - using TCreateCheckpointResult = std::pair<TString, NYql::TIssues>; // graphDescId for subsequent usage. + using TCreateCheckpointResult = std::pair<TString, NYql::TIssues>; // graphDescId for subsequent usage. 
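The TCreateCheckpointResult alias above, like the other ICheckpointStorage result aliases, follows the codebase's pair-of-value-and-issues convention: the first element is the payload (here the graphDescId to reuse in later CreateCheckpoint calls), the second collects errors. A minimal standalone sketch of how such a pair is typically unpacked; TIssues is stubbed with a plain string list and is not the real NYql::TIssues type:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Stand-in for NYql::TIssues: just a list of error messages.
using TIssues = std::vector<std::string>;

// Mirrors ICheckpointStorage::TCreateCheckpointResult.
using TCreateCheckpointResult = std::pair<std::string /*graphDescId*/, TIssues>;

TCreateCheckpointResult CreateCheckpointStub() {
    // On success the first element carries the graph description id
    // that later CreateCheckpoint calls can reference instead of
    // resending the whole description.
    return {"graph-desc-42", {}};
}

int main() {
    auto [graphDescId, issues] = CreateCheckpointStub();
    if (!issues.empty()) {
        for (const auto& issue : issues) {
            std::cerr << issue << '\n';
        }
        return 1;
    }
    std::cout << "checkpoint created, graph description id: " << graphDescId << '\n';
    return 0;
}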
virtual NThreading::TFuture<NYql::TIssues> Init() = 0; @@ -28,18 +28,18 @@ public: virtual NThreading::TFuture<TGetCoordinatorsResult> GetCoordinators() = 0; - virtual NThreading::TFuture<TCreateCheckpointResult> CreateCheckpoint( + virtual NThreading::TFuture<TCreateCheckpointResult> CreateCheckpoint( const TCoordinatorId& coordinator, const TCheckpointId& checkpointId, - const TString& graphDescId, - ECheckpointStatus status) = 0; - - virtual NThreading::TFuture<TCreateCheckpointResult> CreateCheckpoint( - const TCoordinatorId& coordinator, - const TCheckpointId& checkpointId, - const NProto::TCheckpointGraphDescription& graphDesc, + const TString& graphDescId, ECheckpointStatus status) = 0; + virtual NThreading::TFuture<TCreateCheckpointResult> CreateCheckpoint( + const TCoordinatorId& coordinator, + const TCheckpointId& checkpointId, + const NProto::TCheckpointGraphDescription& graphDesc, + ECheckpointStatus status) = 0; + virtual NThreading::TFuture<NYql::TIssues> UpdateCheckpointStatus( const TCoordinatorId& coordinator, const TCheckpointId& checkpointId, @@ -52,7 +52,7 @@ public: virtual NThreading::TFuture<TGetCheckpointsResult> GetCheckpoints(const TString& graph) = 0; virtual NThreading::TFuture<TGetCheckpointsResult> GetCheckpoints( - const TString& graph, const TVector<ECheckpointStatus>& statuses, ui64 limit, bool loadGraphDescription = false) = 0; + const TString& graph, const TVector<ECheckpointStatus>& statuses, ui64 limit, bool loadGraphDescription = false) = 0; // GC interface // Note that no coordinator check required @@ -81,4 +81,4 @@ public: using TCheckpointStoragePtr = TIntrusivePtr<ICheckpointStorage>; -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/events/events.cpp b/ydb/core/yq/libs/checkpoint_storage/events/events.cpp index 6c3d2603e7e..a4930159cf2 100644 --- a/ydb/core/yq/libs/checkpoint_storage/events/events.cpp +++ b/ydb/core/yq/libs/checkpoint_storage/events/events.cpp @@ -1 +1 @@ -#include "events.h" +#include "events.h" diff --git a/ydb/core/yq/libs/checkpoint_storage/events/events.h b/ydb/core/yq/libs/checkpoint_storage/events/events.h index 2641f85bae2..d1269d4334d 100644 --- a/ydb/core/yq/libs/checkpoint_storage/events/events.h +++ b/ydb/core/yq/libs/checkpoint_storage/events/events.h @@ -1,200 +1,200 @@ -#pragma once +#pragma once #include <ydb/core/yq/libs/checkpointing_common/defs.h> -#include <ydb/core/yq/libs/events/event_subspace.h> -#include <ydb/core/yq/libs/checkpoint_storage/proto/graph_description.pb.h> - +#include <ydb/core/yq/libs/events/event_subspace.h> +#include <ydb/core/yq/libs/checkpoint_storage/proto/graph_description.pb.h> + #include <ydb/library/yql/public/issue/yql_issue.h> - -#include <library/cpp/actors/core/events.h> -#include <library/cpp/actors/core/event_pb.h> -#include <library/cpp/actors/interconnect/events_local.h> - -namespace NYq { - -struct TEvCheckpointStorage { - // Event ids. - enum EEv : ui32 { - EvRegisterCoordinatorRequest = YqEventSubspaceBegin(TYqEventSubspace::CheckpointStorage), - EvRegisterCoordinatorResponse, - EvCreateCheckpointRequest, - EvCreateCheckpointResponse, - EvSetCheckpointStatusPendingCommitRequest, - EvSetCheckpointStatusPendingCommitResponse, - EvCompleteCheckpointRequest, - EvCompleteCheckpointResponse, - EvAbortCheckpointRequest, - EvAbortCheckpointResponse, - EvGetCheckpointsMetadataRequest, - EvGetCheckpointsMetadataResponse, - - // Internal Storage events. 
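Every ICheckpointStorage call above returns an NThreading::TFuture; callers either block on it (GetValueSync, as TStorageProxy::Bootstrap does further down in this diff) or chain a continuation with Apply (as the TStorageProxy::Handle methods do). A compressed sketch of both styles, assuming the Arcadia future library this code already uses; it builds only inside the ydb source tree:

#include <library/cpp/threading/future/future.h>

#include <util/generic/string.h>
#include <util/stream/output.h>

using namespace NThreading;

// Stand-in for an asynchronous storage call such as ICheckpointStorage::Init().
TFuture<TString> AsyncInit() {
    return MakeFuture<TString>("ready");
}

int main() {
    // Blocking style, as in Bootstrap(): Init().GetValueSync().
    const TString state = AsyncInit().GetValueSync();
    Cout << "sync result: " << state << Endl;

    // Continuation style, as in the Handle() methods: future.Apply([](...) { ... }).
    AsyncInit().Apply([](const TFuture<TString>& done) {
        Cout << "async result: " << done.GetValue() << Endl;
    }).Wait();

    return 0;
}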
+ +#include <library/cpp/actors/core/events.h> +#include <library/cpp/actors/core/event_pb.h> +#include <library/cpp/actors/interconnect/events_local.h> + +namespace NYq { + +struct TEvCheckpointStorage { + // Event ids. + enum EEv : ui32 { + EvRegisterCoordinatorRequest = YqEventSubspaceBegin(TYqEventSubspace::CheckpointStorage), + EvRegisterCoordinatorResponse, + EvCreateCheckpointRequest, + EvCreateCheckpointResponse, + EvSetCheckpointStatusPendingCommitRequest, + EvSetCheckpointStatusPendingCommitResponse, + EvCompleteCheckpointRequest, + EvCompleteCheckpointResponse, + EvAbortCheckpointRequest, + EvAbortCheckpointResponse, + EvGetCheckpointsMetadataRequest, + EvGetCheckpointsMetadataResponse, + + // Internal Storage events. EvNewCheckpointSucceeded, - - EvEnd, - }; - - static_assert(EvEnd <= YqEventSubspaceEnd(TYqEventSubspace::CheckpointStorage), "All events must be in their subspace"); - - // Events. - - struct TEvRegisterCoordinatorRequest - : NActors::TEventLocal<TEvRegisterCoordinatorRequest, EvRegisterCoordinatorRequest> { - explicit TEvRegisterCoordinatorRequest(TCoordinatorId coordinatorId) - : CoordinatorId(std::move(coordinatorId)) { - } - - TCoordinatorId CoordinatorId; - }; - - struct TEvRegisterCoordinatorResponse - : NActors::TEventLocal<TEvRegisterCoordinatorResponse, EvRegisterCoordinatorResponse> { - TEvRegisterCoordinatorResponse() = default; - - explicit TEvRegisterCoordinatorResponse(NYql::TIssues issues) - : Issues(std::move(issues)) { - } - - NYql::TIssues Issues; - }; - - struct TEvCreateCheckpointRequest : NActors::TEventLocal<TEvCreateCheckpointRequest, EvCreateCheckpointRequest> { - TEvCreateCheckpointRequest(TCoordinatorId coordinatorId, TCheckpointId checkpointId, ui64 nodeCount, const NProto::TCheckpointGraphDescription& graphDesc) - : CoordinatorId(std::move(coordinatorId)) - , CheckpointId(std::move(checkpointId)) - , NodeCount(nodeCount) - , GraphDescription(graphDesc) { - } - - TEvCreateCheckpointRequest(TCoordinatorId coordinatorId, TCheckpointId checkpointId, ui64 nodeCount, const TString& graphDescId) - : CoordinatorId(std::move(coordinatorId)) - , CheckpointId(std::move(checkpointId)) - , NodeCount(nodeCount) - , GraphDescription(graphDescId) { - } - - TCoordinatorId CoordinatorId; - TCheckpointId CheckpointId; - ui64 NodeCount; - std::variant<TString, NProto::TCheckpointGraphDescription> GraphDescription; - }; - - struct TEvCreateCheckpointResponse : NActors::TEventLocal<TEvCreateCheckpointResponse, EvCreateCheckpointResponse> { - TEvCreateCheckpointResponse(TCheckpointId checkpointId, NYql::TIssues issues, TString graphDescId) - : CheckpointId(std::move(checkpointId)) - , Issues(std::move(issues)) - , GraphDescId(std::move(graphDescId)) { - } - - TCheckpointId CheckpointId; - NYql::TIssues Issues; - TString GraphDescId; - }; - - struct TEvSetCheckpointPendingCommitStatusRequest - : NActors::TEventLocal<TEvSetCheckpointPendingCommitStatusRequest, EvSetCheckpointStatusPendingCommitRequest> { - TEvSetCheckpointPendingCommitStatusRequest(TCoordinatorId coordinatorId, TCheckpointId checkpointId) - : CoordinatorId(std::move(coordinatorId)) - , CheckpointId(std::move(checkpointId)) { - } - - TCoordinatorId CoordinatorId; - TCheckpointId CheckpointId; - }; - - struct TEvSetCheckpointPendingCommitStatusResponse - : NActors::TEventLocal<TEvSetCheckpointPendingCommitStatusResponse, EvSetCheckpointStatusPendingCommitResponse> { - TEvSetCheckpointPendingCommitStatusResponse(TCheckpointId checkpointId, NYql::TIssues issues) - : 
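TEvCreateCheckpointRequest (removed above and re-added unchanged further below) carries its graph description as std::variant<TString, NProto::TCheckpointGraphDescription>: either the id of a description that is already stored, or a full new description. TStorageProxy::Handle later dispatches on that variant with std::holds_alternative/std::get; a self-contained miniature of the same dispatch, with the proto message replaced by a plain struct:

#include <iostream>
#include <string>
#include <variant>

// Stand-in for NProto::TCheckpointGraphDescription.
struct TGraphDescription {
    std::string Graph;
};

// Mirrors TEvCreateCheckpointRequest::GraphDescription.
using TGraphDescriptionVariant = std::variant<std::string /*graphDescId*/, TGraphDescription>;

void CreateCheckpoint(const TGraphDescriptionVariant& graphDesc) {
    if (std::holds_alternative<std::string>(graphDesc)) {
        // An id of an already stored description: only its ref count is bumped.
        std::cout << "reuse description id: " << std::get<std::string>(graphDesc) << '\n';
    } else {
        // A full description: store it and generate a fresh id for it.
        std::cout << "store new description: " << std::get<TGraphDescription>(graphDesc).Graph << '\n';
    }
}

int main() {
    CreateCheckpoint(std::string("desc-1"));
    CreateCheckpoint(TGraphDescription{"my-graph"});
    return 0;
}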
CheckpointId(std::move(checkpointId)) - , Issues(std::move(issues)) { - } - - TCheckpointId CheckpointId; - NYql::TIssues Issues; - }; - - struct TEvCompleteCheckpointRequest - : NActors::TEventLocal<TEvCompleteCheckpointRequest, EvCompleteCheckpointRequest> { - TEvCompleteCheckpointRequest(TCoordinatorId coordinatorId, TCheckpointId checkpointId) - : CoordinatorId(std::move(coordinatorId)) - , CheckpointId(std::move(checkpointId)) { - } - - TCoordinatorId CoordinatorId; - TCheckpointId CheckpointId; - }; - - struct TEvCompleteCheckpointResponse - : NActors::TEventLocal<TEvCompleteCheckpointResponse, EvCompleteCheckpointResponse> { - TEvCompleteCheckpointResponse(TCheckpointId checkpointId, NYql::TIssues issues) - : CheckpointId(std::move(checkpointId)) - , Issues(std::move(issues)) { - } - - TCheckpointId CheckpointId; - NYql::TIssues Issues; - }; - - struct TEvAbortCheckpointRequest - : NActors::TEventLocal<TEvAbortCheckpointRequest, EvAbortCheckpointRequest> { - TEvAbortCheckpointRequest(TCoordinatorId coordinatorId, TCheckpointId checkpointId, TString reason) - : CoordinatorId(std::move(coordinatorId)) - , CheckpointId(std::move(checkpointId)) - , Reason(std::move(reason)) { - } - - TCoordinatorId CoordinatorId; - TCheckpointId CheckpointId; - TString Reason; - }; - - struct TEvAbortCheckpointResponse - : NActors::TEventLocal<TEvAbortCheckpointResponse, EvAbortCheckpointResponse> { - TEvAbortCheckpointResponse(TCheckpointId checkpointId, NYql::TIssues issues) - : CheckpointId(std::move(checkpointId)) - , Issues(std::move(issues)) { - } - - TCheckpointId CheckpointId; - NYql::TIssues Issues; - }; - - struct TEvGetCheckpointsMetadataRequest - : NActors::TEventLocal<TEvGetCheckpointsMetadataRequest, EvGetCheckpointsMetadataRequest> { - explicit TEvGetCheckpointsMetadataRequest(TString graphId, TVector<ECheckpointStatus> statuses = TVector<ECheckpointStatus>(), ui64 limit = std::numeric_limits<ui64>::max(), bool loadGraphDescription = false) + + EvEnd, + }; + + static_assert(EvEnd <= YqEventSubspaceEnd(TYqEventSubspace::CheckpointStorage), "All events must be in their subspace"); + + // Events. 
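The static_assert just above is the usual guard for the event-subspace pattern used throughout these libraries: each component's event ids start at a reserved offset and must not spill into a neighbouring component's range. A condensed, self-contained illustration of the idea; the subspace arithmetic and constants below are invented for the sketch and are not the real YqEventSubspaceBegin/End values:

#include <cstdint>

using ui32 = std::uint32_t;

// Invented subspace bookkeeping; the real code uses
// YqEventSubspaceBegin/YqEventSubspaceEnd over TYqEventSubspace.
constexpr ui32 SubspaceSize = 512;
constexpr ui32 SubspaceBegin(ui32 subspace) { return subspace * SubspaceSize; }
constexpr ui32 SubspaceEnd(ui32 subspace) { return (subspace + 1) * SubspaceSize; }

constexpr ui32 CheckpointStorageSubspace = 7; // made-up index

enum EEv : ui32 {
    EvRegisterCoordinatorRequest = SubspaceBegin(CheckpointStorageSubspace),
    EvRegisterCoordinatorResponse,
    EvCreateCheckpointRequest,
    EvCreateCheckpointResponse,
    EvEnd,
};

// Compilation fails if the component outgrows its reserved id range.
static_assert(EvEnd <= SubspaceEnd(CheckpointStorageSubspace),
              "All events must be in their subspace");

int main() {
    return 0;
}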
+ + struct TEvRegisterCoordinatorRequest + : NActors::TEventLocal<TEvRegisterCoordinatorRequest, EvRegisterCoordinatorRequest> { + explicit TEvRegisterCoordinatorRequest(TCoordinatorId coordinatorId) + : CoordinatorId(std::move(coordinatorId)) { + } + + TCoordinatorId CoordinatorId; + }; + + struct TEvRegisterCoordinatorResponse + : NActors::TEventLocal<TEvRegisterCoordinatorResponse, EvRegisterCoordinatorResponse> { + TEvRegisterCoordinatorResponse() = default; + + explicit TEvRegisterCoordinatorResponse(NYql::TIssues issues) + : Issues(std::move(issues)) { + } + + NYql::TIssues Issues; + }; + + struct TEvCreateCheckpointRequest : NActors::TEventLocal<TEvCreateCheckpointRequest, EvCreateCheckpointRequest> { + TEvCreateCheckpointRequest(TCoordinatorId coordinatorId, TCheckpointId checkpointId, ui64 nodeCount, const NProto::TCheckpointGraphDescription& graphDesc) + : CoordinatorId(std::move(coordinatorId)) + , CheckpointId(std::move(checkpointId)) + , NodeCount(nodeCount) + , GraphDescription(graphDesc) { + } + + TEvCreateCheckpointRequest(TCoordinatorId coordinatorId, TCheckpointId checkpointId, ui64 nodeCount, const TString& graphDescId) + : CoordinatorId(std::move(coordinatorId)) + , CheckpointId(std::move(checkpointId)) + , NodeCount(nodeCount) + , GraphDescription(graphDescId) { + } + + TCoordinatorId CoordinatorId; + TCheckpointId CheckpointId; + ui64 NodeCount; + std::variant<TString, NProto::TCheckpointGraphDescription> GraphDescription; + }; + + struct TEvCreateCheckpointResponse : NActors::TEventLocal<TEvCreateCheckpointResponse, EvCreateCheckpointResponse> { + TEvCreateCheckpointResponse(TCheckpointId checkpointId, NYql::TIssues issues, TString graphDescId) + : CheckpointId(std::move(checkpointId)) + , Issues(std::move(issues)) + , GraphDescId(std::move(graphDescId)) { + } + + TCheckpointId CheckpointId; + NYql::TIssues Issues; + TString GraphDescId; + }; + + struct TEvSetCheckpointPendingCommitStatusRequest + : NActors::TEventLocal<TEvSetCheckpointPendingCommitStatusRequest, EvSetCheckpointStatusPendingCommitRequest> { + TEvSetCheckpointPendingCommitStatusRequest(TCoordinatorId coordinatorId, TCheckpointId checkpointId) + : CoordinatorId(std::move(coordinatorId)) + , CheckpointId(std::move(checkpointId)) { + } + + TCoordinatorId CoordinatorId; + TCheckpointId CheckpointId; + }; + + struct TEvSetCheckpointPendingCommitStatusResponse + : NActors::TEventLocal<TEvSetCheckpointPendingCommitStatusResponse, EvSetCheckpointStatusPendingCommitResponse> { + TEvSetCheckpointPendingCommitStatusResponse(TCheckpointId checkpointId, NYql::TIssues issues) + : CheckpointId(std::move(checkpointId)) + , Issues(std::move(issues)) { + } + + TCheckpointId CheckpointId; + NYql::TIssues Issues; + }; + + struct TEvCompleteCheckpointRequest + : NActors::TEventLocal<TEvCompleteCheckpointRequest, EvCompleteCheckpointRequest> { + TEvCompleteCheckpointRequest(TCoordinatorId coordinatorId, TCheckpointId checkpointId) + : CoordinatorId(std::move(coordinatorId)) + , CheckpointId(std::move(checkpointId)) { + } + + TCoordinatorId CoordinatorId; + TCheckpointId CheckpointId; + }; + + struct TEvCompleteCheckpointResponse + : NActors::TEventLocal<TEvCompleteCheckpointResponse, EvCompleteCheckpointResponse> { + TEvCompleteCheckpointResponse(TCheckpointId checkpointId, NYql::TIssues issues) + : CheckpointId(std::move(checkpointId)) + , Issues(std::move(issues)) { + } + + TCheckpointId CheckpointId; + NYql::TIssues Issues; + }; + + struct TEvAbortCheckpointRequest + : NActors::TEventLocal<TEvAbortCheckpointRequest, 
EvAbortCheckpointRequest> { + TEvAbortCheckpointRequest(TCoordinatorId coordinatorId, TCheckpointId checkpointId, TString reason) + : CoordinatorId(std::move(coordinatorId)) + , CheckpointId(std::move(checkpointId)) + , Reason(std::move(reason)) { + } + + TCoordinatorId CoordinatorId; + TCheckpointId CheckpointId; + TString Reason; + }; + + struct TEvAbortCheckpointResponse + : NActors::TEventLocal<TEvAbortCheckpointResponse, EvAbortCheckpointResponse> { + TEvAbortCheckpointResponse(TCheckpointId checkpointId, NYql::TIssues issues) + : CheckpointId(std::move(checkpointId)) + , Issues(std::move(issues)) { + } + + TCheckpointId CheckpointId; + NYql::TIssues Issues; + }; + + struct TEvGetCheckpointsMetadataRequest + : NActors::TEventLocal<TEvGetCheckpointsMetadataRequest, EvGetCheckpointsMetadataRequest> { + explicit TEvGetCheckpointsMetadataRequest(TString graphId, TVector<ECheckpointStatus> statuses = TVector<ECheckpointStatus>(), ui64 limit = std::numeric_limits<ui64>::max(), bool loadGraphDescription = false) : GraphId(std::move(graphId)) , Statuses(std::move(statuses)) - , Limit(limit) - , LoadGraphDescription(loadGraphDescription) { - } - - TString GraphId; + , Limit(limit) + , LoadGraphDescription(loadGraphDescription) { + } + + TString GraphId; TVector<ECheckpointStatus> Statuses; ui64 Limit; - bool LoadGraphDescription = false; - }; - - struct TEvGetCheckpointsMetadataResponse - : NActors::TEventLocal<TEvGetCheckpointsMetadataResponse, EvGetCheckpointsMetadataResponse> { - TEvGetCheckpointsMetadataResponse(TVector<TCheckpointMetadata> checkpoints, NYql::TIssues issues) - : Checkpoints(std::move(checkpoints)) - , Issues(std::move(issues)) { - } - - TCheckpoints Checkpoints; - NYql::TIssues Issues; - }; - - // note that no response exists + bool LoadGraphDescription = false; + }; + + struct TEvGetCheckpointsMetadataResponse + : NActors::TEventLocal<TEvGetCheckpointsMetadataResponse, EvGetCheckpointsMetadataResponse> { + TEvGetCheckpointsMetadataResponse(TVector<TCheckpointMetadata> checkpoints, NYql::TIssues issues) + : Checkpoints(std::move(checkpoints)) + , Issues(std::move(issues)) { + } + + TCheckpoints Checkpoints; + NYql::TIssues Issues; + }; + + // note that no response exists struct TEvNewCheckpointSucceeded : NActors::TEventLocal<TEvNewCheckpointSucceeded, EvNewCheckpointSucceeded> { TEvNewCheckpointSucceeded(TCoordinatorId coordinatorId, TCheckpointId checkpointId) - : CoordinatorId(std::move(coordinatorId)) - , CheckpointId(std::move(checkpointId)) - { - } - - TCoordinatorId CoordinatorId; - TCheckpointId CheckpointId; - }; -}; - -} // namespace NYq + : CoordinatorId(std::move(coordinatorId)) + , CheckpointId(std::move(checkpointId)) + { + } + + TCoordinatorId CoordinatorId; + TCheckpointId CheckpointId; + }; +}; + +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/events/ya.make b/ydb/core/yq/libs/checkpoint_storage/events/ya.make index 84e07c6b9ac..68fb8ad4fb0 100644 --- a/ydb/core/yq/libs/checkpoint_storage/events/ya.make +++ b/ydb/core/yq/libs/checkpoint_storage/events/ya.make @@ -1,18 +1,18 @@ OWNER(g:yq) - -LIBRARY() - -SRCS( - events.cpp -) - -PEERDIR( - library/cpp/actors/core - library/cpp/actors/interconnect + +LIBRARY() + +SRCS( + events.cpp +) + +PEERDIR( + library/cpp/actors/core + library/cpp/actors/interconnect ydb/core/yq/libs/checkpointing_common - ydb/core/yq/libs/events - ydb/core/yq/libs/checkpoint_storage/proto + ydb/core/yq/libs/events + ydb/core/yq/libs/checkpoint_storage/proto ydb/library/yql/public/issue -) - -END() +) + +END() diff 
--git a/ydb/core/yq/libs/checkpoint_storage/gc.cpp b/ydb/core/yq/libs/checkpoint_storage/gc.cpp index 2a0e73ecb4f..be01771bccc 100644 --- a/ydb/core/yq/libs/checkpoint_storage/gc.cpp +++ b/ydb/core/yq/libs/checkpoint_storage/gc.cpp @@ -1,13 +1,13 @@ #include "gc.h" #include <ydb/core/yq/libs/checkpointing_common/defs.h> -#include <ydb/core/yq/libs/checkpoint_storage/events/events.h> -#include <ydb/core/yq/libs/actors/logging/log.h> +#include <ydb/core/yq/libs/checkpoint_storage/events/events.h> +#include <ydb/core/yq/libs/actors/logging/log.h> #include <library/cpp/actors/core/actor_bootstrapped.h> #include <library/cpp/actors/core/hfunc.h> -namespace NYq { +namespace NYq { using namespace NActors; using namespace NThreading; @@ -60,7 +60,7 @@ public: void Bootstrap(const TActorContext& ctx); - static constexpr char ActorName[] = "YQ_GC_ACTOR"; + static constexpr char ActorName[] = "YQ_GC_ACTOR"; private: STRICT_STFUNC(StateFunc, @@ -179,4 +179,4 @@ std::unique_ptr<NActors::IActor> NewGC( return std::unique_ptr<NActors::IActor>(new TActorGC(checkpointStorage, stateStorage)); } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/gc.h b/ydb/core/yq/libs/checkpoint_storage/gc.h index 57063ed1bb9..f55dfe5b620 100644 --- a/ydb/core/yq/libs/checkpoint_storage/gc.h +++ b/ydb/core/yq/libs/checkpoint_storage/gc.h @@ -9,7 +9,7 @@ #include <memory> -namespace NYq { +namespace NYq { //////////////////////////////////////////////////////////////////////////////// @@ -18,4 +18,4 @@ std::unique_ptr<NActors::IActor> NewGC( const TCheckpointStoragePtr& checkpointStorage, const TStateStoragePtr& stateStorage); -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/proto/graph_description.proto b/ydb/core/yq/libs/checkpoint_storage/proto/graph_description.proto index 9e8325f28fa..83ea8d544f3 100644 --- a/ydb/core/yq/libs/checkpoint_storage/proto/graph_description.proto +++ b/ydb/core/yq/libs/checkpoint_storage/proto/graph_description.proto @@ -1,10 +1,10 @@ -syntax = "proto3"; -option cc_enable_arenas = true; - -package NYq.NProto; - -import "ydb/core/yq/libs/graph_params/proto/graph_params.proto"; - -message TCheckpointGraphDescription { - TGraphParams Graph = 1; -} +syntax = "proto3"; +option cc_enable_arenas = true; + +package NYq.NProto; + +import "ydb/core/yq/libs/graph_params/proto/graph_params.proto"; + +message TCheckpointGraphDescription { + TGraphParams Graph = 1; +} diff --git a/ydb/core/yq/libs/checkpoint_storage/proto/ya.make b/ydb/core/yq/libs/checkpoint_storage/proto/ya.make index 0c17f53147c..2cbcf9aa0e7 100644 --- a/ydb/core/yq/libs/checkpoint_storage/proto/ya.make +++ b/ydb/core/yq/libs/checkpoint_storage/proto/ya.make @@ -1,15 +1,15 @@ -OWNER(g:yq) - -PROTO_LIBRARY() - -PEERDIR( - ydb/core/yq/libs/graph_params/proto -) - -SRCS( - graph_description.proto -) - -EXCLUDE_TAGS(GO_PROTO) - -END() +OWNER(g:yq) + +PROTO_LIBRARY() + +PEERDIR( + ydb/core/yq/libs/graph_params/proto +) + +SRCS( + graph_description.proto +) + +EXCLUDE_TAGS(GO_PROTO) + +END() diff --git a/ydb/core/yq/libs/checkpoint_storage/state_storage.h b/ydb/core/yq/libs/checkpoint_storage/state_storage.h index d1d89703de3..c15b76eb27f 100644 --- a/ydb/core/yq/libs/checkpoint_storage/state_storage.h +++ b/ydb/core/yq/libs/checkpoint_storage/state_storage.h @@ -9,13 +9,13 @@ #include <util/generic/ptr.h> -namespace NYq { +namespace NYq { //////////////////////////////////////////////////////////////////////////////// class IStateStorage : public virtual 
TThrRefBase { public: - using TGetStateResult = std::pair<std::vector<NYql::NDqProto::TComputeActorState>, NYql::TIssues>; + using TGetStateResult = std::pair<std::vector<NYql::NDqProto::TComputeActorState>, NYql::TIssues>; using TCountStatesResult = std::pair<size_t, NYql::TIssues>; virtual NThreading::TFuture<NYql::TIssues> Init() = 0; @@ -24,10 +24,10 @@ public: ui64 taskId, const TString& graphId, const TCheckpointId& checkpointId, - const NYql::NDqProto::TComputeActorState& state) = 0; + const NYql::NDqProto::TComputeActorState& state) = 0; virtual NThreading::TFuture<TGetStateResult> GetState( - const std::vector<ui64>& taskIds, + const std::vector<ui64>& taskIds, const TString& graphId, const TCheckpointId& checkpointId) = 0; @@ -47,4 +47,4 @@ public: using TStateStoragePtr = TIntrusivePtr<IStateStorage>; -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/storage_proxy.cpp b/ydb/core/yq/libs/checkpoint_storage/storage_proxy.cpp index 07c8d5101a0..85264a0ea06 100644 --- a/ydb/core/yq/libs/checkpoint_storage/storage_proxy.cpp +++ b/ydb/core/yq/libs/checkpoint_storage/storage_proxy.cpp @@ -7,9 +7,9 @@ #include "ydb_state_storage.h" #include <ydb/core/yq/libs/checkpointing_common/defs.h> -#include <ydb/core/yq/libs/checkpoint_storage/events/events.h> +#include <ydb/core/yq/libs/checkpoint_storage/events/events.h> -#include <ydb/core/yq/libs/actors/logging/log.h> +#include <ydb/core/yq/libs/actors/logging/log.h> #include <ydb/core/yq/libs/ydb/util.h> #include <ydb/library/yql/dq/actors/compute/dq_compute_actor.h> @@ -18,14 +18,14 @@ #include <library/cpp/actors/core/hfunc.h> #include <util/stream/file.h> -#include <util/string/join.h> +#include <util/string/join.h> #include <util/string/strip.h> -#define LOG_STORAGE_ASYNC_DEBUG(actorContext, stream) LOG_LOG_S(actorContext, ::NActors::NLog::PRI_DEBUG, ::NKikimrServices::STREAMS_STORAGE_SERVICE, stream); -#define LOG_STORAGE_ASYNC_INFO(actorContext, stream) LOG_LOG_S(actorContext, ::NActors::NLog::PRI_INFO, ::NKikimrServices::STREAMS_STORAGE_SERVICE, stream); -#define LOG_STORAGE_ASYNC_WARN(actorContext, stream) LOG_LOG_S(actorContext, ::NActors::NLog::PRI_WARN, ::NKikimrServices::STREAMS_STORAGE_SERVICE, stream); +#define LOG_STORAGE_ASYNC_DEBUG(actorContext, stream) LOG_LOG_S(actorContext, ::NActors::NLog::PRI_DEBUG, ::NKikimrServices::STREAMS_STORAGE_SERVICE, stream); +#define LOG_STORAGE_ASYNC_INFO(actorContext, stream) LOG_LOG_S(actorContext, ::NActors::NLog::PRI_INFO, ::NKikimrServices::STREAMS_STORAGE_SERVICE, stream); +#define LOG_STORAGE_ASYNC_WARN(actorContext, stream) LOG_LOG_S(actorContext, ::NActors::NLog::PRI_WARN, ::NKikimrServices::STREAMS_STORAGE_SERVICE, stream); -namespace NYq { +namespace NYq { using namespace NActors; @@ -35,7 +35,7 @@ namespace { class TStorageProxy : public TActorBootstrapped<TStorageProxy> { NConfig::TCheckpointCoordinatorConfig Config; - NConfig::TCommonConfig CommonConfig; + NConfig::TCommonConfig CommonConfig; NConfig::TYdbStorageConfig StorageConfig; TCheckpointStoragePtr CheckpointStorage; TStateStoragePtr StateStorage; @@ -45,34 +45,34 @@ class TStorageProxy : public TActorBootstrapped<TStorageProxy> { public: explicit TStorageProxy( const NConfig::TCheckpointCoordinatorConfig& config, - const NConfig::TCommonConfig& commonConfig, + const NConfig::TCommonConfig& commonConfig, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory); - void Bootstrap(); + void Bootstrap(); - static constexpr char ActorName[] = "YQ_STORAGE_PROXY"; + static 
constexpr char ActorName[] = "YQ_STORAGE_PROXY"; private: STRICT_STFUNC(StateFunc, - hFunc(TEvCheckpointStorage::TEvRegisterCoordinatorRequest, Handle); - hFunc(TEvCheckpointStorage::TEvCreateCheckpointRequest, Handle); - hFunc(TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusRequest, Handle); - hFunc(TEvCheckpointStorage::TEvCompleteCheckpointRequest, Handle); - hFunc(TEvCheckpointStorage::TEvAbortCheckpointRequest, Handle); - hFunc(TEvCheckpointStorage::TEvGetCheckpointsMetadataRequest, Handle); + hFunc(TEvCheckpointStorage::TEvRegisterCoordinatorRequest, Handle); + hFunc(TEvCheckpointStorage::TEvCreateCheckpointRequest, Handle); + hFunc(TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusRequest, Handle); + hFunc(TEvCheckpointStorage::TEvCompleteCheckpointRequest, Handle); + hFunc(TEvCheckpointStorage::TEvAbortCheckpointRequest, Handle); + hFunc(TEvCheckpointStorage::TEvGetCheckpointsMetadataRequest, Handle); hFunc(NYql::NDq::TEvDqCompute::TEvSaveTaskState, Handle); hFunc(NYql::NDq::TEvDqCompute::TEvGetTaskState, Handle); ) - void Handle(TEvCheckpointStorage::TEvRegisterCoordinatorRequest::TPtr& ev); + void Handle(TEvCheckpointStorage::TEvRegisterCoordinatorRequest::TPtr& ev); - void Handle(TEvCheckpointStorage::TEvCreateCheckpointRequest::TPtr& ev); - void Handle(TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusRequest::TPtr& ev); - void Handle(TEvCheckpointStorage::TEvCompleteCheckpointRequest::TPtr& ev); - void Handle(TEvCheckpointStorage::TEvAbortCheckpointRequest::TPtr& ev); + void Handle(TEvCheckpointStorage::TEvCreateCheckpointRequest::TPtr& ev); + void Handle(TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusRequest::TPtr& ev); + void Handle(TEvCheckpointStorage::TEvCompleteCheckpointRequest::TPtr& ev); + void Handle(TEvCheckpointStorage::TEvAbortCheckpointRequest::TPtr& ev); - void Handle(TEvCheckpointStorage::TEvGetCheckpointsMetadataRequest::TPtr& ev); + void Handle(TEvCheckpointStorage::TEvGetCheckpointsMetadataRequest::TPtr& ev); void Handle(NYql::NDq::TEvDqCompute::TEvSaveTaskState::TPtr& ev); void Handle(NYql::NDq::TEvDqCompute::TEvGetTaskState::TPtr& ev); @@ -99,27 +99,27 @@ static void FillDefaultParameters(NConfig::TCheckpointCoordinatorConfig& checkpo TStorageProxy::TStorageProxy( const NConfig::TCheckpointCoordinatorConfig& config, - const NConfig::TCommonConfig& commonConfig, + const NConfig::TCommonConfig& commonConfig, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory) : Config(config) - , CommonConfig(commonConfig) + , CommonConfig(commonConfig) , StorageConfig(Config.GetStorage()) , CredentialsProviderFactory(credentialsProviderFactory) { FillDefaultParameters(Config, StorageConfig); } -void TStorageProxy::Bootstrap() { - CheckpointStorage = NewYdbCheckpointStorage(StorageConfig, CredentialsProviderFactory, CreateEntityIdGenerator(CommonConfig.GetIdsPrefix())); - auto issues = CheckpointStorage->Init().GetValueSync(); - if (!issues.Empty()) { - LOG_STREAMS_STORAGE_SERVICE_ERROR("Failed to init checkpoint storage: " << issues.ToOneLineString()); - } +void TStorageProxy::Bootstrap() { + CheckpointStorage = NewYdbCheckpointStorage(StorageConfig, CredentialsProviderFactory, CreateEntityIdGenerator(CommonConfig.GetIdsPrefix())); + auto issues = CheckpointStorage->Init().GetValueSync(); + if (!issues.Empty()) { + LOG_STREAMS_STORAGE_SERVICE_ERROR("Failed to init checkpoint storage: " << issues.ToOneLineString()); + } StateStorage = NewYdbStateStorage(StorageConfig, CredentialsProviderFactory); - issues = 
StateStorage->Init().GetValueSync(); - if (!issues.Empty()) { - LOG_STREAMS_STORAGE_SERVICE_ERROR("Failed to init checkpoint state storage: " << issues.ToOneLineString()); - } + issues = StateStorage->Init().GetValueSync(); + if (!issues.Empty()) { + LOG_STREAMS_STORAGE_SERVICE_ERROR("Failed to init checkpoint state storage: " << issues.ToOneLineString()); + } if (Config.GetCheckpointGarbageConfig().GetEnabled()) { const auto& gcConfig = Config.GetCheckpointGarbageConfig(); @@ -133,7 +133,7 @@ void TStorageProxy::Bootstrap() { << ":" << StorageConfig.GetDatabase().data()) } -void TStorageProxy::Handle(TEvCheckpointStorage::TEvRegisterCoordinatorRequest::TPtr& ev) { +void TStorageProxy::Handle(TEvCheckpointStorage::TEvRegisterCoordinatorRequest::TPtr& ev) { const auto* event = ev->Get(); LOG_STREAMS_STORAGE_SERVICE_DEBUG("[" << event->CoordinatorId << "] Got TEvRegisterCoordinatorRequest") @@ -154,7 +154,7 @@ void TStorageProxy::Handle(TEvCheckpointStorage::TEvRegisterCoordinatorRequest:: }); } -void TStorageProxy::Handle(TEvCheckpointStorage::TEvCreateCheckpointRequest::TPtr& ev) { +void TStorageProxy::Handle(TEvCheckpointStorage::TEvCreateCheckpointRequest::TPtr& ev) { const auto* event = ev->Get(); LOG_STREAMS_STORAGE_SERVICE_DEBUG("[" << event->CoordinatorId << "] [" << event->CheckpointId << "] Got TEvCreateCheckpointRequest") @@ -170,8 +170,8 @@ void TStorageProxy::Handle(TEvCheckpointStorage::TEvCreateCheckpointRequest::TPt auto issues = result.second; if (issues) { - LOG_STORAGE_ASYNC_WARN(context, "[" << coordinatorId << "] [" << checkpointId << "] Failed to fetch total graph checkpoints size: " << issues.ToString()); - context.Send(sender, new TEvCheckpointStorage::TEvCreateCheckpointResponse(checkpointId, std::move(issues), TString()), 0, cookie); + LOG_STORAGE_ASYNC_WARN(context, "[" << coordinatorId << "] [" << checkpointId << "] Failed to fetch total graph checkpoints size: " << issues.ToString()); + context.Send(sender, new TEvCheckpointStorage::TEvCreateCheckpointResponse(checkpointId, std::move(issues), TString()), 0, cookie); return false; } @@ -183,51 +183,51 @@ void TStorageProxy::Handle(TEvCheckpointStorage::TEvCreateCheckpointRequest::TPt auto message = ss.Str(); LOG_STORAGE_ASYNC_WARN(context, message) issues.AddIssue(message); - LOG_STORAGE_ASYNC_DEBUG(context, "[" << coordinatorId << "] [" << checkpointId << "] Send TEvCreateCheckpointResponse"); - context.Send(sender, new TEvCheckpointStorage::TEvCreateCheckpointResponse(checkpointId, std::move(issues), TString()), 0, cookie); + LOG_STORAGE_ASYNC_DEBUG(context, "[" << coordinatorId << "] [" << checkpointId << "] Send TEvCreateCheckpointResponse"); + context.Send(sender, new TEvCheckpointStorage::TEvCreateCheckpointResponse(checkpointId, std::move(issues), TString()), 0, cookie); return false; } return true; }) - .Apply([checkpointId = event->CheckpointId, - coordinatorId = event->CoordinatorId, - cookie = ev->Cookie, - sender = ev->Sender, - graphDesc = event->GraphDescription, - storage = CheckpointStorage] + .Apply([checkpointId = event->CheckpointId, + coordinatorId = event->CoordinatorId, + cookie = ev->Cookie, + sender = ev->Sender, + graphDesc = event->GraphDescription, + storage = CheckpointStorage] (const NThreading::TFuture<bool>& passedSizeLimitCheckFuture) { if (!passedSizeLimitCheckFuture.GetValue()) { - return NThreading::TFuture<ICheckpointStorage::TCreateCheckpointResult>(); - } - if (std::holds_alternative<TString>(graphDesc)) { - return storage->CreateCheckpoint(coordinatorId, checkpointId, 
std::get<TString>(graphDesc), ECheckpointStatus::Pending); - } else { - return storage->CreateCheckpoint(coordinatorId, checkpointId, std::get<NProto::TCheckpointGraphDescription>(graphDesc), ECheckpointStatus::Pending); + return NThreading::TFuture<ICheckpointStorage::TCreateCheckpointResult>(); } + if (std::holds_alternative<TString>(graphDesc)) { + return storage->CreateCheckpoint(coordinatorId, checkpointId, std::get<TString>(graphDesc), ECheckpointStatus::Pending); + } else { + return storage->CreateCheckpoint(coordinatorId, checkpointId, std::get<NProto::TCheckpointGraphDescription>(graphDesc), ECheckpointStatus::Pending); + } }) .Apply([checkpointId = event->CheckpointId, coordinatorId = event->CoordinatorId, cookie = ev->Cookie, sender = ev->Sender, context = TActivationContext::AsActorContext()] - (const NThreading::TFuture<ICheckpointStorage::TCreateCheckpointResult>& resultFuture) { - if (!resultFuture.Initialized()) { // didn't pass the size limit check + (const NThreading::TFuture<ICheckpointStorage::TCreateCheckpointResult>& resultFuture) { + if (!resultFuture.Initialized()) { // didn't pass the size limit check return; } - auto result = resultFuture.GetValue(); - auto issues = result.second; - auto response = std::make_unique<TEvCheckpointStorage::TEvCreateCheckpointResponse>(checkpointId, std::move(issues), result.first); + auto result = resultFuture.GetValue(); + auto issues = result.second; + auto response = std::make_unique<TEvCheckpointStorage::TEvCreateCheckpointResponse>(checkpointId, std::move(issues), result.first); if (response->Issues) { - LOG_STORAGE_ASYNC_WARN(context, "[" << coordinatorId << "] [" << checkpointId << "] Failed to create checkpoint: " << response->Issues.ToString()); + LOG_STORAGE_ASYNC_WARN(context, "[" << coordinatorId << "] [" << checkpointId << "] Failed to create checkpoint: " << response->Issues.ToString()); } else { - LOG_STORAGE_ASYNC_INFO(context, "[" << coordinatorId << "] [" << checkpointId << "] Checkpoint created"); + LOG_STORAGE_ASYNC_INFO(context, "[" << coordinatorId << "] [" << checkpointId << "] Checkpoint created"); } - LOG_STORAGE_ASYNC_DEBUG(context, "[" << coordinatorId << "] [" << checkpointId << "] Send TEvCreateCheckpointResponse"); + LOG_STORAGE_ASYNC_DEBUG(context, "[" << coordinatorId << "] [" << checkpointId << "] Send TEvCreateCheckpointResponse"); context.Send(sender, response.release(), 0, cookie); }); } -void TStorageProxy::Handle(TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusRequest::TPtr& ev) { +void TStorageProxy::Handle(TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusRequest::TPtr& ev) { const auto* event = ev->Get(); LOG_STREAMS_STORAGE_SERVICE_DEBUG("[" << event->CoordinatorId << "] [" << event->CheckpointId << "] Got TEvSetCheckpointPendingCommitStatusRequest") CheckpointStorage->UpdateCheckpointStatus(event->CoordinatorId, event->CheckpointId, ECheckpointStatus::PendingCommit, ECheckpointStatus::Pending) @@ -249,7 +249,7 @@ void TStorageProxy::Handle(TEvCheckpointStorage::TEvSetCheckpointPendingCommitSt }); } -void TStorageProxy::Handle(TEvCheckpointStorage::TEvCompleteCheckpointRequest::TPtr& ev) { +void TStorageProxy::Handle(TEvCheckpointStorage::TEvCompleteCheckpointRequest::TPtr& ev) { const auto* event = ev->Get(); LOG_STREAMS_STORAGE_SERVICE_DEBUG("[" << event->CoordinatorId << "] [" << event->CheckpointId << "] Got TEvCompleteCheckpointRequest") CheckpointStorage->UpdateCheckpointStatus(event->CoordinatorId, event->CheckpointId, ECheckpointStatus::Completed, 
ECheckpointStatus::PendingCommit) @@ -278,7 +278,7 @@ void TStorageProxy::Handle(TEvCheckpointStorage::TEvCompleteCheckpointRequest::T }); } -void TStorageProxy::Handle(TEvCheckpointStorage::TEvAbortCheckpointRequest::TPtr& ev) { +void TStorageProxy::Handle(TEvCheckpointStorage::TEvAbortCheckpointRequest::TPtr& ev) { const auto* event = ev->Get(); LOG_STREAMS_STORAGE_SERVICE_DEBUG("[" << event->CoordinatorId << "] [" << event->CheckpointId << "] Got TEvAbortCheckpointRequest") CheckpointStorage->AbortCheckpoint(event->CoordinatorId,event->CheckpointId) @@ -299,10 +299,10 @@ void TStorageProxy::Handle(TEvCheckpointStorage::TEvAbortCheckpointRequest::TPtr }); } -void TStorageProxy::Handle(TEvCheckpointStorage::TEvGetCheckpointsMetadataRequest::TPtr& ev) { +void TStorageProxy::Handle(TEvCheckpointStorage::TEvGetCheckpointsMetadataRequest::TPtr& ev) { const auto* event = ev->Get(); LOG_STREAMS_STORAGE_SERVICE_DEBUG("[" << event->GraphId << "] Got TEvGetCheckpointsMetadataRequest") - CheckpointStorage->GetCheckpoints(event->GraphId, event->Statuses, event->Limit, event->LoadGraphDescription) + CheckpointStorage->GetCheckpoints(event->GraphId, event->Statuses, event->Limit, event->LoadGraphDescription) .Apply([graphId = event->GraphId, cookie = ev->Cookie, sender = ev->Sender, @@ -320,17 +320,17 @@ void TStorageProxy::Handle(TEvCheckpointStorage::TEvGetCheckpointsMetadataReques void TStorageProxy::Handle(NYql::NDq::TEvDqCompute::TEvSaveTaskState::TPtr& ev) { auto* event = ev->Get(); const auto checkpointId = TCheckpointId(event->Checkpoint.GetGeneration(), event->Checkpoint.GetId()); - LOG_STREAMS_STORAGE_SERVICE_DEBUG("[" << checkpointId << "] Got TEvSaveTaskState: task " << event->TaskId); + LOG_STREAMS_STORAGE_SERVICE_DEBUG("[" << checkpointId << "] Got TEvSaveTaskState: task " << event->TaskId); - const size_t stateSize = event->State.ByteSizeLong(); + const size_t stateSize = event->State.ByteSizeLong(); if (stateSize > Config.GetStateStorageLimits().GetMaxTaskStateSizeBytes()) { - LOG_STREAMS_STORAGE_SERVICE_WARN("[" << checkpointId << "] Won't save task state because it's too big: task: " << event->TaskId + LOG_STREAMS_STORAGE_SERVICE_WARN("[" << checkpointId << "] Won't save task state because it's too big: task: " << event->TaskId << ", state size: " << stateSize << "/" << Config.GetStateStorageLimits().GetMaxTaskStateSizeBytes()); auto response = std::make_unique<NYql::NDq::TEvDqCompute::TEvSaveTaskStateResult>(); response->Record.MutableCheckpoint()->SetGeneration(checkpointId.CoordinatorGeneration); response->Record.MutableCheckpoint()->SetId(checkpointId.SeqNo); response->Record.SetStateSizeBytes(0); - response->Record.SetTaskId(event->TaskId); + response->Record.SetTaskId(event->TaskId); response->Record.SetStatus(NYql::NDqProto::TEvSaveTaskStateResult::STATE_TOO_BIG); Send(ev->Sender, response.release()); return; @@ -342,14 +342,14 @@ void TStorageProxy::Handle(NYql::NDq::TEvDqCompute::TEvSaveTaskState::TPtr& ev) taskId = event->TaskId, cookie = ev->Cookie, sender = ev->Sender, - stateSize = stateSize, + stateSize = stateSize, context = TActivationContext::AsActorContext()](const NThreading::TFuture<NYql::TIssues>& futureResult) { const auto& issues = futureResult.GetValue(); auto response = std::make_unique<NYql::NDq::TEvDqCompute::TEvSaveTaskStateResult>(); response->Record.MutableCheckpoint()->SetGeneration(checkpointId.CoordinatorGeneration); response->Record.MutableCheckpoint()->SetId(checkpointId.SeqNo); response->Record.SetStateSizeBytes(stateSize); - 
response->Record.SetTaskId(taskId); + response->Record.SetTaskId(taskId); if (issues) { LOG_STORAGE_ASYNC_WARN(context, "[" << checkpointId << "] Failed to save task state: task: " << taskId << ", issues: " << issues.ToString()) @@ -365,23 +365,23 @@ void TStorageProxy::Handle(NYql::NDq::TEvDqCompute::TEvSaveTaskState::TPtr& ev) void TStorageProxy::Handle(NYql::NDq::TEvDqCompute::TEvGetTaskState::TPtr& ev) { const auto* event = ev->Get(); const auto checkpointId = TCheckpointId(event->Checkpoint.GetGeneration(), event->Checkpoint.GetId()); - LOG_STREAMS_STORAGE_SERVICE_DEBUG("[" << checkpointId << "] Got TEvGetTaskState: tasks {" << JoinSeq(", ", event->TaskIds) << "}"); + LOG_STREAMS_STORAGE_SERVICE_DEBUG("[" << checkpointId << "] Got TEvGetTaskState: tasks {" << JoinSeq(", ", event->TaskIds) << "}"); - StateStorage->GetState(event->TaskIds, event->GraphId, checkpointId) + StateStorage->GetState(event->TaskIds, event->GraphId, checkpointId) .Apply([checkpointId = event->Checkpoint, generation = event->Generation, - taskIds = event->TaskIds, + taskIds = event->TaskIds, cookie = ev->Cookie, sender = ev->Sender, context = TActivationContext::AsActorContext()](const NThreading::TFuture<IStateStorage::TGetStateResult>& resultFuture) { auto result = resultFuture.GetValue(); - auto response = std::make_unique<NYql::NDq::TEvDqCompute::TEvGetTaskStateResult>(checkpointId, result.second, generation); - std::swap(response->States, result.first); + auto response = std::make_unique<NYql::NDq::TEvDqCompute::TEvGetTaskStateResult>(checkpointId, result.second, generation); + std::swap(response->States, result.first); if (response->Issues) { - LOG_STORAGE_ASYNC_WARN(context, "[" << checkpointId << "] Failed to get task state: taskIds: {" << JoinSeq(", ", taskIds) << "}, issues: " << response->Issues.ToString()); + LOG_STORAGE_ASYNC_WARN(context, "[" << checkpointId << "] Failed to get task state: taskIds: {" << JoinSeq(", ", taskIds) << "}, issues: " << response->Issues.ToString()); } - LOG_STORAGE_ASYNC_DEBUG(context, "[" << checkpointId << "] Send TEvGetTaskStateResult"); + LOG_STORAGE_ASYNC_DEBUG(context, "[" << checkpointId << "] Send TEvGetTaskStateResult"); context.Send(sender, response.release(), 0, cookie); }); } @@ -392,10 +392,10 @@ void TStorageProxy::Handle(NYql::NDq::TEvDqCompute::TEvGetTaskState::TPtr& ev) { std::unique_ptr<NActors::IActor> NewStorageProxy( const NConfig::TCheckpointCoordinatorConfig& config, - const NConfig::TCommonConfig& commonConfig, + const NConfig::TCommonConfig& commonConfig, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory) { - return std::unique_ptr<NActors::IActor>(new TStorageProxy(config, commonConfig, credentialsProviderFactory)); + return std::unique_ptr<NActors::IActor>(new TStorageProxy(config, commonConfig, credentialsProviderFactory)); } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/storage_proxy.h b/ydb/core/yq/libs/checkpoint_storage/storage_proxy.h index c67d928a151..5681b90bb38 100644 --- a/ydb/core/yq/libs/checkpoint_storage/storage_proxy.h +++ b/ydb/core/yq/libs/checkpoint_storage/storage_proxy.h @@ -1,7 +1,7 @@ #pragma once #include <ydb/core/yq/libs/config/protos/checkpoint_coordinator.pb.h> -#include <ydb/core/yq/libs/config/protos/common.pb.h> +#include <ydb/core/yq/libs/config/protos/common.pb.h> #include <ydb/library/security/ydb_credentials_provider_factory.h> @@ -9,11 +9,11 @@ #include <memory> -namespace NYq { +namespace NYq { std::unique_ptr<NActors::IActor> NewStorageProxy( 
const NConfig::TCheckpointCoordinatorConfig& config, - const NConfig::TCommonConfig& commonConfig, + const NConfig::TCommonConfig& commonConfig, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory); -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/storage_service.cpp b/ydb/core/yq/libs/checkpoint_storage/storage_service.cpp index 421c19d7fdb..9a3bb51f07a 100644 --- a/ydb/core/yq/libs/checkpoint_storage/storage_service.cpp +++ b/ydb/core/yq/libs/checkpoint_storage/storage_service.cpp @@ -2,7 +2,7 @@ #include "storage_proxy.h" -namespace NYq { +namespace NYq { using namespace NActors; @@ -10,10 +10,10 @@ using namespace NActors; std::unique_ptr<NActors::IActor> NewCheckpointStorageService( const NConfig::TCheckpointCoordinatorConfig& config, - const NConfig::TCommonConfig& commonConfig, + const NConfig::TCommonConfig& commonConfig, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory) { - return NewStorageProxy(config, commonConfig, credentialsProviderFactory); + return NewStorageProxy(config, commonConfig, credentialsProviderFactory); } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/storage_service.h b/ydb/core/yq/libs/checkpoint_storage/storage_service.h index 5eab844545a..db92499c70d 100644 --- a/ydb/core/yq/libs/checkpoint_storage/storage_service.h +++ b/ydb/core/yq/libs/checkpoint_storage/storage_service.h @@ -1,7 +1,7 @@ #pragma once #include <ydb/core/yq/libs/config/protos/checkpoint_coordinator.pb.h> -#include <ydb/core/yq/libs/config/protos/common.pb.h> +#include <ydb/core/yq/libs/config/protos/common.pb.h> #include <ydb/library/security/ydb_credentials_provider_factory.h> @@ -9,11 +9,11 @@ #include <memory> -namespace NYq { +namespace NYq { std::unique_ptr<NActors::IActor> NewCheckpointStorageService( const NConfig::TCheckpointCoordinatorConfig& config, - const NConfig::TCommonConfig& commonConfig, + const NConfig::TCommonConfig& commonConfig, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory); -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/ya.make b/ydb/core/yq/libs/checkpoint_storage/ya.make index fc2f449e371..19ff96874c7 100644 --- a/ydb/core/yq/libs/checkpoint_storage/ya.make +++ b/ydb/core/yq/libs/checkpoint_storage/ya.make @@ -11,14 +11,14 @@ SRCS( ) PEERDIR( - contrib/libs/fmt + contrib/libs/fmt library/cpp/actors/core - ydb/core/yq/libs/actors/logging - ydb/core/yq/libs/control_plane_storage + ydb/core/yq/libs/actors/logging + ydb/core/yq/libs/control_plane_storage ydb/core/yq/libs/ydb - ydb/core/yq/libs/checkpoint_storage/events - ydb/core/yq/libs/checkpoint_storage/proto - ydb/core/yq/libs/checkpointing_common + ydb/core/yq/libs/checkpoint_storage/events + ydb/core/yq/libs/checkpoint_storage/proto + ydb/core/yq/libs/checkpointing_common ydb/library/security ydb/public/sdk/cpp/client/ydb_scheme ydb/public/sdk/cpp/client/ydb_table @@ -30,7 +30,7 @@ YQL_LAST_ABI_VERSION() END() -RECURSE( - events - proto -) +RECURSE( + events + proto +) diff --git a/ydb/core/yq/libs/checkpoint_storage/ydb_checkpoint_storage.cpp b/ydb/core/yq/libs/checkpoint_storage/ydb_checkpoint_storage.cpp index 7799b52daaf..e073d20cb10 100644 --- a/ydb/core/yq/libs/checkpoint_storage/ydb_checkpoint_storage.cpp +++ b/ydb/core/yq/libs/checkpoint_storage/ydb_checkpoint_storage.cpp @@ -6,12 +6,12 @@ #include <ydb/public/sdk/cpp/client/ydb_scheme/scheme.h> #include <util/stream/str.h> -#include <util/string/builder.h> 
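The checkpoint queries in this file (ydb_checkpoint_storage.cpp, whose diff continues below) are assembled with {fmt} named arguments ("name"_a = value), which keeps the long YQL templates readable and lets optional clauses be spliced in conditionally. A tiny standalone illustration of that mechanism; the database path and the simplified query are illustrative only:

#include <fmt/format.h>

#include <iostream>
#include <string>

int main() {
    using namespace fmt::literals;

    // Mirrors the "{table_path_prefix}" / "{..._table_name}" substitutions below.
    const std::string query = fmt::format(R"sql(
        --!syntax_v1
        PRAGMA TablePathPrefix("{table_path_prefix}");
        DECLARE $graph_id AS String;

        SELECT * FROM {checkpoints_metadata_table_name}
        WHERE graph_id = $graph_id;
    )sql",
        "table_path_prefix"_a = "/local/yq/checkpoints",
        "checkpoints_metadata_table_name"_a = "checkpoints_metadata");

    std::cout << query << '\n';
    return 0;
}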
+#include <util/string/builder.h> #include <util/string/printf.h> -#include <fmt/format.h> - -namespace NYq { +#include <fmt/format.h> + +namespace NYq { using namespace NThreading; using namespace NYdb; @@ -23,39 +23,39 @@ namespace { //////////////////////////////////////////////////////////////////////////////// -const char* const CoordinatorsSyncTable = "coordinators_sync"; -const char* const CheckpointsMetadataTable = "checkpoints_metadata"; -const char* const CheckpointsGraphsDescriptionTable = "checkpoints_graphs_description"; - -//////////////////////////////////////////////////////////////////////////////// - -struct TCheckpointGraphDescriptionContext : public TThrRefBase { - TString GraphDescId; - const TMaybe<NProto::TCheckpointGraphDescription> NewGraphDescription; - - explicit TCheckpointGraphDescriptionContext(const TString& graphDescId) - : GraphDescId(graphDescId) - { - } - - explicit TCheckpointGraphDescriptionContext(const NProto::TCheckpointGraphDescription& desc) - : NewGraphDescription(desc) - { - } -}; - -using TCheckpointGraphDescriptionContextPtr = TIntrusivePtr<TCheckpointGraphDescriptionContext>; +const char* const CoordinatorsSyncTable = "coordinators_sync"; +const char* const CheckpointsMetadataTable = "checkpoints_metadata"; +const char* const CheckpointsGraphsDescriptionTable = "checkpoints_graphs_description"; //////////////////////////////////////////////////////////////////////////////// +struct TCheckpointGraphDescriptionContext : public TThrRefBase { + TString GraphDescId; + const TMaybe<NProto::TCheckpointGraphDescription> NewGraphDescription; + + explicit TCheckpointGraphDescriptionContext(const TString& graphDescId) + : GraphDescId(graphDescId) + { + } + + explicit TCheckpointGraphDescriptionContext(const NProto::TCheckpointGraphDescription& desc) + : NewGraphDescription(desc) + { + } +}; + +using TCheckpointGraphDescriptionContextPtr = TIntrusivePtr<TCheckpointGraphDescriptionContext>; + +//////////////////////////////////////////////////////////////////////////////// + struct TCheckpointContext : public TThrRefBase { const TCheckpointId CheckpointId; const ECheckpointStatus Status; // optional new status const ECheckpointStatus ExpectedStatus; // optional expecrted current status, used only in some operations TGenerationContextPtr GenerationContext; - TCheckpointGraphDescriptionContextPtr CheckpointGraphDescriptionContext; - IEntityIdGenerator::TPtr EntityIdGenerator; + TCheckpointGraphDescriptionContextPtr CheckpointGraphDescriptionContext; + IEntityIdGenerator::TPtr EntityIdGenerator; TCheckpointContext(const TCheckpointId& id, ECheckpointStatus status, @@ -140,88 +140,88 @@ TFuture<TStatus> CreateCheckpoint(const TCheckpointContextPtr& context) { // TODO: use prepared query const auto& generationContext = context->GenerationContext; - const auto& graphDescContext = context->CheckpointGraphDescriptionContext; + const auto& graphDescContext = context->CheckpointGraphDescriptionContext; - TStringBuilder query; - using namespace fmt::literals; - const TString firstPart = fmt::format(R"sql( + TStringBuilder query; + using namespace fmt::literals; + const TString firstPart = fmt::format(R"sql( --!syntax_v1 - PRAGMA TablePathPrefix("{table_path_prefix}"); - DECLARE $ts AS Timestamp; - DECLARE $graph_id AS String; - DECLARE $graph_desc_id AS String; - DECLARE $coordinator_generation AS Uint64; - DECLARE $seq_no AS Uint64; - DECLARE $status AS Uint8; - {optional_graph_description_declaration} - INSERT INTO {checkpoints_metadata_table_name} - (graph_id, 
coordinator_generation, seq_no, status, created_by, modified_by, state_size, graph_description_id) - VALUES ($graph_id, $coordinator_generation, $seq_no, $status, $ts, $ts, 0, $graph_desc_id); - )sql", - "table_path_prefix"_a = generationContext->TablePathPrefix, - "checkpoints_metadata_table_name"_a = CheckpointsMetadataTable, - "optional_graph_description_declaration"_a = graphDescContext->NewGraphDescription ? "DECLARE $graph_description AS String;" : "" - ); - - query << firstPart; - - NYdb::TParamsBuilder params; - params - .AddParam("$graph_id") - .String(generationContext->PrimaryKey) - .Build() - .AddParam("$graph_desc_id") - .String(graphDescContext->GraphDescId) - .Build() - .AddParam("$coordinator_generation") - .Uint64(context->CheckpointId.CoordinatorGeneration) - .Build() - .AddParam("$seq_no") - .Uint64(context->CheckpointId.SeqNo) - .Build() - .AddParam("$status") - .Uint8((ui8)context->Status) - .Build() - .AddParam("$ts") - .Timestamp(TInstant::Now()) - .Build(); - - if (graphDescContext->NewGraphDescription) { - const TString graphDescriptionPart = fmt::format(R"sql( - INSERT INTO {checkpoints_graphs_description_table_name} - (id, ref_count, graph_description) - VALUES ($graph_desc_id, 1, $graph_description); - )sql", - "checkpoints_graphs_description_table_name"_a = CheckpointsGraphsDescriptionTable - ); - - query << graphDescriptionPart; - - TString serializedGraphDescription; - if (!graphDescContext->NewGraphDescription->SerializeToString(&serializedGraphDescription)) { - NYql::TIssues issues; - issues.AddIssue("Failed to serialize graph description proto"); - return MakeFuture(TStatus(EStatus::BAD_REQUEST, std::move(issues))); - } - - params - .AddParam("$graph_description") - .String(serializedGraphDescription) - .Build(); - } else { - const TString graphDescriptionPart = fmt::format(R"sql( - UPDATE {checkpoints_graphs_description_table_name} - SET ref_count = ref_count + 1 - WHERE id = $graph_desc_id; - )sql", - "checkpoints_graphs_description_table_name"_a = CheckpointsGraphsDescriptionTable - ); - - query << graphDescriptionPart; - } - + PRAGMA TablePathPrefix("{table_path_prefix}"); + DECLARE $ts AS Timestamp; + DECLARE $graph_id AS String; + DECLARE $graph_desc_id AS String; + DECLARE $coordinator_generation AS Uint64; + DECLARE $seq_no AS Uint64; + DECLARE $status AS Uint8; + {optional_graph_description_declaration} + INSERT INTO {checkpoints_metadata_table_name} + (graph_id, coordinator_generation, seq_no, status, created_by, modified_by, state_size, graph_description_id) + VALUES ($graph_id, $coordinator_generation, $seq_no, $status, $ts, $ts, 0, $graph_desc_id); + )sql", + "table_path_prefix"_a = generationContext->TablePathPrefix, + "checkpoints_metadata_table_name"_a = CheckpointsMetadataTable, + "optional_graph_description_declaration"_a = graphDescContext->NewGraphDescription ? 
"DECLARE $graph_description AS String;" : "" + ); + + query << firstPart; + + NYdb::TParamsBuilder params; + params + .AddParam("$graph_id") + .String(generationContext->PrimaryKey) + .Build() + .AddParam("$graph_desc_id") + .String(graphDescContext->GraphDescId) + .Build() + .AddParam("$coordinator_generation") + .Uint64(context->CheckpointId.CoordinatorGeneration) + .Build() + .AddParam("$seq_no") + .Uint64(context->CheckpointId.SeqNo) + .Build() + .AddParam("$status") + .Uint8((ui8)context->Status) + .Build() + .AddParam("$ts") + .Timestamp(TInstant::Now()) + .Build(); + + if (graphDescContext->NewGraphDescription) { + const TString graphDescriptionPart = fmt::format(R"sql( + INSERT INTO {checkpoints_graphs_description_table_name} + (id, ref_count, graph_description) + VALUES ($graph_desc_id, 1, $graph_description); + )sql", + "checkpoints_graphs_description_table_name"_a = CheckpointsGraphsDescriptionTable + ); + + query << graphDescriptionPart; + + TString serializedGraphDescription; + if (!graphDescContext->NewGraphDescription->SerializeToString(&serializedGraphDescription)) { + NYql::TIssues issues; + issues.AddIssue("Failed to serialize graph description proto"); + return MakeFuture(TStatus(EStatus::BAD_REQUEST, std::move(issues))); + } + + params + .AddParam("$graph_description") + .String(serializedGraphDescription) + .Build(); + } else { + const TString graphDescriptionPart = fmt::format(R"sql( + UPDATE {checkpoints_graphs_description_table_name} + SET ref_count = ref_count + 1 + WHERE id = $graph_desc_id; + )sql", + "checkpoints_graphs_description_table_name"_a = CheckpointsGraphsDescriptionTable + ); + + query << graphDescriptionPart; + } + auto ttxControl = TTxControl::Tx(*generationContext->Transaction).CommitTx(); - return generationContext->Session.ExecuteDataQuery(query, ttxControl, params.Build()).Apply( + return generationContext->Session.ExecuteDataQuery(query, ttxControl, params.Build()).Apply( [] (const TFuture<TDataQueryResult>& future) { TStatus status = future.GetValue(); return status; @@ -257,50 +257,50 @@ TFuture<TStatus> UpdateCheckpoint(const TCheckpointContextPtr& context) { }); } -TFuture<TDataQueryResult> SelectGraphDescId(const TCheckpointContextPtr& context) { - const auto& generationContext = context->GenerationContext; - const auto& graphDescContext = context->CheckpointGraphDescriptionContext; - - auto query = Sprintf(R"( - --!syntax_v1 - PRAGMA TablePathPrefix("%s"); - - SELECT ref_count - FROM %s - WHERE id = "%s"; - )", generationContext->TablePathPrefix.c_str(), - CheckpointsGraphsDescriptionTable, - graphDescContext->GraphDescId.c_str()); - - return generationContext->Session.ExecuteDataQuery(query, TTxControl::Tx(*generationContext->Transaction)); -} - -bool GraphDescIdExists(const TFuture<TDataQueryResult>& result) { - return result.GetValue().GetResultSet(0).RowsCount() != 0; -} - -TFuture<TStatus> GenerateGraphDescId(const TCheckpointContextPtr& context) { - if (context->CheckpointGraphDescriptionContext->GraphDescId) { // already given - return MakeFuture(TStatus(EStatus::SUCCESS, NYql::TIssues())); - } - - Y_VERIFY(context->EntityIdGenerator); - context->CheckpointGraphDescriptionContext->GraphDescId = context->EntityIdGenerator->Generate(EEntityType::CHECKPOINT_GRAPH_DESCRIPTION); - return SelectGraphDescId(context) - .Apply( - [context](const TFuture<TDataQueryResult>& result) { - if (!result.GetValue().IsSuccess()) { - return MakeFuture<TStatus>(result.GetValue()); - } - if (!GraphDescIdExists(result)) { - return 
MakeFuture(TStatus(EStatus::SUCCESS, NYql::TIssues())); - } else { - context->CheckpointGraphDescriptionContext->GraphDescId = {}; // Regenerate - return GenerateGraphDescId(context); - } - }); -} - +TFuture<TDataQueryResult> SelectGraphDescId(const TCheckpointContextPtr& context) { + const auto& generationContext = context->GenerationContext; + const auto& graphDescContext = context->CheckpointGraphDescriptionContext; + + auto query = Sprintf(R"( + --!syntax_v1 + PRAGMA TablePathPrefix("%s"); + + SELECT ref_count + FROM %s + WHERE id = "%s"; + )", generationContext->TablePathPrefix.c_str(), + CheckpointsGraphsDescriptionTable, + graphDescContext->GraphDescId.c_str()); + + return generationContext->Session.ExecuteDataQuery(query, TTxControl::Tx(*generationContext->Transaction)); +} + +bool GraphDescIdExists(const TFuture<TDataQueryResult>& result) { + return result.GetValue().GetResultSet(0).RowsCount() != 0; +} + +TFuture<TStatus> GenerateGraphDescId(const TCheckpointContextPtr& context) { + if (context->CheckpointGraphDescriptionContext->GraphDescId) { // already given + return MakeFuture(TStatus(EStatus::SUCCESS, NYql::TIssues())); + } + + Y_VERIFY(context->EntityIdGenerator); + context->CheckpointGraphDescriptionContext->GraphDescId = context->EntityIdGenerator->Generate(EEntityType::CHECKPOINT_GRAPH_DESCRIPTION); + return SelectGraphDescId(context) + .Apply( + [context](const TFuture<TDataQueryResult>& result) { + if (!result.GetValue().IsSuccess()) { + return MakeFuture<TStatus>(result.GetValue()); + } + if (!GraphDescIdExists(result)) { + return MakeFuture(TStatus(EStatus::SUCCESS, NYql::TIssues())); + } else { + context->CheckpointGraphDescriptionContext->GraphDescId = {}; // Regenerate + return GenerateGraphDescId(context); + } + }); +} + TFuture<TStatus> CreateCheckpointWrapper( const TFuture<TStatus>& generationFuture, const TCheckpointContextPtr& context) @@ -312,18 +312,18 @@ TFuture<TStatus> CreateCheckpointWrapper( return MakeFuture(generationSelect); } - return GenerateGraphDescId(context) - .Apply( - [context](const TFuture<TStatus>& result) { - if (!result.GetValue().IsSuccess()) { - return MakeFuture(result.GetValue()); - } - return CreateCheckpoint(context); - }); + return GenerateGraphDescId(context) + .Apply( + [context](const TFuture<TStatus>& result) { + if (!result.GetValue().IsSuccess()) { + return MakeFuture(result.GetValue()); + } + return CreateCheckpoint(context); + }); }); } -TFuture<TDataQueryResult> SelectGraphCheckpoints(const TGenerationContextPtr& context, const TVector<ECheckpointStatus>& statuses, ui64 limit, TExecDataQuerySettings settings, bool loadGraphDescription) +TFuture<TDataQueryResult> SelectGraphCheckpoints(const TGenerationContextPtr& context, const TVector<ECheckpointStatus>& statuses, ui64 limit, TExecDataQuerySettings settings, bool loadGraphDescription) { NYdb::TParamsBuilder paramsBuilder; if (statuses) { @@ -335,55 +335,55 @@ TFuture<TDataQueryResult> SelectGraphCheckpoints(const TGenerationContextPtr& co } paramsBuilder.AddParam("$graph_id").String(context->PrimaryKey).Build(); - if (limit < std::numeric_limits<ui64>::max()) { - paramsBuilder.AddParam("$limit").Uint64(limit).Build(); - } + if (limit < std::numeric_limits<ui64>::max()) { + paramsBuilder.AddParam("$limit").Uint64(limit).Build(); + } auto params = paramsBuilder.Build(); - using namespace fmt::literals; - TString join; - if (loadGraphDescription) { - join = fmt::format(R"sql( - INNER JOIN {checkpoints_graphs_description_table_name} AS desc - ON 
metadata.graph_description_id = desc.id - )sql", - "checkpoints_graphs_description_table_name"_a = CheckpointsGraphsDescriptionTable - ); - } - - const TString query = fmt::format(R"sql( + using namespace fmt::literals; + TString join; + if (loadGraphDescription) { + join = fmt::format(R"sql( + INNER JOIN {checkpoints_graphs_description_table_name} AS desc + ON metadata.graph_description_id = desc.id + )sql", + "checkpoints_graphs_description_table_name"_a = CheckpointsGraphsDescriptionTable + ); + } + + const TString query = fmt::format(R"sql( --!syntax_v1 - PRAGMA TablePathPrefix("{table_path_prefix}"); + PRAGMA TablePathPrefix("{table_path_prefix}"); PRAGMA AnsiInForEmptyOrNullableItemsCollections; - DECLARE $graph_id AS String; - {optional_statuses_declaration} - {optional_limit_declaration} - - SELECT - {graph_description_field} - metadata.coordinator_generation AS coordinator_generation, - metadata.seq_no AS seq_no, - metadata.status AS status, - metadata.created_by AS created_by, - metadata.modified_by AS modified_by - FROM {checkpoints_metadata_table_name} AS metadata - {join} - WHERE metadata.graph_id = $graph_id - {statuses_condition} + DECLARE $graph_id AS String; + {optional_statuses_declaration} + {optional_limit_declaration} + + SELECT + {graph_description_field} + metadata.coordinator_generation AS coordinator_generation, + metadata.seq_no AS seq_no, + metadata.status AS status, + metadata.created_by AS created_by, + metadata.modified_by AS modified_by + FROM {checkpoints_metadata_table_name} AS metadata + {join} + WHERE metadata.graph_id = $graph_id + {statuses_condition} ORDER BY coordinator_generation, seq_no DESC - {limit_condition}; - )sql", - "table_path_prefix"_a = context->TablePathPrefix, - "optional_statuses_declaration"_a = statuses ? "DECLARE $statuses AS List<Uint8>;" : "", - "statuses_condition"_a = statuses ? "AND metadata.status IN $statuses" : "", - "optional_limit_declaration"_a = limit < std::numeric_limits<ui64>::max() ? "DECLARE $limit AS Uint64;" : "", - "limit_condition"_a = limit < std::numeric_limits<ui64>::max() ? "LIMIT $limit" : "", - "checkpoints_metadata_table_name"_a = CheckpointsMetadataTable, - "graph_description_field"_a = loadGraphDescription ? "desc.graph_description AS graph_description," : "", - "join"_a = join - ); + {limit_condition}; + )sql", + "table_path_prefix"_a = context->TablePathPrefix, + "optional_statuses_declaration"_a = statuses ? "DECLARE $statuses AS List<Uint8>;" : "", + "statuses_condition"_a = statuses ? "AND metadata.status IN $statuses" : "", + "optional_limit_declaration"_a = limit < std::numeric_limits<ui64>::max() ? "DECLARE $limit AS Uint64;" : "", + "limit_condition"_a = limit < std::numeric_limits<ui64>::max() ? "LIMIT $limit" : "", + "checkpoints_metadata_table_name"_a = CheckpointsMetadataTable, + "graph_description_field"_a = loadGraphDescription ? 
"desc.graph_description AS graph_description," : "", + "join"_a = join + ); return context->Session.ExecuteDataQuery( query, @@ -395,8 +395,8 @@ TFuture<TDataQueryResult> SelectGraphCheckpoints(const TGenerationContextPtr& co TFuture<TStatus> ProcessCheckpoints( const TDataQueryResult& selectResult, const TGenerationContextPtr& context, - const TGetCheckpointsContextPtr& getContext, - bool loadGraphDescription) + const TGetCheckpointsContextPtr& getContext, + bool loadGraphDescription) { if (!selectResult.IsSuccess()) { return MakeFuture<TStatus>(selectResult); @@ -415,20 +415,20 @@ TFuture<TStatus> ProcessCheckpoints( ECheckpointStatus(*parser.ColumnParser("status").GetOptionalUint8()), *parser.ColumnParser("created_by").GetOptionalTimestamp(), *parser.ColumnParser("modified_by").GetOptionalTimestamp()); - - if (loadGraphDescription) { - if (const TMaybe<TString> graphDescription = parser.ColumnParser("graph_description").GetOptionalString(); graphDescription && *graphDescription) { - NProto::TCheckpointGraphDescription graphDesc; - if (!graphDesc.ParseFromString(*graphDescription)) { - NYql::TIssues issues; - issues.AddIssue("Failed to deserialize graph description proto"); - return MakeFuture(TStatus(EStatus::INTERNAL_ERROR, std::move(issues))); - } - - NProto::TGraphParams& graphParams = getContext->Checkpoints.back().Graph.ConstructInPlace(); - graphParams.Swap(graphDesc.MutableGraph()); - } - } + + if (loadGraphDescription) { + if (const TMaybe<TString> graphDescription = parser.ColumnParser("graph_description").GetOptionalString(); graphDescription && *graphDescription) { + NProto::TCheckpointGraphDescription graphDesc; + if (!graphDesc.ParseFromString(*graphDescription)) { + NYql::TIssues issues; + issues.AddIssue("Failed to deserialize graph description proto"); + return MakeFuture(TStatus(EStatus::INTERNAL_ERROR, std::move(issues))); + } + + NProto::TGraphParams& graphParams = getContext->Checkpoints.back().Graph.ConstructInPlace(); + graphParams.Swap(graphDesc.MutableGraph()); + } + } } return MakeFuture<TStatus>(selectResult); @@ -554,9 +554,9 @@ class TCheckpointStorage : public ICheckpointStorage { public: explicit TCheckpointStorage( const NConfig::TYdbStorageConfig& config, - const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, - const IEntityIdGenerator::TPtr& entityIdGenerator); - + const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, + const IEntityIdGenerator::TPtr& entityIdGenerator); + ~TCheckpointStorage() = default; TFuture<TIssues> Init() override; @@ -565,18 +565,18 @@ public: TFuture<TGetCoordinatorsResult> GetCoordinators() override; - TFuture<TCreateCheckpointResult> CreateCheckpoint( - const TCoordinatorId& coordinator, - const TCheckpointId& checkpointId, - const TString& graphDescId, - ECheckpointStatus status) override; - - TFuture<TCreateCheckpointResult> CreateCheckpoint( + TFuture<TCreateCheckpointResult> CreateCheckpoint( const TCoordinatorId& coordinator, const TCheckpointId& checkpointId, - const NProto::TCheckpointGraphDescription& graphDesc, + const TString& graphDescId, ECheckpointStatus status) override; + TFuture<TCreateCheckpointResult> CreateCheckpoint( + const TCoordinatorId& coordinator, + const TCheckpointId& checkpointId, + const NProto::TCheckpointGraphDescription& graphDesc, + ECheckpointStatus status) override; + TFuture<TIssues> UpdateCheckpointStatus( const TCoordinatorId& coordinator, const TCheckpointId& checkpointId, @@ -591,7 +591,7 @@ public: const TString& graph) override; 
TFuture<TGetCheckpointsResult> GetCheckpoints( - const TString& graph, const TVector<ECheckpointStatus>& statuses, ui64 limit, bool loadGraphDescription) override; + const TString& graph, const TVector<ECheckpointStatus>& statuses, ui64 limit, bool loadGraphDescription) override; TFuture<TIssues> DeleteGraph( const TString& graphId) override; @@ -611,23 +611,23 @@ public: TFuture<ICheckpointStorage::TGetTotalCheckpointsStateSizeResult> GetTotalCheckpointsStateSize(const TString& graphId) override; TExecDataQuerySettings DefaultExecDataQuerySettings(); - -private: - TFuture<TCreateCheckpointResult> CreateCheckpointImpl(const TCoordinatorId& coordinator, const TCheckpointContextPtr& context); - -private: - IEntityIdGenerator::TPtr EntityIdGenerator; + +private: + TFuture<TCreateCheckpointResult> CreateCheckpointImpl(const TCoordinatorId& coordinator, const TCheckpointContextPtr& context); + +private: + IEntityIdGenerator::TPtr EntityIdGenerator; }; //////////////////////////////////////////////////////////////////////////////// TCheckpointStorage::TCheckpointStorage( const NConfig::TYdbStorageConfig& config, - const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, - const IEntityIdGenerator::TPtr& entityIdGenerator) + const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, + const IEntityIdGenerator::TPtr& entityIdGenerator) : YdbConnection(NewYdbConnection(config, credentialsProviderFactory)) , Config(config) - , EntityIdGenerator(entityIdGenerator) + , EntityIdGenerator(entityIdGenerator) { } @@ -653,33 +653,33 @@ TFuture<TIssues> TCheckpointStorage::Init() } } -#define RUN_CREATE_TABLE(tableName, desc) \ - { \ - auto status = CreateTable(YdbConnection, \ - tableName, \ - std::move(desc)).GetValueSync(); \ - if (!IsTableCreated(status)) { \ - issues = status.GetIssues(); \ - \ - TStringStream ss; \ - ss << "Failed to create " << tableName \ - << " table: " << status.GetStatus(); \ - if (issues) { \ - ss << ", issues: "; \ - issues.PrintTo(ss); \ - } \ - \ - return MakeFuture(std::move(issues)); \ - } \ - } - +#define RUN_CREATE_TABLE(tableName, desc) \ + { \ + auto status = CreateTable(YdbConnection, \ + tableName, \ + std::move(desc)).GetValueSync(); \ + if (!IsTableCreated(status)) { \ + issues = status.GetIssues(); \ + \ + TStringStream ss; \ + ss << "Failed to create " << tableName \ + << " table: " << status.GetStatus(); \ + if (issues) { \ + ss << ", issues: "; \ + issues.PrintTo(ss); \ + } \ + \ + return MakeFuture(std::move(issues)); \ + } \ + } + auto graphDesc = TTableBuilder() .AddNullableColumn("graph_id", EPrimitiveType::String) .AddNullableColumn("generation", EPrimitiveType::Uint64) .SetPrimaryKeyColumn("graph_id") .Build(); - RUN_CREATE_TABLE(CoordinatorsSyncTable, graphDesc); + RUN_CREATE_TABLE(CoordinatorsSyncTable, graphDesc); // TODO: graph_id could be just secondary index, but API forbids it, // so we set it primary key column to have index @@ -691,23 +691,23 @@ TFuture<TIssues> TCheckpointStorage::Init() .AddNullableColumn("created_by", EPrimitiveType::Timestamp) .AddNullableColumn("modified_by", EPrimitiveType::Timestamp) .AddNullableColumn("state_size", EPrimitiveType::Uint64) - .AddNullableColumn("graph_description_id", EPrimitiveType::String) + .AddNullableColumn("graph_description_id", EPrimitiveType::String) .SetPrimaryKeyColumns({"graph_id", "coordinator_generation", "seq_no"}) .Build(); - RUN_CREATE_TABLE(CheckpointsMetadataTable, checkpointDesc); - - auto checkpointGraphsDescDesc = TTableBuilder() - 
.AddNullableColumn("id", EPrimitiveType::String) - .AddNullableColumn("ref_count", EPrimitiveType::Uint64) - .AddNullableColumn("graph_description", EPrimitiveType::String) - .SetPrimaryKeyColumn("id") - .Build(); + RUN_CREATE_TABLE(CheckpointsMetadataTable, checkpointDesc); - RUN_CREATE_TABLE(CheckpointsGraphsDescriptionTable, checkpointGraphsDescDesc); - -#undef RUN_CREATE_TABLE + auto checkpointGraphsDescDesc = TTableBuilder() + .AddNullableColumn("id", EPrimitiveType::String) + .AddNullableColumn("ref_count", EPrimitiveType::Uint64) + .AddNullableColumn("graph_description", EPrimitiveType::String) + .SetPrimaryKeyColumn("id") + .Build(); + RUN_CREATE_TABLE(CheckpointsGraphsDescriptionTable, checkpointGraphsDescDesc); + +#undef RUN_CREATE_TABLE + return MakeFuture(std::move(issues)); } @@ -762,32 +762,32 @@ TFuture<ICheckpointStorage::TGetCoordinatorsResult> TCheckpointStorage::GetCoord }); } -TFuture<ICheckpointStorage::TCreateCheckpointResult> TCheckpointStorage::CreateCheckpoint( +TFuture<ICheckpointStorage::TCreateCheckpointResult> TCheckpointStorage::CreateCheckpoint( const TCoordinatorId& coordinator, const TCheckpointId& checkpointId, - const TString& graphDescId, + const TString& graphDescId, ECheckpointStatus status) { - Y_VERIFY(graphDescId); + Y_VERIFY(graphDescId); auto checkpointContext = MakeIntrusive<TCheckpointContext>(checkpointId, status); - checkpointContext->CheckpointGraphDescriptionContext = MakeIntrusive<TCheckpointGraphDescriptionContext>(graphDescId); - return CreateCheckpointImpl(coordinator, checkpointContext); -} - -TFuture<ICheckpointStorage::TCreateCheckpointResult> TCheckpointStorage::CreateCheckpoint( - const TCoordinatorId& coordinator, - const TCheckpointId& checkpointId, - const NProto::TCheckpointGraphDescription& graphDesc, - ECheckpointStatus status) -{ - auto checkpointContext = MakeIntrusive<TCheckpointContext>(checkpointId, status); - checkpointContext->CheckpointGraphDescriptionContext = MakeIntrusive<TCheckpointGraphDescriptionContext>(graphDesc); - checkpointContext->EntityIdGenerator = EntityIdGenerator; - return CreateCheckpointImpl(coordinator, checkpointContext); -} - -TFuture<ICheckpointStorage::TCreateCheckpointResult> TCheckpointStorage::CreateCheckpointImpl(const TCoordinatorId& coordinator, const TCheckpointContextPtr& checkpointContext) { - Y_VERIFY(checkpointContext->CheckpointGraphDescriptionContext->GraphDescId || checkpointContext->EntityIdGenerator); + checkpointContext->CheckpointGraphDescriptionContext = MakeIntrusive<TCheckpointGraphDescriptionContext>(graphDescId); + return CreateCheckpointImpl(coordinator, checkpointContext); +} + +TFuture<ICheckpointStorage::TCreateCheckpointResult> TCheckpointStorage::CreateCheckpoint( + const TCoordinatorId& coordinator, + const TCheckpointId& checkpointId, + const NProto::TCheckpointGraphDescription& graphDesc, + ECheckpointStatus status) +{ + auto checkpointContext = MakeIntrusive<TCheckpointContext>(checkpointId, status); + checkpointContext->CheckpointGraphDescriptionContext = MakeIntrusive<TCheckpointGraphDescriptionContext>(graphDesc); + checkpointContext->EntityIdGenerator = EntityIdGenerator; + return CreateCheckpointImpl(coordinator, checkpointContext); +} + +TFuture<ICheckpointStorage::TCreateCheckpointResult> TCheckpointStorage::CreateCheckpointImpl(const TCoordinatorId& coordinator, const TCheckpointContextPtr& checkpointContext) { + Y_VERIFY(checkpointContext->CheckpointGraphDescriptionContext->GraphDescId || checkpointContext->EntityIdGenerator); auto future = 
YdbConnection->Client.RetryOperation( [prefix = YdbConnection->TablePathPrefix, coordinator, checkpointContext] (TSession session) { auto generationContext = MakeIntrusive<TGenerationContext>( @@ -806,14 +806,14 @@ TFuture<ICheckpointStorage::TCreateCheckpointResult> TCheckpointStorage::CreateC return CreateCheckpointWrapper(future, checkpointContext); }); - return future.Apply( - [checkpointContext](const TFuture<NYdb::TStatus>& future) { - if (NYql::TIssues issues = StatusToIssues(future.GetValue())) { - return TCreateCheckpointResult(TString(), std::move(issues)); - } else { - return TCreateCheckpointResult(checkpointContext->CheckpointGraphDescriptionContext->GraphDescId, NYql::TIssues()); - } - }); + return future.Apply( + [checkpointContext](const TFuture<NYdb::TStatus>& future) { + if (NYql::TIssues issues = StatusToIssues(future.GetValue())) { + return TCreateCheckpointResult(TString(), std::move(issues)); + } else { + return TCreateCheckpointResult(checkpointContext->CheckpointGraphDescriptionContext->GraphDescId, NYql::TIssues()); + } + }); } TFuture<TIssues> TCheckpointStorage::UpdateCheckpointStatus( @@ -871,16 +871,16 @@ TFuture<TIssues> TCheckpointStorage::AbortCheckpoint( } TFuture<ICheckpointStorage::TGetCheckpointsResult> TCheckpointStorage::GetCheckpoints(const TString& graph) { - return GetCheckpoints(graph, TVector<ECheckpointStatus>(), std::numeric_limits<ui64>::max(), true); + return GetCheckpoints(graph, TVector<ECheckpointStatus>(), std::numeric_limits<ui64>::max(), true); } TFuture<ICheckpointStorage::TGetCheckpointsResult> TCheckpointStorage::GetCheckpoints( - const TString& graph, const TVector<ECheckpointStatus>& statuses, ui64 limit, bool loadGraphDescription) + const TString& graph, const TVector<ECheckpointStatus>& statuses, ui64 limit, bool loadGraphDescription) { auto getContext = MakeIntrusive<TGetCheckpointsContext>(); auto future = YdbConnection->Client.RetryOperation( - [prefix = YdbConnection->TablePathPrefix, graph, getContext, statuses, limit, loadGraphDescription, settings = DefaultExecDataQuerySettings()] (TSession session) { + [prefix = YdbConnection->TablePathPrefix, graph, getContext, statuses, limit, loadGraphDescription, settings = DefaultExecDataQuerySettings()] (TSession session) { auto generationContext = MakeIntrusive<TGenerationContext>( session, false, @@ -891,10 +891,10 @@ TFuture<ICheckpointStorage::TGetCheckpointsResult> TCheckpointStorage::GetCheckp graph, 0UL); - auto future = SelectGraphCheckpoints(generationContext, statuses, limit, settings, loadGraphDescription); + auto future = SelectGraphCheckpoints(generationContext, statuses, limit, settings, loadGraphDescription); return future.Apply( - [generationContext, getContext, loadGraphDescription] (const TFuture<TDataQueryResult>& future) { - return ProcessCheckpoints(future.GetValue(), generationContext, getContext, loadGraphDescription); + [generationContext, getContext, loadGraphDescription] (const TFuture<TDataQueryResult>& future) { + return ProcessCheckpoints(future.GetValue(), generationContext, getContext, loadGraphDescription); }); }); @@ -987,64 +987,64 @@ TFuture<TIssues> TCheckpointStorage::DeleteMarkedCheckpoints( auto future = YdbConnection->Client.RetryOperation( [prefix = YdbConnection->TablePathPrefix, graphId, checkpointUpperBound] (TSession session) { // TODO: use prepared queries - using namespace fmt::literals; - const TString query = fmt::format(R"sql( + using namespace fmt::literals; + const TString query = fmt::format(R"sql( --!syntax_v1 - PRAGMA 
TablePathPrefix("{table_path_prefix}"); - DECLARE $graph_id AS String; - DECLARE $coordinator_generation AS Uint64; - DECLARE $seq_no AS Uint64; - - $refs = - SELECT - COUNT(*) AS refs, graph_description_id - FROM {checkpoints_metadata_table_name} - WHERE graph_id = $graph_id AND status = {gc_status} - AND (coordinator_generation < $coordinator_generation OR - (coordinator_generation = $coordinator_generation AND seq_no < $seq_no)) - AND graph_description_id != "" -- legacy condition (excludes old records without graph description) - GROUP BY graph_description_id; - - $update = - SELECT - checkpoints_graphs_description.id AS id, - checkpoints_graphs_description.ref_count - refs.refs AS ref_count - FROM {checkpoints_graphs_description_table_name} - INNER JOIN (SELECT * FROM $refs) AS refs - ON refs.graph_description_id = checkpoints_graphs_description.id; - - UPDATE {checkpoints_graphs_description_table_name} - ON SELECT * FROM $update WHERE ref_count > 0; - - DELETE FROM {checkpoints_graphs_description_table_name} - ON SELECT * FROM $update WHERE ref_count = 0; - - DELETE FROM {checkpoints_metadata_table_name} - WHERE graph_id = $graph_id AND status = {gc_status} - AND (coordinator_generation < $coordinator_generation OR - (coordinator_generation = $coordinator_generation AND seq_no < $seq_no)); - )sql", - "table_path_prefix"_a = prefix, - "checkpoints_metadata_table_name"_a = CheckpointsMetadataTable, - "checkpoints_graphs_description_table_name"_a = CheckpointsGraphsDescriptionTable, - "gc_status"_a = static_cast<ui32>(ECheckpointStatus::GC) - ); - - NYdb::TParamsBuilder params; - params - .AddParam("$graph_id") - .String(graphId) - .Build() - .AddParam("$coordinator_generation") - .Uint64(checkpointUpperBound.CoordinatorGeneration) - .Build() - .AddParam("$seq_no") - .Uint64(checkpointUpperBound.SeqNo) - .Build(); - + PRAGMA TablePathPrefix("{table_path_prefix}"); + DECLARE $graph_id AS String; + DECLARE $coordinator_generation AS Uint64; + DECLARE $seq_no AS Uint64; + + $refs = + SELECT + COUNT(*) AS refs, graph_description_id + FROM {checkpoints_metadata_table_name} + WHERE graph_id = $graph_id AND status = {gc_status} + AND (coordinator_generation < $coordinator_generation OR + (coordinator_generation = $coordinator_generation AND seq_no < $seq_no)) + AND graph_description_id != "" -- legacy condition (excludes old records without graph description) + GROUP BY graph_description_id; + + $update = + SELECT + checkpoints_graphs_description.id AS id, + checkpoints_graphs_description.ref_count - refs.refs AS ref_count + FROM {checkpoints_graphs_description_table_name} + INNER JOIN (SELECT * FROM $refs) AS refs + ON refs.graph_description_id = checkpoints_graphs_description.id; + + UPDATE {checkpoints_graphs_description_table_name} + ON SELECT * FROM $update WHERE ref_count > 0; + + DELETE FROM {checkpoints_graphs_description_table_name} + ON SELECT * FROM $update WHERE ref_count = 0; + + DELETE FROM {checkpoints_metadata_table_name} + WHERE graph_id = $graph_id AND status = {gc_status} + AND (coordinator_generation < $coordinator_generation OR + (coordinator_generation = $coordinator_generation AND seq_no < $seq_no)); + )sql", + "table_path_prefix"_a = prefix, + "checkpoints_metadata_table_name"_a = CheckpointsMetadataTable, + "checkpoints_graphs_description_table_name"_a = CheckpointsGraphsDescriptionTable, + "gc_status"_a = static_cast<ui32>(ECheckpointStatus::GC) + ); + + NYdb::TParamsBuilder params; + params + .AddParam("$graph_id") + .String(graphId) + .Build() + 
.AddParam("$coordinator_generation") + .Uint64(checkpointUpperBound.CoordinatorGeneration) + .Build() + .AddParam("$seq_no") + .Uint64(checkpointUpperBound.SeqNo) + .Build(); + auto future = session.ExecuteDataQuery( query, - TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params.Build()); + TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params.Build()); return future.Apply( [] (const TFuture<TDataQueryResult>& future) { @@ -1055,7 +1055,7 @@ TFuture<TIssues> TCheckpointStorage::DeleteMarkedCheckpoints( return StatusToIssues(future); } -TFuture<ICheckpointStorage::TAddToStateSizeResult> NYq::TCheckpointStorage::AddToStateSize(const TString& graphId, const TCheckpointId& checkpointId, ui64 size) { +TFuture<ICheckpointStorage::TAddToStateSizeResult> NYq::TCheckpointStorage::AddToStateSize(const TString& graphId, const TCheckpointId& checkpointId, ui64 size) { auto result = MakeIntrusive<TAddToStateSizeContext>(); auto future = YdbConnection->Client.RetryOperation( [prefix = YdbConnection->TablePathPrefix, graphId, checkpointId, size, result, thisPtr = TIntrusivePtr(this)](TSession session) { @@ -1120,7 +1120,7 @@ TFuture<ICheckpointStorage::TAddToStateSizeResult> NYq::TCheckpointStorage::AddT }); } -TFuture<ICheckpointStorage::TGetTotalCheckpointsStateSizeResult> NYq::TCheckpointStorage::GetTotalCheckpointsStateSize(const TString& graphId) { +TFuture<ICheckpointStorage::TGetTotalCheckpointsStateSizeResult> NYq::TCheckpointStorage::GetTotalCheckpointsStateSize(const TString& graphId) { auto result = MakeIntrusive<TGetTotalCheckpointsStateSizeContext>(); auto future = YdbConnection->Client.RetryOperation( [prefix = YdbConnection->TablePathPrefix, graphId, thisPtr = TIntrusivePtr(this), result](TSession session) { @@ -1169,7 +1169,7 @@ TFuture<ICheckpointStorage::TGetTotalCheckpointsStateSizeResult> NYq::TCheckpoin }); } -TExecDataQuerySettings NYq::TCheckpointStorage::DefaultExecDataQuerySettings() { +TExecDataQuerySettings NYq::TCheckpointStorage::DefaultExecDataQuerySettings() { return TExecDataQuerySettings() .KeepInQueryCache(true) .ClientTimeout(TDuration::Seconds(Config.GetClientTimeoutSec())) @@ -1183,11 +1183,11 @@ TExecDataQuerySettings NYq::TCheckpointStorage::DefaultExecDataQuerySettings() { TCheckpointStoragePtr NewYdbCheckpointStorage( const NConfig::TYdbStorageConfig& config, - const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, - const IEntityIdGenerator::TPtr& entityIdGenerator) + const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, + const IEntityIdGenerator::TPtr& entityIdGenerator) { - Y_VERIFY(entityIdGenerator); - return new TCheckpointStorage(config, credentialsProviderFactory, entityIdGenerator); + Y_VERIFY(entityIdGenerator); + return new TCheckpointStorage(config, credentialsProviderFactory, entityIdGenerator); } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/ydb_checkpoint_storage.h b/ydb/core/yq/libs/checkpoint_storage/ydb_checkpoint_storage.h index c52b006f916..e78f681c24b 100644 --- a/ydb/core/yq/libs/checkpoint_storage/ydb_checkpoint_storage.h +++ b/ydb/core/yq/libs/checkpoint_storage/ydb_checkpoint_storage.h @@ -3,16 +3,16 @@ #include "checkpoint_storage.h" #include <ydb/library/security/ydb_credentials_provider_factory.h> -#include <ydb/core/yq/libs/common/entity_id.h> +#include <ydb/core/yq/libs/common/entity_id.h> #include <ydb/core/yq/libs/config/protos/storage.pb.h> -namespace NYq { +namespace NYq { 
//////////////////////////////////////////////////////////////////////////////// TCheckpointStoragePtr NewYdbCheckpointStorage( const NConfig::TYdbStorageConfig& config, - const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, - const IEntityIdGenerator::TPtr& entityIdGenerator); + const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, + const IEntityIdGenerator::TPtr& entityIdGenerator); -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/ydb_state_storage.cpp b/ydb/core/yq/libs/checkpoint_storage/ydb_state_storage.cpp index ab209ec1e89..f0f4f81a1ce 100644 --- a/ydb/core/yq/libs/checkpoint_storage/ydb_state_storage.cpp +++ b/ydb/core/yq/libs/checkpoint_storage/ydb_state_storage.cpp @@ -1,15 +1,15 @@ #include "ydb_state_storage.h" -#include <ydb/core/yq/libs/actors/logging/log.h> +#include <ydb/core/yq/libs/actors/logging/log.h> #include <ydb/core/yq/libs/ydb/util.h> #include <ydb/core/yq/libs/ydb/ydb.h> #include <ydb/public/sdk/cpp/client/ydb_scheme/scheme.h> #include <util/stream/str.h> -#include <util/string/join.h> +#include <util/string/join.h> -namespace NYq { +namespace NYq { using namespace NThreading; using namespace NYdb; @@ -27,36 +27,36 @@ const char* StatesTable = "states"; struct TContext : public TThrRefBase { const TString TablePathPrefix; - const std::vector<ui64> TaskIds; + const std::vector<ui64> TaskIds; const TString GraphId; const TCheckpointId CheckpointId; - std::vector<NYql::NDqProto::TComputeActorState> States; - TMaybe<TSession> Session; + std::vector<NYql::NDqProto::TComputeActorState> States; + TMaybe<TSession> Session; TContext(const TString& tablePathPrefix, - const std::vector<ui64>& taskIds, + const std::vector<ui64>& taskIds, TString graphId, const TCheckpointId& checkpointId, - std::vector<NYql::NDqProto::TComputeActorState> states = {}, - TMaybe<TSession> session = {}) + std::vector<NYql::NDqProto::TComputeActorState> states = {}, + TMaybe<TSession> session = {}) : TablePathPrefix(tablePathPrefix) - , TaskIds(taskIds) + , TaskIds(taskIds) , GraphId(std::move(graphId)) , CheckpointId(checkpointId) - , States(std::move(states)) + , States(std::move(states)) , Session(session) { } - - TContext(const TString& tablePathPrefix, - ui64 taskId, - TString graphId, - const TCheckpointId& checkpointId, - NYql::NDqProto::TComputeActorState state = {}, - TMaybe<TSession> session = {}) - : TContext(tablePathPrefix, std::vector{taskId}, std::move(graphId), checkpointId, std::vector{std::move(state)}, std::move(session)) - { - } + + TContext(const TString& tablePathPrefix, + ui64 taskId, + TString graphId, + const TCheckpointId& checkpointId, + NYql::NDqProto::TComputeActorState state = {}, + TMaybe<TSession> session = {}) + : TContext(tablePathPrefix, std::vector{taskId}, std::move(graphId), checkpointId, std::vector{std::move(state)}, std::move(session)) + { + } }; using TContextPtr = TIntrusivePtr<TContext>; @@ -71,66 +71,66 @@ using TCountStateContextPtr = TIntrusivePtr<TCountStateContext>; //////////////////////////////////////////////////////////////////////////////// -static void LoadState(NYql::NDqProto::TComputeActorState& state, const TString& serializedState) { - if (!state.ParseFromString(serializedState)) { // backward compatibility with YQL serialization - state.Clear(); - state.MutableMiniKqlProgram()->MutableData()->MutableStateData()->SetBlob(serializedState); - } -} - +static void LoadState(NYql::NDqProto::TComputeActorState& state, const TString& serializedState) { + if 
(!state.ParseFromString(serializedState)) { // backward compatibility with YQL serialization + state.Clear(); + state.MutableMiniKqlProgram()->MutableData()->MutableStateData()->SetBlob(serializedState); + } +} + TFuture<TStatus> ProcessState( const TDataQueryResult& selectResult, - const TContextPtr& context) + const TContextPtr& context) { if (!selectResult.IsSuccess()) { return MakeFuture<TStatus>(selectResult); } TResultSetParser parser(selectResult.GetResultSet(0)); - TStringBuilder errorMessage; - if (parser.RowsCount() == context->TaskIds.size()) { - context->States.resize(context->TaskIds.size()); - std::vector<bool> processed; - processed.resize(context->TaskIds.size()); - for (size_t i = 0; i < context->TaskIds.size(); ++i) { - if (!parser.TryNextRow()) { - errorMessage << "Can't get next row"; - break; - } - auto taskId = parser.ColumnParser("task_id").GetOptionalUint64(); - if (!taskId) { - errorMessage << "No task id in result"; - break; - } - const auto taskIt = std::find(context->TaskIds.begin(), context->TaskIds.end(), *taskId); - if (taskIt == context->TaskIds.end()) { - errorMessage << "Got unexpected task id"; - break; - } - const size_t taskIndex = std::distance(context->TaskIds.begin(), taskIt); - if (processed[taskIndex]) { - errorMessage << "Got duplicated task id"; - break; - } else { - processed[taskIndex] = true; - } - LoadState(context->States[taskIndex], *parser.ColumnParser("blob").GetOptionalString()); - } + TStringBuilder errorMessage; + if (parser.RowsCount() == context->TaskIds.size()) { + context->States.resize(context->TaskIds.size()); + std::vector<bool> processed; + processed.resize(context->TaskIds.size()); + for (size_t i = 0; i < context->TaskIds.size(); ++i) { + if (!parser.TryNextRow()) { + errorMessage << "Can't get next row"; + break; + } + auto taskId = parser.ColumnParser("task_id").GetOptionalUint64(); + if (!taskId) { + errorMessage << "No task id in result"; + break; + } + const auto taskIt = std::find(context->TaskIds.begin(), context->TaskIds.end(), *taskId); + if (taskIt == context->TaskIds.end()) { + errorMessage << "Got unexpected task id"; + break; + } + const size_t taskIndex = std::distance(context->TaskIds.begin(), taskIt); + if (processed[taskIndex]) { + errorMessage << "Got duplicated task id"; + break; + } else { + processed[taskIndex] = true; + } + LoadState(context->States[taskIndex], *parser.ColumnParser("blob").GetOptionalString()); + } } else { - errorMessage << "Not all states exist in database"; - } - - if (errorMessage) { + errorMessage << "Not all states exist in database"; + } + + if (errorMessage) { TIssues issues; TStringStream ss; ss << "Failed to select state of checkpoint '" << context->CheckpointId << "'" - << ", taskIds={" << JoinSeq(", ", context->TaskIds) << "}. Selected rows: " << parser.RowsCount(); + << ", taskIds={" << JoinSeq(", ", context->TaskIds) << "}. Selected rows: " << parser.RowsCount(); const auto& stats = selectResult.GetStats(); if (stats) { - ss << ". Stats: " << stats->ToString(); + ss << ". Stats: " << stats->ToString(); } - ss << ". " << errorMessage; + ss << ". 
" << errorMessage; // TODO: print status, etc @@ -161,10 +161,10 @@ public: ui64 taskId, const TString& graphId, const TCheckpointId& checkpointId, - const NYql::NDqProto::TComputeActorState& state) override; + const NYql::NDqProto::TComputeActorState& state) override; TFuture<TGetStateResult> GetState( - const std::vector<ui64>& taskIds, + const std::vector<ui64>& taskIds, const TString& graphId, const TCheckpointId& checkpointId) override; @@ -248,16 +248,16 @@ TFuture<TIssues> TStateStorage::SaveState( ui64 taskId, const TString& graphId, const TCheckpointId& checkpointId, - const NYql::NDqProto::TComputeActorState& state) + const NYql::NDqProto::TComputeActorState& state) { auto future = YdbConnection->Client.RetryOperation( - [prefix = YdbConnection->TablePathPrefix, taskId, graphId, checkpointId, state, thisPtr = TIntrusivePtr(this)] (TSession session) { + [prefix = YdbConnection->TablePathPrefix, taskId, graphId, checkpointId, state, thisPtr = TIntrusivePtr(this)] (TSession session) { auto context = MakeIntrusive<TContext>( prefix, taskId, graphId, checkpointId, - state, + state, session); return thisPtr->UpsertState(context); @@ -267,44 +267,44 @@ TFuture<TIssues> TStateStorage::SaveState( } TFuture<IStateStorage::TGetStateResult> TStateStorage::GetState( - const std::vector<ui64>& taskIds, + const std::vector<ui64>& taskIds, const TString& graphId, const TCheckpointId& checkpointId) { - if (taskIds.empty()) { - IStateStorage::TGetStateResult result; - result.second.AddIssue("Internal error loading state: no task ids specified"); - return MakeFuture<IStateStorage::TGetStateResult>(result); - } - - if (taskIds.size() > 1 && std::set<ui64>(taskIds.begin(), taskIds.end()).size() != taskIds.size()) { - IStateStorage::TGetStateResult result; - result.second.AddIssue("Internal error loading state: duplicated task ids specified"); - return MakeFuture<IStateStorage::TGetStateResult>(result); - } - - auto context = MakeIntrusive<TContext>( - YdbConnection->TablePathPrefix, - taskIds, - graphId, - checkpointId); + if (taskIds.empty()) { + IStateStorage::TGetStateResult result; + result.second.AddIssue("Internal error loading state: no task ids specified"); + return MakeFuture<IStateStorage::TGetStateResult>(result); + } + + if (taskIds.size() > 1 && std::set<ui64>(taskIds.begin(), taskIds.end()).size() != taskIds.size()) { + IStateStorage::TGetStateResult result; + result.second.AddIssue("Internal error loading state: duplicated task ids specified"); + return MakeFuture<IStateStorage::TGetStateResult>(result); + } + + auto context = MakeIntrusive<TContext>( + YdbConnection->TablePathPrefix, + taskIds, + graphId, + checkpointId); auto future = YdbConnection->Client.RetryOperation( - [context, thisPtr = TIntrusivePtr(this)] (TSession session) { - context->Session = session; + [context, thisPtr = TIntrusivePtr(this)] (TSession session) { + context->Session = session; auto future = thisPtr->SelectState(context); return future.Apply( - [context] (const TFuture<TDataQueryResult>& future) { - return ProcessState(future.GetValue(), context); + [context] (const TFuture<TDataQueryResult>& future) { + return ProcessState(future.GetValue(), context); }); }); return StatusToIssues(future).Apply( - [context] (const TFuture<TIssues>& future) { - TGetStateResult result; - std::swap(result.first, context->States); - result.second = future.GetValue(); - return MakeFuture(std::move(result)); + [context] (const TFuture<TIssues>& future) { + TGetStateResult result; + std::swap(result.first, context->States); + 
result.second = future.GetValue(); + return MakeFuture(std::move(result)); }); } @@ -463,16 +463,16 @@ TFuture<TIssues> TStateStorage::DeleteCheckpoints( TFuture<TDataQueryResult> TStateStorage::SelectState(const TContextPtr& context) { NYdb::TParamsBuilder paramsBuilder; - Y_VERIFY(!context->TaskIds.empty()); - if (context->TaskIds.size() == 1) { - paramsBuilder.AddParam("$task_id").Uint64(context->TaskIds[0]).Build(); - } else { - auto& taskIdsParam = paramsBuilder.AddParam("$task_ids").BeginList(); - for (const ui64 taskId : context->TaskIds) { - taskIdsParam.AddListItem().Uint64(taskId); - } - taskIdsParam.EndList().Build(); - } + Y_VERIFY(!context->TaskIds.empty()); + if (context->TaskIds.size() == 1) { + paramsBuilder.AddParam("$task_id").Uint64(context->TaskIds[0]).Build(); + } else { + auto& taskIdsParam = paramsBuilder.AddParam("$task_ids").BeginList(); + for (const ui64 taskId : context->TaskIds) { + taskIdsParam.AddListItem().Uint64(taskId); + } + taskIdsParam.EndList().Build(); + } paramsBuilder.AddParam("$graph_id").String(context->GraphId).Build(); paramsBuilder.AddParam("$coordinator_generation").Uint64(context->CheckpointId.CoordinatorGeneration).Build(); paramsBuilder.AddParam("$seq_no").Uint64(context->CheckpointId.SeqNo).Build(); @@ -483,22 +483,22 @@ TFuture<TDataQueryResult> TStateStorage::SelectState(const TContextPtr& context) --!syntax_v1 PRAGMA TablePathPrefix("%s"); - %s; - DECLARE $graph_id AS string; - DECLARE $coordinator_generation AS Uint64; - DECLARE $seq_no AS Uint64; + %s; + DECLARE $graph_id AS string; + DECLARE $coordinator_generation AS Uint64; + DECLARE $seq_no AS Uint64; - SELECT task_id, blob + SELECT task_id, blob FROM %s - WHERE %s AND graph_id = $graph_id AND coordinator_generation = $coordinator_generation AND seq_no = $seq_no; - )", - context->TablePathPrefix.c_str(), - context->TaskIds.size() == 1 ? "DECLARE $task_id AS Uint64" : "DECLARE $task_ids AS List<Uint64>", - StatesTable, - context->TaskIds.size() == 1 ? "task_id = $task_id" : "task_id IN $task_ids"); - - Y_VERIFY(context->Session); - return context->Session->ExecuteDataQuery( + WHERE %s AND graph_id = $graph_id AND coordinator_generation = $coordinator_generation AND seq_no = $seq_no; + )", + context->TablePathPrefix.c_str(), + context->TaskIds.size() == 1 ? "DECLARE $task_id AS Uint64" : "DECLARE $task_ids AS List<Uint64>", + StatesTable, + context->TaskIds.size() == 1 ? 
"task_id = $task_id" : "task_id IN $task_ids"); + + Y_VERIFY(context->Session); + return context->Session->ExecuteDataQuery( query, TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params, @@ -506,20 +506,20 @@ TFuture<TDataQueryResult> TStateStorage::SelectState(const TContextPtr& context) } TFuture<TStatus> TStateStorage::UpsertState(const TContextPtr& context) { - Y_VERIFY(context->States.size() == 1); - TString serializedState; - if (!context->States[0].SerializeToString(&serializedState)) { - return MakeFuture(MakeErrorStatus(EStatus::BAD_REQUEST, "Failed to serialize compute actor state", NYql::TSeverityIds::S_ERROR)); - } + Y_VERIFY(context->States.size() == 1); + TString serializedState; + if (!context->States[0].SerializeToString(&serializedState)) { + return MakeFuture(MakeErrorStatus(EStatus::BAD_REQUEST, "Failed to serialize compute actor state", NYql::TSeverityIds::S_ERROR)); + } // publish nodes NYdb::TParamsBuilder paramsBuilder; - Y_VERIFY(context->TaskIds.size() == 1); - paramsBuilder.AddParam("$task_id").Uint64(context->TaskIds[0]).Build(); + Y_VERIFY(context->TaskIds.size() == 1); + paramsBuilder.AddParam("$task_id").Uint64(context->TaskIds[0]).Build(); paramsBuilder.AddParam("$graph_id").String(context->GraphId).Build(); paramsBuilder.AddParam("$coordinator_generation").Uint64(context->CheckpointId.CoordinatorGeneration).Build(); paramsBuilder.AddParam("$seq_no").Uint64(context->CheckpointId.SeqNo).Build(); - paramsBuilder.AddParam("$blob").String(serializedState).Build(); + paramsBuilder.AddParam("$blob").String(serializedState).Build(); auto params = paramsBuilder.Build(); @@ -537,8 +537,8 @@ TFuture<TStatus> TStateStorage::UpsertState(const TContextPtr& context) { ($task_id, $graph_id, $coordinator_generation, $seq_no, $blob); )", context->TablePathPrefix.c_str(), StatesTable); - Y_VERIFY(context->Session); - auto future = context->Session->ExecuteDataQuery( + Y_VERIFY(context->Session); + auto future = context->Session->ExecuteDataQuery( query, TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params, @@ -561,4 +561,4 @@ TStateStoragePtr NewYdbStateStorage( return new TStateStorage(config, credentialsProviderFactory); } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpoint_storage/ydb_state_storage.h b/ydb/core/yq/libs/checkpoint_storage/ydb_state_storage.h index 61dba973de7..dfbec53ef20 100644 --- a/ydb/core/yq/libs/checkpoint_storage/ydb_state_storage.h +++ b/ydb/core/yq/libs/checkpoint_storage/ydb_state_storage.h @@ -5,7 +5,7 @@ #include <ydb/library/security/ydb_credentials_provider_factory.h> #include <ydb/core/yq/libs/config/protos/storage.pb.h> -namespace NYq { +namespace NYq { //////////////////////////////////////////////////////////////////////////////// @@ -13,4 +13,4 @@ TStateStoragePtr NewYdbStateStorage( const NConfig::TYdbStorageConfig& config, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory); -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/checkpoint_coordinator.cpp b/ydb/core/yq/libs/checkpointing/checkpoint_coordinator.cpp index d9d354c2e5a..0e09bf453a2 100644 --- a/ydb/core/yq/libs/checkpointing/checkpoint_coordinator.cpp +++ b/ydb/core/yq/libs/checkpointing/checkpoint_coordinator.cpp @@ -2,55 +2,55 @@ #include "checkpoint_coordinator.h" -#include <ydb/core/yq/libs/actors/logging/log.h> +#include <ydb/core/yq/libs/actors/logging/log.h> #include <library/cpp/actors/core/actor.h> #include <library/cpp/actors/core/hfunc.h> #include 
<ydb/library/yql/dq/actors/compute/dq_compute_actor_checkpoints.h> #include <ydb/library/yql/dq/actors/dq.h> -#include <ydb/library/yql/dq/state/dq_state_load_plan.h> - -#include <util/string/builder.h> +#include <ydb/library/yql/dq/state/dq_state_load_plan.h> +#include <util/string/builder.h> + #include <utility> -#define CC_LOG_D(stream) \ - LOG_STREAMS_CHECKPOINT_COORDINATOR_DEBUG("[" << CoordinatorId << "] " << stream) -#define CC_LOG_I(stream) \ - LOG_STREAMS_CHECKPOINT_COORDINATOR_INFO("[" << CoordinatorId << "] " << stream) -#define CC_LOG_W(stream) \ - LOG_STREAMS_CHECKPOINT_COORDINATOR_WARN("[" << CoordinatorId << "] " << stream) -#define CC_LOG_E(stream) \ - LOG_STREAMS_CHECKPOINT_COORDINATOR_ERROR("[" << CoordinatorId << "] " << stream) - -namespace NYq { +#define CC_LOG_D(stream) \ + LOG_STREAMS_CHECKPOINT_COORDINATOR_DEBUG("[" << CoordinatorId << "] " << stream) +#define CC_LOG_I(stream) \ + LOG_STREAMS_CHECKPOINT_COORDINATOR_INFO("[" << CoordinatorId << "] " << stream) +#define CC_LOG_W(stream) \ + LOG_STREAMS_CHECKPOINT_COORDINATOR_WARN("[" << CoordinatorId << "] " << stream) +#define CC_LOG_E(stream) \ + LOG_STREAMS_CHECKPOINT_COORDINATOR_ERROR("[" << CoordinatorId << "] " << stream) + +namespace NYq { TCheckpointCoordinator::TCheckpointCoordinator(TCoordinatorId coordinatorId, const TActorId& taskControllerId, const TActorId& storageProxy, - const TActorId& runActorId, + const TActorId& runActorId, const TCheckpointCoordinatorConfig& settings, const NMonitoring::TDynamicCounterPtr& counters, - const NProto::TGraphParams& graphParams, - const YandexQuery::StateLoadMode& stateLoadMode, - const YandexQuery::StreamingDisposition& streamingDisposition) + const NProto::TGraphParams& graphParams, + const YandexQuery::StateLoadMode& stateLoadMode, + const YandexQuery::StreamingDisposition& streamingDisposition) : CoordinatorId(std::move(coordinatorId)) , TaskControllerId(taskControllerId) , StorageProxy(storageProxy) - , RunActorId(runActorId) + , RunActorId(runActorId) , Settings(settings) , CheckpointingPeriod(TDuration::MilliSeconds(Settings.GetCheckpointingPeriodMillis())) - , GraphParams(graphParams) + , GraphParams(graphParams) , Metrics(TCheckpointCoordinatorMetrics(counters)) - , StateLoadMode(stateLoadMode) - , StreamingDisposition(streamingDisposition) -{ + , StateLoadMode(stateLoadMode) + , StreamingDisposition(streamingDisposition) +{ } void TCheckpointCoordinator::Bootstrap() { Become(&TThis::DispatchEvent); - CC_LOG_D("Bootstrapped with streaming disposition " << StreamingDisposition << " and state load mode " << YandexQuery::StateLoadMode_Name(StateLoadMode)); + CC_LOG_D("Bootstrapped with streaming disposition " << StreamingDisposition << " and state load mode " << YandexQuery::StateLoadMode_Name(StateLoadMode)); } void TCheckpointCoordinator::Handle(const NYql::NDqs::TEvReadyState::TPtr& ev) { @@ -60,37 +60,37 @@ void TCheckpointCoordinator::Handle(const NYql::NDqs::TEvReadyState::TPtr& ev) { for (int i = 0; i < static_cast<int>(tasks.size()); ++i) { auto& task = tasks[i]; - auto& actorId = TaskIdToActor[task.GetId()]; - if (actorId) { - Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError(TStringBuilder() << "Duplicate task id: " << task.GetId())); - return; - } - actorId = ActorIdFromProto(actorIds[i]); - - TComputeActorTransportStuff::TPtr transport = AllActors[actorId] = MakeIntrusive<TComputeActorTransportStuff>(); - transport->EventsQueue.Init(CoordinatorId.ToString(), SelfId(), SelfId(), task.GetId()); - 
transport->EventsQueue.OnNewRecipientId(actorId); - if (NYql::NDq::GetTaskCheckpointingMode(task) != NYql::NDqProto::CHECKPOINTING_MODE_DISABLED) { - if (IsIngress(task)) { - ActorsToTrigger[actorId] = transport; - ActorsToNotify[actorId] = transport; - ActorsToNotifySet.insert(actorId); - } - if (IsEgress(task)) { - ActorsToNotify[actorId] = transport; - ActorsToNotifySet.insert(actorId); - } - if (HasState(task)) { - ActorsToWaitFor[actorId] = transport; - ActorsToWaitForSet.insert(actorId); - } + auto& actorId = TaskIdToActor[task.GetId()]; + if (actorId) { + Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError(TStringBuilder() << "Duplicate task id: " << task.GetId())); + return; + } + actorId = ActorIdFromProto(actorIds[i]); + + TComputeActorTransportStuff::TPtr transport = AllActors[actorId] = MakeIntrusive<TComputeActorTransportStuff>(); + transport->EventsQueue.Init(CoordinatorId.ToString(), SelfId(), SelfId(), task.GetId()); + transport->EventsQueue.OnNewRecipientId(actorId); + if (NYql::NDq::GetTaskCheckpointingMode(task) != NYql::NDqProto::CHECKPOINTING_MODE_DISABLED) { + if (IsIngress(task)) { + ActorsToTrigger[actorId] = transport; + ActorsToNotify[actorId] = transport; + ActorsToNotifySet.insert(actorId); + } + if (IsEgress(task)) { + ActorsToNotify[actorId] = transport; + ActorsToNotifySet.insert(actorId); + } + if (HasState(task)) { + ActorsToWaitFor[actorId] = transport; + ActorsToWaitForSet.insert(actorId); + } } - AllActorsSet.insert(actorId); + AllActorsSet.insert(actorId); } - PendingInit = std::make_unique<TPendingInitCoordinator>(AllActors.size()); - - CC_LOG_D("Send TEvRegisterCoordinatorRequest"); + PendingInit = std::make_unique<TPendingInitCoordinator>(AllActors.size()); + + CC_LOG_D("Send TEvRegisterCoordinatorRequest"); Send(StorageProxy, new TEvCheckpointStorage::TEvRegisterCoordinatorRequest(CoordinatorId), IEventHandle::FlagTrackDelivery); } @@ -107,68 +107,68 @@ void TCheckpointCoordinator::UpdateInProgressMetric() { } void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvRegisterCoordinatorResponse::TPtr& ev) { - CC_LOG_D("Got TEvRegisterCoordinatorResponse; issues: " << ev->Get()->Issues.ToOneLineString()); + CC_LOG_D("Got TEvRegisterCoordinatorResponse; issues: " << ev->Get()->Issues.ToOneLineString()); const auto& issues = ev->Get()->Issues; if (issues) { - auto message = "Can't register in storage: " + issues.ToOneLineString(); - CC_LOG_E(message); + auto message = "Can't register in storage: " + issues.ToOneLineString(); + CC_LOG_E(message); ++*Metrics.StorageError; Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError(message)); return; } - CC_LOG_D("Successfully registered in storage"); - CC_LOG_I("Send TEvNewCheckpointCoordinator to " << AllActors.size() << " actor(s)"); - for (const auto& [actor, transport] : AllActors) { - transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvNewCheckpointCoordinator(CoordinatorId.Generation, CoordinatorId.GraphId)); - } - - const bool needCheckpointMetadata = StateLoadMode == YandexQuery::StateLoadMode::FROM_LAST_CHECKPOINT || StreamingDisposition.has_from_last_checkpoint(); - if (needCheckpointMetadata) { - const bool loadGraphDescription = StateLoadMode == YandexQuery::StateLoadMode::EMPTY && StreamingDisposition.has_from_last_checkpoint(); - CC_LOG_I("Send TEvGetCheckpointsMetadataRequest; state load mode: " << YandexQuery::StateLoadMode_Name(StateLoadMode) << "; load graph: " << loadGraphDescription); - Send(StorageProxy, - new 
TEvCheckpointStorage::TEvGetCheckpointsMetadataRequest( - CoordinatorId.GraphId, - {ECheckpointStatus::PendingCommit, ECheckpointStatus::Completed}, - 1, - loadGraphDescription), - IEventHandle::FlagTrackDelivery); - } else if (StateLoadMode == YandexQuery::StateLoadMode::EMPTY) { - ++*Metrics.StartedFromEmptyCheckpoint; - CheckpointIdGenerator = std::make_unique<TCheckpointIdGenerator>(CoordinatorId); - InitingZeroCheckpoint = true; - InitCheckpoint(); - ScheduleNextCheckpoint(); - } else { - Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError(TStringBuilder() << "Unexpected state load mode (" << YandexQuery::StateLoadMode_Name(StateLoadMode) << ") and streaming disposition " << StreamingDisposition)); - } -} - -void TCheckpointCoordinator::Handle(const NYql::NDq::TEvDqCompute::TEvNewCheckpointCoordinatorAck::TPtr& ev) { - if (!OnComputeActorEventReceived(ev)) { - return; - } - - Y_VERIFY(PendingInit); - PendingInit->OnNewCheckpointCoordinatorAck(); - - if (PendingInit->CanInjectCheckpoint()) { - auto checkpointId = *PendingInit->CheckpointId; - InjectCheckpoint(checkpointId); + CC_LOG_D("Successfully registered in storage"); + CC_LOG_I("Send TEvNewCheckpointCoordinator to " << AllActors.size() << " actor(s)"); + for (const auto& [actor, transport] : AllActors) { + transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvNewCheckpointCoordinator(CoordinatorId.Generation, CoordinatorId.GraphId)); + } + + const bool needCheckpointMetadata = StateLoadMode == YandexQuery::StateLoadMode::FROM_LAST_CHECKPOINT || StreamingDisposition.has_from_last_checkpoint(); + if (needCheckpointMetadata) { + const bool loadGraphDescription = StateLoadMode == YandexQuery::StateLoadMode::EMPTY && StreamingDisposition.has_from_last_checkpoint(); + CC_LOG_I("Send TEvGetCheckpointsMetadataRequest; state load mode: " << YandexQuery::StateLoadMode_Name(StateLoadMode) << "; load graph: " << loadGraphDescription); + Send(StorageProxy, + new TEvCheckpointStorage::TEvGetCheckpointsMetadataRequest( + CoordinatorId.GraphId, + {ECheckpointStatus::PendingCommit, ECheckpointStatus::Completed}, + 1, + loadGraphDescription), + IEventHandle::FlagTrackDelivery); + } else if (StateLoadMode == YandexQuery::StateLoadMode::EMPTY) { + ++*Metrics.StartedFromEmptyCheckpoint; + CheckpointIdGenerator = std::make_unique<TCheckpointIdGenerator>(CoordinatorId); + InitingZeroCheckpoint = true; + InitCheckpoint(); + ScheduleNextCheckpoint(); + } else { + Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError(TStringBuilder() << "Unexpected state load mode (" << YandexQuery::StateLoadMode_Name(StateLoadMode) << ") and streaming disposition " << StreamingDisposition)); } } +void TCheckpointCoordinator::Handle(const NYql::NDq::TEvDqCompute::TEvNewCheckpointCoordinatorAck::TPtr& ev) { + if (!OnComputeActorEventReceived(ev)) { + return; + } + + Y_VERIFY(PendingInit); + PendingInit->OnNewCheckpointCoordinatorAck(); + + if (PendingInit->CanInjectCheckpoint()) { + auto checkpointId = *PendingInit->CheckpointId; + InjectCheckpoint(checkpointId); + } +} + void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvGetCheckpointsMetadataResponse::TPtr& ev) { const auto event = ev->Get(); const auto& checkpoints = event->Checkpoints; - CC_LOG_D("Got TEvGetCheckpointsMetadataResponse"); + CC_LOG_D("Got TEvGetCheckpointsMetadataResponse"); Y_VERIFY(!PendingRestoreCheckpoint); if (event->Issues) { ++*Metrics.StorageError; - auto message = "Can't get checkpoints to restore: " + 
event->Issues.ToOneLineString(); - CC_LOG_E(message); + auto message = "Can't get checkpoints to restore: " + event->Issues.ToOneLineString(); + CC_LOG_E(message); Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError(message)); return; } @@ -177,90 +177,90 @@ void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvGetCheckpoint if (!checkpoints.empty()) { const auto& checkpoint = checkpoints.at(0); CheckpointIdGenerator = std::make_unique<TCheckpointIdGenerator>(CoordinatorId, checkpoint.CheckpointId); - const bool needRestoreOffsets = StateLoadMode == YandexQuery::StateLoadMode::EMPTY && StreamingDisposition.has_from_last_checkpoint(); - if (needRestoreOffsets) { - TryToRestoreOffsetsFromForeignCheckpoint(checkpoint); - } else { - RestoreFromOwnCheckpoint(checkpoint); + const bool needRestoreOffsets = StateLoadMode == YandexQuery::StateLoadMode::EMPTY && StreamingDisposition.has_from_last_checkpoint(); + if (needRestoreOffsets) { + TryToRestoreOffsetsFromForeignCheckpoint(checkpoint); + } else { + RestoreFromOwnCheckpoint(checkpoint); } return; } - // Not restored from existing checkpoint. Init zero checkpoint - ++*Metrics.StartedFromEmptyCheckpoint; - CheckpointIdGenerator = std::make_unique<TCheckpointIdGenerator>(CoordinatorId); - CC_LOG_I("Found no checkpoints to restore from, creating a 'zero' checkpoint"); - InitingZeroCheckpoint = true; + // Not restored from existing checkpoint. Init zero checkpoint + ++*Metrics.StartedFromEmptyCheckpoint; + CheckpointIdGenerator = std::make_unique<TCheckpointIdGenerator>(CoordinatorId); + CC_LOG_I("Found no checkpoints to restore from, creating a 'zero' checkpoint"); + InitingZeroCheckpoint = true; InitCheckpoint(); ScheduleNextCheckpoint(); } -void TCheckpointCoordinator::RestoreFromOwnCheckpoint(const TCheckpointMetadata& checkpoint) { - CC_LOG_I("Will restore from checkpoint " << checkpoint.CheckpointId); - PendingRestoreCheckpoint = TPendingRestoreCheckpoint(checkpoint.CheckpointId, checkpoint.Status == ECheckpointStatus::PendingCommit, ActorsToWaitForSet); - ++*Metrics.RestoredFromSavedCheckpoint; - for (const auto& [actor, transport] : ActorsToWaitFor) { - transport->EventsQueue.Send( - new NYql::NDq::TEvDqCompute::TEvRestoreFromCheckpoint(checkpoint.CheckpointId.SeqNo, checkpoint.CheckpointId.CoordinatorGeneration, CoordinatorId.Generation)); - } -} - -void TCheckpointCoordinator::TryToRestoreOffsetsFromForeignCheckpoint(const TCheckpointMetadata& checkpoint) { - RestoringFromForeignCheckpoint = true; - CC_LOG_I("Will try to restore streaming offsets from checkpoint " << checkpoint.CheckpointId); - if (!checkpoint.Graph) { - ++*Metrics.StorageError; - const TString message = TStringBuilder() << "Can't get graph params from checkpoint " << checkpoint.CheckpointId; - CC_LOG_I(message); - Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError(message)); - return; - } - - NYql::TIssues issues; - THashMap<ui64, NYql::NDqProto::NDqStateLoadPlan::TTaskPlan> plan; - const bool result = NYql::NDq::MakeContinueFromStreamingOffsetsPlan( - checkpoint.Graph->GetTasks(), - GraphParams.GetTasks(), - StreamingDisposition.from_last_checkpoint().force(), - plan, - issues); - - if (issues) { - CC_LOG_I(issues.ToOneLineString()); - } - - if (!result) { - Send(TaskControllerId, new NYql::NDq::TEvDq::TEvAbortExecution(Ydb::StatusIds::BAD_REQUEST, issues.ToString())); - return; - } - - PendingRestoreCheckpoint = TPendingRestoreCheckpoint(checkpoint.CheckpointId, false, ActorsToWaitForSet); - 
++*Metrics.RestoredStreamingOffsetsFromCheckpoint; - for (const auto& [taskId, taskPlan] : plan) { - const auto actorIdIt = TaskIdToActor.find(taskId); - if (actorIdIt == TaskIdToActor.end()) { - const TString msg = TStringBuilder() << "ActorId for task id " << taskId << " was not found"; - CC_LOG_E(msg); - Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError(msg)); - return; - } - const auto transportIt = ActorsToWaitFor.find(actorIdIt->second); - if (transportIt != ActorsToWaitFor.end()) { - transportIt->second->EventsQueue.Send( - new NYql::NDq::TEvDqCompute::TEvRestoreFromCheckpoint( - checkpoint.CheckpointId.SeqNo, - checkpoint.CheckpointId.CoordinatorGeneration, - CoordinatorId.Generation, - taskPlan)); - } - } -} - +void TCheckpointCoordinator::RestoreFromOwnCheckpoint(const TCheckpointMetadata& checkpoint) { + CC_LOG_I("Will restore from checkpoint " << checkpoint.CheckpointId); + PendingRestoreCheckpoint = TPendingRestoreCheckpoint(checkpoint.CheckpointId, checkpoint.Status == ECheckpointStatus::PendingCommit, ActorsToWaitForSet); + ++*Metrics.RestoredFromSavedCheckpoint; + for (const auto& [actor, transport] : ActorsToWaitFor) { + transport->EventsQueue.Send( + new NYql::NDq::TEvDqCompute::TEvRestoreFromCheckpoint(checkpoint.CheckpointId.SeqNo, checkpoint.CheckpointId.CoordinatorGeneration, CoordinatorId.Generation)); + } +} + +void TCheckpointCoordinator::TryToRestoreOffsetsFromForeignCheckpoint(const TCheckpointMetadata& checkpoint) { + RestoringFromForeignCheckpoint = true; + CC_LOG_I("Will try to restore streaming offsets from checkpoint " << checkpoint.CheckpointId); + if (!checkpoint.Graph) { + ++*Metrics.StorageError; + const TString message = TStringBuilder() << "Can't get graph params from checkpoint " << checkpoint.CheckpointId; + CC_LOG_I(message); + Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError(message)); + return; + } + + NYql::TIssues issues; + THashMap<ui64, NYql::NDqProto::NDqStateLoadPlan::TTaskPlan> plan; + const bool result = NYql::NDq::MakeContinueFromStreamingOffsetsPlan( + checkpoint.Graph->GetTasks(), + GraphParams.GetTasks(), + StreamingDisposition.from_last_checkpoint().force(), + plan, + issues); + + if (issues) { + CC_LOG_I(issues.ToOneLineString()); + } + + if (!result) { + Send(TaskControllerId, new NYql::NDq::TEvDq::TEvAbortExecution(Ydb::StatusIds::BAD_REQUEST, issues.ToString())); + return; + } + + PendingRestoreCheckpoint = TPendingRestoreCheckpoint(checkpoint.CheckpointId, false, ActorsToWaitForSet); + ++*Metrics.RestoredStreamingOffsetsFromCheckpoint; + for (const auto& [taskId, taskPlan] : plan) { + const auto actorIdIt = TaskIdToActor.find(taskId); + if (actorIdIt == TaskIdToActor.end()) { + const TString msg = TStringBuilder() << "ActorId for task id " << taskId << " was not found"; + CC_LOG_E(msg); + Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError(msg)); + return; + } + const auto transportIt = ActorsToWaitFor.find(actorIdIt->second); + if (transportIt != ActorsToWaitFor.end()) { + transportIt->second->EventsQueue.Send( + new NYql::NDq::TEvDqCompute::TEvRestoreFromCheckpoint( + checkpoint.CheckpointId.SeqNo, + checkpoint.CheckpointId.CoordinatorGeneration, + CoordinatorId.Generation, + taskPlan)); + } + } +} + void TCheckpointCoordinator::Handle(const NYql::NDq::TEvDqCompute::TEvRestoreFromCheckpointResult::TPtr& ev) { - if (!OnComputeActorEventReceived(ev)) { - return; - } - + if (!OnComputeActorEventReceived(ev)) { + return; + } + const auto& record = 
ev->Get()->Record; const auto& checkpointProto = record.GetCheckpoint(); const TCheckpointId checkpoint(checkpointProto.GetGeneration(), checkpointProto.GetId()); @@ -268,52 +268,52 @@ void TCheckpointCoordinator::Handle(const NYql::NDq::TEvDqCompute::TEvRestoreFro const TString& statusName = NYql::NDqProto::TEvRestoreFromCheckpointResult_ERestoreStatus_Name(status); CC_LOG_D("[" << checkpoint << "] Got TEvRestoreFromCheckpointResult; taskId: "<< record.GetTaskId() << ", checkpoint: " << checkpoint - << ", status: " << statusName); + << ", status: " << statusName); if (!PendingRestoreCheckpoint) { - CC_LOG_E("[" << checkpoint << "] Got TEvRestoreFromCheckpointResult but has no PendingRestoreCheckpoint"); + CC_LOG_E("[" << checkpoint << "] Got TEvRestoreFromCheckpointResult but has no PendingRestoreCheckpoint"); Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError("Got TEvRestoreFromCheckpointResult but has no PendingRestoreCheckpoint")); return; } if (PendingRestoreCheckpoint->CheckpointId != checkpoint) { - CC_LOG_E("[" << checkpoint << "] Got TEvRestoreFromCheckpointResult event with unexpected checkpoint: " << checkpoint << ", expected: " << PendingRestoreCheckpoint->CheckpointId); + CC_LOG_E("[" << checkpoint << "] Got TEvRestoreFromCheckpointResult event with unexpected checkpoint: " << checkpoint << ", expected: " << PendingRestoreCheckpoint->CheckpointId); Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::InternalError("Got unexpected checkpoint")); return; } if (status != NYql::NDqProto::TEvRestoreFromCheckpointResult_ERestoreStatus_OK) { - CC_LOG_E("[" << checkpoint << "] Can't restore: " << statusName); + CC_LOG_E("[" << checkpoint << "] Can't restore: " << statusName); Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::Aborted("Can't restore: " + statusName)); return; } PendingRestoreCheckpoint->Acknowledge(ev->Sender); - CC_LOG_D("[" << checkpoint << "] Task state restored, need " << PendingRestoreCheckpoint->NotYetAcknowledgedCount() << " more acks"); + CC_LOG_D("[" << checkpoint << "] Task state restored, need " << PendingRestoreCheckpoint->NotYetAcknowledgedCount() << " more acks"); if (PendingRestoreCheckpoint->GotAllAcknowledges()) { - if (PendingInit) { - PendingInit = nullptr; - } - + if (PendingInit) { + PendingInit = nullptr; + } + if (PendingRestoreCheckpoint->CommitAfterRestore) { - CC_LOG_I("[" << checkpoint << "] State restored, send TEvCommitState to " << ActorsToNotify.size() << " actor(s)"); - PendingCommitCheckpoints.emplace(checkpoint, TPendingCheckpoint(ActorsToNotifySet)); + CC_LOG_I("[" << checkpoint << "] State restored, send TEvCommitState to " << ActorsToNotify.size() << " actor(s)"); + PendingCommitCheckpoints.emplace(checkpoint, TPendingCheckpoint(ActorsToNotifySet)); UpdateInProgressMetric(); - for (const auto& [actor, transport] : ActorsToNotify) { - transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvCommitState(checkpoint.SeqNo, checkpoint.CoordinatorGeneration, CoordinatorId.Generation)); + for (const auto& [actor, transport] : ActorsToNotify) { + transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvCommitState(checkpoint.SeqNo, checkpoint.CoordinatorGeneration, CoordinatorId.Generation)); } } - if (RestoringFromForeignCheckpoint) { - InitingZeroCheckpoint = true; - InitCheckpoint(); - } - + if (RestoringFromForeignCheckpoint) { + InitingZeroCheckpoint = true; + InitCheckpoint(); + } + ScheduleNextCheckpoint(); - CC_LOG_I("[" << checkpoint << "] State restored, send TEvRun to " << 
AllActors.size() << " actors"); - for (const auto& [actor, transport] : AllActors) { - transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvRun()); + CC_LOG_I("[" << checkpoint << "] State restored, send TEvRun to " << AllActors.size() << " actors"); + for (const auto& [actor, transport] : AllActors) { + transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvRun()); } } } @@ -321,30 +321,30 @@ void TCheckpointCoordinator::Handle(const NYql::NDq::TEvDqCompute::TEvRestoreFro void TCheckpointCoordinator::InitCheckpoint() { Y_VERIFY(CheckpointIdGenerator); const auto nextCheckpointId = CheckpointIdGenerator->NextId(); - CC_LOG_I("[" << nextCheckpointId << "] Registering new checkpoint in storage"); + CC_LOG_I("[" << nextCheckpointId << "] Registering new checkpoint in storage"); - PendingCheckpoints.emplace(nextCheckpointId, TPendingCheckpoint(ActorsToWaitForSet)); + PendingCheckpoints.emplace(nextCheckpointId, TPendingCheckpoint(ActorsToWaitForSet)); UpdateInProgressMetric(); ++*Metrics.Total; - std::unique_ptr<TEvCheckpointStorage::TEvCreateCheckpointRequest> req; - if (GraphDescId) { - req = std::make_unique<TEvCheckpointStorage::TEvCreateCheckpointRequest>(CoordinatorId, nextCheckpointId, ActorsToWaitForSet.size(), GraphDescId); - } else { - NProto::TCheckpointGraphDescription graphDesc; - graphDesc.MutableGraph()->CopyFrom(GraphParams); - req = std::make_unique<TEvCheckpointStorage::TEvCreateCheckpointRequest>(CoordinatorId, nextCheckpointId, ActorsToWaitForSet.size(), graphDesc); - } - - Send(StorageProxy, req.release(), IEventHandle::FlagTrackDelivery); + std::unique_ptr<TEvCheckpointStorage::TEvCreateCheckpointRequest> req; + if (GraphDescId) { + req = std::make_unique<TEvCheckpointStorage::TEvCreateCheckpointRequest>(CoordinatorId, nextCheckpointId, ActorsToWaitForSet.size(), GraphDescId); + } else { + NProto::TCheckpointGraphDescription graphDesc; + graphDesc.MutableGraph()->CopyFrom(GraphParams); + req = std::make_unique<TEvCheckpointStorage::TEvCreateCheckpointRequest>(CoordinatorId, nextCheckpointId, ActorsToWaitForSet.size(), graphDesc); + } + + Send(StorageProxy, req.release(), IEventHandle::FlagTrackDelivery); } void TCheckpointCoordinator::Handle(const TEvCheckpointCoordinator::TEvScheduleCheckpointing::TPtr&) { - CC_LOG_D("Got TEvScheduleCheckpointing"); + CC_LOG_D("Got TEvScheduleCheckpointing"); ScheduleNextCheckpoint(); const auto checkpointsInFly = PendingCheckpoints.size() + PendingCommitCheckpoints.size(); - if (checkpointsInFly >= Settings.GetMaxInflight() || InitingZeroCheckpoint) { - CC_LOG_W("Skip schedule checkpoint event since inflight checkpoint limit exceeded: current: " << checkpointsInFly << ", limit: " << Settings.GetMaxInflight()); + if (checkpointsInFly >= Settings.GetMaxInflight() || InitingZeroCheckpoint) { + CC_LOG_W("Skip schedule checkpoint event since inflight checkpoint limit exceeded: current: " << checkpointsInFly << ", limit: " << Settings.GetMaxInflight()); Metrics.SkippedDueToInFlightLimit->Inc(); return; } @@ -355,10 +355,10 @@ void TCheckpointCoordinator::Handle(const TEvCheckpointCoordinator::TEvScheduleC void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvCreateCheckpointResponse::TPtr& ev) { const auto& checkpointId = ev->Get()->CheckpointId; const auto& issues = ev->Get()->Issues; - CC_LOG_D("[" << checkpointId << "] Got TEvCreateCheckpointResponse"); + CC_LOG_D("[" << checkpointId << "] Got TEvCreateCheckpointResponse"); if (issues) { - CC_LOG_E("[" << checkpointId << "] Can't create checkpoint: " << 
issues.ToOneLineString()); + CC_LOG_E("[" << checkpointId << "] Can't create checkpoint: " << issues.ToOneLineString()); PendingCheckpoints.erase(checkpointId); UpdateInProgressMetric(); ++*Metrics.FailedToCreate; @@ -366,34 +366,34 @@ void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvCreateCheckpo return; } - if (GraphDescId) { - Y_VERIFY(GraphDescId == ev->Get()->GraphDescId); - } else { - GraphDescId = ev->Get()->GraphDescId; - Y_VERIFY(GraphDescId); - } - - if (PendingInit) { - PendingInit->CheckpointId = checkpointId; - if (PendingInit->CanInjectCheckpoint()) { - PendingInit = nullptr; - InjectCheckpoint(checkpointId); - } - } else { - InjectCheckpoint(checkpointId); - } -} - -void TCheckpointCoordinator::InjectCheckpoint(const TCheckpointId& checkpointId) { - CC_LOG_I("[" << checkpointId << "] Checkpoint successfully created, going to inject barriers to " << ActorsToTrigger.size() << " actor(s)"); - for (const auto& [toTrigger, transport] : ActorsToTrigger) { - transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvInjectCheckpoint(checkpointId.SeqNo, checkpointId.CoordinatorGeneration)); - } - + if (GraphDescId) { + Y_VERIFY(GraphDescId == ev->Get()->GraphDescId); + } else { + GraphDescId = ev->Get()->GraphDescId; + Y_VERIFY(GraphDescId); + } + + if (PendingInit) { + PendingInit->CheckpointId = checkpointId; + if (PendingInit->CanInjectCheckpoint()) { + PendingInit = nullptr; + InjectCheckpoint(checkpointId); + } + } else { + InjectCheckpoint(checkpointId); + } +} + +void TCheckpointCoordinator::InjectCheckpoint(const TCheckpointId& checkpointId) { + CC_LOG_I("[" << checkpointId << "] Checkpoint successfully created, going to inject barriers to " << ActorsToTrigger.size() << " actor(s)"); + for (const auto& [toTrigger, transport] : ActorsToTrigger) { + transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvInjectCheckpoint(checkpointId.SeqNo, checkpointId.CoordinatorGeneration)); + } + if (!GraphIsRunning) { - CC_LOG_I("[" << checkpointId << "] Send TEvRun to all actors"); - for (const auto& [actor, transport] : AllActors) { - transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvRun()); + CC_LOG_I("[" << checkpointId << "] Send TEvRun to all actors"); + for (const auto& [actor, transport] : AllActors) { + transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvRun()); } GraphIsRunning = true; } @@ -405,14 +405,14 @@ void TCheckpointCoordinator::Handle(const NYql::NDq::TEvDqCompute::TEvSaveTaskSt const auto& status = proto.GetStatus(); const TString& statusName = NYql::NDqProto::TEvSaveTaskStateResult_EStatus_Name(status); - if (!OnComputeActorEventReceived(ev)) { - return; - } - + if (!OnComputeActorEventReceived(ev)) { + return; + } + TCheckpointId checkpointId(checkpointProto.GetGeneration(), checkpointProto.GetId()); CC_LOG_D("[" << checkpointId << "] Got TEvSaveTaskStateResult; task " << proto.GetTaskId() - << ", status: " << statusName << ", size: " << proto.GetStateSizeBytes()); + << ", status: " << statusName << ", size: " << proto.GetStateSizeBytes()); const auto it = PendingCheckpoints.find(checkpointId); if (it == PendingCheckpoints.end()) { @@ -422,16 +422,16 @@ void TCheckpointCoordinator::Handle(const NYql::NDq::TEvDqCompute::TEvSaveTaskSt if (status == NYql::NDqProto::TEvSaveTaskStateResult::OK) { checkpoint.Acknowledge(ev->Sender, proto.GetStateSizeBytes()); - CC_LOG_D("[" << checkpointId << "] Task state saved, need " << checkpoint.NotYetAcknowledgedCount() << " more acks"); + CC_LOG_D("[" << checkpointId << "] Task state saved, 
need " << checkpoint.NotYetAcknowledgedCount() << " more acks"); if (checkpoint.GotAllAcknowledges()) { - CC_LOG_I("[" << checkpointId << "] Got all acks, changing checkpoint status to 'PendingCommit'"); + CC_LOG_I("[" << checkpointId << "] Got all acks, changing checkpoint status to 'PendingCommit'"); Send(StorageProxy, new TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusRequest(CoordinatorId, checkpointId), IEventHandle::FlagTrackDelivery); - if (InitingZeroCheckpoint) { - Send(RunActorId, new TEvCheckpointCoordinator::TEvZeroCheckpointDone()); - } + if (InitingZeroCheckpoint) { + Send(RunActorId, new TEvCheckpointCoordinator::TEvZeroCheckpointDone()); + } } } else { - CC_LOG_E("[" << checkpointId << "] Can't save node state, aborting checkpoint"); + CC_LOG_E("[" << checkpointId << "] Can't save node state, aborting checkpoint"); Send(StorageProxy, new TEvCheckpointStorage::TEvAbortCheckpointRequest(CoordinatorId, checkpointId, "Can't save node state"), IEventHandle::FlagTrackDelivery); } } @@ -439,48 +439,48 @@ void TCheckpointCoordinator::Handle(const NYql::NDq::TEvDqCompute::TEvSaveTaskSt void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusResponse::TPtr& ev) { const auto& checkpointId = ev->Get()->CheckpointId; const auto issues = ev->Get()->Issues; - CC_LOG_D("[" << checkpointId << "] Got TEvSetCheckpointPendingCommitStatusResponse"); + CC_LOG_D("[" << checkpointId << "] Got TEvSetCheckpointPendingCommitStatusResponse"); const auto it = PendingCheckpoints.find(checkpointId); if (it == PendingCheckpoints.end()) { - CC_LOG_W("[" << checkpointId << "] Got TEvSetCheckpointPendingCommitStatusResponse for checkpoint but it is not in PendingCheckpoints"); + CC_LOG_W("[" << checkpointId << "] Got TEvSetCheckpointPendingCommitStatusResponse for checkpoint but it is not in PendingCheckpoints"); return; } if (issues) { - CC_LOG_E("[" << checkpointId << "] Can't change checkpoint status to 'PendingCommit': " << issues.ToString()); + CC_LOG_E("[" << checkpointId << "] Can't change checkpoint status to 'PendingCommit': " << issues.ToString()); ++*Metrics.StorageError; PendingCheckpoints.erase(it); return; } - CC_LOG_I("[" << checkpointId << "] Checkpoint status changed to 'PendingCommit', committing states"); - PendingCommitCheckpoints.emplace(checkpointId, TPendingCheckpoint(ActorsToNotifySet, it->second.GetStats())); + CC_LOG_I("[" << checkpointId << "] Checkpoint status changed to 'PendingCommit', committing states"); + PendingCommitCheckpoints.emplace(checkpointId, TPendingCheckpoint(ActorsToNotifySet, it->second.GetStats())); PendingCheckpoints.erase(it); UpdateInProgressMetric(); - for (const auto& [toTrigger, transport] : ActorsToNotify) { - transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvCommitState(checkpointId.SeqNo, checkpointId.CoordinatorGeneration, CoordinatorId.Generation)); + for (const auto& [toTrigger, transport] : ActorsToNotify) { + transport->EventsQueue.Send(new NYql::NDq::TEvDqCompute::TEvCommitState(checkpointId.SeqNo, checkpointId.CoordinatorGeneration, CoordinatorId.Generation)); } } void TCheckpointCoordinator::Handle(const NYql::NDq::TEvDqCompute::TEvStateCommitted::TPtr& ev) { - if (!OnComputeActorEventReceived(ev)) { - return; - } - + if (!OnComputeActorEventReceived(ev)) { + return; + } + const auto& checkpointPb = ev->Get()->Record.GetCheckpoint(); TCheckpointId checkpointId(checkpointPb.GetGeneration(), checkpointPb.GetId()); - CC_LOG_D("[" << checkpointId << "] Got TEvStateCommitted; task: " << 
ev->Get()->Record.GetTaskId()); + CC_LOG_D("[" << checkpointId << "] Got TEvStateCommitted; task: " << ev->Get()->Record.GetTaskId()); const auto it = PendingCommitCheckpoints.find(checkpointId); if (it == PendingCommitCheckpoints.end()) { - CC_LOG_W("[" << checkpointId << "] Got TEvStateCommitted for checkpoint " << checkpointId << " but it is not in PendingCommitCheckpoints"); + CC_LOG_W("[" << checkpointId << "] Got TEvStateCommitted for checkpoint " << checkpointId << " but it is not in PendingCommitCheckpoints"); return; } auto& checkpoint = it->second; checkpoint.Acknowledge(ev->Sender); - CC_LOG_D("[" << checkpointId << "] State committed " << ev->Sender.ToString() << ", need " << checkpoint.NotYetAcknowledgedCount() << " more acks"); + CC_LOG_D("[" << checkpointId << "] State committed " << ev->Sender.ToString() << ", need " << checkpoint.NotYetAcknowledgedCount() << " more acks"); if (checkpoint.GotAllAcknowledges()) { - CC_LOG_I("[" << checkpointId << "] Got all acks, changing checkpoint status to 'Completed'"); + CC_LOG_I("[" << checkpointId << "] Got all acks, changing checkpoint status to 'Completed'"); const auto& stats = checkpoint.GetStats(); auto durationMs = (TInstant::Now() - stats.CreatedAt).MilliSeconds(); Metrics.LastCheckpointBarrierDeliveryTimeMillis->Set(durationMs); @@ -491,10 +491,10 @@ void TCheckpointCoordinator::Handle(const NYql::NDq::TEvDqCompute::TEvStateCommi void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvCompleteCheckpointResponse::TPtr& ev) { const auto& checkpointId = ev->Get()->CheckpointId; - CC_LOG_D("[" << checkpointId << "] Got TEvCompleteCheckpointResponse"); + CC_LOG_D("[" << checkpointId << "] Got TEvCompleteCheckpointResponse"); const auto it = PendingCommitCheckpoints.find(checkpointId); if (it == PendingCommitCheckpoints.end()) { - CC_LOG_W("[" << checkpointId << "] Got TEvCompleteCheckpointResponse but related checkpoint is not in progress; checkpointId: " << checkpointId); + CC_LOG_W("[" << checkpointId << "] Got TEvCompleteCheckpointResponse but related checkpoint is not in progress; checkpointId: " << checkpointId); return; } const auto& issues = ev->Get()->Issues; @@ -506,10 +506,10 @@ void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvCompleteCheck Metrics.CheckpointDurationMillis->Collect(durationMs); Metrics.CheckpointSizeBytes->Collect(stats.StateSize); ++*Metrics.Completed; - CC_LOG_I("[" << checkpointId << "] Checkpoint completed"); + CC_LOG_I("[" << checkpointId << "] Checkpoint completed"); } else { ++*Metrics.StorageError; - CC_LOG_E("[" << checkpointId << "] Can't change checkpoint status to 'Completed': " << issues.ToString()); + CC_LOG_E("[" << checkpointId << "] Can't change checkpoint status to 'Completed': " << issues.ToString()); } PendingCommitCheckpoints.erase(it); UpdateInProgressMetric(); @@ -517,13 +517,13 @@ void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvCompleteCheck void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvAbortCheckpointResponse::TPtr& ev) { const auto& checkpointId = ev->Get()->CheckpointId; - CC_LOG_D("[" << checkpointId << "] Got TEvAbortCheckpointResponse"); + CC_LOG_D("[" << checkpointId << "] Got TEvAbortCheckpointResponse"); const auto& issues = ev->Get()->Issues; if (issues) { - CC_LOG_E("[" << checkpointId << "] Can't abort checkpoint: " << issues.ToString()); + CC_LOG_E("[" << checkpointId << "] Can't abort checkpoint: " << issues.ToString()); ++*Metrics.StorageError; } else { - CC_LOG_W("[" << checkpointId << "] 
Checkpoint aborted"); + CC_LOG_W("[" << checkpointId << "] Checkpoint aborted"); ++*Metrics.Aborted; } PendingCheckpoints.erase(checkpointId); @@ -531,32 +531,32 @@ void TCheckpointCoordinator::Handle(const TEvCheckpointStorage::TEvAbortCheckpoi UpdateInProgressMetric(); } -void TCheckpointCoordinator::Handle(const NYql::NDq::TEvRetryQueuePrivate::TEvRetry::TPtr& ev) { - const auto actorIt = TaskIdToActor.find(ev->Get()->EventQueueId); - Y_VERIFY(actorIt != TaskIdToActor.end()); - const auto transportIt = AllActors.find(actorIt->second); - Y_VERIFY(transportIt != AllActors.end()); - transportIt->second->EventsQueue.Retry(); -} - -void TCheckpointCoordinator::Handle(NActors::TEvInterconnect::TEvNodeDisconnected::TPtr& ev) { - CC_LOG_I("Handle disconnected node " << ev->Get()->NodeId); - - for (const auto& [actorId, transport] : AllActors) { - transport->EventsQueue.HandleNodeDisconnected(ev->Get()->NodeId); - } -} - -void TCheckpointCoordinator::Handle(NActors::TEvInterconnect::TEvNodeConnected::TPtr& ev) { - CC_LOG_D("Handle connected node " << ev->Get()->NodeId); - - for (const auto& [actorId, transport] : AllActors) { - transport->EventsQueue.HandleNodeConnected(ev->Get()->NodeId); - } -} - +void TCheckpointCoordinator::Handle(const NYql::NDq::TEvRetryQueuePrivate::TEvRetry::TPtr& ev) { + const auto actorIt = TaskIdToActor.find(ev->Get()->EventQueueId); + Y_VERIFY(actorIt != TaskIdToActor.end()); + const auto transportIt = AllActors.find(actorIt->second); + Y_VERIFY(transportIt != AllActors.end()); + transportIt->second->EventsQueue.Retry(); +} + +void TCheckpointCoordinator::Handle(NActors::TEvInterconnect::TEvNodeDisconnected::TPtr& ev) { + CC_LOG_I("Handle disconnected node " << ev->Get()->NodeId); + + for (const auto& [actorId, transport] : AllActors) { + transport->EventsQueue.HandleNodeDisconnected(ev->Get()->NodeId); + } +} + +void TCheckpointCoordinator::Handle(NActors::TEvInterconnect::TEvNodeConnected::TPtr& ev) { + CC_LOG_D("Handle connected node " << ev->Get()->NodeId); + + for (const auto& [actorId, transport] : AllActors) { + transport->EventsQueue.HandleNodeConnected(ev->Get()->NodeId); + } +} + void TCheckpointCoordinator::Handle(NActors::TEvents::TEvPoison::TPtr& ev) { - CC_LOG_D("Got TEvPoison"); + CC_LOG_D("Got TEvPoison"); Send(ev->Sender, new NActors::TEvents::TEvPoisonTaken(), 0, ev->Cookie); PassAway(); } @@ -564,25 +564,25 @@ void TCheckpointCoordinator::Handle(NActors::TEvents::TEvPoison::TPtr& ev) { void TCheckpointCoordinator::Handle(NActors::TEvents::TEvUndelivered::TPtr& ev) { TStringStream message; message << "Got TEvUndelivered; reason: " << ev->Get()->Reason << ", sourceType: " << ev->Get()->SourceType; - CC_LOG_D(message.Str()); + CC_LOG_D(message.Str()); Send(TaskControllerId, NYql::NDq::TEvDq::TEvAbortExecution::Unavailable(message.Str())); PassAway(); } -void TCheckpointCoordinator::Handle(const TEvCheckpointCoordinator::TEvRunGraph::TPtr&) { - InitingZeroCheckpoint = false; - // TODO: run graph only now, not before zero checkpoint inited -} - -void TCheckpointCoordinator::PassAway() { - for (const auto& [actorId, transport] : AllActors) { - transport->EventsQueue.Unsubscribe(); - } - TActorBootstrapped<TCheckpointCoordinator>::PassAway(); -} - -THolder<NActors::IActor> MakeCheckpointCoordinator(TCoordinatorId coordinatorId, const TActorId& taskControllerId, const TActorId& storageProxy, const TActorId& runActorId, const TCheckpointCoordinatorConfig& settings, const NMonitoring::TDynamicCounterPtr& counters, const NProto::TGraphParams& graphParams, 
const YandexQuery::StateLoadMode& stateLoadMode, const YandexQuery::StreamingDisposition& streamingDisposition) { - return MakeHolder<TCheckpointCoordinator>(coordinatorId, taskControllerId, storageProxy, runActorId, settings, counters, graphParams, stateLoadMode, streamingDisposition); +void TCheckpointCoordinator::Handle(const TEvCheckpointCoordinator::TEvRunGraph::TPtr&) { + InitingZeroCheckpoint = false; + // TODO: run graph only now, not before zero checkpoint inited +} + +void TCheckpointCoordinator::PassAway() { + for (const auto& [actorId, transport] : AllActors) { + transport->EventsQueue.Unsubscribe(); + } + TActorBootstrapped<TCheckpointCoordinator>::PassAway(); +} + +THolder<NActors::IActor> MakeCheckpointCoordinator(TCoordinatorId coordinatorId, const TActorId& taskControllerId, const TActorId& storageProxy, const TActorId& runActorId, const TCheckpointCoordinatorConfig& settings, const NMonitoring::TDynamicCounterPtr& counters, const NProto::TGraphParams& graphParams, const YandexQuery::StateLoadMode& stateLoadMode, const YandexQuery::StreamingDisposition& streamingDisposition) { + return MakeHolder<TCheckpointCoordinator>(coordinatorId, taskControllerId, storageProxy, runActorId, settings, counters, graphParams, stateLoadMode, streamingDisposition); } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/checkpoint_coordinator.h b/ydb/core/yq/libs/checkpointing/checkpoint_coordinator.h index 0bb76685e15..2303b971510 100644 --- a/ydb/core/yq/libs/checkpointing/checkpoint_coordinator.h +++ b/ydb/core/yq/libs/checkpointing/checkpoint_coordinator.h @@ -5,7 +5,7 @@ #include <ydb/core/yq/libs/checkpointing/events/events.h> #include <ydb/core/yq/libs/checkpointing_common/defs.h> -#include <ydb/core/yq/libs/checkpoint_storage/events/events.h> +#include <ydb/core/yq/libs/checkpoint_storage/events/events.h> #include <ydb/core/yq/libs/config/protos/checkpoint_coordinator.pb.h> #include <ydb/public/api/protos/yq.pb.h> @@ -16,26 +16,26 @@ #include <library/cpp/actors/core/actor_bootstrapped.h> -namespace NYq { +namespace NYq { using namespace NActors; -using namespace NYq::NConfig; +using namespace NYq::NConfig; class TCheckpointCoordinator : public TActorBootstrapped<TCheckpointCoordinator> { public: TCheckpointCoordinator(TCoordinatorId coordinatorId, const TActorId& taskControllerId, const TActorId& storageProxy, - const TActorId& runActorId, + const TActorId& runActorId, const TCheckpointCoordinatorConfig& settings, const NMonitoring::TDynamicCounterPtr& counters, - const NProto::TGraphParams& graphParams, - const YandexQuery::StateLoadMode& stateLoadMode, - const YandexQuery::StreamingDisposition& streamingDisposition); + const NProto::TGraphParams& graphParams, + const YandexQuery::StateLoadMode& stateLoadMode, + const YandexQuery::StreamingDisposition& streamingDisposition); void Handle(const NYql::NDqs::TEvReadyState::TPtr&); void Handle(const TEvCheckpointStorage::TEvRegisterCoordinatorResponse::TPtr&); - void Handle(const NYql::NDq::TEvDqCompute::TEvNewCheckpointCoordinatorAck::TPtr&); + void Handle(const NYql::NDq::TEvDqCompute::TEvNewCheckpointCoordinatorAck::TPtr&); void Handle(const TEvCheckpointStorage::TEvGetCheckpointsMetadataResponse::TPtr&); void Handle(const NYql::NDq::TEvDqCompute::TEvRestoreFromCheckpointResult::TPtr&); void Handle(const TEvCheckpointCoordinator::TEvScheduleCheckpointing::TPtr&); @@ -45,18 +45,18 @@ public: void Handle(const NYql::NDq::TEvDqCompute::TEvStateCommitted::TPtr&); void Handle(const 
TEvCheckpointStorage::TEvCompleteCheckpointResponse::TPtr&); void Handle(const TEvCheckpointStorage::TEvAbortCheckpointResponse::TPtr&); - void Handle(const NYql::NDq::TEvRetryQueuePrivate::TEvRetry::TPtr& ev); + void Handle(const NYql::NDq::TEvRetryQueuePrivate::TEvRetry::TPtr& ev); void Handle(NActors::TEvents::TEvPoison::TPtr&); void Handle(NActors::TEvents::TEvUndelivered::TPtr&); - void Handle(NActors::TEvInterconnect::TEvNodeDisconnected::TPtr& ev); - void Handle(NActors::TEvInterconnect::TEvNodeConnected::TPtr& ev); - void Handle(const TEvCheckpointCoordinator::TEvRunGraph::TPtr&); + void Handle(NActors::TEvInterconnect::TEvNodeDisconnected::TPtr& ev); + void Handle(NActors::TEvInterconnect::TEvNodeConnected::TPtr& ev); + void Handle(const TEvCheckpointCoordinator::TEvRunGraph::TPtr&); STRICT_STFUNC(DispatchEvent, - hFunc(NYql::NDqs::TEvReadyState, Handle) + hFunc(NYql::NDqs::TEvReadyState, Handle) hFunc(TEvCheckpointStorage::TEvRegisterCoordinatorResponse, Handle) - hFunc(NYql::NDq::TEvDqCompute::TEvNewCheckpointCoordinatorAck, Handle) + hFunc(NYql::NDq::TEvDqCompute::TEvNewCheckpointCoordinatorAck, Handle) hFunc(TEvCheckpointStorage::TEvGetCheckpointsMetadataResponse, Handle) hFunc(NYql::NDq::TEvDqCompute::TEvRestoreFromCheckpointResult, Handle) hFunc(TEvCheckpointCoordinator::TEvScheduleCheckpointing, Handle) @@ -66,12 +66,12 @@ public: hFunc(NYql::NDq::TEvDqCompute::TEvStateCommitted, Handle) hFunc(TEvCheckpointStorage::TEvCompleteCheckpointResponse, Handle) hFunc(TEvCheckpointStorage::TEvAbortCheckpointResponse, Handle) - hFunc(NYql::NDq::TEvRetryQueuePrivate::TEvRetry, Handle) + hFunc(NYql::NDq::TEvRetryQueuePrivate::TEvRetry, Handle) hFunc(NActors::TEvents::TEvPoison, Handle) hFunc(NActors::TEvents::TEvUndelivered, Handle) - hFunc(NActors::TEvInterconnect::TEvNodeDisconnected, Handle) - hFunc(NActors::TEvInterconnect::TEvNodeConnected, Handle) - hFunc(TEvCheckpointCoordinator::TEvRunGraph, Handle) + hFunc(NActors::TEvInterconnect::TEvNodeDisconnected, Handle) + hFunc(NActors::TEvInterconnect::TEvNodeConnected, Handle) + hFunc(TEvCheckpointCoordinator::TEvRunGraph, Handle) ) void Bootstrap(); @@ -80,20 +80,20 @@ public: private: void InitCheckpoint(); - void InjectCheckpoint(const TCheckpointId& checkpointId); + void InjectCheckpoint(const TCheckpointId& checkpointId); void ScheduleNextCheckpoint(); void UpdateInProgressMetric(); - void PassAway() override; - void RestoreFromOwnCheckpoint(const TCheckpointMetadata& checkpoint); - void TryToRestoreOffsetsFromForeignCheckpoint(const TCheckpointMetadata& checkpoint); - - template <class TEvPtr> - bool OnComputeActorEventReceived(TEvPtr& ev) { - const auto actorIt = AllActors.find(ev->Sender); - Y_VERIFY(actorIt != AllActors.end()); - return actorIt->second->EventsQueue.OnEventReceived(ev); - } - + void PassAway() override; + void RestoreFromOwnCheckpoint(const TCheckpointMetadata& checkpoint); + void TryToRestoreOffsetsFromForeignCheckpoint(const TCheckpointMetadata& checkpoint); + + template <class TEvPtr> + bool OnComputeActorEventReceived(TEvPtr& ev) { + const auto actorIt = AllActors.find(ev->Sender); + Y_VERIFY(actorIt != AllActors.end()); + return actorIt->second->EventsQueue.OnEventReceived(ev); + } + struct TCheckpointCoordinatorMetrics { TCheckpointCoordinatorMetrics(const NMonitoring::TDynamicCounterPtr& counters) { auto subgroup = counters->GetSubgroup("subsystem", "checkpoint_coordinator"); @@ -112,9 +112,9 @@ private: CheckpointDurationMillis = subgroup->GetHistogram("CheckpointDurationMillis", 
NMonitoring::ExponentialHistogram(12, 2, 1024)); // ~ 1s -> ~ 1 h CheckpointSizeBytes = subgroup->GetHistogram("CheckpointSizeBytes", NMonitoring::ExponentialHistogram(8, 32, 32)); // 32b -> 1Tb SkippedDueToInFlightLimit = subgroup->GetCounter("SkippedDueToInFlightLimit"); - RestoredFromSavedCheckpoint = subgroup->GetCounter("RestoredFromSavedCheckpoint", true); - StartedFromEmptyCheckpoint = subgroup->GetCounter("StartedFromEmptyCheckpoint", true); - RestoredStreamingOffsetsFromCheckpoint = subgroup->GetCounter("RestoredStreamingOffsetsFromCheckpoint", true); + RestoredFromSavedCheckpoint = subgroup->GetCounter("RestoredFromSavedCheckpoint", true); + StartedFromEmptyCheckpoint = subgroup->GetCounter("StartedFromEmptyCheckpoint", true); + RestoredStreamingOffsetsFromCheckpoint = subgroup->GetCounter("RestoredStreamingOffsetsFromCheckpoint", true); } NMonitoring::TDynamicCounters::TCounterPtr InProgress; @@ -129,52 +129,52 @@ private: NMonitoring::TDynamicCounters::TCounterPtr LastCheckpointDurationMillis; NMonitoring::TDynamicCounters::TCounterPtr LastCheckpointSizeBytes; NMonitoring::TDynamicCounters::TCounterPtr SkippedDueToInFlightLimit; - NMonitoring::TDynamicCounters::TCounterPtr RestoredFromSavedCheckpoint; - NMonitoring::TDynamicCounters::TCounterPtr StartedFromEmptyCheckpoint; - NMonitoring::TDynamicCounters::TCounterPtr RestoredStreamingOffsetsFromCheckpoint; + NMonitoring::TDynamicCounters::TCounterPtr RestoredFromSavedCheckpoint; + NMonitoring::TDynamicCounters::TCounterPtr StartedFromEmptyCheckpoint; + NMonitoring::TDynamicCounters::TCounterPtr RestoredStreamingOffsetsFromCheckpoint; NMonitoring::THistogramPtr CheckpointBarrierDeliveryTimeMillis; NMonitoring::THistogramPtr CheckpointDurationMillis; NMonitoring::THistogramPtr CheckpointSizeBytes; }; - struct TComputeActorTransportStuff : public TSimpleRefCount<TComputeActorTransportStuff> { - using TPtr = TIntrusivePtr<TComputeActorTransportStuff>; - - NYql::NDq::TRetryEventsQueue EventsQueue; - }; - + struct TComputeActorTransportStuff : public TSimpleRefCount<TComputeActorTransportStuff> { + using TPtr = TIntrusivePtr<TComputeActorTransportStuff>; + + NYql::NDq::TRetryEventsQueue EventsQueue; + }; + const TCoordinatorId CoordinatorId; const TActorId TaskControllerId; const TActorId StorageProxy; - const TActorId RunActorId; + const TActorId RunActorId; std::unique_ptr<TCheckpointIdGenerator> CheckpointIdGenerator; TCheckpointCoordinatorConfig Settings; const TDuration CheckpointingPeriod; - const NProto::TGraphParams GraphParams; - TString GraphDescId; - - THashMap<TActorId, TComputeActorTransportStuff::TPtr> AllActors; - THashSet<TActorId> AllActorsSet; - THashMap<TActorId, TComputeActorTransportStuff::TPtr> ActorsToTrigger; - THashMap<TActorId, TComputeActorTransportStuff::TPtr> ActorsToWaitFor; - THashSet<TActorId> ActorsToWaitForSet; - THashMap<TActorId, TComputeActorTransportStuff::TPtr> ActorsToNotify; - THashSet<TActorId> ActorsToNotifySet; - THashMap<ui64, TActorId> TaskIdToActor; // Task id -> actor. 
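
For context, the checkpoint_coordinator.h hunk above declares the coordinator's per-role bookkeeping: AllActors, ActorsToTrigger, ActorsToWaitFor and ActorsToNotify map compute actor ids to a TComputeActorTransportStuff (a TRetryEventsQueue wrapper), while TaskIdToActor maps task ids back to actors. The TEvReadyState handler that populates these maps is not part of this hunk, so the sketch below is only a hypothetical illustration of such a classification. It assumes the IsIngress/IsEgress/HasState helpers from checkpointing/utils.h, and it assumes that ingress tasks receive barriers, stateful tasks are awaited, and egress tasks are notified on commit; ClassifyActors and its tuple parameter are invented for the example.

    // Hypothetical sketch (not part of this commit): filling the role maps inside TCheckpointCoordinator.
    void TCheckpointCoordinator::ClassifyActors(
            const TVector<std::tuple<ui64, NActors::TActorId, NYql::NDqProto::TDqTask>>& tasks) {
        for (const auto& [taskId, actorId, task] : tasks) {
            auto transport = MakeIntrusive<TComputeActorTransportStuff>();
            AllActors[actorId] = transport;
            AllActorsSet.insert(actorId);
            TaskIdToActor[taskId] = actorId;
            if (IsIngress(task)) {            // only source inputs: gets TEvInjectCheckpoint barriers
                ActorsToTrigger[actorId] = transport;
            }
            if (HasState(task)) {             // owns state: coordinator waits for save/restore acks
                ActorsToWaitFor[actorId] = transport;
                ActorsToWaitForSet.insert(actorId);
            }
            if (IsEgress(task)) {             // has sink outputs: gets TEvCommitState on commit
                ActorsToNotify[actorId] = transport;
                ActorsToNotifySet.insert(actorId);
            }
        }
    }
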
+ const NProto::TGraphParams GraphParams; + TString GraphDescId; + + THashMap<TActorId, TComputeActorTransportStuff::TPtr> AllActors; + THashSet<TActorId> AllActorsSet; + THashMap<TActorId, TComputeActorTransportStuff::TPtr> ActorsToTrigger; + THashMap<TActorId, TComputeActorTransportStuff::TPtr> ActorsToWaitFor; + THashSet<TActorId> ActorsToWaitForSet; + THashMap<TActorId, TComputeActorTransportStuff::TPtr> ActorsToNotify; + THashSet<TActorId> ActorsToNotifySet; + THashMap<ui64, TActorId> TaskIdToActor; // Task id -> actor. THashMap<TCheckpointId, TPendingCheckpoint, TCheckpointIdHash> PendingCheckpoints; THashMap<TCheckpointId, TPendingCheckpoint, TCheckpointIdHash> PendingCommitCheckpoints; TMaybe<TPendingRestoreCheckpoint> PendingRestoreCheckpoint; - std::unique_ptr<TPendingInitCoordinator> PendingInit; + std::unique_ptr<TPendingInitCoordinator> PendingInit; bool GraphIsRunning = false; - bool InitingZeroCheckpoint = false; - bool RestoringFromForeignCheckpoint = false; + bool InitingZeroCheckpoint = false; + bool RestoringFromForeignCheckpoint = false; TCheckpointCoordinatorMetrics Metrics; YandexQuery::StateLoadMode StateLoadMode; - YandexQuery::StreamingDisposition StreamingDisposition; + YandexQuery::StreamingDisposition StreamingDisposition; }; -THolder<NActors::IActor> MakeCheckpointCoordinator(TCoordinatorId coordinatorId, const TActorId& executerId, const TActorId& storageProxy, const TActorId& runActorId, const TCheckpointCoordinatorConfig& settings, const NMonitoring::TDynamicCounterPtr& counters, const NProto::TGraphParams& graphParams, const YandexQuery::StateLoadMode& stateLoadMode = YandexQuery::StateLoadMode::FROM_LAST_CHECKPOINT, const YandexQuery::StreamingDisposition& streamingDisposition = {}); +THolder<NActors::IActor> MakeCheckpointCoordinator(TCoordinatorId coordinatorId, const TActorId& executerId, const TActorId& storageProxy, const TActorId& runActorId, const TCheckpointCoordinatorConfig& settings, const NMonitoring::TDynamicCounterPtr& counters, const NProto::TGraphParams& graphParams, const YandexQuery::StateLoadMode& stateLoadMode = YandexQuery::StateLoadMode::FROM_LAST_CHECKPOINT, const YandexQuery::StreamingDisposition& streamingDisposition = {}); -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/checkpoint_id_generator.cpp b/ydb/core/yq/libs/checkpointing/checkpoint_id_generator.cpp index 6b415e8ac83..ae46ecaf5db 100644 --- a/ydb/core/yq/libs/checkpointing/checkpoint_id_generator.cpp +++ b/ydb/core/yq/libs/checkpointing/checkpoint_id_generator.cpp @@ -2,7 +2,7 @@ #include <ydb/core/yq/libs/checkpointing_common/defs.h> -namespace NYq { +namespace NYq { TCheckpointIdGenerator::TCheckpointIdGenerator(TCoordinatorId coordinatorId, TCheckpointId lastCheckpoint) : CoordinatorId(std::move(coordinatorId)) { @@ -16,8 +16,8 @@ TCheckpointIdGenerator::TCheckpointIdGenerator(TCoordinatorId coordinatorId, TCh } } -TCheckpointId NYq::TCheckpointIdGenerator::NextId() { +TCheckpointId NYq::TCheckpointIdGenerator::NextId() { return TCheckpointId(CoordinatorId.Generation, NextNumber++); } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/checkpoint_id_generator.h b/ydb/core/yq/libs/checkpointing/checkpoint_id_generator.h index bcc295cf334..1dae2f41da9 100644 --- a/ydb/core/yq/libs/checkpointing/checkpoint_id_generator.h +++ b/ydb/core/yq/libs/checkpointing/checkpoint_id_generator.h @@ -2,7 +2,7 @@ #include <ydb/core/yq/libs/checkpointing_common/defs.h> -namespace NYq { +namespace NYq { class 
TCheckpointIdGenerator { private: @@ -15,4 +15,4 @@ public: TCheckpointId NextId(); }; -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/events/events.cpp b/ydb/core/yq/libs/checkpointing/events/events.cpp index 6c3d2603e7e..a4930159cf2 100644 --- a/ydb/core/yq/libs/checkpointing/events/events.cpp +++ b/ydb/core/yq/libs/checkpointing/events/events.cpp @@ -1 +1 @@ -#include "events.h" +#include "events.h" diff --git a/ydb/core/yq/libs/checkpointing/events/events.h b/ydb/core/yq/libs/checkpointing/events/events.h index 320c1f1dfcf..136701e4350 100644 --- a/ydb/core/yq/libs/checkpointing/events/events.h +++ b/ydb/core/yq/libs/checkpointing/events/events.h @@ -1,42 +1,42 @@ -#pragma once +#pragma once #include <ydb/core/yq/libs/checkpointing_common/defs.h> -#include <ydb/core/yq/libs/events/event_subspace.h> - -#include <library/cpp/actors/core/events.h> -#include <library/cpp/actors/core/event_pb.h> -#include <library/cpp/actors/interconnect/events_local.h> - -namespace NYq { - -struct TEvCheckpointCoordinator { - // Event ids. - enum EEv : ui32 { - EvScheduleCheckpointing = YqEventSubspaceBegin(TYqEventSubspace::CheckpointCoordinator), - EvCoordinatorRegistered, - EvZeroCheckpointDone, - EvRunGraph, - - EvEnd, - }; - - static_assert(EvEnd <= YqEventSubspaceEnd(TYqEventSubspace::CheckpointCoordinator), "All events must be in their subspace"); - - // Events. - - struct TEvScheduleCheckpointing : NActors::TEventLocal<TEvScheduleCheckpointing, EvScheduleCheckpointing> { - }; - - struct TEvCoordinatorRegistered : NActors::TEventLocal<TEvCoordinatorRegistered, EvCoordinatorRegistered> { - }; - - // Checkpoint coordinator sends this event to run actor when it initializes a new zero checkpoint. - // Run actor saves that next time we need to restore from checkpoint. - struct TEvZeroCheckpointDone : public NActors::TEventLocal<TEvZeroCheckpointDone, EvZeroCheckpointDone> { - }; - - // When run actor saved restore info after zero checkpoint, it sends this event to checkpoint coordinator. - struct TEvRunGraph : public NActors::TEventLocal<TEvRunGraph, EvRunGraph> { - }; -}; - -} // namespace NYq +#include <ydb/core/yq/libs/events/event_subspace.h> + +#include <library/cpp/actors/core/events.h> +#include <library/cpp/actors/core/event_pb.h> +#include <library/cpp/actors/interconnect/events_local.h> + +namespace NYq { + +struct TEvCheckpointCoordinator { + // Event ids. + enum EEv : ui32 { + EvScheduleCheckpointing = YqEventSubspaceBegin(TYqEventSubspace::CheckpointCoordinator), + EvCoordinatorRegistered, + EvZeroCheckpointDone, + EvRunGraph, + + EvEnd, + }; + + static_assert(EvEnd <= YqEventSubspaceEnd(TYqEventSubspace::CheckpointCoordinator), "All events must be in their subspace"); + + // Events. + + struct TEvScheduleCheckpointing : NActors::TEventLocal<TEvScheduleCheckpointing, EvScheduleCheckpointing> { + }; + + struct TEvCoordinatorRegistered : NActors::TEventLocal<TEvCoordinatorRegistered, EvCoordinatorRegistered> { + }; + + // Checkpoint coordinator sends this event to run actor when it initializes a new zero checkpoint. + // Run actor saves that next time we need to restore from checkpoint. + struct TEvZeroCheckpointDone : public NActors::TEventLocal<TEvZeroCheckpointDone, EvZeroCheckpointDone> { + }; + + // When run actor saved restore info after zero checkpoint, it sends this event to checkpoint coordinator. 
+ struct TEvRunGraph : public NActors::TEventLocal<TEvRunGraph, EvRunGraph> { + }; +}; + +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/events/ya.make b/ydb/core/yq/libs/checkpointing/events/ya.make index 064835085bd..3addd5ab1b7 100644 --- a/ydb/core/yq/libs/checkpointing/events/ya.make +++ b/ydb/core/yq/libs/checkpointing/events/ya.make @@ -1,15 +1,15 @@ OWNER(g:yq) - -LIBRARY() - -SRCS( - events.cpp -) - -PEERDIR( - library/cpp/actors/core - library/cpp/actors/interconnect + +LIBRARY() + +SRCS( + events.cpp +) + +PEERDIR( + library/cpp/actors/core + library/cpp/actors/interconnect ydb/core/yq/libs/checkpointing_common -) - -END() +) + +END() diff --git a/ydb/core/yq/libs/checkpointing/pending_checkpoint.cpp b/ydb/core/yq/libs/checkpointing/pending_checkpoint.cpp index 01c0204e46d..769aaacb592 100644 --- a/ydb/core/yq/libs/checkpointing/pending_checkpoint.cpp +++ b/ydb/core/yq/libs/checkpointing/pending_checkpoint.cpp @@ -1,6 +1,6 @@ #include "pending_checkpoint.h" -namespace NYq { +namespace NYq { TPendingCheckpoint::TPendingCheckpoint(THashSet<NActors::TActorId> toBeAcknowledged, TPendingCheckpointStats stats) : NotYetAcknowledged(std::move(toBeAcknowledged)) @@ -46,17 +46,17 @@ size_t TPendingRestoreCheckpoint::NotYetAcknowledgedCount() const { return NotYetAcknowledged.size(); } -void TPendingInitCoordinator::OnNewCheckpointCoordinatorAck() { - ++NewCheckpointCoordinatorAcksGot; - Y_VERIFY(NewCheckpointCoordinatorAcksGot <= ActorsCount); -} - -bool TPendingInitCoordinator::AllNewCheckpointCoordinatorAcksProcessed() const { - return NewCheckpointCoordinatorAcksGot == ActorsCount; -} - -bool TPendingInitCoordinator::CanInjectCheckpoint() const { - return AllNewCheckpointCoordinatorAcksProcessed() && CheckpointId; -} - -} // namespace NYq +void TPendingInitCoordinator::OnNewCheckpointCoordinatorAck() { + ++NewCheckpointCoordinatorAcksGot; + Y_VERIFY(NewCheckpointCoordinatorAcksGot <= ActorsCount); +} + +bool TPendingInitCoordinator::AllNewCheckpointCoordinatorAcksProcessed() const { + return NewCheckpointCoordinatorAcksGot == ActorsCount; +} + +bool TPendingInitCoordinator::CanInjectCheckpoint() const { + return AllNewCheckpointCoordinatorAcksProcessed() && CheckpointId; +} + +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/pending_checkpoint.h b/ydb/core/yq/libs/checkpointing/pending_checkpoint.h index bc42b48018b..caa0b1fd5b8 100644 --- a/ydb/core/yq/libs/checkpointing/pending_checkpoint.h +++ b/ydb/core/yq/libs/checkpointing/pending_checkpoint.h @@ -3,7 +3,7 @@ #include <library/cpp/actors/core/actor.h> -namespace NYq { +namespace NYq { struct TPendingCheckpointStats { const TInstant CreatedAt = TInstant::Now(); @@ -51,22 +51,22 @@ private: THashSet<NActors::TActorId> NotYetAcknowledged; }; -class TPendingInitCoordinator { -public: - explicit TPendingInitCoordinator(size_t actorsCount) - : ActorsCount(actorsCount) - { - } - - void OnNewCheckpointCoordinatorAck(); - bool AllNewCheckpointCoordinatorAcksProcessed() const; - - bool CanInjectCheckpoint() const; - -public: - const size_t ActorsCount; - size_t NewCheckpointCoordinatorAcksGot = 0; - TMaybe<TCheckpointId> CheckpointId; -}; - -} // namespace NYq +class TPendingInitCoordinator { +public: + explicit TPendingInitCoordinator(size_t actorsCount) + : ActorsCount(actorsCount) + { + } + + void OnNewCheckpointCoordinatorAck(); + bool AllNewCheckpointCoordinatorAcksProcessed() const; + + bool CanInjectCheckpoint() const; + +public: + const size_t ActorsCount; + size_t NewCheckpointCoordinatorAcksGot = 0; 
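
The events.h and pending_checkpoint.cpp hunks above describe the zero-checkpoint handshake: the coordinator tells the run actor about a freshly initialized zero checkpoint with TEvZeroCheckpointDone, the run actor answers with TEvRunGraph, and TPendingInitCoordinator gates barrier injection until every compute actor has acknowledged TEvNewCheckpointCoordinator and the storage proxy has returned a checkpoint id. Below is a minimal sketch of that gate, mirroring the logic in pending_checkpoint.cpp; the surrounding driver function is hypothetical.

    #include <ydb/core/yq/libs/checkpointing/pending_checkpoint.h>
    #include <util/system/yassert.h>

    void PendingInitGateSketch() {
        NYq::TPendingInitCoordinator pendingInit(/*actorsCount=*/3);

        // Each TEvNewCheckpointCoordinatorAck bumps the counter; a Y_VERIFY inside guards against overcounting.
        pendingInit.OnNewCheckpointCoordinatorAck();
        pendingInit.OnNewCheckpointCoordinatorAck();
        pendingInit.OnNewCheckpointCoordinatorAck();
        Y_VERIFY(pendingInit.AllNewCheckpointCoordinatorAcksProcessed());

        // All acks alone are not enough: a checkpoint id from TEvCreateCheckpointResponse is also required.
        Y_VERIFY(!pendingInit.CanInjectCheckpoint());
        pendingInit.CheckpointId = NYq::TCheckpointId(/*coordinatorGeneration=*/42, /*seqNo=*/1);
        Y_VERIFY(pendingInit.CanInjectCheckpoint());
    }
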
+ TMaybe<TCheckpointId> CheckpointId; +}; + +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp b/ydb/core/yq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp index ceb0a9faf59..e74fbf6fe59 100644 --- a/ydb/core/yq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp +++ b/ydb/core/yq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp @@ -1,5 +1,5 @@ #include <ydb/core/yq/libs/checkpointing/checkpoint_coordinator.h> -#include <ydb/core/yq/libs/graph_params/proto/graph_params.pb.h> +#include <ydb/core/yq/libs/graph_params/proto/graph_params.pb.h> #include <ydb/core/testlib/actors/test_runtime.h> #include <ydb/core/testlib/basics/helpers.h> @@ -10,13 +10,13 @@ namespace { using namespace NKikimr; -using namespace NYq; - -enum ETestGraphFlags : ui64 { - InputWithSource = 1, - SourceWithChannelInOneTask = 2, -}; +using namespace NYq; +enum ETestGraphFlags : ui64 { + InputWithSource = 1, + SourceWithChannelInOneTask = 2, +}; + NYql::NDqProto::TReadyState BuildTestGraph(ui64 flags = 0) { NYql::NDqProto::TReadyState result; @@ -25,10 +25,10 @@ NYql::NDqProto::TReadyState BuildTestGraph(ui64 flags = 0) { ingress->SetId(1); auto* ingressOutput = ingress->AddOutputs(); ingressOutput->AddChannels(); - if (flags & ETestGraphFlags::InputWithSource) { + if (flags & ETestGraphFlags::InputWithSource) { auto* source = ingress->AddInputs()->MutableSource(); source->SetType("PqSource"); - } + } auto* map = result.AddTask(); map->SetId(2); @@ -36,10 +36,10 @@ NYql::NDqProto::TReadyState BuildTestGraph(ui64 flags = 0) { mapInput->AddChannels(); auto* mapOutput = map->AddOutputs(); mapOutput->AddChannels(); - if (flags & ETestGraphFlags::SourceWithChannelInOneTask) { + if (flags & ETestGraphFlags::SourceWithChannelInOneTask) { auto* source = map->AddInputs()->MutableSource(); source->SetType("PqSource"); - } + } auto* egress = result.AddTask(); egress->SetId(3); @@ -54,7 +54,7 @@ struct TTestBootstrap : public TTestActorRuntime { NConfig::TCheckpointCoordinatorConfig Settings; NActors::TActorId StorageProxy; NActors::TActorId CheckpointCoordinator; - NActors::TActorId RunActor; + NActors::TActorId RunActor; NActors::TActorId IngressActor; NActors::TActorId MapActor; @@ -64,14 +64,14 @@ struct TTestBootstrap : public TTestActorRuntime { NMonitoring::TDynamicCounterPtr Counters = new NMonitoring::TDynamicCounters(); - explicit TTestBootstrap(ui64 graphFlags = 0) - : TTestActorRuntime(true) + explicit TTestBootstrap(ui64 graphFlags = 0) + : TTestActorRuntime(true) , GraphState(BuildTestGraph(graphFlags)) - { + { TAutoPtr<TAppPrepare> app = new TAppPrepare(); Initialize(app->Unwrap()); StorageProxy = AllocateEdgeActor(); - RunActor = AllocateEdgeActor(); + RunActor = AllocateEdgeActor(); IngressActor = AllocateEdgeActor(); MapActor = AllocateEdgeActor(); EgressActor = AllocateEdgeActor(); @@ -91,7 +91,7 @@ struct TTestBootstrap : public TTestActorRuntime { SetLogPriority(NKikimrServices::STREAMS_CHECKPOINT_COORDINATOR, NLog::PRI_DEBUG); - CheckpointCoordinator = Register(MakeCheckpointCoordinator(TCoordinatorId("my-graph-id", 42), {}, StorageProxy, RunActor, Settings, Counters, NProto::TGraphParams()).Release()); + CheckpointCoordinator = Register(MakeCheckpointCoordinator(TCoordinatorId("my-graph-id", 42), {}, StorageProxy, RunActor, Settings, Counters, NProto::TGraphParams()).Release()); WaitForBootstrap(); Send(new IEventHandle(CheckpointCoordinator, {}, new NYql::NDqs::TEvReadyState(std::move(GraphState)))); @@ -106,13 +106,13 @@ struct TTestBootstrap : public 
TTestActorRuntime { }; } // namespace -namespace NYq { +namespace NYq { void MockRegisterCoordinatorResponseEvent(TTestBootstrap& bootstrap, NYql::TIssues issues = NYql::TIssues()) { bootstrap.Send(new IEventHandle( bootstrap.CheckpointCoordinator, bootstrap.StorageProxy, - new TEvCheckpointStorage::TEvRegisterCoordinatorResponse(std::move(issues)))); + new TEvCheckpointStorage::TEvRegisterCoordinatorResponse(std::move(issues)))); } void MockCheckpointsMetadataResponse(TTestBootstrap& bootstrap, NYql::TIssues issues = NYql::TIssues()) { @@ -126,7 +126,7 @@ void MockCreateCheckpointResponse(TTestBootstrap& bootstrap, TCheckpointId& chec bootstrap.Send(new IEventHandle( bootstrap.CheckpointCoordinator, bootstrap.StorageProxy, - new TEvCheckpointStorage::TEvCreateCheckpointResponse(checkpointId, std::move(issues), "42"))); + new TEvCheckpointStorage::TEvCreateCheckpointResponse(checkpointId, std::move(issues), "42"))); } void MockNodeStateSavedEvent(TTestBootstrap& bootstrap, TCheckpointId& checkpointId, TActorId& sender) { @@ -155,7 +155,7 @@ void MockSetCheckpointPendingCommitStatusResponse(TTestBootstrap& bootstrap, TCh bootstrap.Send(new IEventHandle( bootstrap.CheckpointCoordinator, bootstrap.StorageProxy, - new TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusResponse(checkpointId, std::move(issues)))); + new TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusResponse(checkpointId, std::move(issues)))); } void MockChangesCommittedEvent(TTestBootstrap& bootstrap, TCheckpointId& checkpointId, TActorId& sender) { @@ -169,15 +169,15 @@ void MockCompleteCheckpointResponse(TTestBootstrap& bootstrap, TCheckpointId& ch bootstrap.Send(new IEventHandle( bootstrap.CheckpointCoordinator, bootstrap.StorageProxy, - new TEvCheckpointStorage::TEvCompleteCheckpointResponse(checkpointId, std::move(issues)))); + new TEvCheckpointStorage::TEvCompleteCheckpointResponse(checkpointId, std::move(issues)))); } Y_UNIT_TEST_SUITE(TCheckpointCoordinatorTests) { - void ShouldTriggerCheckpointImpl(ui64 graphFlags) { - TTestBootstrap bootstrap(graphFlags); + void ShouldTriggerCheckpointImpl(ui64 graphFlags) { + TTestBootstrap bootstrap(graphFlags); Cerr << "Waiting for TEvRegisterCoordinatorRequest (storage)" << Endl; - bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvRegisterCoordinatorRequest>( + bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvRegisterCoordinatorRequest>( bootstrap.StorageProxy, TDuration::Seconds(10)); MockRegisterCoordinatorResponseEvent(bootstrap); @@ -187,7 +187,7 @@ Y_UNIT_TEST_SUITE(TCheckpointCoordinatorTests) { MockCheckpointsMetadataResponse(bootstrap); Cerr << "Waiting for TEvCreateCheckpointRequest (storage)" << Endl; - auto updateState = bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvCreateCheckpointRequest>( + auto updateState = bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvCreateCheckpointRequest>( bootstrap.StorageProxy, TDuration::Seconds(10)); auto& checkpointId = updateState->Get()->CheckpointId; @@ -202,7 +202,7 @@ Y_UNIT_TEST_SUITE(TCheckpointCoordinatorTests) { MockNodeStateSavedEvent(bootstrap, checkpointId, bootstrap.EgressActor); Cerr << "Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage)" << Endl; - bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusRequest>( + bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvSetCheckpointPendingCommitStatusRequest>( bootstrap.StorageProxy, TDuration::Seconds(10)); MockSetCheckpointPendingCommitStatusResponse(bootstrap, checkpointId); @@ -216,33 +216,33 @@ 
Y_UNIT_TEST_SUITE(TCheckpointCoordinatorTests) { MockChangesCommittedEvent(bootstrap, checkpointId, bootstrap.EgressActor); Cerr << "Waiting for TEvCompleteCheckpointRequest (storage)" << Endl; - auto completed = bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvCompleteCheckpointRequest>( + auto completed = bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvCompleteCheckpointRequest>( bootstrap.StorageProxy, TDuration::Seconds(10)); UNIT_ASSERT(completed.Get() != nullptr); MockCompleteCheckpointResponse(bootstrap, checkpointId); } - Y_UNIT_TEST(ShouldTriggerCheckpoint) { - ShouldTriggerCheckpointImpl(0); - } - - Y_UNIT_TEST(ShouldTriggerCheckpointWithSource) { - ShouldTriggerCheckpointImpl(ETestGraphFlags::InputWithSource); - } - - Y_UNIT_TEST(ShouldTriggerCheckpointWithSourceWithChannel) { - ShouldTriggerCheckpointImpl(ETestGraphFlags::SourceWithChannelInOneTask); - } - - Y_UNIT_TEST(ShouldTriggerCheckpointWithSourcesAndWithChannel) { - ShouldTriggerCheckpointImpl(ETestGraphFlags::InputWithSource | ETestGraphFlags::SourceWithChannelInOneTask); - } - + Y_UNIT_TEST(ShouldTriggerCheckpoint) { + ShouldTriggerCheckpointImpl(0); + } + + Y_UNIT_TEST(ShouldTriggerCheckpointWithSource) { + ShouldTriggerCheckpointImpl(ETestGraphFlags::InputWithSource); + } + + Y_UNIT_TEST(ShouldTriggerCheckpointWithSourceWithChannel) { + ShouldTriggerCheckpointImpl(ETestGraphFlags::SourceWithChannelInOneTask); + } + + Y_UNIT_TEST(ShouldTriggerCheckpointWithSourcesAndWithChannel) { + ShouldTriggerCheckpointImpl(ETestGraphFlags::InputWithSource | ETestGraphFlags::SourceWithChannelInOneTask); + } + Y_UNIT_TEST(ShouldAbortPreviousCheckpointsIfNodeStateCantBeSaved) { TTestBootstrap bootstrap{ETestGraphFlags::InputWithSource}; Cerr << "Waiting for TEvRegisterCoordinatorRequest (storage)" << Endl; - bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvRegisterCoordinatorRequest>( + bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvRegisterCoordinatorRequest>( bootstrap.StorageProxy, TDuration::Seconds(10)); MockRegisterCoordinatorResponseEvent(bootstrap); @@ -252,7 +252,7 @@ Y_UNIT_TEST_SUITE(TCheckpointCoordinatorTests) { MockCheckpointsMetadataResponse(bootstrap); Cerr << "Waiting for TEvCreateCheckpointRequest (storage)" << Endl; - auto updateState = bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvCreateCheckpointRequest>( + auto updateState = bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvCreateCheckpointRequest>( bootstrap.StorageProxy, TDuration::Seconds(10)); UNIT_ASSERT(updateState->Get()->NodeCount == 3); @@ -267,10 +267,10 @@ Y_UNIT_TEST_SUITE(TCheckpointCoordinatorTests) { MockNodeStateSavedEvent(bootstrap, checkpointId, bootstrap.EgressActor); Cerr << "Waiting for TEvCompleteCheckpointRequest (storage)" << Endl; - auto completed = bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvAbortCheckpointRequest>( + auto completed = bootstrap.GrabEdgeEvent<TEvCheckpointStorage::TEvAbortCheckpointRequest>( bootstrap.StorageProxy, TDuration::Seconds(10)); UNIT_ASSERT(completed.Get() != nullptr); } } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/utils.cpp b/ydb/core/yq/libs/checkpointing/utils.cpp index 22c63232140..eef395a2604 100644 --- a/ydb/core/yq/libs/checkpointing/utils.cpp +++ b/ydb/core/yq/libs/checkpointing/utils.cpp @@ -1,29 +1,29 @@ #include "utils.h" -namespace NYq { - -bool IsIngress(const NYql::NDqProto::TDqTask& task) { - // No inputs at all or the only inputs are sources. 
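
utils.cpp above classifies tasks: IsIngress() is true when a task has no inputs at all or only source inputs, IsEgress() is true when any output has a sink, and HasState() currently always returns true. The small illustration below shows how these predicates behave on a task shaped like the ingress task that BuildTestGraph() produces in the unit test above; MakeSourceOnlyTask and Example are invented names for the sketch.

    #include <ydb/core/yq/libs/checkpointing/utils.h>
    #include <ydb/library/yql/dq/proto/dq_tasks.pb.h>
    #include <util/system/yassert.h>

    // Builds a task analogous to the test graph's ingress task: one source input, one channel output.
    NYql::NDqProto::TDqTask MakeSourceOnlyTask() {
        NYql::NDqProto::TDqTask task;
        task.SetId(1);
        task.AddInputs()->MutableSource()->SetType("PqSource"); // source input, no input channels
        task.AddOutputs()->AddChannels();                        // plain channel output, no sink
        return task;
    }

    void Example() {
        const auto task = MakeSourceOnlyTask();
        Y_VERIFY(NYq::IsIngress(task));  // only source inputs -> ingress
        Y_VERIFY(!NYq::IsEgress(task));  // no sink outputs -> not egress
        Y_VERIFY(NYq::HasState(task));   // currently unconditionally true
    }
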
- for (const auto& input : task.GetInputs()) { - if (!input.HasSource()) { - return false; - } - } - return true; +namespace NYq { + +bool IsIngress(const NYql::NDqProto::TDqTask& task) { + // No inputs at all or the only inputs are sources. + for (const auto& input : task.GetInputs()) { + if (!input.HasSource()) { + return false; + } + } + return true; } -bool IsEgress(const NYql::NDqProto::TDqTask& task) { - for (const auto& output : task.GetOutputs()) { - if (output.HasSink()) { - return true; - } - } - return false; +bool IsEgress(const NYql::NDqProto::TDqTask& task) { + for (const auto& output : task.GetOutputs()) { + if (output.HasSink()) { + return true; + } + } + return false; } -bool HasState(const NYql::NDqProto::TDqTask& task) { +bool HasState(const NYql::NDqProto::TDqTask& task) { Y_UNUSED(task); return true; } - -} // namespace NYq + +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/utils.h b/ydb/core/yq/libs/checkpointing/utils.h index d0540804c72..64d660d05ce 100644 --- a/ydb/core/yq/libs/checkpointing/utils.h +++ b/ydb/core/yq/libs/checkpointing/utils.h @@ -2,7 +2,7 @@ #include <ydb/library/yql/dq/proto/dq_tasks.pb.h> -namespace NYq { +namespace NYq { bool IsIngress(const NYql::NDqProto::TDqTask& task); @@ -10,4 +10,4 @@ bool IsEgress(const NYql::NDqProto::TDqTask& task); bool HasState(const NYql::NDqProto::TDqTask& task); -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing/ya.make b/ydb/core/yq/libs/checkpointing/ya.make index 8a397f67ef6..8128df60fd5 100644 --- a/ydb/core/yq/libs/checkpointing/ya.make +++ b/ydb/core/yq/libs/checkpointing/ya.make @@ -17,12 +17,12 @@ SRCS( PEERDIR( library/cpp/actors/core - ydb/core/yq/libs/actors/logging + ydb/core/yq/libs/actors/logging ydb/core/yq/libs/checkpointing_common - ydb/core/yq/libs/checkpoint_storage/events + ydb/core/yq/libs/checkpoint_storage/events ydb/library/yql/dq/actors/compute ydb/library/yql/dq/proto - ydb/library/yql/dq/state + ydb/library/yql/dq/state ydb/library/yql/providers/dq/api/protos ) @@ -31,9 +31,9 @@ YQL_LAST_ABI_VERSION() END() RECURSE( - events -) - -RECURSE_FOR_TESTS( + events +) + +RECURSE_FOR_TESTS( ut ) diff --git a/ydb/core/yq/libs/checkpointing_common/defs.cpp b/ydb/core/yq/libs/checkpointing_common/defs.cpp index e487b6542cc..b51571bcb56 100644 --- a/ydb/core/yq/libs/checkpointing_common/defs.cpp +++ b/ydb/core/yq/libs/checkpointing_common/defs.cpp @@ -2,7 +2,7 @@ #include <util/digest/multi.h> -namespace NYq { +namespace NYq { //////////////////////////////////////////////////////////////////////////////// @@ -22,22 +22,22 @@ size_t TCheckpointIdHash::operator ()(const TCheckpointId& checkpointId) return MultiHash(checkpointId.CoordinatorGeneration, checkpointId.SeqNo); } -} // namespace NYq +} // namespace NYq //////////////////////////////////////////////////////////////////////////////// template<> -void Out<NYq::TCoordinatorId>( +void Out<NYq::TCoordinatorId>( IOutputStream& out, - const NYq::TCoordinatorId& coordinatorId) + const NYq::TCoordinatorId& coordinatorId) { coordinatorId.PrintTo(out); } template<> -void Out<NYq::TCheckpointId>( +void Out<NYq::TCheckpointId>( IOutputStream& out, - const NYq::TCheckpointId& checkpointId) + const NYq::TCheckpointId& checkpointId) { out << checkpointId.CoordinatorGeneration << ":" << checkpointId.SeqNo; } diff --git a/ydb/core/yq/libs/checkpointing_common/defs.h b/ydb/core/yq/libs/checkpointing_common/defs.h index c57d1c4ff47..5e4316b7e0d 100644 --- a/ydb/core/yq/libs/checkpointing_common/defs.h +++ 
b/ydb/core/yq/libs/checkpointing_common/defs.h @@ -1,11 +1,11 @@ #pragma once -#include <ydb/core/yq/libs/graph_params/proto/graph_params.pb.h> +#include <ydb/core/yq/libs/graph_params/proto/graph_params.pb.h> #include <util/datetime/base.h> -#include <util/generic/maybe.h> +#include <util/generic/maybe.h> #include <util/stream/str.h> -namespace NYq { +namespace NYq { //////////////////////////////////////////////////////////////////////////////// @@ -85,9 +85,9 @@ struct TCheckpointMetadata { TInstant Created; TInstant Modified; - TMaybe<NProto::TGraphParams> Graph; + TMaybe<NProto::TGraphParams> Graph; }; using TCheckpoints = TVector<TCheckpointMetadata>; -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/checkpointing_common/ya.make b/ydb/core/yq/libs/checkpointing_common/ya.make index 43e8d1593d3..1c2ea273cd7 100644 --- a/ydb/core/yq/libs/checkpointing_common/ya.make +++ b/ydb/core/yq/libs/checkpointing_common/ya.make @@ -6,10 +6,10 @@ SRCS( defs.cpp ) -PEERDIR( - ydb/core/yq/libs/graph_params/proto -) - +PEERDIR( + ydb/core/yq/libs/graph_params/proto +) + GENERATE_ENUM_SERIALIZATION(defs.h) END() diff --git a/ydb/core/yq/libs/common/entity_id.cpp b/ydb/core/yq/libs/common/entity_id.cpp index dab1d1abcac..ac3d1629ec8 100644 --- a/ydb/core/yq/libs/common/entity_id.cpp +++ b/ydb/core/yq/libs/common/entity_id.cpp @@ -65,23 +65,23 @@ TString GetEntityIdAsString(const TString& prefix, EEntityType type, TInstant no return stream.Str(); } -struct TEntityIdGenerator : public IEntityIdGenerator { - TEntityIdGenerator(const TString& prefix) - : Prefix(prefix) - { - Y_VERIFY(Prefix.size() == 2); - } - - TString Generate(EEntityType type) override { - return GetEntityIdAsString(Prefix, type); - } - -private: - TString Prefix; -}; - -IEntityIdGenerator::TPtr CreateEntityIdGenerator(const TString& prefix) { - return MakeIntrusive<TEntityIdGenerator>(prefix); -} - +struct TEntityIdGenerator : public IEntityIdGenerator { + TEntityIdGenerator(const TString& prefix) + : Prefix(prefix) + { + Y_VERIFY(Prefix.size() == 2); + } + + TString Generate(EEntityType type) override { + return GetEntityIdAsString(Prefix, type); + } + +private: + TString Prefix; +}; + +IEntityIdGenerator::TPtr CreateEntityIdGenerator(const TString& prefix) { + return MakeIntrusive<TEntityIdGenerator>(prefix); +} + } // namespace NYq diff --git a/ydb/core/yq/libs/common/entity_id.h b/ydb/core/yq/libs/common/entity_id.h index d34dee0f554..3713ab16d9d 100644 --- a/ydb/core/yq/libs/common/entity_id.h +++ b/ydb/core/yq/libs/common/entity_id.h @@ -11,19 +11,19 @@ enum class EEntityType : char { JOB = 'j', RESULT = 'r', CONNECTION = 'c', - BINDING = 'b', - CHECKPOINT_GRAPH_DESCRIPTION = 'g', + BINDING = 'b', + CHECKPOINT_GRAPH_DESCRIPTION = 'g', }; TString GetEntityIdAsString(const TString& prefix, EEntityType type); TString GetEntityIdAsString(const TString& prefix, EEntityType type, TInstant now, ui32 rnd); -struct IEntityIdGenerator : public TThrRefBase { - using TPtr = TIntrusivePtr<IEntityIdGenerator>; - - virtual TString Generate(EEntityType type) = 0; -}; - -IEntityIdGenerator::TPtr CreateEntityIdGenerator(const TString& prefix); - +struct IEntityIdGenerator : public TThrRefBase { + using TPtr = TIntrusivePtr<IEntityIdGenerator>; + + virtual TString Generate(EEntityType type) = 0; +}; + +IEntityIdGenerator::TPtr CreateEntityIdGenerator(const TString& prefix); + } // namespace NYq diff --git a/ydb/core/yq/libs/config/protos/issue_id.proto b/ydb/core/yq/libs/config/protos/issue_id.proto index 
e4090be5067..2ae73ca85c0 100644 --- a/ydb/core/yq/libs/config/protos/issue_id.proto +++ b/ydb/core/yq/libs/config/protos/issue_id.proto @@ -15,6 +15,6 @@ message TIssuesIds { TIMEOUT = 1002; BAD_REQUEST = 1003; EXPIRED = 1004; - UNSUPPORTED = 1005; + UNSUPPORTED = 1005; } } diff --git a/ydb/core/yq/libs/control_plane_proxy/control_plane_proxy.cpp b/ydb/core/yq/libs/control_plane_proxy/control_plane_proxy.cpp index 53838351b42..7794e7c9b37 100644 --- a/ydb/core/yq/libs/control_plane_proxy/control_plane_proxy.cpp +++ b/ydb/core/yq/libs/control_plane_proxy/control_plane_proxy.cpp @@ -2,7 +2,7 @@ #include "probes.h" #include "utils.h" -#include <ydb/core/yq/libs/actors/logging/log.h> +#include <ydb/core/yq/libs/actors/logging/log.h> #include <ydb/core/yq/libs/control_plane_storage/control_plane_storage.h> #include <ydb/core/yq/libs/control_plane_storage/events/events.h> #include <ydb/core/yq/libs/control_plane_storage/util.h> @@ -37,10 +37,10 @@ namespace NYq { -namespace { +namespace { using namespace NActors; -using namespace NYq::NConfig; +using namespace NYq::NConfig; using namespace NKikimr; using namespace NThreading; using namespace NYdb; @@ -48,7 +48,7 @@ using namespace NYdb::NTable; LWTRACE_USING(YQ_CONTROL_PLANE_PROXY_PROVIDER); -struct TRequestCounters: public virtual TThrRefBase { +struct TRequestCounters: public virtual TThrRefBase { const TString Name; NMonitoring::TDynamicCounters::TCounterPtr InFly; @@ -103,7 +103,7 @@ class TRequestActor : public NActors::TActorBootstrapped<TRequestActor<TRequestP TString CloudId; public: - static constexpr char ActorName[] = "YQ_CONTROL_PLANE_PROXY_REQUEST_ACTOR"; + static constexpr char ActorName[] = "YQ_CONTROL_PLANE_PROXY_REQUEST_ACTOR"; explicit TRequestActor(const NConfig::TControlPlaneProxyConfig& config, TActorId sender, ui32 cookie, @@ -317,7 +317,7 @@ class TControlPlaneProxyActor : public NActors::TActorBootstrapped<TControlPlane { for (auto& request: Requests) { request->Register(Counters); - } + } } }; @@ -335,9 +335,9 @@ public: void Bootstrap() { CPP_LOG_D("Starting yandex query control plane proxy. 
Actor id: " << SelfId()); - + NLwTraceMonPage::ProbeRegistry().AddProbesList(LWTRACE_GET_PROBES(YQ_CONTROL_PLANE_PROXY_PROVIDER)); - + NActors::TMon* mon = AppData()->Mon; if (mon) { NMonitoring::TIndexMonPage* actorsMonPage = mon->RegisterIndexPage("actors", "Actors"); @@ -402,7 +402,7 @@ private: if (!Config.GetEnablePermissions()) { return issues; } - + for (const auto& requiredPermission: requiredPermissions) { if (!IsIn(ev->Get()->Permissions, requiredPermission)) { issues.AddIssue(MakeErrorIssue(TIssuesIds::ACCESS_DENIED, "No permission " + requiredPermission + " in a given scope yandexcloud://" + ev->Get()->FolderId)); @@ -423,7 +423,7 @@ private: TString user = std::move(ev->Get()->User); TString token = std::move(ev->Get()->Token); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(CreateQueryRequest, scope, user, delta, byteSize, isSuccess, isTimeout); }; @@ -461,7 +461,7 @@ private: TString user = std::move(ev->Get()->User); TString token = std::move(ev->Get()->Token); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(ListQueriesRequest, scope, user, delta, byteSize, isSuccess, isTimeout); }; @@ -500,7 +500,7 @@ private: TString token = std::move(ev->Get()->Token); const TString queryId = request.query_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(DescribeQueryRequest, scope, user, queryId, delta, byteSize, isSuccess, isTimeout); }; @@ -578,7 +578,7 @@ private: TString token = std::move(ev->Get()->Token); const TString queryId = request.query_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(ModifyQueryRequest, scope, user, queryId, delta, byteSize, isSuccess, isTimeout); }; @@ -617,7 +617,7 @@ private: TString token = std::move(ev->Get()->Token); const TString queryId = request.query_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(DeleteQueryRequest, scope, user, queryId, delta, byteSize, isSuccess, isTimeout); }; @@ -656,7 +656,7 @@ private: TString token = std::move(ev->Get()->Token); const TString queryId = request.query_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(ControlQueryRequest, scope, user, queryId, delta, byteSize, isSuccess, isTimeout); }; @@ -698,7 +698,7 @@ private: const int64_t limit = request.limit(); const int64_t offset = request.offset(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(GetResultDataRequest, scope, user, queryId, resultSetIndex, offset, limit, delta, byteSize, isSuccess, isTimeout); }; @@ -737,7 +737,7 @@ private: TString token = std::move(ev->Get()->Token); const TString queryId = request.query_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(ListJobsRequest, scope, user, queryId, delta, byteSize, isSuccess, isTimeout); }; @@ -814,7 +814,7 @@ private: TString user = std::move(ev->Get()->User); TString token = std::move(ev->Get()->Token); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { 
LWPROBE(CreateConnectionRequest, scope, user, delta, byteSize, isSuccess, isTimeout); }; @@ -857,7 +857,7 @@ private: TString user = std::move(ev->Get()->User); TString token = std::move(ev->Get()->Token); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(ListConnectionsRequest, scope, user, delta, byteSize, isSuccess, isTimeout); }; @@ -896,7 +896,7 @@ private: TString token = std::move(ev->Get()->Token); const TString connectionId = request.connection_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(DescribeConnectionRequest, scope, user, connectionId, delta, byteSize, isSuccess, isTimeout); }; @@ -935,7 +935,7 @@ private: TString token = std::move(ev->Get()->Token); const TString connectionId = request.connection_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(ModifyConnectionRequest, scope, user, connectionId, delta, byteSize, isSuccess, isTimeout); }; @@ -979,7 +979,7 @@ private: TString token = std::move(ev->Get()->Token); const TString connectionId = request.connection_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(DeleteConnectionRequest, scope, user, connectionId, delta, byteSize, isSuccess, isTimeout); }; @@ -1060,7 +1060,7 @@ private: TString user = std::move(ev->Get()->User); TString token = std::move(ev->Get()->Token); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(CreateBindingRequest, scope, user, delta, byteSize, isSuccess, isTimeout); }; @@ -1098,7 +1098,7 @@ private: TString user = std::move(ev->Get()->User); TString token = std::move(ev->Get()->Token); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(ListBindingsRequest, scope, user, delta, byteSize, isSuccess, isTimeout); }; @@ -1137,7 +1137,7 @@ private: TString token = std::move(ev->Get()->Token); const TString bindingId = request.binding_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(DescribeBindingRequest, scope, user, bindingId, delta, byteSize, isSuccess, isTimeout); }; @@ -1176,7 +1176,7 @@ private: TString token = std::move(ev->Get()->Token); const TString bindingId = request.binding_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(ModifyBindingRequest, scope, user, bindingId, delta, byteSize, isSuccess, isTimeout); }; @@ -1215,7 +1215,7 @@ private: TString token = std::move(ev->Get()->Token); const TString bindingId = request.binding_id(); const int byteSize = request.ByteSize(); - + auto probe = [=](const TDuration& delta, bool isSuccess, bool isTimeout) { LWPROBE(DeleteBindingRequest, scope, user, bindingId, delta, byteSize, isSuccess, isTimeout); }; @@ -1255,8 +1255,8 @@ private: } }; -} // namespace - +} // namespace + TActorId ControlPlaneProxyActorId() { constexpr TStringBuf name = "YQCTLPRX"; return NActors::TActorId(0, name); diff --git a/ydb/core/yq/libs/control_plane_proxy/control_plane_proxy.h b/ydb/core/yq/libs/control_plane_proxy/control_plane_proxy.h index 12e495ba1c9..7d2cc6421e7 100644 --- 
a/ydb/core/yq/libs/control_plane_proxy/control_plane_proxy.h +++ b/ydb/core/yq/libs/control_plane_proxy/control_plane_proxy.h @@ -1,6 +1,6 @@ #pragma once -#include <ydb/core/yq/libs/actors/logging/log.h> +#include <ydb/core/yq/libs/actors/logging/log.h> #include <ydb/core/yq/libs/config/protos/control_plane_proxy.pb.h> #include <library/cpp/actors/core/actor.h> @@ -24,4 +24,4 @@ NActors::TActorId ControlPlaneProxyActorId(); NActors::IActor* CreateControlPlaneProxyActor(const NConfig::TControlPlaneProxyConfig& config, const NMonitoring::TDynamicCounterPtr& counters); -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/control_plane_proxy/events/events.h b/ydb/core/yq/libs/control_plane_proxy/events/events.h index aeb922e7845..32790d999b2 100644 --- a/ydb/core/yq/libs/control_plane_proxy/events/events.h +++ b/ydb/core/yq/libs/control_plane_proxy/events/events.h @@ -15,7 +15,7 @@ namespace NYq { struct TEvControlPlaneProxy { // Event ids. enum EEv : ui32 { - EvCreateQueryRequest = YqEventSubspaceBegin(NYq::TYqEventSubspace::ControlPlaneProxy), + EvCreateQueryRequest = YqEventSubspaceBegin(NYq::TYqEventSubspace::ControlPlaneProxy), EvCreateQueryResponse, EvListQueriesRequest, EvListQueriesResponse, @@ -60,7 +60,7 @@ struct TEvControlPlaneProxy { EvEnd, }; - static_assert(EvEnd <= YqEventSubspaceEnd(NYq::TYqEventSubspace::ControlPlaneProxy), "All events must be in their subspace"); + static_assert(EvEnd <= YqEventSubspaceEnd(NYq::TYqEventSubspace::ControlPlaneProxy), "All events must be in their subspace"); struct TEvCreateQueryRequest : NActors::TEventLocal<TEvCreateQueryRequest, EvCreateQueryRequest> { explicit TEvCreateQueryRequest(const TString& folderId, diff --git a/ydb/core/yq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp b/ydb/core/yq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp index 3d0c5bb70b2..a65fa041833 100644 --- a/ydb/core/yq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp +++ b/ydb/core/yq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp @@ -1,4 +1,4 @@ -#include <ydb/core/yq/libs/actors/logging/log.h> +#include <ydb/core/yq/libs/actors/logging/log.h> #include <ydb/core/yq/libs/control_plane_proxy/control_plane_proxy.h> #include <ydb/core/yq/libs/control_plane_storage/control_plane_storage.h> #include <ydb/core/yq/libs/control_plane_storage/events/events.h> diff --git a/ydb/core/yq/libs/control_plane_proxy/ut/ya.make b/ydb/core/yq/libs/control_plane_proxy/ut/ya.make index 1550392ebc0..84b09d7f60f 100644 --- a/ydb/core/yq/libs/control_plane_proxy/ut/ya.make +++ b/ydb/core/yq/libs/control_plane_proxy/ut/ya.make @@ -6,7 +6,7 @@ PEERDIR( library/cpp/testing/unittest ydb/core/base ydb/core/testlib - ydb/core/yq/libs/actors/logging + ydb/core/yq/libs/actors/logging ydb/core/yq/libs/control_plane_storage ydb/core/yq/libs/test_connection ydb/library/folder_service diff --git a/ydb/core/yq/libs/control_plane_proxy/ya.make b/ydb/core/yq/libs/control_plane_proxy/ya.make index 785bc92f283..b0fc3e452fc 100644 --- a/ydb/core/yq/libs/control_plane_proxy/ya.make +++ b/ydb/core/yq/libs/control_plane_proxy/ya.make @@ -11,7 +11,7 @@ PEERDIR( library/cpp/actors/core ydb/core/base ydb/core/mon - ydb/core/yq/libs/actors/logging + ydb/core/yq/libs/actors/logging ydb/core/yq/libs/actors ydb/core/yq/libs/control_plane_proxy/events ydb/core/yq/libs/control_plane_storage diff --git a/ydb/core/yq/libs/control_plane_storage/control_plane_storage.h b/ydb/core/yq/libs/control_plane_storage/control_plane_storage.h index 5180aedeea7..77ae7067cd9 100644 --- 
a/ydb/core/yq/libs/control_plane_storage/control_plane_storage.h +++ b/ydb/core/yq/libs/control_plane_storage/control_plane_storage.h @@ -6,7 +6,7 @@ #include <ydb/core/yq/libs/config/protos/common.pb.h> #include <ydb/core/yq/libs/config/protos/control_plane_storage.pb.h> #include <ydb/core/yq/libs/shared_resources/shared_resources.h> -#include <ydb/core/yq/libs/actors/logging/log.h> +#include <ydb/core/yq/libs/actors/logging/log.h> #define CPS_LOG_D(s) \ LOG_YQ_CONTROL_PLANE_STORAGE_DEBUG(s) diff --git a/ydb/core/yq/libs/control_plane_storage/events/events.h b/ydb/core/yq/libs/control_plane_storage/events/events.h index 8c98e790d72..0e828ff9f4a 100644 --- a/ydb/core/yq/libs/control_plane_storage/events/events.h +++ b/ydb/core/yq/libs/control_plane_storage/events/events.h @@ -15,7 +15,7 @@ #include <ydb/library/yql/public/issue/yql_issue.h> #include <ydb/core/yq/libs/control_plane_storage/proto/yq_internal.pb.h> -#include <ydb/core/yq/libs/events/event_subspace.h> +#include <ydb/core/yq/libs/events/event_subspace.h> namespace NYq { @@ -95,7 +95,7 @@ public: struct TEvControlPlaneStorage { // Event ids. enum EEv : ui32 { - EvCreateQueryRequest = YqEventSubspaceBegin(NYq::TYqEventSubspace::ControlPlaneStorage), + EvCreateQueryRequest = YqEventSubspaceBegin(NYq::TYqEventSubspace::ControlPlaneStorage), EvCreateQueryResponse, EvListQueriesRequest, EvListQueriesResponse, @@ -148,7 +148,7 @@ struct TEvControlPlaneStorage { EvEnd, }; - static_assert(EvEnd <= YqEventSubspaceEnd(NYq::TYqEventSubspace::ControlPlaneStorage), "All events must be in their subspace"); + static_assert(EvEnd <= YqEventSubspaceEnd(NYq::TYqEventSubspace::ControlPlaneStorage), "All events must be in their subspace"); struct TEvCreateQueryRequest : NActors::TEventLocal<TEvCreateQueryRequest, EvCreateQueryRequest> { explicit TEvCreateQueryRequest(const TString& scope, @@ -1010,18 +1010,18 @@ struct TEvControlPlaneStorage { TDebugInfoPtr DebugInfo; }; - // Description of consumer that was created by YQ - struct TTopicConsumer { - TString DatabaseId; - TString Database; - TString TopicPath; - TString ConsumerName; - TString ClusterEndpoint; - bool UseSsl = false; - TString TokenName; - bool AddBearerToToken = false; - }; - + // Description of consumer that was created by YQ + struct TTopicConsumer { + TString DatabaseId; + TString Database; + TString TopicPath; + TString ConsumerName; + TString ClusterEndpoint; + bool UseSsl = false; + TString TokenName; + bool AddBearerToToken = false; + }; + struct TEvPingTaskRequest : NActors::TEventLocal<TEvPingTaskRequest, EvPingTaskRequest> { explicit TEvPingTaskRequest(const TString& scope, const TString& queryId, const TString& owner, const TInstant& deadline, const TString& resultId = "") : Scope(scope) @@ -1047,11 +1047,11 @@ struct TEvControlPlaneStorage { TMaybe<TInstant> StartedAt; TMaybe<TInstant> FinishedAt; bool ResignQuery = false; - TVector<TTopicConsumer> CreatedTopicConsumers; + TVector<TTopicConsumer> CreatedTopicConsumers; TVector<TString> DqGraphs; i32 DqGraphIndex = 0; - YandexQuery::StateLoadMode StateLoadMode = YandexQuery::STATE_LOAD_MODE_UNSPECIFIED; - TMaybe<YandexQuery::StreamingDisposition> StreamingDisposition; + YandexQuery::StateLoadMode StateLoadMode = YandexQuery::STATE_LOAD_MODE_UNSPECIFIED; + TMaybe<YandexQuery::StreamingDisposition> StreamingDisposition; }; struct TEvPingTaskResponse : NActors::TEventLocal<TEvPingTaskResponse, EvPingTaskResponse> { @@ -1065,7 +1065,7 @@ struct TEvControlPlaneStorage { { } - YandexQuery::QueryAction Action = 
YandexQuery::QUERY_ACTION_UNSPECIFIED; + YandexQuery::QueryAction Action = YandexQuery::QUERY_ACTION_UNSPECIFIED; NYql::TIssues Issues; TDebugInfoPtr DebugInfo; }; diff --git a/ydb/core/yq/libs/control_plane_storage/events/ya.make b/ydb/core/yq/libs/control_plane_storage/events/ya.make index 6bfef248919..01cc9581918 100644 --- a/ydb/core/yq/libs/control_plane_storage/events/ya.make +++ b/ydb/core/yq/libs/control_plane_storage/events/ya.make @@ -10,7 +10,7 @@ PEERDIR( library/cpp/actors/core library/cpp/actors/interconnect ydb/core/yq/libs/control_plane_storage/proto - ydb/core/yq/libs/events + ydb/core/yq/libs/events ) END() diff --git a/ydb/core/yq/libs/control_plane_storage/in_memory_control_plane_storage.cpp b/ydb/core/yq/libs/control_plane_storage/in_memory_control_plane_storage.cpp index 783b9242385..ed5837e31d6 100644 --- a/ydb/core/yq/libs/control_plane_storage/in_memory_control_plane_storage.cpp +++ b/ydb/core/yq/libs/control_plane_storage/in_memory_control_plane_storage.cpp @@ -57,7 +57,7 @@ public: { } - static constexpr char ActorName[] = "YQ_CONTROL_PLANE_STORAGE"; + static constexpr char ActorName[] = "YQ_CONTROL_PLANE_STORAGE"; private: STRICT_STFUNC(StateFunc, @@ -107,7 +107,7 @@ private: if (issues) { CPS_LOG_D("CreateQueryRequest, validation failed: " << request.DebugString() << " error: " << issues.ToString()); Send(ev->Sender, new TEvControlPlaneStorage::TEvCreateQueryResponse(issues), 0, ev->Cookie); - return; + return; } const TString user = ev->Get()->User; @@ -371,7 +371,7 @@ private: issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "Request size exceeded " + ToString(request.ByteSize()) + " out of " + ToString(Config.Proto.GetMaxRequestSize()) + " bytes")); } - const uint64_t countQueries = count_if(Queries.begin(), Queries.end(), [scope](const auto& item) { + const uint64_t countQueries = count_if(Queries.begin(), Queries.end(), [scope](const auto& item) { const auto& [key, value] = item; return key.Scope == scope; }); diff --git a/ydb/core/yq/libs/control_plane_storage/internal/task_get.cpp b/ydb/core/yq/libs/control_plane_storage/internal/task_get.cpp index 42b3c1a1acb..1f9902e454c 100644 --- a/ydb/core/yq/libs/control_plane_storage/internal/task_get.cpp +++ b/ydb/core/yq/libs/control_plane_storage/internal/task_get.cpp @@ -67,8 +67,8 @@ std::tuple<TString, NYdb::TParams, std::function<std::pair<TString, NYdb::TParam const std::shared_ptr<TResponseTasks>& responseTasks, const TInstant& taskLeaseTimestamp, bool disableCurrentIam, - const TDuration& automaticQueriesTtl, - const TDuration& resultSetsTtl) + const TDuration& automaticQueriesTtl, + const TDuration& resultSetsTtl) { const auto& task = taskInternal.Task; @@ -103,7 +103,7 @@ std::tuple<TString, NYdb::TParams, std::function<std::pair<TString, NYdb::TParam if (!task.Query.ParseFromString(*parser.ColumnParser(QUERY_COLUMN_NAME).GetOptionalString())) { throw TControlPlaneStorageException(TIssuesIds::INTERNAL_ERROR) << "Error parsing proto message for query. Please contact internal support"; } - const TInstant deadline = TInstant::Now() + (task.Query.content().automatic() ? std::min(automaticQueriesTtl, resultSetsTtl) : resultSetsTtl); + const TInstant deadline = TInstant::Now() + (task.Query.content().automatic() ? 
std::min(automaticQueriesTtl, resultSetsTtl) : resultSetsTtl); task.Deadline = deadline; if (!task.Internal.ParseFromString(*parser.ColumnParser(INTERNAL_COLUMN_NAME).GetOptionalString())) { throw TControlPlaneStorageException(TIssuesIds::INTERNAL_ERROR) << "Error parsing proto message for query internal. Please contact internal support"; @@ -176,7 +176,7 @@ void TYdbControlPlaneStorageActor::Handle(TEvControlPlaneStorage::TEvGetTaskRequ TVector<TPickTaskParams> pickTaskParams; const auto now = TInstant::Now(); if (resultSets.empty() || !resultSets.back().RowsCount()) { - return pickTaskParams; + return pickTaskParams; } TResultSetParser parser(resultSets.back()); diff --git a/ydb/core/yq/libs/control_plane_storage/internal/task_ping.cpp b/ydb/core/yq/libs/control_plane_storage/internal/task_ping.cpp index 834d40c0f72..381dcfa5fbb 100644 --- a/ydb/core/yq/libs/control_plane_storage/internal/task_ping.cpp +++ b/ydb/core/yq/libs/control_plane_storage/internal/task_ping.cpp @@ -6,17 +6,17 @@ namespace NYq { -namespace { - -bool IsFinishedStatus(YandexQuery::QueryMeta::ComputeStatus status) { - return status == YandexQuery::QueryMeta::ABORTED_BY_SYSTEM - || status == YandexQuery::QueryMeta::ABORTED_BY_USER - || status == YandexQuery::QueryMeta::COMPLETED - || status == YandexQuery::QueryMeta::FAILED; -} - -} // namespace - +namespace { + +bool IsFinishedStatus(YandexQuery::QueryMeta::ComputeStatus status) { + return status == YandexQuery::QueryMeta::ABORTED_BY_SYSTEM + || status == YandexQuery::QueryMeta::ABORTED_BY_USER + || status == YandexQuery::QueryMeta::COMPLETED + || status == YandexQuery::QueryMeta::FAILED; +} + +} // namespace + std::tuple<TString, TParams, const std::function<std::pair<TString, NYdb::TParams>(const TVector<NYdb::TResultSet>&)>> ConstructHardPingTask( const TEvControlPlaneStorage::TEvPingTaskRequest* request, std::shared_ptr<YandexQuery::QueryAction> response, const TString& tablePathPrefix, const TDuration& automaticQueriesTtl) { @@ -151,23 +151,23 @@ std::tuple<TString, TParams, const std::function<std::pair<TString, NYdb::TParam *query.mutable_meta()->mutable_result_expire_at() = NProtoInterop::CastToProto(request->Deadline); } - if (request->StateLoadMode) { - internal.set_state_load_mode(request->StateLoadMode); - if (request->StateLoadMode == YandexQuery::FROM_LAST_CHECKPOINT) { // Saved checkpoint - query.mutable_meta()->set_has_saved_checkpoints(true); - } - } - - if (request->StreamingDisposition) { - internal.mutable_disposition()->CopyFrom(*request->StreamingDisposition); - } - - if (request->Status && IsFinishedStatus(*request->Status)) { - internal.clear_created_topic_consumers(); - internal.clear_dq_graph(); - internal.clear_dq_graph_index(); - } - + if (request->StateLoadMode) { + internal.set_state_load_mode(request->StateLoadMode); + if (request->StateLoadMode == YandexQuery::FROM_LAST_CHECKPOINT) { // Saved checkpoint + query.mutable_meta()->set_has_saved_checkpoints(true); + } + } + + if (request->StreamingDisposition) { + internal.mutable_disposition()->CopyFrom(*request->StreamingDisposition); + } + + if (request->Status && IsFinishedStatus(*request->Status)) { + internal.clear_created_topic_consumers(); + internal.clear_dq_graph(); + internal.clear_dq_graph_index(); + } + if (!request->CreatedTopicConsumers.empty()) { std::set<Yq::Private::TopicConsumer, TTopicConsumerLess> mergedConsumers; for (auto&& c : *internal.mutable_created_topic_consumers()) { diff --git a/ydb/core/yq/libs/control_plane_storage/message_builders.h 
b/ydb/core/yq/libs/control_plane_storage/message_builders.h index 75ee6f1f9fa..97352d441d0 100644 --- a/ydb/core/yq/libs/control_plane_storage/message_builders.h +++ b/ydb/core/yq/libs/control_plane_storage/message_builders.h @@ -227,7 +227,7 @@ public: TModifyQueryBuilder& SetState(const YandexQuery::StateLoadMode& state) { - Request.set_state_load_mode(state); + Request.set_state_load_mode(state); return *this; } @@ -695,9 +695,9 @@ public: binding.set_stream_name("my_stream"); binding.set_format("json"); binding.set_compression("zip"); - auto* column = binding.mutable_schema()->add_column(); - column->set_name("sample_column_name"); - column->mutable_type()->set_type_id(Ydb::Type::UINT64); + auto* column = binding.mutable_schema()->add_column(); + column->set_name("sample_column_name"); + column->mutable_type()->set_type_id(Ydb::Type::UINT64); CreateDataStreams(binding); } @@ -810,9 +810,9 @@ public: binding.set_stream_name("my_stream"); binding.set_format("json"); binding.set_compression("zip"); - auto* column = binding.mutable_schema()->add_column(); - column->set_name("sample_column_name"); - column->mutable_type()->set_type_id(Ydb::Type::UINT64); + auto* column = binding.mutable_schema()->add_column(); + column->set_name("sample_column_name"); + column->mutable_type()->set_type_id(Ydb::Type::UINT64); CreateDataStreams(binding); } @@ -1004,7 +1004,7 @@ class TPingTaskBuilder { TMaybe<TInstant> StartedAt; TMaybe<TInstant> FinishedAt; bool ResignQuery = false; - TVector<NYq::TEvControlPlaneStorage::TTopicConsumer> CreatedTopicConsumers; + TVector<NYq::TEvControlPlaneStorage::TTopicConsumer> CreatedTopicConsumers; TVector<TString> DqGraphs; i32 DqGraphIndex = 0; @@ -1098,18 +1098,18 @@ public: return *this; } - TPingTaskBuilder& SetResignQuery(bool resignQuery = true) + TPingTaskBuilder& SetResignQuery(bool resignQuery = true) { ResignQuery = resignQuery; return *this; } - TPingTaskBuilder& AddCreatedConsumer(const TString& databaseId, const TString& database, const TString& topicPath, const TString& consumerName, const TString& clusterEndpoint, bool useSsl) - { - CreatedTopicConsumers.emplace_back(NYq::TEvControlPlaneStorage::TTopicConsumer{databaseId, database, topicPath, consumerName, clusterEndpoint, useSsl, "", false}); - return *this; - } - + TPingTaskBuilder& AddCreatedConsumer(const TString& databaseId, const TString& database, const TString& topicPath, const TString& consumerName, const TString& clusterEndpoint, bool useSsl) + { + CreatedTopicConsumers.emplace_back(NYq::TEvControlPlaneStorage::TTopicConsumer{databaseId, database, topicPath, consumerName, clusterEndpoint, useSsl, "", false}); + return *this; + } + TPingTaskBuilder& AddDqGraph(const TString& dqGraph) { DqGraphs.push_back(dqGraph); @@ -1135,7 +1135,7 @@ public: request->StartedAt = StartedAt; request->FinishedAt = FinishedAt; request->ResignQuery = ResignQuery; - request->CreatedTopicConsumers = CreatedTopicConsumers; + request->CreatedTopicConsumers = CreatedTopicConsumers; request->DqGraphs = DqGraphs; request->DqGraphIndex = DqGraphIndex; return request; diff --git a/ydb/core/yq/libs/control_plane_storage/proto/yq_internal.proto b/ydb/core/yq/libs/control_plane_storage/proto/yq_internal.proto index e6ebff0c847..eb9f0df3497 100644 --- a/ydb/core/yq/libs/control_plane_storage/proto/yq_internal.proto +++ b/ydb/core/yq/libs/control_plane_storage/proto/yq_internal.proto @@ -29,7 +29,7 @@ message QueryInternal { repeated Yq.Private.TopicConsumer created_topic_consumers = 12; repeated bytes dq_graph = 13; int32 
dq_graph_index = 14; - StreamingDisposition disposition = 15; + StreamingDisposition disposition = 15; } message JobInternal { diff --git a/ydb/core/yq/libs/control_plane_storage/util.cpp b/ydb/core/yq/libs/control_plane_storage/util.cpp index 288f25a1cb4..50797a80476 100644 --- a/ydb/core/yq/libs/control_plane_storage/util.cpp +++ b/ydb/core/yq/libs/control_plane_storage/util.cpp @@ -91,7 +91,7 @@ NConfig::TControlPlaneStorageConfig FillDefaultParameters(NConfig::TControlPlane return config; } -bool DoesPingTaskUpdateQueriesTable(const TEvControlPlaneStorage::TEvPingTaskRequest* request) { +bool DoesPingTaskUpdateQueriesTable(const TEvControlPlaneStorage::TEvPingTaskRequest* request) { if (!request) { return false; } @@ -103,12 +103,12 @@ bool DoesPingTaskUpdateQueriesTable(const TEvControlPlaneStorage::TEvPingTaskReq request->Ast || request->Plan || request->StartedAt || - request->FinishedAt || + request->FinishedAt || !request->CreatedTopicConsumers.empty() || !request->DqGraphs.empty() || - request->DqGraphIndex || - request->StateLoadMode || - request->StreamingDisposition; + request->DqGraphIndex || + request->StateLoadMode || + request->StreamingDisposition; } NYdb::TValue PackItemsToList(const TVector<NYdb::TValue>& items) { diff --git a/ydb/core/yq/libs/control_plane_storage/util.h b/ydb/core/yq/libs/control_plane_storage/util.h index 8deddc25cae..005a5d4e21a 100644 --- a/ydb/core/yq/libs/control_plane_storage/util.h +++ b/ydb/core/yq/libs/control_plane_storage/util.h @@ -24,7 +24,7 @@ auto CreateArray(const T(&list)[N]) -> std::array<T, K> { return result; } -bool DoesPingTaskUpdateQueriesTable(const TEvControlPlaneStorage::TEvPingTaskRequest* request); +bool DoesPingTaskUpdateQueriesTable(const TEvControlPlaneStorage::TEvPingTaskRequest* request); NYdb::TValue PackItemsToList(const TVector<NYdb::TValue>& items); diff --git a/ydb/core/yq/libs/control_plane_storage/ya.make b/ydb/core/yq/libs/control_plane_storage/ya.make index 8304e50db50..9a4aca788be 100644 --- a/ydb/core/yq/libs/control_plane_storage/ya.make +++ b/ydb/core/yq/libs/control_plane_storage/ya.make @@ -20,7 +20,7 @@ PEERDIR( library/cpp/protobuf/interop ydb/core/base ydb/core/mon - ydb/core/yq/libs/actors/logging + ydb/core/yq/libs/actors/logging ydb/core/yq/libs/common ydb/core/yq/libs/config ydb/core/yq/libs/config/protos diff --git a/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage.cpp b/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage.cpp index 222ebb6e8e3..e9c865073d7 100644 --- a/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage.cpp +++ b/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage.cpp @@ -76,7 +76,7 @@ TYdbControlPlaneStorageActor::TConfig::TConfig(const NConfig::TControlPlaneStora */ TAsyncStatus TYdbControlPlaneStorageActor::CreateQueriesTable(TActorSystem* as) { - auto tablePath = JoinPath(YdbConnection->TablePathPrefix, QUERIES_TABLE_NAME); + auto tablePath = JoinPath(YdbConnection->TablePathPrefix, QUERIES_TABLE_NAME); auto description = TTableBuilder() .AddNullableColumn(SCOPE_COLUMN_NAME, EPrimitiveType::String) @@ -107,7 +107,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateQueriesTable(TActorSystem* as) }) .Apply([=](const auto& future) { auto status = future.GetValue(); - if (!IsTableCreated(status)) { + if (!IsTableCreated(status)) { CPS_LOG_AS_E(*as, "create quries table error: " << status.GetIssues().ToString()); return CreateQueriesTable(as); } @@ -138,7 +138,7 @@ TAsyncStatus 
TYdbControlPlaneStorageActor::CreatePendingSmallTable(TActorSystem* }) .Apply([=](const auto& future) { auto status = future.GetValue(); - if (!IsTableCreated(status)) { + if (!IsTableCreated(status)) { CPS_LOG_AS_E(*as, "create pending table error: " << status.GetIssues().ToString()); return CreatePendingSmallTable(as); } @@ -148,7 +148,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreatePendingSmallTable(TActorSystem* TAsyncStatus TYdbControlPlaneStorageActor::CreateConnectionsTable(TActorSystem* as) { - auto tablePath = JoinPath(YdbConnection->TablePathPrefix, CONNECTIONS_TABLE_NAME); + auto tablePath = JoinPath(YdbConnection->TablePathPrefix, CONNECTIONS_TABLE_NAME); auto description = TTableBuilder() .AddNullableColumn(SCOPE_COLUMN_NAME, EPrimitiveType::String) @@ -169,7 +169,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateConnectionsTable(TActorSystem* }) .Apply([=](const auto& future) { auto status = future.GetValue(); - if (!IsTableCreated(status)) { + if (!IsTableCreated(status)) { CPS_LOG_AS_E(*as, "create connections table error: " << status.GetIssues().ToString()); return CreateConnectionsTable(as); } @@ -182,7 +182,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateDirectory(TActorSystem* as) auto schemeClient = NYdb::NScheme::TSchemeClient(YdbConnection->Driver); return schemeClient.MakeDirectory(YdbConnection->TablePathPrefix).Apply([=](const auto& future) { auto status = future.GetValue(); - if (!IsTableCreated(status)) { + if (!IsTableCreated(status)) { CPS_LOG_AS_E(*as, "create directory error: " << status.GetIssues().ToString()); return CreateDirectory(as); } @@ -192,7 +192,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateDirectory(TActorSystem* as) TAsyncStatus TYdbControlPlaneStorageActor::CreateJobsTable(TActorSystem* as) { - auto tablePath = JoinPath(YdbConnection->TablePathPrefix, JOBS_TABLE_NAME); + auto tablePath = JoinPath(YdbConnection->TablePathPrefix, JOBS_TABLE_NAME); auto description = TTableBuilder() .AddNullableColumn(SCOPE_COLUMN_NAME, EPrimitiveType::String) @@ -212,7 +212,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateJobsTable(TActorSystem* as) }) .Apply([=](const auto& future) { auto status = future.GetValue(); - if (!IsTableCreated(status)) { + if (!IsTableCreated(status)) { CPS_LOG_AS_E(*as, "create jobs table error: " << status.GetIssues().ToString()); return CreateJobsTable(as); } @@ -222,7 +222,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateJobsTable(TActorSystem* as) TAsyncStatus TYdbControlPlaneStorageActor::CreateNodesTable(TActorSystem* as) { - auto tablePath = JoinPath(YdbConnection->TablePathPrefix, NODES_TABLE_NAME); + auto tablePath = JoinPath(YdbConnection->TablePathPrefix, NODES_TABLE_NAME); auto description = TTableBuilder() .AddNullableColumn(TENANT_COLUMN_NAME, EPrimitiveType::String) @@ -245,7 +245,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateNodesTable(TActorSystem* as) }) .Apply([=](const auto& future) { auto status = future.GetValue(); - if (!IsTableCreated(status)) { + if (!IsTableCreated(status)) { CPS_LOG_AS_E(*as, "create nodes table error: " << status.GetIssues().ToString()); return CreateNodesTable(as); } @@ -255,7 +255,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateNodesTable(TActorSystem* as) TAsyncStatus TYdbControlPlaneStorageActor::CreateBindingsTable(TActorSystem* as) { - auto tablePath = JoinPath(YdbConnection->TablePathPrefix, BINDINGS_TABLE_NAME); + auto tablePath = JoinPath(YdbConnection->TablePathPrefix, BINDINGS_TABLE_NAME); auto description = TTableBuilder() 
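Each CreateXxxTable call above builds a table description and, in the Apply continuation, re-issues itself whenever IsTableCreated reports failure, so bootstrap keeps retrying until every table exists. A simplified sketch of that retry-until-created idea, with a plain loop and a fake status type instead of the NYdb futures (all names here are stand-ins):

// Retry table creation until it succeeds, logging each failed attempt.
#include <iostream>
#include <string>

struct TStatus {
    bool Success;
    std::string Issues;
};

bool IsTableCreated(const TStatus& status) {
    // In the real code this also treats "already exists" as success.
    return status.Success;
}

TStatus CreateTableOnce(const std::string& tablePath, int attempt) {
    // Pretend the first attempt fails with a transient error.
    if (attempt == 0) {
        return {false, "transient error creating " + tablePath};
    }
    return {true, ""};
}

void CreateTableWithRetry(const std::string& tablePath) {
    for (int attempt = 0;; ++attempt) {
        const TStatus status = CreateTableOnce(tablePath, attempt);
        if (IsTableCreated(status)) {
            std::cout << "table ready: " << tablePath << "\n";
            return;
        }
        std::cout << "create table error: " << status.Issues << ", retrying\n";
    }
}

int main() {
    CreateTableWithRetry("yq/queries");
}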
.AddNullableColumn(SCOPE_COLUMN_NAME, EPrimitiveType::String) @@ -276,7 +276,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateBindingsTable(TActorSystem* as) }) .Apply([=](const auto& future) { auto status = future.GetValue(); - if (!IsTableCreated(status)) { + if (!IsTableCreated(status)) { CPS_LOG_AS_E(*as, "create bindings table error: " << status.GetIssues().ToString()); return CreateBindingsTable(as); } @@ -287,7 +287,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateBindingsTable(TActorSystem* as) TAsyncStatus TYdbControlPlaneStorageActor::CreateIdempotencyKeysTable(TActorSystem* as) { - auto tablePath = JoinPath(YdbConnection->TablePathPrefix, IDEMPOTENCY_KEYS_TABLE_NAME); + auto tablePath = JoinPath(YdbConnection->TablePathPrefix, IDEMPOTENCY_KEYS_TABLE_NAME); auto description = TTableBuilder() .AddNullableColumn(SCOPE_COLUMN_NAME, EPrimitiveType::String) @@ -304,7 +304,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateIdempotencyKeysTable(TActorSyst }) .Apply([=](const auto& future) { auto status = future.GetValue(); - if (!IsTableCreated(status)) { + if (!IsTableCreated(status)) { CPS_LOG_AS_E(*as, "create idempotency keys table error: " << status.GetIssues().ToString()); return CreateIdempotencyKeysTable(as); } @@ -314,7 +314,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateIdempotencyKeysTable(TActorSyst TAsyncStatus TYdbControlPlaneStorageActor::CreateResultSetsTable(TActorSystem* as) { - auto tablePath = JoinPath(YdbConnection->TablePathPrefix, RESULT_SETS_TABLE_NAME); + auto tablePath = JoinPath(YdbConnection->TablePathPrefix, RESULT_SETS_TABLE_NAME); auto description = TTableBuilder() .AddNullableColumn(RESULT_ID_COLUMN_NAME, EPrimitiveType::String) @@ -332,7 +332,7 @@ TAsyncStatus TYdbControlPlaneStorageActor::CreateResultSetsTable(TActorSystem* a }) .Apply([=](const auto& future) { auto status = future.GetValue(); - if (!IsTableCreated(status)) { + if (!IsTableCreated(status)) { CPS_LOG_AS_E(*as, "create result sets table error: " << status.GetIssues().ToString()); return CreateResultSetsTable(as); } @@ -384,7 +384,7 @@ public: , Handler(handler) {} - static constexpr char ActorName[] = "YQ_CONTROL_PLANE_STORAGE_DB_REQUEST"; + static constexpr char ActorName[] = "YQ_CONTROL_PLANE_STORAGE_DB_REQUEST"; void Bootstrap() { CPS_LOG_T("DbRequest actor request. 
Actor id: " << SelfId()); @@ -679,4 +679,4 @@ NActors::IActor* CreateYdbControlPlaneStorageServiceActor( return new TYdbControlPlaneStorageActor(config, common, counters, yqSharedResources, credentialsProviderFactory); } -} // NYq +} // NYq diff --git a/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp b/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp index ddb15b16c4f..7eb2072d065 100644 --- a/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp +++ b/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp @@ -637,4 +637,4 @@ void TYdbControlPlaneStorageActor::Handle(TEvControlPlaneStorage::TEvDeleteConne }); } -} // NYq +} // NYq diff --git a/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_impl.h b/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_impl.h index 1c8cda7ee72..e6ca823fd91 100644 --- a/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_impl.h +++ b/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_impl.h @@ -176,9 +176,9 @@ class TYdbControlPlaneStorageActor : public NActors::TActorBootstrapped<TYdbCont TConfig Config; - TYdbConnectionPtr YdbConnection; + TYdbConnectionPtr YdbConnection; - ::NYq::TYqSharedResources::TPtr YqSharedResources; + ::NYq::TYqSharedResources::TPtr YqSharedResources; TDbPool::TPtr DbPool; static constexpr int64_t InitialRevision = 1; @@ -195,7 +195,7 @@ public: : Counters(counters) , FinalStatusCounters(counters) , Config(config, common) - , YqSharedResources(yqSharedResources) + , YqSharedResources(yqSharedResources) , CredProviderFactory(credProviderFactory) { } @@ -392,46 +392,46 @@ public: const auto& request = ev->Get()->Request; NYql::TIssues issues = ValidateEvent(ev); - if (request.has_content()) { - const YandexQuery::BindingContent& content = request.content(); - if (content.acl().visibility() == YandexQuery::Acl::VISIBILITY_UNSPECIFIED) { - issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "binding.acl.visibility field is not specified")); - } + if (request.has_content()) { + const YandexQuery::BindingContent& content = request.content(); + if (content.acl().visibility() == YandexQuery::Acl::VISIBILITY_UNSPECIFIED) { + issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "binding.acl.visibility field is not specified")); + } if (content.name() != to_lower(content.name())) { issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "Incorrect binding name: " + content.name() + ". 
Please use only lower case")); } - if (!content.has_setting()) { - issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "binding.setting field is not specified")); - } + if (!content.has_setting()) { + issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "binding.setting field is not specified")); + } - const YandexQuery::BindingSetting& setting = content.setting(); + const YandexQuery::BindingSetting& setting = content.setting(); if (!Config.AvailableBindings.contains(setting.binding_case())) { issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "binding of the specified type is disabled")); } - switch (setting.binding_case()) { - case YandexQuery::BindingSetting::kDataStreams: { - const YandexQuery::DataStreamsBinding dataStreams = setting.data_streams(); - if (!dataStreams.has_schema()) { - issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "data streams with empty schema is forbidden")); - } - break; - } - case YandexQuery::BindingSetting::BINDING_NOT_SET: { - issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "binding is not set")); - break; - } - // Do not replace with default. Adding a new binding should cause a compilation error - case YandexQuery::BindingSetting::kObjectStorage: - break; - } - } else { - issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "binding field is not specified")); + switch (setting.binding_case()) { + case YandexQuery::BindingSetting::kDataStreams: { + const YandexQuery::DataStreamsBinding dataStreams = setting.data_streams(); + if (!dataStreams.has_schema()) { + issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "data streams with empty schema is forbidden")); + } + break; + } + case YandexQuery::BindingSetting::BINDING_NOT_SET: { + issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "binding is not set")); + break; + } + // Do not replace with default. Adding a new binding should cause a compilation error + case YandexQuery::BindingSetting::kObjectStorage: + break; + } + } else { + issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "binding field is not specified")); } - return issues; + return issues; } void Handle(NMon::TEvHttpInfo::TPtr& ev) { @@ -803,7 +803,7 @@ private: Send(sender, new T(issues), 0, cookie); requestCounters->InFly->Dec(); requestCounters->Error->Inc(); - requestCounters->LatencyMs->Collect(delta.MilliSeconds()); + requestCounters->LatencyMs->Collect(delta.MilliSeconds()); } static YandexQuery::CommonMeta CreateCommonMeta(const TString& id, const TString& user, const TInstant& startTime, int64_t revision) { diff --git a/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp b/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp index 398725f7f46..bab48178294 100644 --- a/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp +++ b/ydb/core/yq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp @@ -72,9 +72,9 @@ void TYdbControlPlaneStorageActor::Handle(TEvControlPlaneStorage::TEvCreateQuery if (request.content().acl().visibility() == YandexQuery::Acl::SCOPE && !permissions.Check(TPermissions::MANAGE_PUBLIC)) { issues.AddIssue(MakeErrorIssue(TIssuesIds::ACCESS_DENIED, "Permission denied to create a query with these parameters. 
Please receive a permission yq.resources.managePublic")); } - if (request.disposition().has_from_last_checkpoint()) { - issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "Streaming disposition \"from_last_checkpoint\" is not allowed in CreateQuery request")); - } + if (request.disposition().has_from_last_checkpoint()) { + issues.AddIssue(MakeErrorIssue(TIssuesIds::BAD_REQUEST, "Streaming disposition \"from_last_checkpoint\" is not allowed in CreateQuery request")); + } if (issues) { CPS_LOG_D(MakeLogPrefix(scope, user, queryId) << "CreateQueryRequest, validation failed: " @@ -160,7 +160,7 @@ void TYdbControlPlaneStorageActor::Handle(TEvControlPlaneStorage::TEvCreateQuery } queryInternal.set_cloud_id(cloudId); queryInternal.set_state_load_mode(YandexQuery::StateLoadMode::EMPTY); - queryInternal.mutable_disposition()->CopyFrom(request.disposition()); + queryInternal.mutable_disposition()->CopyFrom(request.disposition()); if (request.execute_mode() != YandexQuery::SAVE) { // TODO: move to run actor priority selection @@ -655,8 +655,8 @@ void TYdbControlPlaneStorageActor::Handle(TEvControlPlaneStorage::TEvModifyQuery << NKikimr::MaskTicket(token) << " " << request.DebugString()); - if (request.content().type() == YandexQuery::QueryContent::STREAMING && request.state_load_mode() == YandexQuery::STATE_LOAD_MODE_UNSPECIFIED) { - request.set_state_load_mode(YandexQuery::EMPTY); + if (request.content().type() == YandexQuery::QueryContent::STREAMING && request.state_load_mode() == YandexQuery::STATE_LOAD_MODE_UNSPECIFIED) { + request.set_state_load_mode(YandexQuery::EMPTY); } NYql::TIssues issues = ValidateQuery(ev); @@ -667,9 +667,9 @@ void TYdbControlPlaneStorageActor::Handle(TEvControlPlaneStorage::TEvModifyQuery if (request.content().acl().visibility() == YandexQuery::Acl::SCOPE && !permissions.Check(TPermissions::MANAGE_PUBLIC)) { issues.AddIssue(MakeErrorIssue(TIssuesIds::ACCESS_DENIED, "Permission denied to create a query with these parameters. Please receive a permission yq.resources.managePublic")); } - if (request.state_load_mode() == YandexQuery::FROM_LAST_CHECKPOINT) { - issues.AddIssue(MakeErrorIssue(TIssuesIds::UNSUPPORTED, "State load mode \"FROM_LAST_CHECKPOINT\" is not supported")); - } + if (request.state_load_mode() == YandexQuery::FROM_LAST_CHECKPOINT) { + issues.AddIssue(MakeErrorIssue(TIssuesIds::UNSUPPORTED, "State load mode \"FROM_LAST_CHECKPOINT\" is not supported")); + } if (issues) { CPS_LOG_D(MakeLogPrefix(scope, user, queryId) << "ModifyQueryRequest, validation failed: " @@ -1672,7 +1672,7 @@ void TYdbControlPlaneStorageActor::Handle(TEvControlPlaneStorage::TEvDescribeJob YandexQuery::DescribeJobResult result; TResultSetParser parser(resultSets->front()); if (!parser.TryNextRow()) { - ythrow TControlPlaneStorageException(TIssuesIds::ACCESS_DENIED) << "Job does not exist or permission denied. Please check the job id or your access rights"; + ythrow TControlPlaneStorageException(TIssuesIds::ACCESS_DENIED) << "Job does not exist or permission denied. Please check the job id or your access rights"; } if (!result.mutable_job()->ParseFromString(*parser.ColumnParser(JOB_COLUMN_NAME).GetOptionalString())) { ythrow TControlPlaneStorageException(TIssuesIds::INTERNAL_ERROR) << "Error parsing proto message for job. 
Please contact internal support"; @@ -1682,7 +1682,7 @@ void TYdbControlPlaneStorageActor::Handle(TEvControlPlaneStorage::TEvDescribeJob bool hasViewAccces = HasViewAccess(permissions, visibility, result.job().meta().created_by(), user); if (!hasViewAccces) { - ythrow TControlPlaneStorageException(TIssuesIds::ACCESS_DENIED) << "Job does not exist or permission denied. Please check the job id or your access rights"; + ythrow TControlPlaneStorageException(TIssuesIds::ACCESS_DENIED) << "Job does not exist or permission denied. Please check the job id or your access rights"; } return result; }; diff --git a/ydb/core/yq/libs/events/event_ids.h b/ydb/core/yq/libs/events/event_ids.h index 56032d50eaa..9a6c0d5861d 100644 --- a/ydb/core/yq/libs/events/event_ids.h +++ b/ydb/core/yq/libs/events/event_ids.h @@ -41,7 +41,7 @@ struct TEventIds { EvQueryActionResult, EvForwardPingRequest, EvForwardPingResponse, - EvGraphParams, + EvGraphParams, // Special events EvEnd diff --git a/ydb/core/yq/libs/events/event_subspace.h b/ydb/core/yq/libs/events/event_subspace.h index 99814caaae0..b30ee20c73f 100644 --- a/ydb/core/yq/libs/events/event_subspace.h +++ b/ydb/core/yq/libs/events/event_subspace.h @@ -1,36 +1,36 @@ -#pragma once - -#include <library/cpp/actors/core/events.h> - -namespace NYq { - -constexpr ui32 ES_YQ = 4213; // Must be compatible with the values defined in ydb/core/base/events.h. - -// Declares YQ event subspace that can contain up to 512 events. -constexpr ui32 YqEventSubspaceBegin(ui32 subspace) { - return EventSpaceBegin(ES_YQ) + 512 * subspace; -} - -constexpr ui32 YqEventSubspaceEnd(ui32 subspace) { - return EventSpaceBegin(ES_YQ) + 512 * (subspace + 1); -} - -struct TYqEventSubspace { - enum : ui32 { - ControlPlane, - CheckpointCoordinator, - CheckpointStorage, - MetastorageProxy, - ConfigUpdater, +#pragma once + +#include <library/cpp/actors/core/events.h> + +namespace NYq { + +constexpr ui32 ES_YQ = 4213; // Must be compatible with the values defined in ydb/core/base/events.h. + +// Declares YQ event subspace that can contain up to 512 events. 
+constexpr ui32 YqEventSubspaceBegin(ui32 subspace) { + return EventSpaceBegin(ES_YQ) + 512 * subspace; +} + +constexpr ui32 YqEventSubspaceEnd(ui32 subspace) { + return EventSpaceBegin(ES_YQ) + 512 * (subspace + 1); +} + +struct TYqEventSubspace { + enum : ui32 { + ControlPlane, + CheckpointCoordinator, + CheckpointStorage, + MetastorageProxy, + ConfigUpdater, ControlPlaneStorage, ControlPlaneProxy, AuditService, TestConnection, - - SubspacesEnd, - }; - - static_assert(YqEventSubspaceBegin(SubspacesEnd) <= EventSpaceEnd(ES_YQ), "All YQ events must be in YQ event space"); -}; - -} // namespace NYq + + SubspacesEnd, + }; + + static_assert(YqEventSubspaceBegin(SubspacesEnd) <= EventSpaceEnd(ES_YQ), "All YQ events must be in YQ event space"); +}; + +} // namespace NYq diff --git a/ydb/core/yq/libs/events/events.h b/ydb/core/yq/libs/events/events.h index b3c70e5c5f4..2e1ee5a47b3 100644 --- a/ydb/core/yq/libs/events/events.h +++ b/ydb/core/yq/libs/events/events.h @@ -1,11 +1,11 @@ #pragma once #include "event_ids.h" - + #include <ydb/library/yql/core/facade/yql_facade.h> #include <ydb/library/yql/providers/dq/provider/yql_dq_gateway.h> #include <ydb/library/yql/public/issue/yql_issue.h> -#include <ydb/core/yq/libs/graph_params/proto/graph_params.pb.h> +#include <ydb/core/yq/libs/graph_params/proto/graph_params.pb.h> #include <ydb/public/api/protos/draft/yq_private.pb.h> #include <ydb/public/sdk/cpp/client/ydb_table/table.h> #include <ydb/public/lib/yq/scope.h> @@ -21,7 +21,7 @@ using NYdb::NYq::TScope; enum class DatabaseType { Ydb, ClickHouse, - DataStreams, + DataStreams, ObjectStorage }; @@ -176,22 +176,22 @@ struct TEvents { struct TEvDataStreamsReadRulesCreationResult : NActors::TEventLocal<TEvDataStreamsReadRulesCreationResult, TEventIds::EvDataStreamsReadRulesCreationResult> { explicit TEvDataStreamsReadRulesCreationResult(NYql::TIssues issues) - : Issues(std::move(issues)) - { - } - - NYql::TIssues Issues; - }; + : Issues(std::move(issues)) + { + } + + NYql::TIssues Issues; + }; struct TEvDataStreamsReadRulesDeletionResult : NActors::TEventLocal<TEvDataStreamsReadRulesDeletionResult, TEventIds::EvDataStreamsReadRulesDeletionResult> { - explicit TEvDataStreamsReadRulesDeletionResult(NYql::TIssues transientIssues) - : TransientIssues(std::move(transientIssues)) - { - } - - NYql::TIssues TransientIssues; - }; - + explicit TEvDataStreamsReadRulesDeletionResult(NYql::TIssues transientIssues) + : TransientIssues(std::move(transientIssues)) + { + } + + NYql::TIssues TransientIssues; + }; + struct TEvQueryActionResult : NActors::TEventLocal<TEvQueryActionResult, TEventIds::EvQueryActionResult> { explicit TEvQueryActionResult(YandexQuery::QueryAction action) : Action(action) @@ -204,35 +204,35 @@ struct TEvents { struct TEvForwardPingRequest : NActors::TEventLocal<TEvForwardPingRequest, TEventIds::EvForwardPingRequest> { explicit TEvForwardPingRequest(const Yq::Private::PingTaskRequest& request, bool final = false) : Request(request) - , Final(final) + , Final(final) { } Yq::Private::PingTaskRequest Request; - bool Final; // Is this the last ping request. + bool Final; // Is this the last ping request. 
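The subspace helpers above carve the ES_YQ event space into 512-id windows, one per component, and the static_assert keeps the whole layout inside the YQ event space. A small self-contained sketch of that arithmetic; EventSpaceBegin below is a stand-in that assumes the actors library allots 64K event ids per event space, which may differ from ydb/core/base/events.h:

// 512 event ids per YQ component, laid out contiguously inside ES_YQ.
#include <cstdint>
#include <iostream>

constexpr uint32_t ES_YQ = 4213;

constexpr uint32_t EventSpaceBegin(uint32_t space) {
    return space << 16; // assumption: 64K ids per event space, as in the actors library
}

constexpr uint32_t YqEventSubspaceBegin(uint32_t subspace) {
    return EventSpaceBegin(ES_YQ) + 512 * subspace;
}

constexpr uint32_t YqEventSubspaceEnd(uint32_t subspace) {
    return EventSpaceBegin(ES_YQ) + 512 * (subspace + 1);
}

enum : uint32_t {
    ControlPlane,
    CheckpointCoordinator,
    CheckpointStorage,
    SubspacesEnd, // illustrative subset of TYqEventSubspace
};

// Adjacent subspaces never overlap: each one ends exactly where the next begins.
static_assert(YqEventSubspaceEnd(ControlPlane) == YqEventSubspaceBegin(CheckpointCoordinator),
              "subspaces must be contiguous");

int main() {
    std::cout << "CheckpointCoordinator events: ["
              << YqEventSubspaceBegin(CheckpointCoordinator) << ", "
              << YqEventSubspaceEnd(CheckpointCoordinator) << ")\n";
}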
}; - + struct TEvForwardPingResponse : NActors::TEventLocal<TEvForwardPingResponse, TEventIds::EvForwardPingResponse> { - TEvForwardPingResponse(bool success, YandexQuery::QueryAction action) - : Success(success) - , Action(action) - { } - - bool Success; - YandexQuery::QueryAction Action; - }; - - struct TEvGraphParams : public NActors::TEventLocal<TEvGraphParams, TEventIds::EvGraphParams> { - explicit TEvGraphParams(const NProto::TGraphParams& params) - : GraphParams(params) - { } - - explicit TEvGraphParams(NProto::TGraphParams&& params) - : GraphParams(std::move(params)) - { } - - NProto::TGraphParams GraphParams; - NThreading::TPromise<NYql::IDqGateway::TResult> Result; - }; + TEvForwardPingResponse(bool success, YandexQuery::QueryAction action) + : Success(success) + , Action(action) + { } + + bool Success; + YandexQuery::QueryAction Action; + }; + + struct TEvGraphParams : public NActors::TEventLocal<TEvGraphParams, TEventIds::EvGraphParams> { + explicit TEvGraphParams(const NProto::TGraphParams& params) + : GraphParams(params) + { } + + explicit TEvGraphParams(NProto::TGraphParams&& params) + : GraphParams(std::move(params)) + { } + + NProto::TGraphParams GraphParams; + NThreading::TPromise<NYql::IDqGateway::TResult> Result; + }; }; } // namespace NYq diff --git a/ydb/core/yq/libs/events/ya.make b/ydb/core/yq/libs/events/ya.make index c12bdd681f4..ce62ba0e1a5 100644 --- a/ydb/core/yq/libs/events/ya.make +++ b/ydb/core/yq/libs/events/ya.make @@ -1,22 +1,22 @@ OWNER(g:yq) - -LIBRARY() - -GENERATE_ENUM_SERIALIZATION(events.h) + +LIBRARY() + +GENERATE_ENUM_SERIALIZATION(events.h) GENERATE_ENUM_SERIALIZATION(event_ids.h) - -PEERDIR( - library/cpp/actors/core - ydb/core/yq/libs/graph_params/proto + +PEERDIR( + library/cpp/actors/core + ydb/core/yq/libs/graph_params/proto ydb/library/yql/core/facade ydb/library/yql/public/issue ydb/public/api/protos ydb/public/lib/yq ydb/public/sdk/cpp/client/ydb_table ydb/library/yql/providers/dq/provider -) - -YQL_LAST_ABI_VERSION() - -END() +) + +YQL_LAST_ABI_VERSION() + +END() diff --git a/ydb/core/yq/libs/gateway/empty_gateway.cpp b/ydb/core/yq/libs/gateway/empty_gateway.cpp index 01bfd9bfcba..cc90f491f65 100644 --- a/ydb/core/yq/libs/gateway/empty_gateway.cpp +++ b/ydb/core/yq/libs/gateway/empty_gateway.cpp @@ -1,9 +1,9 @@ #include "empty_gateway.h" -#include <ydb/core/yq/libs/graph_params/proto/graph_params.pb.h> -#include <ydb/core/yq/libs/events/events.h> +#include <ydb/core/yq/libs/graph_params/proto/graph_params.pb.h> +#include <ydb/core/yq/libs/events/events.h> #include <ydb/core/yq/libs/tasks_packer/tasks_packer.h> - + #include <library/cpp/actors/core/actor.h> namespace NYq { @@ -63,7 +63,7 @@ public: } params.SetSession(sessionId); - NActors::TActivationContext::Send(new NActors::IEventHandle(RunActorId, {}, new TEvents::TEvGraphParams(params))); + NActors::TActivationContext::Send(new NActors::IEventHandle(RunActorId, {}, new TEvents::TEvGraphParams(params))); auto result = NThreading::NewPromise<NYql::IDqGateway::TResult>(); NYql::IDqGateway::TResult gatewayResult; diff --git a/ydb/core/yq/libs/graph_params/proto/graph_params.proto b/ydb/core/yq/libs/graph_params/proto/graph_params.proto index 35291c3cf86..b73d153a1a2 100644 --- a/ydb/core/yq/libs/graph_params/proto/graph_params.proto +++ b/ydb/core/yq/libs/graph_params/proto/graph_params.proto @@ -1,22 +1,22 @@ -syntax = "proto3"; -option cc_enable_arenas = true; - +syntax = "proto3"; +option cc_enable_arenas = true; + import "ydb/library/yql/providers/dq/api/protos/service.proto"; 
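TEvGraphParams above pairs the serialized graph parameters with a promise that the run actor fulfils once the DQ graph has been executed, which is how the empty gateway hands work off without blocking. A loose sketch of that hand-off using std::promise and std::thread in place of the actor system and NThreading::TPromise (all types here are simplified stand-ins):

// Event carries both the payload and a promise for the eventual result.
#include <future>
#include <iostream>
#include <string>
#include <thread>

struct TGraphParams {            // stand-in for NYq::NProto::TGraphParams
    std::string GraphId;
    std::string Session;
};

struct TExecResult {             // stand-in for NYql::IDqGateway::TResult
    bool Success = false;
};

struct TEvGraphParams {
    TGraphParams GraphParams;
    std::promise<TExecResult> Result;
};

int main() {
    TEvGraphParams ev;
    ev.GraphParams = {"graph-1", "session-42"};
    std::future<TExecResult> done = ev.Result.get_future();

    // "Run actor": receives the event, executes the graph, fulfils the promise.
    std::thread runActor([&ev] {
        std::cout << "executing graph " << ev.GraphParams.GraphId << "\n";
        ev.Result.set_value(TExecResult{true});
    });

    // "Gateway" side: waits on the promise instead of blocking inside the actor system.
    std::cout << "graph finished, success=" << done.get().Success << "\n";
    runActor.join();
}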
import "ydb/library/yql/dq/proto/dq_tasks.proto"; - -package NYq.NProto; - -message TGraphParams { - string GraphId = 1; - repeated NYql.NDqProto.TDqTask Tasks = 2; + +package NYq.NProto; + +message TGraphParams { + string GraphId = 1; + repeated NYql.NDqProto.TDqTask Tasks = 2; map<int64, bytes> StageProgram = 12; - uint64 SourceId = 3; - bytes ResultType = 4; - repeated string Columns = 5; - string Session = 6; - repeated Yql.DqsProto.TAttr Settings = 7; - map<string, string> SecureParams = 8; - string YqlText = 9; - string QueryAst = 10; - string QueryPlan = 11; -} + uint64 SourceId = 3; + bytes ResultType = 4; + repeated string Columns = 5; + string Session = 6; + repeated Yql.DqsProto.TAttr Settings = 7; + map<string, string> SecureParams = 8; + string YqlText = 9; + string QueryAst = 10; + string QueryPlan = 11; +} diff --git a/ydb/core/yq/libs/graph_params/proto/ya.make b/ydb/core/yq/libs/graph_params/proto/ya.make index 52f1d81f489..77db3aed0a3 100644 --- a/ydb/core/yq/libs/graph_params/proto/ya.make +++ b/ydb/core/yq/libs/graph_params/proto/ya.make @@ -1,18 +1,18 @@ -PROTO_LIBRARY() - -OWNER( - g:yq -) - -SRCS( - graph_params.proto -) - -PEERDIR( +PROTO_LIBRARY() + +OWNER( + g:yq +) + +SRCS( + graph_params.proto +) + +PEERDIR( ydb/library/yql/dq/proto ydb/library/yql/providers/dq/api/protos -) - -EXCLUDE_TAGS(GO_PROTO) - -END() +) + +EXCLUDE_TAGS(GO_PROTO) + +END() diff --git a/ydb/core/yq/libs/graph_params/ya.make b/ydb/core/yq/libs/graph_params/ya.make index 01e9284b51a..abf218d67b5 100644 --- a/ydb/core/yq/libs/graph_params/ya.make +++ b/ydb/core/yq/libs/graph_params/ya.make @@ -1,5 +1,5 @@ -OWNER(g:yq) - -RECURSE( - proto -) +OWNER(g:yq) + +RECURSE( + proto +) diff --git a/ydb/core/yq/libs/hmac/hmac.cpp b/ydb/core/yq/libs/hmac/hmac.cpp index bbd9ab6c667..3db091c5de0 100644 --- a/ydb/core/yq/libs/hmac/hmac.cpp +++ b/ydb/core/yq/libs/hmac/hmac.cpp @@ -4,7 +4,7 @@ #include <library/cpp/string_utils/base64/base64.h> #include <util/generic/yexception.h> -namespace NYq { +namespace NYq { TString HmacSha1(const TStringBuf data, const TStringBuf secret) { unsigned char md_value[EVP_MAX_MD_SIZE]; unsigned md_len = 0; diff --git a/ydb/core/yq/libs/hmac/hmac.h b/ydb/core/yq/libs/hmac/hmac.h index cc4b95b361d..92be2119076 100644 --- a/ydb/core/yq/libs/hmac/hmac.h +++ b/ydb/core/yq/libs/hmac/hmac.h @@ -3,7 +3,7 @@ #include <util/generic/string.h> #include <util/generic/strbuf.h> -namespace NYq { +namespace NYq { TString HmacSha1(const TStringBuf data, const TStringBuf secret); TString HmacSha1Base64(const TStringBuf data, const TStringBuf secret); } diff --git a/ydb/core/yq/libs/hmac/ut/hmac_ut.cpp b/ydb/core/yq/libs/hmac/ut/hmac_ut.cpp index cdc79edbcb4..5d70e50357a 100644 --- a/ydb/core/yq/libs/hmac/ut/hmac_ut.cpp +++ b/ydb/core/yq/libs/hmac/ut/hmac_ut.cpp @@ -3,7 +3,7 @@ #include <library/cpp/string_utils/base64/base64.h> #include <library/cpp/testing/unittest/registar.h> -using namespace NYq; +using namespace NYq; namespace { constexpr TStringBuf SECRET = "AAAA"; diff --git a/ydb/core/yq/libs/init/init.cpp b/ydb/core/yq/libs/init/init.cpp index d66ac202bfa..ff903ac12b9 100644 --- a/ydb/core/yq/libs/init/init.cpp +++ b/ydb/core/yq/libs/init/init.cpp @@ -7,11 +7,11 @@ #include <ydb/core/yq/libs/common/service_counters.h> #include <ydb/core/yq/libs/control_plane_proxy/control_plane_proxy.h> #include <ydb/core/yq/libs/shared_resources/shared_resources.h> -#include <ydb/core/yq/libs/checkpoint_storage/storage_service.h> +#include <ydb/core/yq/libs/checkpoint_storage/storage_service.h> 
#include <ydb/library/folder_service/folder_service.h> #include <library/cpp/actors/http/http_proxy.h> -#include <library/cpp/protobuf/json/json2proto.h> +#include <library/cpp/protobuf/json/json2proto.h> #include <library/cpp/protobuf/json/proto2json.h> #include <ydb/library/yql/dq/actors/compute/dq_checkpoints.h> @@ -34,7 +34,7 @@ #include <ydb/library/yql/providers/ydb/comp_nodes/yql_ydb_dq_transform.h> #include <ydb/library/yql/providers/ydb/actors/yql_ydb_source_factory.h> -#include <util/stream/file.h> +#include <util/stream/file.h> #include <util/system/hostname.h> namespace { @@ -91,8 +91,8 @@ std::tuple<TString, TString> GetLocalAddress(const TString* overrideHostname = n } -namespace NYq { - +namespace NYq { + using namespace NKikimr; void Init( @@ -101,7 +101,7 @@ void Init( const TActorRegistrator& actorRegistrator, const TAppData* appData, const TString& tenant, - ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, + ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, const IYqSharedResources::TPtr& iyqSharedResources, const std::function<IActor*(const NKikimrProto::NFolderService::TFolderServiceConfig& authConfig)>& folderServiceFactory, const std::function<IActor*(const NYq::NConfig::TAuditConfig& auditConfig)>& auditServiceFactory, @@ -109,8 +109,8 @@ void Init( const ui32& icPort ) { - Y_VERIFY(iyqSharedResources, "No YQ shared resources created"); - TYqSharedResources::TPtr yqSharedResources = TYqSharedResources::Cast(iyqSharedResources); + Y_VERIFY(iyqSharedResources, "No YQ shared resources created"); + TYqSharedResources::TPtr yqSharedResources = TYqSharedResources::Cast(iyqSharedResources); const auto clientCounters = appData->Counters->GetSubgroup("counters", "yq")->GetSubgroup("subsystem", "ClientMetrics"); if (protoConfig.GetControlPlaneStorage().GetEnabled()) { @@ -150,7 +150,7 @@ void Init( } if (protoConfig.GetCheckpointCoordinator().GetEnabled()) { - auto checkpointStorage = NYq::NewCheckpointStorageService(protoConfig.GetCheckpointCoordinator(), protoConfig.GetCommon(), credentialsProviderFactory); + auto checkpointStorage = NYq::NewCheckpointStorageService(protoConfig.GetCheckpointCoordinator(), protoConfig.GetCommon(), credentialsProviderFactory); actorRegistrator(NYql::NDq::MakeCheckpointStorageID(), checkpointStorage.release()); } @@ -159,7 +159,7 @@ void Init( NKikimr::NMiniKQL::TComputationNodeFactory dqCompFactory = NKikimr::NMiniKQL::GetCompositeWithBuiltinFactory({ NYql::GetCommonDqFactory(), - NYql::GetDqYdbFactory(yqSharedResources->YdbDriver), + NYql::GetDqYdbFactory(yqSharedResources->YdbDriver), NKikimr::NMiniKQL::GetYqlFactory() }); @@ -168,9 +168,9 @@ void Init( NYql::CreateYdbDqTaskTransformFactory() }); - auto sourceActorFactory = MakeIntrusive<NYql::NDq::TDqSourceFactory>(); - auto sinkActorFactory = MakeIntrusive<NYql::NDq::TDqSinkFactory>(); - + auto sourceActorFactory = MakeIntrusive<NYql::NDq::TDqSourceFactory>(); + auto sinkActorFactory = MakeIntrusive<NYql::NDq::TDqSinkFactory>(); + NYql::ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory; const auto httpGateway = NYql::IHTTPGateway::Make( &protoConfig.GetGateways().GetHttpGateway(), @@ -214,7 +214,7 @@ void Init( actorRegistrator(NYql::NDqs::MakeWorkerManagerActorID(nodeId), resman); } - ::NYq::NCommon::TServiceCounters serviceCounters(appData->Counters); + ::NYq::NCommon::TServiceCounters serviceCounters(appData->Counters); if (protoConfig.GetNodesManager().GetEnabled()) { const auto localAddr = GetLocalAddress(&HostName()); @@ -276,6 +276,6 
@@ IYqSharedResources::TPtr CreateYqSharedResources( const NMonitoring::TDynamicCounterPtr& counters) { return CreateYqSharedResourcesImpl(config, credentialsProviderFactory, counters); -} - -} // NYq +} + +} // NYq diff --git a/ydb/core/yq/libs/init/init.h b/ydb/core/yq/libs/init/init.h index fa26651f185..72448e287f2 100644 --- a/ydb/core/yq/libs/init/init.h +++ b/ydb/core/yq/libs/init/init.h @@ -12,27 +12,27 @@ #include <ydb/core/yq/libs/config/protos/audit.pb.h> #include <ydb/library/yql/providers/pq/cm_client/interface/client.h> - + #include <library/cpp/actors/core/actor.h> -#include <util/generic/ptr.h> - -namespace NYq { +#include <util/generic/ptr.h> +namespace NYq { + using TActorRegistrator = std::function<void(NActors::TActorId, NActors::IActor*)>; IYqSharedResources::TPtr CreateYqSharedResources( const NYq::NConfig::TConfig& config, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, const NMonitoring::TDynamicCounterPtr& counters); - + void Init( const NYq::NConfig::TConfig& config, ui32 nodeId, const TActorRegistrator& actorRegistrator, const NKikimr::TAppData* appData, const TString& tenant, - ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, + ::NPq::NConfigurationManager::IConnections::TPtr pqCmConnections, const IYqSharedResources::TPtr& yqSharedResources, const std::function<IActor*(const NKikimrProto::NFolderService::TFolderServiceConfig& authConfig)>& folderServiceFactory, const std::function<IActor*(const NYq::NConfig::TAuditConfig& auditConfig)>& auditServiceFactory, @@ -40,4 +40,4 @@ void Init( const ui32& icPort ); -} // NYq +} // NYq diff --git a/ydb/core/yq/libs/mock/yql_mock.cpp b/ydb/core/yq/libs/mock/yql_mock.cpp index 8fe694f35b3..e2d66996eb6 100644 --- a/ydb/core/yq/libs/mock/yql_mock.cpp +++ b/ydb/core/yq/libs/mock/yql_mock.cpp @@ -14,7 +14,7 @@ #include <library/cpp/protobuf/json/proto2json.h> #include <library/cpp/protobuf/json/json2proto.h> -namespace NYq { +namespace NYq { using namespace NActors; using namespace NThreading; @@ -72,10 +72,10 @@ private: TMockLocation Location; }; -void InitTest(NActors::TTestActorRuntime* runtime, int httpPort, int grpcPort, const IYqSharedResources::TPtr& yqSharedResources) +void InitTest(NActors::TTestActorRuntime* runtime, int httpPort, int grpcPort, const IYqSharedResources::TPtr& yqSharedResources) { - yqSharedResources->Init(runtime->GetAnyNodeActorSystem()); - + yqSharedResources->Init(runtime->GetAnyNodeActorSystem()); + auto httpProxyId = NYq::MakeYqlAnalyticsHttpProxyId(); TActorId mockActorId = runtime->Register(CreateYqlMockActor(grpcPort)); @@ -94,4 +94,4 @@ NActors::IActor* CreateYqlMockActor(int grpcPort) { return new TYqlMockActor(grpcPort); } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/mock/yql_mock.h b/ydb/core/yq/libs/mock/yql_mock.h index da8656b0ff4..61265026b79 100644 --- a/ydb/core/yq/libs/mock/yql_mock.h +++ b/ydb/core/yq/libs/mock/yql_mock.h @@ -5,9 +5,9 @@ #include <library/cpp/actors/core/actorsystem.h> -namespace NYq { +namespace NYq { NActors::IActor* CreateYqlMockActor(int grpcPort); -void InitTest(NActors::TTestActorRuntime* runtime, int httpPort, int grpcPort, const IYqSharedResources::TPtr& yqSharedResources); +void InitTest(NActors::TTestActorRuntime* runtime, int httpPort, int grpcPort, const IYqSharedResources::TPtr& yqSharedResources); -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/pretty_printers/graph_params_printer.cpp b/ydb/core/yq/libs/pretty_printers/graph_params_printer.cpp index 
dbdad960e8c..0c412fc642e 100644 --- a/ydb/core/yq/libs/pretty_printers/graph_params_printer.cpp +++ b/ydb/core/yq/libs/pretty_printers/graph_params_printer.cpp @@ -1,62 +1,62 @@ -#include "graph_params_printer.h" -#include "minikql_program_printer.h" - +#include "graph_params_printer.h" +#include "minikql_program_printer.h" + #include <ydb/library/protobuf_printer/hide_field_printer.h> #include <ydb/library/protobuf_printer/protobuf_printer.h> #include <ydb/library/protobuf_printer/stream_helper.h> - + #include <ydb/library/yql/providers/dq/api/protos/service.pb.h> - -#include <google/protobuf/message.h> -#include <google/protobuf/text_format.h> - -namespace NYq { -namespace { - -class TMinikqlProgramFieldValuePrinter : public ::google::protobuf::TextFormat::FastFieldValuePrinter { -public: - void PrintBytes(const TProtoStringType& value, ::google::protobuf::TextFormat::BaseTextGenerator* generator) const override { - const TString program = PrettyPrintMkqlProgram(value, /*generator->GetCurrentIndentationSize()*/ 4); // TODO: use GetCurrentIndentationSize() when we have it. - generator->PrintLiteral("\n"); - generator->Print(program.Data(), program.Size()); - } - - void PrintString(const TProtoStringType& value, ::google::protobuf::TextFormat::BaseTextGenerator* generator) const override { - PrintBytes(value, generator); - } -}; - -class TGraphParamsPrinter : public NKikimr::TCustomizableTextFormatPrinter { - void RegisterFieldPrinters(bool canonical) { - if (canonical) { - // Don't print values of these fields, but mention that they have values. - RegisterFieldValuePrinters<NProto::TGraphParams, NKikimr::THideFieldValuePrinter>("GraphId", "SourceId", "Session"); - RegisterFieldValuePrinters<NActorsProto::TActorId, NKikimr::THideFieldValuePrinter>("RawX1", "RawX2"); - RegisterFieldValuePrinters<Yql::DqsProto::TFile, NKikimr::THideFieldValuePrinter>("ObjectId"); - } - - RegisterFieldValuePrinters<NYql::NDqProto::TProgram, TMinikqlProgramFieldValuePrinter>("Raw"); - } - -public: - explicit TGraphParamsPrinter(bool canonical) { - SetExpandAny(true); - RegisterFieldPrinters(canonical); - } -}; - -} // namespace - -TString PrettyPrintGraphParams(const NProto::TGraphParams& sourceGraphParams, bool canonical) { - NProto::TGraphParams patchedGraphParams = sourceGraphParams; - for (auto& [secureKey, tokenValue] : *patchedGraphParams.MutableSecureParams()) { - tokenValue = "== token_value =="; - } - patchedGraphParams.ClearQueryPlan(); - patchedGraphParams.ClearQueryAst(); - patchedGraphParams.ClearYqlText(); - - return NKikimr::TProtobufPrinterOutputWrapper(patchedGraphParams, TGraphParamsPrinter(canonical)); -} - -} // namespace NYq + +#include <google/protobuf/message.h> +#include <google/protobuf/text_format.h> + +namespace NYq { +namespace { + +class TMinikqlProgramFieldValuePrinter : public ::google::protobuf::TextFormat::FastFieldValuePrinter { +public: + void PrintBytes(const TProtoStringType& value, ::google::protobuf::TextFormat::BaseTextGenerator* generator) const override { + const TString program = PrettyPrintMkqlProgram(value, /*generator->GetCurrentIndentationSize()*/ 4); // TODO: use GetCurrentIndentationSize() when we have it. 
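[Note] The hard-coded 4 above stands in for the text-format generator's current indentation, which BaseTextGenerator does not yet expose. PrettyPrintMkqlProgram (declared later in this section with initialIndentChars defaulting to 0) prefixes every emitted line with that many extra spaces on top of two spaces per nesting level. A stand-alone call might look like this; rawProgram is an assumed variable holding serialized MiniKQL bytes:

    // rawProgram: a serialized MiniKQL node, e.g. the Raw field of NYql::NDqProto::TProgram.
    const TString pretty = NYq::PrettyPrintMkqlProgram(rawProgram, /*initialIndentChars=*/4);
    Cerr << pretty << Endl;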
+ generator->PrintLiteral("\n"); + generator->Print(program.Data(), program.Size()); + } + + void PrintString(const TProtoStringType& value, ::google::protobuf::TextFormat::BaseTextGenerator* generator) const override { + PrintBytes(value, generator); + } +}; + +class TGraphParamsPrinter : public NKikimr::TCustomizableTextFormatPrinter { + void RegisterFieldPrinters(bool canonical) { + if (canonical) { + // Don't print values of these fields, but mention that they have values. + RegisterFieldValuePrinters<NProto::TGraphParams, NKikimr::THideFieldValuePrinter>("GraphId", "SourceId", "Session"); + RegisterFieldValuePrinters<NActorsProto::TActorId, NKikimr::THideFieldValuePrinter>("RawX1", "RawX2"); + RegisterFieldValuePrinters<Yql::DqsProto::TFile, NKikimr::THideFieldValuePrinter>("ObjectId"); + } + + RegisterFieldValuePrinters<NYql::NDqProto::TProgram, TMinikqlProgramFieldValuePrinter>("Raw"); + } + +public: + explicit TGraphParamsPrinter(bool canonical) { + SetExpandAny(true); + RegisterFieldPrinters(canonical); + } +}; + +} // namespace + +TString PrettyPrintGraphParams(const NProto::TGraphParams& sourceGraphParams, bool canonical) { + NProto::TGraphParams patchedGraphParams = sourceGraphParams; + for (auto& [secureKey, tokenValue] : *patchedGraphParams.MutableSecureParams()) { + tokenValue = "== token_value =="; + } + patchedGraphParams.ClearQueryPlan(); + patchedGraphParams.ClearQueryAst(); + patchedGraphParams.ClearYqlText(); + + return NKikimr::TProtobufPrinterOutputWrapper(patchedGraphParams, TGraphParamsPrinter(canonical)); +} + +} // namespace NYq diff --git a/ydb/core/yq/libs/pretty_printers/graph_params_printer.h b/ydb/core/yq/libs/pretty_printers/graph_params_printer.h index 8eb77db68bb..f0e833a0886 100644 --- a/ydb/core/yq/libs/pretty_printers/graph_params_printer.h +++ b/ydb/core/yq/libs/pretty_printers/graph_params_printer.h @@ -1,10 +1,10 @@ -#pragma once -#include <ydb/core/yq/libs/graph_params/proto/graph_params.pb.h> - -#include <util/generic/string.h> - -namespace NYq { - -TString PrettyPrintGraphParams(const NProto::TGraphParams& graphParams, bool canonical); - -} // namespace NYq +#pragma once +#include <ydb/core/yq/libs/graph_params/proto/graph_params.pb.h> + +#include <util/generic/string.h> + +namespace NYq { + +TString PrettyPrintGraphParams(const NProto::TGraphParams& graphParams, bool canonical); + +} // namespace NYq diff --git a/ydb/core/yq/libs/pretty_printers/minikql_program_printer.cpp b/ydb/core/yq/libs/pretty_printers/minikql_program_printer.cpp index 8c80588457c..0922ffaf3cf 100644 --- a/ydb/core/yq/libs/pretty_printers/minikql_program_printer.cpp +++ b/ydb/core/yq/libs/pretty_printers/minikql_program_printer.cpp @@ -1,249 +1,249 @@ -#include "minikql_program_printer.h" - +#include "minikql_program_printer.h" + #include <ydb/library/yql/minikql/mkql_node.h> #include <ydb/library/yql/minikql/mkql_node_printer.h> #include <ydb/library/yql/minikql/mkql_node_serialization.h> #include <ydb/library/yql/minikql/mkql_node_visitor.h> - -namespace NYq { - -namespace { - -using namespace NKikimr::NMiniKQL; - -struct TPrettyPrinter : public INodeVisitor { -public: - // Types - void Visit(TTypeType&) override { - Out << "TypeType"; - } - void Visit(TVoidType&) override { - Out << "VoidType"; - } - void Visit(TNullType&) override { - Out << "NullType"; - } - void Visit(TEmptyListType&) override { - Out << "EmptyListType"; - } - void Visit(TEmptyDictType&) override { - Out << "EmptyDictType"; - } - void Visit(TDataType&) override { - Out << "DataType"; - } - void 
Visit(TStructType&) override { - Out << "StructType"; - } - void Visit(TListType&) override { - Out << "ListType"; - } - void Visit(TOptionalType&) override { - Out << "OptionalType"; - } - void Visit(TDictType&) override { - Out << "DictType"; - } - void Visit(TCallableType&) override { - Out << "CallableType"; - } - void Visit(TAnyType&) override { - Out << "AnyType"; - } - void Visit(TTupleType&) override { - Out << "TupleType"; - } - void Visit(TResourceType&) override { - Out << "ResourceType"; - } - void Visit(TVariantType&) override { - Out << "VariantType"; - } - void Visit(TStreamType&) override { - Out << "StreamType"; - } - void Visit(TFlowType&) override { - Out << "FlowType"; - } - void Visit(TTaggedType&) override { - Out << "TaggedType"; - } + +namespace NYq { + +namespace { + +using namespace NKikimr::NMiniKQL; + +struct TPrettyPrinter : public INodeVisitor { +public: + // Types + void Visit(TTypeType&) override { + Out << "TypeType"; + } + void Visit(TVoidType&) override { + Out << "VoidType"; + } + void Visit(TNullType&) override { + Out << "NullType"; + } + void Visit(TEmptyListType&) override { + Out << "EmptyListType"; + } + void Visit(TEmptyDictType&) override { + Out << "EmptyDictType"; + } + void Visit(TDataType&) override { + Out << "DataType"; + } + void Visit(TStructType&) override { + Out << "StructType"; + } + void Visit(TListType&) override { + Out << "ListType"; + } + void Visit(TOptionalType&) override { + Out << "OptionalType"; + } + void Visit(TDictType&) override { + Out << "DictType"; + } + void Visit(TCallableType&) override { + Out << "CallableType"; + } + void Visit(TAnyType&) override { + Out << "AnyType"; + } + void Visit(TTupleType&) override { + Out << "TupleType"; + } + void Visit(TResourceType&) override { + Out << "ResourceType"; + } + void Visit(TVariantType&) override { + Out << "VariantType"; + } + void Visit(TStreamType&) override { + Out << "StreamType"; + } + void Visit(TFlowType&) override { + Out << "FlowType"; + } + void Visit(TTaggedType&) override { + Out << "TaggedType"; + } void Visit(TBlockType&) override { Out << "BlockType"; } - - // Values - void Visit(TVoid&) override { - Out << "void"; - } - - void Visit(NKikimr::NMiniKQL::TNull&) override { - Out << "null"; - } - - void Visit(TEmptyList&) override { - Out << "()"; - } - - void Visit(TEmptyDict&) override { - Out << "()"; - } - - void Visit(TDataLiteral& node) override { - if (node.GetType()->GetSchemeType() == 0) { - Out << "null"; - } else { - Out << '\'' << TString(node.AsValue().AsStringRef()).Quote(); - } - } - - void Visit(TStructLiteral& node) override { - Out << '('; - TIndentScope scope(*this); - TStructType* type = node.GetType(); - for (size_t i = 0; i < node.GetValuesCount(); ++i) { - if (i) { - NewLine(); - } - Out << "('" << type->GetMemberName(i) << ' '; - const TRuntimeNode& val = node.GetValue(i); - val.GetNode()->Accept(*this); - Out << ')'; - } - Out << ')'; - } - - void Visit(TListLiteral& node) override { - Out << '('; - TIndentScope scope(*this); - for (size_t i = 0; i < node.GetItemsCount(); ++i) { - if (i) { - Out << ' '; - } - const TRuntimeNode& val = node.GetItems()[i]; - val.GetNode()->Accept(*this); - } - Out << ')'; - } - - void Visit(TOptionalLiteral& node) override { - if (node.HasItem()) { - const TRuntimeNode& item = node.GetItem(); - item.GetNode()->Accept(*this); - } else { - Out << "null"; - } - } - - void Visit(TDictLiteral& node) override { - Out << '('; - TIndentScope scope(*this); - for (size_t i = 0; i < node.GetItemsCount(); ++i) { - 
NewLine(); - const std::pair<TRuntimeNode, TRuntimeNode>& item = node.GetItem(i); - item.first.GetNode()->Accept(*this); - Out << ':'; - item.second.GetNode()->Accept(*this); - } - Out << ')'; - } - - void Visit(TCallable& node) override { - TCallableType* type = node.GetType(); - Out << '(' << type->GetName(); - TIndentScope scope(*this); - for (size_t i = 0; i < node.GetInputsCount(); ++i) { - NewLine(); - const TRuntimeNode& input = node.GetInput(i); - input.GetNode()->Accept(*this); - } - Out << ')'; - } - - void Visit(TAny& node) override { - if (node.HasItem()) { - const TRuntimeNode& item = node.GetItem(); - item.GetNode()->Accept(*this); - } else { - Out << "null"; - } - } - - void Visit(TTupleLiteral& node) override { - Out << '('; - TIndentScope scope(*this); - for (size_t i = 0; i < node.GetValuesCount(); ++i) { - if (i) { - Out << ' '; - } - const TRuntimeNode& val = node.GetValue(i); - val.GetNode()->Accept(*this); - } - Out << ')'; - } - - void Visit(TVariantLiteral& node) override { - const TRuntimeNode& item = node.GetItem(); - item.GetNode()->Accept(*this); - } - - TString GetResult() { - return Out; - } - - struct TIndentScope { - TIndentScope(TPrettyPrinter& printer) - : Printer(printer) - { - ++Printer.CurrentIndent; - } - - ~TIndentScope() { - --Printer.CurrentIndent; - } - - TPrettyPrinter& Printer; - }; - - explicit TPrettyPrinter(size_t initialIndentChars) - : BaseIndent(initialIndentChars) - { - Indent(); - } - - void Indent() { - for (size_t i = 0, cnt = CurrentIndent * 2 + BaseIndent; i < cnt; ++i) { - Out << ' '; - } - } - - void NewLine() { - Out << '\n'; - Indent(); - } - - TStringBuilder Out; - size_t CurrentIndent = 0; - const size_t BaseIndent = 0; -}; - -} // namespace - -TString PrettyPrintMkqlProgram(const NKikimr::NMiniKQL::TNode* node, size_t initialIndentChars) { - TPrettyPrinter printer(initialIndentChars); - const_cast<NKikimr::NMiniKQL::TNode*>(node)->Accept(printer); - return printer.GetResult(); -} - -TString PrettyPrintMkqlProgram(const NKikimr::NMiniKQL::TRuntimeNode& node, size_t initialIndentChars) { - return PrettyPrintMkqlProgram(node.GetNode(), initialIndentChars); -} - -TString PrettyPrintMkqlProgram(const TString& rawProgram, size_t initialIndentChars) { - TScopedAlloc alloc; - NKikimr::NMiniKQL::TTypeEnvironment env(alloc); - NKikimr::NMiniKQL::TRuntimeNode node = DeserializeRuntimeNode(rawProgram, env); - return PrettyPrintMkqlProgram(node, initialIndentChars); -} - -} // namespace NYq + + // Values + void Visit(TVoid&) override { + Out << "void"; + } + + void Visit(NKikimr::NMiniKQL::TNull&) override { + Out << "null"; + } + + void Visit(TEmptyList&) override { + Out << "()"; + } + + void Visit(TEmptyDict&) override { + Out << "()"; + } + + void Visit(TDataLiteral& node) override { + if (node.GetType()->GetSchemeType() == 0) { + Out << "null"; + } else { + Out << '\'' << TString(node.AsValue().AsStringRef()).Quote(); + } + } + + void Visit(TStructLiteral& node) override { + Out << '('; + TIndentScope scope(*this); + TStructType* type = node.GetType(); + for (size_t i = 0; i < node.GetValuesCount(); ++i) { + if (i) { + NewLine(); + } + Out << "('" << type->GetMemberName(i) << ' '; + const TRuntimeNode& val = node.GetValue(i); + val.GetNode()->Accept(*this); + Out << ')'; + } + Out << ')'; + } + + void Visit(TListLiteral& node) override { + Out << '('; + TIndentScope scope(*this); + for (size_t i = 0; i < node.GetItemsCount(); ++i) { + if (i) { + Out << ' '; + } + const TRuntimeNode& val = node.GetItems()[i]; + 
val.GetNode()->Accept(*this); + } + Out << ')'; + } + + void Visit(TOptionalLiteral& node) override { + if (node.HasItem()) { + const TRuntimeNode& item = node.GetItem(); + item.GetNode()->Accept(*this); + } else { + Out << "null"; + } + } + + void Visit(TDictLiteral& node) override { + Out << '('; + TIndentScope scope(*this); + for (size_t i = 0; i < node.GetItemsCount(); ++i) { + NewLine(); + const std::pair<TRuntimeNode, TRuntimeNode>& item = node.GetItem(i); + item.first.GetNode()->Accept(*this); + Out << ':'; + item.second.GetNode()->Accept(*this); + } + Out << ')'; + } + + void Visit(TCallable& node) override { + TCallableType* type = node.GetType(); + Out << '(' << type->GetName(); + TIndentScope scope(*this); + for (size_t i = 0; i < node.GetInputsCount(); ++i) { + NewLine(); + const TRuntimeNode& input = node.GetInput(i); + input.GetNode()->Accept(*this); + } + Out << ')'; + } + + void Visit(TAny& node) override { + if (node.HasItem()) { + const TRuntimeNode& item = node.GetItem(); + item.GetNode()->Accept(*this); + } else { + Out << "null"; + } + } + + void Visit(TTupleLiteral& node) override { + Out << '('; + TIndentScope scope(*this); + for (size_t i = 0; i < node.GetValuesCount(); ++i) { + if (i) { + Out << ' '; + } + const TRuntimeNode& val = node.GetValue(i); + val.GetNode()->Accept(*this); + } + Out << ')'; + } + + void Visit(TVariantLiteral& node) override { + const TRuntimeNode& item = node.GetItem(); + item.GetNode()->Accept(*this); + } + + TString GetResult() { + return Out; + } + + struct TIndentScope { + TIndentScope(TPrettyPrinter& printer) + : Printer(printer) + { + ++Printer.CurrentIndent; + } + + ~TIndentScope() { + --Printer.CurrentIndent; + } + + TPrettyPrinter& Printer; + }; + + explicit TPrettyPrinter(size_t initialIndentChars) + : BaseIndent(initialIndentChars) + { + Indent(); + } + + void Indent() { + for (size_t i = 0, cnt = CurrentIndent * 2 + BaseIndent; i < cnt; ++i) { + Out << ' '; + } + } + + void NewLine() { + Out << '\n'; + Indent(); + } + + TStringBuilder Out; + size_t CurrentIndent = 0; + const size_t BaseIndent = 0; +}; + +} // namespace + +TString PrettyPrintMkqlProgram(const NKikimr::NMiniKQL::TNode* node, size_t initialIndentChars) { + TPrettyPrinter printer(initialIndentChars); + const_cast<NKikimr::NMiniKQL::TNode*>(node)->Accept(printer); + return printer.GetResult(); +} + +TString PrettyPrintMkqlProgram(const NKikimr::NMiniKQL::TRuntimeNode& node, size_t initialIndentChars) { + return PrettyPrintMkqlProgram(node.GetNode(), initialIndentChars); +} + +TString PrettyPrintMkqlProgram(const TString& rawProgram, size_t initialIndentChars) { + TScopedAlloc alloc; + NKikimr::NMiniKQL::TTypeEnvironment env(alloc); + NKikimr::NMiniKQL::TRuntimeNode node = DeserializeRuntimeNode(rawProgram, env); + return PrettyPrintMkqlProgram(node, initialIndentChars); +} + +} // namespace NYq diff --git a/ydb/core/yq/libs/pretty_printers/minikql_program_printer.h b/ydb/core/yq/libs/pretty_printers/minikql_program_printer.h index 5d438d49b61..5a4ad344dc3 100644 --- a/ydb/core/yq/libs/pretty_printers/minikql_program_printer.h +++ b/ydb/core/yq/libs/pretty_printers/minikql_program_printer.h @@ -1,17 +1,17 @@ -#pragma once -#include <util/generic/string.h> - -namespace NKikimr::NMiniKQL { - -class TNode; -struct TRuntimeNode; - -} // namespace NKikimr::NMiniKQL - -namespace NYq { - -TString PrettyPrintMkqlProgram(const NKikimr::NMiniKQL::TNode* node, size_t initialIndentChars = 0); -TString PrettyPrintMkqlProgram(const NKikimr::NMiniKQL::TRuntimeNode& node, size_t 
initialIndentChars = 0); -TString PrettyPrintMkqlProgram(const TString& rawProgram, size_t initialIndentChars = 0); - -} // namespace NYq +#pragma once +#include <util/generic/string.h> + +namespace NKikimr::NMiniKQL { + +class TNode; +struct TRuntimeNode; + +} // namespace NKikimr::NMiniKQL + +namespace NYq { + +TString PrettyPrintMkqlProgram(const NKikimr::NMiniKQL::TNode* node, size_t initialIndentChars = 0); +TString PrettyPrintMkqlProgram(const NKikimr::NMiniKQL::TRuntimeNode& node, size_t initialIndentChars = 0); +TString PrettyPrintMkqlProgram(const TString& rawProgram, size_t initialIndentChars = 0); + +} // namespace NYq diff --git a/ydb/core/yq/libs/pretty_printers/ya.make b/ydb/core/yq/libs/pretty_printers/ya.make index 540356df9df..0d9477cedf4 100644 --- a/ydb/core/yq/libs/pretty_printers/ya.make +++ b/ydb/core/yq/libs/pretty_printers/ya.make @@ -1,20 +1,20 @@ OWNER(g:yq) - -LIBRARY() - -SRCS( - graph_params_printer.cpp - minikql_program_printer.cpp -) - -PEERDIR( - contrib/libs/protobuf - ydb/core/yq/libs/graph_params/proto + +LIBRARY() + +SRCS( + graph_params_printer.cpp + minikql_program_printer.cpp +) + +PEERDIR( + contrib/libs/protobuf + ydb/core/yq/libs/graph_params/proto ydb/library/protobuf_printer ydb/library/yql/minikql ydb/library/yql/providers/dq/api/protos -) - -YQL_LAST_ABI_VERSION() - -END() +) + +YQL_LAST_ABI_VERSION() + +END() diff --git a/ydb/core/yq/libs/private_client/private_client.h b/ydb/core/yq/libs/private_client/private_client.h index e63b67eddbd..a8e4be12ce8 100644 --- a/ydb/core/yq/libs/private_client/private_client.h +++ b/ydb/core/yq/libs/private_client/private_client.h @@ -11,7 +11,7 @@ template<class TProtoResult> class TProtoResultInternalWrapper : public NYdb::TStatus { friend class TPrivateClient; -public: +public: TProtoResultInternalWrapper( NYdb::TStatus&& status, std::unique_ptr<TProtoResult> result) diff --git a/ydb/core/yq/libs/read_rule/read_rule_creator.cpp b/ydb/core/yq/libs/read_rule/read_rule_creator.cpp index 4ca1a5ca47c..80afa719ca2 100644 --- a/ydb/core/yq/libs/read_rule/read_rule_creator.cpp +++ b/ydb/core/yq/libs/read_rule/read_rule_creator.cpp @@ -1,310 +1,310 @@ -#include "read_rule_creator.h" - +#include "read_rule_creator.h" + #include <ydb/core/yq/libs/events/events.h> - + #include <ydb/core/protos/services.pb.h> #include <ydb/public/sdk/cpp/client/ydb_persqueue_public/persqueue.h> - + #include <ydb/library/yql/providers/dq/api/protos/service.pb.h> #include <ydb/library/yql/providers/pq/proto/dq_task_params.pb.h> - -#include <library/cpp/actors/core/actor_bootstrapped.h> -#include <library/cpp/actors/core/hfunc.h> -#include <library/cpp/actors/core/log.h> - -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) - -#define LOG_I(stream) \ - LOG_INFO_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) - -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) - - -namespace NYq { -namespace { - -using namespace NActors; - -struct TEvPrivate { - // Event ids. - enum EEv : ui32 { - EvBegin = EventSpaceBegin(NActors::TEvents::ES_PRIVATE), - - EvSingleReadRuleCreatorResult = EvBegin, - EvAddReadRuleStatus, - - EvEnd - }; - - static_assert(EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE), "expect EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE)"); - - // Events. 
- - struct TEvSingleReadRuleCreatorResult : TEventLocal<TEvSingleReadRuleCreatorResult, EvSingleReadRuleCreatorResult> { - TEvSingleReadRuleCreatorResult() = default; - - explicit TEvSingleReadRuleCreatorResult(const NYql::TIssues& issues) - : Issues(issues) - { - } - - NYql::TIssues Issues; - }; - - struct TEvAddReadRuleStatus : TEventLocal<TEvAddReadRuleStatus, EvAddReadRuleStatus> { - TEvAddReadRuleStatus(NYdb::TStatus status) - : Status(std::move(status)) - { - } - - NYdb::TStatus Status; - }; -}; - -// Actor for creating read rule for one topic. -class TSingleReadRuleCreator : public TActorBootstrapped<TSingleReadRuleCreator> { -public: - TSingleReadRuleCreator( - NActors::TActorId owner, - TString queryId, - NYdb::TDriver ydbDriver, - NYql::NPq::NProto::TDqPqTopicSource topic, - std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProvider, - ui64 index - ) - : Owner(owner) - , QueryId(std::move(queryId)) - , Topic(std::move(topic)) - , YdbDriver(std::move(ydbDriver)) - , PqClient(YdbDriver, GetPqClientSettings(std::move(credentialsProvider))) - , Index(index) - { - } - - void Bootstrap() { - Become(&TSingleReadRuleCreator::StateFunc); - StartRequest(); - } - - static constexpr char ActorName[] = "YQ_SINGLE_READ_RULE_CREATOR"; - - TString GetTopicPath() const { - TStringBuilder ret; - ret << Topic.GetDatabase(); - if (ret && ret.back() != '/') { - ret << '/'; - } - ret << Topic.GetTopicPath(); - return std::move(ret); - } - - void StartRequest() { - Y_VERIFY(!RequestInFlight); - RequestInFlight = true; - LOG_D("Make request for read rule creation for topic `" << Topic.GetTopicPath() << "` [" << Index << "]"); - PqClient.AddReadRule( - GetTopicPath(), - NYdb::NPersQueue::TAddReadRuleSettings() - .ReadRule( - NYdb::NPersQueue::TReadRuleSettings() - .ConsumerName(Topic.GetConsumerName()) - .SupportedCodecs({ - NYdb::NPersQueue::ECodec::RAW, - NYdb::NPersQueue::ECodec::GZIP, - NYdb::NPersQueue::ECodec::LZOP, - NYdb::NPersQueue::ECodec::ZSTD - }) - ) - ).Subscribe( - [actorSystem = TActivationContext::ActorSystem(), selfId = SelfId()](const NYdb::TAsyncStatus& status) { - actorSystem->Send(selfId, new TEvPrivate::TEvAddReadRuleStatus(status.GetValue())); - } - ); - } - - void Handle(TEvPrivate::TEvAddReadRuleStatus::TPtr& ev) { - Y_VERIFY(RequestInFlight); - RequestInFlight = false; - const NYdb::TStatus& status = ev->Get()->Status; - if (status.IsSuccess() || status.GetStatus() == NYdb::EStatus::ALREADY_EXISTS) { - Send(Owner, MakeHolder<TEvPrivate::TEvSingleReadRuleCreatorResult>(), 0, Index); - PassAway(); - } else { - if (!RetryState) { - RetryState = NYdb::NPersQueue::IRetryPolicy::GetExponentialBackoffPolicy()->CreateRetryState(); - } - TMaybe<TDuration> nextRetryDelay = RetryState->GetNextRetryDelay(status); - if (status.GetStatus() == NYdb::EStatus::SCHEME_ERROR) { - nextRetryDelay = Nothing(); // Not retryable - } - - LOG_D("Failed to add read rule to `" << Topic.GetTopicPath() << "`: " << status.GetIssues().ToString() << ". Status: " << status.GetStatus() << ". 
Retry after: " << nextRetryDelay); - if (!nextRetryDelay) { // Not retryable - Send(Owner, MakeHolder<TEvPrivate::TEvSingleReadRuleCreatorResult>(status.GetIssues()), 0, Index); - PassAway(); - } else { - if (!CheckFinish()) { + +#include <library/cpp/actors/core/actor_bootstrapped.h> +#include <library/cpp/actors/core/hfunc.h> +#include <library/cpp/actors/core/log.h> + +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) + +#define LOG_I(stream) \ + LOG_INFO_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) + +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) + + +namespace NYq { +namespace { + +using namespace NActors; + +struct TEvPrivate { + // Event ids. + enum EEv : ui32 { + EvBegin = EventSpaceBegin(NActors::TEvents::ES_PRIVATE), + + EvSingleReadRuleCreatorResult = EvBegin, + EvAddReadRuleStatus, + + EvEnd + }; + + static_assert(EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE), "expect EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE)"); + + // Events. + + struct TEvSingleReadRuleCreatorResult : TEventLocal<TEvSingleReadRuleCreatorResult, EvSingleReadRuleCreatorResult> { + TEvSingleReadRuleCreatorResult() = default; + + explicit TEvSingleReadRuleCreatorResult(const NYql::TIssues& issues) + : Issues(issues) + { + } + + NYql::TIssues Issues; + }; + + struct TEvAddReadRuleStatus : TEventLocal<TEvAddReadRuleStatus, EvAddReadRuleStatus> { + TEvAddReadRuleStatus(NYdb::TStatus status) + : Status(std::move(status)) + { + } + + NYdb::TStatus Status; + }; +}; + +// Actor for creating read rule for one topic. +class TSingleReadRuleCreator : public TActorBootstrapped<TSingleReadRuleCreator> { +public: + TSingleReadRuleCreator( + NActors::TActorId owner, + TString queryId, + NYdb::TDriver ydbDriver, + NYql::NPq::NProto::TDqPqTopicSource topic, + std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProvider, + ui64 index + ) + : Owner(owner) + , QueryId(std::move(queryId)) + , Topic(std::move(topic)) + , YdbDriver(std::move(ydbDriver)) + , PqClient(YdbDriver, GetPqClientSettings(std::move(credentialsProvider))) + , Index(index) + { + } + + void Bootstrap() { + Become(&TSingleReadRuleCreator::StateFunc); + StartRequest(); + } + + static constexpr char ActorName[] = "YQ_SINGLE_READ_RULE_CREATOR"; + + TString GetTopicPath() const { + TStringBuilder ret; + ret << Topic.GetDatabase(); + if (ret && ret.back() != '/') { + ret << '/'; + } + ret << Topic.GetTopicPath(); + return std::move(ret); + } + + void StartRequest() { + Y_VERIFY(!RequestInFlight); + RequestInFlight = true; + LOG_D("Make request for read rule creation for topic `" << Topic.GetTopicPath() << "` [" << Index << "]"); + PqClient.AddReadRule( + GetTopicPath(), + NYdb::NPersQueue::TAddReadRuleSettings() + .ReadRule( + NYdb::NPersQueue::TReadRuleSettings() + .ConsumerName(Topic.GetConsumerName()) + .SupportedCodecs({ + NYdb::NPersQueue::ECodec::RAW, + NYdb::NPersQueue::ECodec::GZIP, + NYdb::NPersQueue::ECodec::LZOP, + NYdb::NPersQueue::ECodec::ZSTD + }) + ) + ).Subscribe( + [actorSystem = TActivationContext::ActorSystem(), selfId = SelfId()](const NYdb::TAsyncStatus& status) { + actorSystem->Send(selfId, new TEvPrivate::TEvAddReadRuleStatus(status.GetValue())); + } + ); + } + + void Handle(TEvPrivate::TEvAddReadRuleStatus::TPtr& ev) { + Y_VERIFY(RequestInFlight); + RequestInFlight = false; + const NYdb::TStatus& status = ev->Get()->Status; + if (status.IsSuccess() || 
status.GetStatus() == NYdb::EStatus::ALREADY_EXISTS) { + Send(Owner, MakeHolder<TEvPrivate::TEvSingleReadRuleCreatorResult>(), 0, Index); + PassAway(); + } else { + if (!RetryState) { + RetryState = NYdb::NPersQueue::IRetryPolicy::GetExponentialBackoffPolicy()->CreateRetryState(); + } + TMaybe<TDuration> nextRetryDelay = RetryState->GetNextRetryDelay(status); + if (status.GetStatus() == NYdb::EStatus::SCHEME_ERROR) { + nextRetryDelay = Nothing(); // Not retryable + } + + LOG_D("Failed to add read rule to `" << Topic.GetTopicPath() << "`: " << status.GetIssues().ToString() << ". Status: " << status.GetStatus() << ". Retry after: " << nextRetryDelay); + if (!nextRetryDelay) { // Not retryable + Send(Owner, MakeHolder<TEvPrivate::TEvSingleReadRuleCreatorResult>(status.GetIssues()), 0, Index); + PassAway(); + } else { + if (!CheckFinish()) { Schedule(*nextRetryDelay, new NActors::TEvents::TEvWakeup()); - } - } - } - } - + } + } + } + } + void Handle(NActors::TEvents::TEvWakeup::TPtr&) { - if (!CheckFinish()) { - StartRequest(); - } - } - - void Handle(NActors::TEvents::TEvPoison::TPtr& ev) { - Y_VERIFY(ev->Sender == Owner); - Finishing = true; - CheckFinish(); - } - - bool CheckFinish() { - if (Finishing && !RequestInFlight) { - Send(Owner, MakeHolder<TEvPrivate::TEvSingleReadRuleCreatorResult>(), 0, Index); - PassAway(); - return true; - } - return false; - } - - STRICT_STFUNC(StateFunc, - hFunc(TEvPrivate::TEvAddReadRuleStatus, Handle); + if (!CheckFinish()) { + StartRequest(); + } + } + + void Handle(NActors::TEvents::TEvPoison::TPtr& ev) { + Y_VERIFY(ev->Sender == Owner); + Finishing = true; + CheckFinish(); + } + + bool CheckFinish() { + if (Finishing && !RequestInFlight) { + Send(Owner, MakeHolder<TEvPrivate::TEvSingleReadRuleCreatorResult>(), 0, Index); + PassAway(); + return true; + } + return false; + } + + STRICT_STFUNC(StateFunc, + hFunc(TEvPrivate::TEvAddReadRuleStatus, Handle); hFunc(NActors::TEvents::TEvWakeup, Handle); - hFunc(NActors::TEvents::TEvPoison, Handle); - ) - -private: - NYdb::NPersQueue::TPersQueueClientSettings GetPqClientSettings(std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProvider) { - return NYdb::NPersQueue::TPersQueueClientSettings() - .ClusterDiscoveryMode(NYdb::NPersQueue::EClusterDiscoveryMode::Off) - .Database(Topic.GetDatabase()) - .DiscoveryEndpoint(Topic.GetEndpoint()) - .CredentialsProviderFactory(std::move(credentialsProvider)) - .DiscoveryMode(NYdb::EDiscoveryMode::Async) - .EnableSsl(Topic.GetUseSsl()); - } - -private: - const NActors::TActorId Owner; - const TString QueryId; - const NYql::NPq::NProto::TDqPqTopicSource Topic; - NYdb::TDriver YdbDriver; - NYdb::NPersQueue::TPersQueueClient PqClient; - ui64 Index = 0; - NYdb::NPersQueue::IRetryState::TPtr RetryState; - bool RequestInFlight = false; - bool Finishing = false; -}; - -// Actor for creating read rules for all topics in the query. 
-class TReadRuleCreator : public TActorBootstrapped<TReadRuleCreator> { -public: - TReadRuleCreator( - NActors::TActorId owner, - TString queryId, - NYdb::TDriver ydbDriver, - TVector<NYql::NPq::NProto::TDqPqTopicSource> topics, - TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials - ) - : Owner(owner) - , QueryId(std::move(queryId)) - , YdbDriver(std::move(ydbDriver)) - , Topics(std::move(topics)) - , Credentials(std::move(credentials)) - { - Y_VERIFY(!Topics.empty()); - Results.resize(Topics.size()); - } - - static constexpr char ActorName[] = "YQ_READ_RULE_CREATOR"; - - void Bootstrap() { - Become(&TReadRuleCreator::StateFunc); - - Children.reserve(Topics.size()); - Results.reserve(Topics.size()); - for (size_t i = 0; i < Topics.size(); ++i) { - LOG_D("Create read rule creation actor for `" << Topics[i].GetTopicPath() << "` [" << i << "]"); - Children.push_back(Register(new TSingleReadRuleCreator(SelfId(), QueryId, YdbDriver, Topics[i], Credentials[i], i))); - } - } - - void Handle(TEvPrivate::TEvSingleReadRuleCreatorResult::TPtr& ev) { - const ui64 index = ev->Cookie; - Y_VERIFY(!Results[index]); - if (ev->Get()->Issues) { - Ok = false; - } - Results[index] = std::move(ev); - ++ResultsGot; - SendResultsAndPassAwayIfDone(); - } - - void Handle(NActors::TEvents::TEvPoison::TPtr& ev) { - Y_VERIFY(ev->Sender == Owner); - for (const NActors::TActorId& child : Children) { - Send(child, new NActors::TEvents::TEvPoison()); - } - } - - void SendResultsAndPassAwayIfDone() { - Y_VERIFY(ResultsGot <= Topics.size()); - if (ResultsGot == Topics.size()) { - NYql::TIssues issues; - if (!Ok) { - NYql::TIssue mainIssue("Failed to create read rules for topics"); - for (auto& result : Results) { - for (const NYql::TIssue& issue : result->Get()->Issues) { - mainIssue.AddSubIssue(MakeIntrusive<NYql::TIssue>(issue)); - } - } - issues.AddIssue(std::move(mainIssue)); - } + hFunc(NActors::TEvents::TEvPoison, Handle); + ) + +private: + NYdb::NPersQueue::TPersQueueClientSettings GetPqClientSettings(std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProvider) { + return NYdb::NPersQueue::TPersQueueClientSettings() + .ClusterDiscoveryMode(NYdb::NPersQueue::EClusterDiscoveryMode::Off) + .Database(Topic.GetDatabase()) + .DiscoveryEndpoint(Topic.GetEndpoint()) + .CredentialsProviderFactory(std::move(credentialsProvider)) + .DiscoveryMode(NYdb::EDiscoveryMode::Async) + .EnableSsl(Topic.GetUseSsl()); + } + +private: + const NActors::TActorId Owner; + const TString QueryId; + const NYql::NPq::NProto::TDqPqTopicSource Topic; + NYdb::TDriver YdbDriver; + NYdb::NPersQueue::TPersQueueClient PqClient; + ui64 Index = 0; + NYdb::NPersQueue::IRetryState::TPtr RetryState; + bool RequestInFlight = false; + bool Finishing = false; +}; + +// Actor for creating read rules for all topics in the query. 
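[Note before the aggregating actor below] Each TSingleReadRuleCreator reports back with Send(Owner, result, 0 /*flags*/, Index /*cookie*/), and TReadRuleCreator recovers the topic index from ev->Cookie; that is what lets Results stay a plain TVector indexed by topic. The same pattern in isolation, with a hypothetical TEvDone event and a sketched parent handler:

    // Child side (as in TSingleReadRuleCreator): 3rd Send argument is flags, 4th is an opaque cookie.
    Send(Owner, MakeHolder<TEvDone>(), 0 /*flags*/, Index /*cookie*/);

    // Parent side: the cookie travels back unchanged on the event handle.
    void Handle(TEvDone::TPtr& ev) {
        const ui64 index = ev->Cookie;   // the child's Index
        Results[index] = std::move(ev);
    }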
+class TReadRuleCreator : public TActorBootstrapped<TReadRuleCreator> { +public: + TReadRuleCreator( + NActors::TActorId owner, + TString queryId, + NYdb::TDriver ydbDriver, + TVector<NYql::NPq::NProto::TDqPqTopicSource> topics, + TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials + ) + : Owner(owner) + , QueryId(std::move(queryId)) + , YdbDriver(std::move(ydbDriver)) + , Topics(std::move(topics)) + , Credentials(std::move(credentials)) + { + Y_VERIFY(!Topics.empty()); + Results.resize(Topics.size()); + } + + static constexpr char ActorName[] = "YQ_READ_RULE_CREATOR"; + + void Bootstrap() { + Become(&TReadRuleCreator::StateFunc); + + Children.reserve(Topics.size()); + Results.reserve(Topics.size()); + for (size_t i = 0; i < Topics.size(); ++i) { + LOG_D("Create read rule creation actor for `" << Topics[i].GetTopicPath() << "` [" << i << "]"); + Children.push_back(Register(new TSingleReadRuleCreator(SelfId(), QueryId, YdbDriver, Topics[i], Credentials[i], i))); + } + } + + void Handle(TEvPrivate::TEvSingleReadRuleCreatorResult::TPtr& ev) { + const ui64 index = ev->Cookie; + Y_VERIFY(!Results[index]); + if (ev->Get()->Issues) { + Ok = false; + } + Results[index] = std::move(ev); + ++ResultsGot; + SendResultsAndPassAwayIfDone(); + } + + void Handle(NActors::TEvents::TEvPoison::TPtr& ev) { + Y_VERIFY(ev->Sender == Owner); + for (const NActors::TActorId& child : Children) { + Send(child, new NActors::TEvents::TEvPoison()); + } + } + + void SendResultsAndPassAwayIfDone() { + Y_VERIFY(ResultsGot <= Topics.size()); + if (ResultsGot == Topics.size()) { + NYql::TIssues issues; + if (!Ok) { + NYql::TIssue mainIssue("Failed to create read rules for topics"); + for (auto& result : Results) { + for (const NYql::TIssue& issue : result->Get()->Issues) { + mainIssue.AddSubIssue(MakeIntrusive<NYql::TIssue>(issue)); + } + } + issues.AddIssue(std::move(mainIssue)); + } Send(Owner, MakeHolder<NYq::TEvents::TEvDataStreamsReadRulesCreationResult>(std::move(issues))); - PassAway(); - } - } - - STRICT_STFUNC(StateFunc, - hFunc(TEvPrivate::TEvSingleReadRuleCreatorResult, Handle); - hFunc(NActors::TEvents::TEvPoison, Handle); - ) - -private: - const NActors::TActorId Owner; - const TString QueryId; - NYdb::TDriver YdbDriver; - const TVector<NYql::NPq::NProto::TDqPqTopicSource> Topics; - const TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> Credentials; - size_t ResultsGot = 0; - bool Ok = true; - TVector<TEvPrivate::TEvSingleReadRuleCreatorResult::TPtr> Results; - TVector<NActors::TActorId> Children; -}; - -} // namespace - -NActors::IActor* MakeReadRuleCreatorActor( - NActors::TActorId owner, - TString queryId, - NYdb::TDriver ydbDriver, - TVector<NYql::NPq::NProto::TDqPqTopicSource> topics, - TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials -) -{ - return new TReadRuleCreator( - owner, - std::move(queryId), - std::move(ydbDriver), - std::move(topics), - std::move(credentials) - ); -} - -} // namespace NYq + PassAway(); + } + } + + STRICT_STFUNC(StateFunc, + hFunc(TEvPrivate::TEvSingleReadRuleCreatorResult, Handle); + hFunc(NActors::TEvents::TEvPoison, Handle); + ) + +private: + const NActors::TActorId Owner; + const TString QueryId; + NYdb::TDriver YdbDriver; + const TVector<NYql::NPq::NProto::TDqPqTopicSource> Topics; + const TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> Credentials; + size_t ResultsGot = 0; + bool Ok = true; + TVector<TEvPrivate::TEvSingleReadRuleCreatorResult::TPtr> Results; + TVector<NActors::TActorId> Children; +}; + +} // 
namespace + +NActors::IActor* MakeReadRuleCreatorActor( + NActors::TActorId owner, + TString queryId, + NYdb::TDriver ydbDriver, + TVector<NYql::NPq::NProto::TDqPqTopicSource> topics, + TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials +) +{ + return new TReadRuleCreator( + owner, + std::move(queryId), + std::move(ydbDriver), + std::move(topics), + std::move(credentials) + ); +} + +} // namespace NYq diff --git a/ydb/core/yq/libs/read_rule/read_rule_creator.h b/ydb/core/yq/libs/read_rule/read_rule_creator.h index 66f0f7c2c32..a1b7ce7c2a6 100644 --- a/ydb/core/yq/libs/read_rule/read_rule_creator.h +++ b/ydb/core/yq/libs/read_rule/read_rule_creator.h @@ -1,22 +1,22 @@ -#pragma once +#pragma once #include <ydb/library/yql/providers/pq/proto/dq_io.pb.h> - + #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h> - -#include <library/cpp/actors/core/actor.h> - -#include <util/generic/maybe.h> - -#include <google/protobuf/any.pb.h> - -namespace NYq { - -NActors::IActor* MakeReadRuleCreatorActor( - NActors::TActorId owner, - TString queryId, - NYdb::TDriver ydbDriver, - TVector<NYql::NPq::NProto::TDqPqTopicSource> topics, - TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials // For each topic -); - -} // namespace NYq + +#include <library/cpp/actors/core/actor.h> + +#include <util/generic/maybe.h> + +#include <google/protobuf/any.pb.h> + +namespace NYq { + +NActors::IActor* MakeReadRuleCreatorActor( + NActors::TActorId owner, + TString queryId, + NYdb::TDriver ydbDriver, + TVector<NYql::NPq::NProto::TDqPqTopicSource> topics, + TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials // For each topic +); + +} // namespace NYq diff --git a/ydb/core/yq/libs/read_rule/read_rule_deleter.cpp b/ydb/core/yq/libs/read_rule/read_rule_deleter.cpp index b5c2e4d5496..eaadd65cc14 100644 --- a/ydb/core/yq/libs/read_rule/read_rule_deleter.cpp +++ b/ydb/core/yq/libs/read_rule/read_rule_deleter.cpp @@ -1,289 +1,289 @@ -#include "read_rule_deleter.h" - +#include "read_rule_deleter.h" + #include <ydb/core/yq/libs/events/events.h> - + #include <ydb/core/protos/services.pb.h> #include <ydb/public/sdk/cpp/client/ydb_persqueue_public/persqueue.h> - -#include <library/cpp/actors/core/actor_bootstrapped.h> -#include <library/cpp/actors/core/hfunc.h> -#include <library/cpp/actors/core/log.h> - -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) - -#define LOG_I(stream) \ - LOG_INFO_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) - -#define LOG_D(stream) \ - LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) - -namespace NYq { -namespace { - -using namespace NActors; - -struct TEvPrivate { - // Event ids. - enum EEv : ui32 { - EvBegin = EventSpaceBegin(NActors::TEvents::ES_PRIVATE), - - EvSingleReadRuleDeleterResult = EvBegin, - EvRemoveReadRuleStatus, - - EvEnd - }; - - static_assert(EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE), "expect EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE)"); - - // Events. 
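[Note on read_rule_creator.h above, before the deleter's private events] MakeReadRuleCreatorActor is the only public entry point; a minimal sketch of starting it from inside an owning actor. queryId, ydbDriver, topics and creds are assumed to be prepared by the caller; this is not the real run actor code:

    const NActors::TActorId creatorId = Register(NYq::MakeReadRuleCreatorActor(
        SelfId(), queryId, ydbDriver, std::move(topics), std::move(creds)));
    Y_UNUSED(creatorId);
    // When every topic is handled, the creator answers the owner with
    // NYq::TEvents::TEvDataStreamsReadRulesCreationResult; empty Issues means full success.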
- - struct TEvSingleReadRuleDeleterResult : TEventLocal<TEvSingleReadRuleDeleterResult, EvSingleReadRuleDeleterResult> { - TEvSingleReadRuleDeleterResult() = default; - - explicit TEvSingleReadRuleDeleterResult(const NYql::TIssues& issues) - : Issues(issues) - { - } - - NYql::TIssues Issues; - }; - - struct TEvRemoveReadRuleStatus : TEventLocal<TEvRemoveReadRuleStatus, EvRemoveReadRuleStatus> { - TEvRemoveReadRuleStatus(NYdb::TStatus status) - : Status(std::move(status)) - { - } - - NYdb::TStatus Status; - }; -}; - -// Actor for deletion of read rule for one topic. -class TSingleReadRuleDeleter : public TActorBootstrapped<TSingleReadRuleDeleter> { -public: - TSingleReadRuleDeleter( - NActors::TActorId owner, - TString queryId, - NYdb::TDriver ydbDriver, + +#include <library/cpp/actors/core/actor_bootstrapped.h> +#include <library/cpp/actors/core/hfunc.h> +#include <library/cpp/actors/core/log.h> + +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) + +#define LOG_I(stream) \ + LOG_INFO_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) + +#define LOG_D(stream) \ + LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::STREAMS, QueryId << ": " << stream) + +namespace NYq { +namespace { + +using namespace NActors; + +struct TEvPrivate { + // Event ids. + enum EEv : ui32 { + EvBegin = EventSpaceBegin(NActors::TEvents::ES_PRIVATE), + + EvSingleReadRuleDeleterResult = EvBegin, + EvRemoveReadRuleStatus, + + EvEnd + }; + + static_assert(EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE), "expect EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE)"); + + // Events. + + struct TEvSingleReadRuleDeleterResult : TEventLocal<TEvSingleReadRuleDeleterResult, EvSingleReadRuleDeleterResult> { + TEvSingleReadRuleDeleterResult() = default; + + explicit TEvSingleReadRuleDeleterResult(const NYql::TIssues& issues) + : Issues(issues) + { + } + + NYql::TIssues Issues; + }; + + struct TEvRemoveReadRuleStatus : TEventLocal<TEvRemoveReadRuleStatus, EvRemoveReadRuleStatus> { + TEvRemoveReadRuleStatus(NYdb::TStatus status) + : Status(std::move(status)) + { + } + + NYdb::TStatus Status; + }; +}; + +// Actor for deletion of read rule for one topic. 
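[Note] Unlike the creator, the deleter below is driven by Yq::Private::TopicConsumer protos rather than TDqPqTopicSource. Filling one might look like the following, assuming the standard protobuf-generated setters and purely illustrative values:

    Yq::Private::TopicConsumer consumer;
    consumer.set_database("/root/my_folder");         // illustrative
    consumer.set_topic_path("my/topic");              // illustrative
    consumer.set_consumer_name("my_query_consumer");  // illustrative
    consumer.set_cluster_endpoint("localhost:2135");  // illustrative
    consumer.set_use_ssl(false);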
+class TSingleReadRuleDeleter : public TActorBootstrapped<TSingleReadRuleDeleter> { +public: + TSingleReadRuleDeleter( + NActors::TActorId owner, + TString queryId, + NYdb::TDriver ydbDriver, Yq::Private::TopicConsumer topic, - std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProvider, - ui64 index, - size_t maxRetries - ) - : Owner(owner) - , QueryId(std::move(queryId)) - , Topic(std::move(topic)) - , YdbDriver(std::move(ydbDriver)) - , PqClient(YdbDriver, GetPqClientSettings(std::move(credentialsProvider))) - , Index(index) - , MaxRetries(maxRetries) - { - } - - static constexpr char ActorName[] = "YQ_SINGLE_READ_RULE_DELETER"; - - void Bootstrap() { - Become(&TSingleReadRuleDeleter::StateFunc); - StartRequest(); - } - - TString GetTopicPath() const { - TStringBuilder ret; - ret << Topic.database(); - if (ret && ret.back() != '/') { - ret << '/'; - } - ret << Topic.topic_path(); - return std::move(ret); - } - - void StartRequest() { - LOG_D("Make request for read rule deletion for topic `" << Topic.topic_path() << "` [" << Index << "]"); - PqClient.RemoveReadRule( - GetTopicPath(), - NYdb::NPersQueue::TRemoveReadRuleSettings() - .ConsumerName(Topic.consumer_name()) - ).Subscribe( - [actorSystem = TActivationContext::ActorSystem(), selfId = SelfId()](const NYdb::TAsyncStatus& status) { - actorSystem->Send(selfId, new TEvPrivate::TEvRemoveReadRuleStatus(status.GetValue())); - } - ); - } - - void Handle(TEvPrivate::TEvRemoveReadRuleStatus::TPtr& ev) { - const NYdb::TStatus& status = ev->Get()->Status; - if (status.IsSuccess() || status.GetStatus() == NYdb::EStatus::NOT_FOUND) { - Send(Owner, MakeHolder<TEvPrivate::TEvSingleReadRuleDeleterResult>(), 0, Index); - PassAway(); - } else { - if (!RetryState) { - // Choose default retry policy arguments from persqueue.h except maxRetries - RetryState = - NYdb::NPersQueue::IRetryPolicy::GetExponentialBackoffPolicy( - TDuration::MilliSeconds(10), // minDelay - TDuration::MilliSeconds(200), // minLongRetryDelay - TDuration::Seconds(30), // maxDelay - MaxRetries, - TDuration::Max(), // maxTime - 2.0 // scaleFactor - )->CreateRetryState(); - } - TMaybe<TDuration> nextRetryDelay = RetryState->GetNextRetryDelay(status); - if (status.GetStatus() == NYdb::EStatus::SCHEME_ERROR) { - nextRetryDelay = Nothing(); // No topic => OK. Leave just transient issues. - } - - LOG_D("Failed to remove read rule from `" << Topic.topic_path() << "`: " << status.GetIssues().ToString() << ". Status: " << status.GetStatus() << ". 
Retry after: " << nextRetryDelay); - if (!nextRetryDelay) { // Not retryable - Send(Owner, MakeHolder<TEvPrivate::TEvSingleReadRuleDeleterResult>(status.GetIssues()), 0, Index); - PassAway(); - } else { + std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProvider, + ui64 index, + size_t maxRetries + ) + : Owner(owner) + , QueryId(std::move(queryId)) + , Topic(std::move(topic)) + , YdbDriver(std::move(ydbDriver)) + , PqClient(YdbDriver, GetPqClientSettings(std::move(credentialsProvider))) + , Index(index) + , MaxRetries(maxRetries) + { + } + + static constexpr char ActorName[] = "YQ_SINGLE_READ_RULE_DELETER"; + + void Bootstrap() { + Become(&TSingleReadRuleDeleter::StateFunc); + StartRequest(); + } + + TString GetTopicPath() const { + TStringBuilder ret; + ret << Topic.database(); + if (ret && ret.back() != '/') { + ret << '/'; + } + ret << Topic.topic_path(); + return std::move(ret); + } + + void StartRequest() { + LOG_D("Make request for read rule deletion for topic `" << Topic.topic_path() << "` [" << Index << "]"); + PqClient.RemoveReadRule( + GetTopicPath(), + NYdb::NPersQueue::TRemoveReadRuleSettings() + .ConsumerName(Topic.consumer_name()) + ).Subscribe( + [actorSystem = TActivationContext::ActorSystem(), selfId = SelfId()](const NYdb::TAsyncStatus& status) { + actorSystem->Send(selfId, new TEvPrivate::TEvRemoveReadRuleStatus(status.GetValue())); + } + ); + } + + void Handle(TEvPrivate::TEvRemoveReadRuleStatus::TPtr& ev) { + const NYdb::TStatus& status = ev->Get()->Status; + if (status.IsSuccess() || status.GetStatus() == NYdb::EStatus::NOT_FOUND) { + Send(Owner, MakeHolder<TEvPrivate::TEvSingleReadRuleDeleterResult>(), 0, Index); + PassAway(); + } else { + if (!RetryState) { + // Choose default retry policy arguments from persqueue.h except maxRetries + RetryState = + NYdb::NPersQueue::IRetryPolicy::GetExponentialBackoffPolicy( + TDuration::MilliSeconds(10), // minDelay + TDuration::MilliSeconds(200), // minLongRetryDelay + TDuration::Seconds(30), // maxDelay + MaxRetries, + TDuration::Max(), // maxTime + 2.0 // scaleFactor + )->CreateRetryState(); + } + TMaybe<TDuration> nextRetryDelay = RetryState->GetNextRetryDelay(status); + if (status.GetStatus() == NYdb::EStatus::SCHEME_ERROR) { + nextRetryDelay = Nothing(); // No topic => OK. Leave just transient issues. + } + + LOG_D("Failed to remove read rule from `" << Topic.topic_path() << "`: " << status.GetIssues().ToString() << ". Status: " << status.GetStatus() << ". 
Retry after: " << nextRetryDelay); + if (!nextRetryDelay) { // Not retryable + Send(Owner, MakeHolder<TEvPrivate::TEvSingleReadRuleDeleterResult>(status.GetIssues()), 0, Index); + PassAway(); + } else { Schedule(*nextRetryDelay, new NActors::TEvents::TEvWakeup()); - } - } - } - + } + } + } + void Handle(NActors::TEvents::TEvWakeup::TPtr&) { - StartRequest(); - } - - STRICT_STFUNC(StateFunc, - hFunc(TEvPrivate::TEvRemoveReadRuleStatus, Handle); + StartRequest(); + } + + STRICT_STFUNC(StateFunc, + hFunc(TEvPrivate::TEvRemoveReadRuleStatus, Handle); hFunc(NActors::TEvents::TEvWakeup, Handle); - cFunc(NActors::TEvents::TEvPoison::EventType, PassAway); - ) - -private: - NYdb::NPersQueue::TPersQueueClientSettings GetPqClientSettings(std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProvider) { - return NYdb::NPersQueue::TPersQueueClientSettings() - .ClusterDiscoveryMode(NYdb::NPersQueue::EClusterDiscoveryMode::Off) - .Database(Topic.database()) - .DiscoveryEndpoint(Topic.cluster_endpoint()) - .CredentialsProviderFactory(std::move(credentialsProvider)) - .DiscoveryMode(NYdb::EDiscoveryMode::Async) - .EnableSsl(Topic.use_ssl()); - } - -private: - const NActors::TActorId Owner; - const TString QueryId; + cFunc(NActors::TEvents::TEvPoison::EventType, PassAway); + ) + +private: + NYdb::NPersQueue::TPersQueueClientSettings GetPqClientSettings(std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProvider) { + return NYdb::NPersQueue::TPersQueueClientSettings() + .ClusterDiscoveryMode(NYdb::NPersQueue::EClusterDiscoveryMode::Off) + .Database(Topic.database()) + .DiscoveryEndpoint(Topic.cluster_endpoint()) + .CredentialsProviderFactory(std::move(credentialsProvider)) + .DiscoveryMode(NYdb::EDiscoveryMode::Async) + .EnableSsl(Topic.use_ssl()); + } + +private: + const NActors::TActorId Owner; + const TString QueryId; const Yq::Private::TopicConsumer Topic; - NYdb::TDriver YdbDriver; - NYdb::NPersQueue::TPersQueueClient PqClient; - ui64 Index = 0; - const size_t MaxRetries; - NYdb::NPersQueue::IRetryState::TPtr RetryState; -}; - -// Actor for deletion of read rules for all topics in the query. -class TReadRuleDeleter : public TActorBootstrapped<TReadRuleDeleter> { -public: - TReadRuleDeleter( - NActors::TActorId owner, - TString queryId, - NYdb::TDriver ydbDriver, + NYdb::TDriver YdbDriver; + NYdb::NPersQueue::TPersQueueClient PqClient; + ui64 Index = 0; + const size_t MaxRetries; + NYdb::NPersQueue::IRetryState::TPtr RetryState; +}; + +// Actor for deletion of read rules for all topics in the query. 
+class TReadRuleDeleter : public TActorBootstrapped<TReadRuleDeleter> { +public: + TReadRuleDeleter( + NActors::TActorId owner, + TString queryId, + NYdb::TDriver ydbDriver, TVector<Yq::Private::TopicConsumer> topics, - TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials, - size_t maxRetries - ) - : Owner(owner) - , QueryId(std::move(queryId)) - , YdbDriver(std::move(ydbDriver)) - , Topics(std::move(topics)) - , Credentials(std::move(credentials)) - , MaxRetries(maxRetries) - { - Y_VERIFY(!Topics.empty()); - Results.resize(Topics.size()); - } - - void Bootstrap() { - Become(&TReadRuleDeleter::StateFunc); - - Children.reserve(Topics.size()); - Results.reserve(Topics.size()); - for (size_t i = 0; i < Topics.size(); ++i) { - LOG_D("Create read rule deleter actor for `" << Topics[i].topic_path() << "` [" << i << "]"); - Children.push_back(Register(new TSingleReadRuleDeleter(SelfId(), QueryId, YdbDriver, Topics[i], Credentials[i], i, MaxRetries))); - } - } - - static constexpr char ActorName[] = "YQ_READ_RULE_DELETER"; - - void Handle(TEvPrivate::TEvSingleReadRuleDeleterResult::TPtr& ev) { - const ui64 index = ev->Cookie; - Y_VERIFY(!Results[index]); - if (ev->Get()->Issues) { - Ok = false; - } - Results[index] = std::move(ev); - ++ResultsGot; - SendResultsAndPassAwayIfDone(); - } - - void PassAway() override { - for (const NActors::TActorId& child : Children) { - Send(child, new NActors::TEvents::TEvPoison()); - } - TActorBootstrapped<TReadRuleDeleter>::PassAway(); - } - - void SendResultsAndPassAwayIfDone() { - Y_VERIFY(ResultsGot <= Topics.size()); - if (ResultsGot == Topics.size()) { - NYql::TIssues issues; - if (!Ok) { - NYql::TIssue mainIssue("Failed to delete read rules for topics"); - for (auto& result : Results) { - for (const NYql::TIssue& issue : result->Get()->Issues) { - mainIssue.AddSubIssue(MakeIntrusive<NYql::TIssue>(issue)); - } - } - issues.AddIssue(std::move(mainIssue)); - } + TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials, + size_t maxRetries + ) + : Owner(owner) + , QueryId(std::move(queryId)) + , YdbDriver(std::move(ydbDriver)) + , Topics(std::move(topics)) + , Credentials(std::move(credentials)) + , MaxRetries(maxRetries) + { + Y_VERIFY(!Topics.empty()); + Results.resize(Topics.size()); + } + + void Bootstrap() { + Become(&TReadRuleDeleter::StateFunc); + + Children.reserve(Topics.size()); + Results.reserve(Topics.size()); + for (size_t i = 0; i < Topics.size(); ++i) { + LOG_D("Create read rule deleter actor for `" << Topics[i].topic_path() << "` [" << i << "]"); + Children.push_back(Register(new TSingleReadRuleDeleter(SelfId(), QueryId, YdbDriver, Topics[i], Credentials[i], i, MaxRetries))); + } + } + + static constexpr char ActorName[] = "YQ_READ_RULE_DELETER"; + + void Handle(TEvPrivate::TEvSingleReadRuleDeleterResult::TPtr& ev) { + const ui64 index = ev->Cookie; + Y_VERIFY(!Results[index]); + if (ev->Get()->Issues) { + Ok = false; + } + Results[index] = std::move(ev); + ++ResultsGot; + SendResultsAndPassAwayIfDone(); + } + + void PassAway() override { + for (const NActors::TActorId& child : Children) { + Send(child, new NActors::TEvents::TEvPoison()); + } + TActorBootstrapped<TReadRuleDeleter>::PassAway(); + } + + void SendResultsAndPassAwayIfDone() { + Y_VERIFY(ResultsGot <= Topics.size()); + if (ResultsGot == Topics.size()) { + NYql::TIssues issues; + if (!Ok) { + NYql::TIssue mainIssue("Failed to delete read rules for topics"); + for (auto& result : Results) { + for (const NYql::TIssue& issue : result->Get()->Issues) { + 
mainIssue.AddSubIssue(MakeIntrusive<NYql::TIssue>(issue)); + } + } + issues.AddIssue(std::move(mainIssue)); + } Send(Owner, MakeHolder<TEvents::TEvDataStreamsReadRulesDeletionResult>(std::move(issues))); - PassAway(); - } - } - - STRICT_STFUNC(StateFunc, - hFunc(TEvPrivate::TEvSingleReadRuleDeleterResult, Handle); - cFunc(NActors::TEvents::TEvPoison::EventType, PassAway); - ) - -private: - const NActors::TActorId Owner; - const TString QueryId; - NYdb::TDriver YdbDriver; + PassAway(); + } + } + + STRICT_STFUNC(StateFunc, + hFunc(TEvPrivate::TEvSingleReadRuleDeleterResult, Handle); + cFunc(NActors::TEvents::TEvPoison::EventType, PassAway); + ) + +private: + const NActors::TActorId Owner; + const TString QueryId; + NYdb::TDriver YdbDriver; const TVector<Yq::Private::TopicConsumer> Topics; - const TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> Credentials; - const size_t MaxRetries; - size_t ResultsGot = 0; - bool Ok = true; - TVector<TEvPrivate::TEvSingleReadRuleDeleterResult::TPtr> Results; - TVector<NActors::TActorId> Children; -}; - -} // namespace - -NActors::IActor* MakeReadRuleDeleterActor( - NActors::TActorId owner, - TString queryId, - NYdb::TDriver ydbDriver, + const TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> Credentials; + const size_t MaxRetries; + size_t ResultsGot = 0; + bool Ok = true; + TVector<TEvPrivate::TEvSingleReadRuleDeleterResult::TPtr> Results; + TVector<NActors::TActorId> Children; +}; + +} // namespace + +NActors::IActor* MakeReadRuleDeleterActor( + NActors::TActorId owner, + TString queryId, + NYdb::TDriver ydbDriver, TVector<Yq::Private::TopicConsumer> topics, - TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials, // For each topic - size_t maxRetries -) -{ - return new TReadRuleDeleter( - owner, - std::move(queryId), - std::move(ydbDriver), - std::move(topics), - std::move(credentials), - maxRetries - ); -} - -} // namespace NYq + TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials, // For each topic + size_t maxRetries +) +{ + return new TReadRuleDeleter( + owner, + std::move(queryId), + std::move(ydbDriver), + std::move(topics), + std::move(credentials), + maxRetries + ); +} + +} // namespace NYq diff --git a/ydb/core/yq/libs/read_rule/read_rule_deleter.h b/ydb/core/yq/libs/read_rule/read_rule_deleter.h index b3a92745a04..b42dddf1cd6 100644 --- a/ydb/core/yq/libs/read_rule/read_rule_deleter.h +++ b/ydb/core/yq/libs/read_rule/read_rule_deleter.h @@ -1,19 +1,19 @@ -#pragma once +#pragma once #include <ydb/public/api/protos/draft/yq_private.pb.h> - + #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h> - -#include <library/cpp/actors/core/actor.h> - -namespace NYq { - -NActors::IActor* MakeReadRuleDeleterActor( - NActors::TActorId owner, - TString queryId, - NYdb::TDriver ydbDriver, + +#include <library/cpp/actors/core/actor.h> + +namespace NYq { + +NActors::IActor* MakeReadRuleDeleterActor( + NActors::TActorId owner, + TString queryId, + NYdb::TDriver ydbDriver, TVector<Yq::Private::TopicConsumer> topics, - TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials, // For each topic - size_t maxRetries = 15 -); - -} // namespace NYq + TVector<std::shared_ptr<NYdb::ICredentialsProviderFactory>> credentials, // For each topic + size_t maxRetries = 15 +); + +} // namespace NYq diff --git a/ydb/core/yq/libs/read_rule/ya.make b/ydb/core/yq/libs/read_rule/ya.make index 82283aff668..86aa1c52575 100644 --- a/ydb/core/yq/libs/read_rule/ya.make +++ b/ydb/core/yq/libs/read_rule/ya.make @@ 
-1,14 +1,14 @@ OWNER(g:yq) - -LIBRARY() - -SRCS( - read_rule_creator.cpp - read_rule_deleter.cpp -) - -PEERDIR( - library/cpp/actors/core + +LIBRARY() + +SRCS( + read_rule_creator.cpp + read_rule_deleter.cpp +) + +PEERDIR( + library/cpp/actors/core ydb/core/protos ydb/core/yq/libs/events ydb/public/api/protos @@ -17,8 +17,8 @@ PEERDIR( ydb/library/yql/providers/common/proto ydb/library/yql/providers/dq/api/protos ydb/library/yql/providers/pq/proto -) - -YQL_LAST_ABI_VERSION() - -END() +) + +YQL_LAST_ABI_VERSION() + +END() diff --git a/ydb/core/yq/libs/shared_resources/db_pool.cpp b/ydb/core/yq/libs/shared_resources/db_pool.cpp index 372dfd72cdf..2586f20d209 100644 --- a/ydb/core/yq/libs/shared_resources/db_pool.cpp +++ b/ydb/core/yq/libs/shared_resources/db_pool.cpp @@ -9,8 +9,8 @@ #include <util/stream/file.h> #include <util/string/strip.h> -#define LOG_E(stream) \ - LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, stream) +#define LOG_E(stream) \ + LOG_ERROR_S(*TlsActivationContext, NKikimrServices::YQL_PROXY, stream) namespace NYq { @@ -65,7 +65,7 @@ public: RequestInProgress = true; RequestInProgressTimestamp = TInstant::Now(); const auto& requestVariant = Requests.front(); - + if (auto pRequest = std::get_if<TRequest>(&requestVariant)) { auto& request = *pRequest; auto actorSystem = ctx.ActorSystem(); @@ -159,7 +159,7 @@ private: TActorId Sender; ui64 Cookie; TFunction Handler; - + TFunctionRequest() = default; TFunctionRequest(const TActorId sender, ui64 cookie, TFunction&& handler) : Sender(sender) diff --git a/ydb/core/yq/libs/shared_resources/db_pool.h b/ydb/core/yq/libs/shared_resources/db_pool.h index 68d9adc2fca..dc08ecf2f2a 100644 --- a/ydb/core/yq/libs/shared_resources/db_pool.h +++ b/ydb/core/yq/libs/shared_resources/db_pool.h @@ -9,8 +9,8 @@ #include <library/cpp/actors/core/actor.h> #include <library/cpp/monlib/dynamic_counters/counters.h> -#include <util/system/mutex.h> - +#include <util/system/mutex.h> + namespace NYq { class TDbPool: public TThrRefBase { diff --git a/ydb/core/yq/libs/shared_resources/interface/shared_resources.cpp b/ydb/core/yq/libs/shared_resources/interface/shared_resources.cpp index 6e4774b7f41..d1fdcf4f711 100644 --- a/ydb/core/yq/libs/shared_resources/interface/shared_resources.cpp +++ b/ydb/core/yq/libs/shared_resources/interface/shared_resources.cpp @@ -1 +1 @@ -#include "shared_resources.h" +#include "shared_resources.h" diff --git a/ydb/core/yq/libs/shared_resources/interface/shared_resources.h b/ydb/core/yq/libs/shared_resources/interface/shared_resources.h index 485c0d89562..a8b467863a9 100644 --- a/ydb/core/yq/libs/shared_resources/interface/shared_resources.h +++ b/ydb/core/yq/libs/shared_resources/interface/shared_resources.h @@ -1,17 +1,17 @@ -#pragma once -#include <library/cpp/actors/core/actorsystem.h> - -#include <util/generic/ptr.h> - -namespace NYq { - -struct IYqSharedResources : public TThrRefBase { - using TPtr = TIntrusivePtr<IYqSharedResources>; - - virtual void Init(NActors::TActorSystem* actorSystem) = 0; - - // Called after actor system stop. - virtual void Stop() = 0; -}; - -} // NYq +#pragma once +#include <library/cpp/actors/core/actorsystem.h> + +#include <util/generic/ptr.h> + +namespace NYq { + +struct IYqSharedResources : public TThrRefBase { + using TPtr = TIntrusivePtr<IYqSharedResources>; + + virtual void Init(NActors::TActorSystem* actorSystem) = 0; + + // Called after actor system stop. 
+ virtual void Stop() = 0; +}; + +} // NYq diff --git a/ydb/core/yq/libs/shared_resources/interface/ya.make b/ydb/core/yq/libs/shared_resources/interface/ya.make index 629e3bf449d..1420609900f 100644 --- a/ydb/core/yq/libs/shared_resources/interface/ya.make +++ b/ydb/core/yq/libs/shared_resources/interface/ya.make @@ -1,15 +1,15 @@ OWNER(g:yq) - -LIBRARY() - -SRCS( - shared_resources.cpp -) - -PEERDIR( - library/cpp/actors/core -) - -YQL_LAST_ABI_VERSION() - -END() + +LIBRARY() + +SRCS( + shared_resources.cpp +) + +PEERDIR( + library/cpp/actors/core +) + +YQL_LAST_ABI_VERSION() + +END() diff --git a/ydb/core/yq/libs/shared_resources/shared_resources.cpp b/ydb/core/yq/libs/shared_resources/shared_resources.cpp index 3dfd566232f..c98fab83c80 100644 --- a/ydb/core/yq/libs/shared_resources/shared_resources.cpp +++ b/ydb/core/yq/libs/shared_resources/shared_resources.cpp @@ -1,147 +1,147 @@ -#include "shared_resources.h" - +#include "shared_resources.h" + #include <ydb/core/protos/services.pb.h> #include <ydb/core/yq/libs/events/events.h> - -#include <library/cpp/actors/core/actorsystem.h> -#include <library/cpp/actors/core/log.h> -#include <library/cpp/logger/backend.h> -#include <library/cpp/logger/record.h> - -#include <util/generic/cast.h> -#include <util/generic/strbuf.h> -#include <util/stream/file.h> -#include <util/stream/output.h> -#include <util/string/strip.h> -#include <util/system/compiler.h> -#include <util/system/spinlock.h> - -#include <atomic> -#include <memory> - -namespace NYq { - -namespace { - -// Log backend that allows us to create shared YDB driver early (before actor system starts), -// but log to actor system. -class TDeferredActorSystemPtrInitActorLogBackend : public TLogBackend { -public: - using TAtomicActorSystemPtr = std::atomic<NActors::TActorSystem*>; - using TSharedAtomicActorSystemPtr = std::shared_ptr<TAtomicActorSystemPtr>; - - TDeferredActorSystemPtrInitActorLogBackend(TSharedAtomicActorSystemPtr actorSystem, int logComponent) - : ActorSystemPtr(std::move(actorSystem)) - , LogComponent(logComponent) - { - } - - NActors::NLog::EPriority GetActorLogPriority(ELogPriority priority) { - switch (priority) { - case TLOG_EMERG: - return NActors::NLog::PRI_EMERG; - case TLOG_ALERT: - return NActors::NLog::PRI_ALERT; - case TLOG_CRIT: - return NActors::NLog::PRI_CRIT; - case TLOG_ERR: - return NActors::NLog::PRI_ERROR; - case TLOG_WARNING: - return NActors::NLog::PRI_WARN; - case TLOG_NOTICE: - return NActors::NLog::PRI_NOTICE; - case TLOG_INFO: - return NActors::NLog::PRI_INFO; - case TLOG_DEBUG: - return NActors::NLog::PRI_DEBUG; - default: - return NActors::NLog::PRI_TRACE; - } - } - - void WriteData(const TLogRecord& rec) override { - NActors::TActorSystem* actorSystem = ActorSystemPtr->load(std::memory_order_relaxed); - if (Y_LIKELY(actorSystem)) { - LOG_LOG(*actorSystem, GetActorLogPriority(rec.Priority), LogComponent, TString(rec.Data, rec.Len)); - } else { - // Not inited. Temporary write to stderr. 
- TStringBuilder out; - out << TStringBuf(rec.Data, rec.Len) << Endl; - Cerr << out; - } - } - - void ReopenLog() override { - } - -protected: - TSharedAtomicActorSystemPtr ActorSystemPtr; - const int LogComponent; -}; - -struct TActorSystemPtrMixin { - TDeferredActorSystemPtrInitActorLogBackend::TSharedAtomicActorSystemPtr ActorSystemPtr = std::make_shared<TDeferredActorSystemPtrInitActorLogBackend::TAtomicActorSystemPtr>(nullptr); -}; - -struct TYqSharedResourcesImpl : public TActorSystemPtrMixin, public TYqSharedResources { + +#include <library/cpp/actors/core/actorsystem.h> +#include <library/cpp/actors/core/log.h> +#include <library/cpp/logger/backend.h> +#include <library/cpp/logger/record.h> + +#include <util/generic/cast.h> +#include <util/generic/strbuf.h> +#include <util/stream/file.h> +#include <util/stream/output.h> +#include <util/string/strip.h> +#include <util/system/compiler.h> +#include <util/system/spinlock.h> + +#include <atomic> +#include <memory> + +namespace NYq { + +namespace { + +// Log backend that allows us to create shared YDB driver early (before actor system starts), +// but log to actor system. +class TDeferredActorSystemPtrInitActorLogBackend : public TLogBackend { +public: + using TAtomicActorSystemPtr = std::atomic<NActors::TActorSystem*>; + using TSharedAtomicActorSystemPtr = std::shared_ptr<TAtomicActorSystemPtr>; + + TDeferredActorSystemPtrInitActorLogBackend(TSharedAtomicActorSystemPtr actorSystem, int logComponent) + : ActorSystemPtr(std::move(actorSystem)) + , LogComponent(logComponent) + { + } + + NActors::NLog::EPriority GetActorLogPriority(ELogPriority priority) { + switch (priority) { + case TLOG_EMERG: + return NActors::NLog::PRI_EMERG; + case TLOG_ALERT: + return NActors::NLog::PRI_ALERT; + case TLOG_CRIT: + return NActors::NLog::PRI_CRIT; + case TLOG_ERR: + return NActors::NLog::PRI_ERROR; + case TLOG_WARNING: + return NActors::NLog::PRI_WARN; + case TLOG_NOTICE: + return NActors::NLog::PRI_NOTICE; + case TLOG_INFO: + return NActors::NLog::PRI_INFO; + case TLOG_DEBUG: + return NActors::NLog::PRI_DEBUG; + default: + return NActors::NLog::PRI_TRACE; + } + } + + void WriteData(const TLogRecord& rec) override { + NActors::TActorSystem* actorSystem = ActorSystemPtr->load(std::memory_order_relaxed); + if (Y_LIKELY(actorSystem)) { + LOG_LOG(*actorSystem, GetActorLogPriority(rec.Priority), LogComponent, TString(rec.Data, rec.Len)); + } else { + // Not inited. Temporary write to stderr. 
+ TStringBuilder out; + out << TStringBuf(rec.Data, rec.Len) << Endl; + Cerr << out; + } + } + + void ReopenLog() override { + } + +protected: + TSharedAtomicActorSystemPtr ActorSystemPtr; + const int LogComponent; +}; + +struct TActorSystemPtrMixin { + TDeferredActorSystemPtrInitActorLogBackend::TSharedAtomicActorSystemPtr ActorSystemPtr = std::make_shared<TDeferredActorSystemPtrInitActorLogBackend::TAtomicActorSystemPtr>(nullptr); +}; + +struct TYqSharedResourcesImpl : public TActorSystemPtrMixin, public TYqSharedResources { explicit TYqSharedResourcesImpl( const NYq::NConfig::TConfig& config, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, const NMonitoring::TDynamicCounterPtr& counters) : TYqSharedResources(NYdb::TDriver(GetYdbDriverConfig(config.GetCommon().GetYdbDriverConfig()))) - { + { CreateDbPoolHolder(config.GetDbPool(), credentialsProviderFactory, counters); - } - - void Init(NActors::TActorSystem* actorSystem) override { - Y_VERIFY(!ActorSystemPtr->load(std::memory_order_relaxed), "Double IYqSharedResources init"); - ActorSystemPtr->store(actorSystem, std::memory_order_relaxed); - } - - void Stop() override { - YdbDriver.Stop(true); - } - + } + + void Init(NActors::TActorSystem* actorSystem) override { + Y_VERIFY(!ActorSystemPtr->load(std::memory_order_relaxed), "Double IYqSharedResources init"); + ActorSystemPtr->store(actorSystem, std::memory_order_relaxed); + } + + void Stop() override { + YdbDriver.Stop(true); + } + NYdb::TDriverConfig GetYdbDriverConfig(const NYq::NConfig::TYdbDriverConfig& config) { - NYdb::TDriverConfig cfg; + NYdb::TDriverConfig cfg; if (config.GetNetworkThreadsNum()) { cfg.SetNetworkThreadsNum(config.GetNetworkThreadsNum()); - } + } if (config.GetClientThreadsNum()) { cfg.SetClientThreadsNum(config.GetClientThreadsNum()); - } + } if (config.GetGrpcMemoryQuota()) { cfg.SetGrpcMemoryQuota(config.GetGrpcMemoryQuota()); - } - cfg.SetDiscoveryMode(NYdb::EDiscoveryMode::Async); // We are in actor system! - cfg.SetLog(MakeHolder<TDeferredActorSystemPtrInitActorLogBackend>(ActorSystemPtr, NKikimrServices::EServiceKikimr::YDB_SDK)); - return cfg; - } - + } + cfg.SetDiscoveryMode(NYdb::EDiscoveryMode::Async); // We are in actor system! 
+ cfg.SetLog(MakeHolder<TDeferredActorSystemPtrInitActorLogBackend>(ActorSystemPtr, NKikimrServices::EServiceKikimr::YDB_SDK)); + return cfg; + } + void CreateDbPoolHolder( const NYq::NConfig::TDbPoolConfig& config, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, const NMonitoring::TDynamicCounterPtr& counters) { DbPoolHolder = MakeIntrusive<NYq::TDbPoolHolder>(config, YdbDriver, credentialsProviderFactory, counters); - } -}; - -} // namespace - + } +}; + +} // namespace + TYqSharedResources::TPtr CreateYqSharedResourcesImpl( const NYq::NConfig::TConfig& config, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, const NMonitoring::TDynamicCounterPtr& counters) { return MakeIntrusive<TYqSharedResourcesImpl>(config, credentialsProviderFactory, counters); -} - -TYqSharedResources::TYqSharedResources(NYdb::TDriver driver) - : YdbDriver(std::move(driver)) -{ -} - -TYqSharedResources::TPtr TYqSharedResources::Cast(const IYqSharedResources::TPtr& ptr) { - return CheckedCast<TYqSharedResources*>(ptr.Get()); -} - -} // namespace NYq +} + +TYqSharedResources::TYqSharedResources(NYdb::TDriver driver) + : YdbDriver(std::move(driver)) +{ +} + +TYqSharedResources::TPtr TYqSharedResources::Cast(const IYqSharedResources::TPtr& ptr) { + return CheckedCast<TYqSharedResources*>(ptr.Get()); +} + +} // namespace NYq diff --git a/ydb/core/yq/libs/shared_resources/shared_resources.h b/ydb/core/yq/libs/shared_resources/shared_resources.h index bfd1ec66953..d78725d6b05 100644 --- a/ydb/core/yq/libs/shared_resources/shared_resources.h +++ b/ydb/core/yq/libs/shared_resources/shared_resources.h @@ -1,31 +1,31 @@ -#pragma once -#include "db_pool.h" - +#pragma once +#include "db_pool.h" + #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h> #include <ydb/core/yq/libs/config/protos/yq_config.pb.h> #include <ydb/core/yq/libs/shared_resources/interface/shared_resources.h> - + #include <ydb/library/security/ydb_credentials_provider_factory.h> -#include <library/cpp/actors/core/actorsystem.h> - -namespace NYq { - -struct TYqSharedResources : public IYqSharedResources { - using TPtr = TIntrusivePtr<TYqSharedResources>; - static TPtr Cast(const IYqSharedResources::TPtr& ptr); - - // Resources - NYdb::TDriver YdbDriver; +#include <library/cpp/actors/core/actorsystem.h> + +namespace NYq { + +struct TYqSharedResources : public IYqSharedResources { + using TPtr = TIntrusivePtr<TYqSharedResources>; + static TPtr Cast(const IYqSharedResources::TPtr& ptr); + + // Resources + NYdb::TDriver YdbDriver; TDbPoolHolder::TPtr DbPoolHolder; - -protected: - explicit TYqSharedResources(NYdb::TDriver driver); -}; - + +protected: + explicit TYqSharedResources(NYdb::TDriver driver); +}; + TYqSharedResources::TPtr CreateYqSharedResourcesImpl( const NYq::NConfig::TConfig& config, const NKikimr::TYdbCredentialsProviderFactory& credentialsProviderFactory, const NMonitoring::TDynamicCounterPtr& counters); - -} // namespace NYq + +} // namespace NYq diff --git a/ydb/core/yq/libs/shared_resources/ya.make b/ydb/core/yq/libs/shared_resources/ya.make index 46c9a3bb2b5..b5f5cb4d017 100644 --- a/ydb/core/yq/libs/shared_resources/ya.make +++ b/ydb/core/yq/libs/shared_resources/ya.make @@ -1,14 +1,14 @@ OWNER(g:yq) - -LIBRARY() - -SRCS( - db_pool.cpp - shared_resources.cpp -) - -PEERDIR( - library/cpp/actors/core + +LIBRARY() + +SRCS( + db_pool.cpp + shared_resources.cpp +) + +PEERDIR( + library/cpp/actors/core library/cpp/monlib/dynamic_counters ydb/core/protos ydb/core/yq/libs/events @@ -16,12 +16,12 @@ 
PEERDIR( ydb/library/security ydb/public/sdk/cpp/client/ydb_driver ydb/public/sdk/cpp/client/ydb_table -) - -YQL_LAST_ABI_VERSION() - -END() - -RECURSE( - interface -) +) + +YQL_LAST_ABI_VERSION() + +END() + +RECURSE( + interface +) diff --git a/ydb/core/yq/libs/signer/signer.cpp b/ydb/core/yq/libs/signer/signer.cpp index 79f1abc2e45..e178f9ae1ba 100644 --- a/ydb/core/yq/libs/signer/signer.cpp +++ b/ydb/core/yq/libs/signer/signer.cpp @@ -5,7 +5,7 @@ #include <util/stream/file.h> #include <util/string/builder.h> -namespace NYq { +namespace NYq { TSigner::TSigner(const TString& hmacSecret) : HmacSecret(hmacSecret) { diff --git a/ydb/core/yq/libs/signer/signer.h b/ydb/core/yq/libs/signer/signer.h index 9c44b9f6b23..686090f7c96 100644 --- a/ydb/core/yq/libs/signer/signer.h +++ b/ydb/core/yq/libs/signer/signer.h @@ -3,7 +3,7 @@ #include <util/generic/ptr.h> #include <util/generic/string.h> -namespace NYq { +namespace NYq { // keep in sync with token accessor logic class TSigner : public TThrRefBase { public: diff --git a/ydb/core/yq/libs/signer/ut/signer_ut.cpp b/ydb/core/yq/libs/signer/ut/signer_ut.cpp index 7841b5780a8..fdf9844e483 100644 --- a/ydb/core/yq/libs/signer/ut/signer_ut.cpp +++ b/ydb/core/yq/libs/signer/ut/signer_ut.cpp @@ -1,7 +1,7 @@ #include <ydb/core/yq/libs/signer/signer.h> #include <library/cpp/testing/unittest/registar.h> -using namespace NYq; +using namespace NYq; Y_UNIT_TEST_SUITE(Signer) { Y_UNIT_TEST(Basic) { diff --git a/ydb/core/yq/libs/test_connection/events/events.h b/ydb/core/yq/libs/test_connection/events/events.h index 50af463d5b5..36bc20f468b 100644 --- a/ydb/core/yq/libs/test_connection/events/events.h +++ b/ydb/core/yq/libs/test_connection/events/events.h @@ -1,7 +1,7 @@ #pragma once #include <ydb/public/api/protos/yq.pb.h> -#include <ydb/core/yq/libs/events/event_subspace.h> +#include <ydb/core/yq/libs/events/event_subspace.h> #include <ydb/core/yq/libs/control_plane_storage/events/events.h> @@ -16,12 +16,12 @@ namespace NYq { struct TEvTestConnection { // Event ids. 
enum EEv : ui32 { - EvTestConnectionRequest = YqEventSubspaceBegin(NYq::TYqEventSubspace::TestConnection), + EvTestConnectionRequest = YqEventSubspaceBegin(NYq::TYqEventSubspace::TestConnection), EvTestConnectionResponse, EvEnd, }; - static_assert(EvEnd <= YqEventSubspaceEnd(NYq::TYqEventSubspace::TestConnection), "All events must be in their subspace"); + static_assert(EvEnd <= YqEventSubspaceEnd(NYq::TYqEventSubspace::TestConnection), "All events must be in their subspace"); struct TEvTestConnectionRequest : NActors::TEventLocal<TEvTestConnectionRequest, EvTestConnectionRequest> { explicit TEvTestConnectionRequest(const TString& scope, diff --git a/ydb/core/yq/libs/test_connection/events/ya.make b/ydb/core/yq/libs/test_connection/events/ya.make index e447cded588..836c19322b9 100644 --- a/ydb/core/yq/libs/test_connection/events/ya.make +++ b/ydb/core/yq/libs/test_connection/events/ya.make @@ -8,7 +8,7 @@ SRCS( PEERDIR( ydb/core/yq/libs/control_plane_storage/events - ydb/core/yq/libs/events + ydb/core/yq/libs/events ydb/public/api/protos ydb/library/yql/public/issue/protos ) diff --git a/ydb/core/yq/libs/test_connection/test_connection.h b/ydb/core/yq/libs/test_connection/test_connection.h index 7be25e8e572..64a78c49d44 100644 --- a/ydb/core/yq/libs/test_connection/test_connection.h +++ b/ydb/core/yq/libs/test_connection/test_connection.h @@ -1,6 +1,6 @@ #pragma once -#include <ydb/core/yq/libs/actors/logging/log.h> +#include <ydb/core/yq/libs/actors/logging/log.h> #include <ydb/core/yq/libs/config/protos/test_connection.pb.h> #include <library/cpp/actors/core/actor.h> diff --git a/ydb/core/yq/libs/test_connection/ya.make b/ydb/core/yq/libs/test_connection/ya.make index 1b320aafcdb..38481c99b1c 100644 --- a/ydb/core/yq/libs/test_connection/ya.make +++ b/ydb/core/yq/libs/test_connection/ya.make @@ -9,9 +9,9 @@ SRCS( PEERDIR( library/cpp/lwtrace - ydb/core/yq/libs/actors/logging + ydb/core/yq/libs/actors/logging ydb/core/yq/libs/config/protos - ydb/core/yq/libs/test_connection/events + ydb/core/yq/libs/test_connection/events ) YQL_LAST_ABI_VERSION() diff --git a/ydb/core/yq/libs/ya.make b/ydb/core/yq/libs/ya.make index ccc88a55b62..03cd87e60e6 100644 --- a/ydb/core/yq/libs/ya.make +++ b/ydb/core/yq/libs/ya.make @@ -1,9 +1,9 @@ OWNER(g:yq) - + RECURSE( actors audit - checkpoint_storage + checkpoint_storage checkpointing checkpointing_common common @@ -11,19 +11,19 @@ RECURSE( control_plane_proxy control_plane_storage db_resolver - db_schema - events - gateway - graph_params + db_schema + events + gateway + graph_params hmac - init + init logs mock - pretty_printers + pretty_printers private_client read_rule result_formatter - shared_resources + shared_resources signer tasks_packer test_connection diff --git a/ydb/core/yq/libs/ydb/util.cpp b/ydb/core/yq/libs/ydb/util.cpp index 121f9aeafc8..47f1d3f4c37 100644 --- a/ydb/core/yq/libs/ydb/util.cpp +++ b/ydb/core/yq/libs/ydb/util.cpp @@ -2,7 +2,7 @@ #include <util/folder/pathsplit.h> -namespace NYq { +namespace NYq { using namespace NYdb; @@ -19,4 +19,4 @@ TString JoinPath(const TString& basePath, const TString& path) { return prefixPathSplit.Reconstruct(); } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/ydb/util.h b/ydb/core/yq/libs/ydb/util.h index 6ebdb86c7b5..42ccb1a2d0f 100644 --- a/ydb/core/yq/libs/ydb/util.h +++ b/ydb/core/yq/libs/ydb/util.h @@ -5,10 +5,10 @@ #include <util/generic/fwd.h> -namespace NYq { +namespace NYq { //////////////////////////////////////////////////////////////////////////////// TString 
JoinPath(const TString& basePath, const TString& path); -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/ydb/ydb.cpp b/ydb/core/yq/libs/ydb/ydb.cpp index d6096abce32..b88aa81bbd7 100644 --- a/ydb/core/yq/libs/ydb/ydb.cpp +++ b/ydb/core/yq/libs/ydb/ydb.cpp @@ -10,7 +10,7 @@ #include <ydb/library/security/ydb_credentials_provider_factory.h> -namespace NYq { +namespace NYq { using namespace NThreading; using namespace NYdb; @@ -244,18 +244,18 @@ TStatus MakeErrorStatus( return TStatus(code, std::move(issues)); } -NYql::TIssues StatusToIssues(const NYdb::TStatus& status) { - TIssues issues; - if (!status.IsSuccess()) { - issues = status.GetIssues(); - } - return issues; -} - +NYql::TIssues StatusToIssues(const NYdb::TStatus& status) { + TIssues issues; + if (!status.IsSuccess()) { + issues = status.GetIssues(); + } + return issues; +} + TFuture<TIssues> StatusToIssues(const TFuture<TStatus>& future) { return future.Apply( [] (const TFuture<TStatus>& future) { - return StatusToIssues(future.GetValue()); + return StatusToIssues(future.GetValue()); }); } @@ -335,4 +335,4 @@ TFuture<TStatus> RollbackTransaction(const TGenerationContextPtr& context) { return future; } -} // namespace NYq +} // namespace NYq diff --git a/ydb/core/yq/libs/ydb/ydb.h b/ydb/core/yq/libs/ydb/ydb.h index 14c07422883..755bb06aa50 100644 --- a/ydb/core/yq/libs/ydb/ydb.h +++ b/ydb/core/yq/libs/ydb/ydb.h @@ -5,7 +5,7 @@ #include <ydb/public/sdk/cpp/client/ydb_table/table.h> -namespace NYq { +namespace NYq { //////////////////////////////////////////////////////////////////////////////// @@ -103,8 +103,8 @@ NYdb::TStatus MakeErrorStatus( const TString& msg, NYql::ESeverity severity = NYql::TSeverityIds::S_WARNING); -NYql::TIssues StatusToIssues(const NYdb::TStatus& status); - +NYql::TIssues StatusToIssues(const NYdb::TStatus& status); + NThreading::TFuture<NYql::TIssues> StatusToIssues( const NThreading::TFuture<NYdb::TStatus>& future); @@ -126,4 +126,4 @@ NThreading::TFuture<NYdb::TStatus> CheckGeneration(const TGenerationContextPtr& NThreading::TFuture<NYdb::TStatus> RollbackTransaction(const TGenerationContextPtr& context); -} // namespace NYq +} // namespace NYq diff --git a/ydb/library/http_proxy/authorization/auth_helpers.cpp b/ydb/library/http_proxy/authorization/auth_helpers.cpp index 1d3cf30400a..00945558ef5 100644 --- a/ydb/library/http_proxy/authorization/auth_helpers.cpp +++ b/ydb/library/http_proxy/authorization/auth_helpers.cpp @@ -1,56 +1,56 @@ #include "auth_helpers.h" - + #include <ydb/library/http_proxy/error/error.h> - -#include <util/string/ascii.h> -#include <util/string/strip.h> - -#include <utility> - -namespace NKikimr::NSQS { - -static void SkipSpaces(TStringBuf& value) { - while (value && isspace(value[0])) { - value.Skip(1); - } -} - -static size_t FindSpace(const TStringBuf& value) { - size_t pos = 0; - while (pos < value.size()) { - if (isspace(value[pos])) { - return pos; - } else { - ++pos; - } - } - return TStringBuf::npos; -} - -TMap<TString, TString> ParseAuthorizationParams(TStringBuf value) { - TMap<TString, TString> paramsMap; - SkipSpaces(value); - - // parse type - const size_t spaceDelim = FindSpace(value); - if (spaceDelim == TStringBuf::npos) { - throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Invalid authorization parameters structure."; - } - TStringBuf type, params; - value.SplitOn(spaceDelim, type, params); // delimiter is excluded - SkipSpaces(params); - - while (params) { - TStringBuf param = StripString(params.NextTok(',')); - if (param) { - 
TStringBuf k, v; - if (!param.TrySplit('=', k, v) || !k) { - throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Invalid authorization parameters structure."; - } - paramsMap.insert(std::make_pair(to_lower(TString(k)), TString(v))); - } - } - return paramsMap; -} - -} // namespace NKikimr::NSQS + +#include <util/string/ascii.h> +#include <util/string/strip.h> + +#include <utility> + +namespace NKikimr::NSQS { + +static void SkipSpaces(TStringBuf& value) { + while (value && isspace(value[0])) { + value.Skip(1); + } +} + +static size_t FindSpace(const TStringBuf& value) { + size_t pos = 0; + while (pos < value.size()) { + if (isspace(value[pos])) { + return pos; + } else { + ++pos; + } + } + return TStringBuf::npos; +} + +TMap<TString, TString> ParseAuthorizationParams(TStringBuf value) { + TMap<TString, TString> paramsMap; + SkipSpaces(value); + + // parse type + const size_t spaceDelim = FindSpace(value); + if (spaceDelim == TStringBuf::npos) { + throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Invalid authorization parameters structure."; + } + TStringBuf type, params; + value.SplitOn(spaceDelim, type, params); // delimiter is excluded + SkipSpaces(params); + + while (params) { + TStringBuf param = StripString(params.NextTok(',')); + if (param) { + TStringBuf k, v; + if (!param.TrySplit('=', k, v) || !k) { + throw TSQSException(NErrors::INVALID_PARAMETER_VALUE) << "Invalid authorization parameters structure."; + } + paramsMap.insert(std::make_pair(to_lower(TString(k)), TString(v))); + } + } + return paramsMap; +} + +} // namespace NKikimr::NSQS diff --git a/ydb/library/http_proxy/authorization/auth_helpers.h b/ydb/library/http_proxy/authorization/auth_helpers.h index bbe6a5f7bcb..ca3ac5cff71 100644 --- a/ydb/library/http_proxy/authorization/auth_helpers.h +++ b/ydb/library/http_proxy/authorization/auth_helpers.h @@ -1,11 +1,11 @@ -#pragma once -#include <util/generic/is_in.h> -#include <util/generic/strbuf.h> -#include <util/generic/string.h> -#include <util/generic/map.h> - -namespace NKikimr::NSQS { - -TMap<TString, TString> ParseAuthorizationParams(TStringBuf value); // throws on error - -} // namespace NKikimr::NSQS +#pragma once +#include <util/generic/is_in.h> +#include <util/generic/strbuf.h> +#include <util/generic/string.h> +#include <util/generic/map.h> + +namespace NKikimr::NSQS { + +TMap<TString, TString> ParseAuthorizationParams(TStringBuf value); // throws on error + +} // namespace NKikimr::NSQS diff --git a/ydb/library/http_proxy/authorization/signature.cpp b/ydb/library/http_proxy/authorization/signature.cpp index 60368c0614e..03501c5c912 100644 --- a/ydb/library/http_proxy/authorization/signature.cpp +++ b/ydb/library/http_proxy/authorization/signature.cpp @@ -16,7 +16,7 @@ #include <util/string/join.h> #include <util/string/strip.h> -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { static TString HmacSHA256(TStringBuf key, TStringBuf data) { unsigned char hash[SHA256_DIGEST_LENGTH]; @@ -259,4 +259,4 @@ void TAwsRequestSignV4::MakeFinalStringToSign() { FinalStringToSignStr_ = finalStringToSign.Str(); } -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/library/http_proxy/authorization/signature.h b/ydb/library/http_proxy/authorization/signature.h index 57468deee65..c4590cb644f 100644 --- a/ydb/library/http_proxy/authorization/signature.h +++ b/ydb/library/http_proxy/authorization/signature.h @@ -8,7 +8,7 @@ class THttpInput; struct TParsedHttpFull; -namespace NKikimr::NSQS { +namespace NKikimr::NSQS { class TAwsRequestSignV4 { 
public: @@ -49,4 +49,4 @@ private: TString AwsRequest_ = "aws4_request"; }; -} // namespace NKikimr::NSQS +} // namespace NKikimr::NSQS diff --git a/ydb/library/http_proxy/authorization/ut/auth_helpers_ut.cpp b/ydb/library/http_proxy/authorization/ut/auth_helpers_ut.cpp index 0fb753120a7..6da5a44ce73 100644 --- a/ydb/library/http_proxy/authorization/ut/auth_helpers_ut.cpp +++ b/ydb/library/http_proxy/authorization/ut/auth_helpers_ut.cpp @@ -1,49 +1,49 @@ #include <ydb/library/http_proxy/authorization/auth_helpers.h> - + #include <ydb/library/http_proxy/error/error.h> #include <library/cpp/testing/unittest/registar.h> - -namespace NKikimr::NSQS { - -Y_UNIT_TEST_SUITE(AuthorizationParsingTest) { - void CheckParam(TStringBuf headerValue, const TString& name, const TString& value) { - TMap<TString, TString> kv; - UNIT_ASSERT_NO_EXCEPTION_C(kv = ParseAuthorizationParams(headerValue), "Exception while parsing header \"" << headerValue << "\""); - UNIT_ASSERT(IsIn(kv, name)); - UNIT_ASSERT_STRINGS_EQUAL(kv[name], value); - } - - Y_UNIT_TEST(ParsesUsualCredential) { - CheckParam("x credential=a", "credential", "a"); - CheckParam("x ,credential=b", "credential", "b"); - CheckParam("x ,, credential=c,", "credential", "c"); - CheckParam("x credential=d,,,", "credential", "d"); - CheckParam("x credential=e,, ,", "credential", "e"); - } - - Y_UNIT_TEST(ParsesManyKV) { - CheckParam("type k=v, x=y,", "k", "v"); - CheckParam("type k=v, x=y,", "x", "y"); - } - - Y_UNIT_TEST(LowersKey) { - CheckParam("type Key=Value", "key", "Value"); - } - - Y_UNIT_TEST(FailsOnInvalidStrings) { - const TStringBuf badStrings[] = { - "", - "type", - "type key", - "type =value", - "type =", - }; - - for (TStringBuf h : badStrings) { - UNIT_ASSERT_EXCEPTION_C(ParseAuthorizationParams(h), TSQSException, "Exception is expected while parsing header \"" << h << "\""); - } - } -} - -} // namespace NKikimr::NSQS + +namespace NKikimr::NSQS { + +Y_UNIT_TEST_SUITE(AuthorizationParsingTest) { + void CheckParam(TStringBuf headerValue, const TString& name, const TString& value) { + TMap<TString, TString> kv; + UNIT_ASSERT_NO_EXCEPTION_C(kv = ParseAuthorizationParams(headerValue), "Exception while parsing header \"" << headerValue << "\""); + UNIT_ASSERT(IsIn(kv, name)); + UNIT_ASSERT_STRINGS_EQUAL(kv[name], value); + } + + Y_UNIT_TEST(ParsesUsualCredential) { + CheckParam("x credential=a", "credential", "a"); + CheckParam("x ,credential=b", "credential", "b"); + CheckParam("x ,, credential=c,", "credential", "c"); + CheckParam("x credential=d,,,", "credential", "d"); + CheckParam("x credential=e,, ,", "credential", "e"); + } + + Y_UNIT_TEST(ParsesManyKV) { + CheckParam("type k=v, x=y,", "k", "v"); + CheckParam("type k=v, x=y,", "x", "y"); + } + + Y_UNIT_TEST(LowersKey) { + CheckParam("type Key=Value", "key", "Value"); + } + + Y_UNIT_TEST(FailsOnInvalidStrings) { + const TStringBuf badStrings[] = { + "", + "type", + "type key", + "type =value", + "type =", + }; + + for (TStringBuf h : badStrings) { + UNIT_ASSERT_EXCEPTION_C(ParseAuthorizationParams(h), TSQSException, "Exception is expected while parsing header \"" << h << "\""); + } + } +} + +} // namespace NKikimr::NSQS diff --git a/ydb/library/http_proxy/error/error.cpp b/ydb/library/http_proxy/error/error.cpp index 0c91aac8595..dc58c8bda07 100644 --- a/ydb/library/http_proxy/error/error.cpp +++ b/ydb/library/http_proxy/error/error.cpp @@ -1,196 +1,196 @@ -#include "error.h" - -namespace NKikimr::NSQS { - -THashSet<TString> TErrorClass::RegisteredCodes; - 
-TErrorClass::TErrorClass(TString errorCode, ui32 httpStatusCode, TString defaultMessage) - : ErrorCode(std::move(errorCode)) - , HttpStatusCode(httpStatusCode) - , DefaultMessage(std::move(defaultMessage)) -{ - RegisteredCodes.insert(ErrorCode); -} - -TSQSException::TSQSException(const TErrorClass& errorClass) - : ErrorClass(errorClass) -{ -} - -TSQSException::TSQSException() - : TSQSException(NErrors::INTERNAL_FAILURE) -{ -} - -namespace NErrors { -extern const TErrorClass ACCESS_DENIED = { - "AccessDeniedException", - 400, - "You do not have sufficient access to perform this action." -}; - -extern const TErrorClass INCOMPLETE_SIGNATURE = { - "IncompleteSignature", - 400, - "The request signature does not conform to AWS standards." -}; - -extern const TErrorClass INTERNAL_FAILURE = { - "InternalFailure", - 500, - "The request processing has failed because of an unknown error, exception or failure." -}; - -extern const TErrorClass INVALID_ACTION = { - "InvalidAction", - 400, - "The action or operation requested is invalid. Verify that the action is typed correctly." -}; - -extern const TErrorClass INVALID_CLIENT_TOKEN_ID = { - "InvalidClientTokenId", - 403, - "The X.509 certificate or AWS access key ID provided does not exist in our records." -}; - -extern const TErrorClass INVALID_PARAMETER_COMBINATION = { - "InvalidParameterCombination", - 400, - "Parameters that must not be used together were used together." -}; - -extern const TErrorClass INVALID_PARAMETER_VALUE = { - "InvalidParameterValue", - 400, - "An invalid or out-of-range value was supplied for the input parameter." -}; - -extern const TErrorClass INVALID_QUERY_PARAMETER = { - "InvalidQueryParameter", - 400, - "The AWS query string is malformed or does not adhere to AWS standards." -}; - -extern const TErrorClass MALFORMED_QUERY_STRING = { - "MalformedQueryString", - 404, - "The query string contains a syntax error." -}; - -extern const TErrorClass MISSING_ACTION = { - "MissingAction", - 400, - "The request is missing an action or a required parameter." -}; - -extern const TErrorClass MISSING_AUTENTICATION_TOKEN = { - "MissingAuthenticationToken", - 403, - "The request must contain either a valid (registered) AWS access key ID or X.509 certificate." -}; - -extern const TErrorClass MISSING_PARAMETER = { - "MissingParameter", - 400, - "A required parameter for the specified action is not supplied." -}; - -extern const TErrorClass OPT_IN_REQUIRED = { - "OptInRequired", - 403, - "The AWS access key ID needs a subscription for the service." -}; - -extern const TErrorClass REQUEST_EXPIRED = { - "RequestExpired", - 400, - "The request reached the service more than 15 minutes after the date stamp on the request or more than 15 minutes after the request expiration date (such as for pre-signed URLs), or the date stamp on the request is more than 15 minutes in the future." -}; - -extern const TErrorClass SERVICE_UNAVAILABLE = { - "ServiceUnavailable", - 503, - "The request has failed due to a temporary failure of the server." -}; - -extern const TErrorClass THROTTLING_EXCEPTION = { - "ThrottlingException", - 403, - "The request was denied due to request throttling." -}; - -extern const TErrorClass VALIDATION_ERROR = { - "ValidationError", - 400, - "The input fails to satisfy the constraints specified by an AWS service." -}; - -extern const TErrorClass BATCH_ENTRY_IDS_NOT_DISTINCT = { - "AWS.SimpleQueueService.BatchEntryIdsNotDistinct", - 400, - "Two or more batch entries in the request have the same Id." 
-}; - -extern const TErrorClass EMPTY_BATCH_REQUEST = { - "AWS.SimpleQueueService.EmptyBatchRequest", - 400, - "The batch request doesn't contain any entries." -}; - -extern const TErrorClass INVALID_BATCH_ENTRY_ID = { - "AWS.SimpleQueueService.InvalidBatchEntryId", - 400, - "The Id of a batch entry in a batch request doesn't abide by the specification." -}; - -extern const TErrorClass TOO_MANY_ENTRIES_IN_BATCH_REQUEST = { - "AWS.SimpleQueueService.TooManyEntriesInBatchRequest", - 400, - "The batch request contains more entries than permissible." -}; - -extern const TErrorClass BATCH_REQUEST_TOO_LONG = { - "AWS.SimpleQueueService.BatchRequestTooLong", - 400, - "The length of all the messages put together is more than the limit." -}; - -extern const TErrorClass NON_EXISTENT_QUEUE = { - "AWS.SimpleQueueService.NonExistentQueue", - 400, - "The specified queue doesn't exist." -}; - -extern const TErrorClass OVER_LIMIT = { - "OverLimit", - 403, - "The specified action violates a limit. For example, ReceiveMessage returns this error if the maximum number of inflight messages is reached and AddPermission returns this error if the maximum number of permissions for the queue is reached." -}; - -extern const TErrorClass QUEUE_DELETED_RECENTLY = { - "AWS.SimpleQueueService.QueueDeletedRecently", - 400, - "You must wait some time after deleting a queue before you can create another queue with the same name." -}; - -extern const TErrorClass MESSAGE_NOT_INFLIGHT = { - "AWS.SimpleQueueService.MessageNotInflight", - 400, - "The specified message isn't in flight." -}; - -extern const TErrorClass RECEIPT_HANDLE_IS_INVALID = { - "ReceiptHandleIsInvalid", - 400, - "The specified receipt handle isn't valid." -}; - -extern const TErrorClass INVALID_ATTRIBUTE_NAME = { - "InvalidAttributeName", - 400, - "The specified attribute doesn't exist." -}; - +#include "error.h" + +namespace NKikimr::NSQS { + +THashSet<TString> TErrorClass::RegisteredCodes; + +TErrorClass::TErrorClass(TString errorCode, ui32 httpStatusCode, TString defaultMessage) + : ErrorCode(std::move(errorCode)) + , HttpStatusCode(httpStatusCode) + , DefaultMessage(std::move(defaultMessage)) +{ + RegisteredCodes.insert(ErrorCode); +} + +TSQSException::TSQSException(const TErrorClass& errorClass) + : ErrorClass(errorClass) +{ +} + +TSQSException::TSQSException() + : TSQSException(NErrors::INTERNAL_FAILURE) +{ +} + +namespace NErrors { +extern const TErrorClass ACCESS_DENIED = { + "AccessDeniedException", + 400, + "You do not have sufficient access to perform this action." +}; + +extern const TErrorClass INCOMPLETE_SIGNATURE = { + "IncompleteSignature", + 400, + "The request signature does not conform to AWS standards." +}; + +extern const TErrorClass INTERNAL_FAILURE = { + "InternalFailure", + 500, + "The request processing has failed because of an unknown error, exception or failure." +}; + +extern const TErrorClass INVALID_ACTION = { + "InvalidAction", + 400, + "The action or operation requested is invalid. Verify that the action is typed correctly." +}; + +extern const TErrorClass INVALID_CLIENT_TOKEN_ID = { + "InvalidClientTokenId", + 403, + "The X.509 certificate or AWS access key ID provided does not exist in our records." +}; + +extern const TErrorClass INVALID_PARAMETER_COMBINATION = { + "InvalidParameterCombination", + 400, + "Parameters that must not be used together were used together." 
+}; + +extern const TErrorClass INVALID_PARAMETER_VALUE = { + "InvalidParameterValue", + 400, + "An invalid or out-of-range value was supplied for the input parameter." +}; + +extern const TErrorClass INVALID_QUERY_PARAMETER = { + "InvalidQueryParameter", + 400, + "The AWS query string is malformed or does not adhere to AWS standards." +}; + +extern const TErrorClass MALFORMED_QUERY_STRING = { + "MalformedQueryString", + 404, + "The query string contains a syntax error." +}; + +extern const TErrorClass MISSING_ACTION = { + "MissingAction", + 400, + "The request is missing an action or a required parameter." +}; + +extern const TErrorClass MISSING_AUTENTICATION_TOKEN = { + "MissingAuthenticationToken", + 403, + "The request must contain either a valid (registered) AWS access key ID or X.509 certificate." +}; + +extern const TErrorClass MISSING_PARAMETER = { + "MissingParameter", + 400, + "A required parameter for the specified action is not supplied." +}; + +extern const TErrorClass OPT_IN_REQUIRED = { + "OptInRequired", + 403, + "The AWS access key ID needs a subscription for the service." +}; + +extern const TErrorClass REQUEST_EXPIRED = { + "RequestExpired", + 400, + "The request reached the service more than 15 minutes after the date stamp on the request or more than 15 minutes after the request expiration date (such as for pre-signed URLs), or the date stamp on the request is more than 15 minutes in the future." +}; + +extern const TErrorClass SERVICE_UNAVAILABLE = { + "ServiceUnavailable", + 503, + "The request has failed due to a temporary failure of the server." +}; + +extern const TErrorClass THROTTLING_EXCEPTION = { + "ThrottlingException", + 403, + "The request was denied due to request throttling." +}; + +extern const TErrorClass VALIDATION_ERROR = { + "ValidationError", + 400, + "The input fails to satisfy the constraints specified by an AWS service." +}; + +extern const TErrorClass BATCH_ENTRY_IDS_NOT_DISTINCT = { + "AWS.SimpleQueueService.BatchEntryIdsNotDistinct", + 400, + "Two or more batch entries in the request have the same Id." +}; + +extern const TErrorClass EMPTY_BATCH_REQUEST = { + "AWS.SimpleQueueService.EmptyBatchRequest", + 400, + "The batch request doesn't contain any entries." +}; + +extern const TErrorClass INVALID_BATCH_ENTRY_ID = { + "AWS.SimpleQueueService.InvalidBatchEntryId", + 400, + "The Id of a batch entry in a batch request doesn't abide by the specification." +}; + +extern const TErrorClass TOO_MANY_ENTRIES_IN_BATCH_REQUEST = { + "AWS.SimpleQueueService.TooManyEntriesInBatchRequest", + 400, + "The batch request contains more entries than permissible." +}; + +extern const TErrorClass BATCH_REQUEST_TOO_LONG = { + "AWS.SimpleQueueService.BatchRequestTooLong", + 400, + "The length of all the messages put together is more than the limit." +}; + +extern const TErrorClass NON_EXISTENT_QUEUE = { + "AWS.SimpleQueueService.NonExistentQueue", + 400, + "The specified queue doesn't exist." +}; + +extern const TErrorClass OVER_LIMIT = { + "OverLimit", + 403, + "The specified action violates a limit. For example, ReceiveMessage returns this error if the maximum number of inflight messages is reached and AddPermission returns this error if the maximum number of permissions for the queue is reached." +}; + +extern const TErrorClass QUEUE_DELETED_RECENTLY = { + "AWS.SimpleQueueService.QueueDeletedRecently", + 400, + "You must wait some time after deleting a queue before you can create another queue with the same name." 
+}; + +extern const TErrorClass MESSAGE_NOT_INFLIGHT = { + "AWS.SimpleQueueService.MessageNotInflight", + 400, + "The specified message isn't in flight." +}; + +extern const TErrorClass RECEIPT_HANDLE_IS_INVALID = { + "ReceiptHandleIsInvalid", + 400, + "The specified receipt handle isn't valid." +}; + +extern const TErrorClass INVALID_ATTRIBUTE_NAME = { + "InvalidAttributeName", + 400, + "The specified attribute doesn't exist." +}; + extern const TErrorClass INVALID_ATTRIBUTE_VALUE = { "InvalidAttributeValue", 400, @@ -198,22 +198,22 @@ extern const TErrorClass INVALID_ATTRIBUTE_VALUE = { }; extern const TErrorClass LEADER_RESOLVING_ERROR = { - "InternalFailure", - 500, + "InternalFailure", + 500, "Queue leader resolving error." -}; - +}; + extern const TErrorClass LEADER_SESSION_ERROR = { - "InternalFailure", - 500, + "InternalFailure", + 500, "Queue leader session error." -}; - -extern const TErrorClass TIMEOUT = { - "InternalFailure", - 504, - "Timeout." -}; - -} // namespace NErrors -} // namespace NKikimr::NSQS +}; + +extern const TErrorClass TIMEOUT = { + "InternalFailure", + 504, + "Timeout." +}; + +} // namespace NErrors +} // namespace NKikimr::NSQS diff --git a/ydb/library/http_proxy/error/error.h b/ydb/library/http_proxy/error/error.h index 02b9dca3d97..f48cd3e63ba 100644 --- a/ydb/library/http_proxy/error/error.h +++ b/ydb/library/http_proxy/error/error.h @@ -1,97 +1,97 @@ -#pragma once - -#include <util/generic/hash_set.h> -#include <util/generic/string.h> -#include <util/generic/yexception.h> - -namespace NKikimr::NSQS { - -struct TErrorClass { - const TString ErrorCode; - const ui32 HttpStatusCode; - const TString DefaultMessage; - - TErrorClass(TString errorCode, ui32 httpStatusCode, TString defaultMessage); - TErrorClass() = delete; - TErrorClass(const TErrorClass&) = delete; - TErrorClass(TErrorClass&&) = delete; - - static const THashSet<TString>& GetAvailableErrorCodes() { - return RegisteredCodes; - } - -private: - static THashSet<TString> RegisteredCodes; -}; - -struct TSQSException: public yexception { - explicit TSQSException(const TErrorClass& errorClass); - TSQSException(); // NErrors::INTERNAL_FAILURE - - const TErrorClass& ErrorClass; -}; - -namespace NErrors { -// Common errors -// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/CommonErrors.html -extern const TErrorClass ACCESS_DENIED; -extern const TErrorClass INCOMPLETE_SIGNATURE; -extern const TErrorClass INTERNAL_FAILURE; -extern const TErrorClass INVALID_ACTION; -extern const TErrorClass INVALID_CLIENT_TOKEN_ID; -extern const TErrorClass INVALID_PARAMETER_COMBINATION; -extern const TErrorClass INVALID_PARAMETER_VALUE; -extern const TErrorClass INVALID_QUERY_PARAMETER; -extern const TErrorClass MALFORMED_QUERY_STRING; -extern const TErrorClass MISSING_ACTION; -extern const TErrorClass MISSING_AUTENTICATION_TOKEN; -extern const TErrorClass MISSING_PARAMETER; -extern const TErrorClass OPT_IN_REQUIRED; -extern const TErrorClass REQUEST_EXPIRED; -extern const TErrorClass SERVICE_UNAVAILABLE; -extern const TErrorClass THROTTLING_EXCEPTION; -extern const TErrorClass VALIDATION_ERROR; - -// Batch requests errors -// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteMessageBatch.html -// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html -extern const TErrorClass BATCH_ENTRY_IDS_NOT_DISTINCT; -extern const TErrorClass EMPTY_BATCH_REQUEST; -extern const TErrorClass INVALID_BATCH_ENTRY_ID; -extern const TErrorClass 
TOO_MANY_ENTRIES_IN_BATCH_REQUEST; -extern const TErrorClass BATCH_REQUEST_TOO_LONG; - -// GetQueueUrl errors -// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_GetQueueUrl.html -extern const TErrorClass NON_EXISTENT_QUEUE; - -// ReceiveMessage errors -// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html -extern const TErrorClass OVER_LIMIT; - -// CreateQueue errors -// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html -extern const TErrorClass QUEUE_DELETED_RECENTLY; - -// ChangeMessageVisibility errors -// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ChangeMessageVisibility.html -extern const TErrorClass MESSAGE_NOT_INFLIGHT; - -// DeleteMessage errors -// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteMessage.html -extern const TErrorClass RECEIPT_HANDLE_IS_INVALID; - -// GetQueueAttributes errors -// https://docs.aws.amazon.com/en_us/AWSSimpleQueueService/latest/APIReference/API_GetQueueAttributes.html -extern const TErrorClass INVALID_ATTRIBUTE_NAME; +#pragma once + +#include <util/generic/hash_set.h> +#include <util/generic/string.h> +#include <util/generic/yexception.h> + +namespace NKikimr::NSQS { + +struct TErrorClass { + const TString ErrorCode; + const ui32 HttpStatusCode; + const TString DefaultMessage; + + TErrorClass(TString errorCode, ui32 httpStatusCode, TString defaultMessage); + TErrorClass() = delete; + TErrorClass(const TErrorClass&) = delete; + TErrorClass(TErrorClass&&) = delete; + + static const THashSet<TString>& GetAvailableErrorCodes() { + return RegisteredCodes; + } + +private: + static THashSet<TString> RegisteredCodes; +}; + +struct TSQSException: public yexception { + explicit TSQSException(const TErrorClass& errorClass); + TSQSException(); // NErrors::INTERNAL_FAILURE + + const TErrorClass& ErrorClass; +}; + +namespace NErrors { +// Common errors +// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/CommonErrors.html +extern const TErrorClass ACCESS_DENIED; +extern const TErrorClass INCOMPLETE_SIGNATURE; +extern const TErrorClass INTERNAL_FAILURE; +extern const TErrorClass INVALID_ACTION; +extern const TErrorClass INVALID_CLIENT_TOKEN_ID; +extern const TErrorClass INVALID_PARAMETER_COMBINATION; +extern const TErrorClass INVALID_PARAMETER_VALUE; +extern const TErrorClass INVALID_QUERY_PARAMETER; +extern const TErrorClass MALFORMED_QUERY_STRING; +extern const TErrorClass MISSING_ACTION; +extern const TErrorClass MISSING_AUTENTICATION_TOKEN; +extern const TErrorClass MISSING_PARAMETER; +extern const TErrorClass OPT_IN_REQUIRED; +extern const TErrorClass REQUEST_EXPIRED; +extern const TErrorClass SERVICE_UNAVAILABLE; +extern const TErrorClass THROTTLING_EXCEPTION; +extern const TErrorClass VALIDATION_ERROR; + +// Batch requests errors +// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteMessageBatch.html +// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html +extern const TErrorClass BATCH_ENTRY_IDS_NOT_DISTINCT; +extern const TErrorClass EMPTY_BATCH_REQUEST; +extern const TErrorClass INVALID_BATCH_ENTRY_ID; +extern const TErrorClass TOO_MANY_ENTRIES_IN_BATCH_REQUEST; +extern const TErrorClass BATCH_REQUEST_TOO_LONG; + +// GetQueueUrl errors +// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_GetQueueUrl.html +extern const TErrorClass NON_EXISTENT_QUEUE; + +// 
ReceiveMessage errors +// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html +extern const TErrorClass OVER_LIMIT; + +// CreateQueue errors +// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html +extern const TErrorClass QUEUE_DELETED_RECENTLY; + +// ChangeMessageVisibility errors +// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ChangeMessageVisibility.html +extern const TErrorClass MESSAGE_NOT_INFLIGHT; + +// DeleteMessage errors +// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteMessage.html +extern const TErrorClass RECEIPT_HANDLE_IS_INVALID; + +// GetQueueAttributes errors +// https://docs.aws.amazon.com/en_us/AWSSimpleQueueService/latest/APIReference/API_GetQueueAttributes.html +extern const TErrorClass INVALID_ATTRIBUTE_NAME; extern const TErrorClass INVALID_ATTRIBUTE_VALUE; - + // Leader resolving errors extern const TErrorClass LEADER_RESOLVING_ERROR; extern const TErrorClass LEADER_SESSION_ERROR; - -// Internal timeout -extern const TErrorClass TIMEOUT; -} // namespace NErrors - -} // namespace NKikimr::NSQS + +// Internal timeout +extern const TErrorClass TIMEOUT; +} // namespace NErrors + +} // namespace NKikimr::NSQS diff --git a/ydb/library/persqueue/ya.make b/ydb/library/persqueue/ya.make index d28331e52e9..1598ed5d3f9 100644 --- a/ydb/library/persqueue/ya.make +++ b/ydb/library/persqueue/ya.make @@ -6,4 +6,4 @@ RECURSE( tests topic_parser topic_parser_public -) +) diff --git a/ydb/library/protobuf_printer/hide_field_printer.cpp b/ydb/library/protobuf_printer/hide_field_printer.cpp index 743701a31b3..029efb563d5 100644 --- a/ydb/library/protobuf_printer/hide_field_printer.cpp +++ b/ydb/library/protobuf_printer/hide_field_printer.cpp @@ -1,57 +1,57 @@ -#include "hide_field_printer.h" - -namespace NKikimr { - -void THideFieldValuePrinter::PrintBool(bool, google::protobuf::TextFormat::BaseTextGenerator* generator) const { - generator->PrintLiteral("***"); -} - -void THideFieldValuePrinter::PrintInt32(i32, google::protobuf::TextFormat::BaseTextGenerator* generator) const { - generator->PrintLiteral("***"); -} - -void THideFieldValuePrinter::PrintUInt32(ui32, google::protobuf::TextFormat::BaseTextGenerator* generator) const { - generator->PrintLiteral("***"); -} - -void THideFieldValuePrinter::PrintInt64(i64, google::protobuf::TextFormat::BaseTextGenerator* generator) const { - generator->PrintLiteral("***"); -} - -void THideFieldValuePrinter::PrintUInt64(ui64, google::protobuf::TextFormat::BaseTextGenerator* generator) const { - generator->PrintLiteral("***"); -} - -void THideFieldValuePrinter::PrintFloat(float, google::protobuf::TextFormat::BaseTextGenerator* generator) const { - generator->PrintLiteral("***"); -} - -void THideFieldValuePrinter::PrintDouble(double, google::protobuf::TextFormat::BaseTextGenerator* generator) const { - generator->PrintLiteral("***"); -} - -void THideFieldValuePrinter::PrintString(const TString&, google::protobuf::TextFormat::BaseTextGenerator* generator) const { +#include "hide_field_printer.h" + +namespace NKikimr { + +void THideFieldValuePrinter::PrintBool(bool, google::protobuf::TextFormat::BaseTextGenerator* generator) const { + generator->PrintLiteral("***"); +} + +void THideFieldValuePrinter::PrintInt32(i32, google::protobuf::TextFormat::BaseTextGenerator* generator) const { + generator->PrintLiteral("***"); +} + +void THideFieldValuePrinter::PrintUInt32(ui32, 
google::protobuf::TextFormat::BaseTextGenerator* generator) const { + generator->PrintLiteral("***"); +} + +void THideFieldValuePrinter::PrintInt64(i64, google::protobuf::TextFormat::BaseTextGenerator* generator) const { + generator->PrintLiteral("***"); +} + +void THideFieldValuePrinter::PrintUInt64(ui64, google::protobuf::TextFormat::BaseTextGenerator* generator) const { + generator->PrintLiteral("***"); +} + +void THideFieldValuePrinter::PrintFloat(float, google::protobuf::TextFormat::BaseTextGenerator* generator) const { + generator->PrintLiteral("***"); +} + +void THideFieldValuePrinter::PrintDouble(double, google::protobuf::TextFormat::BaseTextGenerator* generator) const { + generator->PrintLiteral("***"); +} + +void THideFieldValuePrinter::PrintString(const TString&, google::protobuf::TextFormat::BaseTextGenerator* generator) const { generator->PrintLiteral("\"***\""); -} - -void THideFieldValuePrinter::PrintBytes(const TString&, google::protobuf::TextFormat::BaseTextGenerator* generator) const { +} + +void THideFieldValuePrinter::PrintBytes(const TString&, google::protobuf::TextFormat::BaseTextGenerator* generator) const { generator->PrintLiteral("\"***\""); -} - -void THideFieldValuePrinter::PrintEnum(i32, const TString&, google::protobuf::TextFormat::BaseTextGenerator* generator) const { - generator->PrintLiteral("***"); -} - -bool THideFieldValuePrinter::PrintMessageContent(const google::protobuf::Message&, int, - int, bool singleLineMode, - google::protobuf::TextFormat::BaseTextGenerator* generator) const { - generator->PrintLiteral("***"); - if (singleLineMode) { - generator->PrintLiteral(" "); - } else { - generator->PrintLiteral("\n"); - } - return true /* don't use default printing logic */; -} - -} // namespace NKikimr +} + +void THideFieldValuePrinter::PrintEnum(i32, const TString&, google::protobuf::TextFormat::BaseTextGenerator* generator) const { + generator->PrintLiteral("***"); +} + +bool THideFieldValuePrinter::PrintMessageContent(const google::protobuf::Message&, int, + int, bool singleLineMode, + google::protobuf::TextFormat::BaseTextGenerator* generator) const { + generator->PrintLiteral("***"); + if (singleLineMode) { + generator->PrintLiteral(" "); + } else { + generator->PrintLiteral("\n"); + } + return true /* don't use default printing logic */; +} + +} // namespace NKikimr diff --git a/ydb/library/protobuf_printer/hide_field_printer.h b/ydb/library/protobuf_printer/hide_field_printer.h index 32d4c7b6b2e..1910d9ff55f 100644 --- a/ydb/library/protobuf_printer/hide_field_printer.h +++ b/ydb/library/protobuf_printer/hide_field_printer.h @@ -1,26 +1,26 @@ -#pragma once -#include <google/protobuf/text_format.h> - -namespace NKikimr { - -class THideFieldValuePrinter : public google::protobuf::TextFormat::FastFieldValuePrinter { -public: - void PrintBool(bool val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; - void PrintInt32(i32 val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; - void PrintUInt32(ui32 val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; - void PrintInt64(i64 val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; - void PrintUInt64(ui64 val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; - void PrintFloat(float val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; - void PrintDouble(double val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; - void 
PrintString(const TString& val, - google::protobuf::TextFormat::BaseTextGenerator* generator) const override; - void PrintBytes(const TString& val, - google::protobuf::TextFormat::BaseTextGenerator* generator) const override; - void PrintEnum(i32 val, const TString& name, - google::protobuf::TextFormat::BaseTextGenerator* generator) const override; - bool PrintMessageContent(const google::protobuf::Message& message, int fieldIndex, - int fieldCount, bool singleLineMode, - google::protobuf::TextFormat::BaseTextGenerator* generator) const override; -}; - -} // namespace NKikimr +#pragma once +#include <google/protobuf/text_format.h> + +namespace NKikimr { + +class THideFieldValuePrinter : public google::protobuf::TextFormat::FastFieldValuePrinter { +public: + void PrintBool(bool val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; + void PrintInt32(i32 val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; + void PrintUInt32(ui32 val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; + void PrintInt64(i64 val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; + void PrintUInt64(ui64 val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; + void PrintFloat(float val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; + void PrintDouble(double val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; + void PrintString(const TString& val, + google::protobuf::TextFormat::BaseTextGenerator* generator) const override; + void PrintBytes(const TString& val, + google::protobuf::TextFormat::BaseTextGenerator* generator) const override; + void PrintEnum(i32 val, const TString& name, + google::protobuf::TextFormat::BaseTextGenerator* generator) const override; + bool PrintMessageContent(const google::protobuf::Message& message, int fieldIndex, + int fieldCount, bool singleLineMode, + google::protobuf::TextFormat::BaseTextGenerator* generator) const override; +}; + +} // namespace NKikimr diff --git a/ydb/library/protobuf_printer/protobuf_printer.h b/ydb/library/protobuf_printer/protobuf_printer.h index d9487c1cd31..6d094172200 100644 --- a/ydb/library/protobuf_printer/protobuf_printer.h +++ b/ydb/library/protobuf_printer/protobuf_printer.h @@ -1,30 +1,30 @@ -#pragma once -#include <google/protobuf/message.h> -#include <google/protobuf/text_format.h> - -namespace NKikimr { - -class TCustomizableTextFormatPrinter : public google::protobuf::TextFormat::Printer { -public: - template <class TPrinter> - bool RegisterFieldValuePrinters(const google::protobuf::Descriptor* desc, const char* name) { - const google::protobuf::FieldDescriptor* field = desc->FindFieldByName(name); - Y_ASSERT(field != nullptr); - return RegisterFieldValuePrinter(field, new TPrinter()); - } - - template <class TPrinter, class... T> - bool RegisterFieldValuePrinters(const google::protobuf::Descriptor* desc, const char* name, T... fieldNames) { - const bool firstRegister = RegisterFieldValuePrinters<TPrinter>(desc, name); - const bool otherRegisters = RegisterFieldValuePrinters<TPrinter>(desc, fieldNames...); - return firstRegister && otherRegisters; - } - - template <class TMsg, class TPrinter, class... T> - bool RegisterFieldValuePrinters(T... 
fieldNames) { - const google::protobuf::Descriptor* desc = TMsg::descriptor(); - return RegisterFieldValuePrinters<TPrinter>(desc, fieldNames...); - } -}; - -} // namespace NKikimr +#pragma once +#include <google/protobuf/message.h> +#include <google/protobuf/text_format.h> + +namespace NKikimr { + +class TCustomizableTextFormatPrinter : public google::protobuf::TextFormat::Printer { +public: + template <class TPrinter> + bool RegisterFieldValuePrinters(const google::protobuf::Descriptor* desc, const char* name) { + const google::protobuf::FieldDescriptor* field = desc->FindFieldByName(name); + Y_ASSERT(field != nullptr); + return RegisterFieldValuePrinter(field, new TPrinter()); + } + + template <class TPrinter, class... T> + bool RegisterFieldValuePrinters(const google::protobuf::Descriptor* desc, const char* name, T... fieldNames) { + const bool firstRegister = RegisterFieldValuePrinters<TPrinter>(desc, name); + const bool otherRegisters = RegisterFieldValuePrinters<TPrinter>(desc, fieldNames...); + return firstRegister && otherRegisters; + } + + template <class TMsg, class TPrinter, class... T> + bool RegisterFieldValuePrinters(T... fieldNames) { + const google::protobuf::Descriptor* desc = TMsg::descriptor(); + return RegisterFieldValuePrinters<TPrinter>(desc, fieldNames...); + } +}; + +} // namespace NKikimr diff --git a/ydb/library/protobuf_printer/protobuf_printer_ut.cpp b/ydb/library/protobuf_printer/protobuf_printer_ut.cpp index f71c330d46b..59a5873a601 100644 --- a/ydb/library/protobuf_printer/protobuf_printer_ut.cpp +++ b/ydb/library/protobuf_printer/protobuf_printer_ut.cpp @@ -1,66 +1,66 @@ -#include "hide_field_printer.h" -#include "protobuf_printer.h" +#include "hide_field_printer.h" +#include "protobuf_printer.h" #include "security_printer.h" -#include "stream_helper.h" -#include "token_field_printer.h" +#include "stream_helper.h" +#include "token_field_printer.h" #include <ydb/library/protobuf_printer/ut/test_proto.pb.h> - -#include <library/cpp/testing/unittest/registar.h> - -#include <util/string/builder.h> - -using namespace NKikimr; - -Y_UNIT_TEST_SUITE(PrinterWrapperTest) { - Y_UNIT_TEST(PrintsToStream) { - TStringBuilder s; - NTestProto::TTestProto m; - m.set_s1("xxx"); - s << TProtobufPrinterOutputWrapper(m, google::protobuf::TextFormat::Printer()); - UNIT_ASSERT_STRINGS_EQUAL(s, "s1: \"xxx\"\n"); - } - - Y_UNIT_TEST(PrintsToString) { - NTestProto::TTestProto m; - m.set_s1("xxx"); - const TString s = TProtobufPrinterOutputWrapper(m, google::protobuf::TextFormat::Printer()); - UNIT_ASSERT_STRINGS_EQUAL(s, "s1: \"xxx\"\n"); - } -} - -Y_UNIT_TEST_SUITE(TokenPrinterTest) { - Y_UNIT_TEST(PrintToken) { - NTestProto::TTestProto m; - m.set_token("123456789012345678901234567890"); - - TCustomizableTextFormatPrinter printer; - printer.RegisterFieldValuePrinters<NTestProto::TTestProto, TTokenFieldValuePrinter>("token"); - const TString s = TProtobufPrinterOutputWrapper(m, printer); - UNIT_ASSERT_STRINGS_EQUAL(s, "token: \"1234****7890 (F229119D)\"\n"); - } -} - -Y_UNIT_TEST_SUITE(HideFieldPrinterTest) { - Y_UNIT_TEST(PrintNoValue) { - NTestProto::TTestProto m; - m.set_s1("trololo"); - m.set_s2("trololo"); - m.mutable_msg()->set_i(42); - - TCustomizableTextFormatPrinter printer; - printer.RegisterFieldValuePrinters<NTestProto::TTestProto, THideFieldValuePrinter>("s1", "s2", "msg"); - { - const TString s = TProtobufPrinterOutputWrapper(m, printer); + +#include <library/cpp/testing/unittest/registar.h> + +#include <util/string/builder.h> + +using namespace NKikimr; + 
+Y_UNIT_TEST_SUITE(PrinterWrapperTest) { + Y_UNIT_TEST(PrintsToStream) { + TStringBuilder s; + NTestProto::TTestProto m; + m.set_s1("xxx"); + s << TProtobufPrinterOutputWrapper(m, google::protobuf::TextFormat::Printer()); + UNIT_ASSERT_STRINGS_EQUAL(s, "s1: \"xxx\"\n"); + } + + Y_UNIT_TEST(PrintsToString) { + NTestProto::TTestProto m; + m.set_s1("xxx"); + const TString s = TProtobufPrinterOutputWrapper(m, google::protobuf::TextFormat::Printer()); + UNIT_ASSERT_STRINGS_EQUAL(s, "s1: \"xxx\"\n"); + } +} + +Y_UNIT_TEST_SUITE(TokenPrinterTest) { + Y_UNIT_TEST(PrintToken) { + NTestProto::TTestProto m; + m.set_token("123456789012345678901234567890"); + + TCustomizableTextFormatPrinter printer; + printer.RegisterFieldValuePrinters<NTestProto::TTestProto, TTokenFieldValuePrinter>("token"); + const TString s = TProtobufPrinterOutputWrapper(m, printer); + UNIT_ASSERT_STRINGS_EQUAL(s, "token: \"1234****7890 (F229119D)\"\n"); + } +} + +Y_UNIT_TEST_SUITE(HideFieldPrinterTest) { + Y_UNIT_TEST(PrintNoValue) { + NTestProto::TTestProto m; + m.set_s1("trololo"); + m.set_s2("trololo"); + m.mutable_msg()->set_i(42); + + TCustomizableTextFormatPrinter printer; + printer.RegisterFieldValuePrinters<NTestProto::TTestProto, THideFieldValuePrinter>("s1", "s2", "msg"); + { + const TString s = TProtobufPrinterOutputWrapper(m, printer); UNIT_ASSERT_STRINGS_EQUAL(s, "s1: \"***\"\ns2: \"***\"\nmsg {\n ***\n}\n"); - } - - printer.SetSingleLineMode(true); - { - const TString s = TProtobufPrinterOutputWrapper(m, printer); + } + + printer.SetSingleLineMode(true); + { + const TString s = TProtobufPrinterOutputWrapper(m, printer); UNIT_ASSERT_STRINGS_EQUAL(s, "s1: \"***\" s2: \"***\" msg { *** } "); - } - } -} + } + } +} Y_UNIT_TEST_SUITE(SecurityPrinterTest) { Y_UNIT_TEST(PrintSensitive) { diff --git a/ydb/library/protobuf_printer/stream_helper.cpp b/ydb/library/protobuf_printer/stream_helper.cpp index a5414defc74..73aebffe2a6 100644 --- a/ydb/library/protobuf_printer/stream_helper.cpp +++ b/ydb/library/protobuf_printer/stream_helper.cpp @@ -1,25 +1,25 @@ -#include "stream_helper.h" - -#include <google/protobuf/messagext.h> - -namespace NKikimr { - -TProtobufPrinterOutputWrapper::operator TString() const { - TString string; - Printer.PrintToString(Msg, &string); - return string; -} - -void TProtobufPrinterOutputWrapper::Print(IOutputStream& o) const { - NProtoBuf::io::TCopyingOutputStreamAdaptor adaptor(&o); - if (!Printer.Print(Msg, &adaptor)) { - o << "Error printing message"; - } -} - -} // namespace NKikimr - -template<> -void Out<NKikimr::TProtobufPrinterOutputWrapper>(IOutputStream& o, typename TTypeTraits<NKikimr::TProtobufPrinterOutputWrapper>::TFuncParam x) { - return x.Print(o); -} +#include "stream_helper.h" + +#include <google/protobuf/messagext.h> + +namespace NKikimr { + +TProtobufPrinterOutputWrapper::operator TString() const { + TString string; + Printer.PrintToString(Msg, &string); + return string; +} + +void TProtobufPrinterOutputWrapper::Print(IOutputStream& o) const { + NProtoBuf::io::TCopyingOutputStreamAdaptor adaptor(&o); + if (!Printer.Print(Msg, &adaptor)) { + o << "Error printing message"; + } +} + +} // namespace NKikimr + +template<> +void Out<NKikimr::TProtobufPrinterOutputWrapper>(IOutputStream& o, typename TTypeTraits<NKikimr::TProtobufPrinterOutputWrapper>::TFuncParam x) { + return x.Print(o); +} diff --git a/ydb/library/protobuf_printer/stream_helper.h b/ydb/library/protobuf_printer/stream_helper.h index 1c67d9d84df..05c089b4181 100644 --- a/ydb/library/protobuf_printer/stream_helper.h +++ 
b/ydb/library/protobuf_printer/stream_helper.h @@ -1,27 +1,27 @@ -#pragma once -#include <util/generic/noncopyable.h> -#include <util/stream/output.h> - -#include <google/protobuf/message.h> -#include <google/protobuf/text_format.h> - -namespace NKikimr { - -class TProtobufPrinterOutputWrapper : public TMoveOnly { -public: - TProtobufPrinterOutputWrapper(const google::protobuf::Message& msg, const google::protobuf::TextFormat::Printer& printer) - : Msg(msg) - , Printer(printer) - { - } - - operator TString() const; - - void Print(IOutputStream& o) const; - -private: - const google::protobuf::Message& Msg; - const google::protobuf::TextFormat::Printer& Printer; -}; - -} // namespace NKikimr +#pragma once +#include <util/generic/noncopyable.h> +#include <util/stream/output.h> + +#include <google/protobuf/message.h> +#include <google/protobuf/text_format.h> + +namespace NKikimr { + +class TProtobufPrinterOutputWrapper : public TMoveOnly { +public: + TProtobufPrinterOutputWrapper(const google::protobuf::Message& msg, const google::protobuf::TextFormat::Printer& printer) + : Msg(msg) + , Printer(printer) + { + } + + operator TString() const; + + void Print(IOutputStream& o) const; + +private: + const google::protobuf::Message& Msg; + const google::protobuf::TextFormat::Printer& Printer; +}; + +} // namespace NKikimr diff --git a/ydb/library/protobuf_printer/token_field_printer.cpp b/ydb/library/protobuf_printer/token_field_printer.cpp index 3913c0ca190..d655828578c 100644 --- a/ydb/library/protobuf_printer/token_field_printer.cpp +++ b/ydb/library/protobuf_printer/token_field_printer.cpp @@ -1,14 +1,14 @@ -#include "token_field_printer.h" - +#include "token_field_printer.h" + #include <ydb/library/security/util.h> - -namespace NKikimr { - -void TTokenFieldValuePrinter::PrintString(const TProtoStringType& val, google::protobuf::TextFormat::BaseTextGenerator* generator) const { - const TString masked = MaskTicket(val); - generator->PrintLiteral("\""); - generator->Print(masked.data(), masked.size()); - generator->PrintLiteral("\""); -} - -} // namespace NKikimr + +namespace NKikimr { + +void TTokenFieldValuePrinter::PrintString(const TProtoStringType& val, google::protobuf::TextFormat::BaseTextGenerator* generator) const { + const TString masked = MaskTicket(val); + generator->PrintLiteral("\""); + generator->Print(masked.data(), masked.size()); + generator->PrintLiteral("\""); +} + +} // namespace NKikimr diff --git a/ydb/library/protobuf_printer/token_field_printer.h b/ydb/library/protobuf_printer/token_field_printer.h index 9e3ef5e3cfa..9ebdb58448d 100644 --- a/ydb/library/protobuf_printer/token_field_printer.h +++ b/ydb/library/protobuf_printer/token_field_printer.h @@ -1,11 +1,11 @@ -#pragma once -#include <google/protobuf/text_format.h> - -namespace NKikimr { - -class TTokenFieldValuePrinter : public google::protobuf::TextFormat::FastFieldValuePrinter { -public: - void PrintString(const TProtoStringType& val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; -}; - -} // namespace NKikimr +#pragma once +#include <google/protobuf/text_format.h> + +namespace NKikimr { + +class TTokenFieldValuePrinter : public google::protobuf::TextFormat::FastFieldValuePrinter { +public: + void PrintString(const TProtoStringType& val, google::protobuf::TextFormat::BaseTextGenerator* generator) const override; +}; + +} // namespace NKikimr diff --git a/ydb/library/protobuf_printer/ut/test_proto.proto b/ydb/library/protobuf_printer/ut/test_proto.proto index 80bf23c804c..91e3584f086 
100644 --- a/ydb/library/protobuf_printer/ut/test_proto.proto +++ b/ydb/library/protobuf_printer/ut/test_proto.proto @@ -1,19 +1,19 @@ -syntax = "proto3"; - -package NTestProto; - +syntax = "proto3"; + +package NTestProto; + import "ydb/public/api/protos/annotations/sensitive.proto"; -message TTestSubProto { - uint64 i = 1; -} - -message TTestProto { - string s1 = 1; - string s2 = 2; - string token = 3; - TTestSubProto msg = 4; -} +message TTestSubProto { + uint64 i = 1; +} + +message TTestProto { + string s1 = 1; + string s2 = 2; + string token = 3; + TTestSubProto msg = 4; +} message TConnection1 { string database_id = 1; diff --git a/ydb/library/protobuf_printer/ut/ya.make b/ydb/library/protobuf_printer/ut/ya.make index 2c1576f2c62..e92fedf4cd0 100644 --- a/ydb/library/protobuf_printer/ut/ya.make +++ b/ydb/library/protobuf_printer/ut/ya.make @@ -1,10 +1,10 @@ UNITTEST_FOR(ydb/library/protobuf_printer) - -OWNER(g:kikimr) - -SRCS( - protobuf_printer_ut.cpp - test_proto.proto -) - -END() + +OWNER(g:kikimr) + +SRCS( + protobuf_printer_ut.cpp + test_proto.proto +) + +END() diff --git a/ydb/library/protobuf_printer/ya.make b/ydb/library/protobuf_printer/ya.make index 58eff7542a4..3bfe62f07b5 100644 --- a/ydb/library/protobuf_printer/ya.make +++ b/ydb/library/protobuf_printer/ya.make @@ -1,24 +1,24 @@ -LIBRARY() - -OWNER( - galaxycrab - g:kikimr -) - -SRCS( - hide_field_printer.cpp - stream_helper.cpp - token_field_printer.cpp -) - -PEERDIR( - contrib/libs/protobuf +LIBRARY() + +OWNER( + galaxycrab + g:kikimr +) + +SRCS( + hide_field_printer.cpp + stream_helper.cpp + token_field_printer.cpp +) + +PEERDIR( + contrib/libs/protobuf ydb/library/security ydb/public/api/protos/annotations -) - -END() - -RECURSE_FOR_TESTS( - ut -) +) + +END() + +RECURSE_FOR_TESTS( + ut +) diff --git a/ydb/library/yql/core/facade/yql_facade.cpp b/ydb/library/yql/core/facade/yql_facade.cpp index b74abc1c34c..3e353f9bdb4 100644 --- a/ydb/library/yql/core/facade/yql_facade.cpp +++ b/ydb/library/yql/core/facade/yql_facade.cpp @@ -1314,10 +1314,10 @@ TTypeAnnotationContextPtr TProgram::BuildTypeAnnotationContext(const TString& us resultProviderDataSources.push_back(TString(RtmrProviderName)); } - if (providerNames.contains(PqProviderName)) { - resultProviderDataSources.push_back(TString(PqProviderName)); - } - + if (providerNames.contains(PqProviderName)) { + resultProviderDataSources.push_back(TString(PqProviderName)); + } + if (providerNames.contains(DqProviderName)) { resultProviderDataSources.push_back(TString(DqProviderName)); } diff --git a/ydb/library/yql/core/services/yql_plan.cpp b/ydb/library/yql/core/services/yql_plan.cpp index 4d7cfffaf5a..90fe8d13db4 100644 --- a/ydb/library/yql/core/services/yql_plan.cpp +++ b/ydb/library/yql/core/services/yql_plan.cpp @@ -366,7 +366,7 @@ public: } else if (node->IsCallable("DqStage") || node->IsCallable("DqPhyStage") || - node->IsCallable("DqQuery!") || + node->IsCallable("DqQuery!") || node->ChildrenSize() >= 1 && node->Child(0)->IsCallable("TDqOutput")) { auto provider = Types_.DataSinkMap.FindPtr(DqProviderName); YQL_ENSURE(provider); diff --git a/ydb/library/yql/core/type_ann/type_ann_expr.cpp b/ydb/library/yql/core/type_ann/type_ann_expr.cpp index f67d3b41200..b637a805c79 100644 --- a/ydb/library/yql/core/type_ann/type_ann_expr.cpp +++ b/ydb/library/yql/core/type_ann/type_ann_expr.cpp @@ -519,7 +519,7 @@ private: if (it != Types.ExpectedTypes.end()) { YQL_ENSURE(IsSameAnnotation(*input.GetTypeAnn(), *it->second), "Rewrite error, type should be : " << - *it->second << ", 
but it is: " << *input.GetTypeAnn() << " for node " << input.Content()); + *it->second << ", but it is: " << *input.GetTypeAnn() << " for node " << input.Content()); } auto coIt = Types.ExpectedColumnOrders.find(input.UniqueId()); diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor.cpp b/ydb/library/yql/dq/actors/compute/dq_compute_actor.cpp index c2f6c45e2b5..a4dcb38bbc0 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor.cpp +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor.cpp @@ -30,10 +30,10 @@ class TDqComputeActor : public TDqComputeActorBase<TDqComputeActor> { using TBase = TDqComputeActorBase<TDqComputeActor>; public: - static constexpr char ActorName[] = "DQ_COMPUTE_ACTOR"; + static constexpr char ActorName[] = "DQ_COMPUTE_ACTOR"; TDqComputeActor(const TActorId& executerId, const TTxId& txId, NDqProto::TDqTask&& task, - IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, + IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits, const TTaskRunnerFactory& taskRunnerFactory) : TBase(executerId, txId, std::move(task), std::move(sourceActorFactory), std::move(sinkActorFactory), settings, memoryLimits) @@ -67,8 +67,8 @@ private: IActor* CreateDqComputeActor(const TActorId& executerId, const TTxId& txId, NYql::NDqProto::TDqTask&& task, - IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, - const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits, const TTaskRunnerFactory& taskRunnerFactory) + IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, + const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits, const TTaskRunnerFactory& taskRunnerFactory) { return new TDqComputeActor(executerId, txId, std::move(task), std::move(sourceActorFactory), std::move(sinkActorFactory), settings, memoryLimits, taskRunnerFactory); diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor.h b/ydb/library/yql/dq/actors/compute/dq_compute_actor.h index 84eeda2edbf..8be5e408e74 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor.h +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor.h @@ -68,12 +68,12 @@ struct TEvDqCompute { } }; - struct TEvNewCheckpointCoordinatorAck : public NActors::TEventPB<TEvNewCheckpointCoordinatorAck, - NDqProto::TEvNewCheckpointCoordinatorAck, TDqComputeEvents::EvNewCheckpointCoordinatorAck> { - - TEvNewCheckpointCoordinatorAck() = default; - }; - + struct TEvNewCheckpointCoordinatorAck : public NActors::TEventPB<TEvNewCheckpointCoordinatorAck, + NDqProto::TEvNewCheckpointCoordinatorAck, TDqComputeEvents::EvNewCheckpointCoordinatorAck> { + + TEvNewCheckpointCoordinatorAck() = default; + }; + struct TEvInjectCheckpoint : public NActors::TEventPB<TEvInjectCheckpoint, NDqProto::TEvInjectCheckpoint, TDqComputeEvents::EvInjectCheckpoint> { @@ -87,16 +87,16 @@ struct TEvDqCompute { }; struct TEvSaveTaskState : public NActors::TEventLocal<TEvSaveTaskState, TDqComputeEvents::EvSaveTaskState> { - TEvSaveTaskState(TString graphId, ui64 taskId, NDqProto::TCheckpoint checkpoint) + TEvSaveTaskState(TString graphId, ui64 taskId, NDqProto::TCheckpoint checkpoint) : GraphId(std::move(graphId)) , TaskId(taskId) , Checkpoint(std::move(checkpoint)) - {} + {} const TString GraphId; const ui64 TaskId; const NDqProto::TCheckpoint Checkpoint; - 
NDqProto::TComputeActorState State; + NDqProto::TComputeActorState State; }; struct TEvSaveTaskStateResult : public NActors::TEventPB<TEvSaveTaskStateResult, @@ -132,17 +132,17 @@ struct TEvDqCompute { TEvRestoreFromCheckpoint() = default; TEvRestoreFromCheckpoint(ui64 checkpointId, ui64 checkpointGeneration, ui64 coordinatorGeneration) { - Init(checkpointId, checkpointGeneration, coordinatorGeneration); - Record.MutableStateLoadPlan()->SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_OWN); // default - } - - TEvRestoreFromCheckpoint(ui64 checkpointId, ui64 checkpointGeneration, ui64 coordinatorGeneration, const NDqProto::NDqStateLoadPlan::TTaskPlan& taskPlan) { - Init(checkpointId, checkpointGeneration, coordinatorGeneration); - *Record.MutableStateLoadPlan() = taskPlan; - } - - private: - void Init(ui64 checkpointId, ui64 checkpointGeneration, ui64 coordinatorGeneration) { + Init(checkpointId, checkpointGeneration, coordinatorGeneration); + Record.MutableStateLoadPlan()->SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_OWN); // default + } + + TEvRestoreFromCheckpoint(ui64 checkpointId, ui64 checkpointGeneration, ui64 coordinatorGeneration, const NDqProto::NDqStateLoadPlan::TTaskPlan& taskPlan) { + Init(checkpointId, checkpointGeneration, coordinatorGeneration); + *Record.MutableStateLoadPlan() = taskPlan; + } + + private: + void Init(ui64 checkpointId, ui64 checkpointGeneration, ui64 coordinatorGeneration) { Record.MutableCheckpoint()->SetId(checkpointId); Record.MutableCheckpoint()->SetGeneration(checkpointGeneration); Record.SetGeneration(coordinatorGeneration); @@ -150,39 +150,39 @@ struct TEvDqCompute { }; struct TEvRestoreFromCheckpointResult : public NActors::TEventPB<TEvRestoreFromCheckpointResult, - NDqProto::TEvRestoreFromCheckpointResult, TDqComputeEvents::EvRestoreFromCheckpointResult> { - using TBaseEventPB = NActors::TEventPB<TEvRestoreFromCheckpointResult, NDqProto::TEvRestoreFromCheckpointResult, TDqComputeEvents::EvRestoreFromCheckpointResult>; - - using TBaseEventPB::TBaseEventPB; - - TEvRestoreFromCheckpointResult(const NDqProto::TCheckpoint& checkpoint, ui64 taskId, NDqProto::TEvRestoreFromCheckpointResult::ERestoreStatus status) { - Record.MutableCheckpoint()->CopyFrom(checkpoint); - Record.SetTaskId(taskId); - Record.SetStatus(status); - } - }; - + NDqProto::TEvRestoreFromCheckpointResult, TDqComputeEvents::EvRestoreFromCheckpointResult> { + using TBaseEventPB = NActors::TEventPB<TEvRestoreFromCheckpointResult, NDqProto::TEvRestoreFromCheckpointResult, TDqComputeEvents::EvRestoreFromCheckpointResult>; + + using TBaseEventPB::TBaseEventPB; + + TEvRestoreFromCheckpointResult(const NDqProto::TCheckpoint& checkpoint, ui64 taskId, NDqProto::TEvRestoreFromCheckpointResult::ERestoreStatus status) { + Record.MutableCheckpoint()->CopyFrom(checkpoint); + Record.SetTaskId(taskId); + Record.SetStatus(status); + } + }; + struct TEvGetTaskState : public NActors::TEventLocal<TEvGetTaskState, TDqComputeEvents::EvGetTaskState> { - TEvGetTaskState(TString graphId, const std::vector<ui64>& taskIds, NDqProto::TCheckpoint checkpoint, ui64 generation) + TEvGetTaskState(TString graphId, const std::vector<ui64>& taskIds, NDqProto::TCheckpoint checkpoint, ui64 generation) : GraphId(std::move(graphId)) - , TaskIds(taskIds) + , TaskIds(taskIds) , Checkpoint(std::move(checkpoint)) , Generation(generation) {} const TString GraphId; - const std::vector<ui64> TaskIds; + const std::vector<ui64> TaskIds; const NDqProto::TCheckpoint Checkpoint; const ui64 Generation; }; struct 
TEvGetTaskStateResult : public NActors::TEventLocal<TEvGetTaskStateResult, TDqComputeEvents::EvGetTaskStateResult> { - TEvGetTaskStateResult(NDqProto::TCheckpoint checkpoint, TIssues issues, ui64 generation) + TEvGetTaskStateResult(NDqProto::TCheckpoint checkpoint, TIssues issues, ui64 generation) : Checkpoint(std::move(checkpoint)) , Issues(std::move(issues)) , Generation(generation) {} const NDqProto::TCheckpoint Checkpoint; - std::vector<NDqProto::TComputeActorState> States; + std::vector<NDqProto::TComputeActorState> States; const TIssues Issues; const ui64 Generation; }; @@ -256,7 +256,7 @@ void FillTaskRunnerStats(ui64 taskId, ui32 stageId, const TDqTaskRunnerStats& ta NDqProto::TDqTaskStats* protoTask, bool withProfileStats); NActors::IActor* CreateDqComputeActor(const NActors::TActorId& executerId, const TTxId& txId, NDqProto::TDqTask&& task, - IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, + IDqSourceActorFactory::TPtr sourceActorFactory, IDqSinkActorFactory::TPtr sinkActorFactory, const TComputeRuntimeSettings& settings, const TComputeMemoryLimits& memoryLimits, const TTaskRunnerFactory& taskRunnerFactory); diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_channels.cpp b/ydb/library/yql/dq/actors/compute/dq_compute_actor_channels.cpp index 1a1611ec81e..b74cc521ef3 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_channels.cpp +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_channels.cpp @@ -32,8 +32,8 @@ TString InFlightMessagesStr(const TCollection& inFlight) { } // anonymous namespace TDqComputeActorChannels::TDqComputeActorChannels(TActorId owner, const TTxId& txId, const NDqProto::TDqTask& task, - bool retryOnUndelivery, NDqProto::EDqStatsMode statsMode, ui64 channelBufferSize, ICallbacks* cbs, ui32 actorActivityType) - : TActor(&TDqComputeActorChannels::WorkState, actorActivityType) + bool retryOnUndelivery, NDqProto::EDqStatsMode statsMode, ui64 channelBufferSize, ICallbacks* cbs, ui32 actorActivityType) + : TActor(&TDqComputeActorChannels::WorkState, actorActivityType) , Owner(owner) , TxId(txId) , TaskId(task.GetId()) @@ -102,7 +102,7 @@ void TDqComputeActorChannels::HandleWork(TEvDqCompute::TEvChannelData::TPtr& ev) << ", seqNo: " << record.GetSeqNo() << ", size: " << channelData.GetData().GetRaw().size() << ", rows: " << channelData.GetData().GetRows() - << ", checkpoint: " << channelData.HasCheckpoint() + << ", checkpoint: " << channelData.HasCheckpoint() << ", finished: " << channelData.GetFinished() << ", from: " << ev->Sender << ", expected seqNo: " << (inputChannel.LastRecvSeqNo + 1)); @@ -178,12 +178,12 @@ void TDqComputeActorChannels::HandleWork(TEvDqCompute::TEvChannelDataAck::TPtr& // remove all messages with seqNo <= ackSeqNo auto it = outputChannel.InFlight.begin(); while (it != outputChannel.InFlight.end() && it->first <= record.GetSeqNo()) { - Y_VERIFY_DEBUG(outputChannel.PeerState.InFlightBytes >= it->second.Data.GetData().GetRaw().size()); - Y_VERIFY_DEBUG(outputChannel.PeerState.InFlightRows >= it->second.Data.GetData().GetRows()); + Y_VERIFY_DEBUG(outputChannel.PeerState.InFlightBytes >= it->second.Data.GetData().GetRaw().size()); + Y_VERIFY_DEBUG(outputChannel.PeerState.InFlightRows >= it->second.Data.GetData().GetRows()); Y_VERIFY_DEBUG(outputChannel.PeerState.InFlightCount >= 1); - outputChannel.PeerState.InFlightBytes -= it->second.Data.GetData().GetRaw().size(); - outputChannel.PeerState.InFlightRows -= it->second.Data.GetData().GetRows(); + 
outputChannel.PeerState.InFlightBytes -= it->second.Data.GetData().GetRaw().size(); + outputChannel.PeerState.InFlightRows -= it->second.Data.GetData().GetRows(); outputChannel.PeerState.InFlightCount -= 1; it = outputChannel.InFlight.erase(it); } @@ -248,7 +248,7 @@ void TDqComputeActorChannels::HandleWork(TEvDqCompute::TEvRetryChannelData::TPtr retryEv->Record.SetSendTime(now.MilliSeconds()); auto* data = retryEv->Record.MutableChannelData(); - data->CopyFrom(inFlight.second.Data); + data->CopyFrom(inFlight.second.Data); data->SetChannelId(msg->ChannelId); data->SetFinished(inFlight.second.Finished); @@ -385,7 +385,7 @@ void TDqComputeActorChannels::HandleUndeliveredEvChannelData(ui64 channelId, TEv TOutputChannelState& outputChannel = OutCh(channelId); - if (outputChannel.Finished && outputChannel.EarlyFinish && !SupportCheckpoints) { + if (outputChannel.Finished && outputChannel.EarlyFinish && !SupportCheckpoints) { LOG_E("Ignore undelivered TEvChannelData event due to early finish, channelId: " << channelId); outputChannel.InFlight.clear(); Cbs->ResumeExecution(); @@ -409,7 +409,7 @@ void TDqComputeActorChannels::HandleUndeliveredEvChannelDataAck(ui64 channelId, TInputChannelState& inputChannel = InCh(channelId); inputChannel.PollRequest.reset(); - if (inputChannel.Finished && !SupportCheckpoints) { + if (inputChannel.Finished && !SupportCheckpoints) { LOG_I("Handle undelivered event: TEvChannelDataAck, channelId: " << channelId << ", reason: " << reason << ". Ignore, channel is finished."); inputChannel.InFlight.clear(); @@ -510,13 +510,13 @@ void TDqComputeActorChannels::SetOutputChannelPeer(ui64 channelId, const TActorI bool TDqComputeActorChannels::CanSendChannelData(ui64 channelId) { TOutputChannelState& outputChannel = OutCh(channelId); - return outputChannel.Peer && (!outputChannel.Finished || SupportCheckpoints) && !outputChannel.RetryState; + return outputChannel.Peer && (!outputChannel.Finished || SupportCheckpoints) && !outputChannel.RetryState; } void TDqComputeActorChannels::SendChannelData(NDqProto::TChannelData&& channelData) { TOutputChannelState& outputChannel = OutCh(channelData.GetChannelId()); - YQL_ENSURE(!outputChannel.Finished || SupportCheckpoints); + YQL_ENSURE(!outputChannel.Finished || SupportCheckpoints); YQL_ENSURE(!outputChannel.RetryState); ui64 seqNo = ++outputChannel.LastSentSeqNo; @@ -528,7 +528,7 @@ void TDqComputeActorChannels::SendChannelData(NDqProto::TChannelData&& channelDa << ", peer: " << *outputChannel.Peer << ", rows: " << chunkRows << ", bytes: " << chunkBytes - << ", checkpoint: " << channelData.HasCheckpoint() + << ", checkpoint: " << channelData.HasCheckpoint() << ", seqNo: " << seqNo << ", finished: " << finished); @@ -541,7 +541,7 @@ void TDqComputeActorChannels::SendChannelData(NDqProto::TChannelData&& channelDa seqNo, TOutputChannelState::TInFlightMessage( seqNo, - NYql::NDqProto::TChannelData(dataEv->Record.GetChannelData()), + NYql::NDqProto::TChannelData(dataEv->Record.GetChannelData()), finished ) ); @@ -558,7 +558,7 @@ void TDqComputeActorChannels::SendChannelData(NDqProto::TChannelData&& channelDa bool TDqComputeActorChannels::PollChannel(ui64 channelId, i64 freeSpace) { TInputChannelState& inputChannel = InCh(channelId); - if (!inputChannel.Peer || (inputChannel.Finished && !SupportCheckpoints) || inputChannel.RetryState || + if (!inputChannel.Peer || (inputChannel.Finished && !SupportCheckpoints) || inputChannel.RetryState || inputChannel.LastRecvSeqNo == 0 || freeSpace <= 0) { LOG_D("no poll, channelId: " << channelId << ", 
hasPeer: " << inputChannel.Peer.has_value() @@ -663,7 +663,7 @@ bool TDqComputeActorChannels::FinishInputChannels() { ackEv->Record.SetFreeSpace(0); ackEv->Record.SetFinish(true); - ui32 flags = (inputChannel.Finished && !SupportCheckpoints) ? 0 : CalcMessageFlags(*inputChannel.Peer); + ui32 flags = (inputChannel.Finished && !SupportCheckpoints) ? 0 : CalcMessageFlags(*inputChannel.Peer); Send(*inputChannel.Peer, ackEv.Release(), flags, /* cookie */ inputChannel.ChannelId); } @@ -710,7 +710,7 @@ void TDqComputeActorChannels::SendChannelDataAck(TInputChannelState& inputChanne ackEv->Record.SetChannelId(inputChannel.ChannelId); ackEv->Record.SetFreeSpace(freeSpace); - ui32 flags = (inputChannel.Finished && !SupportCheckpoints) ? 0 : CalcMessageFlags(*inputChannel.Peer); + ui32 flags = (inputChannel.Finished && !SupportCheckpoints) ? 0 : CalcMessageFlags(*inputChannel.Peer); Send(*inputChannel.Peer, ackEv.Release(), flags, /* cookie */ inputChannel.ChannelId); } @@ -734,8 +734,8 @@ TDqComputeActorChannels::TOutputChannelState& TDqComputeActorChannels::OutCh(ui6 return *ch; } -void TDqComputeActorChannels::SetCheckpointsSupport() { - SupportCheckpoints = true; -} - +void TDqComputeActorChannels::SetCheckpointsSupport() { + SupportCheckpoints = true; +} + } // namespace NYql::NDq diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_channels.h b/ydb/library/yql/dq/actors/compute/dq_compute_actor_channels.h index 648b1d8b563..44863291dca 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_channels.h +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_channels.h @@ -24,8 +24,8 @@ public: virtual void TakeInputChannelData(NDqProto::TChannelData&& channelData, bool ack) = 0; virtual void PeerFinished(ui64 channelId) = 0; virtual void ResumeExecution() = 0; - - virtual ~ICallbacks() = default; + + virtual ~ICallbacks() = default; }; struct TInputChannelStats { @@ -40,7 +40,7 @@ public: public: TDqComputeActorChannels(NActors::TActorId owner, const TTxId& txId, const NYql::NDqProto::TDqTask& task, bool retryOnUndelivery, - NDqProto::EDqStatsMode statsMode, ui64 channelBufferSize, ICallbacks* cbs, ui32 actorActivityType); + NDqProto::EDqStatsMode statsMode, ui64 channelBufferSize, ICallbacks* cbs, ui32 actorActivityType); private: STATEFN(WorkState); @@ -66,7 +66,7 @@ private: void PassAway() override; public: - void SetCheckpointsSupport(); // Finished channels will be polled for checkpoints. + void SetCheckpointsSupport(); // Finished channels will be polled for checkpoints. 
void SetInputChannelPeer(ui64 channelId, const NActors::TActorId& peer); void SetOutputChannelPeer(ui64 channelId, const NActors::TActorId& peer); bool CanSendChannelData(ui64 channelId); @@ -147,13 +147,13 @@ private: bool EarlyFinish = false; struct TInFlightMessage { - TInFlightMessage(ui64 seqNo, NYql::NDqProto::TChannelData&& data, bool finished) + TInFlightMessage(ui64 seqNo, NYql::NDqProto::TChannelData&& data, bool finished) : SeqNo(seqNo) , Data(std::move(data)) , Finished(finished) {} const ui64 SeqNo; - const NYql::NDqProto::TChannelData Data; + const NYql::NDqProto::TChannelData Data; const bool Finished; }; TMap<ui64, TInFlightMessage> InFlight; @@ -178,7 +178,7 @@ private: const TTxId TxId; const ui64 TaskId; const bool RetryOnUndelivery; - bool SupportCheckpoints = false; + bool SupportCheckpoints = false; ICallbacks* const Cbs; THashSet<ui32> TrackingNodes; THashMap<ui64, TInputChannelState> InputChannelsMap; diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_checkpoints.cpp b/ydb/library/yql/dq/actors/compute/dq_compute_actor_checkpoints.cpp index 9d943cbca3d..58923faa334 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_checkpoints.cpp +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_checkpoints.cpp @@ -1,198 +1,198 @@ #include "dq_compute_actor_checkpoints.h" #include "dq_checkpoints.h" -#include "dq_compute_actor_impl.h" +#include "dq_compute_actor_impl.h" #include <ydb/library/yql/minikql/comp_nodes/mkql_saveload.h> - -#include <algorithm> + +#include <algorithm> #define LOG_D(s) \ - LOG_DEBUG_S(*NActors::TlsActivationContext, NKikimrServices::KQP_COMPUTE, "[" << GraphId << "] Task: " << Task.GetId() << ". " << s) + LOG_DEBUG_S(*NActors::TlsActivationContext, NKikimrServices::KQP_COMPUTE, "[" << GraphId << "] Task: " << Task.GetId() << ". " << s) #define LOG_I(s) \ - LOG_INFO_S(*NActors::TlsActivationContext, NKikimrServices::KQP_COMPUTE, "[" << GraphId << "] Task: " << Task.GetId() << ". " << s) + LOG_INFO_S(*NActors::TlsActivationContext, NKikimrServices::KQP_COMPUTE, "[" << GraphId << "] Task: " << Task.GetId() << ". " << s) #define LOG_W(s) \ - LOG_WARN_S(*NActors::TlsActivationContext, NKikimrServices::KQP_COMPUTE, "[" << GraphId << "] Task: " << Task.GetId() << ". " << s) + LOG_WARN_S(*NActors::TlsActivationContext, NKikimrServices::KQP_COMPUTE, "[" << GraphId << "] Task: " << Task.GetId() << ". " << s) #define LOG_E(s) \ - LOG_ERROR_S(*NActors::TlsActivationContext, NKikimrServices::KQP_COMPUTE, "[" << GraphId << "] Task: " << Task.GetId() << ". " << s) + LOG_ERROR_S(*NActors::TlsActivationContext, NKikimrServices::KQP_COMPUTE, "[" << GraphId << "] Task: " << Task.GetId() << ". " << s) #define LOG_CP_D(s) \ - LOG_D("[Checkpoint " << MakeStringForLog(*PendingCheckpoint.Checkpoint) << "] " << s) + LOG_D("[Checkpoint " << MakeStringForLog(*PendingCheckpoint.Checkpoint) << "] " << s) #define LOG_CP_I(s) \ - LOG_I("[Checkpoint " << MakeStringForLog(*PendingCheckpoint.Checkpoint) << "] " << s) + LOG_I("[Checkpoint " << MakeStringForLog(*PendingCheckpoint.Checkpoint) << "] " << s) #define LOG_CP_E(s) \ - LOG_E("[Checkpoint " << MakeStringForLog(*PendingCheckpoint.Checkpoint) << "] " << s) + LOG_E("[Checkpoint " << MakeStringForLog(*PendingCheckpoint.Checkpoint) << "] " << s) namespace NYql::NDq { using namespace NActors; -namespace { - -TString MakeStringForLog(const NDqProto::TCheckpoint& checkpoint) { - return TStringBuilder() << checkpoint.GetGeneration() << "." 
<< checkpoint.GetId(); -} - -bool IsIngressTask(const NDqProto::TDqTask& task) { - for (const auto& input : task.GetInputs()) { - if (!input.HasSource()) { - return false; - } - } - return true; -} - -std::vector<ui64> TaskIdsFromLoadPlan(const NDqProto::NDqStateLoadPlan::TTaskPlan& plan) { - std::vector<ui64> taskIds; - for (const auto& sourcePlan : plan.GetSources()) { - if (sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN) { - for (const auto& foreignTaskSource : sourcePlan.GetForeignTasksSources()) { - taskIds.push_back(foreignTaskSource.GetTaskId()); - } - } - } - std::sort(taskIds.begin(), taskIds.end()); - taskIds.erase(std::unique(taskIds.begin(), taskIds.end()), taskIds.end()); - return taskIds; -} - -const NDqProto::TSourceState& FindSourceState( - const NDqProto::NDqStateLoadPlan::TSourcePlan::TForeignTaskSource& foreignTaskSource, - const std::vector<NDqProto::TComputeActorState>& states, - const std::vector<ui64>& taskIds) -{ - // Find state index - const auto stateIndexIt = std::lower_bound(taskIds.begin(), taskIds.end(), foreignTaskSource.GetTaskId()); - YQL_ENSURE(stateIndexIt != taskIds.end(), "Task id was not found in plan"); - const size_t stateIndex = std::distance(taskIds.begin(), stateIndexIt); - const NDqProto::TComputeActorState& state = states[stateIndex]; - for (const NDqProto::TSourceState& sourceState : state.GetSources()) { - if (sourceState.GetInputIndex() == foreignTaskSource.GetInputIndex()) { - return sourceState; - } - } - YQL_ENSURE(false, "Source input index " << foreignTaskSource.GetInputIndex() << " was not found in state"); - // Make compiler happy - return state.GetSources(0); -} - -NDqProto::TComputeActorState CombineForeignState( - const NDqProto::NDqStateLoadPlan::TTaskPlan& plan, - const std::vector<NDqProto::TComputeActorState>& states, - const std::vector<ui64>& taskIds) -{ - NDqProto::TComputeActorState state; - state.MutableMiniKqlProgram()->MutableData()->MutableStateData()->SetVersion(ComputeActorCurrentStateVersion); - YQL_ENSURE(plan.GetProgram().GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY, "Unsupported program state type. Plan: " << plan); - for (const auto& sinkPlan : plan.GetSinks()) { - YQL_ENSURE(sinkPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY, "Unsupported sink state type. Plan: " << sinkPlan); - } - for (const auto& sourcePlan : plan.GetSources()) { - YQL_ENSURE(sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY || sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN, "Unsupported sink state type. Plan: " << sourcePlan); - if (sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN) { - auto& sourceState = *state.AddSources(); - sourceState.SetInputIndex(sourcePlan.GetInputIndex()); - for (const auto& foreignTaskSource : sourcePlan.GetForeignTasksSources()) { - const NDqProto::TSourceState& srcSourceState = FindSourceState(foreignTaskSource, states, taskIds); - for (const NDqProto::TStateData& data : srcSourceState.GetData()) { - sourceState.AddData()->CopyFrom(data); - } - } - YQL_ENSURE(sourceState.DataSize(), "No data was loaded to source " << sourcePlan.GetInputIndex()); - } - } - return state; -} - -} // namespace - -TDqComputeActorCheckpoints::TDqComputeActorCheckpoints(const TTxId& txId, NDqProto::TDqTask task, ICallbacks* computeActor) +namespace { + +TString MakeStringForLog(const NDqProto::TCheckpoint& checkpoint) { + return TStringBuilder() << checkpoint.GetGeneration() << "." 
<< checkpoint.GetId(); +} + +bool IsIngressTask(const NDqProto::TDqTask& task) { + for (const auto& input : task.GetInputs()) { + if (!input.HasSource()) { + return false; + } + } + return true; +} + +std::vector<ui64> TaskIdsFromLoadPlan(const NDqProto::NDqStateLoadPlan::TTaskPlan& plan) { + std::vector<ui64> taskIds; + for (const auto& sourcePlan : plan.GetSources()) { + if (sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN) { + for (const auto& foreignTaskSource : sourcePlan.GetForeignTasksSources()) { + taskIds.push_back(foreignTaskSource.GetTaskId()); + } + } + } + std::sort(taskIds.begin(), taskIds.end()); + taskIds.erase(std::unique(taskIds.begin(), taskIds.end()), taskIds.end()); + return taskIds; +} + +const NDqProto::TSourceState& FindSourceState( + const NDqProto::NDqStateLoadPlan::TSourcePlan::TForeignTaskSource& foreignTaskSource, + const std::vector<NDqProto::TComputeActorState>& states, + const std::vector<ui64>& taskIds) +{ + // Find state index + const auto stateIndexIt = std::lower_bound(taskIds.begin(), taskIds.end(), foreignTaskSource.GetTaskId()); + YQL_ENSURE(stateIndexIt != taskIds.end(), "Task id was not found in plan"); + const size_t stateIndex = std::distance(taskIds.begin(), stateIndexIt); + const NDqProto::TComputeActorState& state = states[stateIndex]; + for (const NDqProto::TSourceState& sourceState : state.GetSources()) { + if (sourceState.GetInputIndex() == foreignTaskSource.GetInputIndex()) { + return sourceState; + } + } + YQL_ENSURE(false, "Source input index " << foreignTaskSource.GetInputIndex() << " was not found in state"); + // Make compiler happy + return state.GetSources(0); +} + +NDqProto::TComputeActorState CombineForeignState( + const NDqProto::NDqStateLoadPlan::TTaskPlan& plan, + const std::vector<NDqProto::TComputeActorState>& states, + const std::vector<ui64>& taskIds) +{ + NDqProto::TComputeActorState state; + state.MutableMiniKqlProgram()->MutableData()->MutableStateData()->SetVersion(ComputeActorCurrentStateVersion); + YQL_ENSURE(plan.GetProgram().GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY, "Unsupported program state type. Plan: " << plan); + for (const auto& sinkPlan : plan.GetSinks()) { + YQL_ENSURE(sinkPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY, "Unsupported sink state type. Plan: " << sinkPlan); + } + for (const auto& sourcePlan : plan.GetSources()) { + YQL_ENSURE(sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY || sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN, "Unsupported sink state type. 
Plan: " << sourcePlan); + if (sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN) { + auto& sourceState = *state.AddSources(); + sourceState.SetInputIndex(sourcePlan.GetInputIndex()); + for (const auto& foreignTaskSource : sourcePlan.GetForeignTasksSources()) { + const NDqProto::TSourceState& srcSourceState = FindSourceState(foreignTaskSource, states, taskIds); + for (const NDqProto::TStateData& data : srcSourceState.GetData()) { + sourceState.AddData()->CopyFrom(data); + } + } + YQL_ENSURE(sourceState.DataSize(), "No data was loaded to source " << sourcePlan.GetInputIndex()); + } + } + return state; +} + +} // namespace + +TDqComputeActorCheckpoints::TDqComputeActorCheckpoints(const TTxId& txId, NDqProto::TDqTask task, ICallbacks* computeActor) : TActor(&TDqComputeActorCheckpoints::StateFunc) - , TxId(txId) + , TxId(txId) , Task(std::move(task)) - , IngressTask(IsIngressTask(Task)) + , IngressTask(IsIngressTask(Task)) , CheckpointStorage(MakeCheckpointStorageID()) - , ComputeActor(computeActor) - , PendingCheckpoint(Task) -{ -} - -void TDqComputeActorCheckpoints::Init(NActors::TActorId computeActorId, NActors::TActorId checkpointsId) { - EventsQueue.Init(TxId, computeActorId, checkpointsId); -} - -STRICT_STFUNC(TDqComputeActorCheckpoints::StateFunc, - hFunc(TEvDqCompute::TEvNewCheckpointCoordinator, Handle); - hFunc(TEvDqCompute::TEvInjectCheckpoint, Handle); - hFunc(TEvDqCompute::TEvSaveTaskStateResult, Handle); - hFunc(TEvDqCompute::TEvCommitState, Handle); - hFunc(TEvDqCompute::TEvRestoreFromCheckpoint, Handle); - hFunc(TEvDqCompute::TEvGetTaskStateResult, Handle); - hFunc(TEvDqCompute::TEvRun, Handle); - hFunc(NActors::TEvInterconnect::TEvNodeDisconnected, Handle); - hFunc(NActors::TEvInterconnect::TEvNodeConnected, Handle); - hFunc(TEvRetryQueuePrivate::TEvRetry, Handle); - cFunc(TEvents::TEvPoisonPill::EventType, PassAway); -) - -namespace { - -// Get generation for protobuf event. + , ComputeActor(computeActor) + , PendingCheckpoint(Task) +{ +} + +void TDqComputeActorCheckpoints::Init(NActors::TActorId computeActorId, NActors::TActorId checkpointsId) { + EventsQueue.Init(TxId, computeActorId, checkpointsId); +} + +STRICT_STFUNC(TDqComputeActorCheckpoints::StateFunc, + hFunc(TEvDqCompute::TEvNewCheckpointCoordinator, Handle); + hFunc(TEvDqCompute::TEvInjectCheckpoint, Handle); + hFunc(TEvDqCompute::TEvSaveTaskStateResult, Handle); + hFunc(TEvDqCompute::TEvCommitState, Handle); + hFunc(TEvDqCompute::TEvRestoreFromCheckpoint, Handle); + hFunc(TEvDqCompute::TEvGetTaskStateResult, Handle); + hFunc(TEvDqCompute::TEvRun, Handle); + hFunc(NActors::TEvInterconnect::TEvNodeDisconnected, Handle); + hFunc(NActors::TEvInterconnect::TEvNodeConnected, Handle); + hFunc(TEvRetryQueuePrivate::TEvRetry, Handle); + cFunc(TEvents::TEvPoisonPill::EventType, PassAway); +) + +namespace { + +// Get generation for protobuf event. +template <class E> +auto GetGeneration(const E& ev) -> decltype(ev->Get()->Record.GetGeneration()) { + return ev->Get()->Record.GetGeneration(); +} + +// Get generation for local event. template <class E> -auto GetGeneration(const E& ev) -> decltype(ev->Get()->Record.GetGeneration()) { - return ev->Get()->Record.GetGeneration(); -} - -// Get generation for local event. 
-template <class E> -auto GetGeneration(const E& ev) -> decltype(ev->Get()->Generation) { - return ev->Get()->Generation; -} +auto GetGeneration(const E& ev) -> decltype(ev->Get()->Generation) { + return ev->Get()->Generation; +} -ui64 GetGeneration(const TEvDqCompute::TEvSaveTaskStateResult::TPtr& ev) { - return ev->Get()->Record.GetCheckpoint().GetGeneration(); +ui64 GetGeneration(const TEvDqCompute::TEvSaveTaskStateResult::TPtr& ev) { + return ev->Get()->Record.GetCheckpoint().GetGeneration(); } -} // anonymous namespace - +} // anonymous namespace + template <class E> -bool TDqComputeActorCheckpoints::ShouldIgnoreOldCoordinator(const E& ev, bool verifyOnGenerationFromFuture) { - const ui64 generation = GetGeneration(ev); - Y_VERIFY(!verifyOnGenerationFromFuture || !CheckpointCoordinator || generation <= CheckpointCoordinator->Generation, - "Got incorrect checkpoint coordinator generation: %lu > %lu", generation, CheckpointCoordinator->Generation); - if (CheckpointCoordinator && generation < CheckpointCoordinator->Generation) { - LOG_D("Ignoring event " << ev->Get()->ToStringHeader() << " from previous coordinator: " - << generation << " < " << CheckpointCoordinator->Generation); - return true; - } - return false; +bool TDqComputeActorCheckpoints::ShouldIgnoreOldCoordinator(const E& ev, bool verifyOnGenerationFromFuture) { + const ui64 generation = GetGeneration(ev); + Y_VERIFY(!verifyOnGenerationFromFuture || !CheckpointCoordinator || generation <= CheckpointCoordinator->Generation, + "Got incorrect checkpoint coordinator generation: %lu > %lu", generation, CheckpointCoordinator->Generation); + if (CheckpointCoordinator && generation < CheckpointCoordinator->Generation) { + LOG_D("Ignoring event " << ev->Get()->ToStringHeader() << " from previous coordinator: " + << generation << " < " << CheckpointCoordinator->Generation); + return true; + } + return false; } void TDqComputeActorCheckpoints::Handle(TEvDqCompute::TEvNewCheckpointCoordinator::TPtr& ev) { - if (ShouldIgnoreOldCoordinator(ev, false)) { - return; - } - const ui64 newGeneration = ev->Get()->Record.GetGeneration(); + if (ShouldIgnoreOldCoordinator(ev, false)) { + return; + } + const ui64 newGeneration = ev->Get()->Record.GetGeneration(); LOG_I("Got TEvNewCheckpointCoordinator event: generation " << newGeneration << ", actorId: " << ev->Sender); - if (CheckpointCoordinator && CheckpointCoordinator->Generation == newGeneration) { // The same message. It was retry from coordinator. + if (CheckpointCoordinator && CheckpointCoordinator->Generation == newGeneration) { // The same message. It was retry from coordinator. 
Y_VERIFY(CheckpointCoordinator->ActorId == ev->Sender, "there shouldn't be two different checkpoint coordinators with the same generation"); - Y_VERIFY(GraphId == ev->Get()->Record.GetGraphId()); + Y_VERIFY(GraphId == ev->Get()->Record.GetGraphId()); return; } - if (CheckpointCoordinator) { - LOG_I("Replace stale checkpoint coordinator (generation = " << CheckpointCoordinator->Generation << ") with a new one"); - } else { - LOG_I("Assign checkpoint coordinator (generation = " << newGeneration << ")"); - } - + if (CheckpointCoordinator) { + LOG_I("Replace stale checkpoint coordinator (generation = " << CheckpointCoordinator->Generation << ") with a new one"); + } else { + LOG_I("Assign checkpoint coordinator (generation = " << newGeneration << ")"); + } + CheckpointCoordinator = TCheckpointCoordinatorId(ev->Sender, newGeneration); - GraphId = ev->Get()->Record.GetGraphId(); - - EventsQueue.OnNewRecipientId(ev->Sender); - Y_VERIFY(EventsQueue.OnEventReceived(ev->Get())); - EventsQueue.Send(new TEvDqCompute::TEvNewCheckpointCoordinatorAck()); + GraphId = ev->Get()->Record.GetGraphId(); + EventsQueue.OnNewRecipientId(ev->Sender); + Y_VERIFY(EventsQueue.OnEventReceived(ev->Get())); + EventsQueue.Send(new TEvDqCompute::TEvNewCheckpointCoordinatorAck()); + if (PendingCheckpoint) { LOG_I("Drop pending checkpoint since coordinator is stale"); PendingCheckpoint.Clear(); @@ -201,11 +201,11 @@ void TDqComputeActorCheckpoints::Handle(TEvDqCompute::TEvNewCheckpointCoordinato } void TDqComputeActorCheckpoints::Handle(TEvDqCompute::TEvInjectCheckpoint::TPtr& ev) { - if (ShouldIgnoreOldCoordinator(ev)) { - return; - } - - YQL_ENSURE(IngressTask, "Shouldn't inject barriers into non-ingress tasks"); + if (ShouldIgnoreOldCoordinator(ev)) { + return; + } + + YQL_ENSURE(IngressTask, "Shouldn't inject barriers into non-ingress tasks"); YQL_ENSURE(!PendingCheckpoint); PendingCheckpoint = ev->Get()->Record.GetCheckpoint(); @@ -214,133 +214,133 @@ void TDqComputeActorCheckpoints::Handle(TEvDqCompute::TEvInjectCheckpoint::TPtr& } void TDqComputeActorCheckpoints::Handle(TEvDqCompute::TEvSaveTaskStateResult::TPtr& ev) { - if (ShouldIgnoreOldCoordinator(ev)) { + if (ShouldIgnoreOldCoordinator(ev)) { return; } - - EventsQueue.Send(ev->Release().Release(), ev->Cookie); + + EventsQueue.Send(ev->Release().Release(), ev->Cookie); } void TDqComputeActorCheckpoints::Handle(TEvDqCompute::TEvRestoreFromCheckpoint::TPtr& ev) { - if (ShouldIgnoreOldCoordinator(ev)) { - return; - } - - if (!EventsQueue.OnEventReceived(ev)) { + if (ShouldIgnoreOldCoordinator(ev)) { return; } + if (!EventsQueue.OnEventReceived(ev)) { + return; + } + ComputeActor->Stop(); - TaskLoadPlan = ev->Get()->Record.GetStateLoadPlan(); - const auto& checkpoint = ev->Get()->Record.GetCheckpoint(); - LOG_I("[Checkpoint " << MakeStringForLog(checkpoint) << "] Got TEvRestoreFromCheckpoint event with plan " << TaskLoadPlan); - switch (TaskLoadPlan.GetStateType()) { - case NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY: - { - LOG_I("[Checkpoint " << MakeStringForLog(checkpoint) << "] Restored from empty state"); - EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::OK)); - break; - } - case NDqProto::NDqStateLoadPlan::STATE_TYPE_OWN: - { - Send( - CheckpointStorage, - new TEvDqCompute::TEvGetTaskState( - GraphId, - {Task.GetId()}, - ev->Get()->Record.GetCheckpoint(), - CheckpointCoordinator->Generation)); - break; - } - case NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN: - { - 
Send( - CheckpointStorage, - new TEvDqCompute::TEvGetTaskState( - GraphId, - TaskIdsFromLoadPlan(TaskLoadPlan), - ev->Get()->Record.GetCheckpoint(), - CheckpointCoordinator->Generation)); - break; - } - default: - { - LOG_E("[Checkpoint " << MakeStringForLog(checkpoint) << "] Unsupported state type: " - << NDqProto::NDqStateLoadPlan::EStateType_Name(TaskLoadPlan.GetStateType()) << " (" << static_cast<int>(TaskLoadPlan.GetStateType()) << ")"); - EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::INTERNAL_ERROR)); - break; - } - } + TaskLoadPlan = ev->Get()->Record.GetStateLoadPlan(); + const auto& checkpoint = ev->Get()->Record.GetCheckpoint(); + LOG_I("[Checkpoint " << MakeStringForLog(checkpoint) << "] Got TEvRestoreFromCheckpoint event with plan " << TaskLoadPlan); + switch (TaskLoadPlan.GetStateType()) { + case NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY: + { + LOG_I("[Checkpoint " << MakeStringForLog(checkpoint) << "] Restored from empty state"); + EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::OK)); + break; + } + case NDqProto::NDqStateLoadPlan::STATE_TYPE_OWN: + { + Send( + CheckpointStorage, + new TEvDqCompute::TEvGetTaskState( + GraphId, + {Task.GetId()}, + ev->Get()->Record.GetCheckpoint(), + CheckpointCoordinator->Generation)); + break; + } + case NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN: + { + Send( + CheckpointStorage, + new TEvDqCompute::TEvGetTaskState( + GraphId, + TaskIdsFromLoadPlan(TaskLoadPlan), + ev->Get()->Record.GetCheckpoint(), + CheckpointCoordinator->Generation)); + break; + } + default: + { + LOG_E("[Checkpoint " << MakeStringForLog(checkpoint) << "] Unsupported state type: " + << NDqProto::NDqStateLoadPlan::EStateType_Name(TaskLoadPlan.GetStateType()) << " (" << static_cast<int>(TaskLoadPlan.GetStateType()) << ")"); + EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::INTERNAL_ERROR)); + break; + } + } } void TDqComputeActorCheckpoints::Handle(TEvDqCompute::TEvGetTaskStateResult::TPtr& ev) { - if (ShouldIgnoreOldCoordinator(ev)) { + if (ShouldIgnoreOldCoordinator(ev)) { return; } auto& checkpoint = ev->Get()->Checkpoint; - std::vector<ui64> taskIds; - size_t taskIdsSize = 1; - if (TaskLoadPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN) { - taskIds = TaskIdsFromLoadPlan(TaskLoadPlan); - taskIdsSize = taskIds.size(); - } - - if (!ev->Get()->Issues.Empty()) { - LOG_E("[Checkpoint " << MakeStringForLog(checkpoint) - << "] Can't get state from storage: " << ev->Get()->Issues.ToString()); - EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::STORAGE_ERROR), ev->Cookie); - return; - } - - if (ev->Get()->States.size() != taskIdsSize) { - LOG_E("[Checkpoint " << MakeStringForLog(checkpoint) - << "] Got unexpected states count. States count: " << ev->Get()->States.size() - << ". 
Expected states count: " << taskIdsSize); - EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::STORAGE_ERROR), ev->Cookie); + std::vector<ui64> taskIds; + size_t taskIdsSize = 1; + if (TaskLoadPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN) { + taskIds = TaskIdsFromLoadPlan(TaskLoadPlan); + taskIdsSize = taskIds.size(); + } + + if (!ev->Get()->Issues.Empty()) { + LOG_E("[Checkpoint " << MakeStringForLog(checkpoint) + << "] Can't get state from storage: " << ev->Get()->Issues.ToString()); + EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::STORAGE_ERROR), ev->Cookie); + return; + } + + if (ev->Get()->States.size() != taskIdsSize) { + LOG_E("[Checkpoint " << MakeStringForLog(checkpoint) + << "] Got unexpected states count. States count: " << ev->Get()->States.size() + << ". Expected states count: " << taskIdsSize); + EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::STORAGE_ERROR), ev->Cookie); return; } - LOG_I("[Checkpoint " << MakeStringForLog(checkpoint) << "] Got TEvGetTaskStateResult event, restoring state"); + LOG_I("[Checkpoint " << MakeStringForLog(checkpoint) << "] Got TEvGetTaskStateResult event, restoring state"); try { - if (TaskLoadPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_OWN) { - ComputeActor->LoadState(ev->Get()->States[0]); - } else if (TaskLoadPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN) { - const NDqProto::TComputeActorState state = CombineForeignState(TaskLoadPlan, ev->Get()->States, taskIds); - ComputeActor->LoadState(state); - } else { - Y_FAIL("Unprocessed state type %s (%d)", - NDqProto::NDqStateLoadPlan::EStateType_Name(TaskLoadPlan.GetStateType()).c_str(), - static_cast<int>(TaskLoadPlan.GetStateType())); - } + if (TaskLoadPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_OWN) { + ComputeActor->LoadState(ev->Get()->States[0]); + } else if (TaskLoadPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN) { + const NDqProto::TComputeActorState state = CombineForeignState(TaskLoadPlan, ev->Get()->States, taskIds); + ComputeActor->LoadState(state); + } else { + Y_FAIL("Unprocessed state type %s (%d)", + NDqProto::NDqStateLoadPlan::EStateType_Name(TaskLoadPlan.GetStateType()).c_str(), + static_cast<int>(TaskLoadPlan.GetStateType())); + } } catch (const std::exception& e) { - LOG_E("[Checkpoint " << MakeStringForLog(checkpoint) << "] Failed to load state: " << e.what()); - EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::INTERNAL_ERROR), ev->Cookie); - LOG_I("[Checkpoint " << MakeStringForLog(checkpoint) << "] Checkpoint state restoration aborted"); + LOG_E("[Checkpoint " << MakeStringForLog(checkpoint) << "] Failed to load state: " << e.what()); + EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::INTERNAL_ERROR), ev->Cookie); + LOG_I("[Checkpoint " << MakeStringForLog(checkpoint) << "] Checkpoint state restoration aborted"); return; } - EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::OK), ev->Cookie); - LOG_I("[Checkpoint " << 
MakeStringForLog(checkpoint) << "] Checkpoint state restored"); -} - -void TDqComputeActorCheckpoints::Handle(TEvDqCompute::TEvRun::TPtr& ev) { - EventsQueue.OnEventReceived(ev); + EventsQueue.Send(MakeHolder<TEvDqCompute::TEvRestoreFromCheckpointResult>(checkpoint, Task.GetId(), NDqProto::TEvRestoreFromCheckpointResult::OK), ev->Cookie); + LOG_I("[Checkpoint " << MakeStringForLog(checkpoint) << "] Checkpoint state restored"); } +void TDqComputeActorCheckpoints::Handle(TEvDqCompute::TEvRun::TPtr& ev) { + EventsQueue.OnEventReceived(ev); +} + void TDqComputeActorCheckpoints::Handle(TEvDqCompute::TEvCommitState::TPtr& ev) { - if (ShouldIgnoreOldCoordinator(ev)) { - return; - } - - if (!EventsQueue.OnEventReceived(ev)) { + if (ShouldIgnoreOldCoordinator(ev)) { return; } + if (!EventsQueue.OnEventReceived(ev)) { + return; + } + // No actual commit at the moment: will be done in further commits auto checkpoint = ev->Get()->Record.GetCheckpoint(); - ComputeActor->CommitState(checkpoint); - EventsQueue.Send(new TEvDqCompute::TEvStateCommitted(checkpoint.GetId(), checkpoint.GetGeneration(), Task.GetId()), ev->Cookie); + ComputeActor->CommitState(checkpoint); + EventsQueue.Send(new TEvDqCompute::TEvStateCommitted(checkpoint.GetId(), checkpoint.GetGeneration(), Task.GetId()), ev->Cookie); } void TDqComputeActorCheckpoints::Handle(NActors::TEvents::TEvPoison::TPtr&) { @@ -348,38 +348,38 @@ void TDqComputeActorCheckpoints::Handle(NActors::TEvents::TEvPoison::TPtr&) { PassAway(); } -void TDqComputeActorCheckpoints::Handle(NActors::TEvInterconnect::TEvNodeDisconnected::TPtr& ev) { - LOG_I("Handle disconnected node " << ev->Get()->NodeId); - EventsQueue.HandleNodeDisconnected(ev->Get()->NodeId); -} - -void TDqComputeActorCheckpoints::Handle(NActors::TEvInterconnect::TEvNodeConnected::TPtr& ev) { - LOG_D("Handle connected node " << ev->Get()->NodeId); - EventsQueue.HandleNodeConnected(ev->Get()->NodeId); -} - -void TDqComputeActorCheckpoints::Handle(TEvRetryQueuePrivate::TEvRetry::TPtr& ev) { - Y_UNUSED(ev); - EventsQueue.Retry(); -} - +void TDqComputeActorCheckpoints::Handle(NActors::TEvInterconnect::TEvNodeDisconnected::TPtr& ev) { + LOG_I("Handle disconnected node " << ev->Get()->NodeId); + EventsQueue.HandleNodeDisconnected(ev->Get()->NodeId); +} + +void TDqComputeActorCheckpoints::Handle(NActors::TEvInterconnect::TEvNodeConnected::TPtr& ev) { + LOG_D("Handle connected node " << ev->Get()->NodeId); + EventsQueue.HandleNodeConnected(ev->Get()->NodeId); +} + +void TDqComputeActorCheckpoints::Handle(TEvRetryQueuePrivate::TEvRetry::TPtr& ev) { + Y_UNUSED(ev); + EventsQueue.Retry(); +} + bool TDqComputeActorCheckpoints::HasPendingCheckpoint() const { - return PendingCheckpoint; -} - -bool TDqComputeActorCheckpoints::ComputeActorStateSaved() const { - return PendingCheckpoint && PendingCheckpoint.SavedComputeActorState; + return PendingCheckpoint; } +bool TDqComputeActorCheckpoints::ComputeActorStateSaved() const { + return PendingCheckpoint && PendingCheckpoint.SavedComputeActorState; +} + void TDqComputeActorCheckpoints::DoCheckpoint() { Y_VERIFY(CheckpointCoordinator); - Y_VERIFY(PendingCheckpoint); + Y_VERIFY(PendingCheckpoint); LOG_CP_I("Performing task checkpoint"); if (SaveState()) { LOG_CP_D("Injecting checkpoint barrier to outputs"); - ComputeActor->InjectBarrierToOutputs(*PendingCheckpoint.Checkpoint); - TryToSavePendingCheckpoint(); + ComputeActor->InjectBarrierToOutputs(*PendingCheckpoint.Checkpoint); + TryToSavePendingCheckpoint(); } } @@ -388,23 +388,23 @@ bool 
TDqComputeActorCheckpoints::SaveState() { LOG_CP_D("Saving task state"); try { - Y_VERIFY(!PendingCheckpoint.SavedComputeActorState); - PendingCheckpoint.SavedComputeActorState = true; - ComputeActor->SaveState(*PendingCheckpoint.Checkpoint, PendingCheckpoint.ComputeActorState); + Y_VERIFY(!PendingCheckpoint.SavedComputeActorState); + PendingCheckpoint.SavedComputeActorState = true; + ComputeActor->SaveState(*PendingCheckpoint.Checkpoint, PendingCheckpoint.ComputeActorState); } catch (const std::exception& e) { - PendingCheckpoint.Clear(); + PendingCheckpoint.Clear(); LOG_CP_E("Failed to save state: " << e.what()); auto resultEv = MakeHolder<TEvDqCompute::TEvSaveTaskStateResult>(); - resultEv->Record.MutableCheckpoint()->CopyFrom(*PendingCheckpoint.Checkpoint); + resultEv->Record.MutableCheckpoint()->CopyFrom(*PendingCheckpoint.Checkpoint); resultEv->Record.SetTaskId(Task.GetId()); resultEv->Record.SetStatus(NDqProto::TEvSaveTaskStateResult::INTERNAL_ERROR); - EventsQueue.Send(std::move(resultEv)); + EventsQueue.Send(std::move(resultEv)); return false; } - LOG_CP_D("Compute actor state saved"); + LOG_CP_D("Compute actor state saved"); return true; } @@ -412,92 +412,92 @@ void TDqComputeActorCheckpoints::RegisterCheckpoint(const NDqProto::TCheckpoint& if (!PendingCheckpoint) { PendingCheckpoint = checkpoint; } else { - YQL_ENSURE(PendingCheckpoint.Checkpoint->GetGeneration() == checkpoint.GetGeneration()); - YQL_ENSURE(PendingCheckpoint.Checkpoint->GetId() == checkpoint.GetId()); + YQL_ENSURE(PendingCheckpoint.Checkpoint->GetGeneration() == checkpoint.GetGeneration()); + YQL_ENSURE(PendingCheckpoint.Checkpoint->GetId() == checkpoint.GetId()); } LOG_CP_I("Got checkpoint barrier from channel " << channelId); - ComputeActor->ResumeExecution(); -} - -void TDqComputeActorCheckpoints::OnSinkStateSaved(NDqProto::TSinkState&& state, ui64 outputIndex, const NDqProto::TCheckpoint& checkpoint) { - Y_VERIFY(CheckpointCoordinator); - Y_VERIFY(checkpoint.GetGeneration() <= CheckpointCoordinator->Generation); - if (checkpoint.GetGeneration() < CheckpointCoordinator->Generation) { - LOG_D("Ignoring sink[" << outputIndex << "] state saved event from previous coordinator: " - << checkpoint.GetGeneration() << " < " << CheckpointCoordinator->Generation); - return; - } - Y_VERIFY(PendingCheckpoint); - Y_VERIFY(PendingCheckpoint.Checkpoint->GetId() == checkpoint.GetId(), - "Expected pending checkpoint id %lu, but got %lu", PendingCheckpoint.Checkpoint->GetId(), checkpoint.GetId()); - for (const NDqProto::TSinkState& sinkState : PendingCheckpoint.ComputeActorState.GetSinks()) { - Y_VERIFY(sinkState.GetOutputIndex() != outputIndex, "Double save sink[%lu] state", outputIndex); - } - - NDqProto::TSinkState* sinkState = PendingCheckpoint.ComputeActorState.AddSinks(); - *sinkState = std::move(state); - sinkState->SetOutputIndex(outputIndex); // Set index explicitly to avoid errors - ++PendingCheckpoint.SavedSinkStatesCount; - - TryToSavePendingCheckpoint(); -} - -void TDqComputeActorCheckpoints::TryToSavePendingCheckpoint() { - Y_VERIFY(PendingCheckpoint); - if (PendingCheckpoint.IsReady()) { - auto saveTaskStateRequest = MakeHolder<TEvDqCompute::TEvSaveTaskState>(GraphId, Task.GetId(), *PendingCheckpoint.Checkpoint); - saveTaskStateRequest->State.Swap(&PendingCheckpoint.ComputeActorState); - Send(CheckpointStorage, std::move(saveTaskStateRequest)); - - LOG_CP_I("Task checkpoint is done"); - PendingCheckpoint.Clear(); - } -} - -TDqComputeActorCheckpoints::TPendingCheckpoint& 
TDqComputeActorCheckpoints::TPendingCheckpoint::operator=(const NDqProto::TCheckpoint& checkpoint) { - Y_VERIFY(!Checkpoint); - Checkpoint = checkpoint; - return *this; -} - -void TDqComputeActorCheckpoints::TPendingCheckpoint::Clear() { - Checkpoint = Nothing(); - SavedComputeActorState = false; - SavedSinkStatesCount = 0; - ComputeActorState.Clear(); -} - -size_t TDqComputeActorCheckpoints::TPendingCheckpoint::GetSinksCount(const NDqProto::TDqTask& task) { - size_t sinksCount = 0; - for (int outputIndex = 0, outputsCount = task.OutputsSize(); outputIndex < outputsCount; ++outputIndex) { - if (task.GetOutputs(outputIndex).HasSink()) { - ++sinksCount; - } - } - return sinksCount; -} - -void TDqComputeActorCheckpoints::PassAway() { - EventsQueue.Unsubscribe(); - NActors::TActor<TDqComputeActorCheckpoints>::PassAway(); -} - -static bool IsInfiniteSourceType(const TString& sourceType) { - return sourceType == "PqSource"; -} - -NDqProto::ECheckpointingMode GetTaskCheckpointingMode(const NDqProto::TDqTask& task) { - for (const auto& input : task.GetInputs()) { - if (const TString& srcType = input.GetSource().GetType(); srcType && IsInfiniteSourceType(srcType)) { - return NDqProto::CHECKPOINTING_MODE_DEFAULT; - } - for (const auto& channel : input.GetChannels()) { - if (channel.GetCheckpointingMode() != NDqProto::CHECKPOINTING_MODE_DISABLED) { - return NDqProto::CHECKPOINTING_MODE_DEFAULT; - } - } - } - return NDqProto::CHECKPOINTING_MODE_DISABLED; -} - + ComputeActor->ResumeExecution(); +} + +void TDqComputeActorCheckpoints::OnSinkStateSaved(NDqProto::TSinkState&& state, ui64 outputIndex, const NDqProto::TCheckpoint& checkpoint) { + Y_VERIFY(CheckpointCoordinator); + Y_VERIFY(checkpoint.GetGeneration() <= CheckpointCoordinator->Generation); + if (checkpoint.GetGeneration() < CheckpointCoordinator->Generation) { + LOG_D("Ignoring sink[" << outputIndex << "] state saved event from previous coordinator: " + << checkpoint.GetGeneration() << " < " << CheckpointCoordinator->Generation); + return; + } + Y_VERIFY(PendingCheckpoint); + Y_VERIFY(PendingCheckpoint.Checkpoint->GetId() == checkpoint.GetId(), + "Expected pending checkpoint id %lu, but got %lu", PendingCheckpoint.Checkpoint->GetId(), checkpoint.GetId()); + for (const NDqProto::TSinkState& sinkState : PendingCheckpoint.ComputeActorState.GetSinks()) { + Y_VERIFY(sinkState.GetOutputIndex() != outputIndex, "Double save sink[%lu] state", outputIndex); + } + + NDqProto::TSinkState* sinkState = PendingCheckpoint.ComputeActorState.AddSinks(); + *sinkState = std::move(state); + sinkState->SetOutputIndex(outputIndex); // Set index explicitly to avoid errors + ++PendingCheckpoint.SavedSinkStatesCount; + + TryToSavePendingCheckpoint(); +} + +void TDqComputeActorCheckpoints::TryToSavePendingCheckpoint() { + Y_VERIFY(PendingCheckpoint); + if (PendingCheckpoint.IsReady()) { + auto saveTaskStateRequest = MakeHolder<TEvDqCompute::TEvSaveTaskState>(GraphId, Task.GetId(), *PendingCheckpoint.Checkpoint); + saveTaskStateRequest->State.Swap(&PendingCheckpoint.ComputeActorState); + Send(CheckpointStorage, std::move(saveTaskStateRequest)); + + LOG_CP_I("Task checkpoint is done"); + PendingCheckpoint.Clear(); + } +} + +TDqComputeActorCheckpoints::TPendingCheckpoint& TDqComputeActorCheckpoints::TPendingCheckpoint::operator=(const NDqProto::TCheckpoint& checkpoint) { + Y_VERIFY(!Checkpoint); + Checkpoint = checkpoint; + return *this; +} + +void TDqComputeActorCheckpoints::TPendingCheckpoint::Clear() { + Checkpoint = Nothing(); + SavedComputeActorState = false; + 
SavedSinkStatesCount = 0; + ComputeActorState.Clear(); +} + +size_t TDqComputeActorCheckpoints::TPendingCheckpoint::GetSinksCount(const NDqProto::TDqTask& task) { + size_t sinksCount = 0; + for (int outputIndex = 0, outputsCount = task.OutputsSize(); outputIndex < outputsCount; ++outputIndex) { + if (task.GetOutputs(outputIndex).HasSink()) { + ++sinksCount; + } + } + return sinksCount; +} + +void TDqComputeActorCheckpoints::PassAway() { + EventsQueue.Unsubscribe(); + NActors::TActor<TDqComputeActorCheckpoints>::PassAway(); +} + +static bool IsInfiniteSourceType(const TString& sourceType) { + return sourceType == "PqSource"; +} + +NDqProto::ECheckpointingMode GetTaskCheckpointingMode(const NDqProto::TDqTask& task) { + for (const auto& input : task.GetInputs()) { + if (const TString& srcType = input.GetSource().GetType(); srcType && IsInfiniteSourceType(srcType)) { + return NDqProto::CHECKPOINTING_MODE_DEFAULT; + } + for (const auto& channel : input.GetChannels()) { + if (channel.GetCheckpointingMode() != NDqProto::CHECKPOINTING_MODE_DISABLED) { + return NDqProto::CHECKPOINTING_MODE_DEFAULT; + } + } + } + return NDqProto::CHECKPOINTING_MODE_DISABLED; +} + } // namespace NYql::NDq diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_checkpoints.h b/ydb/library/yql/dq/actors/compute/dq_compute_actor_checkpoints.h index 27fd851338d..da7e617cf3a 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_checkpoints.h +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_checkpoints.h @@ -1,29 +1,29 @@ #pragma once #include "dq_compute_actor.h" -#include "dq_compute_actor_sinks.h" -#include "retry_queue.h" +#include "dq_compute_actor_sinks.h" +#include "retry_queue.h" #include <ydb/library/yql/dq/common/dq_common.h> - + #include <library/cpp/actors/core/log.h> -#include <util/generic/ptr.h> - -#include <algorithm> -#include <deque> -#include <type_traits> - -namespace NYql::NDqProto { -enum ECheckpointingMode; -} // namespace NYql::NDqProto - +#include <util/generic/ptr.h> + +#include <algorithm> +#include <deque> +#include <type_traits> + +namespace NYql::NDqProto { +enum ECheckpointingMode; +} // namespace NYql::NDqProto + namespace NYql::NDq { -NDqProto::ECheckpointingMode GetTaskCheckpointingMode(const NDqProto::TDqTask& task); - -class TDqComputeActorCheckpoints : public NActors::TActor<TDqComputeActorCheckpoints> -{ +NDqProto::ECheckpointingMode GetTaskCheckpointingMode(const NDqProto::TDqTask& task); + +class TDqComputeActorCheckpoints : public NActors::TActor<TDqComputeActorCheckpoints> +{ struct TCheckpointCoordinatorId { NActors::TActorId ActorId; ui64 Generation; @@ -34,69 +34,69 @@ class TDqComputeActorCheckpoints : public NActors::TActor<TDqComputeActorCheckpo } }; - struct TPendingCheckpoint { - TPendingCheckpoint(const NDqProto::TDqTask& task) - : SinksCount(GetSinksCount(task)) - { - } - - // New checkpoint (clears previously saved data). 
- TPendingCheckpoint& operator=(const NDqProto::TCheckpoint& checkpoint); - - operator bool() const { - return Checkpoint.Defined(); - } - - void Clear(); - - bool IsReady() const { - Y_VERIFY(Checkpoint); - return SavedComputeActorState && SinksCount == SavedSinkStatesCount; - } - - static size_t GetSinksCount(const NDqProto::TDqTask& task); - - const size_t SinksCount; - TMaybe<NDqProto::TCheckpoint> Checkpoint; - NDqProto::TComputeActorState ComputeActorState; - size_t SavedSinkStatesCount = 0; - bool SavedComputeActorState = false; - }; - + struct TPendingCheckpoint { + TPendingCheckpoint(const NDqProto::TDqTask& task) + : SinksCount(GetSinksCount(task)) + { + } + + // New checkpoint (clears previously saved data). + TPendingCheckpoint& operator=(const NDqProto::TCheckpoint& checkpoint); + + operator bool() const { + return Checkpoint.Defined(); + } + + void Clear(); + + bool IsReady() const { + Y_VERIFY(Checkpoint); + return SavedComputeActorState && SinksCount == SavedSinkStatesCount; + } + + static size_t GetSinksCount(const NDqProto::TDqTask& task); + + const size_t SinksCount; + TMaybe<NDqProto::TCheckpoint> Checkpoint; + NDqProto::TComputeActorState ComputeActorState; + size_t SavedSinkStatesCount = 0; + bool SavedComputeActorState = false; + }; + public: static constexpr char ActorName[] = "DQ_COMPUTE_ACTOR_CHECKPOINTS"; struct ICallbacks { [[nodiscard]] virtual bool ReadyToCheckpoint() const = 0; - virtual void SaveState(const NDqProto::TCheckpoint& checkpoint, NDqProto::TComputeActorState& state) const = 0; - virtual void CommitState(const NDqProto::TCheckpoint& checkpoint) = 0; - virtual void InjectBarrierToOutputs(const NDqProto::TCheckpoint& checkpoint) = 0; + virtual void SaveState(const NDqProto::TCheckpoint& checkpoint, NDqProto::TComputeActorState& state) const = 0; + virtual void CommitState(const NDqProto::TCheckpoint& checkpoint) = 0; + virtual void InjectBarrierToOutputs(const NDqProto::TCheckpoint& checkpoint) = 0; virtual void ResumeInputs() = 0; virtual void Start() = 0; virtual void Stop() = 0; virtual void ResumeExecution() = 0; - virtual void LoadState(const NDqProto::TComputeActorState& state) = 0; + virtual void LoadState(const NDqProto::TComputeActorState& state) = 0; - virtual ~ICallbacks() = default; + virtual ~ICallbacks() = default; }; - TDqComputeActorCheckpoints(const TTxId& txId, NDqProto::TDqTask task, ICallbacks* computeActor); - void Init(NActors::TActorId computeActorId, NActors::TActorId checkpointsId); + TDqComputeActorCheckpoints(const TTxId& txId, NDqProto::TDqTask task, ICallbacks* computeActor); + void Init(NActors::TActorId computeActorId, NActors::TActorId checkpointsId); [[nodiscard]] bool HasPendingCheckpoint() const; - bool ComputeActorStateSaved() const; + bool ComputeActorStateSaved() const; void DoCheckpoint(); bool SaveState(); void RegisterCheckpoint(const NDqProto::TCheckpoint& checkpoint, ui64 channelId); - // Sink actor support. - void OnSinkStateSaved(NDqProto::TSinkState&& state, ui64 outputIndex, const NDqProto::TCheckpoint& checkpoint); - - void TryToSavePendingCheckpoint(); - + // Sink actor support. 
+ void OnSinkStateSaved(NDqProto::TSinkState&& state, ui64 outputIndex, const NDqProto::TCheckpoint& checkpoint); + + void TryToSavePendingCheckpoint(); + private: STATEFN(StateFunc); void Handle(TEvDqCompute::TEvNewCheckpointCoordinator::TPtr&); @@ -105,34 +105,34 @@ private: void Handle(TEvDqCompute::TEvCommitState::TPtr&); void Handle(TEvDqCompute::TEvRestoreFromCheckpoint::TPtr&); void Handle(TEvDqCompute::TEvGetTaskStateResult::TPtr&); - void Handle(TEvDqCompute::TEvRun::TPtr& ev); + void Handle(TEvDqCompute::TEvRun::TPtr& ev); void Handle(NActors::TEvents::TEvPoison::TPtr&); - void Handle(NActors::TEvInterconnect::TEvNodeDisconnected::TPtr& ev); - void Handle(NActors::TEvInterconnect::TEvNodeConnected::TPtr& ev); - void Handle(TEvRetryQueuePrivate::TEvRetry::TPtr& ev); + void Handle(NActors::TEvInterconnect::TEvNodeDisconnected::TPtr& ev); + void Handle(NActors::TEvInterconnect::TEvNodeConnected::TPtr& ev); + void Handle(TEvRetryQueuePrivate::TEvRetry::TPtr& ev); - void PassAway() override; - - // Validates generation and returns true if it is from old coordinator. + void PassAway() override; + + // Validates generation and returns true if it is from old coordinator. template <class E> - bool ShouldIgnoreOldCoordinator(const E& ev, bool verifyOnGenerationFromFuture = true); + bool ShouldIgnoreOldCoordinator(const E& ev, bool verifyOnGenerationFromFuture = true); private: - const TTxId TxId; + const TTxId TxId; const NDqProto::TDqTask Task; - const bool IngressTask; + const bool IngressTask; const NActors::TActorId CheckpointStorage; TString GraphId; - ICallbacks* ComputeActor = nullptr; + ICallbacks* ComputeActor = nullptr; TMaybe<TCheckpointCoordinatorId> CheckpointCoordinator; - TPendingCheckpoint PendingCheckpoint; - TRetryEventsQueue EventsQueue; - - // Restore - NYql::NDqProto::NDqStateLoadPlan::TTaskPlan TaskLoadPlan; + TPendingCheckpoint PendingCheckpoint; + TRetryEventsQueue EventsQueue; + + // Restore + NYql::NDqProto::NDqStateLoadPlan::TTaskPlan TaskLoadPlan; }; } // namespace NYql::NDq diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h b/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h index deaadefb42c..a4a5ec4dc32 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h @@ -3,8 +3,8 @@ #include "dq_compute_actor.h" #include "dq_compute_actor_channels.h" #include "dq_compute_actor_checkpoints.h" -#include "dq_compute_actor_sinks.h" -#include "dq_compute_actor_sources.h" +#include "dq_compute_actor_sinks.h" +#include "dq_compute_actor_sources.h" #include "dq_compute_issues_buffer.h" #include <ydb/core/scheme/scheme_tabledefs.h> // TODO: TTableId @@ -49,19 +49,19 @@ namespace NYql { namespace NDq { -enum : ui64 { - ComputeActorNonProtobufStateVersion = 1, - ComputeActorCurrentStateVersion = 2, -}; - +enum : ui64 { + ComputeActorNonProtobufStateVersion = 1, + ComputeActorCurrentStateVersion = 2, +}; + constexpr ui32 IssuesBufferSize = 16; template<typename TDerived> class TDqComputeActorBase : public NActors::TActorBootstrapped<TDerived> , public TDqComputeActorChannels::ICallbacks , public TDqComputeActorCheckpoints::ICallbacks - , public IDqSourceActor::ICallbacks - , public IDqSinkActor::ICallbacks + , public IDqSourceActor::ICallbacks + , public IDqSinkActor::ICallbacks { protected: enum EEvWakeupTag : ui64 { @@ -77,7 +77,7 @@ public: CA_LOG_D("Start compute actor " << this->SelfId() << ", task: " << Task.GetId()); Channels = new 
TDqComputeActorChannels(this->SelfId(), TxId, Task, !RuntimeSettings.FailOnUndelivery, - RuntimeSettings.StatsMode, MemoryLimits.ChannelBufferSize, this, this->GetActivityType()); + RuntimeSettings.StatsMode, MemoryLimits.ChannelBufferSize, this, this->GetActivityType()); this->RegisterWithSameMailbox(Channels); if (RuntimeSettings.Timeout) { @@ -125,9 +125,9 @@ protected: , RuntimeSettings(settings) , MemoryLimits(memoryLimits) , CanAllocateExtraMemory(RuntimeSettings.ExtraMemoryAllocationPool != 0 && MemoryLimits.AllocateMemoryFn) - , SourceActorFactory(std::move(sourceActorFactory)) - , SinkActorFactory(std::move(sinkActorFactory)) - , CheckpointingMode(GetTaskCheckpointingMode(Task)) + , SourceActorFactory(std::move(sourceActorFactory)) + , SinkActorFactory(std::move(sinkActorFactory)) + , CheckpointingMode(GetTaskCheckpointingMode(Task)) , State(Task.GetCreateSuspended() ? NDqProto::COMPUTE_STATE_UNKNOWN : NDqProto::COMPUTE_STATE_EXECUTING) , Running(!Task.GetCreateSuspended()) { @@ -149,8 +149,8 @@ protected: , RuntimeSettings(settings) , MemoryLimits(memoryLimits) , CanAllocateExtraMemory(RuntimeSettings.ExtraMemoryAllocationPool != 0 && MemoryLimits.AllocateMemoryFn) - , SourceActorFactory(std::move(sourceActorFactory)) - , SinkActorFactory(std::move(sinkActorFactory)) + , SourceActorFactory(std::move(sourceActorFactory)) + , SinkActorFactory(std::move(sinkActorFactory)) , State(Task.GetCreateSuspended() ? NDqProto::COMPUTE_STATE_UNKNOWN : NDqProto::COMPUTE_STATE_EXECUTING) , Running(!Task.GetCreateSuspended()) { @@ -170,16 +170,16 @@ protected: } } - TString GetEventTypeString(TAutoPtr<::NActors::IEventHandle>& ev) { - try { - if (NActors::IEventBase* eventBase = ev->GetBase()) { - return eventBase->ToStringHeader(); - } - } catch (...) { - } - return "Unknown type"; - } - + TString GetEventTypeString(TAutoPtr<::NActors::IEventHandle>& ev) { + try { + if (NActors::IEventBase* eventBase = ev->GetBase()) { + return eventBase->ToStringHeader(); + } + } catch (...) 
{ + } + return "Unknown type"; + } + STFUNC(StateFuncBase) { const bool reportTime = this->CurrentStateFunc() == &TDqComputeActorBase::StateFuncBase; @@ -198,10 +198,10 @@ protected: FFunc(TEvDqCompute::TEvInjectCheckpoint::EventType, Checkpoints->Receive); FFunc(TEvDqCompute::TEvCommitState::EventType, Checkpoints->Receive); FFunc(TEvDqCompute::TEvRestoreFromCheckpoint::EventType, Checkpoints->Receive); - hFunc(NActors::TEvInterconnect::TEvNodeDisconnected, HandleExecuteBase); - hFunc(NActors::TEvInterconnect::TEvNodeConnected, HandleExecuteBase); + hFunc(NActors::TEvInterconnect::TEvNodeDisconnected, HandleExecuteBase); + hFunc(NActors::TEvInterconnect::TEvNodeConnected, HandleExecuteBase); default: { - CA_LOG_C("TDqComputeActorBase, unexpected event: " << ev->GetTypeRewrite() << " (" << GetEventTypeString(ev) << ")"); + CA_LOG_C("TDqComputeActorBase, unexpected event: " << ev->GetTypeRewrite() << " (" << GetEventTypeString(ev) << ")"); InternalError(TIssuesIds::DEFAULT_ERROR, "Unexpected event"); } } @@ -224,11 +224,11 @@ protected: auto guard = BindAllocator(); auto* alloc = guard.GetMutex(); - if (State == NDqProto::COMPUTE_STATE_FINISHED) { - DoHandleChannelsAfterFinishImpl(); - } else { - DoExecuteImpl(); - } + if (State == NDqProto::COMPUTE_STATE_FINISHED) { + DoHandleChannelsAfterFinishImpl(); + } else { + DoExecuteImpl(); + } if (alloc->GetAllocated() - alloc->GetUsed() > MemoryLimits.MinMemFreeSize) { alloc->ReleaseFreePages(); @@ -258,7 +258,7 @@ protected: auto sourcesState = GetSourcesState(); PollSourceActors(); - ERunStatus status = TaskRunner->Run(); + ERunStatus status = TaskRunner->Run(); CA_LOG_D("Resume execution, run status: " << status); @@ -266,25 +266,25 @@ protected: PollSources(std::move(sourcesState)); } - if ((status == ERunStatus::PendingInput || status == ERunStatus::Finished) && Checkpoints && Checkpoints->HasPendingCheckpoint() && !Checkpoints->ComputeActorStateSaved() && ReadyToCheckpoint()) { - Checkpoints->DoCheckpoint(); - } - - ProcessOutputsImpl(status); - } - - void DoHandleChannelsAfterFinishImpl() { - Y_VERIFY(Checkpoints); - - if (Checkpoints->HasPendingCheckpoint() && !Checkpoints->ComputeActorStateSaved() && ReadyToCheckpoint()) { + if ((status == ERunStatus::PendingInput || status == ERunStatus::Finished) && Checkpoints && Checkpoints->HasPendingCheckpoint() && !Checkpoints->ComputeActorStateSaved() && ReadyToCheckpoint()) { Checkpoints->DoCheckpoint(); } - // Send checkpoints to output channels. - ProcessOutputsImpl(ERunStatus::Finished); - } - - void ProcessOutputsImpl(ERunStatus status) { + ProcessOutputsImpl(status); + } + + void DoHandleChannelsAfterFinishImpl() { + Y_VERIFY(Checkpoints); + + if (Checkpoints->HasPendingCheckpoint() && !Checkpoints->ComputeActorStateSaved() && ReadyToCheckpoint()) { + Checkpoints->DoCheckpoint(); + } + + // Send checkpoints to output channels. 
+ ProcessOutputsImpl(ERunStatus::Finished); + } + + void ProcessOutputsImpl(ERunStatus status) { ProcessOutputsState.LastRunStatus = status; if (ProcessOutputsState.Inflight == 0) { @@ -292,7 +292,7 @@ protected: } for (auto& entry : OutputChannelsMap) { - const ui64 channelId = entry.first; + const ui64 channelId = entry.first; TOutputChannelInfo& outputChannel = entry.second; if (!outputChannel.HasPeer) { @@ -307,7 +307,7 @@ protected: continue; } - if (!outputChannel.Finished || Checkpoints) { + if (!outputChannel.Finished || Checkpoints) { if (Channels->CanSendChannelData(channelId)) { auto peerState = Channels->GetOutputChannelInFlightState(channelId); DrainOutputChannel(outputChannel, peerState); @@ -320,12 +320,12 @@ protected: } } - for (auto& entry : SinksMap) { - const ui64 outputIndex = entry.first; - TSinkInfo& sinkInfo = entry.second; + for (auto& entry : SinksMap) { + const ui64 outputIndex = entry.first; + TSinkInfo& sinkInfo = entry.second; DrainSink(outputIndex, sinkInfo); - } - + } + CheckRunStatus(); } @@ -337,7 +337,7 @@ protected: auto status = ProcessOutputsState.LastRunStatus; if (status == ERunStatus::PendingInput && ProcessOutputsState.AllOutputsFinished) { - CA_LOG_D("All outputs have been finished. Consider finished"); + CA_LOG_D("All outputs have been finished. Consider finished"); status = ERunStatus::Finished; } @@ -367,8 +367,8 @@ protected: return; } - // Handle finishing of our task. - if (status == ERunStatus::Finished && State != NDqProto::COMPUTE_STATE_FINISHED) { + // Handle finishing of our task. + if (status == ERunStatus::Finished && State != NDqProto::COMPUTE_STATE_FINISHED) { if (ProcessOutputsState.HasDataToSend || !ProcessOutputsState.ChannelsReady) { CA_LOG_D("Continue execution, either output buffers are not empty or not all channels are ready" << ", hasDataToSend: " << ProcessOutputsState.HasDataToSend << ", channelsReady: " << ProcessOutputsState.ChannelsReady); @@ -379,7 +379,7 @@ protected: } if (Channels->CheckInFlight("Tasks execution finished")) { State = NDqProto::COMPUTE_STATE_FINISHED; - CA_LOG_D("Compute state finished. All channels finished"); + CA_LOG_D("Compute state finished. 
All channels finished"); ReportStateAndMaybeDie(TIssue("success")); } } @@ -406,18 +406,18 @@ protected: Checkpoints->Receive(handle, NActors::TActivationContext::AsActorContext()); } - for (auto& [_, source] : SourcesMap) { - if (source.Actor) { - source.SourceActor->PassAway(); - } - } - - for (auto& [_, sink] : SinksMap) { - if (sink.Actor) { - sink.SinkActor->PassAway(); - } - } - + for (auto& [_, source] : SourcesMap) { + if (source.Actor) { + source.SourceActor->PassAway(); + } + } + + for (auto& [_, sink] : SinksMap) { + if (sink.Actor) { + sink.SinkActor->PassAway(); + } + } + for (auto& [_, outputChannel] : OutputChannelsMap) { if (outputChannel.Channel) { outputChannel.Channel->Terminate(); @@ -523,8 +523,8 @@ public: } if (channelData.HasCheckpoint()) { - Y_VERIFY(inputChannel->CheckpointingMode != NDqProto::CHECKPOINTING_MODE_DISABLED); - Y_VERIFY(Checkpoints); + Y_VERIFY(inputChannel->CheckpointingMode != NDqProto::CHECKPOINTING_MODE_DISABLED); + Y_VERIFY(Checkpoints); const auto& checkpoint = channelData.GetCheckpoint(); inputChannel->Pause(checkpoint); Checkpoints->RegisterCheckpoint(checkpoint, channelData.GetChannelId()); @@ -566,18 +566,18 @@ public: ContinueExecute(); } - void OnSinkStateSaved(NDqProto::TSinkState&& state, ui64 outputIndex, const NDqProto::TCheckpoint& checkpoint) override { - Y_VERIFY(Checkpoints); // If we are checkpointing, we must have already constructed "checkpoints" object. - Checkpoints->OnSinkStateSaved(std::move(state), outputIndex, checkpoint); - } - + void OnSinkStateSaved(NDqProto::TSinkState&& state, ui64 outputIndex, const NDqProto::TCheckpoint& checkpoint) override { + Y_VERIFY(Checkpoints); // If we are checkpointing, we must have already constructed "checkpoints" object. + Checkpoints->OnSinkStateSaved(std::move(state), outputIndex, checkpoint); + } + protected: bool ReadyToCheckpoint() const override { for (auto& [id, channelInfo] : InputChannelsMap) { - if (channelInfo.CheckpointingMode == NDqProto::CHECKPOINTING_MODE_DISABLED) { - continue; - } - + if (channelInfo.CheckpointingMode == NDqProto::CHECKPOINTING_MODE_DISABLED) { + continue; + } + if (!channelInfo.IsPaused()) { return false; } @@ -588,38 +588,38 @@ protected: return true; } - void SaveState(const NDqProto::TCheckpoint& checkpoint, NDqProto::TComputeActorState& state) const override { - CA_LOG_D("Save state"); - NDqProto::TMiniKqlProgramState& mkqlProgramState = *state.MutableMiniKqlProgram(); - mkqlProgramState.SetRuntimeVersion(NDqProto::RUNTIME_VERSION_YQL_1_0); - NDqProto::TStateData::TData& data = *mkqlProgramState.MutableData()->MutableStateData(); - data.SetVersion(ComputeActorCurrentStateVersion); - data.SetBlob(TaskRunner->Save()); - - for (auto& [inputIndex, source] : SourcesMap) { - YQL_ENSURE(source.SourceActor, "Source[" << inputIndex << "] is not created"); - NDqProto::TSourceState& sourceState = *state.AddSources(); - source.SourceActor->SaveState(checkpoint, sourceState); - sourceState.SetInputIndex(inputIndex); - } - } - - void CommitState(const NDqProto::TCheckpoint& checkpoint) override { - CA_LOG_D("Commit state"); - for (auto& [inputIndex, source] : SourcesMap) { - Y_VERIFY(source.SourceActor); - source.SourceActor->CommitState(checkpoint); - } - } - - void InjectBarrierToOutputs(const NDqProto::TCheckpoint& checkpoint) override { - Y_VERIFY(CheckpointingMode != NDqProto::CHECKPOINTING_MODE_DISABLED); + void SaveState(const NDqProto::TCheckpoint& checkpoint, NDqProto::TComputeActorState& state) const override { + CA_LOG_D("Save state"); + 
NDqProto::TMiniKqlProgramState& mkqlProgramState = *state.MutableMiniKqlProgram(); + mkqlProgramState.SetRuntimeVersion(NDqProto::RUNTIME_VERSION_YQL_1_0); + NDqProto::TStateData::TData& data = *mkqlProgramState.MutableData()->MutableStateData(); + data.SetVersion(ComputeActorCurrentStateVersion); + data.SetBlob(TaskRunner->Save()); + + for (auto& [inputIndex, source] : SourcesMap) { + YQL_ENSURE(source.SourceActor, "Source[" << inputIndex << "] is not created"); + NDqProto::TSourceState& sourceState = *state.AddSources(); + source.SourceActor->SaveState(checkpoint, sourceState); + sourceState.SetInputIndex(inputIndex); + } + } + + void CommitState(const NDqProto::TCheckpoint& checkpoint) override { + CA_LOG_D("Commit state"); + for (auto& [inputIndex, source] : SourcesMap) { + Y_VERIFY(source.SourceActor); + source.SourceActor->CommitState(checkpoint); + } + } + + void InjectBarrierToOutputs(const NDqProto::TCheckpoint& checkpoint) override { + Y_VERIFY(CheckpointingMode != NDqProto::CHECKPOINTING_MODE_DISABLED); for (const auto& [id, channelInfo] : OutputChannelsMap) { - channelInfo.Channel->Push(NDqProto::TCheckpoint(checkpoint)); - } - for (const auto& [outputIndex, sink] : SinksMap) { - sink.Sink->Push(NDqProto::TCheckpoint(checkpoint)); + channelInfo.Channel->Push(NDqProto::TCheckpoint(checkpoint)); } + for (const auto& [outputIndex, sink] : SinksMap) { + sink.Sink->Push(NDqProto::TCheckpoint(checkpoint)); + } } void ResumeInputs() override { @@ -628,31 +628,31 @@ protected: } } - void LoadState(const NDqProto::TComputeActorState& state) override { - CA_LOG_D("Load state"); + void LoadState(const NDqProto::TComputeActorState& state) override { + CA_LOG_D("Load state"); auto guard = BindAllocator(); - const NDqProto::TMiniKqlProgramState& mkqlProgramState = state.GetMiniKqlProgram(); - const ui64 version = mkqlProgramState.GetData().GetStateData().GetVersion(); - YQL_ENSURE(version && version <= ComputeActorCurrentStateVersion && version != ComputeActorNonProtobufStateVersion, "Unsupported state version: " << version); - if (version == ComputeActorCurrentStateVersion) { - for (const NDqProto::TSourceState& sourceState : state.GetSources()) { - TSourceInfo* source = SourcesMap.FindPtr(sourceState.GetInputIndex()); - YQL_ENSURE(source, "Failed to load state. Source with input index " << sourceState.GetInputIndex() << " was not found"); - YQL_ENSURE(source->SourceActor, "Source[" << sourceState.GetInputIndex() << "] is not created"); - source->SourceActor->LoadState(sourceState); - } - for (const NDqProto::TSinkState& sinkState : state.GetSinks()) { - TSinkInfo* sink = SinksMap.FindPtr(sinkState.GetOutputIndex()); - YQL_ENSURE(sink, "Failed to load state. 
Sink with input index " << sinkState.GetOutputIndex() << " was not found"); - YQL_ENSURE(sink->SinkActor, "Sink[" << sinkState.GetOutputIndex() << "] is not created"); - sink->SinkActor->LoadState(sinkState); - } - if (const TString& blob = mkqlProgramState.GetData().GetStateData().GetBlob()) { - TaskRunner->Load(blob); - } - return; - } - ythrow yexception() << "Invalid state version " << version; + const NDqProto::TMiniKqlProgramState& mkqlProgramState = state.GetMiniKqlProgram(); + const ui64 version = mkqlProgramState.GetData().GetStateData().GetVersion(); + YQL_ENSURE(version && version <= ComputeActorCurrentStateVersion && version != ComputeActorNonProtobufStateVersion, "Unsupported state version: " << version); + if (version == ComputeActorCurrentStateVersion) { + for (const NDqProto::TSourceState& sourceState : state.GetSources()) { + TSourceInfo* source = SourcesMap.FindPtr(sourceState.GetInputIndex()); + YQL_ENSURE(source, "Failed to load state. Source with input index " << sourceState.GetInputIndex() << " was not found"); + YQL_ENSURE(source->SourceActor, "Source[" << sourceState.GetInputIndex() << "] is not created"); + source->SourceActor->LoadState(sourceState); + } + for (const NDqProto::TSinkState& sinkState : state.GetSinks()) { + TSinkInfo* sink = SinksMap.FindPtr(sinkState.GetOutputIndex()); + YQL_ENSURE(sink, "Failed to load state. Sink with input index " << sinkState.GetOutputIndex() << " was not found"); + YQL_ENSURE(sink->SinkActor, "Sink[" << sinkState.GetOutputIndex() << "] is not created"); + sink->SinkActor->LoadState(sinkState); + } + if (const TString& blob = mkqlProgramState.GetData().GetStateData().GetBlob()) { + TaskRunner->Load(blob); + } + return; + } + ythrow yexception() << "Invalid state version " << version; } void Start() override { @@ -672,50 +672,50 @@ protected: IDqInputChannel::TPtr Channel; bool HasPeer = false; std::optional<NDqProto::TCheckpoint> PendingCheckpoint; - const NDqProto::ECheckpointingMode CheckpointingMode; + const NDqProto::ECheckpointingMode CheckpointingMode; ui64 FreeSpace = 0; explicit TInputChannelInfo(ui64 channelId, NDqProto::ECheckpointingMode checkpointingMode) : ChannelId(channelId) , CheckpointingMode(checkpointingMode) - { - } - + { + } + bool IsPaused() const { return PendingCheckpoint.has_value(); } void Pause(const NDqProto::TCheckpoint& checkpoint) { YQL_ENSURE(!IsPaused()); - YQL_ENSURE(CheckpointingMode != NDqProto::CHECKPOINTING_MODE_DISABLED); + YQL_ENSURE(CheckpointingMode != NDqProto::CHECKPOINTING_MODE_DISABLED); PendingCheckpoint = checkpoint; - Channel->Pause(); + Channel->Pause(); } void Resume() { PendingCheckpoint.reset(); - Channel->Resume(); + Channel->Resume(); } }; - struct TSourceInfo { + struct TSourceInfo { ui64 Index; - IDqSource::TPtr Source; - IDqSourceActor* SourceActor = nullptr; - NActors::IActor* Actor = nullptr; + IDqSource::TPtr Source; + IDqSourceActor* SourceActor = nullptr; + NActors::IActor* Actor = nullptr; TIssuesBuffer IssuesBuffer; - bool Finished = false; + bool Finished = false; i64 FreeSpace = 1; bool PushStarted = false; TSourceInfo(ui64 index) : Index(index), IssuesBuffer(IssuesBufferSize) {} - }; - + }; + struct TOutputChannelInfo { ui64 ChannelId; IDqOutputChannel::TPtr Channel; bool HasPeer = false; - bool Finished = false; // != Channel->IsFinished() // If channel is in finished state, it sends only checkpoints. + bool Finished = false; // != Channel->IsFinished() // If channel is in finished state, it sends only checkpoints. 
bool PopStarted = false; explicit TOutputChannelInfo(ui64 channelId) @@ -729,18 +729,18 @@ protected: THolder<TStats> Stats; }; - struct TSinkInfo { - IDqSink::TPtr Sink; - IDqSinkActor* SinkActor = nullptr; - NActors::IActor* Actor = nullptr; - bool Finished = false; // If sink is in finished state, it receives only checkpoints. + struct TSinkInfo { + IDqSink::TPtr Sink; + IDqSinkActor* SinkActor = nullptr; + NActors::IActor* Actor = nullptr; + bool Finished = false; // If sink is in finished state, it receives only checkpoints. TIssuesBuffer IssuesBuffer; bool PopStarted = false; i64 SinkActorFreeSpaceBeforeSend = 0; TSinkInfo() : IssuesBuffer(IssuesBufferSize) {} - }; - + }; + protected: // virtual methods (TODO: replace with static_cast<TDerived*>(this)->Foo() @@ -877,12 +877,12 @@ protected: void HandleExecuteBase(TEvDqCompute::TEvRun::TPtr& ev) { CA_LOG_D("Got TEvRun from actor " << ev->Sender); Start(); - - // Event from coordinator should be processed to confirm seq no. - TAutoPtr<NActors::IEventHandle> iev(ev.Release()); - if (Checkpoints) { - Checkpoints->Receive(iev, NActors::TActivationContext::AsActorContext()); - } + + // Event from coordinator should be processed to confirm seq no. + TAutoPtr<NActors::IEventHandle> iev(ev.Release()); + if (Checkpoints) { + Checkpoints->Receive(iev, NActors::TActivationContext::AsActorContext()); + } } void HandleExecuteBase(TEvDqCompute::TEvStateRequest::TPtr& ev) { @@ -895,11 +895,11 @@ protected: this->Send(ev->Sender, evState.Release(), NActors::IEventHandle::FlagTrackDelivery, ev->Cookie); } - void HandleExecuteBase(TEvDqCompute::TEvNewCheckpointCoordinator::TPtr& ev) { + void HandleExecuteBase(TEvDqCompute::TEvNewCheckpointCoordinator::TPtr& ev) { if (!Checkpoints) { - Checkpoints = new TDqComputeActorCheckpoints(TxId, Task, this); - Checkpoints->Init(this->SelfId(), this->RegisterWithSameMailbox(Checkpoints)); - Channels->SetCheckpointsSupport(); + Checkpoints = new TDqComputeActorCheckpoints(TxId, Task, this); + Checkpoints->Init(this->SelfId(), this->RegisterWithSameMailbox(Checkpoints)); + Channels->SetCheckpointsSupport(); } TAutoPtr<NActors::IEventHandle> handle = new NActors::IEventHandle(Checkpoints->SelfId(), ev->Sender, ev->Release().Release()); Checkpoints->Receive(handle, NActors::TActivationContext::AsActorContext()); @@ -922,37 +922,37 @@ protected: Terminate(success, message); } - void HandleExecuteBase(NActors::TEvInterconnect::TEvNodeDisconnected::TPtr& ev) { - TAutoPtr<NActors::IEventHandle> iev(ev.Release()); - if (Checkpoints) { - Checkpoints->Receive(iev, NActors::TActivationContext::AsActorContext()); - } - } - - void HandleExecuteBase(NActors::TEvInterconnect::TEvNodeConnected::TPtr& ev) { - TAutoPtr<NActors::IEventHandle> iev(ev.Release()); - if (Checkpoints) { - Checkpoints->Receive(iev, NActors::TActivationContext::AsActorContext()); - } - } - - ui32 AllowedChannelsOvercommit() const { - const auto& fc = GetDqExecutionSettings().FlowControl; - const ui32 allowedOvercommit = (fc.InFlightBytesOvercommit - 1.f) * MemoryLimits.ChannelBufferSize; - return allowedOvercommit; - } - + void HandleExecuteBase(NActors::TEvInterconnect::TEvNodeDisconnected::TPtr& ev) { + TAutoPtr<NActors::IEventHandle> iev(ev.Release()); + if (Checkpoints) { + Checkpoints->Receive(iev, NActors::TActivationContext::AsActorContext()); + } + } + + void HandleExecuteBase(NActors::TEvInterconnect::TEvNodeConnected::TPtr& ev) { + TAutoPtr<NActors::IEventHandle> iev(ev.Release()); + if (Checkpoints) { + Checkpoints->Receive(iev, 
NActors::TActivationContext::AsActorContext()); + } + } + + ui32 AllowedChannelsOvercommit() const { + const auto& fc = GetDqExecutionSettings().FlowControl; + const ui32 allowedOvercommit = (fc.InFlightBytesOvercommit - 1.f) * MemoryLimits.ChannelBufferSize; + return allowedOvercommit; + } + private: virtual void DrainOutputChannel(TOutputChannelInfo& outputChannel, const TDqComputeActorChannels::TPeerState& peerState) { - YQL_ENSURE(!outputChannel.Finished || Checkpoints); + YQL_ENSURE(!outputChannel.Finished || Checkpoints); - const bool wasFinished = outputChannel.Finished; + const bool wasFinished = outputChannel.Finished; auto channelId = outputChannel.Channel->GetChannelId(); - const ui32 allowedOvercommit = AllowedChannelsOvercommit(); + const ui32 allowedOvercommit = AllowedChannelsOvercommit(); - const i64 toSend = peerState.PeerFreeSpace + allowedOvercommit - peerState.InFlightBytes; + const i64 toSend = peerState.PeerFreeSpace + allowedOvercommit - peerState.InFlightBytes; CA_LOG_D("About to drain channelId: " << channelId << ", hasPeer: " << outputChannel.HasPeer @@ -974,7 +974,7 @@ private: } i64 remains = toSend; - while (remains > 0 && (!outputChannel.Finished || Checkpoints)) { + while (remains > 0 && (!outputChannel.Finished || Checkpoints)) { ui32 sent = this->SendChannelDataChunk(outputChannel, remains); if (sent == 0) { break; @@ -1001,9 +1001,9 @@ private: return 0; // channel is empty and not finished yet } } - const bool wasFinished = outputChannel.Finished; + const bool wasFinished = outputChannel.Finished; outputChannel.Finished = channel->IsFinished(); - const bool becameFinished = !wasFinished && outputChannel.Finished; + const bool becameFinished = !wasFinished && outputChannel.Finished; ui32 dataSize = data.GetRaw().size(); ui32 checkpointSize = checkpoint.ByteSize(); @@ -1020,84 +1020,84 @@ private: ResumeInputs(); } - if (hasData || hasCheckpoint || becameFinished) { - Channels->SendChannelData(std::move(channelData)); - return dataSize + checkpointSize; - } - return 0; + if (hasData || hasCheckpoint || becameFinished) { + Channels->SendChannelData(std::move(channelData)); + return dataSize + checkpointSize; + } + return 0; } virtual void DrainSink(ui64 outputIndex, TSinkInfo& sinkInfo) { ProcessOutputsState.AllOutputsFinished &= sinkInfo.Finished; - if (sinkInfo.Finished && !Checkpoints) { + if (sinkInfo.Finished && !Checkpoints) { return; - } - - Y_VERIFY(sinkInfo.Sink); - Y_VERIFY(sinkInfo.SinkActor); - Y_VERIFY(sinkInfo.Actor); - - const ui32 allowedOvercommit = AllowedChannelsOvercommit(); - const i64 sinkActorFreeSpaceBeforeSend = sinkInfo.SinkActor->GetFreeSpace(); - - i64 toSend = sinkActorFreeSpaceBeforeSend + allowedOvercommit; - CA_LOG_D("About to drain sink " << outputIndex - << ". FreeSpace: " << sinkActorFreeSpaceBeforeSend - << ", allowedOvercommit: " << allowedOvercommit - << ", toSend: " << toSend - << ", finished: " << sinkInfo.Sink->IsFinished()); - - i64 sent = 0; - while (toSend > 0 && (!sinkInfo.Finished || Checkpoints)) { - const ui32 sentChunk = SendSinkDataChunk(outputIndex, sinkInfo, toSend); - if (sentChunk == 0) { - break; - } - sent += sentChunk; - toSend = sinkInfo.SinkActor->GetFreeSpace() + allowedOvercommit; - } - - CA_LOG_D("Drain sink " << outputIndex - << ". 
Free space decreased: " << (sinkActorFreeSpaceBeforeSend - sinkInfo.SinkActor->GetFreeSpace()) - << ", sent data from buffer: " << sent); - + } + + Y_VERIFY(sinkInfo.Sink); + Y_VERIFY(sinkInfo.SinkActor); + Y_VERIFY(sinkInfo.Actor); + + const ui32 allowedOvercommit = AllowedChannelsOvercommit(); + const i64 sinkActorFreeSpaceBeforeSend = sinkInfo.SinkActor->GetFreeSpace(); + + i64 toSend = sinkActorFreeSpaceBeforeSend + allowedOvercommit; + CA_LOG_D("About to drain sink " << outputIndex + << ". FreeSpace: " << sinkActorFreeSpaceBeforeSend + << ", allowedOvercommit: " << allowedOvercommit + << ", toSend: " << toSend + << ", finished: " << sinkInfo.Sink->IsFinished()); + + i64 sent = 0; + while (toSend > 0 && (!sinkInfo.Finished || Checkpoints)) { + const ui32 sentChunk = SendSinkDataChunk(outputIndex, sinkInfo, toSend); + if (sentChunk == 0) { + break; + } + sent += sentChunk; + toSend = sinkInfo.SinkActor->GetFreeSpace() + allowedOvercommit; + } + + CA_LOG_D("Drain sink " << outputIndex + << ". Free space decreased: " << (sinkActorFreeSpaceBeforeSend - sinkInfo.SinkActor->GetFreeSpace()) + << ", sent data from buffer: " << sent); + ProcessOutputsState.HasDataToSend |= !sinkInfo.Finished; ProcessOutputsState.DataWasSent |= sinkInfo.Finished || sent; - } - - ui32 SendSinkDataChunk(ui64 outputIndex, TSinkInfo& sinkInfo, ui64 bytes) { - auto sink = sinkInfo.Sink; - - NKikimr::NMiniKQL::TUnboxedValueVector dataBatch; - NDqProto::TCheckpoint checkpoint; - - const ui64 dataSize = !sinkInfo.Finished ? sink->Pop(dataBatch, bytes) : 0; - const bool hasCheckpoint = sink->Pop(checkpoint); - if (!dataSize && !hasCheckpoint) { - if (!sink->IsFinished()) { - CA_LOG_D("sink " << outputIndex << ": nothing to send and is not finished"); - return 0; // sink is empty and not finished yet - } - } - sinkInfo.Finished = sink->IsFinished(); - - YQL_ENSURE(!dataSize || !dataBatch.empty()); // dataSize != 0 => !dataBatch.empty() // even if we're about to send empty rows. - - const ui32 checkpointSize = hasCheckpoint ? checkpoint.ByteSize() : 0; - - TMaybe<NDqProto::TCheckpoint> maybeCheckpoint; - if (hasCheckpoint) { - maybeCheckpoint = checkpoint; - CA_LOG_I("Resume inputs"); - ResumeInputs(); - } - - sinkInfo.SinkActor->SendData(std::move(dataBatch), dataSize, maybeCheckpoint, sinkInfo.Finished); - CA_LOG_D("sink " << outputIndex << ": sent " << dataSize << " bytes of data and " << checkpointSize << " bytes of checkpoint barrier"); - - return dataSize + checkpointSize; - } - + } + + ui32 SendSinkDataChunk(ui64 outputIndex, TSinkInfo& sinkInfo, ui64 bytes) { + auto sink = sinkInfo.Sink; + + NKikimr::NMiniKQL::TUnboxedValueVector dataBatch; + NDqProto::TCheckpoint checkpoint; + + const ui64 dataSize = !sinkInfo.Finished ? sink->Pop(dataBatch, bytes) : 0; + const bool hasCheckpoint = sink->Pop(checkpoint); + if (!dataSize && !hasCheckpoint) { + if (!sink->IsFinished()) { + CA_LOG_D("sink " << outputIndex << ": nothing to send and is not finished"); + return 0; // sink is empty and not finished yet + } + } + sinkInfo.Finished = sink->IsFinished(); + + YQL_ENSURE(!dataSize || !dataBatch.empty()); // dataSize != 0 => !dataBatch.empty() // even if we're about to send empty rows. + + const ui32 checkpointSize = hasCheckpoint ? 
checkpoint.ByteSize() : 0; + + TMaybe<NDqProto::TCheckpoint> maybeCheckpoint; + if (hasCheckpoint) { + maybeCheckpoint = checkpoint; + CA_LOG_I("Resume inputs"); + ResumeInputs(); + } + + sinkInfo.SinkActor->SendData(std::move(dataBatch), dataSize, maybeCheckpoint, sinkInfo.Finished); + CA_LOG_D("sink " << outputIndex << ": sent " << dataSize << " bytes of data and " << checkpointSize << " bytes of checkpoint barrier"); + + return dataSize + checkpointSize; + } + protected: const TMaybe<NDqProto::TRlPath>& GetRlPath() const { return RuntimeSettings.RlPath; @@ -1164,13 +1164,13 @@ protected: channel.Channel = TaskRunner->GetInputChannel(channelId); } } - for (auto& [inputIndex, source] : SourcesMap) { + for (auto& [inputIndex, source] : SourcesMap) { if (TaskRunner) { source.Source = TaskRunner->GetSource(inputIndex); Y_VERIFY(source.Source);} Y_VERIFY(SourceActorFactory); - const auto& inputDesc = Task.GetInputs(inputIndex); - const ui64 i = inputIndex; // Crutch for clang - CA_LOG_D("Create source actor for input " << i << " " << inputDesc); - std::tie(source.SourceActor, source.Actor) = SourceActorFactory->CreateDqSourceActor( + const auto& inputDesc = Task.GetInputs(inputIndex); + const ui64 i = inputIndex; // Crutch for clang + CA_LOG_D("Create source actor for input " << i << " " << inputDesc); + std::tie(source.SourceActor, source.Actor) = SourceActorFactory->CreateDqSourceActor( IDqSourceActorFactory::TArguments{ .InputDesc = inputDesc, .InputIndex = inputIndex, @@ -1181,20 +1181,20 @@ protected: .TypeEnv = typeEnv, .HolderFactory = holderFactory }); - this->RegisterWithSameMailbox(source.Actor); - } + this->RegisterWithSameMailbox(source.Actor); + } if (TaskRunner) { for (auto& [channelId, channel] : OutputChannelsMap) { channel.Channel = TaskRunner->GetOutputChannel(channelId); } } - for (auto& [outputIndex, sink] : SinksMap) { + for (auto& [outputIndex, sink] : SinksMap) { if (TaskRunner) { sink.Sink = TaskRunner->GetSink(outputIndex); } - Y_VERIFY(SinkActorFactory); - const auto& outputDesc = Task.GetOutputs(outputIndex); - const ui64 i = outputIndex; // Crutch for clang - CA_LOG_D("Create sink actor for output " << i << " " << outputDesc); - std::tie(sink.SinkActor, sink.Actor) = SinkActorFactory->CreateDqSinkActor( + Y_VERIFY(SinkActorFactory); + const auto& outputDesc = Task.GetOutputs(outputIndex); + const ui64 i = outputIndex; // Crutch for clang + CA_LOG_D("Create sink actor for output " << i << " " << outputDesc); + std::tie(sink.SinkActor, sink.Actor) = SinkActorFactory->CreateDqSinkActor( IDqSinkActorFactory::TArguments { .OutputDesc = outputDesc, .OutputIndex = outputIndex, @@ -1204,66 +1204,66 @@ protected: .TypeEnv = typeEnv, .HolderFactory = holderFactory }); - this->RegisterWithSameMailbox(sink.Actor); - } + this->RegisterWithSameMailbox(sink.Actor); + } } - void PollSourceActors() { // TODO: rename to PollSources() - // Don't produce any input from sources if we're about to save checkpoint. - if (!Running || (Checkpoints && Checkpoints->HasPendingCheckpoint() && !Checkpoints->ComputeActorStateSaved())) { - return; - } - - for (auto& [inputIndex, source] : SourcesMap) { + void PollSourceActors() { // TODO: rename to PollSources() + // Don't produce any input from sources if we're about to save checkpoint. 
+ if (!Running || (Checkpoints && Checkpoints->HasPendingCheckpoint() && !Checkpoints->ComputeActorStateSaved())) { + return; + } + + for (auto& [inputIndex, source] : SourcesMap) { Y_VERIFY(!TaskRunner || source.Source); - if (source.Finished) { - const ui64 indexForLogging = inputIndex; // Crutch for clang - CA_LOG_D("Skip polling source[" << indexForLogging << "]: finished"); - continue; - } + if (source.Finished) { + const ui64 indexForLogging = inputIndex; // Crutch for clang + CA_LOG_D("Skip polling source[" << indexForLogging << "]: finished"); + continue; + } const i64 freeSpace = SourceFreeSpace(source); if (freeSpace > 0) { - NKikimr::NMiniKQL::TUnboxedValueVector batch; + NKikimr::NMiniKQL::TUnboxedValueVector batch; Y_VERIFY(source.SourceActor); bool finished = false; const i64 space = source.SourceActor->GetSourceData(batch, finished, freeSpace); - const ui64 index = inputIndex; - CA_LOG_D("Poll source " << index - << ". Buffer free space: " << freeSpace - << ", read from source: " << space << " bytes, " - << batch.size() << " rows, finished: " << finished); + const ui64 index = inputIndex; + CA_LOG_D("Poll source " << index + << ". Buffer free space: " << freeSpace + << ", read from source: " << space << " bytes, " + << batch.size() << " rows, finished: " << finished); SourcePush(std::move(batch), source, space, finished); - } - } - } - - void OnNewSourceDataArrived(ui64 inputIndex) override { - Y_VERIFY(SourcesMap.FindPtr(inputIndex)); - ContinueExecute(); - } - + } + } + } + + void OnNewSourceDataArrived(ui64 inputIndex) override { + Y_VERIFY(SourcesMap.FindPtr(inputIndex)); + ContinueExecute(); + } + void OnSourceError(ui64 inputIndex, const TIssues& issues, bool isFatal) override { if (!isFatal) { SourcesMap.at(inputIndex).IssuesBuffer.Push(issues); return; } - TString desc = issues.ToString(); - CA_LOG_E("Source[" << inputIndex << "] fatal error: " << desc); - InternalError(TIssuesIds::DEFAULT_ERROR, desc); + TString desc = issues.ToString(); + CA_LOG_E("Source[" << inputIndex << "] fatal error: " << desc); + InternalError(TIssuesIds::DEFAULT_ERROR, desc); } - + void OnSinkError(ui64 outputIndex, const TIssues& issues, bool isFatal) override { if (!isFatal) { SinksMap.at(outputIndex).IssuesBuffer.Push(issues); return; } - TString desc = issues.ToString(); - CA_LOG_E("Sink[" << outputIndex << "] fatal error: " << desc); - InternalError(TIssuesIds::DEFAULT_ERROR, desc); - } - + TString desc = issues.ToString(); + CA_LOG_E("Sink[" << outputIndex << "] fatal error: " << desc); + InternalError(TIssuesIds::DEFAULT_ERROR, desc); + } + virtual ui64 CalcMkqlMemoryLimit() { auto& opts = Task.GetProgram().GetSettings(); return opts.GetHasMapJoin()/* || opts.GetHasSort()*/ @@ -1274,37 +1274,37 @@ protected: private: void InitializeTask() { for (ui32 i = 0; i < Task.InputsSize(); ++i) { - const auto& inputDesc = Task.GetInputs(i); - Y_VERIFY(!inputDesc.HasSource() || inputDesc.ChannelsSize() == 0); // HasSource => no channels - if (inputDesc.HasSource()) { + const auto& inputDesc = Task.GetInputs(i); + Y_VERIFY(!inputDesc.HasSource() || inputDesc.ChannelsSize() == 0); // HasSource => no channels + if (inputDesc.HasSource()) { auto result = SourcesMap.emplace(i, TSourceInfo(i)); YQL_ENSURE(result.second); - } else { - for (auto& channel : inputDesc.GetChannels()) { + } else { + for (auto& channel : inputDesc.GetChannels()) { auto result = InputChannelsMap.emplace(channel.GetId(), TInputChannelInfo(channel.GetId(), channel.GetCheckpointingMode())); - YQL_ENSURE(result.second); - } + 
YQL_ENSURE(result.second); + } } } for (ui32 i = 0; i < Task.OutputsSize(); ++i) { - const auto& outputDesc = Task.GetOutputs(i); - Y_VERIFY(!outputDesc.HasSink() || outputDesc.ChannelsSize() == 0); // HasSink => no channels - Y_VERIFY(outputDesc.HasSink() || outputDesc.ChannelsSize() > 0); - if (outputDesc.HasSink()) { - auto result = SinksMap.emplace(i, TSinkInfo()); - YQL_ENSURE(result.second); - } else { - for (auto& channel : outputDesc.GetChannels()) { + const auto& outputDesc = Task.GetOutputs(i); + Y_VERIFY(!outputDesc.HasSink() || outputDesc.ChannelsSize() == 0); // HasSink => no channels + Y_VERIFY(outputDesc.HasSink() || outputDesc.ChannelsSize() > 0); + if (outputDesc.HasSink()) { + auto result = SinksMap.emplace(i, TSinkInfo()); + YQL_ENSURE(result.second); + } else { + for (auto& channel : outputDesc.GetChannels()) { TOutputChannelInfo outputChannel(channel.GetId()); - outputChannel.HasPeer = channel.GetDstEndpoint().HasActorId(); + outputChannel.HasPeer = channel.GetDstEndpoint().HasActorId(); if (Y_UNLIKELY(RuntimeSettings.StatsMode >= NDqProto::DQ_STATS_MODE_PROFILE)) { - outputChannel.Stats = MakeHolder<typename TOutputChannelInfo::TStats>(); - } - - auto result = OutputChannelsMap.emplace(channel.GetId(), std::move(outputChannel)); - YQL_ENSURE(result.second); + outputChannel.Stats = MakeHolder<typename TOutputChannelInfo::TStats>(); + } + + auto result = OutputChannelsMap.emplace(channel.GetId(), std::move(outputChannel)); + YQL_ENSURE(result.second); } } } @@ -1356,59 +1356,59 @@ private: dst->SetMkqlExtraMemoryRequests(ProfileStats->MkqlExtraMemoryRequests); } - if (TaskRunner) { - TaskRunner->UpdateStats(); + if (TaskRunner) { + TaskRunner->UpdateStats(); - if (auto* taskStats = TaskRunner->GetStats()) { - auto* protoTask = dst->AddTasks(); - FillTaskRunnerStats(Task.GetId(), Task.GetStageId(), *taskStats, protoTask, (bool) ProfileStats); + if (auto* taskStats = TaskRunner->GetStats()) { + auto* protoTask = dst->AddTasks(); + FillTaskRunnerStats(Task.GetId(), Task.GetStageId(), *taskStats, protoTask, (bool) ProfileStats); - for (auto& [outputIndex, sinkInfo] : SinksMap) { - if (auto* sinkStats = sinkInfo.Sink ? sinkInfo.Sink->GetStats() : nullptr) { - protoTask->SetOutputRows(protoTask->GetOutputRows() + sinkStats->RowsIn); - protoTask->SetOutputBytes(protoTask->GetOutputBytes() + sinkStats->Bytes); + for (auto& [outputIndex, sinkInfo] : SinksMap) { + if (auto* sinkStats = sinkInfo.Sink ? 
sinkInfo.Sink->GetStats() : nullptr) { + protoTask->SetOutputRows(protoTask->GetOutputRows() + sinkStats->RowsIn); + protoTask->SetOutputBytes(protoTask->GetOutputBytes() + sinkStats->Bytes); - if (ProfileStats) { - auto* protoSink = protoTask->AddSinks(); - protoSink->SetOutputIndex(outputIndex); + if (ProfileStats) { + auto* protoSink = protoTask->AddSinks(); + protoSink->SetOutputIndex(outputIndex); - protoSink->SetChunks(sinkStats->Chunks); - protoSink->SetBytes(sinkStats->Bytes); - protoSink->SetRowsIn(sinkStats->RowsIn); - protoSink->SetRowsOut(sinkStats->RowsOut); + protoSink->SetChunks(sinkStats->Chunks); + protoSink->SetBytes(sinkStats->Bytes); + protoSink->SetRowsIn(sinkStats->RowsIn); + protoSink->SetRowsOut(sinkStats->RowsOut); - protoSink->SetMaxMemoryUsage(sinkStats->MaxMemoryUsage); - protoSink->SetErrorsCount(sinkInfo.IssuesBuffer.GetAllAddedIssuesCount()); - } + protoSink->SetMaxMemoryUsage(sinkStats->MaxMemoryUsage); + protoSink->SetErrorsCount(sinkInfo.IssuesBuffer.GetAllAddedIssuesCount()); + } } } - if (ProfileStats) { - for (auto& protoSource : *protoTask->MutableSources()) { - if (auto* sourceInfo = SourcesMap.FindPtr(protoSource.GetInputIndex())) { - protoSource.SetErrorsCount(sourceInfo->IssuesBuffer.GetAllAddedIssuesCount()); - } + if (ProfileStats) { + for (auto& protoSource : *protoTask->MutableSources()) { + if (auto* sourceInfo = SourcesMap.FindPtr(protoSource.GetInputIndex())) { + protoSource.SetErrorsCount(sourceInfo->IssuesBuffer.GetAllAddedIssuesCount()); + } } - for (auto& protoInputChannelStats : *protoTask->MutableInputChannels()) { - if (auto* caChannelStats = Channels->GetInputChannelStats(protoInputChannelStats.GetChannelId())) { - protoInputChannelStats.SetPollRequests(caChannelStats->PollRequests); - protoInputChannelStats.SetWaitTimeUs(caChannelStats->WaitTime.MicroSeconds()); - protoInputChannelStats.SetResentMessages(caChannelStats->ResentMessages); - } + for (auto& protoInputChannelStats : *protoTask->MutableInputChannels()) { + if (auto* caChannelStats = Channels->GetInputChannelStats(protoInputChannelStats.GetChannelId())) { + protoInputChannelStats.SetPollRequests(caChannelStats->PollRequests); + protoInputChannelStats.SetWaitTimeUs(caChannelStats->WaitTime.MicroSeconds()); + protoInputChannelStats.SetResentMessages(caChannelStats->ResentMessages); + } } - for (auto& protoOutputChannelStats : *protoTask->MutableOutputChannels()) { - if (auto* x = Channels->GetOutputChannelStats(protoOutputChannelStats.GetChannelId())) { - protoOutputChannelStats.SetResentMessages(x->ResentMessages); - } - + for (auto& protoOutputChannelStats : *protoTask->MutableOutputChannels()) { + if (auto* x = Channels->GetOutputChannelStats(protoOutputChannelStats.GetChannelId())) { + protoOutputChannelStats.SetResentMessages(x->ResentMessages); + } + if (auto* outputInfo = OutputChannelsMap.FindPtr(protoOutputChannelStats.GetChannelId())) { if (auto *x = outputInfo->Stats.Get()) { protoOutputChannelStats.SetBlockedByCapacity(x->BlockedByCapacity); protoOutputChannelStats.SetNoDstActorId(x->NoDstActorId); } - } + } } } } @@ -1462,16 +1462,16 @@ protected: const TComputeRuntimeSettings RuntimeSettings; const TComputeMemoryLimits MemoryLimits; const bool CanAllocateExtraMemory = false; - const IDqSourceActorFactory::TPtr SourceActorFactory; - const IDqSinkActorFactory::TPtr SinkActorFactory; - const NDqProto::ECheckpointingMode CheckpointingMode; + const IDqSourceActorFactory::TPtr SourceActorFactory; + const IDqSinkActorFactory::TPtr SinkActorFactory; + const 
NDqProto::ECheckpointingMode CheckpointingMode; TIntrusivePtr<IDqTaskRunner> TaskRunner; TDqComputeActorChannels* Channels = nullptr; TDqComputeActorCheckpoints* Checkpoints = nullptr; - THashMap<ui64, TInputChannelInfo> InputChannelsMap; // Channel id -> Channel info - THashMap<ui64, TSourceInfo> SourcesMap; // Input index -> Source info - THashMap<ui64, TOutputChannelInfo> OutputChannelsMap; // Channel id -> Channel info - THashMap<ui64, TSinkInfo> SinksMap; // Output index -> Sink info + THashMap<ui64, TInputChannelInfo> InputChannelsMap; // Channel id -> Channel info + THashMap<ui64, TSourceInfo> SourcesMap; // Input index -> Source info + THashMap<ui64, TOutputChannelInfo> OutputChannelsMap; // Channel id -> Channel info + THashMap<ui64, TSinkInfo> SinksMap; // Output index -> Sink info ui64 MkqlMemoryLimit = 0; bool ResumeEventScheduled = false; NDqProto::EComputeState State; diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_io_actors_factory.cpp b/ydb/library/yql/dq/actors/compute/dq_compute_actor_io_actors_factory.cpp index 5ae40ef1ab6..7d7cc64ac04 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_io_actors_factory.cpp +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_io_actors_factory.cpp @@ -1,45 +1,45 @@ -#include "dq_compute_actor_io_actors_factory.h" - +#include "dq_compute_actor_io_actors_factory.h" + #include <ydb/library/yql/dq/actors/compute/dq_compute_actor_sinks.h> #include <ydb/library/yql/dq/actors/compute/dq_compute_actor_sources.h> #include <ydb/library/yql/dq/common/dq_common.h> -namespace NYql::NDq { - +namespace NYql::NDq { + std::pair<IDqSourceActor*, NActors::IActor*> TDqSourceFactory::CreateDqSourceActor(IDqSourceActorFactory::TArguments&& args) const -{ +{ const TString& type = args.InputDesc.GetSource().GetType(); - YQL_ENSURE(!type.empty(), "Attempt to create source actor of empty type"); - const TCreatorFunction* creatorFunc = CreatorsByType.FindPtr(type); - YQL_ENSURE(creatorFunc, "Unknown type of source actor: \"" << type << "\""); + YQL_ENSURE(!type.empty(), "Attempt to create source actor of empty type"); + const TCreatorFunction* creatorFunc = CreatorsByType.FindPtr(type); + YQL_ENSURE(creatorFunc, "Unknown type of source actor: \"" << type << "\""); std::pair<IDqSourceActor*, NActors::IActor*> actor = (*creatorFunc)(std::move(args)); - Y_VERIFY(actor.first); - Y_VERIFY(actor.second); - return actor; -} - -void TDqSourceFactory::Register(const TString& type, TCreatorFunction creator) -{ - auto [_, registered] = CreatorsByType.emplace(type, std::move(creator)); - Y_VERIFY(registered); -} - + Y_VERIFY(actor.first); + Y_VERIFY(actor.second); + return actor; +} + +void TDqSourceFactory::Register(const TString& type, TCreatorFunction creator) +{ + auto [_, registered] = CreatorsByType.emplace(type, std::move(creator)); + Y_VERIFY(registered); +} + std::pair<IDqSinkActor*, NActors::IActor*> TDqSinkFactory::CreateDqSinkActor(IDqSinkActorFactory::TArguments&& args) const -{ +{ const TString& type = args.OutputDesc.GetSink().GetType(); - YQL_ENSURE(!type.empty(), "Attempt to create sink actor of empty type"); - const TCreatorFunction* creatorFunc = CreatorsByType.FindPtr(type); - YQL_ENSURE(creatorFunc, "Unknown type of sink actor: \"" << type << "\""); + YQL_ENSURE(!type.empty(), "Attempt to create sink actor of empty type"); + const TCreatorFunction* creatorFunc = CreatorsByType.FindPtr(type); + YQL_ENSURE(creatorFunc, "Unknown type of sink actor: \"" << type << "\""); std::pair<IDqSinkActor*, NActors::IActor*> actor = 
(*creatorFunc)(std::move(args)); - Y_VERIFY(actor.first); - Y_VERIFY(actor.second); - return actor; -} - -void TDqSinkFactory::Register(const TString& type, TCreatorFunction creator) -{ - auto [_, registered] = CreatorsByType.emplace(type, std::move(creator)); - Y_VERIFY(registered); -} - -} // namespace NYql::NDq + Y_VERIFY(actor.first); + Y_VERIFY(actor.second); + return actor; +} + +void TDqSinkFactory::Register(const TString& type, TCreatorFunction creator) +{ + auto [_, registered] = CreatorsByType.emplace(type, std::move(creator)); + Y_VERIFY(registered); +} + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_io_actors_factory.h b/ydb/library/yql/dq/actors/compute/dq_compute_actor_io_actors_factory.h index db57796f5d4..c2d56c79783 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_io_actors_factory.h +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_io_actors_factory.h @@ -1,87 +1,87 @@ -#pragma once -#include "dq_compute_actor_sources.h" -#include "dq_compute_actor_sinks.h" - +#pragma once +#include "dq_compute_actor_sources.h" +#include "dq_compute_actor_sinks.h" + #include <ydb/library/yql/dq/actors/protos/dq_events.pb.h> #include <ydb/library/yql/dq/common/dq_common.h> #include <ydb/library/yql/minikql/computation/mkql_computation_node_holders.h> #include <ydb/library/yql/utils/yql_panic.h> - -#include <type_traits> - -namespace NYql::NDq { - -template <class T> -concept TCastsToSourceActorPair = - std::is_convertible_v<T, std::pair<IDqSourceActor*, NActors::IActor*>>; - -template <class T, class TProto> + +#include <type_traits> + +namespace NYql::NDq { + +template <class T> +concept TCastsToSourceActorPair = + std::is_convertible_v<T, std::pair<IDqSourceActor*, NActors::IActor*>>; + +template <class T, class TProto> concept TSourceActorCreatorFunc = requires(T f, TProto&& settings, IDqSourceActorFactory::TArguments args) { { f(std::move(settings), std::move(args)) } -> TCastsToSourceActorPair; -}; - -class TDqSourceFactory : public IDqSourceActorFactory { -public: +}; + +class TDqSourceFactory : public IDqSourceActorFactory { +public: using TCreatorFunction = std::function<std::pair<IDqSourceActor*, NActors::IActor*>(TArguments&& args)>; - + std::pair<IDqSourceActor*, NActors::IActor*> CreateDqSourceActor(TArguments&& args) const override; - - void Register(const TString& type, TCreatorFunction creator); - - template <class TProtoMsg, TSourceActorCreatorFunc<TProtoMsg> TCreatorFunc> - void Register(const TString& type, TCreatorFunc creator) { - Register(type, + + void Register(const TString& type, TCreatorFunction creator); + + template <class TProtoMsg, TSourceActorCreatorFunc<TProtoMsg> TCreatorFunc> + void Register(const TString& type, TCreatorFunc creator) { + Register(type, [creator = std::move(creator), type](TArguments&& args) - { + { const google::protobuf::Any& settingsAny = args.InputDesc.GetSource().GetSettings(); - YQL_ENSURE(settingsAny.Is<TProtoMsg>(), - "Source \"" << type << "\" settings are expected to have protobuf type " << TProtoMsg::descriptor()->full_name() - << ", but got " << settingsAny.type_url()); - TProtoMsg settings; - YQL_ENSURE(settingsAny.UnpackTo(&settings), "Failed to unpack settings of type \"" << type << "\""); + YQL_ENSURE(settingsAny.Is<TProtoMsg>(), + "Source \"" << type << "\" settings are expected to have protobuf type " << TProtoMsg::descriptor()->full_name() + << ", but got " << settingsAny.type_url()); + TProtoMsg settings; + YQL_ENSURE(settingsAny.UnpackTo(&settings), 
"Failed to unpack settings of type \"" << type << "\""); return creator(std::move(settings), std::move(args)); - }); - } - -private: - THashMap<TString, TCreatorFunction> CreatorsByType; -}; - -template <class T> -concept TCastsToSinkActorPair = - std::is_convertible_v<T, std::pair<IDqSinkActor*, NActors::IActor*>>; - -template <class T, class TProto> + }); + } + +private: + THashMap<TString, TCreatorFunction> CreatorsByType; +}; + +template <class T> +concept TCastsToSinkActorPair = + std::is_convertible_v<T, std::pair<IDqSinkActor*, NActors::IActor*>>; + +template <class T, class TProto> concept TSinkActorCreatorFunc = requires(T f, TProto&& settings, IDqSinkActorFactory::TArguments&& args) { { f(std::move(settings), std::move(args)) } -> TCastsToSinkActorPair; -}; - -class TDqSinkFactory : public IDqSinkActorFactory { -public: +}; + +class TDqSinkFactory : public IDqSinkActorFactory { +public: using TCreatorFunction = std::function<std::pair<IDqSinkActor*, NActors::IActor*>(TArguments&& args)>; - + std::pair<IDqSinkActor*, NActors::IActor*> CreateDqSinkActor(TArguments&& args) const override; - - void Register(const TString& type, TCreatorFunction creator); - - template <class TProtoMsg, TSinkActorCreatorFunc<TProtoMsg> TCreatorFunc> - void Register(const TString& type, TCreatorFunc creator) { - Register(type, + + void Register(const TString& type, TCreatorFunction creator); + + template <class TProtoMsg, TSinkActorCreatorFunc<TProtoMsg> TCreatorFunc> + void Register(const TString& type, TCreatorFunc creator) { + Register(type, [creator = std::move(creator), type](TArguments&& args) - { + { const google::protobuf::Any& settingsAny = args.OutputDesc.GetSink().GetSettings(); - YQL_ENSURE(settingsAny.Is<TProtoMsg>(), - "Sink \"" << type << "\" settings are expected to have protobuf type " << TProtoMsg::descriptor()->full_name() - << ", but got " << settingsAny.type_url()); - TProtoMsg settings; - YQL_ENSURE(settingsAny.UnpackTo(&settings), "Failed to unpack settings of type \"" << type << "\""); + YQL_ENSURE(settingsAny.Is<TProtoMsg>(), + "Sink \"" << type << "\" settings are expected to have protobuf type " << TProtoMsg::descriptor()->full_name() + << ", but got " << settingsAny.type_url()); + TProtoMsg settings; + YQL_ENSURE(settingsAny.UnpackTo(&settings), "Failed to unpack settings of type \"" << type << "\""); return creator(std::move(settings), std::move(args)); - }); - } - -private: - THashMap<TString, TCreatorFunction> CreatorsByType; -}; - -} // namespace NYql::NDq + }); + } + +private: + THashMap<TString, TCreatorFunction> CreatorsByType; +}; + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_sinks.h b/ydb/library/yql/dq/actors/compute/dq_compute_actor_sinks.h index 5c2db2ac8d8..76f671a4fab 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_sinks.h +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_sinks.h @@ -1,79 +1,79 @@ -#pragma once +#pragma once #include <ydb/library/yql/dq/common/dq_common.h> #include <ydb/library/yql/minikql/computation/mkql_computation_node_holders.h> #include <ydb/library/yql/public/issue/yql_issue.h> - -#include <util/generic/ptr.h> - -#include <memory> -#include <utility> - -namespace NYql::NDqProto { -class TCheckpoint; + +#include <util/generic/ptr.h> + +#include <memory> +#include <utility> + +namespace NYql::NDqProto { +class TCheckpoint; class TTaskOutput; -class TSinkState; -} // namespace NYql::NDqProto - -namespace NActors { -class IActor; -} // namespace NActors - -namespace 
NYql::NDq { - -// Sink actor. -// Must be IActor. -// -// Protocol: -// 1. CA starts sink actor. -// 2. CA runs program and gets results. -// 3. CA calls IDqSinkActor::SendData(). -// 4. If SendData() returns value less than 0, loop stops running until free space appears. -// 5. When free space appears, sink actor calls ICallbacks::ResumeExecution() to start processing again. -// -// Checkpointing: -// 1. InjectCheckpoint event arrives to CA. -// 2. CA saves its state and injects special checkpoint event to all outputs (TDqComputeActorCheckpoints::ICallbacks::InjectBarrierToOutputs()). -// 3. Sink actor writes all data before checkpoint. -// 4. Sink actor waits all external sink's acks for written data. -// 5. Sink actor gathers its state and passes it into callback ICallbacks::OnSinkStateSaved(state, outputIndex). -// 6. Checkpoints actor builds state for all task node as sum of the state of CA and all its sinks and saves it. -// 7. ... -// 8. When checkpoint is written into database, checkpoints actor calls IDqSinkActor::CommitState() to apply all side effects. -struct IDqSinkActor { - struct ICallbacks { // Compute actor - virtual void ResumeExecution() = 0; +class TSinkState; +} // namespace NYql::NDqProto + +namespace NActors { +class IActor; +} // namespace NActors + +namespace NYql::NDq { + +// Sink actor. +// Must be IActor. +// +// Protocol: +// 1. CA starts sink actor. +// 2. CA runs program and gets results. +// 3. CA calls IDqSinkActor::SendData(). +// 4. If SendData() returns value less than 0, loop stops running until free space appears. +// 5. When free space appears, sink actor calls ICallbacks::ResumeExecution() to start processing again. +// +// Checkpointing: +// 1. InjectCheckpoint event arrives to CA. +// 2. CA saves its state and injects special checkpoint event to all outputs (TDqComputeActorCheckpoints::ICallbacks::InjectBarrierToOutputs()). +// 3. Sink actor writes all data before checkpoint. +// 4. Sink actor waits all external sink's acks for written data. +// 5. Sink actor gathers its state and passes it into callback ICallbacks::OnSinkStateSaved(state, outputIndex). +// 6. Checkpoints actor builds state for all task node as sum of the state of CA and all its sinks and saves it. +// 7. ... +// 8. When checkpoint is written into database, checkpoints actor calls IDqSinkActor::CommitState() to apply all side effects. +struct IDqSinkActor { + struct ICallbacks { // Compute actor + virtual void ResumeExecution() = 0; virtual void OnSinkError(ui64 outputIndex, const TIssues& issues, bool isFatal) = 0; - - // Checkpointing - virtual void OnSinkStateSaved(NDqProto::TSinkState&& state, ui64 outputIndex, const NDqProto::TCheckpoint& checkpoint) = 0; - - virtual ~ICallbacks() = default; - }; - - virtual ui64 GetOutputIndex() const = 0; - - virtual i64 GetFreeSpace() const = 0; - - // Sends data. - // Method shoud be called under bound mkql allocator. - // Could throw YQL errors. - // Checkpoint (if any) is supposed to be ordered after batch, - // and finished flag is supposed to be ordered after checkpoint. - virtual void SendData(NKikimr::NMiniKQL::TUnboxedValueVector&& batch, i64 dataSize, - const TMaybe<NDqProto::TCheckpoint>& checkpoint, bool finished) = 0; - - // Checkpointing. - virtual void CommitState(const NDqProto::TCheckpoint& checkpoint) = 0; // Apply side effects related to this checkpoint. 
- virtual void LoadState(const NDqProto::TSinkState& state) = 0; - - virtual void PassAway() = 0; // The same signature as IActor::PassAway() - - virtual ~IDqSinkActor() = default; -}; - -struct IDqSinkActorFactory : public TThrRefBase { - using TPtr = TIntrusivePtr<IDqSinkActorFactory>; - + + // Checkpointing + virtual void OnSinkStateSaved(NDqProto::TSinkState&& state, ui64 outputIndex, const NDqProto::TCheckpoint& checkpoint) = 0; + + virtual ~ICallbacks() = default; + }; + + virtual ui64 GetOutputIndex() const = 0; + + virtual i64 GetFreeSpace() const = 0; + + // Sends data. + // Method shoud be called under bound mkql allocator. + // Could throw YQL errors. + // Checkpoint (if any) is supposed to be ordered after batch, + // and finished flag is supposed to be ordered after checkpoint. + virtual void SendData(NKikimr::NMiniKQL::TUnboxedValueVector&& batch, i64 dataSize, + const TMaybe<NDqProto::TCheckpoint>& checkpoint, bool finished) = 0; + + // Checkpointing. + virtual void CommitState(const NDqProto::TCheckpoint& checkpoint) = 0; // Apply side effects related to this checkpoint. + virtual void LoadState(const NDqProto::TSinkState& state) = 0; + + virtual void PassAway() = 0; // The same signature as IActor::PassAway() + + virtual ~IDqSinkActor() = default; +}; + +struct IDqSinkActorFactory : public TThrRefBase { + using TPtr = TIntrusivePtr<IDqSinkActorFactory>; + struct TArguments { const NDqProto::TTaskOutput& OutputDesc; ui64 OutputIndex; @@ -84,10 +84,10 @@ struct IDqSinkActorFactory : public TThrRefBase { const NKikimr::NMiniKQL::THolderFactory& HolderFactory; }; - // Creates sink actor. - // Could throw YQL errors. - // IActor* and IDqSinkActor* returned by method must point to the objects with consistent lifetime. + // Creates sink actor. + // Could throw YQL errors. + // IActor* and IDqSinkActor* returned by method must point to the objects with consistent lifetime. virtual std::pair<IDqSinkActor*, NActors::IActor*> CreateDqSinkActor(TArguments&& args) const = 0; -}; - -} // namespace NYql::NDq +}; + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_sources.h b/ydb/library/yql/dq/actors/compute/dq_compute_actor_sources.h index d2a646cab62..3b742b3cfd8 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_sources.h +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_sources.h @@ -1,68 +1,68 @@ -#pragma once +#pragma once #include <ydb/library/yql/dq/common/dq_common.h> #include <ydb/library/yql/minikql/computation/mkql_computation_node_holders.h> #include <ydb/library/yql/public/issue/yql_issue.h> - -#include <util/generic/ptr.h> - -#include <memory> -#include <utility> - -namespace NYql::NDqProto { -class TCheckpoint; -class TTaskInput; -class TSourceState; -} // namespace NYql::NDqProto - -namespace NActors { -class IActor; -} // namespace NActors - -namespace NYql::NDq { - -// Source actor. -// Must be IActor. -// -// Protocol: -// 1. CA starts source actor. -// 2. CA calls IDqSourceActor::GetSourceData(batch, FreeSpace). -// 3. Source actor calls ICallbacks::OnNewSourceDataArrived() when it has data to process. -// 4. CA calls IDqSourceActor::GetSourceData(batch, FreeSpace) to get data when it is ready to process it. -// -// Checkpointing: -// 1. InjectCheckpoint event arrives to CA. -// 2. ... -// 3. CA calls IDqSourceActor::SaveState() and IDqTaskRunner::SaveGraphState() and uses this pair as state for CA. -// 3. ... -// 5. CA calls IDqSourceActor::CommitState() to apply all side effects. 
-struct IDqSourceActor { - struct ICallbacks { - virtual void OnNewSourceDataArrived(ui64 inputIndex) = 0; + +#include <util/generic/ptr.h> + +#include <memory> +#include <utility> + +namespace NYql::NDqProto { +class TCheckpoint; +class TTaskInput; +class TSourceState; +} // namespace NYql::NDqProto + +namespace NActors { +class IActor; +} // namespace NActors + +namespace NYql::NDq { + +// Source actor. +// Must be IActor. +// +// Protocol: +// 1. CA starts source actor. +// 2. CA calls IDqSourceActor::GetSourceData(batch, FreeSpace). +// 3. Source actor calls ICallbacks::OnNewSourceDataArrived() when it has data to process. +// 4. CA calls IDqSourceActor::GetSourceData(batch, FreeSpace) to get data when it is ready to process it. +// +// Checkpointing: +// 1. InjectCheckpoint event arrives to CA. +// 2. ... +// 3. CA calls IDqSourceActor::SaveState() and IDqTaskRunner::SaveGraphState() and uses this pair as state for CA. +// 3. ... +// 5. CA calls IDqSourceActor::CommitState() to apply all side effects. +struct IDqSourceActor { + struct ICallbacks { + virtual void OnNewSourceDataArrived(ui64 inputIndex) = 0; virtual void OnSourceError(ui64 inputIndex, const TIssues& issues, bool isFatal) = 0; - - virtual ~ICallbacks() = default; - }; - - virtual ui64 GetInputIndex() const = 0; - - // Gets data and returns space used by filled data batch. + + virtual ~ICallbacks() = default; + }; + + virtual ui64 GetInputIndex() const = 0; + + // Gets data and returns space used by filled data batch. // Method should be called under bound mkql allocator. - // Could throw YQL errors. + // Could throw YQL errors. virtual i64 GetSourceData(NKikimr::NMiniKQL::TUnboxedValueVector& batch, bool& finished, i64 freeSpace) = 0; - - // Checkpointing. - virtual void SaveState(const NDqProto::TCheckpoint& checkpoint, NDqProto::TSourceState& state) = 0; - virtual void CommitState(const NDqProto::TCheckpoint& checkpoint) = 0; // Apply side effects related to this checkpoint. - virtual void LoadState(const NDqProto::TSourceState& state) = 0; - - virtual void PassAway() = 0; // The same signature as IActor::PassAway() - - virtual ~IDqSourceActor() = default; -}; - -struct IDqSourceActorFactory : public TThrRefBase { - using TPtr = TIntrusivePtr<IDqSourceActorFactory>; - + + // Checkpointing. + virtual void SaveState(const NDqProto::TCheckpoint& checkpoint, NDqProto::TSourceState& state) = 0; + virtual void CommitState(const NDqProto::TCheckpoint& checkpoint) = 0; // Apply side effects related to this checkpoint. + virtual void LoadState(const NDqProto::TSourceState& state) = 0; + + virtual void PassAway() = 0; // The same signature as IActor::PassAway() + + virtual ~IDqSourceActor() = default; +}; + +struct IDqSourceActorFactory : public TThrRefBase { + using TPtr = TIntrusivePtr<IDqSourceActorFactory>; + struct TArguments { const NDqProto::TTaskInput& InputDesc; ui64 InputIndex; @@ -74,10 +74,10 @@ struct IDqSourceActorFactory : public TThrRefBase { const NKikimr::NMiniKQL::THolderFactory& HolderFactory; }; - // Creates source actor. - // Could throw YQL errors. - // IActor* and IDqSourceActor* returned by method must point to the objects with consistent lifetime. + // Creates source actor. + // Could throw YQL errors. + // IActor* and IDqSourceActor* returned by method must point to the objects with consistent lifetime. 
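A minimal sketch of how the factory contract above is typically satisfied (hypothetical names: TMySourceSettings stands for a protobuf settings message and TMySourceActor for a class deriving from both NActors::IActor and IDqSourceActor; neither appears in this patch). Returning the same object through both pointers is the simplest way to keep the IDqSourceActor* and the NActors::IActor* lifetimes consistent, as the comment above requires:

    // Sketch only; assumes dq_compute_actor_io_actors_factory.h is included.
    auto factory = MakeIntrusive<NYql::NDq::TDqSourceFactory>();
    factory->Register<TMySourceSettings>("MySource",
        [](TMySourceSettings&& settings, NYql::NDq::IDqSourceActorFactory::TArguments&& args)
            -> std::pair<NYql::NDq::IDqSourceActor*, NActors::IActor*> {
            auto* actor = new TMySourceActor(std::move(settings), args.InputIndex);
            return {actor, actor}; // one object behind both interfaces => one lifetime
        });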
virtual std::pair<IDqSourceActor*, NActors::IActor*> CreateDqSourceActor(TArguments&& args) const = 0; -}; - -} // namespace NYql::NDq +}; + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_stats.cpp b/ydb/library/yql/dq/actors/compute/dq_compute_actor_stats.cpp index cc13ce4d272..01197720a97 100644 --- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_stats.cpp +++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_stats.cpp @@ -21,12 +21,12 @@ void FillTaskRunnerStats(ui64 taskId, ui32 stageId, const TDqTaskRunnerStats& ta protoTask->SetWaitTimeUs(taskStats.WaitTime.MicroSeconds()); protoTask->SetWaitOutputTimeUs(taskStats.WaitOutputTime.MicroSeconds()); - // All run statuses metrics - protoTask->SetPendingInputTimeUs(taskStats.RunStatusTimeMetrics[ERunStatus::PendingInput].MicroSeconds()); - protoTask->SetPendingOutputTimeUs(taskStats.RunStatusTimeMetrics[ERunStatus::PendingOutput].MicroSeconds()); - protoTask->SetFinishTimeUs(taskStats.RunStatusTimeMetrics[ERunStatus::Finished].MicroSeconds()); - static_assert(TRunStatusTimeMetrics::StatusesCount == 3); // Add all statuses here - + // All run statuses metrics + protoTask->SetPendingInputTimeUs(taskStats.RunStatusTimeMetrics[ERunStatus::PendingInput].MicroSeconds()); + protoTask->SetPendingOutputTimeUs(taskStats.RunStatusTimeMetrics[ERunStatus::PendingOutput].MicroSeconds()); + protoTask->SetFinishTimeUs(taskStats.RunStatusTimeMetrics[ERunStatus::Finished].MicroSeconds()); + static_assert(TRunStatusTimeMetrics::StatusesCount == 3); // Add all statuses here + if (taskStats.ComputeCpuTimeByRun) { auto snapshot = taskStats.ComputeCpuTimeByRun->Snapshot(); for (ui32 i = 0; i < snapshot->Count(); i++) { diff --git a/ydb/library/yql/dq/actors/compute/retry_queue.cpp b/ydb/library/yql/dq/actors/compute/retry_queue.cpp index 25399376918..fcb05aec4f1 100644 --- a/ydb/library/yql/dq/actors/compute/retry_queue.cpp +++ b/ydb/library/yql/dq/actors/compute/retry_queue.cpp @@ -1,114 +1,114 @@ -#include "retry_queue.h" - -#include <util/generic/utility.h> - -namespace NYql::NDq { - -void TRetryEventsQueue::Init(const TTxId& txId, const NActors::TActorId& senderId, const NActors::TActorId& selfId, ui64 eventQueueId) { - TxId = txId; - SenderId = senderId; - SelfId = selfId; - Y_ASSERT(SelfId.NodeId() == SenderId.NodeId()); - EventQueueId = eventQueueId; -} - -void TRetryEventsQueue::OnNewRecipientId(const NActors::TActorId& recipientId, bool unsubscribe) { - if (unsubscribe) { - Unsubscribe(); - } - RecipientId = recipientId; - LocalRecipient = RecipientId.NodeId() == SelfId.NodeId(); - NextSeqNo = 1; - Events.clear(); - MyConfirmedSeqNo = 0; - ReceivedEventsSeqNos.clear(); - Connected = false; - RetryState = Nothing(); -} - -void TRetryEventsQueue::HandleNodeDisconnected(ui32 nodeId) { - if (nodeId == RecipientId.NodeId()) { - Connected = false; - ScheduleRetry(); - } -} - -void TRetryEventsQueue::HandleNodeConnected(ui32 nodeId) { - if (nodeId == RecipientId.NodeId()) { - if (!Connected) { - Connected = true; - RetryState = Nothing(); - - // (Re)send all events - for (const IRetryableEvent::TPtr& ev : Events) { - SendRetryable(ev); - } - } - } -} - -void TRetryEventsQueue::Retry() { - RetryScheduled = false; - if (!Connected) { - Connect(); - } -} - -void TRetryEventsQueue::Connect() { - auto connectEvent = MakeHolder<NActors::TEvInterconnect::TEvConnectNode>(); - NActors::TActivationContext::Send( - new NActors::IEventHandle(NActors::TActivationContext::InterconnectProxy(RecipientId.NodeId()), SenderId, 
connectEvent.Release(), 0, 0)); -} - -void TRetryEventsQueue::Unsubscribe() { - if (Connected) { - Connected = false; - auto unsubscribeEvent = MakeHolder<NActors::TEvents::TEvUnsubscribe>(); - NActors::TActivationContext::Send( - new NActors::IEventHandle(NActors::TActivationContext::InterconnectProxy(RecipientId.NodeId()), SenderId, unsubscribeEvent.Release(), 0, 0)); - } -} - -void TRetryEventsQueue::RemoveConfirmedEvents(ui64 confirmedSeqNo) { - while (!Events.empty() && Events.front()->GetSeqNo() <= confirmedSeqNo) { - Events.pop_front(); - } - Y_VERIFY(Events.size() <= 10000, - "Too many unconfirmed events: %lu. Confirmed SeqNo: %lu. Unconfirmed SeqNos: %lu-%lu. TxId: \"%s\". EventQueueId: %lu", - Events.size(), - confirmedSeqNo, - Events.front()->GetSeqNo(), - Events.back()->GetSeqNo(), - (TStringBuilder() << TxId).c_str(), - EventQueueId); -} - -void TRetryEventsQueue::SendRetryable(const IRetryableEvent::TPtr& ev) { - NActors::TActivationContext::Send(ev->Clone(MyConfirmedSeqNo)); -} - -void TRetryEventsQueue::ScheduleRetry() { - if (!RetryScheduled && !Events.empty()) { - RetryScheduled = true; - if (!RetryState) { - RetryState.ConstructInPlace(); - } - auto ev = MakeHolder<TEvRetryQueuePrivate::TEvRetry>(EventQueueId); - NActors::TActivationContext::Schedule(RetryState->GetNextDelay(), new NActors::IEventHandle(SelfId, SelfId, ev.Release())); - } -} - -TDuration TRetryEventsQueue::TRetryState::GetNextDelay() { - constexpr TDuration MaxDelay = TDuration::Seconds(10); - constexpr TDuration MinDelay = TDuration::MilliSeconds(100); // from second retry - TDuration ret = Delay; // The first delay is zero - Delay = ClampVal(Delay * 2, MinDelay, MaxDelay); - return ret ? RandomizeDelay(ret) : ret; -} - -TDuration TRetryEventsQueue::TRetryState::RandomizeDelay(TDuration baseDelay) { - const TDuration::TValue half = baseDelay.GetValue() / 2; - return TDuration::FromValue(half + RandomNumber<TDuration::TValue>(half)); -} - -} // namespace NYql::NDq +#include "retry_queue.h" + +#include <util/generic/utility.h> + +namespace NYql::NDq { + +void TRetryEventsQueue::Init(const TTxId& txId, const NActors::TActorId& senderId, const NActors::TActorId& selfId, ui64 eventQueueId) { + TxId = txId; + SenderId = senderId; + SelfId = selfId; + Y_ASSERT(SelfId.NodeId() == SenderId.NodeId()); + EventQueueId = eventQueueId; +} + +void TRetryEventsQueue::OnNewRecipientId(const NActors::TActorId& recipientId, bool unsubscribe) { + if (unsubscribe) { + Unsubscribe(); + } + RecipientId = recipientId; + LocalRecipient = RecipientId.NodeId() == SelfId.NodeId(); + NextSeqNo = 1; + Events.clear(); + MyConfirmedSeqNo = 0; + ReceivedEventsSeqNos.clear(); + Connected = false; + RetryState = Nothing(); +} + +void TRetryEventsQueue::HandleNodeDisconnected(ui32 nodeId) { + if (nodeId == RecipientId.NodeId()) { + Connected = false; + ScheduleRetry(); + } +} + +void TRetryEventsQueue::HandleNodeConnected(ui32 nodeId) { + if (nodeId == RecipientId.NodeId()) { + if (!Connected) { + Connected = true; + RetryState = Nothing(); + + // (Re)send all events + for (const IRetryableEvent::TPtr& ev : Events) { + SendRetryable(ev); + } + } + } +} + +void TRetryEventsQueue::Retry() { + RetryScheduled = false; + if (!Connected) { + Connect(); + } +} + +void TRetryEventsQueue::Connect() { + auto connectEvent = MakeHolder<NActors::TEvInterconnect::TEvConnectNode>(); + NActors::TActivationContext::Send( + new NActors::IEventHandle(NActors::TActivationContext::InterconnectProxy(RecipientId.NodeId()), SenderId, connectEvent.Release(), 0, 
0)); +} + +void TRetryEventsQueue::Unsubscribe() { + if (Connected) { + Connected = false; + auto unsubscribeEvent = MakeHolder<NActors::TEvents::TEvUnsubscribe>(); + NActors::TActivationContext::Send( + new NActors::IEventHandle(NActors::TActivationContext::InterconnectProxy(RecipientId.NodeId()), SenderId, unsubscribeEvent.Release(), 0, 0)); + } +} + +void TRetryEventsQueue::RemoveConfirmedEvents(ui64 confirmedSeqNo) { + while (!Events.empty() && Events.front()->GetSeqNo() <= confirmedSeqNo) { + Events.pop_front(); + } + Y_VERIFY(Events.size() <= 10000, + "Too many unconfirmed events: %lu. Confirmed SeqNo: %lu. Unconfirmed SeqNos: %lu-%lu. TxId: \"%s\". EventQueueId: %lu", + Events.size(), + confirmedSeqNo, + Events.front()->GetSeqNo(), + Events.back()->GetSeqNo(), + (TStringBuilder() << TxId).c_str(), + EventQueueId); +} + +void TRetryEventsQueue::SendRetryable(const IRetryableEvent::TPtr& ev) { + NActors::TActivationContext::Send(ev->Clone(MyConfirmedSeqNo)); +} + +void TRetryEventsQueue::ScheduleRetry() { + if (!RetryScheduled && !Events.empty()) { + RetryScheduled = true; + if (!RetryState) { + RetryState.ConstructInPlace(); + } + auto ev = MakeHolder<TEvRetryQueuePrivate::TEvRetry>(EventQueueId); + NActors::TActivationContext::Schedule(RetryState->GetNextDelay(), new NActors::IEventHandle(SelfId, SelfId, ev.Release())); + } +} + +TDuration TRetryEventsQueue::TRetryState::GetNextDelay() { + constexpr TDuration MaxDelay = TDuration::Seconds(10); + constexpr TDuration MinDelay = TDuration::MilliSeconds(100); // from second retry + TDuration ret = Delay; // The first delay is zero + Delay = ClampVal(Delay * 2, MinDelay, MaxDelay); + return ret ? RandomizeDelay(ret) : ret; +} + +TDuration TRetryEventsQueue::TRetryState::RandomizeDelay(TDuration baseDelay) { + const TDuration::TValue half = baseDelay.GetValue() / 2; + return TDuration::FromValue(half + RandomNumber<TDuration::TValue>(half)); +} + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/actors/compute/retry_queue.h b/ydb/library/yql/dq/actors/compute/retry_queue.h index 5900a8c986d..d72dde9be18 100644 --- a/ydb/library/yql/dq/actors/compute/retry_queue.h +++ b/ydb/library/yql/dq/actors/compute/retry_queue.h @@ -1,191 +1,191 @@ -#pragma once +#pragma once #include <ydb/library/yql/dq/actors/protos/dq_events.pb.h> #include <ydb/library/yql/dq/common/dq_common.h> - -#include <library/cpp/actors/core/actor.h> -#include <library/cpp/actors/core/event_local.h> -#include <library/cpp/actors/core/events.h> -#include <library/cpp/actors/core/interconnect.h> - -#include <util/system/types.h> - -namespace NYql::NDq { - -struct TEvRetryQueuePrivate { - // Event ids. - enum EEv : ui32 { - EvBegin = EventSpaceBegin(NActors::TEvents::ES_PRIVATE), - - EvRetry = EvBegin, - - EvEnd - }; - - static_assert(EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE), "expect EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE)"); - - // Events. 
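As an illustration of the retry schedule implemented by TRetryEventsQueue::TRetryState::GetNextDelay() above: Delay starts at zero, so the first retry fires immediately while Delay is clamped up to the 100 ms minimum; each following call returns a value drawn uniformly from [Delay/2, Delay) and doubles Delay, so the waits grow roughly as 0, 50-100 ms, 100-200 ms, 200-400 ms, ... until Delay saturates at the 10 s cap, after which every retry waits between 5 s and 10 s. A successful reconnect resets RetryState, restarting the schedule from the immediate retry.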
- - struct TEvRetry : NActors::TEventLocal<TEvRetry, EvRetry> { - explicit TEvRetry(ui64 eventQueueId) - : EventQueueId(eventQueueId) - { - } - - const ui64 EventQueueId; - }; -}; - -template <class T> -concept TProtobufEvent = - std::is_base_of_v<google::protobuf::Message, typename T::ProtoRecordType>; - -template <class T> -concept TIsMessageTransportMetaRef = - std::is_same_v<T, const NYql::NDqProto::TMessageTransportMeta&>; - -template <class T> -concept THasTransportMeta = requires(T& ev) { - { ev.Record.GetTransportMeta() } -> TIsMessageTransportMetaRef; -}; - -template <class T> -concept TProtobufEventWithTransportMeta = TProtobufEvent<T> && THasTransportMeta<T>; - -class TRetryEventsQueue { -public: - class IRetryableEvent : public TSimpleRefCount<IRetryableEvent> { - public: - using TPtr = TIntrusivePtr<IRetryableEvent>; - virtual ~IRetryableEvent() = default; - virtual THolder<NActors::IEventHandle> Clone(ui64 confirmedSeqNo) const = 0; - virtual ui64 GetSeqNo() const = 0; - }; - - void Init(const TTxId& txId, const NActors::TActorId& senderId, const NActors::TActorId& selfId, ui64 eventQueueId = 0); - - template <TProtobufEventWithTransportMeta T> - void Send(T* ev, ui64 cookie = 0) { - Send(THolder<T>(ev), cookie); - } - - template <TProtobufEventWithTransportMeta T> - void Send(THolder<T> ev, ui64 cookie = 0) { - if (LocalRecipient) { - NActors::TActivationContext::Send(new NActors::IEventHandle(RecipientId, SenderId, ev.Release(), cookie)); - return; - } - - IRetryableEvent::TPtr retryableEvent = Store(RecipientId, SenderId, std::move(ev), cookie); - if (Connected) { - SendRetryable(retryableEvent); - } else { - ScheduleRetry(); - } - } - - template <TProtobufEventWithTransportMeta T> - bool OnEventReceived(const TAutoPtr<NActors::TEventHandle<T>>& ev) { - return OnEventReceived(ev->Get()); - } - - template <TProtobufEventWithTransportMeta T> - bool OnEventReceived(const T* ev) { // Returns true if event was not processed (== it was received first time). - if (LocalRecipient) { - return true; - } - - const NYql::NDqProto::TMessageTransportMeta& meta = ev->Record.GetTransportMeta(); - RemoveConfirmedEvents(meta.GetConfirmedSeqNo()); - - const ui64 seqNo = meta.GetSeqNo(); - if (seqNo == MyConfirmedSeqNo + 1) { - ++MyConfirmedSeqNo; - while (!ReceivedEventsSeqNos.empty() && *ReceivedEventsSeqNos.begin() == MyConfirmedSeqNo + 1) { - ++MyConfirmedSeqNo; - ReceivedEventsSeqNos.erase(ReceivedEventsSeqNos.begin()); - } - return true; - } else if (seqNo > MyConfirmedSeqNo) { - Y_VERIFY(ReceivedEventsSeqNos.size() < 10000); // Too wide window. 
- return ReceivedEventsSeqNos.insert(seqNo).second; - } - return false; - } - - void OnNewRecipientId(const NActors::TActorId& recipientId, bool unsubscribe = true); - void HandleNodeConnected(ui32 nodeId); - void HandleNodeDisconnected(ui32 nodeId); - void Retry(); - void Unsubscribe(); - -private: - template <TProtobufEventWithTransportMeta T> - IRetryableEvent::TPtr Store(const NActors::TActorId& recipient, const NActors::TActorId& sender, THolder<T> ev, ui64 cookie) { - ev->Record.MutableTransportMeta()->SetSeqNo(NextSeqNo++); - Events.push_back(MakeIntrusive<TRetryableEvent<T>>(recipient, sender, std::move(ev), cookie)); - return Events.back(); - } - - void RemoveConfirmedEvents(ui64 confirmedSeqNo); - void SendRetryable(const IRetryableEvent::TPtr& ev); - void ScheduleRetry(); - void Connect(); - -private: - template <TProtobufEventWithTransportMeta T> - class TRetryableEvent : public IRetryableEvent { - public: - TRetryableEvent(const NActors::TActorId& recipient, const NActors::TActorId& sender, THolder<T> ev, ui64 cookie) - : Event(std::move(ev)) - , Recipient(recipient) - , Sender(sender) - , Cookie(cookie) - { - } - - ui64 GetSeqNo() const override { - return Event->Record.GetTransportMeta().GetSeqNo(); - } - - THolder<NActors::IEventHandle> Clone(ui64 confirmedSeqNo) const override { - THolder<T> ev = MakeHolder<T>(); - ev->Record = Event->Record; - ev->Record.MutableTransportMeta()->SetConfirmedSeqNo(confirmedSeqNo); - return MakeHolder<NActors::IEventHandle>(Recipient, Sender, ev.Release(), 0, Cookie); - } - - private: - const THolder<T> Event; - const NActors::TActorId Recipient; - const NActors::TActorId Sender; - const ui64 Cookie; - }; - - class TRetryState { - public: - TDuration GetNextDelay(); - - private: - static TDuration RandomizeDelay(TDuration baseDelay); - - private: - TDuration Delay; // The first time retry will be done instantly. - }; - -private: - NActors::TActorId SenderId; - NActors::TActorId SelfId; - ui64 EventQueueId = 0; - NActors::TActorId RecipientId; - bool LocalRecipient = false; - ui64 NextSeqNo = 1; - std::deque<IRetryableEvent::TPtr> Events; - ui64 MyConfirmedSeqNo = 0; // Recceived events seq no border. - std::set<ui64> ReceivedEventsSeqNos; - bool Connected = false; - bool RetryScheduled = false; - TMaybe<TRetryState> RetryState; - TTxId TxId; -}; - -} // namespace NYql::NDq + +#include <library/cpp/actors/core/actor.h> +#include <library/cpp/actors/core/event_local.h> +#include <library/cpp/actors/core/events.h> +#include <library/cpp/actors/core/interconnect.h> + +#include <util/system/types.h> + +namespace NYql::NDq { + +struct TEvRetryQueuePrivate { + // Event ids. + enum EEv : ui32 { + EvBegin = EventSpaceBegin(NActors::TEvents::ES_PRIVATE), + + EvRetry = EvBegin, + + EvEnd + }; + + static_assert(EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE), "expect EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE)"); + + // Events. 
+ + struct TEvRetry : NActors::TEventLocal<TEvRetry, EvRetry> { + explicit TEvRetry(ui64 eventQueueId) + : EventQueueId(eventQueueId) + { + } + + const ui64 EventQueueId; + }; +}; + +template <class T> +concept TProtobufEvent = + std::is_base_of_v<google::protobuf::Message, typename T::ProtoRecordType>; + +template <class T> +concept TIsMessageTransportMetaRef = + std::is_same_v<T, const NYql::NDqProto::TMessageTransportMeta&>; + +template <class T> +concept THasTransportMeta = requires(T& ev) { + { ev.Record.GetTransportMeta() } -> TIsMessageTransportMetaRef; +}; + +template <class T> +concept TProtobufEventWithTransportMeta = TProtobufEvent<T> && THasTransportMeta<T>; + +class TRetryEventsQueue { +public: + class IRetryableEvent : public TSimpleRefCount<IRetryableEvent> { + public: + using TPtr = TIntrusivePtr<IRetryableEvent>; + virtual ~IRetryableEvent() = default; + virtual THolder<NActors::IEventHandle> Clone(ui64 confirmedSeqNo) const = 0; + virtual ui64 GetSeqNo() const = 0; + }; + + void Init(const TTxId& txId, const NActors::TActorId& senderId, const NActors::TActorId& selfId, ui64 eventQueueId = 0); + + template <TProtobufEventWithTransportMeta T> + void Send(T* ev, ui64 cookie = 0) { + Send(THolder<T>(ev), cookie); + } + + template <TProtobufEventWithTransportMeta T> + void Send(THolder<T> ev, ui64 cookie = 0) { + if (LocalRecipient) { + NActors::TActivationContext::Send(new NActors::IEventHandle(RecipientId, SenderId, ev.Release(), cookie)); + return; + } + + IRetryableEvent::TPtr retryableEvent = Store(RecipientId, SenderId, std::move(ev), cookie); + if (Connected) { + SendRetryable(retryableEvent); + } else { + ScheduleRetry(); + } + } + + template <TProtobufEventWithTransportMeta T> + bool OnEventReceived(const TAutoPtr<NActors::TEventHandle<T>>& ev) { + return OnEventReceived(ev->Get()); + } + + template <TProtobufEventWithTransportMeta T> + bool OnEventReceived(const T* ev) { // Returns true if event was not processed (== it was received first time). + if (LocalRecipient) { + return true; + } + + const NYql::NDqProto::TMessageTransportMeta& meta = ev->Record.GetTransportMeta(); + RemoveConfirmedEvents(meta.GetConfirmedSeqNo()); + + const ui64 seqNo = meta.GetSeqNo(); + if (seqNo == MyConfirmedSeqNo + 1) { + ++MyConfirmedSeqNo; + while (!ReceivedEventsSeqNos.empty() && *ReceivedEventsSeqNos.begin() == MyConfirmedSeqNo + 1) { + ++MyConfirmedSeqNo; + ReceivedEventsSeqNos.erase(ReceivedEventsSeqNos.begin()); + } + return true; + } else if (seqNo > MyConfirmedSeqNo) { + Y_VERIFY(ReceivedEventsSeqNos.size() < 10000); // Too wide window. 
+ return ReceivedEventsSeqNos.insert(seqNo).second; + } + return false; + } + + void OnNewRecipientId(const NActors::TActorId& recipientId, bool unsubscribe = true); + void HandleNodeConnected(ui32 nodeId); + void HandleNodeDisconnected(ui32 nodeId); + void Retry(); + void Unsubscribe(); + +private: + template <TProtobufEventWithTransportMeta T> + IRetryableEvent::TPtr Store(const NActors::TActorId& recipient, const NActors::TActorId& sender, THolder<T> ev, ui64 cookie) { + ev->Record.MutableTransportMeta()->SetSeqNo(NextSeqNo++); + Events.push_back(MakeIntrusive<TRetryableEvent<T>>(recipient, sender, std::move(ev), cookie)); + return Events.back(); + } + + void RemoveConfirmedEvents(ui64 confirmedSeqNo); + void SendRetryable(const IRetryableEvent::TPtr& ev); + void ScheduleRetry(); + void Connect(); + +private: + template <TProtobufEventWithTransportMeta T> + class TRetryableEvent : public IRetryableEvent { + public: + TRetryableEvent(const NActors::TActorId& recipient, const NActors::TActorId& sender, THolder<T> ev, ui64 cookie) + : Event(std::move(ev)) + , Recipient(recipient) + , Sender(sender) + , Cookie(cookie) + { + } + + ui64 GetSeqNo() const override { + return Event->Record.GetTransportMeta().GetSeqNo(); + } + + THolder<NActors::IEventHandle> Clone(ui64 confirmedSeqNo) const override { + THolder<T> ev = MakeHolder<T>(); + ev->Record = Event->Record; + ev->Record.MutableTransportMeta()->SetConfirmedSeqNo(confirmedSeqNo); + return MakeHolder<NActors::IEventHandle>(Recipient, Sender, ev.Release(), 0, Cookie); + } + + private: + const THolder<T> Event; + const NActors::TActorId Recipient; + const NActors::TActorId Sender; + const ui64 Cookie; + }; + + class TRetryState { + public: + TDuration GetNextDelay(); + + private: + static TDuration RandomizeDelay(TDuration baseDelay); + + private: + TDuration Delay; // The first time retry will be done instantly. + }; + +private: + NActors::TActorId SenderId; + NActors::TActorId SelfId; + ui64 EventQueueId = 0; + NActors::TActorId RecipientId; + bool LocalRecipient = false; + ui64 NextSeqNo = 1; + std::deque<IRetryableEvent::TPtr> Events; + ui64 MyConfirmedSeqNo = 0; // Recceived events seq no border. 
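A worked example of the SeqNo bookkeeping in OnEventReceived() above: with MyConfirmedSeqNo == 0, receiving events 1, 3, 4 advances MyConfirmedSeqNo to 1 and buffers {3, 4} in ReceivedEventsSeqNos; when the missing event 2 arrives, MyConfirmedSeqNo jumps to 4 and the buffer drains. A duplicate (SeqNo already confirmed or already buffered) makes OnEventReceived() return false so the event is skipped, and the ConfirmedSeqNo carried back in TMessageTransportMeta lets the peer discard acknowledged events via RemoveConfirmedEvents().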
+ std::set<ui64> ReceivedEventsSeqNos; + bool Connected = false; + bool RetryScheduled = false; + TMaybe<TRetryState> RetryState; + TTxId TxId; +}; + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/actors/compute/ya.make b/ydb/library/yql/dq/actors/compute/ya.make index c4b20bd502d..6f956f1a582 100644 --- a/ydb/library/yql/dq/actors/compute/ya.make +++ b/ydb/library/yql/dq/actors/compute/ya.make @@ -10,10 +10,10 @@ SRCS( dq_async_compute_actor.cpp dq_compute_actor_channels.cpp dq_compute_actor_checkpoints.cpp - dq_compute_actor_io_actors_factory.cpp + dq_compute_actor_io_actors_factory.cpp dq_compute_actor_stats.cpp dq_compute_issues_buffer.cpp - retry_queue.cpp + retry_queue.cpp ) PEERDIR( diff --git a/ydb/library/yql/dq/actors/dq_events_ids.h b/ydb/library/yql/dq/actors/dq_events_ids.h index 698377a393a..d4265a0572e 100644 --- a/ydb/library/yql/dq/actors/dq_events_ids.h +++ b/ydb/library/yql/dq/actors/dq_events_ids.h @@ -47,7 +47,7 @@ struct TDqComputeEvents { EvGetTaskState, EvGetTaskStateResult, EvStateRequest, - EvNewCheckpointCoordinatorAck, + EvNewCheckpointCoordinatorAck, // place all new events here diff --git a/ydb/library/yql/dq/actors/protos/dq_events.proto b/ydb/library/yql/dq/actors/protos/dq_events.proto index 0e38ab3bbc1..485df47d913 100644 --- a/ydb/library/yql/dq/actors/protos/dq_events.proto +++ b/ydb/library/yql/dq/actors/protos/dq_events.proto @@ -28,7 +28,7 @@ message TChannelData { }; message TEvRun { - optional TMessageTransportMeta TransportMeta = 100; + optional TMessageTransportMeta TransportMeta = 100; } message TEvChannelsInfo { @@ -106,19 +106,19 @@ message TComputeRuntimeSettings { message TEvNewCheckpointCoordinator { optional uint64 Generation = 1; optional string GraphId = 2; - - optional TMessageTransportMeta TransportMeta = 100; -} - -message TEvNewCheckpointCoordinatorAck { - optional TMessageTransportMeta TransportMeta = 100; + + optional TMessageTransportMeta TransportMeta = 100; } +message TEvNewCheckpointCoordinatorAck { + optional TMessageTransportMeta TransportMeta = 100; +} + message TEvInjectCheckpoint { optional TCheckpoint Checkpoint = 1; optional uint64 Generation = 2; - - optional TMessageTransportMeta TransportMeta = 100; + + optional TMessageTransportMeta TransportMeta = 100; } message TEvSaveTaskStateResult { @@ -133,32 +133,32 @@ message TEvSaveTaskStateResult { optional uint64 TaskId = 2; optional EStatus Status = 3; optional uint64 StateSizeBytes = 4; - - optional TMessageTransportMeta TransportMeta = 100; + + optional TMessageTransportMeta TransportMeta = 100; } message TEvCommitState { optional TCheckpoint Checkpoint = 1; optional uint64 Generation = 2; - - optional TMessageTransportMeta TransportMeta = 100; + + optional TMessageTransportMeta TransportMeta = 100; } message TEvStateCommitted { optional TCheckpoint Checkpoint = 1; optional uint64 TaskId = 2; - - optional TMessageTransportMeta TransportMeta = 100; + + optional TMessageTransportMeta TransportMeta = 100; } message TEvRestoreFromCheckpoint { optional TCheckpoint Checkpoint = 1; optional uint64 Generation = 2; - optional NDqStateLoadPlan.TTaskPlan StateLoadPlan = 3; - - optional TMessageTransportMeta TransportMeta = 100; + optional NDqStateLoadPlan.TTaskPlan StateLoadPlan = 3; + + optional TMessageTransportMeta TransportMeta = 100; } - + message TEvRestoreFromCheckpointResult { enum ERestoreStatus { UNSPECIFIED = 0; @@ -169,11 +169,11 @@ message TEvRestoreFromCheckpointResult { optional TCheckpoint Checkpoint = 1; optional uint64 TaskId = 2; optional 
ERestoreStatus Status = 3; - - optional TMessageTransportMeta TransportMeta = 100; -} - -message TMessageTransportMeta { - optional uint64 SeqNo = 1; // SeqNo of message - optional uint64 ConfirmedSeqNo = 2; // All input messages with SeqNo <= ConfirmedSeqNo are received. + + optional TMessageTransportMeta TransportMeta = 100; } + +message TMessageTransportMeta { + optional uint64 SeqNo = 1; // SeqNo of message + optional uint64 ConfirmedSeqNo = 2; // All input messages with SeqNo <= ConfirmedSeqNo are received. +} diff --git a/ydb/library/yql/dq/actors/protos/dq_stats.proto b/ydb/library/yql/dq/actors/protos/dq_stats.proto index 6b608790305..6f492aa01a5 100644 --- a/ydb/library/yql/dq/actors/protos/dq_stats.proto +++ b/ydb/library/yql/dq/actors/protos/dq_stats.proto @@ -122,12 +122,12 @@ message TDqTaskStats { // profile stats uint64 BuildCpuTimeUs = 103; // prepare task time: build computation graph, prepare parameters, ... - uint64 WaitTimeUs = 104; // total wait (input + output) wall time + uint64 WaitTimeUs = 104; // total wait (input + output) wall time uint64 WaitOutputTimeUs = 105; // wait output wall time (any output: channels, sinks, ...) - uint64 ComputeCpuTimeUs = 102; // compute time only - uint64 PendingInputTimeUs = 107; // time waiting input data - uint64 PendingOutputTimeUs = 108; // time waiting output data - uint64 FinishTimeUs = 109; // time in finished state // ComputeCpuTimeUs + PendingInputTimeUs + PendingOutputTimeUs + FinishTimeUs == 100% (or == const in aggregated graphs for several stages/tasks) + uint64 ComputeCpuTimeUs = 102; // compute time only + uint64 PendingInputTimeUs = 107; // time waiting input data + uint64 PendingOutputTimeUs = 108; // time waiting output data + uint64 FinishTimeUs = 109; // time in finished state // ComputeCpuTimeUs + PendingInputTimeUs + PendingOutputTimeUs + FinishTimeUs == 100% (or == const in aggregated graphs for several stages/tasks) repeated TDqMkqlStat MkqlStats = 110; // stats from mkql message THistBucket { diff --git a/ydb/library/yql/dq/expr_nodes/dq_expr_nodes.json b/ydb/library/yql/dq/expr_nodes/dq_expr_nodes.json index 67627b1b8bd..b09dc6d8a98 100644 --- a/ydb/library/yql/dq/expr_nodes/dq_expr_nodes.json +++ b/ydb/library/yql/dq/expr_nodes/dq_expr_nodes.json @@ -57,29 +57,29 @@ "Match": {"Type": "Callable", "Name": "DqPhyJoinDict"} }, { - "Name": "TDqSource", - "Base": "TCallable", - "Match": {"Type": "Callable", "Name": "DqSource"}, - "Children": [ - {"Index": 0, "Name": "DataSource", "Type": "TCallable"}, - {"Index": 1, "Name": "Settings", "Type": "TExprBase"} - ] - }, - { - "Name": "TDqSink", - "Base": "TCallable", - "Match": {"Type": "Callable", "Name": "DqSink"}, - "Children": [ - {"Index": 0, "Name": "DataSink", "Type": "TCallable"}, - {"Index": 1, "Name": "Settings", "Type": "TExprBase"}, - {"Index": 2, "Name": "Index", "Type": "TCoAtom"} - ] - }, - { - "Name": "TDqSinksList", - "ListBase": "TDqSink" - }, - { + "Name": "TDqSource", + "Base": "TCallable", + "Match": {"Type": "Callable", "Name": "DqSource"}, + "Children": [ + {"Index": 0, "Name": "DataSource", "Type": "TCallable"}, + {"Index": 1, "Name": "Settings", "Type": "TExprBase"} + ] + }, + { + "Name": "TDqSink", + "Base": "TCallable", + "Match": {"Type": "Callable", "Name": "DqSink"}, + "Children": [ + {"Index": 0, "Name": "DataSink", "Type": "TCallable"}, + {"Index": 1, "Name": "Settings", "Type": "TExprBase"}, + {"Index": 2, "Name": "Index", "Type": "TCoAtom"} + ] + }, + { + "Name": "TDqSinksList", + "ListBase": "TDqSink" + }, + { "Name": "TDqStageBase", 
"Base": "TCallable", "Match": {"Type": "CallableBase"}, @@ -87,8 +87,8 @@ "Children": [ {"Index": 0, "Name": "Inputs", "Type": "TExprList"}, {"Index": 1, "Name": "Program", "Type": "TCoLambda"}, - {"Index": 2, "Name": "Settings", "Type": "TCoNameValueTupleList"}, - {"Index": 3, "Name": "Sinks", "Type": "TDqSinksList", "Optional": true} + {"Index": 2, "Name": "Settings", "Type": "TCoNameValueTupleList"}, + {"Index": 3, "Name": "Sinks", "Type": "TDqSinksList", "Optional": true} ] }, { @@ -103,7 +103,7 @@ }, { "Name": "TDqStageList", - "ListBase": "TDqStageBase" + "ListBase": "TDqStageBase" }, { "Name": "TDqPhyStageList", @@ -191,15 +191,15 @@ "Children": [ {"Index": 0, "Name": "Input", "Type": "TExprBase"} ] - }, - { - "Name": "TDqQuery", - "Base": "TCallable", - "Match": {"Type": "Callable", "Name": "DqQuery!"}, - "Children": [ - {"Index": 0, "Name": "World", "Type": "TExprBase"}, - {"Index": 1, "Name": "SinkStages", "Type": "TDqStageList"} - ] + }, + { + "Name": "TDqQuery", + "Base": "TCallable", + "Match": {"Type": "Callable", "Name": "DqQuery!"}, + "Children": [ + {"Index": 0, "Name": "World", "Type": "TExprBase"}, + {"Index": 1, "Name": "SinkStages", "Type": "TDqStageList"} + ] }, { "Name": "TDqPrecompute", diff --git a/ydb/library/yql/dq/opt/dq_opt.cpp b/ydb/library/yql/dq/opt/dq_opt.cpp index f60f29dd460..37de99c7ea3 100644 --- a/ydb/library/yql/dq/opt/dq_opt.cpp +++ b/ydb/library/yql/dq/opt/dq_opt.cpp @@ -144,15 +144,15 @@ bool IsSingleConsumerConnection(const TDqConnection& node, const TParentsMap& pa && (allowStageMultiUsage || IsSingleConsumer(node.Output().Stage(), parentsMap)); } -ui32 GetStageOutputsCount(const TDqStageBase& stage, bool includingSinks) { +ui32 GetStageOutputsCount(const TDqStageBase& stage, bool includingSinks) { auto stageType = stage.Ref().GetTypeAnn(); YQL_ENSURE(stageType); auto resultsTypeTuple = stageType->Cast<TTupleExprType>(); - ui32 result = resultsTypeTuple->GetSize(); - if (!includingSinks && stage.Sinks()) { - result -= stage.Sinks().Cast().Size(); - } - return result; + ui32 result = resultsTypeTuple->GetSize(); + if (!includingSinks && stage.Sinks()) { + result -= stage.Sinks().Cast().Size(); + } + return result; } TVector<TDqConnection> FindDqConnections(const TExprBase& node) { diff --git a/ydb/library/yql/dq/opt/dq_opt.h b/ydb/library/yql/dq/opt/dq_opt.h index 5b3146d0f42..edcc7a5dafb 100644 --- a/ydb/library/yql/dq/opt/dq_opt.h +++ b/ydb/library/yql/dq/opt/dq_opt.h @@ -54,7 +54,7 @@ bool IsSingleConsumer(const NNodes::TExprBase& node, const TParentsMap& parentsM bool IsSingleConsumerConnection(const NNodes::TDqConnection& node, const TParentsMap& parentsMap, bool allowStageMultiUsage = true); -ui32 GetStageOutputsCount(const NNodes::TDqStageBase& stage, bool includingSinks); +ui32 GetStageOutputsCount(const NNodes::TDqStageBase& stage, bool includingSinks); TVector<NNodes::TDqConnection> FindDqConnections(const NNodes::TExprBase& node); bool IsDqPureExpr(const NNodes::TExprBase& node, bool isPrecomputePure = true); diff --git a/ydb/library/yql/dq/opt/dq_opt_build.cpp b/ydb/library/yql/dq/opt/dq_opt_build.cpp index e57b85a9113..7f39679cf2f 100644 --- a/ydb/library/yql/dq/opt/dq_opt_build.cpp +++ b/ydb/library/yql/dq/opt/dq_opt_build.cpp @@ -168,18 +168,18 @@ void MakeConsumerReplaces( if (info.Consumers.size() <= 1) { YQL_ENSURE(info.ConsumersCount == info.Consumers.size()); return; - } + } if (info.HasDependantConsumers && !allowDependantConsumers) { for (ui32 i = 0; i < info.Consumers.size(); ++i) { if (info.Consumers[i]) { 
MakeConsumerReplaces(dqStage, {info.Consumers[i].Cast()}, replaces, ctx); } - } - + } + return; - } - + } + if (info.ConsumersCount == info.Consumers.size()) { return; } @@ -215,10 +215,10 @@ public: for (const auto& item : maybeList.Cast()) { queryNodes.push_back(item); } - } else { + } else { queryRoots.push_back(head); } - } + } auto filter = [](const TExprNode::TPtr& exprNode) { return !exprNode->IsLambda(); @@ -260,7 +260,7 @@ public: consumersMap[stage].HasDependantConsumers = true; } } - } + } TNodeOnNodeOwnedMap replaces; for (const auto& [stage, info] : consumersMap) { @@ -271,15 +271,15 @@ public: return TStatus::Ok; } - TOptimizeExprSettings settings{nullptr}; - settings.VisitLambdas = false; + TOptimizeExprSettings settings{nullptr}; + settings.VisitLambdas = false; return RemapExpr(input, output, replaces, ctx, settings); } private: const bool AllowDependantConsumers; }; - + class TDqBuildPhysicalStagesTransformer : public TSyncTransformerBase { public: explicit TDqBuildPhysicalStagesTransformer() {} diff --git a/ydb/library/yql/dq/opt/dq_opt_log.cpp b/ydb/library/yql/dq/opt/dq_opt_log.cpp index fed0f09f0f0..ac3fd173fc1 100644 --- a/ydb/library/yql/dq/opt/dq_opt_log.cpp +++ b/ydb/library/yql/dq/opt/dq_opt_log.cpp @@ -142,50 +142,50 @@ TExprBase DqExpandWindowFunctions(TExprBase node, TExprContext& ctx, bool enforc } } -static void CollectSinkStages(const NNodes::TDqQuery& dqQuery, THashSet<TExprNode::TPtr, TExprNode::TPtrHash>& sinkStages) { - for (const auto& stage : dqQuery.SinkStages()) { - sinkStages.insert(stage.Ptr()); - } +static void CollectSinkStages(const NNodes::TDqQuery& dqQuery, THashSet<TExprNode::TPtr, TExprNode::TPtrHash>& sinkStages) { + for (const auto& stage : dqQuery.SinkStages()) { + sinkStages.insert(stage.Ptr()); + } } - -NNodes::TExprBase DqMergeQueriesWithSinks(NNodes::TExprBase dqQueryNode, TExprContext& ctx) { - NNodes::TDqQuery dqQuery = dqQueryNode.Cast<NNodes::TDqQuery>(); - - THashSet<TExprNode::TPtr, TExprNode::TPtrHash> sinkStages; - CollectSinkStages(dqQuery, sinkStages); - TOptimizeExprSettings settings{nullptr}; - settings.VisitLambdas = false; - bool deletedDqQueryChild = false; - TExprNode::TPtr newDqQueryNode; - auto status = OptimizeExpr(dqQueryNode.Ptr(), newDqQueryNode, [&sinkStages, &deletedDqQueryChild](const TExprNode::TPtr& node, TExprContext& ctx) -> TExprNode::TPtr { - for (ui32 childIndex = 0; childIndex < node->ChildrenSize(); ++childIndex) { - TExprNode* child = node->Child(childIndex); - if (child->IsCallable(NNodes::TDqQuery::CallableName())) { - NNodes::TDqQuery dqQueryChild(child); - CollectSinkStages(dqQueryChild, sinkStages); - deletedDqQueryChild = true; - return ctx.ChangeChild(*node, childIndex, dqQueryChild.World().Ptr()); - } - } - return node; - }, ctx, settings); - YQL_ENSURE(status != IGraphTransformer::TStatus::Error, "Failed to merge DqQuery nodes: " << status); - - if (deletedDqQueryChild) { - auto dqQueryBuilder = Build<TDqQuery>(ctx, dqQuery.Pos()); - dqQueryBuilder.World(newDqQueryNode->ChildPtr(TDqQuery::idx_World)); - - auto sinkStagesBuilder = dqQueryBuilder.SinkStages(); - for (const TExprNode::TPtr& stage : sinkStages) { - sinkStagesBuilder.Add(stage); - } - sinkStagesBuilder.Build(); - - return dqQueryBuilder.Done(); - } - return dqQueryNode; -} - + +NNodes::TExprBase DqMergeQueriesWithSinks(NNodes::TExprBase dqQueryNode, TExprContext& ctx) { + NNodes::TDqQuery dqQuery = dqQueryNode.Cast<NNodes::TDqQuery>(); + + THashSet<TExprNode::TPtr, TExprNode::TPtrHash> sinkStages; + CollectSinkStages(dqQuery, 
sinkStages); + TOptimizeExprSettings settings{nullptr}; + settings.VisitLambdas = false; + bool deletedDqQueryChild = false; + TExprNode::TPtr newDqQueryNode; + auto status = OptimizeExpr(dqQueryNode.Ptr(), newDqQueryNode, [&sinkStages, &deletedDqQueryChild](const TExprNode::TPtr& node, TExprContext& ctx) -> TExprNode::TPtr { + for (ui32 childIndex = 0; childIndex < node->ChildrenSize(); ++childIndex) { + TExprNode* child = node->Child(childIndex); + if (child->IsCallable(NNodes::TDqQuery::CallableName())) { + NNodes::TDqQuery dqQueryChild(child); + CollectSinkStages(dqQueryChild, sinkStages); + deletedDqQueryChild = true; + return ctx.ChangeChild(*node, childIndex, dqQueryChild.World().Ptr()); + } + } + return node; + }, ctx, settings); + YQL_ENSURE(status != IGraphTransformer::TStatus::Error, "Failed to merge DqQuery nodes: " << status); + + if (deletedDqQueryChild) { + auto dqQueryBuilder = Build<TDqQuery>(ctx, dqQuery.Pos()); + dqQueryBuilder.World(newDqQueryNode->ChildPtr(TDqQuery::idx_World)); + + auto sinkStagesBuilder = dqQueryBuilder.SinkStages(); + for (const TExprNode::TPtr& stage : sinkStages) { + sinkStagesBuilder.Add(stage); + } + sinkStagesBuilder.Build(); + + return dqQueryBuilder.Done(); + } + return dqQueryNode; +} + NNodes::TMaybeNode<NNodes::TExprBase> DqUnorderedInStage(NNodes::TExprBase node, const std::function<bool(const TExprNode*)>& stopTraverse, TExprContext& ctx, TTypeAnnotationContext* typeCtx) { @@ -202,7 +202,7 @@ NNodes::TMaybeNode<NNodes::TExprBase> DqUnorderedInStage(NNodes::TExprBase node, } return node; -} +} NNodes::TExprBase DqFlatMapOverExtend(NNodes::TExprBase node, TExprContext& ctx) { diff --git a/ydb/library/yql/dq/opt/dq_opt_log.h b/ydb/library/yql/dq/opt/dq_opt_log.h index 8e9ed97b063..01895d8e649 100644 --- a/ydb/library/yql/dq/opt/dq_opt_log.h +++ b/ydb/library/yql/dq/opt/dq_opt_log.h @@ -22,8 +22,8 @@ NNodes::TExprBase DqEnforceCompactPartition(NNodes::TExprBase node, NNodes::TExp NNodes::TExprBase DqExpandWindowFunctions(NNodes::TExprBase node, TExprContext& ctx, bool enforceCompact); -NNodes::TExprBase DqMergeQueriesWithSinks(NNodes::TExprBase dqQueryNode, TExprContext& ctx); - +NNodes::TExprBase DqMergeQueriesWithSinks(NNodes::TExprBase dqQueryNode, TExprContext& ctx); + NNodes::TExprBase DqFlatMapOverExtend(NNodes::TExprBase node, TExprContext& ctx); NNodes::TMaybeNode<NNodes::TExprBase> DqUnorderedInStage(NNodes::TExprBase node, diff --git a/ydb/library/yql/dq/opt/dq_opt_peephole.cpp b/ydb/library/yql/dq/opt/dq_opt_peephole.cpp index 87ac170eeac..56b8a2afb18 100644 --- a/ydb/library/yql/dq/opt/dq_opt_peephole.cpp +++ b/ydb/library/yql/dq/opt/dq_opt_peephole.cpp @@ -6,8 +6,8 @@ #include <ydb/library/yql/utils/log/log.h> -#include <util/generic/size_literals.h> - +#include <util/generic/size_literals.h> + namespace NYql::NDq { using namespace NYql::NNodes; @@ -515,30 +515,30 @@ NNodes::TExprBase DqPeepholeRewritePureJoin(const NNodes::TExprBase& node, TExpr } } -NNodes::TExprBase DqPeepholeRewriteReplicate(const NNodes::TExprBase& node, TExprContext& ctx) { - if (!node.Maybe<TDqReplicate>()) { - return node; - } - auto dqReplicate = node.Cast<TDqReplicate>(); - - TVector<TExprBase> branches; - branches.reserve(dqReplicate.Args().Count() - 1); - - auto inputIndex = NDq::BuildAtomList("0", dqReplicate.Pos(), ctx); - for (size_t i = 1; i < dqReplicate.Args().Count(); ++i) { - branches.emplace_back(inputIndex); - branches.emplace_back(ctx.DeepCopyLambda(dqReplicate.Args().Get(i).Ref())); - } - - return Build<TCoSwitch>(ctx, dqReplicate.Pos()) 
- .Input(dqReplicate.Input()) - .BufferBytes() - .Value(ToString(128_MB)) - .Build() - .FreeArgs() - .Add(branches) - .Build() - .Done(); -} - +NNodes::TExprBase DqPeepholeRewriteReplicate(const NNodes::TExprBase& node, TExprContext& ctx) { + if (!node.Maybe<TDqReplicate>()) { + return node; + } + auto dqReplicate = node.Cast<TDqReplicate>(); + + TVector<TExprBase> branches; + branches.reserve(dqReplicate.Args().Count() - 1); + + auto inputIndex = NDq::BuildAtomList("0", dqReplicate.Pos(), ctx); + for (size_t i = 1; i < dqReplicate.Args().Count(); ++i) { + branches.emplace_back(inputIndex); + branches.emplace_back(ctx.DeepCopyLambda(dqReplicate.Args().Get(i).Ref())); + } + + return Build<TCoSwitch>(ctx, dqReplicate.Pos()) + .Input(dqReplicate.Input()) + .BufferBytes() + .Value(ToString(128_MB)) + .Build() + .FreeArgs() + .Add(branches) + .Build() + .Done(); +} + } // namespace NYql::NDq diff --git a/ydb/library/yql/dq/opt/dq_opt_peephole.h b/ydb/library/yql/dq/opt/dq_opt_peephole.h index 565ee4296d2..cb02f278423 100644 --- a/ydb/library/yql/dq/opt/dq_opt_peephole.h +++ b/ydb/library/yql/dq/opt/dq_opt_peephole.h @@ -10,7 +10,7 @@ namespace NYql::NDq { NNodes::TExprBase DqPeepholeRewriteCrossJoin(const NNodes::TExprBase& node, TExprContext& ctx); NNodes::TExprBase DqPeepholeRewriteJoinDict(const NNodes::TExprBase& node, TExprContext& ctx); NNodes::TExprBase DqPeepholeRewriteMapJoin(const NNodes::TExprBase& node, TExprContext& ctx); -NNodes::TExprBase DqPeepholeRewriteReplicate(const NNodes::TExprBase& node, TExprContext& ctx); +NNodes::TExprBase DqPeepholeRewriteReplicate(const NNodes::TExprBase& node, TExprContext& ctx); NNodes::TExprBase DqPeepholeRewritePureJoin(const NNodes::TExprBase& node, TExprContext& ctx); } // namespace NYql::NDq diff --git a/ydb/library/yql/dq/opt/dq_opt_phy.cpp b/ydb/library/yql/dq/opt/dq_opt_phy.cpp index cbe871059d7..63134e12eab 100644 --- a/ydb/library/yql/dq/opt/dq_opt_phy.cpp +++ b/ydb/library/yql/dq/opt/dq_opt_phy.cpp @@ -204,7 +204,7 @@ TMaybeNode<TDqStage> DqPushLambdaToStage(const TDqStage& stage, const TCoAtom& o return TDqPhyPrecompute::Match(node) || TDqConnection::Match(node); }, ctx)); ui32 index = FromString<ui32>(outputIndex.Value()); - ui32 branchesCount = GetStageOutputsCount(stage, true); + ui32 branchesCount = GetStageOutputsCount(stage, true); TExprNode::TPtr newProgram; if (branchesCount == 1) { diff --git a/ydb/library/yql/dq/proto/dq_checkpoint.proto b/ydb/library/yql/dq/proto/dq_checkpoint.proto index 5f32c92c5c0..e3723a6ae9b 100644 --- a/ydb/library/yql/dq/proto/dq_checkpoint.proto +++ b/ydb/library/yql/dq/proto/dq_checkpoint.proto @@ -1,50 +1,50 @@ -syntax = "proto3"; -option cc_enable_arenas = true; - -package NYql.NDqProto; - -message TStateData { - oneof State { - TExtStateId StateId = 1; // Unique (for TComputeActorCheckpoint) state id for big blobs. // TODO: implement - TData StateData = 2; // In-place blob data for small blobs - } - - message TExtStateId { - uint32 StateId = 1; - } - - message TData { - bytes Blob = 1; - uint64 Version = 2; - // TODO: codec - // TODO: structured data (map, array) - // TODO: minikql node explicit id (HOP) - } -} - -message TSourceState { - // State data for source. - // Typically there is only one element with state that - // source saved. But when we are migrating states - // between tasks there can be state - // from several different tasks sources. 
- repeated TStateData Data = 1; - uint64 InputIndex = 2; -} - -message TSinkState { - TStateData Data = 1; - uint64 OutputIndex = 2; -} - -message TMiniKqlProgramState { - TStateData Data = 1; - uint64 RuntimeVersion = 2; -} - -// Checkpoint for single compute actor. -message TComputeActorState { - TMiniKqlProgramState MiniKqlProgram = 1; - repeated TSourceState Sources = 2; - repeated TSinkState Sinks = 3; -} +syntax = "proto3"; +option cc_enable_arenas = true; + +package NYql.NDqProto; + +message TStateData { + oneof State { + TExtStateId StateId = 1; // Unique (for TComputeActorCheckpoint) state id for big blobs. // TODO: implement + TData StateData = 2; // In-place blob data for small blobs + } + + message TExtStateId { + uint32 StateId = 1; + } + + message TData { + bytes Blob = 1; + uint64 Version = 2; + // TODO: codec + // TODO: structured data (map, array) + // TODO: minikql node explicit id (HOP) + } +} + +message TSourceState { + // State data for source. + // Typically there is only one element with state that + // source saved. But when we are migrating states + // between tasks there can be state + // from several different tasks sources. + repeated TStateData Data = 1; + uint64 InputIndex = 2; +} + +message TSinkState { + TStateData Data = 1; + uint64 OutputIndex = 2; +} + +message TMiniKqlProgramState { + TStateData Data = 1; + uint64 RuntimeVersion = 2; +} + +// Checkpoint for single compute actor. +message TComputeActorState { + TMiniKqlProgramState MiniKqlProgram = 1; + repeated TSourceState Sources = 2; + repeated TSinkState Sinks = 3; +} diff --git a/ydb/library/yql/dq/proto/dq_state_load_plan.proto b/ydb/library/yql/dq/proto/dq_state_load_plan.proto index 50463b23a1e..8efe1d9d661 100644 --- a/ydb/library/yql/dq/proto/dq_state_load_plan.proto +++ b/ydb/library/yql/dq/proto/dq_state_load_plan.proto @@ -1,46 +1,46 @@ -syntax = "proto3"; -option cc_enable_arenas = true; - -package NYql.NDqProto.NDqStateLoadPlan; - -enum EStateType { - STATE_TYPE_UNSPECIFIED = 0; - - // Don't load state, init a new one. - STATE_TYPE_EMPTY = 1; - - // Load own state. - STATE_TYPE_OWN = 2; - - // Load state from the other task. - STATE_TYPE_FOREIGN = 3; -} - -message TTaskPlan { - EStateType StateType = 1; - - // Options for foreign state. - TProgramPlan Program = 2; - repeated TSourcePlan Sources = 3; - repeated TSinkPlan Sinks = 4; -} - -message TProgramPlan { - EStateType StateType = 1; -} - -message TSourcePlan { - EStateType StateType = 1; - uint64 InputIndex = 2; - repeated TForeignTaskSource ForeignTasksSources = 3; - - message TForeignTaskSource { - uint64 TaskId = 1; - uint64 InputIndex = 2; - } -} - -message TSinkPlan { - EStateType StateType = 1; - uint64 OutputIndex = 2; -} +syntax = "proto3"; +option cc_enable_arenas = true; + +package NYql.NDqProto.NDqStateLoadPlan; + +enum EStateType { + STATE_TYPE_UNSPECIFIED = 0; + + // Don't load state, init a new one. + STATE_TYPE_EMPTY = 1; + + // Load own state. + STATE_TYPE_OWN = 2; + + // Load state from the other task. + STATE_TYPE_FOREIGN = 3; +} + +message TTaskPlan { + EStateType StateType = 1; + + // Options for foreign state. 
+ TProgramPlan Program = 2; + repeated TSourcePlan Sources = 3; + repeated TSinkPlan Sinks = 4; +} + +message TProgramPlan { + EStateType StateType = 1; +} + +message TSourcePlan { + EStateType StateType = 1; + uint64 InputIndex = 2; + repeated TForeignTaskSource ForeignTasksSources = 3; + + message TForeignTaskSource { + uint64 TaskId = 1; + uint64 InputIndex = 2; + } +} + +message TSinkPlan { + EStateType StateType = 1; + uint64 OutputIndex = 2; +} diff --git a/ydb/library/yql/dq/proto/dq_tasks.proto b/ydb/library/yql/dq/proto/dq_tasks.proto index e29e64ef2c5..bd39d484310 100644 --- a/ydb/library/yql/dq/proto/dq_tasks.proto +++ b/ydb/library/yql/dq/proto/dq_tasks.proto @@ -10,16 +10,16 @@ import "ydb/library/yql/dq/proto/dq_transport.proto"; enum ERuntimeVersion { RUNTIME_VERSION_UNSPECIFIED = 0; RUNTIME_VERSION_YQL_1_0 = 100000; -} - -// Mode of checkpoints support. -// There can be different checkpoints settings -// in different graph parts. -enum ECheckpointingMode { - CHECKPOINTING_MODE_DEFAULT = 0; // Checkpoints are used according to common settings for our type of query. - CHECKPOINTING_MODE_DISABLED = 1; // Checkpoints are not used in this part of graph (channels, tasks). This is typically when we are deadling with finite part of graph. -} - +} + +// Mode of checkpoints support. +// There can be different checkpoints settings +// in different graph parts. +enum ECheckpointingMode { + CHECKPOINTING_MODE_DEFAULT = 0; // Checkpoints are used according to common settings for our type of query. + CHECKPOINTING_MODE_DISABLED = 1; // Checkpoints are not used in this part of graph (channels, tasks). This is typically when we are deadling with finite part of graph. +} + message TProgram { message TSettings { bool HasMapJoin = 1; @@ -30,7 +30,7 @@ message TProgram { uint32 RuntimeVersion = 1; bytes Raw = 2; TSettings Settings = 3; -} +} message TDqUri { string Protocol = 1; @@ -44,7 +44,7 @@ message TEndpoint { fixed64 TabletId = 2; TDqUri Uri = 3; } -} +} message TChannel { uint64 Id = 1; @@ -57,17 +57,17 @@ message TChannel { bool IsPersistent = 7; bool InMemory = 8; - ECheckpointingMode CheckpointingMode = 9; -} + ECheckpointingMode CheckpointingMode = 9; +} message TUnionAllInput { -} - -message TSourceInput { - string Type = 1; - google.protobuf.Any Settings = 2; -} +} +message TSourceInput { + string Type = 1; + google.protobuf.Any Settings = 2; +} + message TSortColumn { string Column = 1; bool Ascending = 2; @@ -81,10 +81,10 @@ message TTaskInput { repeated TChannel Channels = 1; oneof Type { TUnionAllInput UnionAll = 10; - TSourceInput Source = 11; + TSourceInput Source = 11; TMergeInput Merge = 12; } -} +} message TTaskOutputMap { } @@ -94,15 +94,15 @@ message TRangePartitionInfo { bool IsInclusive = 2; bool IsPoint = 3; uint64 ChannelId = 4; -} +} message TTaskOutputRangePartition { repeated string KeyColumns = 1; repeated TRangePartitionInfo Partitions = 2; -} +} message TTaskOutputBroadcast { -} +} message TTaskOutputHashPartition { repeated string KeyColumns = 1; @@ -110,13 +110,13 @@ message TTaskOutputHashPartition { } message TTaskOutputEffects { -} - -message TTaskOutputSink { - string Type = 1; - google.protobuf.Any Settings = 2; -} +} +message TTaskOutputSink { + string Type = 1; + google.protobuf.Any Settings = 2; +} + message TTaskOutput { repeated TChannel Channels = 1; oneof Type { @@ -125,9 +125,9 @@ message TTaskOutput { TTaskOutputHashPartition HashPartition = 12; TTaskOutputBroadcast Broadcast = 13; TTaskOutputEffects Effects = 14; - TTaskOutputSink Sink = 15; + 
TTaskOutputSink Sink = 15; } -} +} enum ETransformType { TRANSFORM_YANDEX_CLOUD = 0; @@ -150,4 +150,4 @@ message TDqTask { google.protobuf.Any Meta = 7; bool CreateSuspended = 8; optional TDqTransform OutputTransform = 12; -} +} diff --git a/ydb/library/yql/dq/proto/dq_transport.proto b/ydb/library/yql/dq/proto/dq_transport.proto index d78fa7806c8..e3bc98f8009 100644 --- a/ydb/library/yql/dq/proto/dq_transport.proto +++ b/ydb/library/yql/dq/proto/dq_transport.proto @@ -8,10 +8,10 @@ enum EDataTransportVersion { DATA_TRANSPORT_YSON_1_0 = 10000; DATA_TRANSPORT_UV_PICKLE_1_0 = 20000; DATA_TRANSPORT_ARROW_1_0 = 30000; -} +} message TData { uint32 TransportVersion = 1; bytes Raw = 2; uint32 Rows = 3; -} +} diff --git a/ydb/library/yql/dq/proto/ya.make b/ydb/library/yql/dq/proto/ya.make index d5327f26349..9cd84eaef55 100644 --- a/ydb/library/yql/dq/proto/ya.make +++ b/ydb/library/yql/dq/proto/ya.make @@ -9,8 +9,8 @@ PEERDIR( ) SRCS( - dq_checkpoint.proto - dq_state_load_plan.proto + dq_checkpoint.proto + dq_state_load_plan.proto dq_tasks.proto dq_transport.proto ) diff --git a/ydb/library/yql/dq/runtime/dq_input.h b/ydb/library/yql/dq/runtime/dq_input.h index 4067549a82f..ab271fbce12 100644 --- a/ydb/library/yql/dq/runtime/dq_input.h +++ b/ydb/library/yql/dq/runtime/dq_input.h @@ -1,51 +1,51 @@ -#pragma once - +#pragma once + #include <ydb/library/yql/minikql/computation/mkql_computation_node_holders.h> #include <ydb/library/yql/minikql/mkql_node.h> - -namespace NYql::NDq { - -struct TDqInputStats { + +namespace NYql::NDq { + +struct TDqInputStats { // basic stats - ui64 Chunks = 0; - ui64 Bytes = 0; - ui64 RowsIn = 0; - ui64 RowsOut = 0; + ui64 Chunks = 0; + ui64 Bytes = 0; + ui64 RowsIn = 0; + ui64 RowsOut = 0; TInstant FirstRowTs; // profile stats - ui64 RowsInMemory = 0; - ui64 MaxMemoryUsage = 0; -}; - -class IDqInput : public TSimpleRefCount<IDqInput> { -public: - using TPtr = TIntrusivePtr<IDqInput>; - - virtual ~IDqInput() = default; - - virtual i64 GetFreeSpace() const = 0; - virtual ui64 GetStoredBytes() const = 0; - - [[nodiscard]] - virtual bool Empty() const = 0; - - [[nodiscard]] - virtual bool Pop(NKikimr::NMiniKQL::TUnboxedValueVector& batch) = 0; - - virtual bool IsFinished() const = 0; - - virtual const TDqInputStats* GetStats() const = 0; - - virtual NKikimr::NMiniKQL::TType* GetInputType() const = 0; - - // Checkpointing - // After pause IDqInput::Pop() stops return batches that were pushed before pause - // and returns Empty() after all the data before pausing was read. - // Compute Actor can push data after pause, but program won't receive it until Resume() is called. - virtual void Pause() = 0; - virtual void Resume() = 0; - virtual bool IsPaused() const = 0; -}; - -} // namespace NYql::NDq + ui64 RowsInMemory = 0; + ui64 MaxMemoryUsage = 0; +}; + +class IDqInput : public TSimpleRefCount<IDqInput> { +public: + using TPtr = TIntrusivePtr<IDqInput>; + + virtual ~IDqInput() = default; + + virtual i64 GetFreeSpace() const = 0; + virtual ui64 GetStoredBytes() const = 0; + + [[nodiscard]] + virtual bool Empty() const = 0; + + [[nodiscard]] + virtual bool Pop(NKikimr::NMiniKQL::TUnboxedValueVector& batch) = 0; + + virtual bool IsFinished() const = 0; + + virtual const TDqInputStats* GetStats() const = 0; + + virtual NKikimr::NMiniKQL::TType* GetInputType() const = 0; + + // Checkpointing + // After pause IDqInput::Pop() stops return batches that were pushed before pause + // and returns Empty() after all the data before pausing was read. 
+ // Compute Actor can push data after pause, but program won't receive it until Resume() is called. + virtual void Pause() = 0; + virtual void Resume() = 0; + virtual bool IsPaused() const = 0; +}; + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/runtime/dq_input_channel.cpp b/ydb/library/yql/dq/runtime/dq_input_channel.cpp index bd0a541e377..450748dd564 100644 --- a/ydb/library/yql/dq/runtime/dq_input_channel.cpp +++ b/ydb/library/yql/dq/runtime/dq_input_channel.cpp @@ -1,17 +1,17 @@ #include "dq_input_channel.h" -#include "dq_input_impl.h" +#include "dq_input_impl.h" namespace NYql::NDq { -class TDqInputChannel : public TDqInputImpl<TDqInputChannel, IDqInputChannel> { - using TBaseImpl = TDqInputImpl<TDqInputChannel, IDqInputChannel>; - friend TBaseImpl; +class TDqInputChannel : public TDqInputImpl<TDqInputChannel, IDqInputChannel> { + using TBaseImpl = TDqInputImpl<TDqInputChannel, IDqInputChannel>; + friend TBaseImpl; public: TDqInputChannel(ui64 channelId, NKikimr::NMiniKQL::TType* inputType, ui64 maxBufferBytes, bool collectProfileStats, const NKikimr::NMiniKQL::TTypeEnvironment& typeEnv, const NKikimr::NMiniKQL::THolderFactory& holderFactory, NDqProto::EDataTransportVersion transportVersion) - : TBaseImpl(inputType, maxBufferBytes) - , ChannelId(channelId) + : TBaseImpl(inputType, maxBufferBytes) + , ChannelId(channelId) , BasicStats(ChannelId) , ProfileStats(collectProfileStats ? &BasicStats : nullptr) , DataSerializer(typeEnv, holderFactory, transportVersion) {} @@ -27,7 +27,7 @@ public: return; } - const i64 space = data.GetRaw().size(); + const i64 space = data.GetRaw().size(); NKikimr::NMiniKQL::TUnboxedValueVector buffer; buffer.reserve(data.GetRows()); @@ -40,7 +40,7 @@ public: DataSerializer.Deserialize(data, InputType, buffer); } - AddBatch(std::move(buffer), space); + AddBatch(std::move(buffer), space); } const TDqInputChannelStats* GetStats() const override { diff --git a/ydb/library/yql/dq/runtime/dq_input_channel.h b/ydb/library/yql/dq/runtime/dq_input_channel.h index 27020103ccd..f8b9fe105a1 100644 --- a/ydb/library/yql/dq/runtime/dq_input_channel.h +++ b/ydb/library/yql/dq/runtime/dq_input_channel.h @@ -1,5 +1,5 @@ #pragma once -#include "dq_input.h" +#include "dq_input.h" #include "dq_transport.h" #include <ydb/library/yql/minikql/computation/mkql_computation_node_holders.h> @@ -8,7 +8,7 @@ namespace NYql::NDq { -struct TDqInputChannelStats : TDqInputStats { +struct TDqInputChannelStats : TDqInputStats { ui64 ChannelId = 0; // profile stats @@ -18,7 +18,7 @@ struct TDqInputChannelStats : TDqInputStats { : ChannelId(channelId) {} }; -class IDqInputChannel : public IDqInput { +class IDqInputChannel : public IDqInput { public: using TPtr = TIntrusivePtr<IDqInputChannel>; diff --git a/ydb/library/yql/dq/runtime/dq_input_impl.h b/ydb/library/yql/dq/runtime/dq_input_impl.h index dca47f85dd5..4df8a900210 100644 --- a/ydb/library/yql/dq/runtime/dq_input_impl.h +++ b/ydb/library/yql/dq/runtime/dq_input_impl.h @@ -1,144 +1,144 @@ -#pragma once - -namespace NYql::NDq { - -template <class TDerived, class IInputInterface> -class TDqInputImpl : public IInputInterface { -public: - TDqInputImpl(NKikimr::NMiniKQL::TType* inputType, ui64 maxBufferBytes) - : InputType(inputType) - , MaxBufferBytes(maxBufferBytes) - { - } - - i64 GetFreeSpace() const override { - return (i64) MaxBufferBytes - StoredBytes; - } - - ui64 GetStoredBytes() const override { - return StoredBytes; - } - - [[nodiscard]] - bool Empty() const override { - return Batches.empty() || (IsPaused() && 
GetBatchesBeforePause() == 0); - } - - void AddBatch(NKikimr::NMiniKQL::TUnboxedValueVector&& batch, i64 space) { - StoredBytes += space; - StoredRows += batch.size(); - +#pragma once + +namespace NYql::NDq { + +template <class TDerived, class IInputInterface> +class TDqInputImpl : public IInputInterface { +public: + TDqInputImpl(NKikimr::NMiniKQL::TType* inputType, ui64 maxBufferBytes) + : InputType(inputType) + , MaxBufferBytes(maxBufferBytes) + { + } + + i64 GetFreeSpace() const override { + return (i64) MaxBufferBytes - StoredBytes; + } + + ui64 GetStoredBytes() const override { + return StoredBytes; + } + + [[nodiscard]] + bool Empty() const override { + return Batches.empty() || (IsPaused() && GetBatchesBeforePause() == 0); + } + + void AddBatch(NKikimr::NMiniKQL::TUnboxedValueVector&& batch, i64 space) { + StoredBytes += space; + StoredRows += batch.size(); + auto& stats = MutableBasicStats(); stats.Chunks++; stats.Bytes += space; stats.RowsIn += batch.size(); if (!stats.FirstRowTs) { stats.FirstRowTs = TInstant::Now(); - } - + } + if (auto* profile = MutableProfileStats()) { profile->MaxMemoryUsage = std::max(profile->MaxMemoryUsage, StoredBytes); } - Batches.emplace_back(std::move(batch)); - } - - [[nodiscard]] - bool Pop(NKikimr::NMiniKQL::TUnboxedValueVector& batch) override { - if (Empty()) { - return false; - } - - batch.clear(); - - if (IsPaused()) { - ui64 batchesCount = GetBatchesBeforePause(); - Y_VERIFY(batchesCount <= Batches.size()); - - batch.reserve(StoredRowsBeforePause); - - while (batchesCount--) { - auto& part = Batches.front(); - std::move(part.begin(), part.end(), std::back_inserter(batch)); - Batches.pop_front(); - } - - BatchesBeforePause = PauseMask; - Y_VERIFY(GetBatchesBeforePause() == 0); - StoredBytes -= StoredBytesBeforePause; - StoredRows -= StoredRowsBeforePause; - StoredBytesBeforePause = 0; - StoredRowsBeforePause = 0; - } else { - batch.reserve(StoredRows); - - for (auto&& part : Batches) { - std::move(part.begin(), part.end(), std::back_inserter(batch)); - } - - StoredBytes = 0; - StoredRows = 0; - Batches.clear(); - } - + Batches.emplace_back(std::move(batch)); + } + + [[nodiscard]] + bool Pop(NKikimr::NMiniKQL::TUnboxedValueVector& batch) override { + if (Empty()) { + return false; + } + + batch.clear(); + + if (IsPaused()) { + ui64 batchesCount = GetBatchesBeforePause(); + Y_VERIFY(batchesCount <= Batches.size()); + + batch.reserve(StoredRowsBeforePause); + + while (batchesCount--) { + auto& part = Batches.front(); + std::move(part.begin(), part.end(), std::back_inserter(batch)); + Batches.pop_front(); + } + + BatchesBeforePause = PauseMask; + Y_VERIFY(GetBatchesBeforePause() == 0); + StoredBytes -= StoredBytesBeforePause; + StoredRows -= StoredRowsBeforePause; + StoredBytesBeforePause = 0; + StoredRowsBeforePause = 0; + } else { + batch.reserve(StoredRows); + + for (auto&& part : Batches) { + std::move(part.begin(), part.end(), std::back_inserter(batch)); + } + + StoredBytes = 0; + StoredRows = 0; + Batches.clear(); + } + MutableBasicStats().RowsOut += batch.size(); - return true; - } - + return true; + } + void Finish() override { Finished = true; - } - - bool IsFinished() const override { - return Finished && (!IsPaused() || Batches.empty()); - } - - NKikimr::NMiniKQL::TType* GetInputType() const override { - return InputType; - } - + } + + bool IsFinished() const override { + return Finished && (!IsPaused() || Batches.empty()); + } + + NKikimr::NMiniKQL::TType* GetInputType() const override { + return InputType; + } + auto& 
MutableBasicStats() { return static_cast<TDerived*>(this)->BasicStats; - } - + } + auto* MutableProfileStats() { return static_cast<TDerived*>(this)->ProfileStats; } - void Pause() override { - Y_VERIFY(!IsPaused()); - if (!Finished) { - BatchesBeforePause = Batches.size() | PauseMask; - StoredRowsBeforePause = StoredRows; - StoredBytesBeforePause = StoredBytes; - } - } - - void Resume() override { - StoredBytesBeforePause = StoredRowsBeforePause = BatchesBeforePause = 0; - Y_VERIFY(!IsPaused()); - } - - bool IsPaused() const override { - return BatchesBeforePause; - } - -protected: - ui64 GetBatchesBeforePause() const { - return BatchesBeforePause & ~PauseMask; - } - -protected: - NKikimr::NMiniKQL::TType* const InputType = nullptr; - const ui64 MaxBufferBytes = 0; - TList<NKikimr::NMiniKQL::TUnboxedValueVector, NKikimr::NMiniKQL::TMKQLAllocator<NUdf::TUnboxedValue>> Batches; - ui64 StoredBytes = 0; - ui64 StoredRows = 0; - bool Finished = false; - ui64 BatchesBeforePause = 0; - ui64 StoredBytesBeforePause = 0; - ui64 StoredRowsBeforePause = 0; - static constexpr ui64 PauseMask = 1llu << 63llu; -}; - -} // namespace NYql::NDq + void Pause() override { + Y_VERIFY(!IsPaused()); + if (!Finished) { + BatchesBeforePause = Batches.size() | PauseMask; + StoredRowsBeforePause = StoredRows; + StoredBytesBeforePause = StoredBytes; + } + } + + void Resume() override { + StoredBytesBeforePause = StoredRowsBeforePause = BatchesBeforePause = 0; + Y_VERIFY(!IsPaused()); + } + + bool IsPaused() const override { + return BatchesBeforePause; + } + +protected: + ui64 GetBatchesBeforePause() const { + return BatchesBeforePause & ~PauseMask; + } + +protected: + NKikimr::NMiniKQL::TType* const InputType = nullptr; + const ui64 MaxBufferBytes = 0; + TList<NKikimr::NMiniKQL::TUnboxedValueVector, NKikimr::NMiniKQL::TMKQLAllocator<NUdf::TUnboxedValue>> Batches; + ui64 StoredBytes = 0; + ui64 StoredRows = 0; + bool Finished = false; + ui64 BatchesBeforePause = 0; + ui64 StoredBytesBeforePause = 0; + ui64 StoredRowsBeforePause = 0; + static constexpr ui64 PauseMask = 1llu << 63llu; +}; + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/runtime/dq_input_producer.cpp b/ydb/library/yql/dq/runtime/dq_input_producer.cpp index 3fe4830a08a..c23d03a05cb 100644 --- a/ydb/library/yql/dq/runtime/dq_input_producer.cpp +++ b/ydb/library/yql/dq/runtime/dq_input_producer.cpp @@ -14,9 +14,9 @@ namespace { class TDqInputUnionStreamValue : public TComputationValue<TDqInputUnionStreamValue> { public: - TDqInputUnionStreamValue(TMemoryUsageInfo* memInfo, TVector<IDqInput::TPtr>&& inputs) + TDqInputUnionStreamValue(TMemoryUsageInfo* memInfo, TVector<IDqInput::TPtr>&& inputs) : TComputationValue<TDqInputUnionStreamValue>(memInfo) - , Inputs(std::move(inputs)) + , Inputs(std::move(inputs)) , CurrentItemIndex(0) {} private: @@ -41,19 +41,19 @@ private: bool allFinished = true; CurrentBuffer.clear(); - for (auto& input : Inputs) { - if (input->Pop(CurrentBuffer)) { + for (auto& input : Inputs) { + if (input->Pop(CurrentBuffer)) { CurrentItemIndex = 0; return NUdf::EFetchStatus::Ok; } - allFinished &= input->IsFinished(); + allFinished &= input->IsFinished(); } return allFinished ? 
NUdf::EFetchStatus::Finish : NUdf::EFetchStatus::Yield; } private: - TVector<IDqInput::TPtr> Inputs; + TVector<IDqInput::TPtr> Inputs; TUnboxedValueVector CurrentBuffer; ui64 CurrentItemIndex; }; @@ -171,10 +171,10 @@ private: } // namespace -NUdf::TUnboxedValue CreateInputUnionValue(TVector<IDqInput::TPtr>&& inputs, +NUdf::TUnboxedValue CreateInputUnionValue(TVector<IDqInput::TPtr>&& inputs, const NMiniKQL::THolderFactory& factory) { - return factory.Create<TDqInputUnionStreamValue>(std::move(inputs)); + return factory.Create<TDqInputUnionStreamValue>(std::move(inputs)); } NKikimr::NUdf::TUnboxedValue CreateInputMergeValue(TVector<IDqInput::TPtr>&& inputs, diff --git a/ydb/library/yql/dq/runtime/dq_input_producer.h b/ydb/library/yql/dq/runtime/dq_input_producer.h index 7a590b23be1..d1a19f9922d 100644 --- a/ydb/library/yql/dq/runtime/dq_input_producer.h +++ b/ydb/library/yql/dq/runtime/dq_input_producer.h @@ -5,7 +5,7 @@ namespace NYql::NDq { -NKikimr::NUdf::TUnboxedValue CreateInputUnionValue(TVector<IDqInput::TPtr>&& inputs, +NKikimr::NUdf::TUnboxedValue CreateInputUnionValue(TVector<IDqInput::TPtr>&& inputs, const NKikimr::NMiniKQL::THolderFactory& holderFactory); NKikimr::NUdf::TUnboxedValue CreateInputMergeValue(TVector<IDqInput::TPtr>&& inputs, diff --git a/ydb/library/yql/dq/runtime/dq_output.h b/ydb/library/yql/dq/runtime/dq_output.h index e793ff5231b..3bc0afcb650 100644 --- a/ydb/library/yql/dq/runtime/dq_output.h +++ b/ydb/library/yql/dq/runtime/dq_output.h @@ -1,23 +1,23 @@ -#pragma once - +#pragma once + #include <ydb/library/yql/minikql/mkql_node.h> #include <util/datetime/base.h> -#include <util/generic/ptr.h> - -namespace NYql { -namespace NDqProto { - -class TCheckpoint; -class TTaskInput; -} // namespace NDqProto - -namespace NUdf { -class TUnboxedValue; -} // namespace NUdf - -namespace NDq { - +#include <util/generic/ptr.h> + +namespace NYql { +namespace NDqProto { + +class TCheckpoint; +class TTaskInput; +} // namespace NDqProto + +namespace NUdf { +class TUnboxedValue; +} // namespace NUdf + +namespace NDq { + struct TDqOutputStats { // basic stats ui64 Chunks = 0; @@ -31,30 +31,30 @@ struct TDqOutputStats { ui64 MaxRowsInMemory = 0; }; -class IDqOutput : public TSimpleRefCount<IDqOutput> { -public: - using TPtr = TIntrusivePtr<IDqOutput>; - - virtual ~IDqOutput() = default; - - // <| producer methods - [[nodiscard]] - virtual bool IsFull() const = 0; - // can throw TDqChannelStorageException - virtual void Push(NUdf::TUnboxedValue&& value) = 0; +class IDqOutput : public TSimpleRefCount<IDqOutput> { +public: + using TPtr = TIntrusivePtr<IDqOutput>; + + virtual ~IDqOutput() = default; + + // <| producer methods + [[nodiscard]] + virtual bool IsFull() const = 0; + // can throw TDqChannelStorageException + virtual void Push(NUdf::TUnboxedValue&& value) = 0; // Push checkpoint. Checkpoints may be pushed to channel even after it is finished. 
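The TDqInputImpl template introduced above keeps the pause flag and the number of batches buffered before the pause in a single ui64 field: Pause() ORs PauseMask (the top bit, 1 << 63) into the current batch count, IsPaused() is just a non-zero test, and GetBatchesBeforePause() masks the bit off again, so "paused with an empty buffer" stays distinguishable from "not paused". A minimal standalone sketch of that encoding follows; the struct and its names are illustrative, not the real template, and the Finished handling is omitted.

#include <cassert>
#include <cstdint>

// Sketch of the PauseMask trick used by TDqInputImpl (hypothetical simplified type).
struct TPauseCounter {
    static constexpr uint64_t PauseMask = 1ull << 63;
    uint64_t BatchesBeforePause = 0;   // 0 means "not paused"

    void Pause(uint64_t batchesInBuffer) {
        assert(!IsPaused());
        // OR-ing the mask keeps the value non-zero even if the buffer is empty,
        // so IsPaused() holds until Resume() resets everything.
        BatchesBeforePause = batchesInBuffer | PauseMask;
    }
    void Resume() { BatchesBeforePause = 0; }
    bool IsPaused() const { return BatchesBeforePause != 0; }
    uint64_t BatchesAvailable() const { return BatchesBeforePause & ~PauseMask; }
};

int main() {
    TPauseCounter c;
    assert(!c.IsPaused());
    c.Pause(3);                                       // 3 batches buffered at pause time
    assert(c.IsPaused() && c.BatchesAvailable() == 3);
    c.Resume();
    assert(!c.IsPaused());
    c.Pause(0);                                       // paused with an empty buffer
    assert(c.IsPaused() && c.BatchesAvailable() == 0);
    return 0;
}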
- virtual void Push(NDqProto::TCheckpoint&& checkpoint) = 0; - virtual void Finish() = 0; - - // <| consumer methods - [[nodiscard]] - virtual bool HasData() const = 0; - virtual bool IsFinished() const = 0; + virtual void Push(NDqProto::TCheckpoint&& checkpoint) = 0; + virtual void Finish() = 0; + + // <| consumer methods + [[nodiscard]] + virtual bool HasData() const = 0; + virtual bool IsFinished() const = 0; virtual NKikimr::NMiniKQL::TType* GetOutputType() const = 0; virtual const TDqOutputStats* GetStats() const = 0; -}; - -} // namespace NDq -} // namespace NYql +}; + +} // namespace NDq +} // namespace NYql diff --git a/ydb/library/yql/dq/runtime/dq_output_channel.cpp b/ydb/library/yql/dq/runtime/dq_output_channel.cpp index 211d4ac39a3..9d85413ea63 100644 --- a/ydb/library/yql/dq/runtime/dq_output_channel.cpp +++ b/ydb/library/yql/dq/runtime/dq_output_channel.cpp @@ -671,8 +671,8 @@ public: [[nodiscard]] bool Pop(NDqProto::TCheckpoint& checkpoint) override { if (!HasData() && Checkpoint) { - checkpoint = std::move(*Checkpoint); - Checkpoint = Nothing(); + checkpoint = std::move(*Checkpoint); + Checkpoint = Nothing(); return true; } return false; @@ -747,10 +747,10 @@ public: } bool IsFinished() const override { - return Finished && !HasData(); + return Finished && !HasData(); } - ui64 Drop() override { // Drop channel data because channel was finished. Leave checkpoint because checkpoints keep going through channel after finishing channel data transfer. + ui64 Drop() override { // Drop channel data because channel was finished. Leave checkpoint because checkpoints keep going through channel after finishing channel data transfer. ui64 rows = DataHead.size() + SpilledRows + DataTail.size(); DataHead.clear(); SizeHead.clear(); diff --git a/ydb/library/yql/dq/runtime/dq_output_channel.h b/ydb/library/yql/dq/runtime/dq_output_channel.h index f7a4887d2d6..2f6d8540c2d 100644 --- a/ydb/library/yql/dq/runtime/dq_output_channel.h +++ b/ydb/library/yql/dq/runtime/dq_output_channel.h @@ -1,6 +1,6 @@ #pragma once -#include "dq_output.h" -#include "dq_channel_storage.h" +#include "dq_output.h" +#include "dq_channel_storage.h" #include <ydb/library/yql/dq/common/dq_common.h> #include <ydb/library/yql/dq/proto/dq_transport.pb.h> @@ -28,7 +28,7 @@ struct TDqOutputChannelStats : TDqOutputStats { : ChannelId(channelId) {} }; -class IDqOutputChannel : public IDqOutput { +class IDqOutputChannel : public IDqOutput { public: using TPtr = TIntrusivePtr<IDqOutputChannel>; @@ -36,17 +36,17 @@ public: virtual ui64 GetValuesCount(bool inMemoryOnly = true) const = 0; // <| consumer methods - // can throw TDqChannelStorageException + // can throw TDqChannelStorageException [[nodiscard]] virtual bool Pop(NDqProto::TData& data, ui64 bytes) = 0; - // Pop chechpoint. Checkpoints may be taken from channel even after it is finished. + // Pop chechpoint. Checkpoints may be taken from channel even after it is finished. [[nodiscard]] virtual bool Pop(NDqProto::TCheckpoint& checkpoint) = 0; // Only for data-queries // TODO: remove this method and create independent Data- and Stream-query implementations. // Stream-query implementation should be without PopAll method. // Data-query implementation should be one-shot for Pop (a-la PopAll) call and without ChannelStorage. 
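The comments above describe the IDqOutputChannel draining contract: buffered data is handed out first, a pending checkpoint can still be popped after the channel has finished, and Drop() discards data while keeping the checkpoint. The toy type below models only that ordering; it is not the real channel (no serialization, spilling or stats), and std::optional and std::string stand in for TMaybe and the protobuf messages.

#include <cassert>
#include <cstddef>
#include <deque>
#include <optional>
#include <string>

// Toy model of the draining order shown in the patch above (hypothetical type).
class TToyChannel {
public:
    void Push(std::string data)        { Data.push_back(std::move(data)); }
    void PushCheckpoint(std::string c) { Checkpoint = std::move(c); }
    void Finish()                      { Finished = true; }

    bool HasData() const    { return !Data.empty(); }
    bool IsFinished() const { return Finished && !HasData(); }   // same rule as in the patch

    bool Pop(std::string& out) {
        if (Data.empty()) return false;
        out = std::move(Data.front());
        Data.pop_front();
        return true;
    }
    // A checkpoint is only handed out once the data ahead of it is gone,
    // and it survives both Drop() and Finish().
    bool PopCheckpoint(std::string& out) {
        if (HasData() || !Checkpoint) return false;
        out = std::move(*Checkpoint);
        Checkpoint.reset();
        return true;
    }
    size_t Drop() {                      // drop data, keep the checkpoint
        size_t rows = Data.size();
        Data.clear();
        return rows;
    }

private:
    std::deque<std::string> Data;
    std::optional<std::string> Checkpoint;
    bool Finished = false;
};

int main() {
    TToyChannel ch;
    ch.Push("row1");
    ch.PushCheckpoint("cp#1");
    ch.Finish();

    std::string out;
    assert(!ch.PopCheckpoint(out));                 // blocked: data is still ahead of the checkpoint
    assert(ch.Pop(out) && out == "row1");
    assert(ch.IsFinished());
    assert(ch.PopCheckpoint(out) && out == "cp#1"); // still available after finish
    return 0;
}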
- // can throw TDqChannelStorageException + // can throw TDqChannelStorageException [[nodiscard]] virtual bool PopAll(NDqProto::TData& data) = 0; virtual bool PopAll(NKikimr::NMiniKQL::TUnboxedValueVector& rows) = 0; diff --git a/ydb/library/yql/dq/runtime/dq_output_consumer.cpp b/ydb/library/yql/dq/runtime/dq_output_consumer.cpp index 74bd5ba0ce7..4a1a4ceb83e 100644 --- a/ydb/library/yql/dq/runtime/dq_output_consumer.cpp +++ b/ydb/library/yql/dq/runtime/dq_output_consumer.cpp @@ -50,31 +50,31 @@ private: class TDqOutputMapConsumer : public IDqOutputConsumer { public: - TDqOutputMapConsumer(IDqOutput::TPtr output) - : Output(output) {} + TDqOutputMapConsumer(IDqOutput::TPtr output) + : Output(output) {} bool IsFull() const override { - return Output->IsFull(); + return Output->IsFull(); } void Consume(TUnboxedValue&& value) override { - Output->Push(std::move(value)); + Output->Push(std::move(value)); } void Finish() override { - Output->Finish(); + Output->Finish(); } private: - IDqOutput::TPtr Output; + IDqOutput::TPtr Output; }; class TDqOutputHashPartitionConsumer : public IDqOutputConsumer { public: - TDqOutputHashPartitionConsumer(const TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& outputs, + TDqOutputHashPartitionConsumer(const TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& outputs, TVector<TDataTypeId>&& keyColumnTypes, TVector<ui32>&& keyColumnIndices) : TypeEnv(typeEnv) - , Outputs(std::move(outputs)) + , Outputs(std::move(outputs)) , KeyColumnTypes(std::move(keyColumnTypes)) , KeyColumnIndices(std::move(keyColumnIndices)) { @@ -96,17 +96,17 @@ public: } bool IsFull() const override { - return AnyOf(Outputs, [](const auto& output) { return output->IsFull(); }); + return AnyOf(Outputs, [](const auto& output) { return output->IsFull(); }); } void Consume(TUnboxedValue&& value) final { ui32 partitionIndex = GetHashPartitionIndex(value); - Outputs[partitionIndex]->Push(std::move(value)); + Outputs[partitionIndex]->Push(std::move(value)); } void Finish() final { - for (auto& output : Outputs) { - output->Finish(); + for (auto& output : Outputs) { + output->Finish(); } } @@ -119,7 +119,7 @@ private: hash = CombineHashes(hash, HashColumn(keyId, columnValue)); } - return hash % Outputs.size(); + return hash % Outputs.size(); } ui64 HashColumn(size_t keyId, const TUnboxedValue& value) const { @@ -144,7 +144,7 @@ private: private: const TTypeEnvironment& TypeEnv; - TVector<IDqOutput::TPtr> Outputs; + TVector<IDqOutput::TPtr> Outputs; TVector<TDataTypeId> KeyColumnTypes; TVector<ui32> KeyColumnIndices; TVector<TValueHasher> ValueHashers; @@ -153,28 +153,28 @@ private: class TDqOutputBroadcastConsumer : public IDqOutputConsumer { public: - TDqOutputBroadcastConsumer(TVector<IDqOutput::TPtr>&& outputs) - : Outputs(std::move(outputs)) {} + TDqOutputBroadcastConsumer(TVector<IDqOutput::TPtr>&& outputs) + : Outputs(std::move(outputs)) {} bool IsFull() const override { - return AnyOf(Outputs, [](const auto& output) { return output->IsFull(); }); + return AnyOf(Outputs, [](const auto& output) { return output->IsFull(); }); } void Consume(TUnboxedValue&& value) final { - for (auto& output : Outputs) { + for (auto& output : Outputs) { TUnboxedValue copy{ value }; - output->Push(std::move(copy)); + output->Push(std::move(copy)); } } void Finish() override { - for (auto& output : Outputs) { - output->Finish(); + for (auto& output : Outputs) { + output->Finish(); } } private: - TVector<IDqOutput::TPtr> Outputs; + TVector<IDqOutput::TPtr> Outputs; }; } // namespace @@ -183,21 +183,21 @@ 
IDqOutputConsumer::TPtr CreateOutputMultiConsumer(TVector<IDqOutputConsumer::TPt return MakeIntrusive<TDqOutputMultiConsumer>(std::move(consumers)); } -IDqOutputConsumer::TPtr CreateOutputMapConsumer(IDqOutput::TPtr output) { - return MakeIntrusive<TDqOutputMapConsumer>(output); +IDqOutputConsumer::TPtr CreateOutputMapConsumer(IDqOutput::TPtr output) { + return MakeIntrusive<TDqOutputMapConsumer>(output); } IDqOutputConsumer::TPtr CreateOutputHashPartitionConsumer( - TVector<IDqOutput::TPtr>&& outputs, + TVector<IDqOutput::TPtr>&& outputs, TVector<NUdf::TDataTypeId>&& keyColumnTypes, TVector<ui32>&& keyColumnIndices, const NKikimr::NMiniKQL::TTypeEnvironment& typeEnv) { - return MakeIntrusive<TDqOutputHashPartitionConsumer>(typeEnv, std::move(outputs), std::move(keyColumnTypes), + return MakeIntrusive<TDqOutputHashPartitionConsumer>(typeEnv, std::move(outputs), std::move(keyColumnTypes), std::move(keyColumnIndices)); } -IDqOutputConsumer::TPtr CreateOutputBroadcastConsumer(TVector<IDqOutput::TPtr>&& outputs) { - return MakeIntrusive<TDqOutputBroadcastConsumer>(std::move(outputs)); +IDqOutputConsumer::TPtr CreateOutputBroadcastConsumer(TVector<IDqOutput::TPtr>&& outputs) { + return MakeIntrusive<TDqOutputBroadcastConsumer>(std::move(outputs)); } } // namespace NYql::NDq diff --git a/ydb/library/yql/dq/runtime/dq_output_consumer.h b/ydb/library/yql/dq/runtime/dq_output_consumer.h index 80e69ae065e..27f70fa8be6 100644 --- a/ydb/library/yql/dq/runtime/dq_output_consumer.h +++ b/ydb/library/yql/dq/runtime/dq_output_consumer.h @@ -1,12 +1,12 @@ #pragma once -#include "dq_output.h" +#include "dq_output.h" #include <ydb/library/yql/minikql/mkql_alloc.h> -namespace NKikimr::NMiniKQL { -class TTypeEnvironment; -} // namespace NKikimr::NMiniKQL +namespace NKikimr::NMiniKQL { +class TTypeEnvironment; +} // namespace NKikimr::NMiniKQL namespace NYql::NDq { @@ -24,14 +24,14 @@ public: IDqOutputConsumer::TPtr CreateOutputMultiConsumer(TVector<IDqOutputConsumer::TPtr>&& consumers); -IDqOutputConsumer::TPtr CreateOutputMapConsumer(IDqOutput::TPtr output); +IDqOutputConsumer::TPtr CreateOutputMapConsumer(IDqOutput::TPtr output); IDqOutputConsumer::TPtr CreateOutputHashPartitionConsumer( - TVector<IDqOutput::TPtr>&& outputs, + TVector<IDqOutput::TPtr>&& outputs, TVector<NUdf::TDataTypeId>&& keyColumnTypes, TVector<ui32>&& keyColumnIndices, const NKikimr::NMiniKQL::TTypeEnvironment& typeEnv); -IDqOutputConsumer::TPtr CreateOutputBroadcastConsumer(TVector<IDqOutput::TPtr>&& outputs); +IDqOutputConsumer::TPtr CreateOutputBroadcastConsumer(TVector<IDqOutput::TPtr>&& outputs); } // namespace NYql::NDq diff --git a/ydb/library/yql/dq/runtime/dq_sink.cpp b/ydb/library/yql/dq/runtime/dq_sink.cpp index ff86523ef48..7b8c42a5f99 100644 --- a/ydb/library/yql/dq/runtime/dq_sink.cpp +++ b/ydb/library/yql/dq/runtime/dq_sink.cpp @@ -1,142 +1,142 @@ -#include "dq_sink.h" +#include "dq_sink.h" #include "dq_transport.h" - + #include <ydb/library/yql/utils/yql_panic.h> - -#include <deque> -#include <variant> - -namespace NYql::NDq { -namespace { - -class TDqSink : public IDqSink { - struct TValueDesc { - std::variant<NUdf::TUnboxedValue, NDqProto::TCheckpoint> Value; - ui64 EstimatedSize; - - TValueDesc(NUdf::TUnboxedValue&& value, ui64 size) - : Value(std::move(value)) - , EstimatedSize(size) - { - } - - TValueDesc(NDqProto::TCheckpoint&& checkpoint, ui64 size) - : Value(std::move(checkpoint)) - , EstimatedSize(size) - { - } - - TValueDesc(const TValueDesc&) = default; - TValueDesc(TValueDesc&&) = default; - }; - -public: + 
+#include <deque> +#include <variant> + +namespace NYql::NDq { +namespace { + +class TDqSink : public IDqSink { + struct TValueDesc { + std::variant<NUdf::TUnboxedValue, NDqProto::TCheckpoint> Value; + ui64 EstimatedSize; + + TValueDesc(NUdf::TUnboxedValue&& value, ui64 size) + : Value(std::move(value)) + , EstimatedSize(size) + { + } + + TValueDesc(NDqProto::TCheckpoint&& checkpoint, ui64 size) + : Value(std::move(checkpoint)) + , EstimatedSize(size) + { + } + + TValueDesc(const TValueDesc&) = default; + TValueDesc(TValueDesc&&) = default; + }; + +public: TDqSink(ui64 outputIndex, NKikimr::NMiniKQL::TType* outputType, ui64 maxStoredBytes, bool collectProfileStats) - : OutputIndex(outputIndex) - , MaxStoredBytes(maxStoredBytes) - , OutputType(outputType) + : OutputIndex(outputIndex) + , MaxStoredBytes(maxStoredBytes) + , OutputType(outputType) , BasicStats(OutputIndex) , ProfileStats(collectProfileStats ? &BasicStats : nullptr) {} - - ui64 GetOutputIndex() const override { - return OutputIndex; - } - - bool IsFull() const override { - return EstimatedStoredBytes >= MaxStoredBytes; - } - - void Push(NUdf::TUnboxedValue&& value) override { + + ui64 GetOutputIndex() const override { + return OutputIndex; + } + + bool IsFull() const override { + return EstimatedStoredBytes >= MaxStoredBytes; + } + + void Push(NUdf::TUnboxedValue&& value) override { if (!BasicStats.FirstRowIn) { BasicStats.FirstRowIn = TInstant::Now(); } - if (ValuesPushed++ % 1000 == 0) { - ReestimateRowBytes(value); - } - Y_VERIFY(EstimatedRowBytes > 0); - Values.emplace_back(std::move(value), EstimatedRowBytes); - EstimatedStoredBytes += EstimatedRowBytes; + if (ValuesPushed++ % 1000 == 0) { + ReestimateRowBytes(value); + } + Y_VERIFY(EstimatedRowBytes > 0); + Values.emplace_back(std::move(value), EstimatedRowBytes); + EstimatedStoredBytes += EstimatedRowBytes; ReportChunkIn(); - } - - void Push(NDqProto::TCheckpoint&& checkpoint) override { - const ui64 bytesSize = checkpoint.ByteSize(); - Values.emplace_back(std::move(checkpoint), bytesSize); - EstimatedStoredBytes += bytesSize; + } + + void Push(NDqProto::TCheckpoint&& checkpoint) override { + const ui64 bytesSize = checkpoint.ByteSize(); + Values.emplace_back(std::move(checkpoint), bytesSize); + EstimatedStoredBytes += bytesSize; ReportChunkIn(); - } - - void Finish() override { - Finished = true; + } + + void Finish() override { + Finished = true; if (!BasicStats.FirstRowIn) { BasicStats.FirstRowIn = TInstant::Now(); } - } - - ui64 Pop(NKikimr::NMiniKQL::TUnboxedValueVector& batch, ui64 bytes) override { - batch.clear(); - ui64 valuesCount = 0; - ui64 usedBytes = 0; - - // Calc values count. - for (auto iter = Values.cbegin(), end = Values.cend(); - usedBytes < bytes && iter != end && std::holds_alternative<NUdf::TUnboxedValue>(iter->Value); - ++iter) - { - ++valuesCount; - usedBytes += iter->EstimatedSize; - } - - // Reserve size and return data. - batch.reserve(valuesCount); - while (valuesCount--) { - batch.emplace_back(std::move(std::get<NUdf::TUnboxedValue>(Values.front().Value))); - Values.pop_front(); - } - Y_VERIFY(EstimatedStoredBytes >= usedBytes); - EstimatedStoredBytes -= usedBytes; + } + + ui64 Pop(NKikimr::NMiniKQL::TUnboxedValueVector& batch, ui64 bytes) override { + batch.clear(); + ui64 valuesCount = 0; + ui64 usedBytes = 0; + + // Calc values count. 
+ for (auto iter = Values.cbegin(), end = Values.cend(); + usedBytes < bytes && iter != end && std::holds_alternative<NUdf::TUnboxedValue>(iter->Value); + ++iter) + { + ++valuesCount; + usedBytes += iter->EstimatedSize; + } + + // Reserve size and return data. + batch.reserve(valuesCount); + while (valuesCount--) { + batch.emplace_back(std::move(std::get<NUdf::TUnboxedValue>(Values.front().Value))); + Values.pop_front(); + } + Y_VERIFY(EstimatedStoredBytes >= usedBytes); + EstimatedStoredBytes -= usedBytes; ReportChunkOut(batch.size(), usedBytes); - return usedBytes; - } - - bool Pop(NDqProto::TCheckpoint& checkpoint) override { - if (!Values.empty() && std::holds_alternative<NDqProto::TCheckpoint>(Values.front().Value)) { - checkpoint = std::move(std::get<NDqProto::TCheckpoint>(Values.front().Value)); + return usedBytes; + } + + bool Pop(NDqProto::TCheckpoint& checkpoint) override { + if (!Values.empty() && std::holds_alternative<NDqProto::TCheckpoint>(Values.front().Value)) { + checkpoint = std::move(std::get<NDqProto::TCheckpoint>(Values.front().Value)); const auto size = Values.front().EstimatedSize; Y_VERIFY(EstimatedStoredBytes >= size); EstimatedStoredBytes -= size; - Values.pop_front(); + Values.pop_front(); ReportChunkOut(1, size); - return true; - } - return false; - } - - bool HasData() const override { - return EstimatedRowBytes > 0; - } - - bool IsFinished() const override { - if (!Finished) { - return false; - } - for (const TValueDesc& v : Values) { - if (std::holds_alternative<NUdf::TUnboxedValue>(v.Value)) { - return false; - } - } - // Finished and no data values. - return true; - } - + return true; + } + return false; + } + + bool HasData() const override { + return EstimatedRowBytes > 0; + } + + bool IsFinished() const override { + if (!Finished) { + return false; + } + for (const TValueDesc& v : Values) { + if (std::holds_alternative<NUdf::TUnboxedValue>(v.Value)) { + return false; + } + } + // Finished and no data values. 
+ return true; + } + NKikimr::NMiniKQL::TType* GetOutputType() const override { return OutputType; } @@ -145,19 +145,19 @@ public: return &BasicStats; } -private: - void ReestimateRowBytes(const NUdf::TUnboxedValue& value) { +private: + void ReestimateRowBytes(const NUdf::TUnboxedValue& value) { const ui64 valueSize = TDqDataSerializer::EstimateSize(value, OutputType); - if (EstimatedRowBytes) { - EstimatedRowBytes = static_cast<ui64>(0.6 * valueSize + 0.4 * EstimatedRowBytes); - } else { - EstimatedRowBytes = valueSize; - } - if (!EstimatedRowBytes) { - EstimatedRowBytes = 1; - } - } - + if (EstimatedRowBytes) { + EstimatedRowBytes = static_cast<ui64>(0.6 * valueSize + 0.4 * EstimatedRowBytes); + } else { + EstimatedRowBytes = valueSize; + } + if (!EstimatedRowBytes) { + EstimatedRowBytes = 1; + } + } + void ReportChunkIn() { BasicStats.Bytes += EstimatedRowBytes; BasicStats.RowsIn++; @@ -172,25 +172,25 @@ private: BasicStats.RowsOut += rowsCount; } -private: - const ui64 OutputIndex; - const ui64 MaxStoredBytes; +private: + const ui64 OutputIndex; + const ui64 MaxStoredBytes; NKikimr::NMiniKQL::TType* const OutputType; - ui64 EstimatedStoredBytes = 0; - ui64 ValuesPushed = 0; - bool Finished = false; - std::deque<TValueDesc> Values; - ui64 EstimatedRowBytes = 0; + ui64 EstimatedStoredBytes = 0; + ui64 ValuesPushed = 0; + bool Finished = false; + std::deque<TValueDesc> Values; + ui64 EstimatedRowBytes = 0; TDqSinkStats BasicStats; TDqSinkStats* ProfileStats = nullptr; -}; - -} // namespace - +}; + +} // namespace + IDqSink::TPtr CreateDqSink(ui64 outputIndex, NKikimr::NMiniKQL::TType* outputType, ui64 maxStoredBytes, bool collectProfileStats) -{ +{ return MakeIntrusive<TDqSink>(outputIndex, outputType, maxStoredBytes, collectProfileStats); -} - -} // namespace NYql::NDq +} + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/runtime/dq_sink.h b/ydb/library/yql/dq/runtime/dq_sink.h index 63c91c96602..abb2562e47b 100644 --- a/ydb/library/yql/dq/runtime/dq_sink.h +++ b/ydb/library/yql/dq/runtime/dq_sink.h @@ -1,12 +1,12 @@ -#pragma once -#include "dq_output.h" - +#pragma once +#include "dq_output.h" + #include <ydb/library/yql/dq/actors/protos/dq_events.pb.h> #include <ydb/library/yql/minikql/computation/mkql_computation_node_holders.h> #include <ydb/library/yql/minikql/mkql_node.h> - -namespace NYql::NDq { - + +namespace NYql::NDq { + struct TDqSinkStats : TDqOutputStats { const ui64 OutputIndex; @@ -14,23 +14,23 @@ struct TDqSinkStats : TDqOutputStats { : OutputIndex(outputIndex) {} }; -class IDqSink : public IDqOutput { -public: - using TPtr = TIntrusivePtr<IDqSink>; - - virtual ui64 GetOutputIndex() const = 0; - - // Pop data to send. Return estimated size of returned data. - [[nodiscard]] - virtual ui64 Pop(NKikimr::NMiniKQL::TUnboxedValueVector& batch, ui64 bytes) = 0; - // Pop chechpoint. Checkpoints may be taken from sink even after it is finished. - [[nodiscard]] - virtual bool Pop(NDqProto::TCheckpoint& checkpoint) = 0; +class IDqSink : public IDqOutput { +public: + using TPtr = TIntrusivePtr<IDqSink>; + + virtual ui64 GetOutputIndex() const = 0; + + // Pop data to send. Return estimated size of returned data. + [[nodiscard]] + virtual ui64 Pop(NKikimr::NMiniKQL::TUnboxedValueVector& batch, ui64 bytes) = 0; + // Pop chechpoint. Checkpoints may be taken from sink even after it is finished. 
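TDqSink above sizes its buffer without serializing every row: every 1000th pushed value is measured and blended into a running per-row estimate with weights 0.6 for the new sample and 0.4 for the previous estimate, clamped to at least one byte. A standalone sketch of that estimator follows; the measuring step is left to the caller as a stand-in for TDqDataSerializer::EstimateSize, and the class name is illustrative.

#include <algorithm>
#include <cstdint>
#include <iostream>

// Sketch of the sampled exponentially weighted row-size estimate from TDqSink above.
class TRowSizeEstimator {
public:
    // measuredBytes is the serialized size of the sampled row (assumption: supplied
    // by the caller instead of the real serializer).
    void OnSample(uint64_t measuredBytes) {
        if (Estimate) {
            Estimate = static_cast<uint64_t>(0.6 * measuredBytes + 0.4 * Estimate);
        } else {
            Estimate = measuredBytes;
        }
        Estimate = std::max<uint64_t>(Estimate, 1);   // never fall to zero bytes per row
    }

    bool ShouldSample() { return Pushed++ % 1000 == 0; }   // sample every 1000th push
    uint64_t Get() const { return Estimate; }

private:
    uint64_t Pushed = 0;
    uint64_t Estimate = 0;
};

int main() {
    TRowSizeEstimator est;
    for (uint64_t i = 0; i < 3000; ++i) {
        if (est.ShouldSample()) {
            est.OnSample(100 + i / 10);   // pretend rows slowly grow
        }
    }
    std::cout << "estimated row size: " << est.Get() << " bytes\n";
    return 0;
}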
+ [[nodiscard]] + virtual bool Pop(NDqProto::TCheckpoint& checkpoint) = 0; virtual const TDqSinkStats* GetStats() const = 0; -}; - +}; + IDqSink::TPtr CreateDqSink(ui64 outputIndex, NKikimr::NMiniKQL::TType* outputType, ui64 maxStoredBytes, bool collectProfileStats); - -} // namespace NYql::NDq + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/runtime/dq_source.cpp b/ydb/library/yql/dq/runtime/dq_source.cpp index b9e75a15e81..431ca1f49cb 100644 --- a/ydb/library/yql/dq/runtime/dq_source.cpp +++ b/ydb/library/yql/dq/runtime/dq_source.cpp @@ -1,43 +1,43 @@ -#include "dq_source.h" -#include "dq_input_impl.h" - -namespace NYql::NDq { - -class TDqSource : public TDqInputImpl<TDqSource, IDqSource> { - using TBaseImpl = TDqInputImpl<TDqSource, IDqSource>; - friend TBaseImpl; -public: +#include "dq_source.h" +#include "dq_input_impl.h" + +namespace NYql::NDq { + +class TDqSource : public TDqInputImpl<TDqSource, IDqSource> { + using TBaseImpl = TDqInputImpl<TDqSource, IDqSource>; + friend TBaseImpl; +public: TDqSource(ui64 inputIndex, NKikimr::NMiniKQL::TType* inputType, ui64 maxBufferBytes, bool collectProfileStats) - : TBaseImpl(inputType, maxBufferBytes) - , InputIndex(inputIndex) + : TBaseImpl(inputType, maxBufferBytes) + , InputIndex(inputIndex) , BasicStats(InputIndex) , ProfileStats(collectProfileStats ? &BasicStats : nullptr) {} - - ui64 GetInputIndex() const override { - return InputIndex; - } - + + ui64 GetInputIndex() const override { + return InputIndex; + } + void Push(NKikimr::NMiniKQL::TUnboxedValueVector&& batch, i64 space) override { - Y_VERIFY(!batch.empty() || !space); - if (!batch.empty()) { - AddBatch(std::move(batch), space); - } - } - - const TDqSourceStats* GetStats() const override { + Y_VERIFY(!batch.empty() || !space); + if (!batch.empty()) { + AddBatch(std::move(batch), space); + } + } + + const TDqSourceStats* GetStats() const override { return &BasicStats; - } - -private: - const ui64 InputIndex; + } + +private: + const ui64 InputIndex; TDqSourceStats BasicStats; TDqSourceStats* ProfileStats = nullptr; -}; - -IDqSource::TPtr CreateDqSource( - ui64 inputIndex, NKikimr::NMiniKQL::TType* inputType, ui64 maxBufferBytes, bool collectStats) -{ - return new TDqSource(inputIndex, inputType, maxBufferBytes, collectStats); -} - -} // namespace NYql::NDq +}; + +IDqSource::TPtr CreateDqSource( + ui64 inputIndex, NKikimr::NMiniKQL::TType* inputType, ui64 maxBufferBytes, bool collectStats) +{ + return new TDqSource(inputIndex, inputType, maxBufferBytes, collectStats); +} + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/runtime/dq_source.h b/ydb/library/yql/dq/runtime/dq_source.h index 01509873f53..ca58e919b66 100644 --- a/ydb/library/yql/dq/runtime/dq_source.h +++ b/ydb/library/yql/dq/runtime/dq_source.h @@ -1,29 +1,29 @@ -#pragma once -#include "dq_input.h" - -namespace NYql::NDq { - -struct TDqSourceStats : TDqInputStats { - ui64 InputIndex = 0; - - explicit TDqSourceStats(ui64 inputIndex) - : InputIndex(inputIndex) {} -}; - -class IDqSource : public IDqInput { -public: - using TPtr = TIntrusivePtr<IDqSource>; - - virtual ui64 GetInputIndex() const = 0; - - virtual void Push(NKikimr::NMiniKQL::TUnboxedValueVector&& batch, i64 space) = 0; - +#pragma once +#include "dq_input.h" + +namespace NYql::NDq { + +struct TDqSourceStats : TDqInputStats { + ui64 InputIndex = 0; + + explicit TDqSourceStats(ui64 inputIndex) + : InputIndex(inputIndex) {} +}; + +class IDqSource : public IDqInput { +public: + using TPtr = TIntrusivePtr<IDqSource>; + + virtual ui64 
GetInputIndex() const = 0; + + virtual void Push(NKikimr::NMiniKQL::TUnboxedValueVector&& batch, i64 space) = 0; + virtual void Finish() = 0; - virtual const TDqSourceStats* GetStats() const = 0; -}; - + virtual const TDqSourceStats* GetStats() const = 0; +}; + IDqSource::TPtr CreateDqSource(ui64 inputIndex, NKikimr::NMiniKQL::TType* inputType, ui64 maxBufferBytes, bool collectProfileStats); - -} // namespace NYql::NDq + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/runtime/dq_tasks_runner.cpp b/ydb/library/yql/dq/runtime/dq_tasks_runner.cpp index 10900352c3b..4ece28df85e 100644 --- a/ydb/library/yql/dq/runtime/dq_tasks_runner.cpp +++ b/ydb/library/yql/dq/runtime/dq_tasks_runner.cpp @@ -124,11 +124,11 @@ NUdf::TUnboxedValue DqBuildInputValue(const NDqProto::TTaskInput& inputDesc, con TVector<IDqInput::TPtr>&& inputs, const THolderFactory& holderFactory) { switch (inputDesc.GetTypeCase()) { - case NYql::NDqProto::TTaskInput::kSource: - Y_VERIFY(inputs.size() == 1); - [[fallthrough]]; + case NYql::NDqProto::TTaskInput::kSource: + Y_VERIFY(inputs.size() == 1); + [[fallthrough]]; case NYql::NDqProto::TTaskInput::kUnionAll: - return CreateInputUnionValue(std::move(inputs), holderFactory); + return CreateInputUnionValue(std::move(inputs), holderFactory); case NYql::NDqProto::TTaskInput::kMerge: { const auto& protoSortCols = inputDesc.GetMerge().GetSortColumns(); TVector<TSortColumnInfo> sortColsInfo; @@ -143,15 +143,15 @@ NUdf::TUnboxedValue DqBuildInputValue(const NDqProto::TTaskInput& inputDesc, con } IDqOutputConsumer::TPtr DqBuildOutputConsumer(const NDqProto::TTaskOutput& outputDesc, const NMiniKQL::TType* type, - const NMiniKQL::TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& outputs) + const NMiniKQL::TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& outputs) { switch (outputDesc.GetTypeCase()) { - case NDqProto::TTaskOutput::kSink: - Y_VERIFY(outputDesc.ChannelsSize() == 0); - [[fallthrough]]; + case NDqProto::TTaskOutput::kSink: + Y_VERIFY(outputDesc.ChannelsSize() == 0); + [[fallthrough]]; case NDqProto::TTaskOutput::kMap: { - YQL_ENSURE(outputs.size() == 1); - return CreateOutputMapConsumer(outputs[0]); + YQL_ENSURE(outputs.size() == 1); + return CreateOutputMapConsumer(outputs[0]); } case NDqProto::TTaskOutput::kHashPartition: { @@ -166,12 +166,12 @@ IDqOutputConsumer::TPtr DqBuildOutputConsumer(const NDqProto::TTaskOutput& outpu channelIds[i] = outputDesc.GetChannels(i).GetId(); } - return CreateOutputHashPartitionConsumer(std::move(outputs), std::move(keyColumnTypes), + return CreateOutputHashPartitionConsumer(std::move(outputs), std::move(keyColumnTypes), std::move(keyColumnIndices), typeEnv); } case NDqProto::TTaskOutput::kBroadcast: { - return CreateOutputBroadcastConsumer(std::move(outputs)); + return CreateOutputBroadcastConsumer(std::move(outputs)); } case NDqProto::TTaskOutput::kRangePartition: { @@ -190,9 +190,9 @@ IDqOutputConsumer::TPtr DqBuildOutputConsumer(const NDqProto::TTaskOutput& outpu IDqOutputConsumer::TPtr TDqTaskRunnerExecutionContext::CreateOutputConsumer(const TTaskOutput& outputDesc, const NKikimr::NMiniKQL::TType* type, NUdf::IApplyContext*, const TTypeEnvironment& typeEnv, - TVector<IDqOutput::TPtr>&& outputs) const + TVector<IDqOutput::TPtr>&& outputs) const { - return DqBuildOutputConsumer(outputDesc, type, typeEnv, std::move(outputs)); + return DqBuildOutputConsumer(outputDesc, type, typeEnv, std::move(outputs)); } IDqChannelStorage::TPtr TDqTaskRunnerExecutionContext::CreateChannelStorage(ui64 /* channelId */) const { @@ -410,22 
+410,22 @@ public: TVector<IDqInput::TPtr> inputs{Reserve(std::max<ui64>(inputDesc.ChannelsSize(), 1))}; // 1 is for "source" type of input. - if (inputDesc.HasSource()) { - auto source = CreateDqSource(i, ProgramParsed.InputItemTypes[i], - memoryLimits.ChannelBufferSize, Settings.CollectProfileStats); - auto [_, inserted] = Sources.emplace(i, source); - Y_VERIFY(inserted); - inputs.emplace_back(source); - } else { - for (auto& inputChannelDesc : inputDesc.GetChannels()) { - ui64 channelId = inputChannelDesc.GetId(); - auto inputChannel = CreateDqInputChannel(channelId, ProgramParsed.InputItemTypes[i], + if (inputDesc.HasSource()) { + auto source = CreateDqSource(i, ProgramParsed.InputItemTypes[i], + memoryLimits.ChannelBufferSize, Settings.CollectProfileStats); + auto [_, inserted] = Sources.emplace(i, source); + Y_VERIFY(inserted); + inputs.emplace_back(source); + } else { + for (auto& inputChannelDesc : inputDesc.GetChannels()) { + ui64 channelId = inputChannelDesc.GetId(); + auto inputChannel = CreateDqInputChannel(channelId, ProgramParsed.InputItemTypes[i], memoryLimits.ChannelBufferSize, Settings.CollectProfileStats, typeEnv, holderFactory, inputChannelDesc.GetTransportVersion()); - auto ret = InputChannels.emplace(channelId, inputChannel); + auto ret = InputChannels.emplace(channelId, inputChannel); YQL_ENSURE(ret.second, "task: " << TaskId << ", duplicated input channelId: " << channelId); - inputs.emplace_back(inputChannel); - } + inputs.emplace_back(inputChannel); + } } auto entryNode = ProgramParsed.CompGraph->GetEntryPoint(i, true); @@ -444,15 +444,15 @@ public: } TVector<IDqOutput::TPtr> outputs{Reserve(std::max<ui64>(outputDesc.ChannelsSize(), 1))}; - if (outputDesc.HasSink()) { + if (outputDesc.HasSink()) { auto sink = CreateDqSink(i, ProgramParsed.OutputItemTypes[i], memoryLimits.ChannelBufferSize, Settings.CollectProfileStats); - auto [_, inserted] = Sinks.emplace(i, sink); - Y_VERIFY(inserted); - outputs.emplace_back(sink); - } else { - for (auto& outputChannelDesc : outputDesc.GetChannels()) { - ui64 channelId = outputChannelDesc.GetId(); + auto [_, inserted] = Sinks.emplace(i, sink); + Y_VERIFY(inserted); + outputs.emplace_back(sink); + } else { + for (auto& outputChannelDesc : outputDesc.GetChannels()) { + ui64 channelId = outputChannelDesc.GetId(); TDqOutputChannelSettings settings; settings.MaxStoredBytes = memoryLimits.ChannelBufferSize; @@ -461,21 +461,21 @@ public: settings.CollectProfileStats = Settings.CollectProfileStats; settings.AllowGeneratorsInUnboxedValues = Settings.AllowGeneratorsInUnboxedValues; - if (!outputChannelDesc.GetInMemory()) { + if (!outputChannelDesc.GetInMemory()) { settings.ChannelStorage = execCtx.CreateChannelStorage(channelId); - } + } auto outputChannel = CreateDqOutputChannel(channelId, ProgramParsed.OutputItemTypes[i], typeEnv, holderFactory, settings, LogFunc); - - auto ret = OutputChannels.emplace(channelId, outputChannel); + + auto ret = OutputChannels.emplace(channelId, outputChannel); YQL_ENSURE(ret.second, "task: " << TaskId << ", duplicated output channelId: " << channelId); - outputs.emplace_back(outputChannel); + outputs.emplace_back(outputChannel); } } outputConsumers[i] = execCtx.CreateOutputConsumer(outputDesc, ProgramParsed.OutputItemTypes[i], - Context.ApplyCtx, typeEnv, std::move(outputs)); + Context.ApplyCtx, typeEnv, std::move(outputs)); } if (outputConsumers.empty()) { @@ -564,24 +564,24 @@ public: return *ptr; } - IDqSource::TPtr GetSource(ui64 inputIndex) override { - auto ptr = Sources.FindPtr(inputIndex); + 
IDqSource::TPtr GetSource(ui64 inputIndex) override { + auto ptr = Sources.FindPtr(inputIndex); YQL_ENSURE(ptr, "task: " << TaskId << " does not have input index: " << inputIndex); - return *ptr; - } - + return *ptr; + } + IDqOutputChannel::TPtr GetOutputChannel(ui64 channelId) override { auto ptr = OutputChannels.FindPtr(channelId); YQL_ENSURE(ptr, "task: " << TaskId << " does not have output channelId: " << channelId); return *ptr; } - IDqSink::TPtr GetSink(ui64 outputIndex) override { - auto ptr = Sinks.FindPtr(outputIndex); + IDqSink::TPtr GetSink(ui64 outputIndex) override { + auto ptr = Sinks.FindPtr(outputIndex); YQL_ENSURE(ptr, "task: " << TaskId << " does not have output index: " << outputIndex); - return *ptr; - } - + return *ptr; + } + TGuard<NKikimr::NMiniKQL::TScopedAlloc> BindAllocator(TMaybe<ui64> memoryLimit) override { auto guard = Context.TypeEnv ? Context.TypeEnv->BindAllocator() : SelfTypeEnv->BindAllocator(); if (memoryLimit) { @@ -598,24 +598,24 @@ public: return Context.TypeEnv ? *Context.TypeEnv : *SelfTypeEnv; } - const NKikimr::NMiniKQL::THolderFactory& GetHolderFactory() const override { - return ProgramParsed.CompGraph->GetHolderFactory(); - } - - const THashMap<TString, TString>& GetSecureParams() const override { - return Settings.SecureParams; - } - - const THashMap<TString, TString>& GetTaskParams() const override { - return Settings.TaskParams; - } - - void UpdateStats() override { - if (Stats) { - Stats->RunStatusTimeMetrics.UpdateStatusTime(); - } - } - + const NKikimr::NMiniKQL::THolderFactory& GetHolderFactory() const override { + return ProgramParsed.CompGraph->GetHolderFactory(); + } + + const THashMap<TString, TString>& GetSecureParams() const override { + return Settings.SecureParams; + } + + const THashMap<TString, TString>& GetTaskParams() const override { + return Settings.TaskParams; + } + + void UpdateStats() override { + if (Stats) { + Stats->RunStatusTimeMetrics.UpdateStatusTime(); + } + } + const TDqTaskRunnerStats* GetStats() const override { return Stats.get(); } @@ -707,10 +707,10 @@ private: }; TProgramParsed ProgramParsed; - THashMap<ui64, IDqInputChannel::TPtr> InputChannels; // Channel id -> Channel - THashMap<ui64, IDqSource::TPtr> Sources; // Input index -> Source - THashMap<ui64, IDqOutputChannel::TPtr> OutputChannels; // Channel id -> Channel - THashMap<ui64, IDqSink::TPtr> Sinks; // Output index -> Sink + THashMap<ui64, IDqInputChannel::TPtr> InputChannels; // Channel id -> Channel + THashMap<ui64, IDqSource::TPtr> Sources; // Input index -> Source + THashMap<ui64, IDqOutputChannel::TPtr> OutputChannels; // Channel id -> Channel + THashMap<ui64, IDqSink::TPtr> Sinks; // Output index -> Sink IDqOutputConsumer::TPtr Output; NUdf::TUnboxedValue ResultStream; diff --git a/ydb/library/yql/dq/runtime/dq_tasks_runner.h b/ydb/library/yql/dq/runtime/dq_tasks_runner.h index a0cc816e91e..3c68ad13ecb 100644 --- a/ydb/library/yql/dq/runtime/dq_tasks_runner.h +++ b/ydb/library/yql/dq/runtime/dq_tasks_runner.h @@ -27,40 +27,40 @@ enum class ERunStatus : ui32 { PendingOutput }; -class TRunStatusTimeMetrics { -public: - void UpdateStatusTime(TDuration computeCpuTime = TDuration::Zero()) { - auto now = TInstant::Now(); - StatusTime[ui32(CurrentStatus)] += now - StatusStartTime - computeCpuTime; - StatusStartTime = now; - } - - void SetCurrentStatus(ERunStatus status, TDuration computeCpuTime) { - Y_VERIFY(ui32(status) < StatusesCount); - UpdateStatusTime(computeCpuTime); - CurrentStatus = status; - } - - TDuration operator[](ERunStatus status) 
const { - const ui32 index = ui32(status); - Y_VERIFY(index < StatusesCount); - return StatusTime[index]; - } - - void Load(ERunStatus status, TDuration d) { - const ui32 index = ui32(status); - Y_VERIFY(index < StatusesCount); - StatusTime[index] = d; - } - - static constexpr ui32 StatusesCount = 3; - -private: - TInstant StatusStartTime = TInstant::Now(); - ERunStatus CurrentStatus = ERunStatus::PendingInput; - TDuration StatusTime[StatusesCount]; -}; - +class TRunStatusTimeMetrics { +public: + void UpdateStatusTime(TDuration computeCpuTime = TDuration::Zero()) { + auto now = TInstant::Now(); + StatusTime[ui32(CurrentStatus)] += now - StatusStartTime - computeCpuTime; + StatusStartTime = now; + } + + void SetCurrentStatus(ERunStatus status, TDuration computeCpuTime) { + Y_VERIFY(ui32(status) < StatusesCount); + UpdateStatusTime(computeCpuTime); + CurrentStatus = status; + } + + TDuration operator[](ERunStatus status) const { + const ui32 index = ui32(status); + Y_VERIFY(index < StatusesCount); + return StatusTime[index]; + } + + void Load(ERunStatus status, TDuration d) { + const ui32 index = ui32(status); + Y_VERIFY(index < StatusesCount); + StatusTime[index] = d; + } + + static constexpr ui32 StatusesCount = 3; + +private: + TInstant StatusStartTime = TInstant::Now(); + ERunStatus CurrentStatus = ERunStatus::PendingInput; + TDuration StatusTime[StatusesCount]; +}; + struct TMkqlStat { NKikimr::NMiniKQL::TStatKey Key; i64 Value = 0; @@ -71,18 +71,18 @@ struct TDqTaskRunnerStats { TDuration BuildCpuTime; TInstant FinishTs; - TDuration ComputeCpuTime; - TRunStatusTimeMetrics RunStatusTimeMetrics; // ComputeCpuTime + RunStatusTimeMetrics == 100% time - + TDuration ComputeCpuTime; + TRunStatusTimeMetrics RunStatusTimeMetrics; // ComputeCpuTime + RunStatusTimeMetrics == 100% time + // profile stats TDuration WaitTime; // wall time of waiting for input, scans & output TDuration WaitOutputTime; NMonitoring::IHistogramCollectorPtr ComputeCpuTimeByRun; // in millis - THashMap<ui64, const TDqInputChannelStats*> InputChannels; // Channel id -> Channel stats - THashMap<ui64, const TDqSourceStats*> Sources; // Input index -> Source stats - THashMap<ui64, const TDqOutputChannelStats*> OutputChannels; // Channel id -> Channel stats + THashMap<ui64, const TDqInputChannelStats*> InputChannels; // Channel id -> Channel stats + THashMap<ui64, const TDqSourceStats*> Sources; // Input index -> Source stats + THashMap<ui64, const TDqOutputChannelStats*> OutputChannels; // Channel id -> Channel stats TVector<TMkqlStat> MkqlStats; }; @@ -105,7 +105,7 @@ public: virtual IDqOutputConsumer::TPtr CreateOutputConsumer(const NDqProto::TTaskOutput& outputDesc, const NKikimr::NMiniKQL::TType* type, NUdf::IApplyContext* applyCtx, - const NKikimr::NMiniKQL::TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& outputs) const = 0; + const NKikimr::NMiniKQL::TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& outputs) const = 0; virtual IDqChannelStorage::TPtr CreateChannelStorage(ui64 channelId) const = 0; }; @@ -114,7 +114,7 @@ class TDqTaskRunnerExecutionContext : public IDqTaskRunnerExecutionContext { public: IDqOutputConsumer::TPtr CreateOutputConsumer(const NDqProto::TTaskOutput& outputDesc, const NKikimr::NMiniKQL::TType* type, NUdf::IApplyContext* applyCtx, - const NKikimr::NMiniKQL::TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& outputs) const override; + const NKikimr::NMiniKQL::TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& outputs) const override; IDqChannelStorage::TPtr 
CreateChannelStorage(ui64 channelId) const override; }; @@ -126,7 +126,7 @@ struct TDqTaskRunnerSettings { bool AllowGeneratorsInUnboxedValues = true; TString OptLLVM = ""; THashMap<TString, TString> SecureParams; - THashMap<TString, TString> TaskParams; + THashMap<TString, TString> TaskParams; }; struct TDqTaskRunnerMemoryLimits { @@ -138,7 +138,7 @@ NUdf::TUnboxedValue DqBuildInputValue(const NDqProto::TTaskInput& inputDesc, con TVector<IDqInputChannel::TPtr>&& channels, const NKikimr::NMiniKQL::THolderFactory& holderFactory); IDqOutputConsumer::TPtr DqBuildOutputConsumer(const NDqProto::TTaskOutput& outputDesc, const NKikimr::NMiniKQL::TType* type, - const NKikimr::NMiniKQL::TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& channels); + const NKikimr::NMiniKQL::TTypeEnvironment& typeEnv, TVector<IDqOutput::TPtr>&& channels); using TDqTaskRunnerParameterProvider = std::function< bool(std::string_view name, NKikimr::NMiniKQL::TType* type, const NKikimr::NMiniKQL::TTypeEnvironment& typeEnv, @@ -159,9 +159,9 @@ public: virtual bool HasEffects() const = 0; virtual IDqInputChannel::TPtr GetInputChannel(ui64 channelId) = 0; - virtual IDqSource::TPtr GetSource(ui64 inputIndex) = 0; + virtual IDqSource::TPtr GetSource(ui64 inputIndex) = 0; virtual IDqOutputChannel::TPtr GetOutputChannel(ui64 channelId) = 0; - virtual IDqSink::TPtr GetSink(ui64 outputIndex) = 0; + virtual IDqSink::TPtr GetSink(ui64 outputIndex) = 0; // if memoryLimit = Nothing() then don't set memory limit, use existing one (if any) // if memoryLimit = 0 then set unlimited @@ -169,12 +169,12 @@ public: virtual TGuard<NKikimr::NMiniKQL::TScopedAlloc> BindAllocator(TMaybe<ui64> memoryLimit = Nothing()) = 0; virtual bool IsAllocatorAttached() = 0; virtual const NKikimr::NMiniKQL::TTypeEnvironment& GetTypeEnv() const = 0; - virtual const NKikimr::NMiniKQL::THolderFactory& GetHolderFactory() const = 0; - - virtual const THashMap<TString, TString>& GetSecureParams() const = 0; - virtual const THashMap<TString, TString>& GetTaskParams() const = 0; + virtual const NKikimr::NMiniKQL::THolderFactory& GetHolderFactory() const = 0; - virtual void UpdateStats() = 0; + virtual const THashMap<TString, TString>& GetSecureParams() const = 0; + virtual const THashMap<TString, TString>& GetTaskParams() const = 0; + + virtual void UpdateStats() = 0; virtual const TDqTaskRunnerStats* GetStats() const = 0; [[nodiscard]] diff --git a/ydb/library/yql/dq/runtime/ya.make b/ydb/library/yql/dq/runtime/ya.make index 4cc8585431d..c60c6a0fa2f 100644 --- a/ydb/library/yql/dq/runtime/ya.make +++ b/ydb/library/yql/dq/runtime/ya.make @@ -26,8 +26,8 @@ SRCS( dq_input_producer.cpp dq_output_channel.cpp dq_output_consumer.cpp - dq_source.cpp - dq_sink.cpp + dq_source.cpp + dq_sink.cpp dq_tasks_runner.cpp dq_transport.cpp ) diff --git a/ydb/library/yql/dq/state/dq_state_load_plan.cpp b/ydb/library/yql/dq/state/dq_state_load_plan.cpp index 79000f07a1d..7e5a8b52b8a 100644 --- a/ydb/library/yql/dq/state/dq_state_load_plan.cpp +++ b/ydb/library/yql/dq/state/dq_state_load_plan.cpp @@ -1,251 +1,251 @@ -#include "dq_state_load_plan.h" - +#include "dq_state_load_plan.h" + #include <ydb/library/yql/core/issue/protos/issue_id.pb.h> #include <ydb/library/yql/providers/pq/proto/dq_io.pb.h> #include <ydb/library/yql/providers/pq/task_meta/task_meta.h> - -#include <util/digest/multi.h> -#include <util/generic/hash_set.h> -#include <util/string/builder.h> - -namespace NYql::NDq { -namespace { -// Pq specific -// TODO: rewrite this code to not depend on concrete providers (now it 
is only pq) -struct TTopic { - TString DatabaseId; - TString Database; - TString TopicPath; - - bool operator==(const TTopic& t) const { - return DatabaseId == t.DatabaseId && Database == t.Database && TopicPath == t.TopicPath; - } -}; - -struct TTopicHash { - size_t operator()(const TTopic& t) const { - return MultiHash(t.DatabaseId, t.Database, t.TopicPath); - } -}; - -struct TTaskSource { - ui64 TaskId = 0; - ui64 InputIndex = 0; - - bool operator==(const TTaskSource& t) const { - return TaskId == t.TaskId && InputIndex == t.InputIndex; - } -}; - -struct TTaskSourceHash { - size_t operator()(const TTaskSource& t) const { - return THash<std::tuple<ui64, ui64>>()(std::tie(t.TaskId, t.InputIndex)); - } -}; - -using TPartitionsMapping = THashMultiMap<ui64, TTaskSource>; // Task can have multiple sources for one partition, so multimap. - -struct TTopicMappingInfo { - TPartitionsMapping PartitionsMapping; - bool Used = false; -}; - -using TTopicsMapping = THashMap<TTopic, TTopicMappingInfo, TTopicHash>; - -// Error in case of normal mode and warning if force one is on. -#define ISSUE(stream) \ - AddForceWarningOrError(TStringBuilder() << stream, issues, force); \ - if (!force) { \ - result = false; \ - } \ - /**/ - -void AddForceWarningOrError(const TString& message, TIssues& issues, bool force) { - TIssue issue(message); - if (force) { - issue.SetCode(TIssuesIds::WARNING, TSeverityIds::S_WARNING); - } - issues.AddIssue(std::move(issue)); -} - -bool IsTopicInput(const NYql::NDqProto::TTaskInput& taskInput) { - return taskInput.GetTypeCase() == NYql::NDqProto::TTaskInput::kSource && taskInput.GetSource().GetType() == "PqSource"; -} - -bool ParseTopicInput( - const NYql::NDqProto::TDqTask& task, - const NYql::NDqProto::TTaskInput& taskInput, - ui64 inputIndex, - bool force, - bool isSourceGraph, - NYql::NPq::NProto::TDqPqTopicSource& srcDesc, - NPq::TTopicPartitionsSet& partitionsSet, - TIssues& issues) -{ - bool result = true; - const char* queryKindStr = isSourceGraph ? 
"source" : "destination"; - const google::protobuf::Any& settingsAny = taskInput.GetSource().GetSettings(); - if (!settingsAny.Is<NYql::NPq::NProto::TDqPqTopicSource>()) { - ISSUE("Can't read " << queryKindStr << " query params: input " << inputIndex << " of task " << task.GetId() << " has incorrect type"); - return false; - } - if (!settingsAny.UnpackTo(&srcDesc)) { - ISSUE("Can't read " << queryKindStr << " query params: failed to unpack input " << inputIndex << " of task " << task.GetId()); - return false; - } - - const TMaybe<NPq::TTopicPartitionsSet> foundPartitionsSet = NPq::GetTopicPartitionsSet(task.GetMeta()); - if (!foundPartitionsSet) { - ISSUE("Can't read " << queryKindStr << " query params: failed to load partitions of topic `" << srcDesc.GetTopicPath() << "` from input " << inputIndex << " of task " << task.GetId()); - return false; - } - partitionsSet = *foundPartitionsSet; - return true; -} - -void AddToMapping( - const NYql::NPq::NProto::TDqPqTopicSource& srcDesc, - const NPq::TTopicPartitionsSet& partitionsSet, - ui64 taskId, - ui64 inputIndex, - TTopicsMapping& mapping) -{ - TTopicMappingInfo& info = mapping[TTopic{srcDesc.GetDatabaseId(), srcDesc.GetDatabase(), srcDesc.GetTopicPath()}]; - ui64 currentPartition = partitionsSet.EachTopicPartitionGroupId; - do { - info.PartitionsMapping.emplace(currentPartition, TTaskSource{taskId, inputIndex}); - currentPartition += partitionsSet.DqPartitionsCount; - } while (currentPartition < partitionsSet.TopicPartitionsCount); -} - -void InitForeignPlan(const NYql::NDqProto::TDqTask& task, NDqProto::NDqStateLoadPlan::TTaskPlan& taskPlan) { - taskPlan.SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN); - taskPlan.MutableProgram()->SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY); - for (ui64 inputIndex = 0; inputIndex < task.InputsSize(); ++inputIndex) { - const NYql::NDqProto::TTaskInput& taskInput = task.GetInputs(inputIndex); - if (taskInput.GetTypeCase() == NYql::NDqProto::TTaskInput::kSource) { - NDqProto::NDqStateLoadPlan::TSourcePlan& sourcePlan = *taskPlan.AddSources(); - sourcePlan.SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY); - sourcePlan.SetInputIndex(inputIndex); - } - } - for (ui64 outputIndex = 0; outputIndex < task.OutputsSize(); ++outputIndex) { - const NYql::NDqProto::TTaskOutput& taskOutput = task.GetOutputs(outputIndex); - if (taskOutput.GetTypeCase() == NYql::NDqProto::TTaskOutput::kSink) { - NDqProto::NDqStateLoadPlan::TSinkPlan& sinkPlan = *taskPlan.AddSinks(); - sinkPlan.SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY); - sinkPlan.SetOutputIndex(outputIndex); - } - } -} - -NDqProto::NDqStateLoadPlan::TSourcePlan& FindSourcePlan(NDqProto::NDqStateLoadPlan::TTaskPlan& taskPlan, ui64 inputIndex) { - for (NDqProto::NDqStateLoadPlan::TSourcePlan& plan : *taskPlan.MutableSources()) { - if (plan.GetInputIndex() == inputIndex) { - return plan; - } - } - Y_FAIL("Source plan for input index %lu was not found", inputIndex); -} - -} // namespace - -bool MakeContinueFromStreamingOffsetsPlan( - const google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask>& src, - const google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask>& dst, - const bool force, - THashMap<ui64, NDqProto::NDqStateLoadPlan::TTaskPlan>& plan, - TIssues& issues) -{ - bool result = true; - // Build src mapping - TTopicsMapping srcMapping; - for (const NYql::NDqProto::TDqTask& task : src) { - for (ui64 inputIndex = 0; inputIndex < task.InputsSize(); ++inputIndex) { - const NYql::NDqProto::TTaskInput& taskInput 
= task.GetInputs(inputIndex); - if (IsTopicInput(taskInput)) { - NYql::NPq::NProto::TDqPqTopicSource srcDesc; - NPq::TTopicPartitionsSet partitionsSet; - if (!ParseTopicInput(task, taskInput, inputIndex, force, true, srcDesc, partitionsSet, issues)) { - if (!force) { - result = false; - } - continue; - } - - AddToMapping(srcDesc, partitionsSet, task.GetId(), inputIndex, srcMapping); - } - } - } - - // Watch dst query and build plan - for (const NYql::NDqProto::TDqTask& task : dst) { - NDqProto::NDqStateLoadPlan::TTaskPlan& taskPlan = plan[task.GetId()]; - taskPlan.SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY); // default if no topic sources - bool foreignStatePlanInited = false; - for (ui64 inputIndex = 0; inputIndex < task.InputsSize(); ++inputIndex) { - const NYql::NDqProto::TTaskInput& taskInput = task.GetInputs(inputIndex); - if (IsTopicInput(taskInput)) { - NYql::NPq::NProto::TDqPqTopicSource srcDesc; - NPq::TTopicPartitionsSet partitionsSet; - if (!ParseTopicInput(task, taskInput, inputIndex, force, false, srcDesc, partitionsSet, issues)) { - if (!force) { - result = false; - } - continue; - } - const auto mappingInfoIt = srcMapping.find(TTopic{srcDesc.GetDatabaseId(), srcDesc.GetDatabase(), srcDesc.GetTopicPath()}); - if (mappingInfoIt == srcMapping.end()) { - ISSUE("Topic `" << srcDesc.GetTopicPath() << "` is not found in previous query" << (force ? ". Query will use fresh offsets for its partitions" : ". Use force mode to ignore this issue")); - continue; - } - TTopicMappingInfo& mappingInfo = mappingInfoIt->second; - mappingInfo.Used = true; - - THashSet<TTaskSource, TTaskSourceHash> tasksSet; - - // Process all partitions - ui64 currentPartition = partitionsSet.EachTopicPartitionGroupId; - do { - auto [taskBegin, taskEnd] = mappingInfo.PartitionsMapping.equal_range(currentPartition); - if (taskBegin == taskEnd) { - // Normal case. Topic was extended. Print warning and continue. - TIssue issue(TStringBuilder() << "Topic `" << srcDesc.GetTopicPath() << "` partition " << currentPartition << " is not found in previous query. Query will use fresh offsets for it"); - issue.SetCode(TIssuesIds::WARNING, TSeverityIds::S_WARNING); - issues.AddIssue(std::move(issue)); - } else { - if (std::distance(taskBegin, taskEnd) > 1) { - ISSUE("Topic `" << srcDesc.GetTopicPath() << "` partition " << currentPartition << " has ambiguous offsets source in previous query checkpoint" << (force ? ". Query will use minimum offset to avoid skipping data" : ". Use force mode to ignore this issue")); - } - for (; taskBegin != taskEnd; ++taskBegin) { - tasksSet.insert(taskBegin->second); - } - } - currentPartition += partitionsSet.DqPartitionsCount; - } while (currentPartition < partitionsSet.TopicPartitionsCount); - - if (!tasksSet.empty()) { - if (!foreignStatePlanInited) { - foreignStatePlanInited = true; - InitForeignPlan(task, taskPlan); - } - NDqProto::NDqStateLoadPlan::TSourcePlan& sourcePlan = FindSourcePlan(taskPlan, inputIndex); - sourcePlan.SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN); - for (const TTaskSource& taskSource : tasksSet) { - NDqProto::NDqStateLoadPlan::TSourcePlan::TForeignTaskSource& taskSourceProto = *sourcePlan.AddForeignTasksSources(); - taskSourceProto.SetTaskId(taskSource.TaskId); - taskSourceProto.SetInputIndex(taskSource.InputIndex); - } - } - } - } - } - for (const auto& [topic, mappingInfo] : srcMapping) { - if (!mappingInfo.Used) { - ISSUE("Topic `" << topic.TopicPath << "` is read in previous query but is not read in new query" << (force ? ". 
Reading offsets will be lost in next checkpoint" : ". Use force mode to ignore this issue")); - } - } - return result; -} - -} // namespace NYql::NDq + +#include <util/digest/multi.h> +#include <util/generic/hash_set.h> +#include <util/string/builder.h> + +namespace NYql::NDq { +namespace { +// Pq specific +// TODO: rewrite this code to not depend on concrete providers (now it is only pq) +struct TTopic { + TString DatabaseId; + TString Database; + TString TopicPath; + + bool operator==(const TTopic& t) const { + return DatabaseId == t.DatabaseId && Database == t.Database && TopicPath == t.TopicPath; + } +}; + +struct TTopicHash { + size_t operator()(const TTopic& t) const { + return MultiHash(t.DatabaseId, t.Database, t.TopicPath); + } +}; + +struct TTaskSource { + ui64 TaskId = 0; + ui64 InputIndex = 0; + + bool operator==(const TTaskSource& t) const { + return TaskId == t.TaskId && InputIndex == t.InputIndex; + } +}; + +struct TTaskSourceHash { + size_t operator()(const TTaskSource& t) const { + return THash<std::tuple<ui64, ui64>>()(std::tie(t.TaskId, t.InputIndex)); + } +}; + +using TPartitionsMapping = THashMultiMap<ui64, TTaskSource>; // Task can have multiple sources for one partition, so multimap. + +struct TTopicMappingInfo { + TPartitionsMapping PartitionsMapping; + bool Used = false; +}; + +using TTopicsMapping = THashMap<TTopic, TTopicMappingInfo, TTopicHash>; + +// Error in case of normal mode and warning if force one is on. +#define ISSUE(stream) \ + AddForceWarningOrError(TStringBuilder() << stream, issues, force); \ + if (!force) { \ + result = false; \ + } \ + /**/ + +void AddForceWarningOrError(const TString& message, TIssues& issues, bool force) { + TIssue issue(message); + if (force) { + issue.SetCode(TIssuesIds::WARNING, TSeverityIds::S_WARNING); + } + issues.AddIssue(std::move(issue)); +} + +bool IsTopicInput(const NYql::NDqProto::TTaskInput& taskInput) { + return taskInput.GetTypeCase() == NYql::NDqProto::TTaskInput::kSource && taskInput.GetSource().GetType() == "PqSource"; +} + +bool ParseTopicInput( + const NYql::NDqProto::TDqTask& task, + const NYql::NDqProto::TTaskInput& taskInput, + ui64 inputIndex, + bool force, + bool isSourceGraph, + NYql::NPq::NProto::TDqPqTopicSource& srcDesc, + NPq::TTopicPartitionsSet& partitionsSet, + TIssues& issues) +{ + bool result = true; + const char* queryKindStr = isSourceGraph ? 
"source" : "destination"; + const google::protobuf::Any& settingsAny = taskInput.GetSource().GetSettings(); + if (!settingsAny.Is<NYql::NPq::NProto::TDqPqTopicSource>()) { + ISSUE("Can't read " << queryKindStr << " query params: input " << inputIndex << " of task " << task.GetId() << " has incorrect type"); + return false; + } + if (!settingsAny.UnpackTo(&srcDesc)) { + ISSUE("Can't read " << queryKindStr << " query params: failed to unpack input " << inputIndex << " of task " << task.GetId()); + return false; + } + + const TMaybe<NPq::TTopicPartitionsSet> foundPartitionsSet = NPq::GetTopicPartitionsSet(task.GetMeta()); + if (!foundPartitionsSet) { + ISSUE("Can't read " << queryKindStr << " query params: failed to load partitions of topic `" << srcDesc.GetTopicPath() << "` from input " << inputIndex << " of task " << task.GetId()); + return false; + } + partitionsSet = *foundPartitionsSet; + return true; +} + +void AddToMapping( + const NYql::NPq::NProto::TDqPqTopicSource& srcDesc, + const NPq::TTopicPartitionsSet& partitionsSet, + ui64 taskId, + ui64 inputIndex, + TTopicsMapping& mapping) +{ + TTopicMappingInfo& info = mapping[TTopic{srcDesc.GetDatabaseId(), srcDesc.GetDatabase(), srcDesc.GetTopicPath()}]; + ui64 currentPartition = partitionsSet.EachTopicPartitionGroupId; + do { + info.PartitionsMapping.emplace(currentPartition, TTaskSource{taskId, inputIndex}); + currentPartition += partitionsSet.DqPartitionsCount; + } while (currentPartition < partitionsSet.TopicPartitionsCount); +} + +void InitForeignPlan(const NYql::NDqProto::TDqTask& task, NDqProto::NDqStateLoadPlan::TTaskPlan& taskPlan) { + taskPlan.SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN); + taskPlan.MutableProgram()->SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY); + for (ui64 inputIndex = 0; inputIndex < task.InputsSize(); ++inputIndex) { + const NYql::NDqProto::TTaskInput& taskInput = task.GetInputs(inputIndex); + if (taskInput.GetTypeCase() == NYql::NDqProto::TTaskInput::kSource) { + NDqProto::NDqStateLoadPlan::TSourcePlan& sourcePlan = *taskPlan.AddSources(); + sourcePlan.SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY); + sourcePlan.SetInputIndex(inputIndex); + } + } + for (ui64 outputIndex = 0; outputIndex < task.OutputsSize(); ++outputIndex) { + const NYql::NDqProto::TTaskOutput& taskOutput = task.GetOutputs(outputIndex); + if (taskOutput.GetTypeCase() == NYql::NDqProto::TTaskOutput::kSink) { + NDqProto::NDqStateLoadPlan::TSinkPlan& sinkPlan = *taskPlan.AddSinks(); + sinkPlan.SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY); + sinkPlan.SetOutputIndex(outputIndex); + } + } +} + +NDqProto::NDqStateLoadPlan::TSourcePlan& FindSourcePlan(NDqProto::NDqStateLoadPlan::TTaskPlan& taskPlan, ui64 inputIndex) { + for (NDqProto::NDqStateLoadPlan::TSourcePlan& plan : *taskPlan.MutableSources()) { + if (plan.GetInputIndex() == inputIndex) { + return plan; + } + } + Y_FAIL("Source plan for input index %lu was not found", inputIndex); +} + +} // namespace + +bool MakeContinueFromStreamingOffsetsPlan( + const google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask>& src, + const google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask>& dst, + const bool force, + THashMap<ui64, NDqProto::NDqStateLoadPlan::TTaskPlan>& plan, + TIssues& issues) +{ + bool result = true; + // Build src mapping + TTopicsMapping srcMapping; + for (const NYql::NDqProto::TDqTask& task : src) { + for (ui64 inputIndex = 0; inputIndex < task.InputsSize(); ++inputIndex) { + const NYql::NDqProto::TTaskInput& taskInput 
= task.GetInputs(inputIndex); + if (IsTopicInput(taskInput)) { + NYql::NPq::NProto::TDqPqTopicSource srcDesc; + NPq::TTopicPartitionsSet partitionsSet; + if (!ParseTopicInput(task, taskInput, inputIndex, force, true, srcDesc, partitionsSet, issues)) { + if (!force) { + result = false; + } + continue; + } + + AddToMapping(srcDesc, partitionsSet, task.GetId(), inputIndex, srcMapping); + } + } + } + + // Watch dst query and build plan + for (const NYql::NDqProto::TDqTask& task : dst) { + NDqProto::NDqStateLoadPlan::TTaskPlan& taskPlan = plan[task.GetId()]; + taskPlan.SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY); // default if no topic sources + bool foreignStatePlanInited = false; + for (ui64 inputIndex = 0; inputIndex < task.InputsSize(); ++inputIndex) { + const NYql::NDqProto::TTaskInput& taskInput = task.GetInputs(inputIndex); + if (IsTopicInput(taskInput)) { + NYql::NPq::NProto::TDqPqTopicSource srcDesc; + NPq::TTopicPartitionsSet partitionsSet; + if (!ParseTopicInput(task, taskInput, inputIndex, force, false, srcDesc, partitionsSet, issues)) { + if (!force) { + result = false; + } + continue; + } + const auto mappingInfoIt = srcMapping.find(TTopic{srcDesc.GetDatabaseId(), srcDesc.GetDatabase(), srcDesc.GetTopicPath()}); + if (mappingInfoIt == srcMapping.end()) { + ISSUE("Topic `" << srcDesc.GetTopicPath() << "` is not found in previous query" << (force ? ". Query will use fresh offsets for its partitions" : ". Use force mode to ignore this issue")); + continue; + } + TTopicMappingInfo& mappingInfo = mappingInfoIt->second; + mappingInfo.Used = true; + + THashSet<TTaskSource, TTaskSourceHash> tasksSet; + + // Process all partitions + ui64 currentPartition = partitionsSet.EachTopicPartitionGroupId; + do { + auto [taskBegin, taskEnd] = mappingInfo.PartitionsMapping.equal_range(currentPartition); + if (taskBegin == taskEnd) { + // Normal case. Topic was extended. Print warning and continue. + TIssue issue(TStringBuilder() << "Topic `" << srcDesc.GetTopicPath() << "` partition " << currentPartition << " is not found in previous query. Query will use fresh offsets for it"); + issue.SetCode(TIssuesIds::WARNING, TSeverityIds::S_WARNING); + issues.AddIssue(std::move(issue)); + } else { + if (std::distance(taskBegin, taskEnd) > 1) { + ISSUE("Topic `" << srcDesc.GetTopicPath() << "` partition " << currentPartition << " has ambiguous offsets source in previous query checkpoint" << (force ? ". Query will use minimum offset to avoid skipping data" : ". Use force mode to ignore this issue")); + } + for (; taskBegin != taskEnd; ++taskBegin) { + tasksSet.insert(taskBegin->second); + } + } + currentPartition += partitionsSet.DqPartitionsCount; + } while (currentPartition < partitionsSet.TopicPartitionsCount); + + if (!tasksSet.empty()) { + if (!foreignStatePlanInited) { + foreignStatePlanInited = true; + InitForeignPlan(task, taskPlan); + } + NDqProto::NDqStateLoadPlan::TSourcePlan& sourcePlan = FindSourcePlan(taskPlan, inputIndex); + sourcePlan.SetStateType(NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN); + for (const TTaskSource& taskSource : tasksSet) { + NDqProto::NDqStateLoadPlan::TSourcePlan::TForeignTaskSource& taskSourceProto = *sourcePlan.AddForeignTasksSources(); + taskSourceProto.SetTaskId(taskSource.TaskId); + taskSourceProto.SetInputIndex(taskSource.InputIndex); + } + } + } + } + } + for (const auto& [topic, mappingInfo] : srcMapping) { + if (!mappingInfo.Used) { + ISSUE("Topic `" << topic.TopicPath << "` is read in previous query but is not read in new query" << (force ? ". 
Reading offsets will be lost in next checkpoint" : ". Use force mode to ignore this issue")); + } + } + return result; +} + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/state/dq_state_load_plan.h b/ydb/library/yql/dq/state/dq_state_load_plan.h index 93bc777dd54..92489e5fd4e 100644 --- a/ydb/library/yql/dq/state/dq_state_load_plan.h +++ b/ydb/library/yql/dq/state/dq_state_load_plan.h @@ -1,14 +1,14 @@ -#pragma once -#include <ydb/library/yql/public/issue/yql_issue.h> - +#pragma once +#include <ydb/library/yql/public/issue/yql_issue.h> + #include <ydb/library/yql/dq/proto/dq_state_load_plan.pb.h> #include <ydb/library/yql/dq/proto/dq_tasks.pb.h> - -#include <util/generic/hash.h> - -namespace NYql::NDq { - -// Make plan for loading streaming offsets from an old graph. -bool MakeContinueFromStreamingOffsetsPlan(const google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask>& src, const google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask>& dst, bool force, THashMap<ui64, NDqProto::NDqStateLoadPlan::TTaskPlan>& plan, TIssues& issues); - -} // namespace NYql::NDq + +#include <util/generic/hash.h> + +namespace NYql::NDq { + +// Make plan for loading streaming offsets from an old graph. +bool MakeContinueFromStreamingOffsetsPlan(const google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask>& src, const google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask>& dst, bool force, THashMap<ui64, NDqProto::NDqStateLoadPlan::TTaskPlan>& plan, TIssues& issues); + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/state/ut/dq_state_load_plan_ut.cpp b/ydb/library/yql/dq/state/ut/dq_state_load_plan_ut.cpp index c690578f9c6..ddff55db4c6 100644 --- a/ydb/library/yql/dq/state/ut/dq_state_load_plan_ut.cpp +++ b/ydb/library/yql/dq/state/ut/dq_state_load_plan_ut.cpp @@ -1,448 +1,448 @@ -#include "dq_state_load_plan.h" - +#include "dq_state_load_plan.h" + #include <ydb/library/yql/providers/dq/api/protos/service.pb.h> #include <ydb/library/yql/providers/pq/proto/dq_io.pb.h> #include <ydb/library/yql/providers/pq/proto/dq_task_params.pb.h> #include <ydb/library/yql/providers/pq/task_meta/task_meta.h> - -#include <library/cpp/testing/unittest/registar.h> - -namespace NYql::NDq { - -namespace { - -struct TGraphBuilder; -struct TTaskBuilder; - -struct TTaskInputBuilder { - TTaskBuilder* Parent; - NYql::NDqProto::TTaskInput* In; - - TTaskInputBuilder& Channel() { - In->AddChannels(); - In->MutableUnionAll(); - return *this; - } - - TTaskInputBuilder& Source() { - In->MutableSource()->SetType("Unknown"); - return *this; - } - - TTaskInputBuilder& TopicSource(const TString& topic, ui64 partitionsCount, ui64 dqPartitionsCount, ui64 eachPartition); - - TTaskBuilder& Build() { - return *Parent; - } -}; - -struct TTaskOutputBuilder { - TTaskBuilder* Parent; - NYql::NDqProto::TTaskOutput* Out; - - TTaskOutputBuilder& Channel() { - Out->AddChannels(); - Out->MutableBroadcast(); - return *this; - } - - TTaskOutputBuilder& Sink() { - Out->MutableSink(); - return *this; - } - - TTaskBuilder& Build() { - return *Parent; - } -}; - -struct TTaskBuilder { - TGraphBuilder* Parent; - NYql::NDqProto::TDqTask* Task; - - TTaskInputBuilder Input() { - return TTaskInputBuilder{this, Task->AddInputs()}; - } - - TTaskOutputBuilder Output() { - return TTaskOutputBuilder{this, Task->AddOutputs()}; - } - - TGraphBuilder& Build() { - return *Parent; - } -}; - -struct TGraphBuilder { - google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask> Graph; - - TTaskBuilder Task(ui64 id = 0) { - auto* task = Graph.Add(); 
- task->SetId(id ? id : Graph.size()); - return TTaskBuilder{this, task}; - } - - google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask> Build() { - return std::move(Graph); - } -}; - -TTaskInputBuilder& TTaskInputBuilder::TopicSource(const TString& topic, ui64 partitionsCount, ui64 dqPartitionsCount, ui64 eachPartition) { - auto* src = In->MutableSource(); - src->SetType("PqSource"); - - NYql::NPq::NProto::TDqPqTopicSource topicSrcSettings; - topicSrcSettings.SetDatabase("DB"); - topicSrcSettings.SetDatabaseId("DBID"); - topicSrcSettings.SetTopicPath(topic); - src->MutableSettings()->PackFrom(topicSrcSettings); - - NYql::NPq::NProto::TDqReadTaskParams readTaskParams; - auto* part = readTaskParams.MutablePartitioningParams(); - part->SetTopicPartitionsCount(partitionsCount); - part->SetDqPartitionsCount(dqPartitionsCount); - part->SetEachTopicPartitionGroupId(eachPartition); - TString readTaskParamsBytes; - UNIT_ASSERT(readTaskParams.SerializeToString(&readTaskParamsBytes)); - Yql::DqsProto::TTaskMeta meta; - (*meta.MutableTaskParams())["pq"] = readTaskParamsBytes; - Parent->Task->MutableMeta()->PackFrom(meta); - return *this; -} - -ui64 SourcesCount(const NYql::NDqProto::TDqTask& task) { - ui64 cnt = 0; - for (const auto& input : task.GetInputs()) { - if (input.GetTypeCase() == NYql::NDqProto::TTaskInput::kSource) { - ++cnt; - } - } - return cnt; -} - -ui64 SinksCount(const NYql::NDqProto::TDqTask& task) { - ui64 cnt = 0; - for (const auto& output : task.GetOutputs()) { - if (output.GetTypeCase() == NYql::NDqProto::TTaskOutput::kSink) { - ++cnt; - } - } - return cnt; -} - -struct TTestCase : public NUnitTest::TBaseTestCase { - TGraphBuilder SrcGraph; - TGraphBuilder DstGraph; - THashMap<ui64, NDqProto::NDqStateLoadPlan::TTaskPlan> Plan; - TIssues Issues; - - bool MakePlan(bool force) { - Plan.clear(); - Issues.Clear(); - const bool result = MakeContinueFromStreamingOffsetsPlan(SrcGraph.Graph, DstGraph.Graph, force, Plan, Issues); - if (result) { - ValidatePlan(); - } else { - UNIT_ASSERT_UNEQUAL(Issues.Size(), 0); - } - return result; - } - - void SwapGraphs() { - SrcGraph.Graph.Swap(&DstGraph.Graph); - } - - const NYql::NDqProto::TDqTask& FindSrcTask(ui64 taskId) const { - for (const auto& task : SrcGraph.Graph) { - if (task.GetId() == taskId) { - return task; - } - } - UNIT_ASSERT_C(false, "Task " << taskId << " was not found in src graph"); - // Make compiler happy - return SrcGraph.Graph.Get(42); - } - - void ValidatePlan() const { - UNIT_ASSERT_VALUES_EQUAL(Plan.size(), DstGraph.Graph.size()); - for (const auto& task : DstGraph.Graph) { - const auto taskPlanIt = Plan.find(task.GetId()); - UNIT_ASSERT_C(taskPlanIt != Plan.end(), "Task " << task.GetId() << " was not found in plan"); - const auto& taskPlan = taskPlanIt->second; - UNIT_ASSERT_C(taskPlan.GetStateType() != NDqProto::NDqStateLoadPlan::STATE_TYPE_UNSPECIFIED, "Task " << task.GetId() << " plan: " << taskPlan); - if (taskPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY) { - UNIT_ASSERT_C(!taskPlan.HasProgram(), "Task " << task.GetId() << " plan: " << taskPlan); - UNIT_ASSERT_VALUES_EQUAL_C(taskPlan.SourcesSize(), 0, "Task " << task.GetId() << " plan: " << taskPlan); - UNIT_ASSERT_VALUES_EQUAL_C(taskPlan.SinksSize(), 0, "Task " << task.GetId() << " plan: " << taskPlan); - } else { - UNIT_ASSERT_C(taskPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN, "Task " << task.GetId() << " plan: " << taskPlan); - UNIT_ASSERT_C(taskPlan.HasProgram(), "Task " << task.GetId() << " plan: " << 
taskPlan); - UNIT_ASSERT_C(taskPlan.GetProgram().GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY, "Task " << task.GetId() << " plan: " << taskPlan); - UNIT_ASSERT_VALUES_EQUAL_C(taskPlan.SourcesSize(), SourcesCount(task), "Task " << task.GetId() << " plan: " << taskPlan); - UNIT_ASSERT_VALUES_EQUAL_C(taskPlan.SinksSize(), SinksCount(task), "Task " << task.GetId() << " plan: " << taskPlan); - for (const auto& sourcePlan : taskPlan.GetSources()) { - UNIT_ASSERT_C(sourcePlan.GetStateType() != NDqProto::NDqStateLoadPlan::STATE_TYPE_UNSPECIFIED, "Task " << task.GetId() << " plan: " << taskPlan); - UNIT_ASSERT_C(sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY || sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN, "Task " << task.GetId() << " plan: " << taskPlan); - UNIT_ASSERT_C(sourcePlan.GetInputIndex() < task.InputsSize(), "Task " << task.GetId() << " plan: " << taskPlan); - const auto& taskInput = task.GetInputs(sourcePlan.GetInputIndex()); - UNIT_ASSERT_C(taskInput.GetTypeCase() == NYql::NDqProto::TTaskInput::kSource, "Task " << task.GetId() << " plan: " << taskPlan); - // State type is foreign => source type is pq - UNIT_ASSERT_C(sourcePlan.GetStateType() != NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN || taskInput.GetSource().GetType() == "PqSource", "Task " << task.GetId() << " plan: " << taskPlan << ". Task input: " << taskInput); - if (sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN) { - UNIT_ASSERT_C(sourcePlan.ForeignTasksSourcesSize() > 0, "Task " << task.GetId() << " plan: " << taskPlan); - const TMaybe<NPq::TTopicPartitionsSet> partitionsSet = NPq::GetTopicPartitionsSet(task.GetMeta()); - UNIT_ASSERT_C(partitionsSet, "Task " << task.GetId() << " plan: " << taskPlan); - for (const auto& taskSource : sourcePlan.GetForeignTasksSources()) { - const auto& srcTask = FindSrcTask(taskSource.GetTaskId()); // with assertion - UNIT_ASSERT_C(taskSource.GetInputIndex() < srcTask.InputsSize(), "Task " << srcTask.GetId() << " plan: " << taskPlan); - const auto& srcTaskInput = srcTask.GetInputs(taskSource.GetInputIndex()); - UNIT_ASSERT_C(srcTaskInput.GetTypeCase() == NYql::NDqProto::TTaskInput::kSource, "Task " << srcTask.GetId() << " plan: " << taskPlan); - UNIT_ASSERT_C(srcTaskInput.GetSource().GetType() == "PqSource", "Task " << srcTask.GetId() << " plan: " << taskPlan); - const TMaybe<NPq::TTopicPartitionsSet> srcTaskPartitionsSet = NPq::GetTopicPartitionsSet(task.GetMeta()); - UNIT_ASSERT_C(srcTaskPartitionsSet, "Task " << srcTask.GetId() << " plan: " << taskPlan); - UNIT_ASSERT_C(partitionsSet->Intersects(*srcTaskPartitionsSet), "Task " << srcTask.GetId() << " plan: " << taskPlan); - } - } - } - for (const auto& sinkPlan : taskPlan.GetSinks()) { - UNIT_ASSERT_C(sinkPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY, "Task " << task.GetId() << " plan: " << taskPlan); - UNIT_ASSERT_C(sinkPlan.GetOutputIndex() < task.OutputsSize(), "Task " << task.GetId() << " plan: " << taskPlan); - const auto& taskOutput = task.GetOutputs(sinkPlan.GetOutputIndex()); - UNIT_ASSERT_C(taskOutput.GetTypeCase() == NYql::NDqProto::TTaskOutput::kSink, "Task " << task.GetId() << " plan: " << taskPlan); - } - } - } - } - - void AssertTaskPlanIsEmpty(ui64 taskId) const { - const auto taskPlanIt = Plan.find(taskId); - UNIT_ASSERT_C(taskPlanIt != Plan.end(), "Task " << taskId << " was not found in plan"); - UNIT_ASSERT_C(taskPlanIt->second.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY, 
taskPlanIt->second); - } - - void AssertTaskPlanSourceHasSourceTask(ui64 taskId, ui64 sourceIndex, ui64 srcTaskId, ui64 srcInputIndex) const { - const auto taskPlanIt = Plan.find(taskId); - UNIT_ASSERT_C(taskPlanIt != Plan.end(), "Task " << taskId << " was not found in plan"); - const auto& taskPlan = taskPlanIt->second; - UNIT_ASSERT_C(taskPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN, taskPlanIt->second); - for (const auto& sourcePlan : taskPlan.GetSources()) { - if (sourcePlan.GetInputIndex() == sourceIndex) { - for (const auto& foreignTaskSource : sourcePlan.GetForeignTasksSources()) { - if (foreignTaskSource.GetTaskId() == srcTaskId) { - UNIT_ASSERT_VALUES_EQUAL_C(foreignTaskSource.GetInputIndex(), srcInputIndex, foreignTaskSource); - return; - } - } - UNIT_ASSERT_C(false, "Source task " << srcTaskId << " was not found in source plan for index " << sourceIndex); - } - } - UNIT_ASSERT_C(false, "Source plan for index " << sourceIndex << " was not found"); - } - - TString IssuesStr() const { - return Issues.ToString(); - } -}; - -} // namespace - -Y_UNIT_TEST_SUITE_F(TContinueFromStreamingOffsetsPlanTest, TTestCase) { - Y_UNIT_TEST(Empty) { - UNIT_ASSERT(MakePlan(false)); - UNIT_ASSERT(MakePlan(true)); - } - - Y_UNIT_TEST(OneToOneMapping) { - SrcGraph - .Task() - .Input().Channel().Build() - .Output().Channel().Build() - .Build() - .Task() - .Input().Channel().Build() - .Input().TopicSource("t", 3, 3, 0).Build(); - DstGraph - .Task() - .Input().TopicSource("t", 3, 3, 0).Build(); - - UNIT_ASSERT(MakePlan(false)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - UNIT_ASSERT(MakePlan(true)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - AssertTaskPlanSourceHasSourceTask(1, 0, 2, 1); - - SwapGraphs(); - UNIT_ASSERT(MakePlan(false)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - UNIT_ASSERT(MakePlan(true)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - AssertTaskPlanIsEmpty(1); - } - - Y_UNIT_TEST(DifferentPartitioning) { - SrcGraph - .Task() - .Input().Channel().Build() - .Input().TopicSource("t", 4, 1, 0).Build(); - DstGraph - .Task() - .Input().TopicSource("t", 4, 2, 0).Build() - .Build() - .Task() - .Input().TopicSource("t", 4, 2, 1).Build(); - - UNIT_ASSERT(MakePlan(false)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - UNIT_ASSERT(MakePlan(true)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - AssertTaskPlanSourceHasSourceTask(1, 0, 1, 1); - AssertTaskPlanSourceHasSourceTask(2, 0, 1, 1); - - SwapGraphs(); - UNIT_ASSERT(MakePlan(false)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - UNIT_ASSERT(MakePlan(true)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - AssertTaskPlanSourceHasSourceTask(1, 1, 1, 0); - AssertTaskPlanSourceHasSourceTask(1, 1, 2, 0); - } - - Y_UNIT_TEST(MultipleTopics) { - SrcGraph - .Task() - .Input().TopicSource("t", 1, 1, 0).Build() - .Build() - .Task() - .Input().TopicSource("p", 1, 1, 0).Build() - .Build(); - - DstGraph - .Task() - .Input().TopicSource("p", 1, 1, 0).Build() - .Build() - .Task() - .Input().TopicSource("t", 1, 1, 0).Build() - .Build(); - - UNIT_ASSERT(MakePlan(false)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - UNIT_ASSERT(MakePlan(true)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - AssertTaskPlanSourceHasSourceTask(1, 0, 2, 0); - AssertTaskPlanSourceHasSourceTask(2, 0, 1, 0); - - SwapGraphs(); - UNIT_ASSERT(MakePlan(false)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - UNIT_ASSERT(MakePlan(true)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - } - - Y_UNIT_TEST(AllTopicsMustBeUsedInNonForceMode) { - 
SrcGraph - .Task() - .Input().TopicSource("t", 1, 1, 0).Build() - .Build() - .Task() - .Input().TopicSource("p", 1, 1, 0).Build() - .Build(); - - DstGraph - .Task() - .Input().TopicSource("t", 1, 1, 0).Build() - .Build(); - - UNIT_ASSERT(!MakePlan(false)); - UNIT_ASSERT(MakePlan(true)); - - SwapGraphs(); - UNIT_ASSERT(!MakePlan(false)); - UNIT_ASSERT(MakePlan(true)); - AssertTaskPlanIsEmpty(2); - } - - Y_UNIT_TEST(NotMappedAllPartitionsIsOk) { - SrcGraph - .Task() - .Input().TopicSource("t", 5, 1, 0).Build() - .Build(); - - DstGraph - .Task() - .Input().TopicSource("t", 10, 2, 0).Build() - .Build() - .Task() - .Input().TopicSource("t", 10, 2, 1).Build() - .Build(); - - UNIT_ASSERT(MakePlan(false)); - UNIT_ASSERT_UNEQUAL(Issues.Size(), 0); - UNIT_ASSERT(MakePlan(true)); - UNIT_ASSERT_UNEQUAL(Issues.Size(), 0); - AssertTaskPlanSourceHasSourceTask(1, 0, 1, 0); - AssertTaskPlanSourceHasSourceTask(2, 0, 1, 0); - - SwapGraphs(); - UNIT_ASSERT(MakePlan(false)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - UNIT_ASSERT(MakePlan(true)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - } - - Y_UNIT_TEST(ReadPartitionInSeveralPlacesIsOk) { - SrcGraph - .Task() - .Input().TopicSource("t", 5, 1, 0).Build() - .Build(); - - DstGraph - .Task() - .Input().TopicSource("t", 5, 1, 0).Build() - .Build() - .Task() - .Input().TopicSource("t", 5, 2, 0).Build() - .Build() - .Task() - .Input().TopicSource("t", 5, 2, 1).Build() - .Build(); - - UNIT_ASSERT(MakePlan(false)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - UNIT_ASSERT(MakePlan(true)); - UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); - - AssertTaskPlanSourceHasSourceTask(1, 0, 1, 0); - AssertTaskPlanSourceHasSourceTask(2, 0, 1, 0); - AssertTaskPlanSourceHasSourceTask(3, 0, 1, 0); - - SwapGraphs(); - UNIT_ASSERT(!MakePlan(false)); - } - - Y_UNIT_TEST(MapSeveralReadingsToOneIsAllowedOnlyInForceMode) { - SrcGraph - .Task() - .Input().TopicSource("t", 5, 1, 0).Build() - .Build() - .Task() - .Input().TopicSource("t", 5, 1, 0).Build() - .Build(); - - DstGraph - .Task() - .Input().TopicSource("t", 5, 1, 0).Build() - .Build() - .Task() - .Input().TopicSource("t", 5, 2, 0).Build() - .Build() - .Task() - .Input().TopicSource("t", 5, 2, 1).Build() - .Build(); - - UNIT_ASSERT(!MakePlan(false)); - UNIT_ASSERT(MakePlan(true)); - UNIT_ASSERT_UNEQUAL(Issues.Size(), 0); - } -} - -} // namespace NYql::NDq + +#include <library/cpp/testing/unittest/registar.h> + +namespace NYql::NDq { + +namespace { + +struct TGraphBuilder; +struct TTaskBuilder; + +struct TTaskInputBuilder { + TTaskBuilder* Parent; + NYql::NDqProto::TTaskInput* In; + + TTaskInputBuilder& Channel() { + In->AddChannels(); + In->MutableUnionAll(); + return *this; + } + + TTaskInputBuilder& Source() { + In->MutableSource()->SetType("Unknown"); + return *this; + } + + TTaskInputBuilder& TopicSource(const TString& topic, ui64 partitionsCount, ui64 dqPartitionsCount, ui64 eachPartition); + + TTaskBuilder& Build() { + return *Parent; + } +}; + +struct TTaskOutputBuilder { + TTaskBuilder* Parent; + NYql::NDqProto::TTaskOutput* Out; + + TTaskOutputBuilder& Channel() { + Out->AddChannels(); + Out->MutableBroadcast(); + return *this; + } + + TTaskOutputBuilder& Sink() { + Out->MutableSink(); + return *this; + } + + TTaskBuilder& Build() { + return *Parent; + } +}; + +struct TTaskBuilder { + TGraphBuilder* Parent; + NYql::NDqProto::TDqTask* Task; + + TTaskInputBuilder Input() { + return TTaskInputBuilder{this, Task->AddInputs()}; + } + + TTaskOutputBuilder Output() { + return TTaskOutputBuilder{this, Task->AddOutputs()}; 
+ } + + TGraphBuilder& Build() { + return *Parent; + } +}; + +struct TGraphBuilder { + google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask> Graph; + + TTaskBuilder Task(ui64 id = 0) { + auto* task = Graph.Add(); + task->SetId(id ? id : Graph.size()); + return TTaskBuilder{this, task}; + } + + google::protobuf::RepeatedPtrField<NYql::NDqProto::TDqTask> Build() { + return std::move(Graph); + } +}; + +TTaskInputBuilder& TTaskInputBuilder::TopicSource(const TString& topic, ui64 partitionsCount, ui64 dqPartitionsCount, ui64 eachPartition) { + auto* src = In->MutableSource(); + src->SetType("PqSource"); + + NYql::NPq::NProto::TDqPqTopicSource topicSrcSettings; + topicSrcSettings.SetDatabase("DB"); + topicSrcSettings.SetDatabaseId("DBID"); + topicSrcSettings.SetTopicPath(topic); + src->MutableSettings()->PackFrom(topicSrcSettings); + + NYql::NPq::NProto::TDqReadTaskParams readTaskParams; + auto* part = readTaskParams.MutablePartitioningParams(); + part->SetTopicPartitionsCount(partitionsCount); + part->SetDqPartitionsCount(dqPartitionsCount); + part->SetEachTopicPartitionGroupId(eachPartition); + TString readTaskParamsBytes; + UNIT_ASSERT(readTaskParams.SerializeToString(&readTaskParamsBytes)); + Yql::DqsProto::TTaskMeta meta; + (*meta.MutableTaskParams())["pq"] = readTaskParamsBytes; + Parent->Task->MutableMeta()->PackFrom(meta); + return *this; +} + +ui64 SourcesCount(const NYql::NDqProto::TDqTask& task) { + ui64 cnt = 0; + for (const auto& input : task.GetInputs()) { + if (input.GetTypeCase() == NYql::NDqProto::TTaskInput::kSource) { + ++cnt; + } + } + return cnt; +} + +ui64 SinksCount(const NYql::NDqProto::TDqTask& task) { + ui64 cnt = 0; + for (const auto& output : task.GetOutputs()) { + if (output.GetTypeCase() == NYql::NDqProto::TTaskOutput::kSink) { + ++cnt; + } + } + return cnt; +} + +struct TTestCase : public NUnitTest::TBaseTestCase { + TGraphBuilder SrcGraph; + TGraphBuilder DstGraph; + THashMap<ui64, NDqProto::NDqStateLoadPlan::TTaskPlan> Plan; + TIssues Issues; + + bool MakePlan(bool force) { + Plan.clear(); + Issues.Clear(); + const bool result = MakeContinueFromStreamingOffsetsPlan(SrcGraph.Graph, DstGraph.Graph, force, Plan, Issues); + if (result) { + ValidatePlan(); + } else { + UNIT_ASSERT_UNEQUAL(Issues.Size(), 0); + } + return result; + } + + void SwapGraphs() { + SrcGraph.Graph.Swap(&DstGraph.Graph); + } + + const NYql::NDqProto::TDqTask& FindSrcTask(ui64 taskId) const { + for (const auto& task : SrcGraph.Graph) { + if (task.GetId() == taskId) { + return task; + } + } + UNIT_ASSERT_C(false, "Task " << taskId << " was not found in src graph"); + // Make compiler happy + return SrcGraph.Graph.Get(42); + } + + void ValidatePlan() const { + UNIT_ASSERT_VALUES_EQUAL(Plan.size(), DstGraph.Graph.size()); + for (const auto& task : DstGraph.Graph) { + const auto taskPlanIt = Plan.find(task.GetId()); + UNIT_ASSERT_C(taskPlanIt != Plan.end(), "Task " << task.GetId() << " was not found in plan"); + const auto& taskPlan = taskPlanIt->second; + UNIT_ASSERT_C(taskPlan.GetStateType() != NDqProto::NDqStateLoadPlan::STATE_TYPE_UNSPECIFIED, "Task " << task.GetId() << " plan: " << taskPlan); + if (taskPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY) { + UNIT_ASSERT_C(!taskPlan.HasProgram(), "Task " << task.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_VALUES_EQUAL_C(taskPlan.SourcesSize(), 0, "Task " << task.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_VALUES_EQUAL_C(taskPlan.SinksSize(), 0, "Task " << task.GetId() << " plan: " << taskPlan); + } else { + 
UNIT_ASSERT_C(taskPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN, "Task " << task.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_C(taskPlan.HasProgram(), "Task " << task.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_C(taskPlan.GetProgram().GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY, "Task " << task.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_VALUES_EQUAL_C(taskPlan.SourcesSize(), SourcesCount(task), "Task " << task.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_VALUES_EQUAL_C(taskPlan.SinksSize(), SinksCount(task), "Task " << task.GetId() << " plan: " << taskPlan); + for (const auto& sourcePlan : taskPlan.GetSources()) { + UNIT_ASSERT_C(sourcePlan.GetStateType() != NDqProto::NDqStateLoadPlan::STATE_TYPE_UNSPECIFIED, "Task " << task.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_C(sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY || sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN, "Task " << task.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_C(sourcePlan.GetInputIndex() < task.InputsSize(), "Task " << task.GetId() << " plan: " << taskPlan); + const auto& taskInput = task.GetInputs(sourcePlan.GetInputIndex()); + UNIT_ASSERT_C(taskInput.GetTypeCase() == NYql::NDqProto::TTaskInput::kSource, "Task " << task.GetId() << " plan: " << taskPlan); + // State type is foreign => source type is pq + UNIT_ASSERT_C(sourcePlan.GetStateType() != NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN || taskInput.GetSource().GetType() == "PqSource", "Task " << task.GetId() << " plan: " << taskPlan << ". Task input: " << taskInput); + if (sourcePlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN) { + UNIT_ASSERT_C(sourcePlan.ForeignTasksSourcesSize() > 0, "Task " << task.GetId() << " plan: " << taskPlan); + const TMaybe<NPq::TTopicPartitionsSet> partitionsSet = NPq::GetTopicPartitionsSet(task.GetMeta()); + UNIT_ASSERT_C(partitionsSet, "Task " << task.GetId() << " plan: " << taskPlan); + for (const auto& taskSource : sourcePlan.GetForeignTasksSources()) { + const auto& srcTask = FindSrcTask(taskSource.GetTaskId()); // with assertion + UNIT_ASSERT_C(taskSource.GetInputIndex() < srcTask.InputsSize(), "Task " << srcTask.GetId() << " plan: " << taskPlan); + const auto& srcTaskInput = srcTask.GetInputs(taskSource.GetInputIndex()); + UNIT_ASSERT_C(srcTaskInput.GetTypeCase() == NYql::NDqProto::TTaskInput::kSource, "Task " << srcTask.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_C(srcTaskInput.GetSource().GetType() == "PqSource", "Task " << srcTask.GetId() << " plan: " << taskPlan); + const TMaybe<NPq::TTopicPartitionsSet> srcTaskPartitionsSet = NPq::GetTopicPartitionsSet(task.GetMeta()); + UNIT_ASSERT_C(srcTaskPartitionsSet, "Task " << srcTask.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_C(partitionsSet->Intersects(*srcTaskPartitionsSet), "Task " << srcTask.GetId() << " plan: " << taskPlan); + } + } + } + for (const auto& sinkPlan : taskPlan.GetSinks()) { + UNIT_ASSERT_C(sinkPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY, "Task " << task.GetId() << " plan: " << taskPlan); + UNIT_ASSERT_C(sinkPlan.GetOutputIndex() < task.OutputsSize(), "Task " << task.GetId() << " plan: " << taskPlan); + const auto& taskOutput = task.GetOutputs(sinkPlan.GetOutputIndex()); + UNIT_ASSERT_C(taskOutput.GetTypeCase() == NYql::NDqProto::TTaskOutput::kSink, "Task " << task.GetId() << " plan: " << taskPlan); + } + } + } + } + + void AssertTaskPlanIsEmpty(ui64 taskId) const { + const auto 
taskPlanIt = Plan.find(taskId); + UNIT_ASSERT_C(taskPlanIt != Plan.end(), "Task " << taskId << " was not found in plan"); + UNIT_ASSERT_C(taskPlanIt->second.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_EMPTY, taskPlanIt->second); + } + + void AssertTaskPlanSourceHasSourceTask(ui64 taskId, ui64 sourceIndex, ui64 srcTaskId, ui64 srcInputIndex) const { + const auto taskPlanIt = Plan.find(taskId); + UNIT_ASSERT_C(taskPlanIt != Plan.end(), "Task " << taskId << " was not found in plan"); + const auto& taskPlan = taskPlanIt->second; + UNIT_ASSERT_C(taskPlan.GetStateType() == NDqProto::NDqStateLoadPlan::STATE_TYPE_FOREIGN, taskPlanIt->second); + for (const auto& sourcePlan : taskPlan.GetSources()) { + if (sourcePlan.GetInputIndex() == sourceIndex) { + for (const auto& foreignTaskSource : sourcePlan.GetForeignTasksSources()) { + if (foreignTaskSource.GetTaskId() == srcTaskId) { + UNIT_ASSERT_VALUES_EQUAL_C(foreignTaskSource.GetInputIndex(), srcInputIndex, foreignTaskSource); + return; + } + } + UNIT_ASSERT_C(false, "Source task " << srcTaskId << " was not found in source plan for index " << sourceIndex); + } + } + UNIT_ASSERT_C(false, "Source plan for index " << sourceIndex << " was not found"); + } + + TString IssuesStr() const { + return Issues.ToString(); + } +}; + +} // namespace + +Y_UNIT_TEST_SUITE_F(TContinueFromStreamingOffsetsPlanTest, TTestCase) { + Y_UNIT_TEST(Empty) { + UNIT_ASSERT(MakePlan(false)); + UNIT_ASSERT(MakePlan(true)); + } + + Y_UNIT_TEST(OneToOneMapping) { + SrcGraph + .Task() + .Input().Channel().Build() + .Output().Channel().Build() + .Build() + .Task() + .Input().Channel().Build() + .Input().TopicSource("t", 3, 3, 0).Build(); + DstGraph + .Task() + .Input().TopicSource("t", 3, 3, 0).Build(); + + UNIT_ASSERT(MakePlan(false)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + UNIT_ASSERT(MakePlan(true)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + AssertTaskPlanSourceHasSourceTask(1, 0, 2, 1); + + SwapGraphs(); + UNIT_ASSERT(MakePlan(false)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + UNIT_ASSERT(MakePlan(true)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + AssertTaskPlanIsEmpty(1); + } + + Y_UNIT_TEST(DifferentPartitioning) { + SrcGraph + .Task() + .Input().Channel().Build() + .Input().TopicSource("t", 4, 1, 0).Build(); + DstGraph + .Task() + .Input().TopicSource("t", 4, 2, 0).Build() + .Build() + .Task() + .Input().TopicSource("t", 4, 2, 1).Build(); + + UNIT_ASSERT(MakePlan(false)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + UNIT_ASSERT(MakePlan(true)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + AssertTaskPlanSourceHasSourceTask(1, 0, 1, 1); + AssertTaskPlanSourceHasSourceTask(2, 0, 1, 1); + + SwapGraphs(); + UNIT_ASSERT(MakePlan(false)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + UNIT_ASSERT(MakePlan(true)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + AssertTaskPlanSourceHasSourceTask(1, 1, 1, 0); + AssertTaskPlanSourceHasSourceTask(1, 1, 2, 0); + } + + Y_UNIT_TEST(MultipleTopics) { + SrcGraph + .Task() + .Input().TopicSource("t", 1, 1, 0).Build() + .Build() + .Task() + .Input().TopicSource("p", 1, 1, 0).Build() + .Build(); + + DstGraph + .Task() + .Input().TopicSource("p", 1, 1, 0).Build() + .Build() + .Task() + .Input().TopicSource("t", 1, 1, 0).Build() + .Build(); + + UNIT_ASSERT(MakePlan(false)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + UNIT_ASSERT(MakePlan(true)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + AssertTaskPlanSourceHasSourceTask(1, 0, 2, 0); + AssertTaskPlanSourceHasSourceTask(2, 0, 1, 0); + + 
SwapGraphs(); + UNIT_ASSERT(MakePlan(false)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + UNIT_ASSERT(MakePlan(true)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + } + + Y_UNIT_TEST(AllTopicsMustBeUsedInNonForceMode) { + SrcGraph + .Task() + .Input().TopicSource("t", 1, 1, 0).Build() + .Build() + .Task() + .Input().TopicSource("p", 1, 1, 0).Build() + .Build(); + + DstGraph + .Task() + .Input().TopicSource("t", 1, 1, 0).Build() + .Build(); + + UNIT_ASSERT(!MakePlan(false)); + UNIT_ASSERT(MakePlan(true)); + + SwapGraphs(); + UNIT_ASSERT(!MakePlan(false)); + UNIT_ASSERT(MakePlan(true)); + AssertTaskPlanIsEmpty(2); + } + + Y_UNIT_TEST(NotMappedAllPartitionsIsOk) { + SrcGraph + .Task() + .Input().TopicSource("t", 5, 1, 0).Build() + .Build(); + + DstGraph + .Task() + .Input().TopicSource("t", 10, 2, 0).Build() + .Build() + .Task() + .Input().TopicSource("t", 10, 2, 1).Build() + .Build(); + + UNIT_ASSERT(MakePlan(false)); + UNIT_ASSERT_UNEQUAL(Issues.Size(), 0); + UNIT_ASSERT(MakePlan(true)); + UNIT_ASSERT_UNEQUAL(Issues.Size(), 0); + AssertTaskPlanSourceHasSourceTask(1, 0, 1, 0); + AssertTaskPlanSourceHasSourceTask(2, 0, 1, 0); + + SwapGraphs(); + UNIT_ASSERT(MakePlan(false)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + UNIT_ASSERT(MakePlan(true)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + } + + Y_UNIT_TEST(ReadPartitionInSeveralPlacesIsOk) { + SrcGraph + .Task() + .Input().TopicSource("t", 5, 1, 0).Build() + .Build(); + + DstGraph + .Task() + .Input().TopicSource("t", 5, 1, 0).Build() + .Build() + .Task() + .Input().TopicSource("t", 5, 2, 0).Build() + .Build() + .Task() + .Input().TopicSource("t", 5, 2, 1).Build() + .Build(); + + UNIT_ASSERT(MakePlan(false)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + UNIT_ASSERT(MakePlan(true)); + UNIT_ASSERT_VALUES_EQUAL(Issues.Size(), 0); + + AssertTaskPlanSourceHasSourceTask(1, 0, 1, 0); + AssertTaskPlanSourceHasSourceTask(2, 0, 1, 0); + AssertTaskPlanSourceHasSourceTask(3, 0, 1, 0); + + SwapGraphs(); + UNIT_ASSERT(!MakePlan(false)); + } + + Y_UNIT_TEST(MapSeveralReadingsToOneIsAllowedOnlyInForceMode) { + SrcGraph + .Task() + .Input().TopicSource("t", 5, 1, 0).Build() + .Build() + .Task() + .Input().TopicSource("t", 5, 1, 0).Build() + .Build(); + + DstGraph + .Task() + .Input().TopicSource("t", 5, 1, 0).Build() + .Build() + .Task() + .Input().TopicSource("t", 5, 2, 0).Build() + .Build() + .Task() + .Input().TopicSource("t", 5, 2, 1).Build() + .Build(); + + UNIT_ASSERT(!MakePlan(false)); + UNIT_ASSERT(MakePlan(true)); + UNIT_ASSERT_UNEQUAL(Issues.Size(), 0); + } +} + +} // namespace NYql::NDq diff --git a/ydb/library/yql/dq/state/ut/ya.make b/ydb/library/yql/dq/state/ut/ya.make index a5dd59b32ca..773a926a9eb 100644 --- a/ydb/library/yql/dq/state/ut/ya.make +++ b/ydb/library/yql/dq/state/ut/ya.make @@ -1,14 +1,14 @@ UNITTEST_FOR(ydb/library/yql/dq/state) - -OWNER( - g:yql - g:yql_ydb_core -) - -SRCS( - dq_state_load_plan_ut.cpp -) - -YQL_LAST_ABI_VERSION() - -END() + +OWNER( + g:yql + g:yql_ydb_core +) + +SRCS( + dq_state_load_plan_ut.cpp +) + +YQL_LAST_ABI_VERSION() + +END() diff --git a/ydb/library/yql/dq/state/ya.make b/ydb/library/yql/dq/state/ya.make index f92d07a4612..19ee666ed8c 100644 --- a/ydb/library/yql/dq/state/ya.make +++ b/ydb/library/yql/dq/state/ya.make @@ -1,24 +1,24 @@ -LIBRARY() - -OWNER( - g:yql - g:yql_ydb_core -) - -PEERDIR( - ydb/library/yql/public/issue +LIBRARY() + +OWNER( + g:yql + g:yql_ydb_core +) + +PEERDIR( + ydb/library/yql/public/issue ydb/library/yql/core/issue/protos ydb/library/yql/dq/proto 
ydb/library/yql/providers/pq/proto ydb/library/yql/providers/pq/task_meta -) - -SRCS( - dq_state_load_plan.cpp -) - -END() - -RECURSE_FOR_TESTS( - ut -) +) + +SRCS( + dq_state_load_plan.cpp +) + +END() + +RECURSE_FOR_TESTS( + ut +) diff --git a/ydb/library/yql/dq/tasks/dq_tasks_graph.h b/ydb/library/yql/dq/tasks/dq_tasks_graph.h index e5c772310bf..46a4ac8e0e7 100644 --- a/ydb/library/yql/dq/tasks/dq_tasks_graph.h +++ b/ydb/library/yql/dq/tasks/dq_tasks_graph.h @@ -87,7 +87,7 @@ struct TChannel { ui64 DstTask = 0; ui32 DstInputIndex = 0; bool InMemory = true; - NDqProto::ECheckpointingMode CheckpointingMode = NDqProto::CHECKPOINTING_MODE_DEFAULT; + NDqProto::ECheckpointingMode CheckpointingMode = NDqProto::CHECKPOINTING_MODE_DEFAULT; }; using TChannelList = TVector<ui64>; @@ -120,8 +120,8 @@ template <class TInputMeta> struct TTaskInput { std::variant<std::monostate, TMergeTaskInput> ConnectionInfo; TChannelList Channels; - TMaybe<::google::protobuf::Any> SourceSettings; - TString SourceType; + TMaybe<::google::protobuf::Any> SourceSettings; + TString SourceType; TInputMeta Meta; TTaskInputType Type() const { @@ -136,7 +136,7 @@ struct TTaskOutputType { HashPartition, Broadcast, Effects, - Sink, + Sink, COMMON_TASK_OUTPUT_TYPE_END }; }; @@ -147,8 +147,8 @@ struct TTaskOutput { NYql::NDq::TChannelList Channels; TVector<TString> KeyColumns; ui32 PartitionsCount = 0; - TMaybe<::google::protobuf::Any> SinkSettings; - TString SinkType; + TMaybe<::google::protobuf::Any> SinkSettings; + TString SinkType; TOutputMeta Meta; }; @@ -175,7 +175,7 @@ struct TTask { TVector<TOutputType> Outputs; NActors::TActorId ComputeActorId; TTaskMeta Meta; - NDqProto::ECheckpointingMode CheckpointingMode = NDqProto::CHECKPOINTING_MODE_DEFAULT; + NDqProto::ECheckpointingMode CheckpointingMode = NDqProto::CHECKPOINTING_MODE_DEFAULT; TTransform OutputTransform; }; diff --git a/ydb/library/yql/dq/type_ann/dq_type_ann.cpp b/ydb/library/yql/dq/type_ann/dq_type_ann.cpp index fcbd3bb9ecf..975974a3149 100644 --- a/ydb/library/yql/dq/type_ann/dq_type_ann.cpp +++ b/ydb/library/yql/dq/type_ann/dq_type_ann.cpp @@ -716,59 +716,59 @@ TStatus AnnotateDqCrossJoin(const TExprNode::TPtr& input, TExprContext& ctx) { return TStatus::Ok; } -TStatus AnnotateDqSource(const TExprNode::TPtr& input, TExprContext& ctx) { - if (!EnsureArgsCount(*input, 2, ctx)) { - return TStatus::Error; - } - - const TExprNode* dataSourceChild = input->Child(TDqSource::idx_DataSource); - if (!EnsureDataSource(*dataSourceChild, ctx)) { - return TStatus::Error; - } - - const TExprNode* settingsChild = input->Child(TDqSource::idx_Settings); - if (!EnsureCallable(*settingsChild, ctx)) { - return TStatus::Error; - } - - input->SetTypeAnn(settingsChild->GetTypeAnn()); - return TStatus::Ok; -} - -TStatus AnnotateDqSink(const TExprNode::TPtr& input, TExprContext& ctx) { - if (!EnsureArgsCount(*input, 3, ctx)) { - return TStatus::Error; - } - - const TExprNode* dataSinkChild = input->Child(TDqSink::idx_DataSink); - if (!EnsureDataSink(*dataSinkChild, ctx)) { - return TStatus::Error; - } - - const TExprNode* settingsChild = input->Child(TDqSink::idx_Settings); - if (!EnsureCallable(*settingsChild, ctx)) { - return TStatus::Error; - } - - const TExprNode* indexChild = input->Child(TDqSink::idx_Index); - if (!EnsureAtom(*indexChild, ctx)) { - return TStatus::Error; - } - - input->SetTypeAnn(settingsChild->GetTypeAnn()); - return TStatus::Ok; -} - -TStatus AnnotateDqQuery(const TExprNode::TPtr& input, TExprContext& ctx) { - if (!EnsureArgsCount(*input, 2, ctx)) { - return 
TStatus::Error; - } - - TDqQuery query(input); - input->SetTypeAnn(query.World().Ref().GetTypeAnn()); - return TStatus::Ok; -} - +TStatus AnnotateDqSource(const TExprNode::TPtr& input, TExprContext& ctx) { + if (!EnsureArgsCount(*input, 2, ctx)) { + return TStatus::Error; + } + + const TExprNode* dataSourceChild = input->Child(TDqSource::idx_DataSource); + if (!EnsureDataSource(*dataSourceChild, ctx)) { + return TStatus::Error; + } + + const TExprNode* settingsChild = input->Child(TDqSource::idx_Settings); + if (!EnsureCallable(*settingsChild, ctx)) { + return TStatus::Error; + } + + input->SetTypeAnn(settingsChild->GetTypeAnn()); + return TStatus::Ok; +} + +TStatus AnnotateDqSink(const TExprNode::TPtr& input, TExprContext& ctx) { + if (!EnsureArgsCount(*input, 3, ctx)) { + return TStatus::Error; + } + + const TExprNode* dataSinkChild = input->Child(TDqSink::idx_DataSink); + if (!EnsureDataSink(*dataSinkChild, ctx)) { + return TStatus::Error; + } + + const TExprNode* settingsChild = input->Child(TDqSink::idx_Settings); + if (!EnsureCallable(*settingsChild, ctx)) { + return TStatus::Error; + } + + const TExprNode* indexChild = input->Child(TDqSink::idx_Index); + if (!EnsureAtom(*indexChild, ctx)) { + return TStatus::Error; + } + + input->SetTypeAnn(settingsChild->GetTypeAnn()); + return TStatus::Ok; +} + +TStatus AnnotateDqQuery(const TExprNode::TPtr& input, TExprContext& ctx) { + if (!EnsureArgsCount(*input, 2, ctx)) { + return TStatus::Error; + } + + TDqQuery query(input); + input->SetTypeAnn(query.World().Ref().GetTypeAnn()); + return TStatus::Ok; +} + THolder<IGraphTransformer> CreateDqTypeAnnotationTransformer(TTypeAnnotationContext& typesCtx) { auto coreTransformer = CreateExtCallableTypeAnnotationTransformer(typesCtx); @@ -840,18 +840,18 @@ THolder<IGraphTransformer> CreateDqTypeAnnotationTransformer(TTypeAnnotationCont return AnnotateDqCrossJoin(input, ctx); } - if (TDqSource::Match(input.Get())) { - return AnnotateDqSource(input, ctx); - } - - if (TDqSink::Match(input.Get())) { - return AnnotateDqSink(input, ctx); - } - - if (TDqQuery::Match(input.Get())) { - return AnnotateDqQuery(input, ctx); - } - + if (TDqSource::Match(input.Get())) { + return AnnotateDqSource(input, ctx); + } + + if (TDqSink::Match(input.Get())) { + return AnnotateDqSink(input, ctx); + } + + if (TDqQuery::Match(input.Get())) { + return AnnotateDqQuery(input, ctx); + } + if (TDqPrecompute::Match(input.Get())) { return AnnotateDqPrecompute(input, ctx); } diff --git a/ydb/library/yql/dq/type_ann/dq_type_ann.h b/ydb/library/yql/dq/type_ann/dq_type_ann.h index 03aefe701ff..82b576413d4 100644 --- a/ydb/library/yql/dq/type_ann/dq_type_ann.h +++ b/ydb/library/yql/dq/type_ann/dq_type_ann.h @@ -19,9 +19,9 @@ IGraphTransformer::TStatus AnnotateDqCnMerge(const TExprNode::TPtr& input, TExpr IGraphTransformer::TStatus AnnotateDqJoin(const TExprNode::TPtr& input, TExprContext& ctx); IGraphTransformer::TStatus AnnotateDqMapOrDictJoin(const TExprNode::TPtr& input, TExprContext& ctx); IGraphTransformer::TStatus AnnotateDqCrossJoin(const TExprNode::TPtr& input, TExprContext& ctx); -IGraphTransformer::TStatus AnnotateDqSource(const TExprNode::TPtr& input, TExprContext& ctx); -IGraphTransformer::TStatus AnnotateDqSink(const TExprNode::TPtr& input, TExprContext& ctx); -IGraphTransformer::TStatus AnnotateDqQuery(const TExprNode::TPtr& input, TExprContext& ctx); +IGraphTransformer::TStatus AnnotateDqSource(const TExprNode::TPtr& input, TExprContext& ctx); +IGraphTransformer::TStatus AnnotateDqSink(const TExprNode::TPtr& input, 
TExprContext& ctx); +IGraphTransformer::TStatus AnnotateDqQuery(const TExprNode::TPtr& input, TExprContext& ctx); THolder<IGraphTransformer> CreateDqTypeAnnotationTransformer(NYql::TTypeAnnotationContext& typesCtx); diff --git a/ydb/library/yql/dq/ya.make b/ydb/library/yql/dq/ya.make index c206ab90ef1..df1b27ef4f4 100644 --- a/ydb/library/yql/dq/ya.make +++ b/ydb/library/yql/dq/ya.make @@ -6,7 +6,7 @@ RECURSE( opt proto runtime - state + state tasks type_ann ) diff --git a/ydb/library/yql/minikql/comp_nodes/mkql_hopping.cpp b/ydb/library/yql/minikql/comp_nodes/mkql_hopping.cpp index 27d1e000ac8..a10b90406be 100644 --- a/ydb/library/yql/minikql/comp_nodes/mkql_hopping.cpp +++ b/ydb/library/yql/minikql/comp_nodes/mkql_hopping.cpp @@ -6,16 +6,16 @@ #include <ydb/library/yql/minikql/mkql_stats_registry.h> #include <ydb/library/yql/minikql/mkql_string_util.h> -#include <util/generic/scope.h> - +#include <util/generic/scope.h> + namespace NKikimr { namespace NMiniKQL { namespace { -const TStatKey Hop_NewHopsCount("Hop_NewHopsCount", true); -const TStatKey Hop_ThrownEventsCount("Hop_ThrownEventsCount", true); - +const TStatKey Hop_NewHopsCount("Hop_NewHopsCount", true); +const TStatKey Hop_ThrownEventsCount("Hop_ThrownEventsCount", true); + class THoppingCoreWrapper : public TMutableComputationNode<THoppingCoreWrapper> { typedef TMutableComputationNode<THoppingCoreWrapper> TBaseComputation; public: @@ -106,17 +106,17 @@ public: return NUdf::EFetchStatus::Finish; } - i64 thrownEvents = 0; - i64 newHops = 0; - Y_DEFER { - if (thrownEvents) { - MKQL_ADD_STAT(Ctx.Stats, Hop_ThrownEventsCount, thrownEvents); - } - if (newHops) { - MKQL_ADD_STAT(Ctx.Stats, Hop_NewHopsCount, newHops); - } - }; - + i64 thrownEvents = 0; + i64 newHops = 0; + Y_DEFER { + if (thrownEvents) { + MKQL_ADD_STAT(Ctx.Stats, Hop_ThrownEventsCount, thrownEvents); + } + if (newHops) { + MKQL_ADD_STAT(Ctx.Stats, Hop_NewHopsCount, newHops); + } + }; + for (NUdf::TUnboxedValue item;;) { if (!Ready.empty()) { result = std::move(Ready.front()); @@ -179,7 +179,7 @@ public: Ready.emplace_back(Self->OutFinish->GetValue(Ctx)); } - ++newHops; + ++newHops; ++HopIndex; } @@ -192,8 +192,8 @@ public: Self->State->SetValue(Ctx, NUdf::TUnboxedValue(bucket.Value)); bucket.Value = Self->OutUpdate->GetValue(Ctx); } - } else { - ++thrownEvents; + } else { + ++thrownEvents; } } } diff --git a/ydb/library/yql/minikql/comp_nodes/mkql_multihopping.cpp b/ydb/library/yql/minikql/comp_nodes/mkql_multihopping.cpp index cb2b13f86d0..510cb6ba832 100644 --- a/ydb/library/yql/minikql/comp_nodes/mkql_multihopping.cpp +++ b/ydb/library/yql/minikql/comp_nodes/mkql_multihopping.cpp @@ -24,8 +24,8 @@ constexpr ui32 StateVersion = 1; using TEqualsFunc = std::function<bool(NUdf::TUnboxedValuePod, NUdf::TUnboxedValuePod)>; using THashFunc = std::function<NYql::NUdf::THashType(NUdf::TUnboxedValuePod)>; -class TMultiHoppingCoreWrapper : public TStatefulSourceComputationNode<TMultiHoppingCoreWrapper, true> { - using TBaseComputation = TStatefulSourceComputationNode<TMultiHoppingCoreWrapper, true>; +class TMultiHoppingCoreWrapper : public TStatefulSourceComputationNode<TMultiHoppingCoreWrapper, true> { + using TBaseComputation = TStatefulSourceComputationNode<TMultiHoppingCoreWrapper, true>; public: using TSelf = TMultiHoppingCoreWrapper; @@ -402,7 +402,7 @@ public: Y_VERIFY(!encoded, "TODO"); } - NUdf::TUnboxedValuePod CreateStream(TComputationContext& ctx) const { + NUdf::TUnboxedValuePod CreateStream(TComputationContext& ctx) const { const auto hopTime = 
Hop->GetValue(ctx).Get<i64>(); const auto interval = Interval->GetValue(ctx).Get<i64>(); const auto delay = Delay->GetValue(ctx).Get<i64>(); @@ -426,21 +426,21 @@ public: TValueEqual(KeyTypes, IsTuple)); } - NUdf::TUnboxedValue GetValue(TComputationContext& compCtx) const override { - NUdf::TUnboxedValue& valueRef = ValueRef(compCtx); - if (valueRef.IsInvalid()) { - // Create new. - valueRef = CreateStream(compCtx); - } else if (valueRef.HasValue() && !valueRef.IsBoxed()) { - // Load from saved state. - NUdf::TUnboxedValue stream = CreateStream(compCtx); - stream.Load(valueRef.AsStringRef()); - valueRef = stream; - } - - return valueRef; - } - + NUdf::TUnboxedValue GetValue(TComputationContext& compCtx) const override { + NUdf::TUnboxedValue& valueRef = ValueRef(compCtx); + if (valueRef.IsInvalid()) { + // Create new. + valueRef = CreateStream(compCtx); + } else if (valueRef.HasValue() && !valueRef.IsBoxed()) { + // Load from saved state. + NUdf::TUnboxedValue stream = CreateStream(compCtx); + stream.Load(valueRef.AsStringRef()); + valueRef = stream; + } + + return valueRef; + } + private: void RegisterDependencies() const final { DependsOn(Stream); diff --git a/ydb/library/yql/minikql/comp_nodes/mkql_saveload.h b/ydb/library/yql/minikql/comp_nodes/mkql_saveload.h index 0440994f8d2..8db618f9ae3 100644 --- a/ydb/library/yql/minikql/comp_nodes/mkql_saveload.h +++ b/ydb/library/yql/minikql/comp_nodes/mkql_saveload.h @@ -6,8 +6,8 @@ #include <util/generic/strbuf.h> -#include <string_view> - +#include <string_view> + namespace NKikimr { namespace NMiniKQL { @@ -59,19 +59,19 @@ Y_FORCE_INLINE ui64 ReadUi64(TStringBuf& in) { return result; } -Y_FORCE_INLINE void WriteString(TString& out, std::string_view str) { - WriteUi32(out, str.size()); - out.AppendNoAlias(str.data(), str.size()); -} - -Y_FORCE_INLINE std::string_view ReadString(TStringBuf& in) { - const ui32 size = ReadUi32(in); - MKQL_ENSURE(in.Size() >= size, "Serialized state is corrupted"); - TStringBuf head = in.Head(size); - in = in.Tail(size); - return head; -} - +Y_FORCE_INLINE void WriteString(TString& out, std::string_view str) { + WriteUi32(out, str.size()); + out.AppendNoAlias(str.data(), str.size()); +} + +Y_FORCE_INLINE std::string_view ReadString(TStringBuf& in) { + const ui32 size = ReadUi32(in); + MKQL_ENSURE(in.Size() >= size, "Serialized state is corrupted"); + TStringBuf head = in.Head(size); + in = in.Tail(size); + return head; +} + Y_FORCE_INLINE void WriteUnboxedValue(TString& out, const TValuePacker& packer, const NUdf::TUnboxedValue& value) { auto state = packer.Pack(value); WriteUi32(out, state.size()); diff --git a/ydb/library/yql/minikql/comp_nodes/ut/mkql_multihopping_ut.cpp b/ydb/library/yql/minikql/comp_nodes/ut/mkql_multihopping_ut.cpp index 719ba1f6ea1..df3726f47fd 100644 --- a/ydb/library/yql/minikql/comp_nodes/ut/mkql_multihopping_ut.cpp +++ b/ydb/library/yql/minikql/comp_nodes/ut/mkql_multihopping_ut.cpp @@ -256,7 +256,7 @@ Y_UNIT_TEST_SUITE(TMiniKQLMultiHoppingTest) { status = root1.Fetch(val); if (status == NUdf::EFetchStatus::Ok) { curResult.emplace_back(TOutputItem{val.GetElement(0).Get<ui32>(), val.GetElement(1).Get<ui32>(), val.GetElement(2).Get<ui64>()}); - } + } } check(); @@ -284,7 +284,7 @@ Y_UNIT_TEST_SUITE(TMiniKQLMultiHoppingTest) { }; TestImpl(input, expected, true); } - + Y_UNIT_TEST(TestValidness1) { const std::vector<TInputItem> input1 = { // Group; Time; Value @@ -355,8 +355,8 @@ Y_UNIT_TEST_SUITE(TMiniKQLMultiHoppingTest) { }; TestImpl(input, expected, true, 5, 10, 10); - } - + } + 
Y_UNIT_TEST(TestDelay) { const std::vector<TInputItem> input = { // Group; Time; Value @@ -370,7 +370,7 @@ Y_UNIT_TEST_SUITE(TMiniKQLMultiHoppingTest) { }; TestImpl(input, expected, false); - } + } Y_UNIT_TEST(TestWindowsBeforeFirstElement) { const std::vector<TInputItem> input = { diff --git a/ydb/library/yql/minikql/computation/mkql_computation_node.h b/ydb/library/yql/minikql/computation/mkql_computation_node.h index a5d11b180b0..590d2c79ef8 100644 --- a/ydb/library/yql/minikql/computation/mkql_computation_node.h +++ b/ydb/library/yql/minikql/computation/mkql_computation_node.h @@ -62,7 +62,7 @@ struct TComputationOptsFull: public TComputationOpts { struct TComputationMutables { ui32 CurValueIndex = 0U; - std::vector<ui32> SerializableValues; // Indices of values that need to be saved in IComputationGraph::SaveGraphState() and restored in IComputationGraph::LoadGraphState(). + std::vector<ui32> SerializableValues; // Indices of values that need to be saved in IComputationGraph::SaveGraphState() and restored in IComputationGraph::LoadGraphState(). }; class THolderFactory; @@ -112,8 +112,8 @@ public: virtual ~IComputationNode() {} - virtual void InitNode(TComputationContext&) const = 0; - + virtual void InitNode(TComputationContext&) const = 0; + virtual NUdf::TUnboxedValue GetValue(TComputationContext& compCtx) const = 0; virtual IComputationNode* AddDependence(const IComputationNode* node) = 0; @@ -191,8 +191,8 @@ public: virtual const THolderFactory& GetHolderFactory() const = 0; virtual ITerminator* GetTerminator() const = 0; virtual bool SetExecuteLLVM(bool value) = 0; - virtual TString SaveGraphState() = 0; - virtual void LoadGraphState(TStringBuf state) = 0; + virtual TString SaveGraphState() = 0; + virtual void LoadGraphState(TStringBuf state) = 0; }; class TNodeFactory; diff --git a/ydb/library/yql/minikql/computation/mkql_computation_node_codegen.h b/ydb/library/yql/minikql/computation/mkql_computation_node_codegen.h index d9feede204e..495ede0134a 100644 --- a/ydb/library/yql/minikql/computation/mkql_computation_node_codegen.h +++ b/ydb/library/yql/minikql/computation/mkql_computation_node_codegen.h @@ -45,8 +45,8 @@ using TStatelessFlowCodegeneratorNode = TStatelessFlowComputationNode<TDerived>; template <typename TDerived> using TStatelessWideFlowCodegeneratorNode = TStatelessWideFlowComputationNode<TDerived>; -template <typename TDerived, bool SerializableState = false> -using TStatefulWideFlowCodegeneratorNode = TStatefulWideFlowComputationNode<TDerived, SerializableState>; +template <typename TDerived, bool SerializableState = false> +using TStatefulWideFlowCodegeneratorNode = TStatefulWideFlowComputationNode<TDerived, SerializableState>; template <typename TDerived> using TPairStateWideFlowCodegeneratorNode = TPairStateWideFlowComputationNode<TDerived>; @@ -54,8 +54,8 @@ using TPairStateWideFlowCodegeneratorNode = TPairStateWideFlowComputationNode<TD template <typename TDerived> using TStatelessFlowCodegeneratorRootNode = TStatelessFlowComputationNode<TDerived>; -template <typename TDerived, bool SerializableState = false> -using TStatefulFlowCodegeneratorNode = TStatefulFlowComputationNode<TDerived, SerializableState>; +template <typename TDerived, bool SerializableState = false> +using TStatefulFlowCodegeneratorNode = TStatefulFlowComputationNode<TDerived, SerializableState>; template <typename TDerived> using TPairStateFlowCodegeneratorNode = TPairStateFlowComputationNode<TDerived>; @@ -490,10 +490,10 @@ protected: } }; -template <typename TDerived, bool 
SerializableState = false> -class TStatefulFlowCodegeneratorNode: public TStatefulFlowComputationNode<TDerived, SerializableState>, public ICodegeneratorInlineNode +template <typename TDerived, bool SerializableState = false> +class TStatefulFlowCodegeneratorNode: public TStatefulFlowComputationNode<TDerived, SerializableState>, public ICodegeneratorInlineNode { -using TBase = TStatefulFlowComputationNode<TDerived, SerializableState>; +using TBase = TStatefulFlowComputationNode<TDerived, SerializableState>; protected: TStatefulFlowCodegeneratorNode(TComputationMutables& mutables, const IComputationNode* source, EValueRepresentation kind, EValueRepresentation stateKind = EValueRepresentation::Any) : TBase(mutables, source, kind, stateKind) @@ -514,10 +514,10 @@ protected: } }; -template <typename TDerived, bool SerializableState = false> -class TStatefulWideFlowCodegeneratorNode: public TStatefulWideFlowComputationNode<TDerived, SerializableState>, public ICodegeneratorInlineWideNode +template <typename TDerived, bool SerializableState = false> +class TStatefulWideFlowCodegeneratorNode: public TStatefulWideFlowComputationNode<TDerived, SerializableState>, public ICodegeneratorInlineWideNode { -using TBase = TStatefulWideFlowComputationNode<TDerived, SerializableState>; +using TBase = TStatefulWideFlowComputationNode<TDerived, SerializableState>; protected: TStatefulWideFlowCodegeneratorNode(TComputationMutables& mutables, const IComputationNode* source, EValueRepresentation stateKind) : TBase(mutables, source, stateKind) diff --git a/ydb/library/yql/minikql/computation/mkql_computation_node_graph.cpp b/ydb/library/yql/minikql/computation/mkql_computation_node_graph.cpp index f2b188ae47c..253ab8c7eae 100644 --- a/ydb/library/yql/minikql/computation/mkql_computation_node_graph.cpp +++ b/ydb/library/yql/minikql/computation/mkql_computation_node_graph.cpp @@ -559,9 +559,9 @@ public: PatternNodes->GetMutables(), *ArrowMemoryPool)); ValueBuilder->SetCalleePositionHolder(Ctx->CalleePosition); - for (auto& node : PatternNodes->GetNodes()) { - node->InitNode(*Ctx); - } + for (auto& node : PatternNodes->GetNodes()) { + node->InitNode(*Ctx); + } IsPrepared = true; } } @@ -607,44 +607,44 @@ public: return old; } - TString SaveGraphState() override { - Prepare(); - - TString result; - for (ui32 i : PatternNodes->GetMutables().SerializableValues) { - const NUdf::TUnboxedValuePod& mutableValue = Ctx->MutableValues[i]; - if (mutableValue.IsInvalid()) { - WriteUi32(result, std::numeric_limits<ui32>::max()); // -1. - } else if (mutableValue.IsBoxed()) { - NUdf::TUnboxedValue saved = mutableValue.Save(); - const TStringBuf savedBuf = saved.AsStringRef(); - WriteUi32(result, savedBuf.Size()); - result.AppendNoAlias(savedBuf.Data(), savedBuf.Size()); - } else { // No load was done during previous runs (if any). 
- MKQL_ENSURE(mutableValue.HasValue() && (mutableValue.IsString() || mutableValue.IsEmbedded()), "State is expected to have data or invalid value"); - const NUdf::TStringRef savedRef = mutableValue.AsStringRef(); - WriteUi32(result, savedRef.Size()); - result.AppendNoAlias(savedRef.Data(), savedRef.Size()); - } - } - return result; - } - - void LoadGraphState(TStringBuf state) override { - Prepare(); - - for (ui32 i : PatternNodes->GetMutables().SerializableValues) { - if (const ui32 size = ReadUi32(state); size != std::numeric_limits<ui32>::max()) { - MKQL_ENSURE(state.Size() >= size, "Serialized state is corrupted - buffer is too short (" << state.Size() << ") for specified size: " << size); - const NUdf::TStringRef savedRef(state.Data(), size); - Ctx->MutableValues[i] = MakeString(savedRef); - state.Skip(size); - } // else leave it Invalid() - } - - MKQL_ENSURE(state.Empty(), "Serialized state is corrupted - extra bytes left: " << state.Size()); - } - + TString SaveGraphState() override { + Prepare(); + + TString result; + for (ui32 i : PatternNodes->GetMutables().SerializableValues) { + const NUdf::TUnboxedValuePod& mutableValue = Ctx->MutableValues[i]; + if (mutableValue.IsInvalid()) { + WriteUi32(result, std::numeric_limits<ui32>::max()); // -1. + } else if (mutableValue.IsBoxed()) { + NUdf::TUnboxedValue saved = mutableValue.Save(); + const TStringBuf savedBuf = saved.AsStringRef(); + WriteUi32(result, savedBuf.Size()); + result.AppendNoAlias(savedBuf.Data(), savedBuf.Size()); + } else { // No load was done during previous runs (if any). + MKQL_ENSURE(mutableValue.HasValue() && (mutableValue.IsString() || mutableValue.IsEmbedded()), "State is expected to have data or invalid value"); + const NUdf::TStringRef savedRef = mutableValue.AsStringRef(); + WriteUi32(result, savedRef.Size()); + result.AppendNoAlias(savedRef.Data(), savedRef.Size()); + } + } + return result; + } + + void LoadGraphState(TStringBuf state) override { + Prepare(); + + for (ui32 i : PatternNodes->GetMutables().SerializableValues) { + if (const ui32 size = ReadUi32(state); size != std::numeric_limits<ui32>::max()) { + MKQL_ENSURE(state.Size() >= size, "Serialized state is corrupted - buffer is too short (" << state.Size() << ") for specified size: " << size); + const NUdf::TStringRef savedRef(state.Data(), size); + Ctx->MutableValues[i] = MakeString(savedRef); + state.Skip(size); + } // else leave it Invalid() + } + + MKQL_ENSURE(state.Empty(), "Serialized state is corrupted - extra bytes left: " << state.Size()); + } + private: const TPatternNodes::TPtr PatternNodes; const TIntrusivePtr<TMemoryUsageInfo> MemInfo; diff --git a/ydb/library/yql/minikql/computation/mkql_computation_node_impl.cpp b/ydb/library/yql/minikql/computation/mkql_computation_node_impl.cpp index 15777c32880..4611f90fc0b 100644 --- a/ydb/library/yql/minikql/computation/mkql_computation_node_impl.cpp +++ b/ydb/library/yql/minikql/computation/mkql_computation_node_impl.cpp @@ -82,36 +82,36 @@ EValueRepresentation TUnboxedImmutableComputationNode::GetRepresentation() const return RepresentationKind; } -template <class IComputationNodeInterface, bool SerializableState> -TStatefulComputationNode<IComputationNodeInterface, SerializableState>::TStatefulComputationNode(TComputationMutables& mutables, EValueRepresentation kind) +template <class IComputationNodeInterface, bool SerializableState> +TStatefulComputationNode<IComputationNodeInterface, SerializableState>::TStatefulComputationNode(TComputationMutables& mutables, EValueRepresentation kind) : 
ValueIndex(mutables.CurValueIndex++), RepresentationKind(kind) -{ - if constexpr (SerializableState) { - mutables.SerializableValues.push_back(ValueIndex); - } -} - -template <class IComputationNodeInterface, bool SerializableState> -IComputationNode* TStatefulComputationNode<IComputationNodeInterface, SerializableState>::AddDependence(const IComputationNode* node) { +{ + if constexpr (SerializableState) { + mutables.SerializableValues.push_back(ValueIndex); + } +} + +template <class IComputationNodeInterface, bool SerializableState> +IComputationNode* TStatefulComputationNode<IComputationNodeInterface, SerializableState>::AddDependence(const IComputationNode* node) { Dependencies.emplace_back(node); return this; } -template <class IComputationNodeInterface, bool SerializableState> -EValueRepresentation TStatefulComputationNode<IComputationNodeInterface, SerializableState>::GetRepresentation() const { +template <class IComputationNodeInterface, bool SerializableState> +EValueRepresentation TStatefulComputationNode<IComputationNodeInterface, SerializableState>::GetRepresentation() const { return RepresentationKind; } -template <class IComputationNodeInterface, bool SerializableState> -ui32 TStatefulComputationNode<IComputationNodeInterface, SerializableState>::GetIndex() const { return ValueIndex; } +template <class IComputationNodeInterface, bool SerializableState> +ui32 TStatefulComputationNode<IComputationNodeInterface, SerializableState>::GetIndex() const { return ValueIndex; } -template <class IComputationNodeInterface, bool SerializableState> -ui32 TStatefulComputationNode<IComputationNodeInterface, SerializableState>::GetDependencesCount() const { return Dependencies.size(); } +template <class IComputationNodeInterface, bool SerializableState> +ui32 TStatefulComputationNode<IComputationNodeInterface, SerializableState>::GetDependencesCount() const { return Dependencies.size(); } -template class TStatefulComputationNode<IComputationNode, false>; -template class TStatefulComputationNode<IComputationExternalNode, false>; -template class TStatefulComputationNode<IComputationNode, true>; -template class TStatefulComputationNode<IComputationExternalNode, true>; +template class TStatefulComputationNode<IComputationNode, false>; +template class TStatefulComputationNode<IComputationExternalNode, false>; +template class TStatefulComputationNode<IComputationNode, true>; +template class TStatefulComputationNode<IComputationExternalNode, true>; void TExternalComputationNode::CollectDependentIndexes(const IComputationNode*, TIndexesMap& map) const { map.emplace(ValueIndex, RepresentationKind); diff --git a/ydb/library/yql/minikql/computation/mkql_computation_node_impl.h b/ydb/library/yql/minikql/computation/mkql_computation_node_impl.h index 0a63ca9d349..82b1af0b4fd 100644 --- a/ydb/library/yql/minikql/computation/mkql_computation_node_impl.h +++ b/ydb/library/yql/minikql/computation/mkql_computation_node_impl.h @@ -42,8 +42,8 @@ public: ~TUnboxedImmutableComputationNode(); private: - void InitNode(TComputationContext&) const override {} - + void InitNode(TComputationContext&) const override {} + NUdf::TUnboxedValue GetValue(TComputationContext& compCtx) const final; const IComputationNode* GetSource() const final; @@ -75,15 +75,15 @@ protected: const EValueRepresentation RepresentationKind; }; -template <class IComputationNodeInterface, bool SerializableState = false> +template <class IComputationNodeInterface, bool SerializableState = false> class TStatefulComputationNode: public 
TRefCountedComputationNode<IComputationNodeInterface> { protected: TStatefulComputationNode(TComputationMutables& mutables, EValueRepresentation kind); protected: - void InitNode(TComputationContext&) const override {} - + void InitNode(TComputationContext&) const override {} + IComputationNode* AddDependence(const IComputationNode* node) final; EValueRepresentation GetRepresentation() const override; @@ -125,7 +125,7 @@ private: void PrepareStageOne() final; void PrepareStageTwo() final; - void CollectDependentIndexes(const IComputationNode* owner, TIndexesMap& dependencies) const final; + void CollectDependentIndexes(const IComputationNode* owner, TIndexesMap& dependencies) const final; bool IsTemporaryValue() const final; @@ -141,10 +141,10 @@ protected: TGetter Getter; }; -template <typename TDerived, bool SerializableState = false> -class TStatefulSourceComputationNode: public TStatefulComputationNode<IComputationNode, SerializableState> +template <typename TDerived, bool SerializableState = false> +class TStatefulSourceComputationNode: public TStatefulComputationNode<IComputationNode, SerializableState> { - using TStatefulComputationNode = TStatefulComputationNode<IComputationNode, SerializableState>; + using TStatefulComputationNode = TStatefulComputationNode<IComputationNode, SerializableState>; private: bool IsTemporaryValue() const final { return *Stateless; @@ -156,29 +156,29 @@ private: void PrepareStageOne() final { if (!Stateless) { - Stateless = std::accumulate(this->Dependencies.cbegin(), this->Dependencies.cend(), 0, + Stateless = std::accumulate(this->Dependencies.cbegin(), this->Dependencies.cend(), 0, std::bind(std::plus<i32>(), std::placeholders::_1, std::bind(&IComputationNode::GetDependencyWeight, std::placeholders::_2))) <= 1; } } void PrepareStageTwo() final {} - void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { + void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { if (this == owner) return; - if (const auto ins = dependencies.emplace(this->ValueIndex, this->RepresentationKind); ins.second) { - std::for_each(this->Dependencies.cbegin(), this->Dependencies.cend(), std::bind(&IComputationNode::CollectDependentIndexes, std::placeholders::_1, owner, std::ref(dependencies))); + if (const auto ins = dependencies.emplace(this->ValueIndex, this->RepresentationKind); ins.second) { + std::for_each(this->Dependencies.cbegin(), this->Dependencies.cend(), std::bind(&IComputationNode::CollectDependentIndexes, std::placeholders::_1, owner, std::ref(dependencies))); if (*Stateless) { - dependencies.erase(ins.first); + dependencies.erase(ins.first); } } } const IComputationNode* GetSource() const final { return this; } protected: - TStatefulSourceComputationNode(TComputationMutables& mutables, EValueRepresentation kind = EValueRepresentation::Any) + TStatefulSourceComputationNode(TComputationMutables& mutables, EValueRepresentation kind = EValueRepresentation::Any) : TStatefulComputationNode(mutables, kind) {} @@ -205,23 +205,23 @@ protected: }; template <typename TDerived> -class TMutableComputationNode: public TStatefulSourceComputationNode<TDerived> { -protected: - using TStatefulSourceComputationNode<TDerived>::TStatefulSourceComputationNode; - - NUdf::TUnboxedValue GetValue(TComputationContext& compCtx) const override { - if (*this->Stateless) - return static_cast<const TDerived*>(this)->DoCalculate(compCtx); - NUdf::TUnboxedValue& valueRef = 
this->ValueRef(compCtx); - if (valueRef.IsInvalid()) { - valueRef = static_cast<const TDerived*>(this)->DoCalculate(compCtx); - } - - return valueRef; - } -}; - -template <typename TDerived> +class TMutableComputationNode: public TStatefulSourceComputationNode<TDerived> { +protected: + using TStatefulSourceComputationNode<TDerived>::TStatefulSourceComputationNode; + + NUdf::TUnboxedValue GetValue(TComputationContext& compCtx) const override { + if (*this->Stateless) + return static_cast<const TDerived*>(this)->DoCalculate(compCtx); + NUdf::TUnboxedValue& valueRef = this->ValueRef(compCtx); + if (valueRef.IsInvalid()) { + valueRef = static_cast<const TDerived*>(this)->DoCalculate(compCtx); + } + + return valueRef; + } +}; + +template <typename TDerived> class TFlowSourceComputationNode: public TStatefulComputationNode<IComputationNode> { protected: @@ -255,12 +255,12 @@ private: return this->Dependencies.size() + Sources.size(); } - void CollectDependentIndexes(const IComputationNode* owner, IComputationExternalNode::TIndexesMap& dependencies) const final { + void CollectDependentIndexes(const IComputationNode* owner, IComputationExternalNode::TIndexesMap& dependencies) const final { if (this == owner) return; - if (dependencies.emplace(TStatefulComputationNode<IComputationNode>::ValueIndex, TStatefulComputationNode<IComputationNode>::RepresentationKind).second) { - std::for_each(this->Dependencies.cbegin(), this->Dependencies.cend(), std::bind(&IComputationNode::CollectDependentIndexes, std::placeholders::_1, owner, std::ref(dependencies))); + if (dependencies.emplace(TStatefulComputationNode<IComputationNode>::ValueIndex, TStatefulComputationNode<IComputationNode>::RepresentationKind).second) { + std::for_each(this->Dependencies.cbegin(), this->Dependencies.cend(), std::bind(&IComputationNode::CollectDependentIndexes, std::placeholders::_1, owner, std::ref(dependencies))); } } @@ -287,8 +287,8 @@ class TFlowBaseComputationNode: public TRefCountedComputationNode<IFlowInterface protected: TFlowBaseComputationNode(const IComputationNode* source) : Source(source) {} - void InitNode(TComputationContext&) const override {} - + void InitNode(TComputationContext&) const override {} + TString DebugString() const override { return TypeName<TDerived>(); } @@ -421,32 +421,32 @@ private: THROW yexception() << "Failed to get stateless node index."; } - void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { + void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { if (this == owner) return; if (this->Dependence) { - this->Dependence->CollectDependentIndexes(owner, dependencies); + this->Dependence->CollectDependentIndexes(owner, dependencies); } } }; -template <typename TDerived, bool SerializableState = false> +template <typename TDerived, bool SerializableState = false> class TStatefulFlowComputationNode: public TBaseFlowBaseComputationNode<TDerived> { protected: TStatefulFlowComputationNode(TComputationMutables& mutables, const IComputationNode* source, EValueRepresentation kind, EValueRepresentation stateKind) : TBaseFlowBaseComputationNode<TDerived>(source, kind), StateIndex(mutables.CurValueIndex++), StateKind(stateKind) - { - if constexpr (SerializableState) { - mutables.SerializableValues.push_back(StateIndex); - } - } + { + if constexpr (SerializableState) { + mutables.SerializableValues.push_back(StateIndex); + } + } NUdf::TUnboxedValue& RefState(TComputationContext& 
compCtx) const { return compCtx.MutableValues[GetIndex()]; } - + private: ui32 GetIndex() const final { return StateIndex; @@ -456,13 +456,13 @@ private: return static_cast<const TDerived*>(this)->DoCalculate(compCtx.MutableValues[StateIndex], compCtx); } - void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { + void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { if (this == owner) return; - const auto ins = dependencies.emplace(StateIndex, StateKind); + const auto ins = dependencies.emplace(StateIndex, StateKind); if (ins.second && this->Dependence) { - this->Dependence->CollectDependentIndexes(owner, dependencies); + this->Dependence->CollectDependentIndexes(owner, dependencies); } } @@ -491,14 +491,14 @@ private: return StateIndex; } - void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { + void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { if (this == owner) return; - const auto ins1 = dependencies.emplace(StateIndex, FirstKind); - const auto ins2 = dependencies.emplace(StateIndex + 1U, SecondKind); + const auto ins1 = dependencies.emplace(StateIndex, FirstKind); + const auto ins2 = dependencies.emplace(StateIndex + 1U, SecondKind); if (ins1.second && ins2.second && this->Dependence) { - this->Dependence->CollectDependentIndexes(owner, dependencies); + this->Dependence->CollectDependentIndexes(owner, dependencies); } } @@ -514,8 +514,8 @@ protected: TString DebugString() const final; private: - void InitNode(TComputationContext&) const override {} - + void InitNode(TComputationContext&) const override {} + EValueRepresentation GetRepresentation() const final; NUdf::TUnboxedValue GetValue(TComputationContext&) const final; @@ -589,27 +589,27 @@ private: THROW yexception() << "Failed to get stateless node index."; } - void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { + void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { if (this == owner) return; if (this->Dependence) { - this->Dependence->CollectDependentIndexes(owner, dependencies); + this->Dependence->CollectDependentIndexes(owner, dependencies); } } }; -template <typename TDerived, bool SerializableState = false> +template <typename TDerived, bool SerializableState = false> class TStatefulWideFlowComputationNode: public TWideFlowBaseComputationNode<TDerived> { protected: TStatefulWideFlowComputationNode(TComputationMutables& mutables, const IComputationNode* source, EValueRepresentation stateKind) : TWideFlowBaseComputationNode<TDerived>(source), StateIndex(mutables.CurValueIndex++), StateKind(stateKind) - { - if constexpr (SerializableState) { - mutables.SerializableValues.push_back(StateIndex); - } - } + { + if constexpr (SerializableState) { + mutables.SerializableValues.push_back(StateIndex); + } + } NUdf::TUnboxedValue& RefState(TComputationContext& compCtx) const { return compCtx.MutableValues[GetIndex()]; @@ -623,13 +623,13 @@ private: return StateIndex; } - void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { + void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { if (this == owner) return; - const auto 
ins = dependencies.emplace(StateIndex, StateKind); + const auto ins = dependencies.emplace(StateIndex, StateKind); if (ins.second && this->Dependence) { - this->Dependence->CollectDependentIndexes(owner, dependencies); + this->Dependence->CollectDependentIndexes(owner, dependencies); } } @@ -656,14 +656,14 @@ private: return StateIndex; } - void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { + void CollectDependentIndexes(const IComputationNode* owner, IComputationNode::TIndexesMap& dependencies) const final { if (this == owner) return; - const auto ins1 = dependencies.emplace(StateIndex, FirstKind); - const auto ins2 = dependencies.emplace(StateIndex + 1U, SecondKind); + const auto ins1 = dependencies.emplace(StateIndex, FirstKind); + const auto ins2 = dependencies.emplace(StateIndex + 1U, SecondKind); if (ins1.second && ins2.second && this->Dependence) { - this->Dependence->CollectDependentIndexes(owner, dependencies); + this->Dependence->CollectDependentIndexes(owner, dependencies); } } @@ -676,7 +676,7 @@ class TDecoratorComputationNode: public TRefCountedComputationNode<IComputationN { private: void InitNode(TComputationContext&) const final {} - + const IComputationNode* GetSource() const final { return Node; } IComputationNode* AddDependence(const IComputationNode* node) final { diff --git a/ydb/library/yql/providers/common/dq/yql_dq_integration_impl.cpp b/ydb/library/yql/providers/common/dq/yql_dq_integration_impl.cpp index 4da4d13d034..831f6d41871 100644 --- a/ydb/library/yql/providers/common/dq/yql_dq_integration_impl.cpp +++ b/ydb/library/yql/providers/common/dq/yql_dq_integration_impl.cpp @@ -42,18 +42,18 @@ bool TDqIntegrationBase::CanFallback() { return false; } -void TDqIntegrationBase::FillSourceSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sourceType) { - Y_UNUSED(node); - Y_UNUSED(settings); - Y_UNUSED(sourceType); -} - -void TDqIntegrationBase::FillSinkSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sinkType) { - Y_UNUSED(node); - Y_UNUSED(settings); - Y_UNUSED(sinkType); -} - +void TDqIntegrationBase::FillSourceSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sourceType) { + Y_UNUSED(node); + Y_UNUSED(settings); + Y_UNUSED(sourceType); +} + +void TDqIntegrationBase::FillSinkSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sinkType) { + Y_UNUSED(node); + Y_UNUSED(settings); + Y_UNUSED(sinkType); +} + void TDqIntegrationBase::Annotate(const TExprNode& node, THashMap<TString, TString>& params) { Y_UNUSED(node); Y_UNUSED(params); diff --git a/ydb/library/yql/providers/common/dq/yql_dq_integration_impl.h b/ydb/library/yql/providers/common/dq/yql_dq_integration_impl.h index cd1c150ebfc..910a576cbad 100644 --- a/ydb/library/yql/providers/common/dq/yql_dq_integration_impl.h +++ b/ydb/library/yql/providers/common/dq/yql_dq_integration_impl.h @@ -13,8 +13,8 @@ public: void RegisterMkqlCompiler(NCommon::TMkqlCallableCompilerBase& compiler) override; TMaybe<bool> CanWrite(const TDqSettings& config, const TExprNode& write, TExprContext& ctx) override; bool CanFallback() override; - void FillSourceSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sourceType) override; - void FillSinkSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sinkType) override; + void FillSourceSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sourceType) 
override; + void FillSinkSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sinkType) override; void Annotate(const TExprNode& node, THashMap<TString, TString>& params) override; bool PrepareFullResultTableParams(const TExprNode& root, TExprContext& ctx, THashMap<TString, TString>& params, THashMap<TString, TString>& secureParams) override; void WriteFullResultTableRef(NYson::TYsonWriter& writer, const TVector<TString>& columns, const THashMap<TString, TString>& graphParams) override; diff --git a/ydb/library/yql/providers/common/proto/gateways_config.proto b/ydb/library/yql/providers/common/proto/gateways_config.proto index 8561fe25368..bfa1a8d4736 100644 --- a/ydb/library/yql/providers/common/proto/gateways_config.proto +++ b/ydb/library/yql/providers/common/proto/gateways_config.proto @@ -250,36 +250,36 @@ message TRtmrGatewayConfig { optional uint32 PreviewCollectTimeoutMs = 8 [default = 2000]; } -///////////////////////////// Pq ////////////////////////////// - -message TPqClusterConfig { - enum EClusterType { +///////////////////////////// Pq ////////////////////////////// + +message TPqClusterConfig { + enum EClusterType { CT_UNSPECIFIED = 0; CT_PERS_QUEUE = 1; CT_DATA_STREAMS = 2; - } - - optional string Name = 1; + } + + optional string Name = 1; optional EClusterType ClusterType = 2 [default = CT_PERS_QUEUE]; - optional string Endpoint = 3; - optional string ConfigManagerEndpoint = 4; - optional string Token = 5; - optional string Database = 6 [default = "/Root"]; - optional uint32 TvmId = 7 [default = 0]; - optional bool UseSsl = 8; // grpcs + optional string Endpoint = 3; + optional string ConfigManagerEndpoint = 4; + optional string Token = 5; + optional string Database = 6 [default = "/Root"]; + optional uint32 TvmId = 7 [default = 0]; + optional bool UseSsl = 8; // grpcs optional string ServiceAccountId = 9; optional string ServiceAccountIdSignature = 10; optional bool AddBearerToToken = 11; // whether to use prefix "Bearer " in token - optional string DatabaseId = 12; - repeated TAttr Settings = 100; -} - -message TPqGatewayConfig { - repeated TPqClusterConfig ClusterMapping = 1; - optional string DefaultToken = 2; - repeated TAttr DefaultSettings = 100; -} - + optional string DatabaseId = 12; + repeated TAttr Settings = 100; +} + +message TPqGatewayConfig { + repeated TPqClusterConfig ClusterMapping = 1; + optional string DefaultToken = 2; + repeated TAttr DefaultSettings = 100; +} + ///////////////////////////// Stat ///////////////////////////// message TStatClusterConfig { @@ -451,7 +451,7 @@ message TGatewaysConfig { optional TDqGatewayConfig Dq = 13; optional TMysqlGatewayConfig Mysql = 14; optional TYdbGatewayConfig Ydb = 15; - optional TPqGatewayConfig Pq = 16; + optional TPqGatewayConfig Pq = 16; optional TS3GatewayConfig S3 = 17; optional THttpGatewayConfig HttpGateway = 18; } diff --git a/ydb/library/yql/providers/common/provider/yql_provider_names.h b/ydb/library/yql/providers/common/provider/yql_provider_names.h index 7cf29b4c1d0..b333bda599c 100644 --- a/ydb/library/yql/providers/common/provider/yql_provider_names.h +++ b/ydb/library/yql/providers/common/provider/yql_provider_names.h @@ -20,8 +20,8 @@ constexpr TStringBuf S3ProviderName = "s3"; constexpr std::array<const TStringBuf, 11> Providers = { {ConfigProviderName, YtProviderName, KikimrProviderName, RtmrProviderName, S3ProviderName, - StatProviderName, SolomonProviderName, DqProviderName, ClickHouseProviderName, YdbProviderName, - PqProviderName} + StatProviderName, 
SolomonProviderName, DqProviderName, ClickHouseProviderName, YdbProviderName, + PqProviderName} }; } // namespace NYql diff --git a/ydb/library/yql/providers/common/structured_token/ut/ya.make b/ydb/library/yql/providers/common/structured_token/ut/ya.make index 99ad5b2da0d..d590b76631d 100644 --- a/ydb/library/yql/providers/common/structured_token/ut/ya.make +++ b/ydb/library/yql/providers/common/structured_token/ut/ya.make @@ -1,7 +1,7 @@ UNITTEST_FOR(ydb/library/yql/providers/common/structured_token) OWNER( - g:yq + g:yq g:yql ) diff --git a/ydb/library/yql/providers/common/token_accessor/grpc/ya.make b/ydb/library/yql/providers/common/token_accessor/grpc/ya.make index 2297218e2b2..d8c4fca9f82 100644 --- a/ydb/library/yql/providers/common/token_accessor/grpc/ya.make +++ b/ydb/library/yql/providers/common/token_accessor/grpc/ya.make @@ -1,4 +1,4 @@ -OWNER(g:yq) +OWNER(g:yq) PROTO_LIBRARY() diff --git a/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp b/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp index 39cae924364..b594daaf54b 100644 --- a/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp +++ b/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp @@ -50,7 +50,7 @@ void TFakeActor::InitSource(IDqSourceActor* dqSource, IActor* dqSourceAsActor) { void TFakeActor::Terminate() { if (DqSourceActorId) { - DqSourceActor->PassAway(); + DqSourceActor->PassAway(); DqSourceActorId = std::nullopt; DqSourceActor = nullptr; @@ -58,7 +58,7 @@ void TFakeActor::Terminate() { } if (DqSinkActorId) { - DqSinkActor->PassAway(); + DqSinkActor->PassAway(); DqSinkActorId = std::nullopt; DqSinkActor = nullptr; @@ -107,21 +107,21 @@ void TFakeCASetup::SinkWrite(const TWriteValueProducer valueProducer, TMaybe<NDq }); } -void TFakeCASetup::SaveSourceState(NDqProto::TCheckpoint checkpoint, NDqProto::TSourceState& state) { +void TFakeCASetup::SaveSourceState(NDqProto::TCheckpoint checkpoint, NDqProto::TSourceState& state) { Execute([&state, &checkpoint](TFakeActor& actor) { Y_ASSERT(actor.DqSourceActor); - actor.DqSourceActor->SaveState(checkpoint, state); + actor.DqSourceActor->SaveState(checkpoint, state); }); } -void TFakeCASetup::LoadSource(const NDqProto::TSourceState& state) { +void TFakeCASetup::LoadSource(const NDqProto::TSourceState& state) { Execute([&state](TFakeActor& actor) { Y_ASSERT(actor.DqSourceActor); actor.DqSourceActor->LoadState(state); }); } -void TFakeCASetup::LoadSink(const NDqProto::TSinkState& state) { +void TFakeCASetup::LoadSink(const NDqProto::TSinkState& state) { Execute([&state](TFakeActor& actor) { Y_ASSERT(actor.DqSinkActor); actor.DqSinkActor->LoadState(state); diff --git a/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.h b/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.h index 94d815a1eb8..290a73cbfbe 100644 --- a/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.h +++ b/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.h @@ -62,7 +62,7 @@ struct TSourcePromises { struct TSinkPromises { NThreading::TPromise<void> ResumeExecution = NThreading::NewPromise(); NThreading::TPromise<TIssues> Issue = NThreading::NewPromise<TIssues>(); - NThreading::TPromise<NDqProto::TSinkState> StateSaved = NThreading::NewPromise<NDqProto::TSinkState>(); + NThreading::TPromise<NDqProto::TSinkState> StateSaved = NThreading::NewPromise<NDqProto::TSinkState>(); }; NYql::NDqProto::TCheckpoint CreateCheckpoint(ui64 id = 0); @@ -99,10 +99,10 @@ class TFakeActor : public NActors::TActor<TFakeActor> { Parent.SinkPromises.Issue = 
NThreading::NewPromise<TIssues>(); }; - void OnSinkStateSaved(NDqProto::TSinkState&& state, ui64 outputIndex, const NDqProto::TCheckpoint&) override { + void OnSinkStateSaved(NDqProto::TSinkState&& state, ui64 outputIndex, const NDqProto::TCheckpoint&) override { Y_UNUSED(outputIndex); Parent.SinkPromises.StateSaved.SetValue(state); - Parent.SinkPromises.StateSaved = NThreading::NewPromise<NDqProto::TSinkState>(); + Parent.SinkPromises.StateSaved = NThreading::NewPromise<NDqProto::TSinkState>(); }; TFakeActor& Parent; @@ -206,10 +206,10 @@ struct TFakeCASetup { void SinkWrite(const TWriteValueProducer valueProducer, TMaybe<NDqProto::TCheckpoint> checkpoint = Nothing()); - void SaveSourceState(NDqProto::TCheckpoint checkpoint, NDqProto::TSourceState& state); + void SaveSourceState(NDqProto::TCheckpoint checkpoint, NDqProto::TSourceState& state); - void LoadSource(const NDqProto::TSourceState& state); - void LoadSink(const NDqProto::TSinkState& state); + void LoadSource(const NDqProto::TSourceState& state); + void LoadSink(const NDqProto::TSinkState& state); void Execute(TCallback callback); diff --git a/ydb/library/yql/providers/dq/actors/actor_helpers.h b/ydb/library/yql/providers/dq/actors/actor_helpers.h index b92603ab0fb..0595f1cce11 100644 --- a/ydb/library/yql/providers/dq/actors/actor_helpers.h +++ b/ydb/library/yql/providers/dq/actors/actor_helpers.h @@ -23,7 +23,7 @@ struct TRichActorFutureCallback : public TRichActor<TRichActorFutureCallback<Eve using TFailure = std::function<void(void)>; using TBase = TRichActor<TRichActorFutureCallback<EventType>>; - static constexpr char ActorName[] = "YQL_DQ_ACTOR_FUTURE_CALLBACK"; + static constexpr char ActorName[] = "YQL_DQ_ACTOR_FUTURE_CALLBACK"; TRichActorFutureCallback(TCallback&& callback, TFailure&& failure, TDuration timeout) : TBase(&TRichActorFutureCallback::StateWaitForEvent) diff --git a/ydb/library/yql/providers/dq/actors/executer_actor.cpp b/ydb/library/yql/providers/dq/actors/executer_actor.cpp index 91f1caee748..8654b03f1df 100644 --- a/ydb/library/yql/providers/dq/actors/executer_actor.cpp +++ b/ydb/library/yql/providers/dq/actors/executer_actor.cpp @@ -35,7 +35,7 @@ using namespace NYql; class TDqExecuter: public TRichActor<TDqExecuter>, NYql::TCounters { public: - static constexpr char ActorName[] = "YQL_DQ_EXECUTER"; + static constexpr char ActorName[] = "YQL_DQ_EXECUTER"; TDqExecuter( const NActors::TActorId& gwmActorId, @@ -135,11 +135,11 @@ private: ControlId = NActors::ActorIdFromProto(ev->Get()->Record.GetControlId()); ResultId = NActors::ActorIdFromProto(ev->Get()->Record.GetResultId()); CheckPointCoordinatorId = NActors::ActorIdFromProto(ev->Get()->Record.GetCheckPointCoordinatorId()); - // These actors will be killed at exit. - AddChild(ControlId); - AddChild(ResultId); - AddChild(CheckPointCoordinatorId); - + // These actors will be killed at exit. 
+ AddChild(ControlId); + AddChild(ResultId); + AddChild(CheckPointCoordinatorId); + int workerCount = ev->Get()->Record.GetRequest().GetTask().size(); YQL_LOG(INFO) << (TStringBuilder() << "Trying to allocate " << workerCount << " workers"); @@ -173,7 +173,7 @@ private: SelfId(), ResultId)); - const bool enableComputeActor = Settings->EnableComputeActor.Get().GetOrElse(false); + const bool enableComputeActor = Settings->EnableComputeActor.Get().GetOrElse(false); const TString computeActorType = Settings->ComputeActorType.Get().GetOrElse("old"); auto resourceAllocator = RegisterChild(CreateResourceAllocator( @@ -184,7 +184,7 @@ private: computeActorType)); auto allocateRequest = MakeHolder<TEvAllocateWorkersRequest>(workerCount, Username); allocateRequest->Record.SetTraceId(TraceId); - allocateRequest->Record.SetCreateComputeActor(enableComputeActor); + allocateRequest->Record.SetCreateComputeActor(enableComputeActor); allocateRequest->Record.SetComputeActorType(computeActorType); if (enableComputeActor) { ActorIdToProto(ControlId, allocateRequest->Record.MutableResultActorId()); diff --git a/ydb/library/yql/providers/dq/actors/full_result_writer.cpp b/ydb/library/yql/providers/dq/actors/full_result_writer.cpp index 927ac060436..7ffbdbc2f13 100644 --- a/ydb/library/yql/providers/dq/actors/full_result_writer.cpp +++ b/ydb/library/yql/providers/dq/actors/full_result_writer.cpp @@ -24,7 +24,7 @@ using namespace NKikimr::NMiniKQL; class TFullResultWriterActor : public NActors::TActor<TFullResultWriterActor> { public: - static constexpr char ActorName[] = "YQL_DQ_FULL_RESULT_WRITER"; + static constexpr char ActorName[] = "YQL_DQ_FULL_RESULT_WRITER"; explicit TFullResultWriterActor(const TString& traceId, const TString& resultType, diff --git a/ydb/library/yql/providers/dq/actors/graph_execution_events_actor.cpp b/ydb/library/yql/providers/dq/actors/graph_execution_events_actor.cpp index 3ff4fb16588..c0860b248af 100644 --- a/ydb/library/yql/providers/dq/actors/graph_execution_events_actor.cpp +++ b/ydb/library/yql/providers/dq/actors/graph_execution_events_actor.cpp @@ -11,7 +11,7 @@ namespace NYql::NDqs { class TGraphExecutionEventsActor : public TRichActor<TGraphExecutionEventsActor> { public: - static constexpr const char ActorName[] = "YQL_DQ_GRAPH_EXECUTION_EVENTS_ACTOR"; + static constexpr const char ActorName[] = "YQL_DQ_GRAPH_EXECUTION_EVENTS_ACTOR"; TGraphExecutionEventsActor(const TString& traceID, std::vector<IDqTaskPreprocessor::TPtr>&& taskPreprocessors) : TRichActor<TGraphExecutionEventsActor>(&TGraphExecutionEventsActor::Handler) diff --git a/ydb/library/yql/providers/dq/actors/resource_allocator.cpp b/ydb/library/yql/providers/dq/actors/resource_allocator.cpp index 6acfb1277e0..4765a73c9ae 100644 --- a/ydb/library/yql/providers/dq/actors/resource_allocator.cpp +++ b/ydb/library/yql/providers/dq/actors/resource_allocator.cpp @@ -39,7 +39,7 @@ class TResourceAllocator: public TRichActor<TResourceAllocator> public: - static constexpr char ActorName[] = "YQL_DQ_RESOURCE_ALLOCATOR"; + static constexpr char ActorName[] = "YQL_DQ_RESOURCE_ALLOCATOR"; TResourceAllocator( TActorId gwmActor, diff --git a/ydb/library/yql/providers/dq/actors/result_aggregator.cpp b/ydb/library/yql/providers/dq/actors/result_aggregator.cpp index 59ae95c3af5..de35634ad12 100644 --- a/ydb/library/yql/providers/dq/actors/result_aggregator.cpp +++ b/ydb/library/yql/providers/dq/actors/result_aggregator.cpp @@ -58,7 +58,7 @@ class TResultAggregator: public TSynchronizableRichActor<TResultAggregator>, NYq static 
constexpr ui32 MAX_RESULT_BATCH = 2048; public: - static constexpr char ActorName[] = "YQL_DQ_RESULT_AGGREGATOR"; + static constexpr char ActorName[] = "YQL_DQ_RESULT_AGGREGATOR"; explicit TResultAggregator(const TVector<TString>& columns, const NActors::TActorId& executerId, const TString& traceId, const TDqConfiguration::TPtr& settings, const TString& resultType, NActors::TActorId graphExecutionEventsId, bool discard) @@ -489,7 +489,7 @@ private: class TResultPrinter: public TActor<TResultPrinter> { public: - static constexpr char ActorName[] = "YQL_DQ_RESULT_PRINTER"; + static constexpr char ActorName[] = "YQL_DQ_RESULT_PRINTER"; TResultPrinter(IOutputStream& output, NThreading::TPromise<void>& promise) : TActor<TResultPrinter>(&TResultPrinter::Handler) diff --git a/ydb/library/yql/providers/dq/actors/result_receiver.cpp b/ydb/library/yql/providers/dq/actors/result_receiver.cpp index 7fdb73f257b..8031253fa7b 100644 --- a/ydb/library/yql/providers/dq/actors/result_receiver.cpp +++ b/ydb/library/yql/providers/dq/actors/result_receiver.cpp @@ -36,7 +36,7 @@ namespace { class TResultReceiver: public TRichActor<TResultReceiver> { public: - static constexpr char ActorName[] = "YQL_DQ_RESULT_RECEIVER"; + static constexpr char ActorName[] = "YQL_DQ_RESULT_RECEIVER"; explicit TResultReceiver(const TVector<TString>& columns, const NActors::TActorId& executerId, const TString& traceId, const TDqConfiguration::TPtr& settings, const THashMap<TString, TString>& secureParams, const TString& resultType, bool discard) diff --git a/ydb/library/yql/providers/dq/actors/task_controller.cpp b/ydb/library/yql/providers/dq/actors/task_controller.cpp index 238828e453b..9f1e031d0f4 100644 --- a/ydb/library/yql/providers/dq/actors/task_controller.cpp +++ b/ydb/library/yql/providers/dq/actors/task_controller.cpp @@ -43,7 +43,7 @@ public: static constexpr ui64 PING_TIMER_TAG = 1; static constexpr ui64 AGGR_TIMER_TAG = 2; - static constexpr char ActorName[] = "YQL_DQ_TASK_CONTROLLER"; + static constexpr char ActorName[] = "YQL_DQ_TASK_CONTROLLER"; explicit TTaskController( const TString& traceId, @@ -260,9 +260,9 @@ private: auto& stats = s; // basic stats ADD_COUNTER(ComputeCpuTimeUs) - ADD_COUNTER(PendingInputTimeUs) - ADD_COUNTER(PendingOutputTimeUs) - ADD_COUNTER(FinishTimeUs) + ADD_COUNTER(PendingInputTimeUs) + ADD_COUNTER(PendingOutputTimeUs) + ADD_COUNTER(FinishTimeUs) // profile stats ADD_COUNTER(BuildCpuTimeUs) @@ -329,7 +329,7 @@ private: ADD_COUNTER(MaxMemoryUsage); ADD_COUNTER(SerializationTimeUs); - ADD_COUNTER(BlockedByCapacity); + ADD_COUNTER(BlockedByCapacity); ADD_COUNTER(SpilledBytes); ADD_COUNTER(SpilledRows); diff --git a/ydb/library/yql/providers/dq/actors/task_controller.h b/ydb/library/yql/providers/dq/actors/task_controller.h index d065ac17e70..0b18ea5c315 100644 --- a/ydb/library/yql/providers/dq/actors/task_controller.h +++ b/ydb/library/yql/providers/dq/actors/task_controller.h @@ -13,7 +13,7 @@ THolder<NActors::IActor> MakeTaskController( const NActors::TActorId& executerId, const NActors::TActorId& resultId, const TDqConfiguration::TPtr& settings, - const ::NYq::NCommon::TServiceCounters& serviceCounters, + const ::NYq::NCommon::TServiceCounters& serviceCounters, const TDuration& pingPeriod = TDuration::Zero(), const TDuration& aggrPeriod = TDuration::Seconds(1)); diff --git a/ydb/library/yql/providers/dq/actors/worker_actor.cpp b/ydb/library/yql/providers/dq/actors/worker_actor.cpp index f4503a60b2e..e214f24bfe0 100644 --- a/ydb/library/yql/providers/dq/actors/worker_actor.cpp +++ 
b/ydb/library/yql/providers/dq/actors/worker_actor.cpp @@ -70,7 +70,7 @@ class TDqWorker: public TRichActor<TDqWorker> static constexpr ui32 INPUT_SIZE = 100000; public: - static constexpr char ActorName[] = "YQL_DQ_WORKER"; + static constexpr char ActorName[] = "YQL_DQ_WORKER"; explicit TDqWorker( const ITaskRunnerActorFactory::TPtr& taskRunnerActorFactory, diff --git a/ydb/library/yql/providers/dq/common/yql_dq_settings.h b/ydb/library/yql/providers/dq/common/yql_dq_settings.h index 3c37e4d48fd..615006c0280 100644 --- a/ydb/library/yql/providers/dq/common/yql_dq_settings.h +++ b/ydb/library/yql/providers/dq/common/yql_dq_settings.h @@ -63,7 +63,7 @@ struct TDqSettings { NCommon::TConfSetting<bool, false> EnablePorto; // Will be renamed to _EnablePorto NCommon::TConfSetting<ui64, false> _PortoMemoryLimit; NCommon::TConfSetting<bool, false> EnableFullResultWrite; - NCommon::TConfSetting<bool, false> _OneGraphPerQuery; + NCommon::TConfSetting<bool, false> _OneGraphPerQuery; NCommon::TConfSetting<TString, false> _FallbackOnRuntimeErrors; NCommon::TConfSetting<TString, false> WorkerFilter; diff --git a/ydb/library/yql/providers/dq/interface/ya.make b/ydb/library/yql/providers/dq/interface/ya.make index 8d72c16c03a..85e9c3a753b 100644 --- a/ydb/library/yql/providers/dq/interface/ya.make +++ b/ydb/library/yql/providers/dq/interface/ya.make @@ -9,7 +9,7 @@ SRCS( ) PEERDIR( - contrib/libs/protobuf + contrib/libs/protobuf library/cpp/yson ydb/library/yql/ast ydb/library/yql/core diff --git a/ydb/library/yql/providers/dq/interface/yql_dq_integration.h b/ydb/library/yql/providers/dq/interface/yql_dq_integration.h index ee1938e698c..ef48c4e9829 100644 --- a/ydb/library/yql/providers/dq/interface/yql_dq_integration.h +++ b/ydb/library/yql/providers/dq/interface/yql_dq_integration.h @@ -9,8 +9,8 @@ #include <util/generic/vector.h> #include <util/generic/maybe.h> -#include <google/protobuf/any.pb.h> - +#include <google/protobuf/any.pb.h> + namespace NYql { struct TDqSettings; @@ -33,8 +33,8 @@ public: virtual TMaybe<bool> CanWrite(const TDqSettings& config, const TExprNode& write, TExprContext& ctx) = 0; virtual void RegisterMkqlCompiler(NCommon::TMkqlCallableCompilerBase& compiler) = 0; virtual bool CanFallback() = 0; - virtual void FillSourceSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sourceType) = 0; - virtual void FillSinkSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sinkType) = 0; + virtual void FillSourceSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sourceType) = 0; + virtual void FillSinkSettings(const TExprNode& node, ::google::protobuf::Any& settings, TString& sinkType) = 0; virtual void Annotate(const TExprNode& node, THashMap<TString, TString>& params) = 0; virtual bool PrepareFullResultTableParams(const TExprNode& root, TExprContext& ctx, THashMap<TString, TString>& params, THashMap<TString, TString>& secureParams) = 0; virtual void WriteFullResultTableRef(NYson::TYsonWriter& writer, const TVector<TString>& columns, const THashMap<TString, TString>& graphParams) = 0; diff --git a/ydb/library/yql/providers/dq/local_gateway/yql_dq_gateway_local.cpp b/ydb/library/yql/providers/dq/local_gateway/yql_dq_gateway_local.cpp index 1a8d3172e41..89f03010aed 100644 --- a/ydb/library/yql/providers/dq/local_gateway/yql_dq_gateway_local.cpp +++ b/ydb/library/yql/providers/dq/local_gateway/yql_dq_gateway_local.cpp @@ -25,21 +25,21 @@ using namespace NActors; using NDqs::MakeWorkerManagerActorID; namespace { - // TODO: 
Use the only driver for both sources. + // TODO: Use the only driver for both sources. NDq::IDqSourceActorFactory::TPtr CreateSourceActorFactory(const NYdb::TDriver& driver, IHTTPGateway::TPtr httpGateway) { auto factory = MakeIntrusive<NYql::NDq::TDqSourceFactory>(); - RegisterDqPqReadActorFactory(*factory, driver, nullptr); + RegisterDqPqReadActorFactory(*factory, driver, nullptr); RegisterYdbReadActorFactory(*factory, driver, nullptr); RegisterS3ReadActorFactory(*factory, nullptr, httpGateway); RegisterClickHouseReadActorFactory(*factory, nullptr, httpGateway); return factory; } - - NDq::IDqSinkActorFactory::TPtr CreateSinkActorFactory(const NYdb::TDriver& driver) { - auto factory = MakeIntrusive<NYql::NDq::TDqSinkFactory>(); - RegisterDqPqWriteActorFactory(*factory, driver, nullptr); - return factory; - } + + NDq::IDqSinkActorFactory::TPtr CreateSinkActorFactory(const NYdb::TDriver& driver) { + auto factory = MakeIntrusive<NYql::NDq::TDqSinkFactory>(); + RegisterDqPqWriteActorFactory(*factory, driver, nullptr); + return factory; + } } class TLocalServiceHolder { diff --git a/ydb/library/yql/providers/dq/mkql/ya.make b/ydb/library/yql/providers/dq/mkql/ya.make index 03ecd4352d4..26b6a209cdc 100644 --- a/ydb/library/yql/providers/dq/mkql/ya.make +++ b/ydb/library/yql/providers/dq/mkql/ya.make @@ -15,6 +15,6 @@ SRCS( dqs_mkql_compiler.cpp ) -YQL_LAST_ABI_VERSION() +YQL_LAST_ABI_VERSION() END() diff --git a/ydb/library/yql/providers/dq/opt/dqs_opt.cpp b/ydb/library/yql/providers/dq/opt/dqs_opt.cpp index b4bc07fbef1..79cbaed2c64 100644 --- a/ydb/library/yql/providers/dq/opt/dqs_opt.cpp +++ b/ydb/library/yql/providers/dq/opt/dqs_opt.cpp @@ -99,7 +99,7 @@ namespace NYql::NDqs { }); } - THolder<IGraphTransformer> CreateDqsRewritePhyCallablesTransformer() { + THolder<IGraphTransformer> CreateDqsRewritePhyCallablesTransformer() { return CreateFunctorTransformer([](const TExprNode::TPtr& input, TExprNode::TPtr& output, TExprContext& ctx) { TOptimizeExprSettings optSettings{nullptr}; optSettings.VisitLambdas = true; @@ -110,7 +110,7 @@ namespace NYql::NDqs { PERFORM_RULE(DqPeepholeRewriteJoinDict, node, ctx); PERFORM_RULE(DqPeepholeRewriteMapJoin, node, ctx); PERFORM_RULE(DqPeepholeRewritePureJoin, node, ctx); - PERFORM_RULE(DqPeepholeRewriteReplicate, node, ctx); + PERFORM_RULE(DqPeepholeRewriteReplicate, node, ctx); return inputExpr; }, ctx, optSettings); }); @@ -133,7 +133,7 @@ namespace NYql::NDqs { return TStatus::Ok; } - auto transformer = CreateDqsRewritePhyCallablesTransformer(); + auto transformer = CreateDqsRewritePhyCallablesTransformer(); auto status = InstantTransform(*transformer, inputExpr, ctx); if (status.Level != TStatus::Ok) { ctx.AddError(TIssue(ctx.GetPosition(inputExpr->Pos()), TString("Peephole optimization failed for Dq stage"))); diff --git a/ydb/library/yql/providers/dq/opt/dqs_opt.h b/ydb/library/yql/providers/dq/opt/dqs_opt.h index 6c9e1a7c18b..f18e0f9475b 100644 --- a/ydb/library/yql/providers/dq/opt/dqs_opt.h +++ b/ydb/library/yql/providers/dq/opt/dqs_opt.h @@ -10,7 +10,7 @@ namespace NYql::NDqs { THolder<IGraphTransformer> CreateDqsWrapListsOptTransformer(); THolder<IGraphTransformer> CreateDqsFinalizingOptTransformer(); THolder<IGraphTransformer> CreateDqsBuildTransformer(); - THolder<IGraphTransformer> CreateDqsRewritePhyCallablesTransformer(); + THolder<IGraphTransformer> CreateDqsRewritePhyCallablesTransformer(); THolder<IGraphTransformer> CreateDqsPeepholeTransformer(THolder<IGraphTransformer>&& typeAnnTransformer, TTypeAnnotationContext& typesCtx); } // 
namespace NYql::NDqs diff --git a/ydb/library/yql/providers/dq/opt/logical_optimize.cpp b/ydb/library/yql/providers/dq/opt/logical_optimize.cpp index 9e87f81e7cf..57579bf0a03 100644 --- a/ydb/library/yql/providers/dq/opt/logical_optimize.cpp +++ b/ydb/library/yql/providers/dq/opt/logical_optimize.cpp @@ -53,7 +53,7 @@ public: AddHandler(0, &TCoCalcOverWindow::Match, HNDL(ExpandWindowFunctions)); AddHandler(0, &TCoCalcOverWindowGroup::Match, HNDL(ExpandWindowFunctions)); AddHandler(0, &TCoFlatMapBase::Match, HNDL(FlatMapOverExtend)); - AddHandler(0, &TDqQuery::Match, HNDL(MergeQueriesWithSinks)); + AddHandler(0, &TDqQuery::Match, HNDL(MergeQueriesWithSinks)); AddHandler(0, &TDqStageBase::Match, HNDL(UnorderedInStage)); #undef HNDL } @@ -118,10 +118,10 @@ protected: return node; } - TMaybeNode<TExprBase> MergeQueriesWithSinks(TExprBase node, TExprContext& ctx) { - return DqMergeQueriesWithSinks(node, ctx); - } - + TMaybeNode<TExprBase> MergeQueriesWithSinks(TExprBase node, TExprContext& ctx) { + return DqMergeQueriesWithSinks(node, ctx); + } + TMaybeNode<TExprBase> UnorderedInStage(TExprBase node, TExprContext& ctx) const { return DqUnorderedInStage(node, TDqReadWrapBase::Match, ctx, Types); } diff --git a/ydb/library/yql/providers/dq/planner/execution_planner.cpp b/ydb/library/yql/providers/dq/planner/execution_planner.cpp index ec404c0a6dc..98c82f60e8f 100644 --- a/ydb/library/yql/providers/dq/planner/execution_planner.cpp +++ b/ydb/library/yql/providers/dq/planner/execution_planner.cpp @@ -26,8 +26,8 @@ #include <library/cpp/actors/core/event_pb.h> -#include <stack> - +#include <stack> + using namespace NYql; using namespace NYql::NCommon; using namespace NYql::NDq; @@ -40,10 +40,10 @@ using namespace Yql::DqsProto; namespace NYql::NDqs { namespace { - TVector<TDqPhyStage> GetStages(const TExprNode::TPtr& exprRoot) { + TVector<TDqPhyStage> GetStages(const TExprNode::TPtr& exprRoot) { TVector<TDqPhyStage> stages; VisitExpr( - exprRoot, + exprRoot, [](const TExprNode::TPtr& exprNode) { const auto& node = TExprBase(exprNode); return !node.Maybe<TCoLambda>(); @@ -69,16 +69,16 @@ namespace NYql::NDqs { return result; } - - static bool HasDqSource(const TDqPhyStage& stage) { - for (size_t inputIndex = 0; inputIndex < stage.Inputs().Size(); ++inputIndex) { - const auto& input = stage.Inputs().Item(inputIndex); - if (input.Maybe<TDqSource>()) { - return true; - } - } - return false; - } + + static bool HasDqSource(const TDqPhyStage& stage) { + for (size_t inputIndex = 0; inputIndex < stage.Inputs().Size(); ++inputIndex) { + const auto& input = stage.Inputs().Item(inputIndex); + if (input.Maybe<TDqSource>()) { + return true; + } + } + return false; + } } TDqsExecutionPlanner::TDqsExecutionPlanner(TIntrusivePtr<TTypeAnnotationContext> typeContext, @@ -131,8 +131,8 @@ namespace NYql::NDqs { ui32 TDqsExecutionPlanner::PlanExecution(const TDqSettings::TPtr& settings, bool canFallback) { TExprBase expr(DqExprRoot); - auto result = expr.Maybe<TDqCnResult>(); - auto query = expr.Maybe<TDqQuery>(); + auto result = expr.Maybe<TDqCnResult>(); + auto query = expr.Maybe<TDqQuery>(); const auto maxTasksPerOperation = settings->MaxTasksPerOperation.Get().GetOrElse(TDqSettings::TDefault::MaxTasksPerOperation); YQL_LOG(DEBUG) << "Execution Plan " << NCommon::ExprToPrettyString(ExprContext, *DqExprRoot); @@ -145,7 +145,7 @@ namespace NYql::NDqs { } for (const auto& stage : stages) { - const bool hasDqSource = HasDqSource(stage); + const bool hasDqSource = HasDqSource(stage); if ((hasDqSource || 
HasReadWraps(stage.Program().Ptr())) && BuildReadStage(settings, stage, hasDqSource, canFallback)) { YQL_LOG(DEBUG) << "Read stage " << NCommon::ExprToPrettyString(ExprContext, *stage.Ptr()); } else { @@ -153,36 +153,36 @@ namespace NYql::NDqs { NDq::CommonBuildTasks(TasksGraph, stage); } - // Sinks - if (auto maybeDqSinksList = stage.Sinks()) { - auto dqSinksList = maybeDqSinksList.Cast(); - for (const TDqSink& sink : dqSinksList) { - const ui64 index = FromString(sink.Index().Value()); - auto& stageInfo = TasksGraph.GetStageInfo(stage); - YQL_ENSURE(index < stageInfo.OutputsCount); - - auto dataSinkName = sink.Ptr()->Child(TDqSink::idx_DataSink)->Child(0)->Content(); - auto datasink = TypeContext->DataSinkMap.FindPtr(dataSinkName); - YQL_ENSURE(datasink); - auto dqIntegration = (*datasink)->GetDqIntegration(); - YQL_ENSURE(dqIntegration, "DqSink assumes that datasink has a dq integration impl"); - TString sinkType; - ::google::protobuf::Any sinkSettings; - dqIntegration->FillSinkSettings(sink.Ref(), sinkSettings, sinkType); - YQL_ENSURE(!sinkSettings.type_url().empty(), "Data sink provider \"" << dataSinkName << "\" did't fill dq sink settings for its dq sink node"); - YQL_ENSURE(sinkType, "Data sink provider \"" << dataSinkName << "\" did't fill dq sink settings type for its dq sink node"); - - for (ui64 taskId : stageInfo.Tasks) { - auto& task = TasksGraph.GetTask(taskId); - YQL_ENSURE(index < task.Outputs.size()); - auto& output = task.Outputs[index]; - output.SinkType = sinkType; - output.SinkSettings = sinkSettings; - output.Type = NDq::TTaskOutputType::Sink; - } - } - } - + // Sinks + if (auto maybeDqSinksList = stage.Sinks()) { + auto dqSinksList = maybeDqSinksList.Cast(); + for (const TDqSink& sink : dqSinksList) { + const ui64 index = FromString(sink.Index().Value()); + auto& stageInfo = TasksGraph.GetStageInfo(stage); + YQL_ENSURE(index < stageInfo.OutputsCount); + + auto dataSinkName = sink.Ptr()->Child(TDqSink::idx_DataSink)->Child(0)->Content(); + auto datasink = TypeContext->DataSinkMap.FindPtr(dataSinkName); + YQL_ENSURE(datasink); + auto dqIntegration = (*datasink)->GetDqIntegration(); + YQL_ENSURE(dqIntegration, "DqSink assumes that datasink has a dq integration impl"); + TString sinkType; + ::google::protobuf::Any sinkSettings; + dqIntegration->FillSinkSettings(sink.Ref(), sinkSettings, sinkType); + YQL_ENSURE(!sinkSettings.type_url().empty(), "Data sink provider \"" << dataSinkName << "\" did't fill dq sink settings for its dq sink node"); + YQL_ENSURE(sinkType, "Data sink provider \"" << dataSinkName << "\" did't fill dq sink settings type for its dq sink node"); + + for (ui64 taskId : stageInfo.Tasks) { + auto& task = TasksGraph.GetTask(taskId); + YQL_ENSURE(index < task.Outputs.size()); + auto& output = task.Outputs[index]; + output.SinkType = sinkType; + output.SinkSettings = sinkSettings; + output.Type = NDq::TTaskOutputType::Sink; + } + } + } + BuildConnections(stage); if (canFallback && TasksGraph.GetTasks().size() > maxTasksPerOperation) { @@ -195,110 +195,110 @@ namespace NYql::NDqs { YQL_ENSURE(!stageInfo.Tasks.empty()); } - if (result) { - auto& resultStageInfo = TasksGraph.GetStageInfo(result.Cast().Output().Stage().Cast<TDqPhyStage>()); - YQL_ENSURE(resultStageInfo.Tasks.size() == 1); - auto& resultTask = TasksGraph.GetTask(resultStageInfo.Tasks[0]); - YQL_ENSURE(resultTask.Outputs.size() == 1); - auto& output = resultTask.Outputs[0]; - output.Type = NDq::TTaskOutputType::Map; - auto& channel = TasksGraph.AddChannel(); - channel.SrcTask = resultTask.Id; - 
channel.SrcOutputIndex = 0; - channel.DstTask = 0; - channel.DstInputIndex = 0; - output.Channels.emplace_back(channel.Id); - SourceTaskID = resultTask.Id; - } - - BuildCheckpointingMode(); - + if (result) { + auto& resultStageInfo = TasksGraph.GetStageInfo(result.Cast().Output().Stage().Cast<TDqPhyStage>()); + YQL_ENSURE(resultStageInfo.Tasks.size() == 1); + auto& resultTask = TasksGraph.GetTask(resultStageInfo.Tasks[0]); + YQL_ENSURE(resultTask.Outputs.size() == 1); + auto& output = resultTask.Outputs[0]; + output.Type = NDq::TTaskOutputType::Map; + auto& channel = TasksGraph.AddChannel(); + channel.SrcTask = resultTask.Id; + channel.SrcOutputIndex = 0; + channel.DstTask = 0; + channel.DstInputIndex = 0; + output.Channels.emplace_back(channel.Id); + SourceTaskID = resultTask.Id; + } + + BuildCheckpointingMode(); + return TasksGraph.GetTasks().size(); } - bool TDqsExecutionPlanner::IsEgressTask(const TDqsTasksGraph::TTaskType& task) const { - for (const auto& output : task.Outputs) { - for (ui64 channelId : output.Channels) { - if (TasksGraph.GetChannel(channelId).DstTask) { - return false; - } - } - } - return true; - } - - static bool IsInfiniteSourceType(const TString& sourceType) { - return sourceType == "PqSource"; // Now it is the only infinite source type. Others are finite. - } - - void TDqsExecutionPlanner::BuildCheckpointingMode() { - std::stack<TDqsTasksGraph::TTaskType*> tasksStack; - std::vector<bool> processedTasks(TasksGraph.GetTasks().size()); - for (TDqsTasksGraph::TTaskType& task : TasksGraph.GetTasks()) { - if (IsEgressTask(task)) { - tasksStack.push(&task); - } - } - - while (!tasksStack.empty()) { - TDqsTasksGraph::TTaskType& task = *tasksStack.top(); - Y_VERIFY(task.Id && task.Id <= processedTasks.size()); - if (processedTasks[task.Id - 1]) { - tasksStack.pop(); - continue; - } - - // Make sure that all input tasks are processed - bool allInputsAreReady = true; - for (const auto& input : task.Inputs) { - for (ui64 channelId : input.Channels) { - const NDq::TChannel& channel = TasksGraph.GetChannel(channelId); - Y_VERIFY(channel.SrcTask && channel.SrcTask <= processedTasks.size()); - if (!processedTasks[channel.SrcTask - 1]) { - allInputsAreReady = false; - tasksStack.push(&TasksGraph.GetTask(channel.SrcTask)); - } - } - } - if (!allInputsAreReady) { - continue; - } - - // Current task has all inputs processed, so determine its checkpointing mode now. - NDqProto::ECheckpointingMode checkpointingMode = NDqProto::CHECKPOINTING_MODE_DISABLED; - for (const auto& input : task.Inputs) { - if (input.SourceType) { - if (IsInfiniteSourceType(input.SourceType)) { - checkpointingMode = NDqProto::CHECKPOINTING_MODE_DEFAULT; - break; - } - } else { - for (ui64 channelId : input.Channels) { - const NDq::TChannel& channel = TasksGraph.GetChannel(channelId); - if (channel.CheckpointingMode != NDqProto::CHECKPOINTING_MODE_DISABLED) { - checkpointingMode = NDqProto::CHECKPOINTING_MODE_DEFAULT; - break; - } - } - if (checkpointingMode == NDqProto::CHECKPOINTING_MODE_DEFAULT) { - break; - } - } - } - - // Apply mode to task and its outputs. 
- task.CheckpointingMode = checkpointingMode; - for (const auto& output : task.Outputs) { - for (ui64 channelId : output.Channels) { - TasksGraph.GetChannel(channelId).CheckpointingMode = checkpointingMode; - } - } - - processedTasks[task.Id - 1] = true; - tasksStack.pop(); - } - } - + bool TDqsExecutionPlanner::IsEgressTask(const TDqsTasksGraph::TTaskType& task) const { + for (const auto& output : task.Outputs) { + for (ui64 channelId : output.Channels) { + if (TasksGraph.GetChannel(channelId).DstTask) { + return false; + } + } + } + return true; + } + + static bool IsInfiniteSourceType(const TString& sourceType) { + return sourceType == "PqSource"; // Now it is the only infinite source type. Others are finite. + } + + void TDqsExecutionPlanner::BuildCheckpointingMode() { + std::stack<TDqsTasksGraph::TTaskType*> tasksStack; + std::vector<bool> processedTasks(TasksGraph.GetTasks().size()); + for (TDqsTasksGraph::TTaskType& task : TasksGraph.GetTasks()) { + if (IsEgressTask(task)) { + tasksStack.push(&task); + } + } + + while (!tasksStack.empty()) { + TDqsTasksGraph::TTaskType& task = *tasksStack.top(); + Y_VERIFY(task.Id && task.Id <= processedTasks.size()); + if (processedTasks[task.Id - 1]) { + tasksStack.pop(); + continue; + } + + // Make sure that all input tasks are processed + bool allInputsAreReady = true; + for (const auto& input : task.Inputs) { + for (ui64 channelId : input.Channels) { + const NDq::TChannel& channel = TasksGraph.GetChannel(channelId); + Y_VERIFY(channel.SrcTask && channel.SrcTask <= processedTasks.size()); + if (!processedTasks[channel.SrcTask - 1]) { + allInputsAreReady = false; + tasksStack.push(&TasksGraph.GetTask(channel.SrcTask)); + } + } + } + if (!allInputsAreReady) { + continue; + } + + // Current task has all inputs processed, so determine its checkpointing mode now. + NDqProto::ECheckpointingMode checkpointingMode = NDqProto::CHECKPOINTING_MODE_DISABLED; + for (const auto& input : task.Inputs) { + if (input.SourceType) { + if (IsInfiniteSourceType(input.SourceType)) { + checkpointingMode = NDqProto::CHECKPOINTING_MODE_DEFAULT; + break; + } + } else { + for (ui64 channelId : input.Channels) { + const NDq::TChannel& channel = TasksGraph.GetChannel(channelId); + if (channel.CheckpointingMode != NDqProto::CHECKPOINTING_MODE_DISABLED) { + checkpointingMode = NDqProto::CHECKPOINTING_MODE_DEFAULT; + break; + } + } + if (checkpointingMode == NDqProto::CHECKPOINTING_MODE_DEFAULT) { + break; + } + } + } + + // Apply mode to task and its outputs. 
+ task.CheckpointingMode = checkpointingMode; + for (const auto& output : task.Outputs) { + for (ui64 channelId : output.Channels) { + TasksGraph.GetChannel(channelId).CheckpointingMode = checkpointingMode; + } + } + + processedTasks[task.Id - 1] = true; + tasksStack.pop(); + } + } + // TODO: Split Build and Get stages TVector<TDqTask>& TDqsExecutionPlanner::GetTasks() { if (Tasks.empty()) { @@ -321,7 +321,7 @@ namespace NYql::NDqs { tasks[i].ComputeActorId = workers[i]; } - THashMap<TStageId, std::tuple<TString, ui64>> stagePrograms = BuildAllPrograms(); + THashMap<TStageId, std::tuple<TString, ui64>> stagePrograms = BuildAllPrograms(); TVector<TDqTask> plan; THashSet<TString> clusterNameHints; for (const auto& task : tasks) { @@ -346,11 +346,11 @@ namespace NYql::NDqs { for (auto& input : task.Inputs) { auto& inputDesc = *taskDesc.AddInputs(); - if (input.SourceSettings) { - auto* sourceProto = inputDesc.MutableSource(); - *sourceProto->MutableSettings() = *input.SourceSettings; - sourceProto->SetType(input.SourceType); - } else { + if (input.SourceSettings) { + auto* sourceProto = inputDesc.MutableSource(); + *sourceProto->MutableSettings() = *input.SourceSettings; + sourceProto->SetType(input.SourceType); + } else { FillInputDesc(inputDesc, input); } } @@ -380,55 +380,55 @@ namespace NYql::NDqs { } NActors::TActorId TDqsExecutionPlanner::GetSourceID() const { - if (SourceID) { - return *SourceID; - } else { - return {}; - } + if (SourceID) { + return *SourceID; + } else { + return {}; + } } TString TDqsExecutionPlanner::GetResultType(bool withTagged) const { - if (SourceTaskID) { - auto& stage = TasksGraph.GetStageInfo(TasksGraph.GetTask(SourceTaskID).StageId).Meta.Stage; - auto result = stage.Ref().GetTypeAnn(); - YQL_ENSURE(result->GetKind() == ETypeAnnotationKind::Tuple); - YQL_ENSURE(result->Cast<TTupleExprType>()->GetItems().size() == 1); - auto& item = result->Cast<TTupleExprType>()->GetItems()[0]; - YQL_ENSURE(item->GetKind() == ETypeAnnotationKind::List); - auto exprType = item->Cast<TListExprType>()->GetItemType(); - - TScopedAlloc alloc; - TTypeEnvironment typeEnv(alloc); - - TProgramBuilder pgmBuilder(typeEnv, *FunctionRegistry); - TStringStream errorStream; + if (SourceTaskID) { + auto& stage = TasksGraph.GetStageInfo(TasksGraph.GetTask(SourceTaskID).StageId).Meta.Stage; + auto result = stage.Ref().GetTypeAnn(); + YQL_ENSURE(result->GetKind() == ETypeAnnotationKind::Tuple); + YQL_ENSURE(result->Cast<TTupleExprType>()->GetItems().size() == 1); + auto& item = result->Cast<TTupleExprType>()->GetItems()[0]; + YQL_ENSURE(item->GetKind() == ETypeAnnotationKind::List); + auto exprType = item->Cast<TListExprType>()->GetItemType(); + + TScopedAlloc alloc; + TTypeEnvironment typeEnv(alloc); + + TProgramBuilder pgmBuilder(typeEnv, *FunctionRegistry); + TStringStream errorStream; auto type = NCommon::BuildType(*exprType, pgmBuilder, errorStream, withTagged); - return SerializeNode(type, typeEnv); - } - return {}; + return SerializeNode(type, typeEnv); + } + return {}; } bool TDqsExecutionPlanner::BuildReadStage(const TDqSettings::TPtr& settings, const TDqPhyStage& stage, bool dqSource, bool canFallback) { auto& stageInfo = TasksGraph.GetStageInfo(stage); for (ui32 i = 0; i < stageInfo.InputsCount; i++) { - const auto& input = stage.Inputs().Item(i); - YQL_ENSURE(input.Maybe<TDqCnBroadcast>() || (dqSource && input.Maybe<TDqSource>())); + const auto& input = stage.Inputs().Item(i); + YQL_ENSURE(input.Maybe<TDqCnBroadcast>() || (dqSource && input.Maybe<TDqSource>())); } const TExprNode* 
read = nullptr; - ui32 dqSourceInputIndex = std::numeric_limits<ui32>::max(); - if (dqSource) { - for (ui32 i = 0; i < stageInfo.InputsCount; ++i) { - const auto& input = stage.Inputs().Item(i); - if (const auto& maybeDqSource = input.Maybe<TDqSource>()) { - read = maybeDqSource.Cast().Ptr().Get(); - dqSourceInputIndex = i; - break; - } - } - YQL_ENSURE(dqSourceInputIndex < stageInfo.InputsCount); - } else { + ui32 dqSourceInputIndex = std::numeric_limits<ui32>::max(); + if (dqSource) { + for (ui32 i = 0; i < stageInfo.InputsCount; ++i) { + const auto& input = stage.Inputs().Item(i); + if (const auto& maybeDqSource = input.Maybe<TDqSource>()) { + read = maybeDqSource.Cast().Ptr().Get(); + dqSourceInputIndex = i; + break; + } + } + YQL_ENSURE(dqSourceInputIndex < stageInfo.InputsCount); + } else { if (const auto& wrap = FindNode(stage.Program().Ptr(), [](const TExprNode::TPtr& exprNode) { if (const auto wrap = TMaybeNode<TDqReadWrapBase>(exprNode)) { if (const auto flags = wrap.Cast().Flags()) @@ -437,20 +437,20 @@ namespace NYql::NDqs { return false; return true; - } + } return false; })) { read = wrap->Child(TDqReadWrapBase::idx_Input); } else { return false; } - } + } - const ui32 dataSourceChildIndex = dqSource ? 0 : 1; + const ui32 dataSourceChildIndex = dqSource ? 0 : 1; YQL_ENSURE(read->ChildrenSize() > 1); - YQL_ENSURE(read->Child(dataSourceChildIndex)->IsCallable("DataSource")); + YQL_ENSURE(read->Child(dataSourceChildIndex)->IsCallable("DataSource")); - auto dataSourceName = read->Child(dataSourceChildIndex)->Child(0)->Content(); + auto dataSourceName = read->Child(dataSourceChildIndex)->Child(0)->Content(); auto datasource = TypeContext->DataSourceMap.FindPtr(dataSourceName); YQL_ENSURE(datasource); const auto stageSettings = TDqStageSettings::Parse(stage); @@ -463,22 +463,22 @@ namespace NYql::NDqs { if (auto dqIntegration = (*datasource)->GetDqIntegration()) { TString clusterName; _MaxDataSizePerJob = Max(_MaxDataSizePerJob, dqIntegration->Partition(*settings, maxPartitions, *read, parts, &clusterName, ExprContext, canFallback)); - TMaybe<::google::protobuf::Any> sourceSettings; - TString sourceType; - if (dqSource) { - sourceSettings.ConstructInPlace(); - dqIntegration->FillSourceSettings(*read, *sourceSettings, sourceType); - YQL_ENSURE(!sourceSettings->type_url().empty(), "Data source provider \"" << dataSourceName << "\" did't fill dq source settings for its dq source node"); - YQL_ENSURE(sourceType, "Data source provider \"" << dataSourceName << "\" did't fill dq source settings type for its dq source node"); - } + TMaybe<::google::protobuf::Any> sourceSettings; + TString sourceType; + if (dqSource) { + sourceSettings.ConstructInPlace(); + dqIntegration->FillSourceSettings(*read, *sourceSettings, sourceType); + YQL_ENSURE(!sourceSettings->type_url().empty(), "Data source provider \"" << dataSourceName << "\" did't fill dq source settings for its dq source node"); + YQL_ENSURE(sourceType, "Data source provider \"" << dataSourceName << "\" did't fill dq source settings type for its dq source node"); + } for (const auto& p : parts) { auto& task = TasksGraph.AddTask(stageInfo); task.Meta.TaskParams[dataSourceName] = p; task.Meta.ClusterNameHint = clusterName; - if (dqSource) { - task.Inputs[dqSourceInputIndex].SourceSettings = sourceSettings; - task.Inputs[dqSourceInputIndex].SourceType = sourceType; - } + if (dqSource) { + task.Inputs[dqSourceInputIndex].SourceSettings = sourceSettings; + task.Inputs[dqSourceInputIndex].SourceType = sourceType; + } auto& transform = 
task.OutputTransform; transform.Type = stageSettings.TransformType; transform.FunctionName = stageSettings.TransformName; @@ -498,16 +498,16 @@ namespace NYql::NDqs { for (ui32 inputIndex = 0; inputIndex < stage.Inputs().Size(); ++inputIndex) { const auto& input = stage.Inputs().Item(inputIndex); - if (input.Maybe<TDqConnection>()) { - BUILD_CONNECTION(TDqCnUnionAll, BuildUnionAllChannels); + if (input.Maybe<TDqConnection>()) { + BUILD_CONNECTION(TDqCnUnionAll, BuildUnionAllChannels); BUILD_CONNECTION(TDqCnHashShuffle, BuildHashShuffleChannels); - BUILD_CONNECTION(TDqCnBroadcast, BuildBroadcastChannels); - BUILD_CONNECTION(TDqCnMap, BuildMapChannels); + BUILD_CONNECTION(TDqCnBroadcast, BuildBroadcastChannels); + BUILD_CONNECTION(TDqCnMap, BuildMapChannels); BUILD_CONNECTION(TDqCnMerge, BuildMergeChannels); - YQL_ENSURE(false, "Unknown stage connection type: " << input.Cast<NNodes::TCallable>().CallableName()); - } else { - YQL_ENSURE(input.Maybe<TDqSource>()); - } + YQL_ENSURE(false, "Unknown stage connection type: " << input.Cast<NNodes::TCallable>().CallableName()); + } else { + YQL_ENSURE(input.Maybe<TDqSource>()); + } } } @@ -564,7 +564,7 @@ namespace NYql::NDqs { channelDesc.SetId(channel.Id); channelDesc.SetSrcTaskId(channel.SrcTask); channelDesc.SetDstTaskId(channel.DstTask); - channelDesc.SetCheckpointingMode(channel.CheckpointingMode); + channelDesc.SetCheckpointingMode(channel.CheckpointingMode); if (channel.SrcTask) { NActors::ActorIdToProto(TasksGraph.GetTask(channel.SrcTask).ComputeActorId, @@ -633,18 +633,18 @@ namespace NYql::NDqs { break; } - case TTaskOutputType::Sink: { - YQL_ENSURE(output.Channels.empty()); - YQL_ENSURE(output.SinkType); - YQL_ENSURE(output.SinkSettings); - auto* sinkProto = outputDesc.MutableSink(); - sinkProto->SetType(output.SinkType); - *sinkProto->MutableSettings() = *output.SinkSettings; - break; - } - + case TTaskOutputType::Sink: { + YQL_ENSURE(output.Channels.empty()); + YQL_ENSURE(output.SinkType); + YQL_ENSURE(output.SinkSettings); + auto* sinkProto = outputDesc.MutableSink(); + sinkProto->SetType(output.SinkType); + *sinkProto->MutableSettings() = *output.SinkSettings; + break; + } + case TTaskOutputType::Undefined: { - YQL_ENSURE(false, "Unexpected task output type `TTaskOutputType::Undefined`"); + YQL_ENSURE(false, "Unexpected task output type `TTaskOutputType::Undefined`"); } } @@ -712,11 +712,11 @@ namespace NYql::NDqs { NActors::TActorId TDqsSingleExecutionPlanner::GetSourceID() const { - if (SourceID) { - return *SourceID; - } else { - return {}; - } + if (SourceID) { + return *SourceID; + } else { + return {}; + } } TString TDqsSingleExecutionPlanner::GetResultType(bool withTagged) const @@ -754,10 +754,10 @@ namespace NYql::NDqs { TVector<TDqTask> TGraphExecutionPlanner::GetTasks(const TVector<NActors::TActorId>& workers) { - if (ResultType) { - YQL_ENSURE(SourceId < workers.size()); - SourceID = workers[SourceId]; - } + if (ResultType) { + YQL_ENSURE(SourceId < workers.size()); + SourceID = workers[SourceId]; + } auto setActorId = [&](NYql::NDqProto::TEndpoint* endpoint) { if (endpoint->GetEndpointTypeCase() == NYql::NDqProto::TEndpoint::kActorId) { @@ -794,11 +794,11 @@ namespace NYql::NDqs { NActors::TActorId TGraphExecutionPlanner::GetSourceID() const { - if (SourceID) { - return *SourceID; - } else { - return {}; - } + if (SourceID) { + return *SourceID; + } else { + return {}; + } } TString TGraphExecutionPlanner::GetResultType(bool) const diff --git a/ydb/library/yql/providers/dq/planner/execution_planner.h 
b/ydb/library/yql/providers/dq/planner/execution_planner.h index 280600c8f7b..63b465384e7 100644 --- a/ydb/library/yql/providers/dq/planner/execution_planner.h +++ b/ydb/library/yql/providers/dq/planner/execution_planner.h @@ -58,8 +58,8 @@ namespace NYql::NDqs { void FillOutputDesc(NDqProto::TTaskOutput& outputDesc, const TTaskOutput& output); void GatherPhyMapping(THashMap<std::tuple<TString, TString>, TString>& clusters, THashMap<std::tuple<TString, TString, TString>, TString>& tables); - void BuildCheckpointingMode(); - bool IsEgressTask(const TDqsTasksGraph::TTaskType& task) const; + void BuildCheckpointingMode(); + bool IsEgressTask(const TDqsTasksGraph::TTaskType& task) const; private: TIntrusivePtr<TTypeAnnotationContext> TypeContext; @@ -124,7 +124,7 @@ namespace NYql::NDqs { private: TVector<NDqProto::TDqTask> Tasks; - ui64 SourceId = 0; + ui64 SourceId = 0; TString ResultType; NActors::TActorId ExecuterID; diff --git a/ydb/library/yql/providers/dq/provider/exec/yql_dq_exectransformer.cpp b/ydb/library/yql/providers/dq/provider/exec/yql_dq_exectransformer.cpp index 8759f442219..93b36c189a7 100644 --- a/ydb/library/yql/providers/dq/provider/exec/yql_dq_exectransformer.cpp +++ b/ydb/library/yql/providers/dq/provider/exec/yql_dq_exectransformer.cpp @@ -212,7 +212,7 @@ private: void AfterTypeAnnotation(TTransformationPipeline* pipeline) const final { pipeline->Add(NDq::CreateDqBuildPhyStagesTransformer(false), "Build-Phy"); - pipeline->Add(NDqs::CreateDqsRewritePhyCallablesTransformer(), "Rewrite-Phy-Callables"); + pipeline->Add(NDqs::CreateDqsRewritePhyCallablesTransformer(), "Rewrite-Phy-Callables"); } void AfterOptimize(TTransformationPipeline*) const final {} @@ -229,7 +229,7 @@ public: AddHandler({TStringBuf("Result")}, RequireNone(), Hndl(&TInMemoryExecTransformer::HandleResult)); AddHandler({TStringBuf("Pull")}, RequireNone(), Hndl(&TInMemoryExecTransformer::HandlePull)); AddHandler({TDqCnResult::CallableName()}, RequireNone(), Pass()); - AddHandler({TDqQuery::CallableName()}, RequireFirst(), Pass()); + AddHandler({TDqQuery::CallableName()}, RequireFirst(), Pass()); } private: @@ -838,19 +838,19 @@ private: TInstant startTime = TInstant::Now(); auto pull = TPull(input); - YQL_ENSURE(!TMaybeNode<TDqQuery>(pull.Input().Ptr()) || State->Settings->EnableComputeActor.Get().GetOrElse(false), - "DqQuery is not supported with worker actor"); - + YQL_ENSURE(!TMaybeNode<TDqQuery>(pull.Input().Ptr()) || State->Settings->EnableComputeActor.Get().GetOrElse(false), + "DqQuery is not supported with worker actor"); + TString type; TVector<TString> columns; - GetResultType(&type, &columns, pull.Ref(), pull.Input().Ref()); + GetResultType(&type, &columns, pull.Ref(), pull.Input().Ref()); - const bool oneGraphPerQuery = State->Settings->_OneGraphPerQuery.Get().GetOrElse(false); - size_t graphsCount = 0; + const bool oneGraphPerQuery = State->Settings->_OneGraphPerQuery.Get().GetOrElse(false); + size_t graphsCount = 0; THashMap<ui32, ui32> allPublicIds; THashMap<ui64, ui32> stage2publicId; bool hasStageError = false; - VisitExpr(pull.Ptr(), [&](const TExprNode::TPtr& node) { + VisitExpr(pull.Ptr(), [&](const TExprNode::TPtr& node) { if (TResTransientBase::Match(node.Get())) return false; if (const TExprBase expr(node); expr.Maybe<TDqConnection>()) { @@ -867,26 +867,26 @@ private: allPublicIds.emplace(*publicId, 0U); } } - } else if (oneGraphPerQuery) { - if (expr.Maybe<TDqCnResult>() || expr.Maybe<TDqQuery>()) { - ++graphsCount; - } + } else if (oneGraphPerQuery) { + if (expr.Maybe<TDqCnResult>() 
|| expr.Maybe<TDqQuery>()) { + ++graphsCount; + } } return true; }); - YQL_ENSURE(!oneGraphPerQuery || graphsCount == 1, "Internal error: only one graph per query is allowed"); + YQL_ENSURE(!oneGraphPerQuery || graphsCount == 1, "Internal error: only one graph per query is allowed"); if (hasStageError) { return SyncError(); } - auto optimizedInput = pull.Input().Ptr(); + auto optimizedInput = pull.Input().Ptr(); THashMap<TString, TString> secureParams; NCommon::FillSecureParams(optimizedInput, *State->TypeCtx, secureParams); optimizedInput = ctx.ShallowCopy(*optimizedInput); - optimizedInput->SetTypeAnn(pull.Input().Ref().GetTypeAnn()); - optimizedInput->CopyConstraints(pull.Input().Ref()); + optimizedInput->SetTypeAnn(pull.Input().Ref().GetTypeAnn()); + optimizedInput->CopyConstraints(pull.Input().Ref()); TDqsPipelineConfigurator peepholeConfig; TPeepholeSettings peepholeSettings; @@ -918,7 +918,7 @@ private: State->TypeCtx, ctx, State->FunctionRegistry, optimizedInput); - // exprRoot must be DqCnResult or DqQuery + // exprRoot must be DqCnResult or DqQuery executionPlanner->SetPublicIds(stage2publicId); @@ -1006,7 +1006,7 @@ private: return FallbackWithMessage(pull.Ref(), "Too big attachment", ctx); } - IDataProvider::TFillSettings fillSettings = NCommon::GetFillSettings(pull.Ref()); + IDataProvider::TFillSettings fillSettings = NCommon::GetFillSettings(pull.Ref()); settings = settings->WithFillSettings(fillSettings); if (const auto optLLVM = State->TypeCtx->OptLLVM) { diff --git a/ydb/library/yql/providers/dq/provider/yql_dq_datasink.cpp b/ydb/library/yql/providers/dq/provider/yql_dq_datasink.cpp index a8b5e598a08..1902650bb9f 100644 --- a/ydb/library/yql/providers/dq/provider/yql_dq_datasink.cpp +++ b/ydb/library/yql/providers/dq/provider/yql_dq_datasink.cpp @@ -223,14 +223,14 @@ public: return true; } - if (TDqQuery::Match(&node)) { - auto stagesList = node.ChildPtr(TDqQuery::idx_SinkStages); - for (size_t i = 0; i < stagesList->ChildrenSize(); ++i) { - children.push_back(stagesList->ChildPtr(i)); - } - return true; - } - + if (TDqQuery::Match(&node)) { + auto stagesList = node.ChildPtr(TDqQuery::idx_SinkStages); + for (size_t i = 0; i < stagesList->ChildrenSize(); ++i) { + children.push_back(stagesList->ChildPtr(i)); + } + return true; + } + return false; } diff --git a/ydb/library/yql/providers/dq/provider/yql_dq_datasink_type_ann.cpp b/ydb/library/yql/providers/dq/provider/yql_dq_datasink_type_ann.cpp index c8d9786a7a0..ca910c8edec 100644 --- a/ydb/library/yql/providers/dq/provider/yql_dq_datasink_type_ann.cpp +++ b/ydb/library/yql/providers/dq/provider/yql_dq_datasink_type_ann.cpp @@ -31,9 +31,9 @@ public: AddHandler({TDqPhyMapJoin::CallableName()}, Hndl(&NDq::AnnotateDqMapOrDictJoin)); AddHandler({TDqPhyCrossJoin::CallableName()}, Hndl(&NDq::AnnotateDqCrossJoin)); AddHandler({TDqPhyJoinDict::CallableName()}, Hndl(&NDq::AnnotateDqMapOrDictJoin)); - AddHandler({TDqSink::CallableName()}, Hndl(&NDq::AnnotateDqSink)); + AddHandler({TDqSink::CallableName()}, Hndl(&NDq::AnnotateDqSink)); AddHandler({TDqWrite::CallableName()}, Hndl(&TDqsDataSinkTypeAnnotationTransformer::AnnotateDqWrite)); - AddHandler({TDqQuery::CallableName()}, Hndl(&NDq::AnnotateDqQuery)); + AddHandler({TDqQuery::CallableName()}, Hndl(&NDq::AnnotateDqQuery)); } private: diff --git a/ydb/library/yql/providers/dq/provider/yql_dq_datasource.cpp b/ydb/library/yql/providers/dq/provider/yql_dq_datasource.cpp index aea55435de9..25e5387a6fb 100644 --- a/ydb/library/yql/providers/dq/provider/yql_dq_datasource.cpp +++ 
b/ydb/library/yql/providers/dq/provider/yql_dq_datasource.cpp @@ -162,7 +162,7 @@ public: } bool CanExecute(const TExprNode& node) override { - return TDqCnResult::Match(&node) || TDqQuery::Match(&node); + return TDqCnResult::Match(&node) || TDqQuery::Match(&node); } bool CanParse(const TExprNode& node) override { diff --git a/ydb/library/yql/providers/dq/provider/yql_dq_datasource_type_ann.cpp b/ydb/library/yql/providers/dq/provider/yql_dq_datasource_type_ann.cpp index 6e03017ccb1..388280a09d4 100644 --- a/ydb/library/yql/providers/dq/provider/yql_dq_datasource_type_ann.cpp +++ b/ydb/library/yql/providers/dq/provider/yql_dq_datasource_type_ann.cpp @@ -23,7 +23,7 @@ public: AddHandler({TDqReadWrap::CallableName()}, Hndl(&TDqsDataSourceTypeAnnotationTransformer::HandleReadWrap)); AddHandler({TDqReadWideWrap::CallableName()}, Hndl(&TDqsDataSourceTypeAnnotationTransformer::HandleWideReadWrap)); AddHandler({TCoConfigure::CallableName()}, Hndl(&TDqsDataSourceTypeAnnotationTransformer::HandleConfig)); - AddHandler({TDqSource::CallableName()}, Hndl(&NDq::AnnotateDqSource)); + AddHandler({TDqSource::CallableName()}, Hndl(&NDq::AnnotateDqSource)); } private: diff --git a/ydb/library/yql/providers/dq/runtime/task_command_executor.cpp b/ydb/library/yql/providers/dq/runtime/task_command_executor.cpp index 9b41920b177..794f0a3e068 100644 --- a/ydb/library/yql/providers/dq/runtime/task_command_executor.cpp +++ b/ydb/library/yql/providers/dq/runtime/task_command_executor.cpp @@ -542,13 +542,13 @@ public: //s->SetFinishTs(stats->FinishTs.MilliSeconds()); s->SetBuildCpuTimeUs(stats->BuildCpuTime.MicroSeconds()); s->SetComputeCpuTimeUs(stats->ComputeCpuTime.MicroSeconds()); - - // All run statuses metrics - s->SetPendingInputTimeUs(stats->RunStatusTimeMetrics[NDq::ERunStatus::PendingInput].MicroSeconds()); - s->SetPendingOutputTimeUs(stats->RunStatusTimeMetrics[NDq::ERunStatus::PendingOutput].MicroSeconds()); - s->SetFinishTimeUs(stats->RunStatusTimeMetrics[NDq::ERunStatus::Finished].MicroSeconds()); - static_assert(NDq::TRunStatusTimeMetrics::StatusesCount == 3); // Add all statuses here - + + // All run statuses metrics + s->SetPendingInputTimeUs(stats->RunStatusTimeMetrics[NDq::ERunStatus::PendingInput].MicroSeconds()); + s->SetPendingOutputTimeUs(stats->RunStatusTimeMetrics[NDq::ERunStatus::PendingOutput].MicroSeconds()); + s->SetFinishTimeUs(stats->RunStatusTimeMetrics[NDq::ERunStatus::Finished].MicroSeconds()); + static_assert(NDq::TRunStatusTimeMetrics::StatusesCount == 3); // Add all statuses here + //s->SetTotalTime(stats->TotalTime.MilliSeconds()); s->SetWaitTimeUs(stats->WaitTime.MicroSeconds()); s->SetWaitOutputTimeUs(stats->WaitOutputTime.MicroSeconds()); diff --git a/ydb/library/yql/providers/dq/task_runner/tasks_runner_local.cpp b/ydb/library/yql/providers/dq/task_runner/tasks_runner_local.cpp index ab8769f44ef..244abc86c52 100644 --- a/ydb/library/yql/providers/dq/task_runner/tasks_runner_local.cpp +++ b/ydb/library/yql/providers/dq/task_runner/tasks_runner_local.cpp @@ -249,8 +249,8 @@ public: ITaskRunner::TPtr GetOld(const NDqProto::TDqTask& task, const TString& traceId) override { return new TLocalTaskRunner(task, Get(task, traceId)); - } - + } + TIntrusivePtr<NDq::IDqTaskRunner> Get(const NDqProto::TDqTask& task, const TString& traceId) override { Y_UNUSED(traceId); NDq::TDqTaskRunnerSettings settings; @@ -266,15 +266,15 @@ public: if ("OptLLVM" == s.GetName()) settings.OptLLVM = s.GetValue(); } - for (const auto& x : taskMeta.GetSecureParams()) { + for (const auto& x : 
taskMeta.GetSecureParams()) { settings.SecureParams[x.first] = x.second; } - for (const auto& x : taskMeta.GetTaskParams()) { - settings.TaskParams[x.first] = x.second; + for (const auto& x : taskMeta.GetTaskParams()) { + settings.TaskParams[x.first] = x.second; } auto ctx = ExecutionContext; - ctx.FuncProvider = TaskTransformFactory(settings.TaskParams, ctx.FuncRegistry); + ctx.FuncProvider = TaskTransformFactory(settings.TaskParams, ctx.FuncRegistry); return MakeDqTaskRunner(ctx, settings, { }); } diff --git a/ydb/library/yql/providers/dq/task_runner/tasks_runner_pipe.cpp b/ydb/library/yql/providers/dq/task_runner/tasks_runner_pipe.cpp index cdcbd32ef5d..ba7e3749422 100644 --- a/ydb/library/yql/providers/dq/task_runner/tasks_runner_pipe.cpp +++ b/ydb/library/yql/providers/dq/task_runner/tasks_runner_pipe.cpp @@ -102,9 +102,9 @@ void FromProto(TDqTaskRunnerStats* s, const T& f) //s->FinishTs = TInstant::MilliSeconds(f.GetFinishTs()); s->BuildCpuTime = TDuration::MicroSeconds(f.GetBuildCpuTimeUs()); s->ComputeCpuTime = TDuration::MicroSeconds(f.GetComputeCpuTimeUs()); - s->RunStatusTimeMetrics.Load(ERunStatus::PendingInput, TDuration::MicroSeconds(f.GetPendingInputTimeUs())); - s->RunStatusTimeMetrics.Load(ERunStatus::PendingOutput, TDuration::MicroSeconds(f.GetPendingOutputTimeUs())); - s->RunStatusTimeMetrics.Load(ERunStatus::Finished, TDuration::MicroSeconds(f.GetFinishTimeUs())); + s->RunStatusTimeMetrics.Load(ERunStatus::PendingInput, TDuration::MicroSeconds(f.GetPendingInputTimeUs())); + s->RunStatusTimeMetrics.Load(ERunStatus::PendingOutput, TDuration::MicroSeconds(f.GetPendingOutputTimeUs())); + s->RunStatusTimeMetrics.Load(ERunStatus::Finished, TDuration::MicroSeconds(f.GetFinishTimeUs())); //s->TotalTime = TDuration::MilliSeconds(f.GetTotalTime()); s->WaitTime = TDuration::MicroSeconds(f.GetWaitTimeUs()); s->WaitOutputTime = TDuration::MicroSeconds(f.GetWaitOutputTimeUs()); @@ -669,18 +669,18 @@ public: ythrow yexception() << "unimplemented"; } - void Pause() override { - Y_FAIL("Checkpoints are not supported"); - } - - void Resume() override { - Y_FAIL("Checkpoints are not supported"); - } - - bool IsPaused() const override { - return false; - } - + void Pause() override { + Y_FAIL("Checkpoints are not supported"); + } + + void Resume() override { + Y_FAIL("Checkpoints are not supported"); + } + + bool IsPaused() const override { + return false; + } + private: IInputChannel::TPtr Delegate; ui64 TaskId; @@ -846,18 +846,18 @@ public: return InputType; } - void Pause() override { - Y_FAIL("Checkpoints are not supported"); - } - - void Resume() override { - Y_FAIL("Checkpoints are not supported"); - } - - bool IsPaused() const override { - return false; - } - + void Pause() override { + Y_FAIL("Checkpoints are not supported"); + } + + void Resume() override { + Y_FAIL("Checkpoints are not supported"); + } + + bool IsPaused() const override { + return false; + } + private: ui64 TaskId; ui64 InputIndex; @@ -950,7 +950,7 @@ public: ythrow yexception() << "unimplemented"; }; - // can throw TDqChannelStorageException + // can throw TDqChannelStorageException void Push(NUdf::TUnboxedValue&& value) override { Y_UNUSED(value); ythrow yexception() << "unimplemented"; @@ -988,7 +988,7 @@ public: TaskRunner->RaiseException(); } } - // can throw TDqChannelStorageException + // can throw TDqChannelStorageException [[nodiscard]] bool Pop(NDqProto::TData& data, ui64 bytes) override { try { @@ -1007,7 +1007,7 @@ public: // TODO: remove this method and create independent Data- and Stream-query 
implementations. // Stream-query implementation should be without PopAll method. // Data-query implementation should be one-shot for Pop (a-la PopAll) call and without ChannelStorage. - // can throw TDqChannelStorageException + // can throw TDqChannelStorageException [[nodiscard]] bool PopAll(NDqProto::TData& data) override { Y_UNUSED(data); @@ -1282,7 +1282,7 @@ public: { Alloc.Release(); StderrReader->Start(); - InitTaskMeta(); + InitTaskMeta(); } ~TTaskRunner() { @@ -1372,16 +1372,16 @@ public: const NMiniKQL::THolderFactory& GetHolderFactory() const override { return HolderFactory; - } - + } + const THashMap<TString, TString>& GetSecureParams() const override { - return SecureParams; - } - + return SecureParams; + } + const THashMap<TString, TString>& GetTaskParams() const override { - return TaskParams; - } - + return TaskParams; + } + TGuard<NKikimr::NMiniKQL::TScopedAlloc> BindAllocator(TMaybe<ui64> memoryLimit) override { auto guard = TypeEnv.BindAllocator(); if (memoryLimit) { @@ -1440,24 +1440,24 @@ public: } private: - void InitTaskMeta() { - Yql::DqsProto::TTaskMeta taskMeta; - Task.GetMeta().UnpackTo(&taskMeta); - - for (const auto& x : taskMeta.GetSecureParams()) { - SecureParams[x.first] = x.second; - } - - for (const auto& x : taskMeta.GetTaskParams()) { - TaskParams[x.first] = x.second; - } - } - -private: + void InitTaskMeta() { + Yql::DqsProto::TTaskMeta taskMeta; + Task.GetMeta().UnpackTo(&taskMeta); + + for (const auto& x : taskMeta.GetSecureParams()) { + SecureParams[x.first] = x.second; + } + + for (const auto& x : taskMeta.GetTaskParams()) { + TaskParams[x.first] = x.second; + } + } + +private: const TString TraceId; NDqProto::TDqTask Task; - THashMap<TString, TString> SecureParams; - THashMap<TString, TString> TaskParams; + THashMap<TString, TString> SecureParams; + THashMap<TString, TString> TaskParams; NKikimr::NMiniKQL::TScopedAlloc Alloc; NKikimr::NMiniKQL::TTypeEnvironment TypeEnv; @@ -1535,7 +1535,7 @@ public: return channel; } - IDqSource::TPtr GetSource(ui64 inputIndex) override { + IDqSource::TPtr GetSource(ui64 inputIndex) override { auto& source = Sources[inputIndex]; if (!source) { source = new TDqSource( @@ -1545,8 +1545,8 @@ public: Stats.Sources[inputIndex] = source->GetStats(); } return source; - } - + } + IDqOutputChannel::TPtr GetOutputChannel(ui64 channelId) override { auto& channel = OutputChannels[channelId]; @@ -1561,7 +1561,7 @@ public: return channel; } - IDqSink::TPtr GetSink(ui64 outputIndex) override { + IDqSink::TPtr GetSink(ui64 outputIndex) override { auto& sink = Sinks[outputIndex]; if (!sink) { sink = new TDqSink( @@ -1571,24 +1571,24 @@ public: // Stats.Sinks[outputIndex] = sink->GetStats(); } return sink; - } - + } + const NKikimr::NMiniKQL::TTypeEnvironment& GetTypeEnv() const override { return Delegate->GetTypeEnv(); } - const NKikimr::NMiniKQL::THolderFactory& GetHolderFactory() const override { - return Delegate->GetHolderFactory(); - } - - const THashMap<TString, TString>& GetSecureParams() const override { - return Delegate->GetSecureParams(); - } - - const THashMap<TString, TString>& GetTaskParams() const override { - return Delegate->GetTaskParams(); - } - + const NKikimr::NMiniKQL::THolderFactory& GetHolderFactory() const override { + return Delegate->GetHolderFactory(); + } + + const THashMap<TString, TString>& GetSecureParams() const override { + return Delegate->GetSecureParams(); + } + + const THashMap<TString, TString>& GetTaskParams() const override { + return Delegate->GetTaskParams(); + } + 
TGuard<NKikimr::NMiniKQL::TScopedAlloc> BindAllocator(TMaybe<ui64> memoryLimit) override { return Delegate->BindAllocator(memoryLimit); } @@ -1597,9 +1597,9 @@ public: return Delegate->IsAllocatorAttached(); } - void UpdateStats() override { - } - + void UpdateStats() override { + } + const TDqTaskRunnerStats* GetStats() const override { try { diff --git a/ydb/library/yql/providers/dq/worker_manager/local_worker_manager.cpp b/ydb/library/yql/providers/dq/worker_manager/local_worker_manager.cpp index 5ecf0a4a852..d7b0a822399 100644 --- a/ydb/library/yql/providers/dq/worker_manager/local_worker_manager.cpp +++ b/ydb/library/yql/providers/dq/worker_manager/local_worker_manager.cpp @@ -40,7 +40,7 @@ static_assert(sizeof(TDqLocalResourceId) == 8); class TLocalWorkerManager: public TWorkerManagerCommon<TLocalWorkerManager> { public: - static constexpr char ActorName[] = "YQL_DQ_LWM"; + static constexpr char ActorName[] = "YQL_DQ_LWM"; TLocalWorkerManager(const TLocalWorkerManagerOptions& options) : TWorkerManagerCommon<TLocalWorkerManager>(&TLocalWorkerManager::Handler) diff --git a/ydb/library/yql/providers/dq/worker_manager/local_worker_manager.h b/ydb/library/yql/providers/dq/worker_manager/local_worker_manager.h index 144217305fb..8debd4f2470 100644 --- a/ydb/library/yql/providers/dq/worker_manager/local_worker_manager.h +++ b/ydb/library/yql/providers/dq/worker_manager/local_worker_manager.h @@ -2,7 +2,7 @@ #include <ydb/library/yql/dq/actors/compute/dq_compute_actor_sources.h> #include <ydb/library/yql/dq/actors/compute/dq_compute_actor_sinks.h> - + #include <ydb/library/yql/providers/dq/worker_manager/interface/events.h> #include <ydb/library/yql/providers/dq/worker_manager/interface/counters.h> diff --git a/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp b/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp index e26956545e9..8ffdcafabd6 100644 --- a/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp +++ b/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp @@ -16,7 +16,7 @@ #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h> #include <ydb/public/sdk/cpp/client/ydb_types/credentials/credentials.h> - + #include <library/cpp/actors/core/actor.h> #include <library/cpp/actors/core/event_local.h> #include <library/cpp/actors/core/events.h> @@ -97,96 +97,96 @@ public: const THolderFactory& holderFactory, NPq::NProto::TDqPqTopicSource&& sourceParams, NPq::NProto::TDqReadTaskParams&& readParams, - NYdb::TDriver driver, + NYdb::TDriver driver, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory, ICallbacks* callbacks, i64 bufferSize) : TActor<TDqPqReadActor>(&TDqPqReadActor::StateFunc) , InputIndex(inputIndex) , TxId(txId) - , BufferSize(bufferSize) + , BufferSize(bufferSize) , HolderFactory(holderFactory) , Driver(std::move(driver)) , CredentialsProviderFactory(std::move(credentialsProviderFactory)) , SourceParams(std::move(sourceParams)) , ReadParams(std::move(readParams)) - , StartingMessageTimestamp(TInstant::Now()) + , StartingMessageTimestamp(TInstant::Now()) , Callbacks(callbacks) { Y_UNUSED(HolderFactory); - } - - NYdb::NPersQueue::TPersQueueClientSettings GetPersQueueClientSettings() const { + } + + NYdb::NPersQueue::TPersQueueClientSettings GetPersQueueClientSettings() const { NYdb::NPersQueue::TPersQueueClientSettings opts; opts.Database(SourceParams.GetDatabase()) .DiscoveryEndpoint(SourceParams.GetEndpoint()) .EnableSsl(SourceParams.GetUseSsl()) .CredentialsProviderFactory(CredentialsProviderFactory); - return 
opts; + return opts; } static constexpr char ActorName[] = "DQ_PQ_READ_ACTOR"; public: - void SaveState(const NDqProto::TCheckpoint& checkpoint, NDqProto::TSourceState& state) override { - NPq::NProto::TDqPqTopicSourceState stateProto; - - NPq::NProto::TDqPqTopicSourceState::TTopicDescription* topic = stateProto.AddTopics(); - topic->SetDatabaseId(SourceParams.GetDatabaseId()); - topic->SetEndpoint(SourceParams.GetEndpoint()); - topic->SetDatabase(SourceParams.GetDatabase()); - topic->SetTopicPath(SourceParams.GetTopicPath()); - + void SaveState(const NDqProto::TCheckpoint& checkpoint, NDqProto::TSourceState& state) override { + NPq::NProto::TDqPqTopicSourceState stateProto; + + NPq::NProto::TDqPqTopicSourceState::TTopicDescription* topic = stateProto.AddTopics(); + topic->SetDatabaseId(SourceParams.GetDatabaseId()); + topic->SetEndpoint(SourceParams.GetEndpoint()); + topic->SetDatabase(SourceParams.GetDatabase()); + topic->SetTopicPath(SourceParams.GetTopicPath()); + for (const auto& [clusterAndPartition, offset] : PartitionToOffset) { const auto& [cluster, partition] = clusterAndPartition; - NPq::NProto::TDqPqTopicSourceState::TPartitionReadState* partitionState = stateProto.AddPartitions(); - partitionState->SetTopicIndex(0); // Now we are supporting only one topic per source. - partitionState->SetCluster(cluster); - partitionState->SetPartition(partition); - partitionState->SetOffset(offset); + NPq::NProto::TDqPqTopicSourceState::TPartitionReadState* partitionState = stateProto.AddPartitions(); + partitionState->SetTopicIndex(0); // Now we are supporting only one topic per source. + partitionState->SetCluster(cluster); + partitionState->SetPartition(partition); + partitionState->SetOffset(offset); } - stateProto.SetStartingMessageTimestampMs(StartingMessageTimestamp.MilliSeconds()); - - TString stateBlob; - YQL_ENSURE(stateProto.SerializeToString(&stateBlob)); - - auto* data = state.AddData()->MutableStateData(); - data->SetVersion(StateVersion); - data->SetBlob(stateBlob); - + stateProto.SetStartingMessageTimestampMs(StartingMessageTimestamp.MilliSeconds()); + + TString stateBlob; + YQL_ENSURE(stateProto.SerializeToString(&stateBlob)); + + auto* data = state.AddData()->MutableStateData(); + data->SetVersion(StateVersion); + data->SetBlob(stateBlob); + DeferredCommits.emplace(checkpoint.GetId(), std::move(CurrentDeferredCommit)); CurrentDeferredCommit = NYdb::NPersQueue::TDeferredCommit(); } - void LoadState(const NDqProto::TSourceState& state) override { - TInstant minStartingMessageTs = state.DataSize() ? 
TInstant::Max() : StartingMessageTimestamp; - for (const auto& stateData : state.GetData()) { - const auto& data = stateData.GetStateData(); - if (data.GetVersion() == StateVersion) { // Current version - NPq::NProto::TDqPqTopicSourceState stateProto; - YQL_ENSURE(stateProto.ParseFromString(data.GetBlob()), "Serialized state is corrupted"); - YQL_ENSURE(stateProto.TopicsSize() == 1, "One topic per source is expected"); - PartitionToOffset.reserve(PartitionToOffset.size() + stateProto.PartitionsSize()); - for (const NPq::NProto::TDqPqTopicSourceState::TPartitionReadState& partitionProto : stateProto.GetPartitions()) { - ui64& offset = PartitionToOffset[TPartitionKey{partitionProto.GetCluster(), partitionProto.GetPartition()}]; - if (offset) { - offset = Min(offset, partitionProto.GetOffset()); - } else { - offset = partitionProto.GetOffset(); - } - } - minStartingMessageTs = Min(minStartingMessageTs, TInstant::MilliSeconds(stateProto.GetStartingMessageTimestampMs())); - } else { - ythrow yexception() << "Invalid state version " << data.GetVersion(); - } - } - StartingMessageTimestamp = minStartingMessageTs; - if (ReadSession) { - ReadSession.reset(); - GetReadSession(); - } + void LoadState(const NDqProto::TSourceState& state) override { + TInstant minStartingMessageTs = state.DataSize() ? TInstant::Max() : StartingMessageTimestamp; + for (const auto& stateData : state.GetData()) { + const auto& data = stateData.GetStateData(); + if (data.GetVersion() == StateVersion) { // Current version + NPq::NProto::TDqPqTopicSourceState stateProto; + YQL_ENSURE(stateProto.ParseFromString(data.GetBlob()), "Serialized state is corrupted"); + YQL_ENSURE(stateProto.TopicsSize() == 1, "One topic per source is expected"); + PartitionToOffset.reserve(PartitionToOffset.size() + stateProto.PartitionsSize()); + for (const NPq::NProto::TDqPqTopicSourceState::TPartitionReadState& partitionProto : stateProto.GetPartitions()) { + ui64& offset = PartitionToOffset[TPartitionKey{partitionProto.GetCluster(), partitionProto.GetPartition()}]; + if (offset) { + offset = Min(offset, partitionProto.GetOffset()); + } else { + offset = partitionProto.GetOffset(); + } + } + minStartingMessageTs = Min(minStartingMessageTs, TInstant::MilliSeconds(stateProto.GetStartingMessageTimestampMs())); + } else { + ythrow yexception() << "Invalid state version " << data.GetVersion(); + } + } + StartingMessageTimestamp = minStartingMessageTs; + if (ReadSession) { + ReadSession.reset(); + GetReadSession(); + } } void CommitState(const NDqProto::TCheckpoint& checkpoint) override { @@ -201,43 +201,43 @@ public: return InputIndex; }; - NYdb::NPersQueue::TPersQueueClient& GetPersQueueClient() { - if (!PersQueueClient) { - PersQueueClient = std::make_unique<NYdb::NPersQueue::TPersQueueClient>(Driver, GetPersQueueClientSettings()); - } - return *PersQueueClient; - } - - NYdb::NPersQueue::IReadSession& GetReadSession() { - if (!ReadSession) { - ReadSession = GetPersQueueClient().CreateReadSession(GetReadSessionSettings()); - } - return *ReadSession; - } - + NYdb::NPersQueue::TPersQueueClient& GetPersQueueClient() { + if (!PersQueueClient) { + PersQueueClient = std::make_unique<NYdb::NPersQueue::TPersQueueClient>(Driver, GetPersQueueClientSettings()); + } + return *PersQueueClient; + } + + NYdb::NPersQueue::IReadSession& GetReadSession() { + if (!ReadSession) { + ReadSession = GetPersQueueClient().CreateReadSession(GetReadSessionSettings()); + } + return *ReadSession; + } + private: STRICT_STFUNC(StateFunc, hFunc(TEvPrivate::TEvSourceDataReady, 
Handle); ) void Handle(TEvPrivate::TEvSourceDataReady::TPtr& ev) { - SubscribedOnEvent = false; + SubscribedOnEvent = false; Y_UNUSED(ev); Callbacks->OnNewSourceDataArrived(InputIndex); } - // IActor & IDqSourceActor - void PassAway() override { // Is called from Compute Actor - if (ReadSession) { - ReadSession->Close(TDuration::Zero()); - ReadSession.reset(); - } + // IActor & IDqSourceActor + void PassAway() override { // Is called from Compute Actor + if (ReadSession) { + ReadSession->Close(TDuration::Zero()); + ReadSession.reset(); + } PersQueueClient.reset(); - TActor<TDqPqReadActor>::PassAway(); + TActor<TDqPqReadActor>::PassAway(); } i64 GetSourceData(NKikimr::NMiniKQL::TUnboxedValueVector& buffer, bool&, i64 freeSpace) override { - auto events = GetReadSession().GetEvents(false, TMaybe<size_t>(), static_cast<size_t>(Max<i64>(freeSpace, 0))); + auto events = GetReadSession().GetEvents(false, TMaybe<size_t>(), static_cast<size_t>(Max<i64>(freeSpace, 0))); ui32 batchSize = 0; for (auto& event : events) { @@ -259,7 +259,7 @@ private: } private: - NYdb::NPersQueue::TReadSessionSettings GetReadSessionSettings() const { + NYdb::NPersQueue::TReadSessionSettings GetReadSessionSettings() const { NYdb::NPersQueue::TTopicReadSettings topicReadSettings; topicReadSettings.Path(SourceParams.GetTopicPath()); ui64 currentPartition = ReadParams.GetPartitioningParams().GetEachTopicPartitionGroupId(); @@ -272,8 +272,8 @@ private: .DisableClusterDiscovery(SourceParams.GetClusterType() == NPq::NProto::DataStreams) .AppendTopics(topicReadSettings) .ConsumerName(SourceParams.GetConsumerName()) - .MaxMemoryUsageBytes(BufferSize) - .StartingMessageTimestamp(StartingMessageTimestamp); + .MaxMemoryUsageBytes(BufferSize) + .StartingMessageTimestamp(StartingMessageTimestamp); } void UpdateStateWithNewReadData(const NYdb::NPersQueue::TReadSessionEvent::TDataReceivedEvent& event) { @@ -293,13 +293,13 @@ private: } void SubscribeOnNextEvent() { - if (!SubscribedOnEvent) { - SubscribedOnEvent = true; - NActors::TActorSystem* actorSystem = NActors::TActivationContext::ActorSystem(); - EventFuture = GetReadSession().WaitEvent().Subscribe([actorSystem, selfId = SelfId()](const auto&){ - actorSystem->Send(selfId, new TEvPrivate::TEvSourceDataReady()); - }); - } + if (!SubscribedOnEvent) { + SubscribedOnEvent = true; + NActors::TActorSystem* actorSystem = NActors::TActivationContext::ActorSystem(); + EventFuture = GetReadSession().WaitEvent().Subscribe([actorSystem, selfId = SelfId()](const auto&){ + actorSystem->Send(selfId, new TEvPrivate::TEvSourceDataReady()); + }); + } } struct TPQEventProcessor { @@ -349,9 +349,9 @@ private: private: const ui64 InputIndex; const TString TxId; - const i64 BufferSize; + const i64 BufferSize; const THolderFactory& HolderFactory; - NYdb::TDriver Driver; + NYdb::TDriver Driver; std::shared_ptr<NYdb::ICredentialsProviderFactory> CredentialsProviderFactory; const NPq::NProto::TDqPqTopicSource SourceParams; const NPq::NProto::TDqReadTaskParams ReadParams; @@ -359,20 +359,20 @@ private: std::shared_ptr<NYdb::NPersQueue::IReadSession> ReadSession; NThreading::TFuture<void> EventFuture; THashMap<TPartitionKey, ui64> PartitionToOffset; // {cluster, partition} -> offset of next event. 
- TInstant StartingMessageTimestamp; + TInstant StartingMessageTimestamp; ICallbacks* const Callbacks; std::queue<std::pair<ui64, NYdb::NPersQueue::TDeferredCommit>> DeferredCommits; NYdb::NPersQueue::TDeferredCommit CurrentDeferredCommit; - bool SubscribedOnEvent = false; + bool SubscribedOnEvent = false; }; -std::pair<IDqSourceActor*, NActors::IActor*> CreateDqPqReadActor( - NPq::NProto::TDqPqTopicSource&& settings, +std::pair<IDqSourceActor*, NActors::IActor*> CreateDqPqReadActor( + NPq::NProto::TDqPqTopicSource&& settings, ui64 inputIndex, TTxId txId, const THashMap<TString, TString>& secureParams, const THashMap<TString, TString>& taskParams, - NYdb::TDriver driver, + NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory, IDqSourceActor::ICallbacks* callbacks, const NKikimr::NMiniKQL::THolderFactory& holderFactory, @@ -382,7 +382,7 @@ std::pair<IDqSourceActor*, NActors::IActor*> CreateDqPqReadActor( auto taskParamsIt = taskParams.find("pq"); YQL_ENSURE(taskParamsIt != taskParams.end(), "Failed to get pq task params"); - NPq::NProto::TDqReadTaskParams readTaskParamsMsg; + NPq::NProto::TDqReadTaskParams readTaskParamsMsg; YQL_ENSURE(readTaskParamsMsg.ParseFromString(taskParamsIt->second), "Failed to parse DqPqRead task params"); const TString& tokenName = settings.GetToken().GetName(); @@ -404,8 +404,8 @@ std::pair<IDqSourceActor*, NActors::IActor*> CreateDqPqReadActor( return {actor, actor}; } -void RegisterDqPqReadActorFactory(TDqSourceFactory& factory, NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory) { - factory.Register<NPq::NProto::TDqPqTopicSource>("PqSource", +void RegisterDqPqReadActorFactory(TDqSourceFactory& factory, NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory) { + factory.Register<NPq::NProto::TDqPqTopicSource>("PqSource", [driver = std::move(driver), credentialsFactory = std::move(credentialsFactory)]( NPq::NProto::TDqPqTopicSource&& settings, IDqSourceActorFactory::TArguments&& args) diff --git a/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.h b/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.h index d7e2c82dd04..8520a79cc06 100644 --- a/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.h +++ b/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.h @@ -22,19 +22,19 @@ class TDqSourceFactory; const i64 PQReadDefaultFreeSpace = 16_MB; -std::pair<IDqSourceActor*, NActors::IActor*> CreateDqPqReadActor( - NPq::NProto::TDqPqTopicSource&& settings, +std::pair<IDqSourceActor*, NActors::IActor*> CreateDqPqReadActor( + NPq::NProto::TDqPqTopicSource&& settings, ui64 inputIndex, TTxId txId, const THashMap<TString, TString>& secureParams, const THashMap<TString, TString>& taskParams, - NYdb::TDriver driver, + NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory, - IDqSourceActor::ICallbacks* callback, + IDqSourceActor::ICallbacks* callback, const NKikimr::NMiniKQL::THolderFactory& holderFactory, i64 bufferSize = PQReadDefaultFreeSpace ); -void RegisterDqPqReadActorFactory(TDqSourceFactory& factory, NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory); +void RegisterDqPqReadActorFactory(TDqSourceFactory& factory, NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory); } // namespace NYql::NDq diff --git a/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.cpp b/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.cpp index 
85fbc91c01d..b6eb1b73379 100644 --- a/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.cpp +++ b/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.cpp @@ -15,7 +15,7 @@ #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h> #include <ydb/public/sdk/cpp/client/ydb_types/credentials/credentials.h> - + #include <library/cpp/actors/core/actor.h> #include <library/cpp/actors/core/event_local.h> #include <library/cpp/actors/core/events.h> @@ -25,7 +25,7 @@ #include <util/generic/algorithm.h> #include <util/generic/hash.h> -#include <util/string/builder.h> +#include <util/string/builder.h> #include <algorithm> #include <queue> @@ -94,9 +94,9 @@ public: ui64 outputIndex, const TString& txId, NPq::NProto::TDqPqTopicSink&& sinkParams, - NYdb::TDriver driver, + NYdb::TDriver driver, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory, - IDqSinkActor::ICallbacks* callbacks, + IDqSinkActor::ICallbacks* callbacks, i64 freeSpace) : TActor<TDqPqWriteActor>(&TDqPqWriteActor::StateFunc) , OutputIndex(outputIndex) @@ -106,37 +106,37 @@ public: , CredentialsProviderFactory(credentialsProviderFactory) , Callbacks(callbacks) , FreeSpace(freeSpace) - , PersQueueClient(Driver, GetPersQueueClientSettings()) + , PersQueueClient(Driver, GetPersQueueClientSettings()) { } static constexpr char ActorName[] = "DQ_PQ_WRITE_ACTOR"; public: - void SendData( + void SendData( NKikimr::NMiniKQL::TUnboxedValueVector&& batch, - i64 dataSize, - const TMaybe<NDqProto::TCheckpoint>& checkpoint, - bool finished) override + i64 dataSize, + const TMaybe<NDqProto::TCheckpoint>& checkpoint, + bool finished) override { - Y_UNUSED(finished); + Y_UNUSED(finished); Y_UNUSED(dataSize); CreateSessionIfNotExists(); - for (const NUdf::TUnboxedValue& item : batch) { + for (const NUdf::TUnboxedValue& item : batch) { if (!item.IsBoxed()) { Fail("Struct with single field was expected"); return; } - const NUdf::TUnboxedValue dataCol = item.GetElement(0); + const NUdf::TUnboxedValue dataCol = item.GetElement(0); if (!dataCol.IsString() && !dataCol.IsEmbedded()) { Fail(TStringBuilder() << "Non string value could not be written to YDS stream"); return; } - TString data(dataCol.AsStringRef()); + TString data(dataCol.AsStringRef()); LWPROBE(PqWriteDataToSend, TxId, SinkParams.GetTopicPath(), data); SINK_LOG_T("Received data for sending: " << data); @@ -154,7 +154,7 @@ public: if (checkpoint) { if (Buffer.empty()) { - Callbacks->OnSinkStateSaved(BuildState(), OutputIndex, *checkpoint); + Callbacks->OnSinkStateSaved(BuildState(), OutputIndex, *checkpoint); } else { DeferredCheckpoints.emplace(NextSeqNo + Buffer.size() - 1, *checkpoint); } @@ -172,18 +172,18 @@ public: } }; - void LoadState(const NDqProto::TSinkState& state) override { - Y_VERIFY(NextSeqNo == 1); - const auto& data = state.GetData().GetStateData(); - if (data.GetVersion() == StateVersion) { // Current version - NPq::NProto::TDqPqTopicSinkState stateProto; - YQL_ENSURE(stateProto.ParseFromString(data.GetBlob()), "Serialized state is corrupted"); - SourceId = stateProto.GetSourceId(); - ConfirmedSeqNo = stateProto.GetConfirmedSeqNo(); + void LoadState(const NDqProto::TSinkState& state) override { + Y_VERIFY(NextSeqNo == 1); + const auto& data = state.GetData().GetStateData(); + if (data.GetVersion() == StateVersion) { // Current version + NPq::NProto::TDqPqTopicSinkState stateProto; + YQL_ENSURE(stateProto.ParseFromString(data.GetBlob()), "Serialized state is corrupted"); + SourceId = stateProto.GetSourceId(); + ConfirmedSeqNo = 
stateProto.GetConfirmedSeqNo(); NextSeqNo = ConfirmedSeqNo + 1; - return; + return; } - ythrow yexception() << "Invalid state version " << data.GetVersion(); + ythrow yexception() << "Invalid state version " << data.GetVersion(); } void CommitState(const NDqProto::TCheckpoint& checkpoint) override { @@ -208,12 +208,12 @@ private: SubscribeOnNextEvent(); } - // IActor & IDqSinkActor - void PassAway() override { // Is called from Compute Actor + // IActor & IDqSinkActor + void PassAway() override { // Is called from Compute Actor if (WriteSession) { WriteSession->Close(TDuration::Zero()); } - TActor<TDqPqWriteActor>::PassAway(); + TActor<TDqPqWriteActor>::PassAway(); } private: @@ -270,8 +270,8 @@ private: auto events = WriteSession->GetEvents(); for (auto& event : events) { - auto issues = std::visit(TPQEventProcessor{*this}, event); - if (issues) { + auto issues = std::visit(TPQEventProcessor{*this}, event); + if (issues) { WriteSession->Close(TDuration::Zero()); WriteSession.reset(); Callbacks->OnSinkError(OutputIndex, *issues, true); @@ -286,18 +286,18 @@ private: return !events.empty(); } - NDqProto::TSinkState BuildState() { - NPq::NProto::TDqPqTopicSinkState stateProto; - stateProto.SetSourceId(GetSourceId()); - stateProto.SetConfirmedSeqNo(ConfirmedSeqNo); - TString serializedState; - YQL_ENSURE(stateProto.SerializeToString(&serializedState)); - - NDqProto::TSinkState sinkState; - auto* data = sinkState.MutableData()->MutableStateData(); - data->SetVersion(StateVersion); - data->SetBlob(serializedState); - return sinkState; + NDqProto::TSinkState BuildState() { + NPq::NProto::TDqPqTopicSinkState stateProto; + stateProto.SetSourceId(GetSourceId()); + stateProto.SetConfirmedSeqNo(ConfirmedSeqNo); + TString serializedState; + YQL_ENSURE(stateProto.SerializeToString(&serializedState)); + + NDqProto::TSinkState sinkState; + auto* data = sinkState.MutableData()->MutableStateData(); + data->SetVersion(StateVersion); + data->SetBlob(serializedState); + return sinkState; } void WriteNextMessage(NYdb::NPersQueue::TContinuationToken&& token) { @@ -314,13 +314,13 @@ private: } struct TPQEventProcessor { - std::optional<TIssues> operator()(NYdb::NPersQueue::TSessionClosedEvent& ev) { - TIssues issues; + std::optional<TIssues> operator()(NYdb::NPersQueue::TSessionClosedEvent& ev) { + TIssues issues; issues.AddIssue(TStringBuilder() << "Write session to topic \"" << Self.SinkParams.GetTopicPath() << "\" was closed: " << ev.DebugString()); - return issues; + return issues; } - std::optional<TIssues> operator()(NYdb::NPersQueue::TWriteSessionEvent::TAcksEvent& ev) { + std::optional<TIssues> operator()(NYdb::NPersQueue::TWriteSessionEvent::TAcksEvent& ev) { if (ev.Acks.empty()) { return std::nullopt; } @@ -331,9 +331,9 @@ private: //Y_VERIFY(it == ev.Acks.begin() || it->SeqNo == std::prev(it)->SeqNo + 1); if (it->State == NYdb::NPersQueue::TWriteSessionEvent::TWriteAck::EEventState::EES_DISCARDED) { - TIssues issues; - issues.AddIssue(TStringBuilder() << "Message with seqNo " << it->SeqNo << " was discarded"); - return issues; + TIssues issues; + issues.AddIssue(TStringBuilder() << "Message with seqNo " << it->SeqNo << " was discarded"); + return issues; } Self.FreeSpace += Self.WaitingAcks.front(); @@ -341,7 +341,7 @@ private: if (!Self.DeferredCheckpoints.empty() && std::get<0>(Self.DeferredCheckpoints.front()) == it->SeqNo) { Self.ConfirmedSeqNo = it->SeqNo; - Self.Callbacks->OnSinkStateSaved(Self.BuildState(), Self.OutputIndex, std::get<1>(Self.DeferredCheckpoints.front())); + 
Self.Callbacks->OnSinkStateSaved(Self.BuildState(), Self.OutputIndex, std::get<1>(Self.DeferredCheckpoints.front())); Self.DeferredCheckpoints.pop(); } } @@ -350,8 +350,8 @@ private: return std::nullopt; } - std::optional<TIssues> operator()(NYdb::NPersQueue::TWriteSessionEvent::TReadyToAcceptEvent& ev) { - //Y_VERIFY(!Self.ContinuationToken); + std::optional<TIssues> operator()(NYdb::NPersQueue::TWriteSessionEvent::TReadyToAcceptEvent& ev) { + //Y_VERIFY(!Self.ContinuationToken); if (!Self.Buffer.empty()) { Self.WriteNextMessage(std::move(ev.ContinuationToken)); @@ -369,9 +369,9 @@ private: const ui64 OutputIndex; const TString TxId; const NPq::NProto::TDqPqTopicSink SinkParams; - NYdb::TDriver Driver; + NYdb::TDriver Driver; std::shared_ptr<NYdb::ICredentialsProviderFactory> CredentialsProviderFactory; - IDqSinkActor::ICallbacks* const Callbacks; + IDqSinkActor::ICallbacks* const Callbacks; i64 FreeSpace = 0; NYdb::NPersQueue::TPersQueueClient PersQueueClient; @@ -388,14 +388,14 @@ private: std::queue<std::tuple<ui64, NDqProto::TCheckpoint>> DeferredCheckpoints; }; -std::pair<IDqSinkActor*, NActors::IActor*> CreateDqPqWriteActor( - NPq::NProto::TDqPqTopicSink&& settings, +std::pair<IDqSinkActor*, NActors::IActor*> CreateDqPqWriteActor( + NPq::NProto::TDqPqTopicSink&& settings, ui64 outputIndex, TTxId txId, const THashMap<TString, TString>& secureParams, - NYdb::TDriver driver, + NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory, - IDqSinkActor::ICallbacks* callbacks, + IDqSinkActor::ICallbacks* callbacks, i64 freeSpace) { const TString& tokenName = settings.GetToken().GetName(); @@ -413,15 +413,15 @@ std::pair<IDqSinkActor*, NActors::IActor*> CreateDqPqWriteActor( return {actor, actor}; } -void RegisterDqPqWriteActorFactory(TDqSinkFactory& factory, NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory) { - factory.Register<NPq::NProto::TDqPqTopicSink>("PqSink", +void RegisterDqPqWriteActorFactory(TDqSinkFactory& factory, NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory) { + factory.Register<NPq::NProto::TDqPqTopicSink>("PqSink", [driver = std::move(driver), credentialsFactory = std::move(credentialsFactory)]( NPq::NProto::TDqPqTopicSink&& settings, IDqSinkActorFactory::TArguments&& args) { NLwTraceMonPage::ProbeRegistry().AddProbesList(LWTRACE_GET_PROBES(DQ_PQ_PROVIDER)); - return CreateDqPqWriteActor( - std::move(settings), + return CreateDqPqWriteActor( + std::move(settings), args.OutputIndex, args.TxId, args.SecureParams, @@ -430,6 +430,6 @@ void RegisterDqPqWriteActorFactory(TDqSinkFactory& factory, NYdb::TDriver driver args.Callback ); }); -} - +} + } // namespace NYql::NDq diff --git a/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.h b/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.h index 6736c8188b2..d6a8723d6cf 100644 --- a/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.h +++ b/ydb/library/yql/providers/pq/async_io/dq_pq_write_actor.h @@ -20,16 +20,16 @@ namespace NYql::NDq { constexpr i64 DqPqDefaultFreeSpace = 16_MB; -std::pair<IDqSinkActor*, NActors::IActor*> CreateDqPqWriteActor( - NPq::NProto::TDqPqTopicSink&& settings, +std::pair<IDqSinkActor*, NActors::IActor*> CreateDqPqWriteActor( + NPq::NProto::TDqPqTopicSink&& settings, ui64 outputIndex, TTxId txId, const THashMap<TString, TString>& secureParams, - NYdb::TDriver driver, + NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory, - 
IDqSinkActor::ICallbacks* callbacks, + IDqSinkActor::ICallbacks* callbacks, i64 freeSpace = DqPqDefaultFreeSpace); -void RegisterDqPqWriteActorFactory(TDqSinkFactory& factory, NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory); - +void RegisterDqPqWriteActorFactory(TDqSinkFactory& factory, NYdb::TDriver driver, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory); + } // namespace NYql::NDq diff --git a/ydb/library/yql/providers/pq/async_io/ut/dq_pq_read_actor_ut.cpp b/ydb/library/yql/providers/pq/async_io/ut/dq_pq_read_actor_ut.cpp index ed6579022cf..5fdbdb0ecb5 100644 --- a/ydb/library/yql/providers/pq/async_io/ut/dq_pq_read_actor_ut.cpp +++ b/ydb/library/yql/providers/pq/async_io/ut/dq_pq_read_actor_ut.cpp @@ -9,40 +9,40 @@ namespace NYql::NDq { Y_UNIT_TEST_SUITE(TDqPqReadActorTest) { - Y_UNIT_TEST_F(TestReadFromTopic, TPqIoTestFixture) { + Y_UNIT_TEST_F(TestReadFromTopic, TPqIoTestFixture) { const TString topicName = "ReadFromTopic"; - InitSource(topicName); + InitSource(topicName); const std::vector<TString> data = { "1", "2", "3", "4" }; PQWrite(data, topicName); - auto result = SourceReadUntil<TString>(UVParser, 4); + auto result = SourceReadUntil<TString>(UVParser, 4); UNIT_ASSERT_EQUAL(result, data); } - Y_UNIT_TEST_F(ReadWithFreeSpace, TPqIoTestFixture) { + Y_UNIT_TEST_F(ReadWithFreeSpace, TPqIoTestFixture) { const TString topicName = "ReadWithFreeSpace"; - InitSource(topicName); + InitSource(topicName); PQWrite({"data1", "data2", "data3"}, topicName); { - auto result = SourceReadUntil<TString>(UVParser, 1, 1); + auto result = SourceReadUntil<TString>(UVParser, 1, 1); std::vector<TString> expected {"data1"}; UNIT_ASSERT_EQUAL(result, expected); } - UNIT_ASSERT_EQUAL(SourceRead<TString>(UVParser, 0).size(), 0); - UNIT_ASSERT_EQUAL(SourceRead<TString>(UVParser, -1).size(), 0); + UNIT_ASSERT_EQUAL(SourceRead<TString>(UVParser, 0).size(), 0); + UNIT_ASSERT_EQUAL(SourceRead<TString>(UVParser, -1).size(), 0); } - Y_UNIT_TEST_F(ReadNonExistentTopic, TPqIoTestFixture) { + Y_UNIT_TEST_F(ReadNonExistentTopic, TPqIoTestFixture) { const TString topicName = "NonExistentTopic"; - InitSource(topicName); + InitSource(topicName); while (true) { try { - SourceRead<TString>(UVParser); + SourceRead<TString>(UVParser); } catch (yexception& e) { UNIT_ASSERT_STRING_CONTAINS(e.what(), "Read session to topic \"NonExistentTopic\" was closed"); break; @@ -53,12 +53,12 @@ Y_UNIT_TEST_SUITE(TDqPqReadActorTest) { } Y_UNIT_TEST(TestSaveLoadPqRead) { - NDqProto::TSourceState state; + NDqProto::TSourceState state; const TString topicName = "SaveLoadPqRead"; { - TPqIoTestFixture setup1; - setup1.InitSource(topicName); + TPqIoTestFixture setup1; + setup1.InitSource(topicName); std::vector<TString> data {"data"}; PQWrite(data, topicName); @@ -67,14 +67,14 @@ Y_UNIT_TEST_SUITE(TDqPqReadActorTest) { UNIT_ASSERT_EQUAL(result, data); auto checkpoint = CreateCheckpoint(); - setup1.SaveSourceState(checkpoint, state); + setup1.SaveSourceState(checkpoint, state); Cerr << "State saved" << Endl; } - NDqProto::TSourceState state2; + NDqProto::TSourceState state2; { - TPqIoTestFixture setup2; - setup2.InitSource(topicName); + TPqIoTestFixture setup2; + setup2.InitSource(topicName); std::vector<TString> data {"data"}; PQWrite({"data"}, topicName); @@ -85,15 +85,15 @@ Y_UNIT_TEST_SUITE(TDqPqReadActorTest) { UNIT_ASSERT_EQUAL(result, data); auto checkpoint = CreateCheckpoint(); - setup2.SaveSourceState(checkpoint, state2); + setup2.SaveSourceState(checkpoint, state2); 
PQWrite({"futherData"}, topicName); } - NDqProto::TSourceState state3; + NDqProto::TSourceState state3; { - TPqIoTestFixture setup3; - setup3.InitSource(topicName); + TPqIoTestFixture setup3; + setup3.InitSource(topicName); setup3.LoadSource(state2); auto result = setup3.SourceReadUntil<TString>(UVParser, 1); @@ -105,13 +105,13 @@ Y_UNIT_TEST_SUITE(TDqPqReadActorTest) { PQWrite({"yetAnotherData"}, topicName); auto checkpoint = CreateCheckpoint(); - setup3.SaveSourceState(checkpoint, state3); + setup3.SaveSourceState(checkpoint, state3); } // Load the first state and check it. { - TPqIoTestFixture setup4; - setup4.InitSource(topicName); + TPqIoTestFixture setup4; + setup4.InitSource(topicName); setup4.LoadSource(state); auto result = setup4.SourceReadUntil<TString>(UVParser, 3); @@ -121,8 +121,8 @@ Y_UNIT_TEST_SUITE(TDqPqReadActorTest) { // Load graphState2 and check it (offsets were saved). { - TPqIoTestFixture setup5; - setup5.InitSource(topicName); + TPqIoTestFixture setup5; + setup5.InitSource(topicName); setup5.LoadSource(state2); auto result = setup5.SourceReadUntil<TString>(UVParser, 2); @@ -132,8 +132,8 @@ Y_UNIT_TEST_SUITE(TDqPqReadActorTest) { // Load graphState3 and check it (other offsets). { - TPqIoTestFixture setup6; - setup6.InitSource(topicName); + TPqIoTestFixture setup6; + setup6.InitSource(topicName); setup6.LoadSource(state3); auto result = setup6.SourceReadUntil<TString>(UVParser, 1); @@ -143,71 +143,71 @@ Y_UNIT_TEST_SUITE(TDqPqReadActorTest) { } Y_UNIT_TEST(LoadCorruptedState) { - NDqProto::TSourceState state; + NDqProto::TSourceState state; const TString topicName = "Invalid"; // We wouldn't read from this topic. auto checkpoint = CreateCheckpoint(); { - TPqIoTestFixture setup1; - setup1.InitSource(topicName); - setup1.SaveSourceState(checkpoint, state); + TPqIoTestFixture setup1; + setup1.InitSource(topicName); + setup1.SaveSourceState(checkpoint, state); } // Corrupt state. 
- TString corruptedBlob = state.GetData(0).GetStateData().GetBlob(); - corruptedBlob.append('a'); - state.MutableData(0)->MutableStateData()->SetBlob(corruptedBlob); + TString corruptedBlob = state.GetData(0).GetStateData().GetBlob(); + corruptedBlob.append('a'); + state.MutableData(0)->MutableStateData()->SetBlob(corruptedBlob); { - TPqIoTestFixture setup2; - setup2.InitSource(topicName); + TPqIoTestFixture setup2; + setup2.InitSource(topicName); UNIT_ASSERT_EXCEPTION_CONTAINS(setup2.LoadSource(state), yexception, "Serialized state is corrupted"); } } - - Y_UNIT_TEST(TestLoadFromSeveralStates) { - const TString topicName = "LoadFromSeveralStates"; - - NDqProto::TSourceState state2; - { - TPqIoTestFixture setup; - setup.InitSource(topicName); - - std::vector<TString> data {"data"}; - PQWrite(data, topicName); - - auto result1 = setup.SourceReadUntil<TString>(UVParser, 1); - UNIT_ASSERT_EQUAL(result1, data); - - NDqProto::TSourceState state1; - auto checkpoint1 = CreateCheckpoint(); - setup.SaveSourceState(checkpoint1, state1); - Cerr << "State saved" << Endl; - - std::vector<TString> data2 {"data2"}; - PQWrite(data2, topicName); - - auto result2 = setup.SourceReadUntil<TString>(UVParser, 1); - UNIT_ASSERT_EQUAL(result2, data2); - - auto checkpoint2 = CreateCheckpoint(); - setup.SaveSourceState(checkpoint2, state2); - Cerr << "State 2 saved" << Endl; - - // Add state1 to state2 - *state2.AddData() = state1.GetData(0); - } - - TPqIoTestFixture setup2; - setup2.InitSource(topicName); - setup2.LoadSource(state2); // Loads min offset - - std::vector<TString> data3 {"data3"}; - PQWrite(data3, topicName); - - auto result = setup2.SourceReadUntil<TString>(UVParser, 2); - std::vector<TString> dataResult {"data2", "data3"}; - UNIT_ASSERT_EQUAL(result, dataResult); - } + + Y_UNIT_TEST(TestLoadFromSeveralStates) { + const TString topicName = "LoadFromSeveralStates"; + + NDqProto::TSourceState state2; + { + TPqIoTestFixture setup; + setup.InitSource(topicName); + + std::vector<TString> data {"data"}; + PQWrite(data, topicName); + + auto result1 = setup.SourceReadUntil<TString>(UVParser, 1); + UNIT_ASSERT_EQUAL(result1, data); + + NDqProto::TSourceState state1; + auto checkpoint1 = CreateCheckpoint(); + setup.SaveSourceState(checkpoint1, state1); + Cerr << "State saved" << Endl; + + std::vector<TString> data2 {"data2"}; + PQWrite(data2, topicName); + + auto result2 = setup.SourceReadUntil<TString>(UVParser, 1); + UNIT_ASSERT_EQUAL(result2, data2); + + auto checkpoint2 = CreateCheckpoint(); + setup.SaveSourceState(checkpoint2, state2); + Cerr << "State 2 saved" << Endl; + + // Add state1 to state2 + *state2.AddData() = state1.GetData(0); + } + + TPqIoTestFixture setup2; + setup2.InitSource(topicName); + setup2.LoadSource(state2); // Loads min offset + + std::vector<TString> data3 {"data3"}; + PQWrite(data3, topicName); + + auto result = setup2.SourceReadUntil<TString>(UVParser, 2); + std::vector<TString> dataResult {"data2", "data3"}; + UNIT_ASSERT_EQUAL(result, dataResult); + } } } // namespace NKikimr::NMiniKQL diff --git a/ydb/library/yql/providers/pq/async_io/ut/dq_pq_write_actor_ut.cpp b/ydb/library/yql/providers/pq/async_io/ut/dq_pq_write_actor_ut.cpp index b64c7ee0e35..f863b0f7e51 100644 --- a/ydb/library/yql/providers/pq/async_io/ut/dq_pq_write_actor_ut.cpp +++ b/ydb/library/yql/providers/pq/async_io/ut/dq_pq_write_actor_ut.cpp @@ -9,43 +9,43 @@ namespace NYql::NDq { constexpr TDuration WaitTimeout = TDuration::MilliSeconds(10000); Y_UNIT_TEST_SUITE(TPqWriterTest) { - 
Y_UNIT_TEST_F(TestWriteToTopic, TPqIoTestFixture) { + Y_UNIT_TEST_F(TestWriteToTopic, TPqIoTestFixture) { const TString topicName = "WriteToTopic"; - InitSink(topicName); + InitSink(topicName); const std::vector<TString> data = { "1", "2", "3", "4" }; - SinkWrite(data); + SinkWrite(data); auto result = PQReadUntil(topicName, 4); UNIT_ASSERT_EQUAL(result, data); } - Y_UNIT_TEST_F(TestWriteToTopicMultiBatch, TPqIoTestFixture) { + Y_UNIT_TEST_F(TestWriteToTopicMultiBatch, TPqIoTestFixture) { const TString topicName = "WriteToTopicMultiBatch"; - InitSink(topicName); + InitSink(topicName); const std::vector<TString> data1 = { "1" }; const std::vector<TString> data2 = { "2" }; const std::vector<TString> data3 = { "3" }; - SinkWrite(data1); - SinkWrite(data2); - SinkWrite(data3); + SinkWrite(data1); + SinkWrite(data2); + SinkWrite(data3); auto result = PQReadUntil(topicName, 3); std::vector<TString> expected = { "1", "2", "3" }; UNIT_ASSERT_EQUAL(result, expected); } - Y_UNIT_TEST_F(TestDeferredWriteToTopic, TPqIoTestFixture) { + Y_UNIT_TEST_F(TestDeferredWriteToTopic, TPqIoTestFixture) { // In this case we are checking free space overflow const TString topicName = "DeferredWriteToTopic"; - InitSink(topicName, 1); + InitSink(topicName, 1); const std::vector<TString> data = { "1", "2", "3" }; - auto future = CaSetup->SinkPromises.ResumeExecution.GetFuture(); - SinkWrite(data); + auto future = CaSetup->SinkPromises.ResumeExecution.GetFuture(); + SinkWrite(data); auto result = PQReadUntil(topicName, 3); UNIT_ASSERT_EQUAL(result, data); @@ -53,51 +53,51 @@ Y_UNIT_TEST_SUITE(TPqWriterTest) { const std::vector<TString> data2 = { "4", "5", "6" }; - SinkWrite(data2); + SinkWrite(data2); auto result2 = PQReadUntil(topicName, 6); const std::vector<TString> expected = { "1", "2", "3", "4", "5", "6" }; UNIT_ASSERT_EQUAL(result2, expected); } - Y_UNIT_TEST_F(WriteNonExistentTopic, TPqIoTestFixture) { + Y_UNIT_TEST_F(WriteNonExistentTopic, TPqIoTestFixture) { const TString topicName = "NonExistentTopic"; - InitSink(topicName); + InitSink(topicName); const std::vector<TString> data = { "1" }; - auto future = CaSetup->SinkPromises.Issue.GetFuture(); - SinkWrite(data); + auto future = CaSetup->SinkPromises.Issue.GetFuture(); + SinkWrite(data); UNIT_ASSERT(future.Wait(WaitTimeout)); - UNIT_ASSERT_STRING_CONTAINS(future.GetValue().ToString(), "Write session to topic \"NonExistentTopic\" was closed"); + UNIT_ASSERT_STRING_CONTAINS(future.GetValue().ToString(), "Write session to topic \"NonExistentTopic\" was closed"); } Y_UNIT_TEST(TestCheckpoints) { const TString topicName = "Checkpoints"; - NDqProto::TSinkState state1; + NDqProto::TSinkState state1; { - TPqIoTestFixture setup; - setup.InitSink(topicName); + TPqIoTestFixture setup; + setup.InitSink(topicName); const std::vector<TString> data1 = { "1" }; - setup.SinkWrite(data1); + setup.SinkWrite(data1); const std::vector<TString> data2 = { "2", "3" }; auto checkpoint = CreateCheckpoint(); - auto future = setup.CaSetup->SinkPromises.StateSaved.GetFuture(); - setup.SinkWrite(data2, checkpoint); + auto future = setup.CaSetup->SinkPromises.StateSaved.GetFuture(); + setup.SinkWrite(data2, checkpoint); UNIT_ASSERT(future.Wait(WaitTimeout)); state1 = future.GetValue(); } { - TPqIoTestFixture setup; - setup.InitSink(topicName); + TPqIoTestFixture setup; + setup.InitSink(topicName); setup.LoadSink(state1); const std::vector<TString> data3 = { "4", "5" }; - setup.SinkWrite(data3); + setup.SinkWrite(data3); auto result = PQReadUntil(topicName, 5); const std::vector<TString> 
expected = { "1", "2", "3", "4", "5" }; @@ -105,12 +105,12 @@ Y_UNIT_TEST_SUITE(TPqWriterTest) { } { - TPqIoTestFixture setup; - setup.InitSink(topicName); + TPqIoTestFixture setup; + setup.InitSink(topicName); setup.LoadSink(state1); const std::vector<TString> data4 = { "4", "5" }; - setup.SinkWrite(data4); // This write should be deduplicated + setup.SinkWrite(data4); // This write should be deduplicated auto result = PQReadUntil(topicName, 4); const std::vector<TString> expected = { "1", "2", "3", "4", "5" }; @@ -118,17 +118,17 @@ Y_UNIT_TEST_SUITE(TPqWriterTest) { } } - Y_UNIT_TEST_F(TestCheckpointWithEmptyBatch, TPqIoTestFixture) { + Y_UNIT_TEST_F(TestCheckpointWithEmptyBatch, TPqIoTestFixture) { const TString topicName = "Checkpoints"; - NDqProto::TSinkState state1; + NDqProto::TSinkState state1; { - InitSink(topicName); + InitSink(topicName); const std::vector<TString> data = {}; auto checkpoint = CreateCheckpoint(); - auto future = CaSetup->SinkPromises.StateSaved.GetFuture(); - SinkWrite(data, checkpoint); + auto future = CaSetup->SinkPromises.StateSaved.GetFuture(); + SinkWrite(data, checkpoint); UNIT_ASSERT(future.Wait(WaitTimeout)); state1 = future.GetValue(); diff --git a/ydb/library/yql/providers/pq/async_io/ut/ut_helpers.cpp b/ydb/library/yql/providers/pq/async_io/ut/ut_helpers.cpp index 6fcae4e0a7c..7310e0b46ef 100644 --- a/ydb/library/yql/providers/pq/async_io/ut/ut_helpers.cpp +++ b/ydb/library/yql/providers/pq/async_io/ut/ut_helpers.cpp @@ -32,19 +32,19 @@ NYql::NPq::NProto::TDqPqTopicSink BuildPqTopicSinkSettings(TString topic) { return settings; } -TPqIoTestFixture::TPqIoTestFixture() { -} - -TPqIoTestFixture::~TPqIoTestFixture() { - CaSetup = nullptr; - Driver.Stop(true); -} - -void TPqIoTestFixture::InitSource( +TPqIoTestFixture::TPqIoTestFixture() { +} + +TPqIoTestFixture::~TPqIoTestFixture() { + CaSetup = nullptr; + Driver.Stop(true); +} + +void TPqIoTestFixture::InitSource( NYql::NPq::NProto::TDqPqTopicSource&& settings, i64 freeSpace) { - CaSetup->Execute([&](TFakeActor& actor) { + CaSetup->Execute([&](TFakeActor& actor) { NPq::NProto::TDqReadTaskParams params; auto* partitioninigParams = params.MutablePartitioningParams(); partitioninigParams->SetTopicPartitionsCount(1); @@ -63,7 +63,7 @@ void TPqIoTestFixture::InitSource( "query_1", secureParams, taskParams, - Driver, + Driver, nullptr, &actor.GetSourceCallbacks(), actor.GetHolderFactory(), @@ -73,19 +73,19 @@ void TPqIoTestFixture::InitSource( }); } -void TPqIoTestFixture::InitSink( +void TPqIoTestFixture::InitSink( NPq::NProto::TDqPqTopicSink&& settings, i64 freeSpace) { const THashMap<TString, TString> secureParams; - CaSetup->Execute([&](TFakeActor& actor) { + CaSetup->Execute([&](TFakeActor& actor) { auto [dqSink, dqSinkAsActor] = CreateDqPqWriteActor( std::move(settings), 0, "query_1", secureParams, - Driver, + Driver, nullptr, &actor.GetSinkCallbacks(), freeSpace); @@ -119,7 +119,7 @@ void PQWrite( .MessageGroupId("src_id"); auto session = client.CreateSimpleBlockingWriteSession(sessionSettings); for (const TString& data : sequence) { - UNIT_ASSERT_C(session->Write(data), "Failed to write message with body \"" << data << "\" to topic " << topic); + UNIT_ASSERT_C(session->Write(data), "Failed to write message with body \"" << data << "\" to topic " << topic); Cerr << "Message '" << data << "' was written into topic '" << topic << "'" << Endl; } session->Close(); // Wait until all data would be written into PQ. 
@@ -168,14 +168,14 @@ std::vector<TString> UVParser(const NUdf::TUnboxedValue& item) { return { TString(item.AsStringRef()) }; } -void TPqIoTestFixture::SinkWrite(std::vector<TString> data, TMaybe<NDqProto::TCheckpoint> checkpoint) { - CaSetup->SinkWrite([data](NKikimr::NMiniKQL::THolderFactory& factory) { +void TPqIoTestFixture::SinkWrite(std::vector<TString> data, TMaybe<NDqProto::TCheckpoint> checkpoint) { + CaSetup->SinkWrite([data](NKikimr::NMiniKQL::THolderFactory& factory) { NKikimr::NMiniKQL::TUnboxedValueVector batch; batch.reserve(data.size()); for (const auto& item : data) { - NUdf::TUnboxedValue* unboxedValueForData = nullptr; + NUdf::TUnboxedValue* unboxedValueForData = nullptr; batch.emplace_back(factory.CreateDirectArrayHolder(1, unboxedValueForData)); - unboxedValueForData[0] = NKikimr::NMiniKQL::MakeString(NUdf::TStringRef(item.Data(), item.Size())); + unboxedValueForData[0] = NKikimr::NMiniKQL::MakeString(NUdf::TStringRef(item.Data(), item.Size())); } return batch; diff --git a/ydb/library/yql/providers/pq/async_io/ut/ut_helpers.h b/ydb/library/yql/providers/pq/async_io/ut/ut_helpers.h index 5d9f3d11424..569835c53c9 100644 --- a/ydb/library/yql/providers/pq/async_io/ut/ut_helpers.h +++ b/ydb/library/yql/providers/pq/async_io/ut/ut_helpers.h @@ -23,66 +23,66 @@ NYql::NPq::NProto::TDqPqTopicSource BuildPqTopicSourceSettings(TString topic); NYql::NPq::NProto::TDqPqTopicSink BuildPqTopicSinkSettings(TString topic); -struct TPqIoTestFixture : public NUnitTest::TBaseFixture { - std::unique_ptr<TFakeCASetup> CaSetup = std::make_unique<TFakeCASetup>(); - NYdb::TDriver Driver = NYdb::TDriver(NYdb::TDriverConfig().SetLog(CreateLogBackend("cerr"))); - - TPqIoTestFixture(); - ~TPqIoTestFixture(); - - void InitSource( - NYql::NPq::NProto::TDqPqTopicSource&& settings, - i64 freeSpace = 1_MB); - - void InitSource( - const TString& topic, - i64 freeSpace = 1_MB) - { - InitSource(BuildPqTopicSourceSettings(topic), freeSpace); - } - - template<typename T> - std::vector<T> SourceRead(const TReadValueParser<T> parser, i64 freeSpace = 12345) { - return CaSetup->SourceRead(parser, freeSpace); - } - - template<typename T> - std::vector<T> SourceReadUntil( - const TReadValueParser<T> parser, - ui64 size, - i64 eachReadFreeSpace = 1000, - TDuration timeout = TDuration::Seconds(10)) - { - return CaSetup->SourceReadUntil(parser, size, eachReadFreeSpace, timeout); - } - - void SaveSourceState(NDqProto::TCheckpoint checkpoint, NDqProto::TSourceState& state) { - CaSetup->SaveSourceState(checkpoint, state); - } - - void LoadSource(const NDqProto::TSourceState& state) { - return CaSetup->LoadSource(state); - } - - - void InitSink( - NYql::NPq::NProto::TDqPqTopicSink&& settings, - i64 freeSpace = 1_MB); - - void InitSink( - const TString& topic, - i64 freeSpace = 1_MB) - { - InitSink(BuildPqTopicSinkSettings(topic), freeSpace); - } - - void LoadSink(const NDqProto::TSinkState& state) { - CaSetup->LoadSink(state); - } - - void SinkWrite(std::vector<TString> data, TMaybe<NDqProto::TCheckpoint> checkpoint = Nothing()); -}; - +struct TPqIoTestFixture : public NUnitTest::TBaseFixture { + std::unique_ptr<TFakeCASetup> CaSetup = std::make_unique<TFakeCASetup>(); + NYdb::TDriver Driver = NYdb::TDriver(NYdb::TDriverConfig().SetLog(CreateLogBackend("cerr"))); + + TPqIoTestFixture(); + ~TPqIoTestFixture(); + + void InitSource( + NYql::NPq::NProto::TDqPqTopicSource&& settings, + i64 freeSpace = 1_MB); + + void InitSource( + const TString& topic, + i64 freeSpace = 1_MB) + { + 
InitSource(BuildPqTopicSourceSettings(topic), freeSpace); + } + + template<typename T> + std::vector<T> SourceRead(const TReadValueParser<T> parser, i64 freeSpace = 12345) { + return CaSetup->SourceRead(parser, freeSpace); + } + + template<typename T> + std::vector<T> SourceReadUntil( + const TReadValueParser<T> parser, + ui64 size, + i64 eachReadFreeSpace = 1000, + TDuration timeout = TDuration::Seconds(10)) + { + return CaSetup->SourceReadUntil(parser, size, eachReadFreeSpace, timeout); + } + + void SaveSourceState(NDqProto::TCheckpoint checkpoint, NDqProto::TSourceState& state) { + CaSetup->SaveSourceState(checkpoint, state); + } + + void LoadSource(const NDqProto::TSourceState& state) { + return CaSetup->LoadSource(state); + } + + + void InitSink( + NYql::NPq::NProto::TDqPqTopicSink&& settings, + i64 freeSpace = 1_MB); + + void InitSink( + const TString& topic, + i64 freeSpace = 1_MB) + { + InitSink(BuildPqTopicSinkSettings(topic), freeSpace); + } + + void LoadSink(const NDqProto::TSinkState& state) { + CaSetup->LoadSink(state); + } + + void SinkWrite(std::vector<TString> data, TMaybe<NDqProto::TCheckpoint> checkpoint = Nothing()); +}; + TString GetDefaultPqEndpoint(); extern const TString DefaultPqConsumer; diff --git a/ydb/library/yql/providers/pq/async_io/ut/ya.make b/ydb/library/yql/providers/pq/async_io/ut/ya.make index 1ce7d821550..5646334d47c 100644 --- a/ydb/library/yql/providers/pq/async_io/ut/ya.make +++ b/ydb/library/yql/providers/pq/async_io/ut/ya.make @@ -2,17 +2,17 @@ UNITTEST_FOR(ydb/library/yql/providers/pq/async_io) OWNER( d-mokhnatkin - g:yq + g:yq g:yql ) ENV(YDB_USE_IN_MEMORY_PDISKS=true) -ENV(LOGBROKER_CREATE_TOPICS=ReadFromTopic,ReadWithFreeSpace,SaveLoadPqRead,WriteToTopic,WriteToTopicMultiBatch,DeferredWriteToTopic,SaveLoadPqWrite,Checkpoints,LoadFromSeveralStates) +ENV(LOGBROKER_CREATE_TOPICS=ReadFromTopic,ReadWithFreeSpace,SaveLoadPqRead,WriteToTopic,WriteToTopicMultiBatch,DeferredWriteToTopic,SaveLoadPqWrite,Checkpoints,LoadFromSeveralStates) ENV(LOGBROKER_TOPICS_PARTITIONS=1) -INCLUDE(${ARCADIA_ROOT}/kikimr/yq/tools/lbk_recipe_with_dummy_cm/recipe.inc) +INCLUDE(${ARCADIA_ROOT}/kikimr/yq/tools/lbk_recipe_with_dummy_cm/recipe.inc) SRCS( dq_pq_read_actor_ut.cpp diff --git a/ydb/library/yql/providers/pq/async_io/ya.make b/ydb/library/yql/providers/pq/async_io/ya.make index 8c37ecec6fa..711e37fce7c 100644 --- a/ydb/library/yql/providers/pq/async_io/ya.make +++ b/ydb/library/yql/providers/pq/async_io/ya.make @@ -2,7 +2,7 @@ LIBRARY() OWNER( d-mokhnatkin - g:yq + g:yq g:yql ) diff --git a/ydb/library/yql/providers/pq/cm_client/interface/client.cpp b/ydb/library/yql/providers/pq/cm_client/interface/client.cpp index 66901f465ff..aa43f1a7ce5 100644 --- a/ydb/library/yql/providers/pq/cm_client/interface/client.cpp +++ b/ydb/library/yql/providers/pq/cm_client/interface/client.cpp @@ -1,8 +1,8 @@ -#include "client.h" - -#include <util/string/cast.h> - -#include <cstring> - -namespace NPq::NConfigurationManager { -} // namespace NPq::NConfigurationManager +#include "client.h" + +#include <util/string/cast.h> + +#include <cstring> + +namespace NPq::NConfigurationManager { +} // namespace NPq::NConfigurationManager diff --git a/ydb/library/yql/providers/pq/cm_client/interface/client.h b/ydb/library/yql/providers/pq/cm_client/interface/client.h index 368d2f2039b..38db3931b1d 100644 --- a/ydb/library/yql/providers/pq/cm_client/interface/client.h +++ b/ydb/library/yql/providers/pq/cm_client/interface/client.h @@ -1,203 +1,203 @@ -#pragma once +#pragma once #include 
<ydb/library/yql/public/issue/yql_issue.h> - + #include <ydb/public/sdk/cpp/client/ydb_types/credentials/credentials.h> - -#include <library/cpp/threading/future/future.h> - -#include <util/datetime/base.h> -#include <util/generic/hash.h> -#include <util/generic/ptr.h> -#include <util/generic/string.h> -#include <util/generic/typetraits.h> -#include <util/generic/yexception.h> -#include <util/system/defaults.h> - -#include <variant> - -namespace NPq::NConfigurationManager { - + +#include <library/cpp/threading/future/future.h> + +#include <util/datetime/base.h> +#include <util/generic/hash.h> +#include <util/generic/ptr.h> +#include <util/generic/string.h> +#include <util/generic/typetraits.h> +#include <util/generic/yexception.h> +#include <util/system/defaults.h> + +#include <variant> + +namespace NPq::NConfigurationManager { + // Statuses as in ydb/public/api/protos/ydb_status_codes.proto -enum class EStatus { - STATUS_CODE_UNSPECIFIED, - SUCCESS, - BAD_REQUEST, - UNAUTHORIZED, - INTERNAL_ERROR, - ABORTED, - UNAVAILABLE, - OVERLOADED, - SCHEME_ERROR, - GENERIC_ERROR, - TIMEOUT, - BAD_SESSION, - PRECONDITION_FAILED, - ALREADY_EXISTS, - NOT_FOUND, - SESSION_EXPIRED, - CANCELLED, - UNDETERMINED, - UNSUPPORTED, - SESSION_BUSY -}; - -class TException : public yexception { -public: - explicit TException(EStatus status) - : Status(status) - { - } - - EStatus GetStatus() const { - return Status; - } - -private: - EStatus Status = EStatus::STATUS_CODE_UNSPECIFIED; -}; - -struct TObjectDescriptionBase { - explicit TObjectDescriptionBase(const TString& path) - : Path(path) - { - } - - TString Path; - THashMap<TString, TString> Metadata; // Arbitrary key-value pairs. -}; - -struct TAccountDescription : public TObjectDescriptionBase { - using TObjectDescriptionBase::TObjectDescriptionBase; - - // TODO: If you need other properties, add them. -}; - -struct TPathDescription : public TObjectDescriptionBase { - using TObjectDescriptionBase::TObjectDescriptionBase; - - // TODO: If you need other properties, add them. -}; - -struct TTopicDescription : public TObjectDescriptionBase { - using TObjectDescriptionBase::TObjectDescriptionBase; - - size_t PartitionsCount = 0; - - // TODO: If you need other properties, add them. -}; - -struct TConsumerDescription : public TObjectDescriptionBase { - using TObjectDescriptionBase::TObjectDescriptionBase; - - // TODO: If you need other properties, add them. 
-}; - -struct TDescribePathResult { - using TDescription = std::variant<TAccountDescription, TPathDescription, TTopicDescription, TConsumerDescription>; - - const TDescription& GetDescriptionVariant() const { - return Description; - } - - TDescription& GetDescriptionVariant() { - return Description; - } - - template<class T> - const T& Get() const { - return std::get<T>(Description); - } - - const TAccountDescription& GetAccountDescription() const { - return Get<TAccountDescription>(); - } - - const TPathDescription& GetPathDescription() const { - return Get<TPathDescription>(); - } - - const TTopicDescription& GetTopicDescription() const { - return Get<TTopicDescription>(); - } - - const TConsumerDescription& GetConsumerDescription() const { - return Get<TConsumerDescription>(); - } - - bool IsAccount() const { - return std::holds_alternative<TAccountDescription>(Description); - } - - bool IsPath() const { - return std::holds_alternative<TPathDescription>(Description); - } - - bool IsTopic() const { - return std::holds_alternative<TTopicDescription>(Description); - } - - bool IsConsumer() const { - return std::holds_alternative<TConsumerDescription>(Description); - } - - template<class TResultType, class... T> - static TDescribePathResult Make(T&&... params) { - return {std::in_place_type_t<TResultType>(), std::forward<T>(params)...}; - } - -private: - template<class... T> - TDescribePathResult(T&&... params) - : Description(std::forward<T>(params)...) - { - } - -private: - TDescription Description; -}; - -using TAsyncDescribePathResult = NThreading::TFuture<TDescribePathResult>; - -struct IClient : public TThrRefBase { - using TPtr = TIntrusivePtr<IClient>; - +enum class EStatus { + STATUS_CODE_UNSPECIFIED, + SUCCESS, + BAD_REQUEST, + UNAUTHORIZED, + INTERNAL_ERROR, + ABORTED, + UNAVAILABLE, + OVERLOADED, + SCHEME_ERROR, + GENERIC_ERROR, + TIMEOUT, + BAD_SESSION, + PRECONDITION_FAILED, + ALREADY_EXISTS, + NOT_FOUND, + SESSION_EXPIRED, + CANCELLED, + UNDETERMINED, + UNSUPPORTED, + SESSION_BUSY +}; + +class TException : public yexception { +public: + explicit TException(EStatus status) + : Status(status) + { + } + + EStatus GetStatus() const { + return Status; + } + +private: + EStatus Status = EStatus::STATUS_CODE_UNSPECIFIED; +}; + +struct TObjectDescriptionBase { + explicit TObjectDescriptionBase(const TString& path) + : Path(path) + { + } + + TString Path; + THashMap<TString, TString> Metadata; // Arbitrary key-value pairs. +}; + +struct TAccountDescription : public TObjectDescriptionBase { + using TObjectDescriptionBase::TObjectDescriptionBase; + + // TODO: If you need other properties, add them. +}; + +struct TPathDescription : public TObjectDescriptionBase { + using TObjectDescriptionBase::TObjectDescriptionBase; + + // TODO: If you need other properties, add them. +}; + +struct TTopicDescription : public TObjectDescriptionBase { + using TObjectDescriptionBase::TObjectDescriptionBase; + + size_t PartitionsCount = 0; + + // TODO: If you need other properties, add them. +}; + +struct TConsumerDescription : public TObjectDescriptionBase { + using TObjectDescriptionBase::TObjectDescriptionBase; + + // TODO: If you need other properties, add them. 
+}; + +struct TDescribePathResult { + using TDescription = std::variant<TAccountDescription, TPathDescription, TTopicDescription, TConsumerDescription>; + + const TDescription& GetDescriptionVariant() const { + return Description; + } + + TDescription& GetDescriptionVariant() { + return Description; + } + + template<class T> + const T& Get() const { + return std::get<T>(Description); + } + + const TAccountDescription& GetAccountDescription() const { + return Get<TAccountDescription>(); + } + + const TPathDescription& GetPathDescription() const { + return Get<TPathDescription>(); + } + + const TTopicDescription& GetTopicDescription() const { + return Get<TTopicDescription>(); + } + + const TConsumerDescription& GetConsumerDescription() const { + return Get<TConsumerDescription>(); + } + + bool IsAccount() const { + return std::holds_alternative<TAccountDescription>(Description); + } + + bool IsPath() const { + return std::holds_alternative<TPathDescription>(Description); + } + + bool IsTopic() const { + return std::holds_alternative<TTopicDescription>(Description); + } + + bool IsConsumer() const { + return std::holds_alternative<TConsumerDescription>(Description); + } + + template<class TResultType, class... T> + static TDescribePathResult Make(T&&... params) { + return {std::in_place_type_t<TResultType>(), std::forward<T>(params)...}; + } + +private: + template<class... T> + TDescribePathResult(T&&... params) + : Description(std::forward<T>(params)...) + { + } + +private: + TDescription Description; +}; + +using TAsyncDescribePathResult = NThreading::TFuture<TDescribePathResult>; + +struct IClient : public TThrRefBase { + using TPtr = TIntrusivePtr<IClient>; + virtual TAsyncDescribePathResult DescribePath(const TString& path) const = 0; - - // TODO: If you need other methods, add them. -}; - -#define OPTION(type, name, default_exp) \ - private: \ - type name default_exp; \ - public: \ - TTypeTraits<type>::TFuncParam Y_CAT(Get, name)() const { \ - return name; \ - } \ - TSelf& Y_CAT(Set, name)(TTypeTraits<type>::TFuncParam val) { \ - name = val; \ - return *this; \ - } \ - -struct TClientOptions { - using TSelf = TClientOptions; - - OPTION(TString, Endpoint, ); - OPTION(std::shared_ptr<NYdb::ICredentialsProviderFactory>, CredentialsProviderFactory, ); - OPTION(TDuration, RequestTimeout, = TDuration::Seconds(10)); + + // TODO: If you need other methods, add them. +}; + +#define OPTION(type, name, default_exp) \ + private: \ + type name default_exp; \ + public: \ + TTypeTraits<type>::TFuncParam Y_CAT(Get, name)() const { \ + return name; \ + } \ + TSelf& Y_CAT(Set, name)(TTypeTraits<type>::TFuncParam val) { \ + name = val; \ + return *this; \ + } \ + +struct TClientOptions { + using TSelf = TClientOptions; + + OPTION(TString, Endpoint, ); + OPTION(std::shared_ptr<NYdb::ICredentialsProviderFactory>, CredentialsProviderFactory, ); + OPTION(TDuration, RequestTimeout, = TDuration::Seconds(10)); OPTION(bool, EnableSsl, = false); -}; - -#undef OPTION - -// Factory interface for creation clients to different pq clusters. -struct IConnections : public TThrRefBase { - using TPtr = TIntrusivePtr<IConnections>; - - virtual ~IConnections() = default; - - virtual void Stop(bool wait = false) = 0; - - virtual IClient::TPtr GetClient(const TClientOptions&) = 0; -}; - -} // namespace NPq::NConfigurationManager +}; + +#undef OPTION + +// Factory interface for creation clients to different pq clusters. 
+struct IConnections : public TThrRefBase { + using TPtr = TIntrusivePtr<IConnections>; + + virtual ~IConnections() = default; + + virtual void Stop(bool wait = false) = 0; + + virtual IClient::TPtr GetClient(const TClientOptions&) = 0; +}; + +} // namespace NPq::NConfigurationManager diff --git a/ydb/library/yql/providers/pq/cm_client/interface/ya.make b/ydb/library/yql/providers/pq/cm_client/interface/ya.make index abc4bab0087..b1712307ea5 100644 --- a/ydb/library/yql/providers/pq/cm_client/interface/ya.make +++ b/ydb/library/yql/providers/pq/cm_client/interface/ya.make @@ -1,21 +1,21 @@ -LIBRARY() - -OWNER( - galaxycrab - g:yq - g:yql -) - -SRCS( - client.cpp -) - -PEERDIR( - library/cpp/threading/future +LIBRARY() + +OWNER( + galaxycrab + g:yq + g:yql +) + +SRCS( + client.cpp +) + +PEERDIR( + library/cpp/threading/future ydb/public/sdk/cpp/client/ydb_types/credentials ydb/library/yql/public/issue -) - -GENERATE_ENUM_SERIALIZATION(client.h) - -END() +) + +GENERATE_ENUM_SERIALIZATION(client.h) + +END() diff --git a/ydb/library/yql/providers/pq/cm_client/lib/client.cpp b/ydb/library/yql/providers/pq/cm_client/lib/client.cpp index 71d878f5e57..07a1fbcc98d 100644 --- a/ydb/library/yql/providers/pq/cm_client/lib/client.cpp +++ b/ydb/library/yql/providers/pq/cm_client/lib/client.cpp @@ -1,274 +1,274 @@ -#include "client.h" - +#include "client.h" + #include <ydb/library/yql/public/issue/yql_issue_message.h> - -#include <logbroker/public/api/grpc/config_manager.grpc.pb.h> - -#include <library/cpp/grpc/client/grpc_client_low.h> - -#include <util/string/builder.h> - -#include <exception> - -namespace NPq::NConfigurationManager { - -class TConnections::TImpl : public TAtomicRefCount<TConnections::TImpl> { -public: - explicit TImpl(size_t numWorkerThreads) - : Client(numWorkerThreads) - { - } - - void Stop(bool wait = false) { - Client.Stop(wait); - } - - NGrpc::TGRpcClientLow& GetClient() { - return Client; - } - -private: - NGrpc::TGRpcClientLow Client; -}; - -EStatus ToStatus(const NGrpc::TGrpcStatus& status) { - if (status.InternalError) { - return EStatus::INTERNAL_ERROR; - } - - switch (status.GRpcStatusCode) { - case grpc::StatusCode::OK: - return EStatus::SUCCESS; - case grpc::StatusCode::CANCELLED: - return EStatus::CANCELLED; - case grpc::StatusCode::UNKNOWN: - return EStatus::UNDETERMINED; - case grpc::StatusCode::INVALID_ARGUMENT: - return EStatus::BAD_REQUEST; - case grpc::StatusCode::DEADLINE_EXCEEDED: - return EStatus::TIMEOUT; - case grpc::StatusCode::NOT_FOUND: - return EStatus::NOT_FOUND; - case grpc::StatusCode::ALREADY_EXISTS: - return EStatus::ALREADY_EXISTS; - case grpc::StatusCode::PERMISSION_DENIED: - return EStatus::UNAUTHORIZED; - case grpc::StatusCode::UNAUTHENTICATED: - return EStatus::UNAUTHORIZED; - case grpc::StatusCode::RESOURCE_EXHAUSTED: - return EStatus::SESSION_BUSY; - case grpc::StatusCode::FAILED_PRECONDITION: - return EStatus::BAD_REQUEST; - case grpc::StatusCode::ABORTED: - return EStatus::ABORTED; - case grpc::StatusCode::OUT_OF_RANGE: - return EStatus::GENERIC_ERROR; - case grpc::StatusCode::UNIMPLEMENTED: - return EStatus::UNSUPPORTED; - case grpc::StatusCode::INTERNAL: - return EStatus::INTERNAL_ERROR; - case grpc::StatusCode::UNAVAILABLE: - return EStatus::UNAVAILABLE; - case grpc::StatusCode::DATA_LOSS: - return EStatus::GENERIC_ERROR; - default: - return EStatus::STATUS_CODE_UNSPECIFIED; - } -} - -EStatus ToStatus(Ydb::StatusIds::StatusCode status) { - switch (status) { - case Ydb::StatusIds::SUCCESS: - return EStatus::SUCCESS; - case 
Ydb::StatusIds::BAD_REQUEST: - return EStatus::BAD_REQUEST; - case Ydb::StatusIds::UNAUTHORIZED: - return EStatus::UNAUTHORIZED; - case Ydb::StatusIds::INTERNAL_ERROR: - return EStatus::INTERNAL_ERROR; - case Ydb::StatusIds::ABORTED: - return EStatus::ABORTED; - case Ydb::StatusIds::UNAVAILABLE: - return EStatus::UNAVAILABLE; - case Ydb::StatusIds::OVERLOADED: - return EStatus::OVERLOADED; - case Ydb::StatusIds::SCHEME_ERROR: - return EStatus::SCHEME_ERROR; - case Ydb::StatusIds::GENERIC_ERROR: - return EStatus::GENERIC_ERROR; - case Ydb::StatusIds::TIMEOUT: - return EStatus::TIMEOUT; - case Ydb::StatusIds::BAD_SESSION: - return EStatus::BAD_SESSION; - case Ydb::StatusIds::PRECONDITION_FAILED: - return EStatus::PRECONDITION_FAILED; - case Ydb::StatusIds::ALREADY_EXISTS: - return EStatus::ALREADY_EXISTS; - case Ydb::StatusIds::NOT_FOUND: - return EStatus::NOT_FOUND; - case Ydb::StatusIds::SESSION_EXPIRED: - return EStatus::SESSION_EXPIRED; - case Ydb::StatusIds::CANCELLED: - return EStatus::CANCELLED; - case Ydb::StatusIds::UNDETERMINED: - return EStatus::UNDETERMINED; - case Ydb::StatusIds::UNSUPPORTED: - return EStatus::UNSUPPORTED; - case Ydb::StatusIds::SESSION_BUSY: - return EStatus::SESSION_BUSY; - default: - return EStatus::STATUS_CODE_UNSPECIFIED; - } -} - -TString MakeErrorText(const Ydb::Operations::Operation& operation) { - NYql::TIssues issues; - IssuesFromMessage(operation.issues(), issues); - return issues.ToString(); -} - -ui64 GetValue(const NLogBroker::IntOrDefaultValue& value) { - if (value.user_defined()) { - return value.user_defined(); - } else { - return value.default_(); - } -} - -class TClient : public IClient { -public: - TClient(const TClientOptions& options, TIntrusivePtr<TConnections::TImpl> connections) - : Options(options) - , Connections(std::move(connections)) - , Connection(Connections->GetClient().CreateGRpcServiceConnection<NLogBroker::ConfigurationManagerService>(MakeConnectionConfig())) - { - } - - template <class TDescriptionType, class TObjectProps> - static TDescribePathResult MakeCommonResult(const NLogBroker::DescribePathResult& result, const TObjectProps& props) { - TDescribePathResult ret = TDescribePathResult::Make<TDescriptionType>(result.path().path()); - TDescriptionType& desc = std::get<TDescriptionType>(ret.GetDescriptionVariant()); - for (const auto& kv : props.metadata()) { - desc.Metadata[kv.key()] = kv.value(); - } - return ret; - } - - static TDescribePathResult MakeResult(const NLogBroker::DescribePathResult& result, const NLogBroker::DescribeAccountResult& props) { - TDescribePathResult ret = MakeCommonResult<TAccountDescription>(result, props); - return ret; - } - - static TDescribePathResult MakeResult(const NLogBroker::DescribePathResult& result, const NLogBroker::DescribeDirectoryResult& props) { - TDescribePathResult ret = MakeCommonResult<TPathDescription>(result, props); - return ret; - } - - static TDescribePathResult MakeResult(const NLogBroker::DescribePathResult& result, const NLogBroker::DescribeTopicResult& props) { - TDescribePathResult ret = MakeCommonResult<TTopicDescription>(result, props); - std::get<TTopicDescription>(ret.GetDescriptionVariant()).PartitionsCount = static_cast<size_t>(GetValue(props.properties().partitions_count())); - return ret; - } - - static TDescribePathResult MakeResult(const NLogBroker::DescribePathResult& result, const NLogBroker::DescribeConsumerResult& props) { - TDescribePathResult ret = MakeCommonResult<TConsumerDescription>(result, props); - return ret; - } - - static auto 
DescribePathResultCallback(const NThreading::TPromise<TDescribePathResult>& promise, const TString& path) { - return [promise = promise, path = path](NGrpc::TGrpcStatus&& status, - NLogBroker::DescribePathResponse&& response) mutable { - if (!status.Ok()) { - promise.SetException( - std::make_exception_ptr( - TException(ToStatus(status)) - << "Failed to describe path '" << path << "'. Grpc status: " << status.GRpcStatusCode << " (" << status.Msg << ")")); - return; - } - if (response.operation().status() != Ydb::StatusIds::SUCCESS) { - promise.SetException( - std::make_exception_ptr( - TException(ToStatus(response.operation().status())) - << "Failed to describe path '" << path << "'. Ydb status: " << Ydb::StatusIds::StatusCode_Name(response.operation().status()) - << " (" << MakeErrorText(response.operation()) << ")")); - return; - } - NLogBroker::DescribePathResult result; - response.operation().result().UnpackTo(&result); - - switch (result.result_case()) { - case NLogBroker::DescribePathResult::RESULT_NOT_SET: - { - promise.SetException( - std::make_exception_ptr( - TException(EStatus::INTERNAL_ERROR) - << "Failed to describe path '" << path << "': object type for path is not set by logbroker configuration manager")); - break; - } - case NLogBroker::DescribePathResult::kAccount: - { - promise.SetValue(MakeResult(result, result.account())); - break; - } - case NLogBroker::DescribePathResult::kDirectory: - { - promise.SetValue(MakeResult(result, result.directory())); - break; - } - case NLogBroker::DescribePathResult::kTopic: - { - promise.SetValue(MakeResult(result, result.topic())); - break; - } - case NLogBroker::DescribePathResult::kConsumer: - { - promise.SetValue(MakeResult(result, result.consumer())); - break; - } - } - }; - } - + +#include <logbroker/public/api/grpc/config_manager.grpc.pb.h> + +#include <library/cpp/grpc/client/grpc_client_low.h> + +#include <util/string/builder.h> + +#include <exception> + +namespace NPq::NConfigurationManager { + +class TConnections::TImpl : public TAtomicRefCount<TConnections::TImpl> { +public: + explicit TImpl(size_t numWorkerThreads) + : Client(numWorkerThreads) + { + } + + void Stop(bool wait = false) { + Client.Stop(wait); + } + + NGrpc::TGRpcClientLow& GetClient() { + return Client; + } + +private: + NGrpc::TGRpcClientLow Client; +}; + +EStatus ToStatus(const NGrpc::TGrpcStatus& status) { + if (status.InternalError) { + return EStatus::INTERNAL_ERROR; + } + + switch (status.GRpcStatusCode) { + case grpc::StatusCode::OK: + return EStatus::SUCCESS; + case grpc::StatusCode::CANCELLED: + return EStatus::CANCELLED; + case grpc::StatusCode::UNKNOWN: + return EStatus::UNDETERMINED; + case grpc::StatusCode::INVALID_ARGUMENT: + return EStatus::BAD_REQUEST; + case grpc::StatusCode::DEADLINE_EXCEEDED: + return EStatus::TIMEOUT; + case grpc::StatusCode::NOT_FOUND: + return EStatus::NOT_FOUND; + case grpc::StatusCode::ALREADY_EXISTS: + return EStatus::ALREADY_EXISTS; + case grpc::StatusCode::PERMISSION_DENIED: + return EStatus::UNAUTHORIZED; + case grpc::StatusCode::UNAUTHENTICATED: + return EStatus::UNAUTHORIZED; + case grpc::StatusCode::RESOURCE_EXHAUSTED: + return EStatus::SESSION_BUSY; + case grpc::StatusCode::FAILED_PRECONDITION: + return EStatus::BAD_REQUEST; + case grpc::StatusCode::ABORTED: + return EStatus::ABORTED; + case grpc::StatusCode::OUT_OF_RANGE: + return EStatus::GENERIC_ERROR; + case grpc::StatusCode::UNIMPLEMENTED: + return EStatus::UNSUPPORTED; + case grpc::StatusCode::INTERNAL: + return EStatus::INTERNAL_ERROR; + case 
grpc::StatusCode::UNAVAILABLE: + return EStatus::UNAVAILABLE; + case grpc::StatusCode::DATA_LOSS: + return EStatus::GENERIC_ERROR; + default: + return EStatus::STATUS_CODE_UNSPECIFIED; + } +} + +EStatus ToStatus(Ydb::StatusIds::StatusCode status) { + switch (status) { + case Ydb::StatusIds::SUCCESS: + return EStatus::SUCCESS; + case Ydb::StatusIds::BAD_REQUEST: + return EStatus::BAD_REQUEST; + case Ydb::StatusIds::UNAUTHORIZED: + return EStatus::UNAUTHORIZED; + case Ydb::StatusIds::INTERNAL_ERROR: + return EStatus::INTERNAL_ERROR; + case Ydb::StatusIds::ABORTED: + return EStatus::ABORTED; + case Ydb::StatusIds::UNAVAILABLE: + return EStatus::UNAVAILABLE; + case Ydb::StatusIds::OVERLOADED: + return EStatus::OVERLOADED; + case Ydb::StatusIds::SCHEME_ERROR: + return EStatus::SCHEME_ERROR; + case Ydb::StatusIds::GENERIC_ERROR: + return EStatus::GENERIC_ERROR; + case Ydb::StatusIds::TIMEOUT: + return EStatus::TIMEOUT; + case Ydb::StatusIds::BAD_SESSION: + return EStatus::BAD_SESSION; + case Ydb::StatusIds::PRECONDITION_FAILED: + return EStatus::PRECONDITION_FAILED; + case Ydb::StatusIds::ALREADY_EXISTS: + return EStatus::ALREADY_EXISTS; + case Ydb::StatusIds::NOT_FOUND: + return EStatus::NOT_FOUND; + case Ydb::StatusIds::SESSION_EXPIRED: + return EStatus::SESSION_EXPIRED; + case Ydb::StatusIds::CANCELLED: + return EStatus::CANCELLED; + case Ydb::StatusIds::UNDETERMINED: + return EStatus::UNDETERMINED; + case Ydb::StatusIds::UNSUPPORTED: + return EStatus::UNSUPPORTED; + case Ydb::StatusIds::SESSION_BUSY: + return EStatus::SESSION_BUSY; + default: + return EStatus::STATUS_CODE_UNSPECIFIED; + } +} + +TString MakeErrorText(const Ydb::Operations::Operation& operation) { + NYql::TIssues issues; + IssuesFromMessage(operation.issues(), issues); + return issues.ToString(); +} + +ui64 GetValue(const NLogBroker::IntOrDefaultValue& value) { + if (value.user_defined()) { + return value.user_defined(); + } else { + return value.default_(); + } +} + +class TClient : public IClient { +public: + TClient(const TClientOptions& options, TIntrusivePtr<TConnections::TImpl> connections) + : Options(options) + , Connections(std::move(connections)) + , Connection(Connections->GetClient().CreateGRpcServiceConnection<NLogBroker::ConfigurationManagerService>(MakeConnectionConfig())) + { + } + + template <class TDescriptionType, class TObjectProps> + static TDescribePathResult MakeCommonResult(const NLogBroker::DescribePathResult& result, const TObjectProps& props) { + TDescribePathResult ret = TDescribePathResult::Make<TDescriptionType>(result.path().path()); + TDescriptionType& desc = std::get<TDescriptionType>(ret.GetDescriptionVariant()); + for (const auto& kv : props.metadata()) { + desc.Metadata[kv.key()] = kv.value(); + } + return ret; + } + + static TDescribePathResult MakeResult(const NLogBroker::DescribePathResult& result, const NLogBroker::DescribeAccountResult& props) { + TDescribePathResult ret = MakeCommonResult<TAccountDescription>(result, props); + return ret; + } + + static TDescribePathResult MakeResult(const NLogBroker::DescribePathResult& result, const NLogBroker::DescribeDirectoryResult& props) { + TDescribePathResult ret = MakeCommonResult<TPathDescription>(result, props); + return ret; + } + + static TDescribePathResult MakeResult(const NLogBroker::DescribePathResult& result, const NLogBroker::DescribeTopicResult& props) { + TDescribePathResult ret = MakeCommonResult<TTopicDescription>(result, props); + std::get<TTopicDescription>(ret.GetDescriptionVariant()).PartitionsCount = 
static_cast<size_t>(GetValue(props.properties().partitions_count())); + return ret; + } + + static TDescribePathResult MakeResult(const NLogBroker::DescribePathResult& result, const NLogBroker::DescribeConsumerResult& props) { + TDescribePathResult ret = MakeCommonResult<TConsumerDescription>(result, props); + return ret; + } + + static auto DescribePathResultCallback(const NThreading::TPromise<TDescribePathResult>& promise, const TString& path) { + return [promise = promise, path = path](NGrpc::TGrpcStatus&& status, + NLogBroker::DescribePathResponse&& response) mutable { + if (!status.Ok()) { + promise.SetException( + std::make_exception_ptr( + TException(ToStatus(status)) + << "Failed to describe path '" << path << "'. Grpc status: " << status.GRpcStatusCode << " (" << status.Msg << ")")); + return; + } + if (response.operation().status() != Ydb::StatusIds::SUCCESS) { + promise.SetException( + std::make_exception_ptr( + TException(ToStatus(response.operation().status())) + << "Failed to describe path '" << path << "'. Ydb status: " << Ydb::StatusIds::StatusCode_Name(response.operation().status()) + << " (" << MakeErrorText(response.operation()) << ")")); + return; + } + NLogBroker::DescribePathResult result; + response.operation().result().UnpackTo(&result); + + switch (result.result_case()) { + case NLogBroker::DescribePathResult::RESULT_NOT_SET: + { + promise.SetException( + std::make_exception_ptr( + TException(EStatus::INTERNAL_ERROR) + << "Failed to describe path '" << path << "': object type for path is not set by logbroker configuration manager")); + break; + } + case NLogBroker::DescribePathResult::kAccount: + { + promise.SetValue(MakeResult(result, result.account())); + break; + } + case NLogBroker::DescribePathResult::kDirectory: + { + promise.SetValue(MakeResult(result, result.directory())); + break; + } + case NLogBroker::DescribePathResult::kTopic: + { + promise.SetValue(MakeResult(result, result.topic())); + break; + } + case NLogBroker::DescribePathResult::kConsumer: + { + promise.SetValue(MakeResult(result, result.consumer())); + break; + } + } + }; + } + TAsyncDescribePathResult DescribePath(const TString& path) const override { - NLogBroker::DescribePathRequest describeRequest; + NLogBroker::DescribePathRequest describeRequest; describeRequest.set_token(Options.GetCredentialsProviderFactory()->CreateProvider()->GetAuthInfo()); - describeRequest.mutable_path()->set_path(path); - - NThreading::TPromise<TDescribePathResult> promise = NThreading::NewPromise<TDescribePathResult>(); - Connection->DoRequest<NLogBroker::DescribePathRequest, NLogBroker::DescribePathResponse>( - describeRequest, - DescribePathResultCallback(promise, path), - &NLogBroker::ConfigurationManagerService::Stub::AsyncDescribePath); - return promise.GetFuture(); - } - -private: - NGrpc::TGRpcClientConfig MakeConnectionConfig() const { - NGrpc::TGRpcClientConfig cfg(Options.GetEndpoint(), Options.GetRequestTimeout()); - cfg.CompressionAlgoritm = GRPC_COMPRESS_GZIP; + describeRequest.mutable_path()->set_path(path); + + NThreading::TPromise<TDescribePathResult> promise = NThreading::NewPromise<TDescribePathResult>(); + Connection->DoRequest<NLogBroker::DescribePathRequest, NLogBroker::DescribePathResponse>( + describeRequest, + DescribePathResultCallback(promise, path), + &NLogBroker::ConfigurationManagerService::Stub::AsyncDescribePath); + return promise.GetFuture(); + } + +private: + NGrpc::TGRpcClientConfig MakeConnectionConfig() const { + NGrpc::TGRpcClientConfig cfg(Options.GetEndpoint(), 
Options.GetRequestTimeout()); + cfg.CompressionAlgoritm = GRPC_COMPRESS_GZIP; cfg.EnableSsl = Options.GetEnableSsl(); - return cfg; - } - -private: - const TClientOptions Options; - TIntrusivePtr<TConnections::TImpl> Connections; - std::shared_ptr<NGrpc::TServiceConnection<NLogBroker::ConfigurationManagerService>> Connection; -}; - -TConnections::TConnections(size_t numWorkerThreads) - : Impl(MakeIntrusive<TImpl>(numWorkerThreads)) -{ -} - -TConnections::~TConnections() = default; - -void TConnections::Stop(bool wait) { - Impl->Stop(wait); -} - -IClient::TPtr TConnections::GetClient(const TClientOptions& options) { - return MakeIntrusive<TClient>(options, Impl); -} - -} // namespace NPq::NConfigurationManager + return cfg; + } + +private: + const TClientOptions Options; + TIntrusivePtr<TConnections::TImpl> Connections; + std::shared_ptr<NGrpc::TServiceConnection<NLogBroker::ConfigurationManagerService>> Connection; +}; + +TConnections::TConnections(size_t numWorkerThreads) + : Impl(MakeIntrusive<TImpl>(numWorkerThreads)) +{ +} + +TConnections::~TConnections() = default; + +void TConnections::Stop(bool wait) { + Impl->Stop(wait); +} + +IClient::TPtr TConnections::GetClient(const TClientOptions& options) { + return MakeIntrusive<TClient>(options, Impl); +} + +} // namespace NPq::NConfigurationManager diff --git a/ydb/library/yql/providers/pq/cm_client/lib/client.h b/ydb/library/yql/providers/pq/cm_client/lib/client.h index 1cc01b1a960..6eda2a0d32c 100644 --- a/ydb/library/yql/providers/pq/cm_client/lib/client.h +++ b/ydb/library/yql/providers/pq/cm_client/lib/client.h @@ -1,21 +1,21 @@ -#pragma once +#pragma once #include <ydb/library/yql/providers/pq/cm_client/interface/client.h> - -namespace NPq::NConfigurationManager { - -struct TConnections : public IConnections { - class TImpl; - - explicit TConnections(size_t numWorkerThreads = 2); - ~TConnections(); - - void Stop(bool wait = false) override; - - IClient::TPtr GetClient(const TClientOptions& = {}) override; - -private: - TIntrusivePtr<TImpl> Impl; -}; - -} // namespace NPq::NConfigurationManager + +namespace NPq::NConfigurationManager { + +struct TConnections : public IConnections { + class TImpl; + + explicit TConnections(size_t numWorkerThreads = 2); + ~TConnections(); + + void Stop(bool wait = false) override; + + IClient::TPtr GetClient(const TClientOptions& = {}) override; + +private: + TIntrusivePtr<TImpl> Impl; +}; + +} // namespace NPq::NConfigurationManager diff --git a/ydb/library/yql/providers/pq/cm_client/lib/ya.make b/ydb/library/yql/providers/pq/cm_client/lib/ya.make index dcc155df1e8..2e4b5463beb 100644 --- a/ydb/library/yql/providers/pq/cm_client/lib/ya.make +++ b/ydb/library/yql/providers/pq/cm_client/lib/ya.make @@ -1,23 +1,23 @@ -LIBRARY() - -OWNER( - galaxycrab - g:yq - g:yql -) - -SRCS( - client.cpp -) - -PEERDIR( - library/cpp/grpc/client - library/cpp/threading/future - logbroker/public/api/grpc - logbroker/public/api/protos +LIBRARY() + +OWNER( + galaxycrab + g:yq + g:yql +) + +SRCS( + client.cpp +) + +PEERDIR( + library/cpp/grpc/client + library/cpp/threading/future + logbroker/public/api/grpc + logbroker/public/api/protos ydb/public/sdk/cpp/client/ydb_types/credentials ydb/library/yql/providers/pq/cm_client/interface ydb/library/yql/public/issue -) - -END() +) + +END() diff --git a/ydb/library/yql/providers/pq/cm_client/ya.make b/ydb/library/yql/providers/pq/cm_client/ya.make index fbf350a8fc5..8a556c65f8a 100644 --- a/ydb/library/yql/providers/pq/cm_client/ya.make +++ 
b/ydb/library/yql/providers/pq/cm_client/ya.make @@ -1,9 +1,9 @@ -OWNER( - galaxycrab - g:yq - g:yql -) - +OWNER( + galaxycrab + g:yq + g:yql +) + IF (NOT OPENSOURCE) RECURSE( interface diff --git a/ydb/library/yql/providers/pq/common/ya.make b/ydb/library/yql/providers/pq/common/ya.make index 98fe28311b5..db0b8d341bc 100644 --- a/ydb/library/yql/providers/pq/common/ya.make +++ b/ydb/library/yql/providers/pq/common/ya.make @@ -1,15 +1,15 @@ -LIBRARY() - -OWNER( - galaxycrab - g:yq - g:yql -) - -SRCS( - yql_names.cpp -) - -YQL_LAST_ABI_VERSION() - -END() +LIBRARY() + +OWNER( + galaxycrab + g:yq + g:yql +) + +SRCS( + yql_names.cpp +) + +YQL_LAST_ABI_VERSION() + +END() diff --git a/ydb/library/yql/providers/pq/common/yql_names.cpp b/ydb/library/yql/providers/pq/common/yql_names.cpp index ea02ce470a0..31bf20ab50a 100644 --- a/ydb/library/yql/providers/pq/common/yql_names.cpp +++ b/ydb/library/yql/providers/pq/common/yql_names.cpp @@ -1,4 +1,4 @@ -#include "yql_names.h" - -namespace NYql { -} // namespace NYql +#include "yql_names.h" + +namespace NYql { +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/common/yql_names.h b/ydb/library/yql/providers/pq/common/yql_names.h index e4ac47c5cd8..a42d8ffe378 100644 --- a/ydb/library/yql/providers/pq/common/yql_names.h +++ b/ydb/library/yql/providers/pq/common/yql_names.h @@ -1,13 +1,13 @@ -#pragma once - -#include <util/generic/strbuf.h> - -namespace NYql { - -constexpr TStringBuf PartitionsCountProp = "PartitionsCount"; -constexpr TStringBuf ConsumerSetting = "Consumer"; -constexpr TStringBuf EndpointSetting = "Endpoint"; -constexpr TStringBuf UseSslSetting = "UseSsl"; +#pragma once + +#include <util/generic/strbuf.h> + +namespace NYql { + +constexpr TStringBuf PartitionsCountProp = "PartitionsCount"; +constexpr TStringBuf ConsumerSetting = "Consumer"; +constexpr TStringBuf EndpointSetting = "Endpoint"; +constexpr TStringBuf UseSslSetting = "UseSsl"; constexpr TStringBuf AddBearerToTokenSetting = "AddBearerToToken"; - -} // namespace NYql + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/expr_nodes/ya.make b/ydb/library/yql/providers/pq/expr_nodes/ya.make index d061e1c08b9..fef87765100 100644 --- a/ydb/library/yql/providers/pq/expr_nodes/ya.make +++ b/ydb/library/yql/providers/pq/expr_nodes/ya.make @@ -1,39 +1,39 @@ -LIBRARY() - -OWNER( - galaxycrab - g:yq - g:yql -) - -SRCS( - yql_pq_expr_nodes.cpp -) - -PEERDIR( +LIBRARY() + +OWNER( + galaxycrab + g:yq + g:yql +) + +SRCS( + yql_pq_expr_nodes.cpp +) + +PEERDIR( ydb/library/yql/core/expr_nodes ydb/library/yql/providers/common/provider -) - -SRCDIR( +) + +SRCDIR( ydb/library/yql/core/expr_nodes_gen -) - -RUN_PROGRAM( +) + +RUN_PROGRAM( ydb/library/yql/core/expr_nodes_gen/gen - yql_expr_nodes_gen.jnj - yql_pq_expr_nodes.json - yql_pq_expr_nodes.gen.h - yql_pq_expr_nodes.decl.inl.h - yql_pq_expr_nodes.defs.inl.h - IN yql_expr_nodes_gen.jnj - IN yql_pq_expr_nodes.json - OUT yql_pq_expr_nodes.gen.h - OUT yql_pq_expr_nodes.decl.inl.h - OUT yql_pq_expr_nodes.defs.inl.h - OUTPUT_INCLUDES + yql_expr_nodes_gen.jnj + yql_pq_expr_nodes.json + yql_pq_expr_nodes.gen.h + yql_pq_expr_nodes.decl.inl.h + yql_pq_expr_nodes.defs.inl.h + IN yql_expr_nodes_gen.jnj + IN yql_pq_expr_nodes.json + OUT yql_pq_expr_nodes.gen.h + OUT yql_pq_expr_nodes.decl.inl.h + OUT yql_pq_expr_nodes.defs.inl.h + OUTPUT_INCLUDES ${ARCADIA_ROOT}/ydb/library/yql/core/expr_nodes_gen/yql_expr_nodes_gen.h - ${ARCADIA_ROOT}/util/generic/hash_set.h -) - -END() + ${ARCADIA_ROOT}/util/generic/hash_set.h +) + +END() diff --git 
a/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.cpp b/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.cpp index 45babbced84..bbb6a9da934 100644 --- a/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.cpp +++ b/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.cpp @@ -1 +1 @@ -#include "yql_pq_expr_nodes.h" +#include "yql_pq_expr_nodes.h" diff --git a/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h b/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h index be54a2c2aed..0dfe133f552 100644 --- a/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h +++ b/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h @@ -1,39 +1,39 @@ -#pragma once - +#pragma once + #include <ydb/library/yql/core/expr_nodes/yql_expr_nodes.h> #include <ydb/library/yql/providers/common/provider/yql_provider_names.h> #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.gen.h> - -namespace NYql { -namespace NNodes { - + +namespace NYql { +namespace NNodes { + #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.decl.inl.h> - -class TPqDataSource: public NGenerated::TPqDataSourceStub<TExprBase, TCallable, TCoAtom> { -public: - explicit TPqDataSource(const TExprNode* node) - : TPqDataSourceStub(node) - { - } - - explicit TPqDataSource(const TExprNode::TPtr& node) - : TPqDataSourceStub(node) - { - } - - static bool Match(const TExprNode* node) { - if (!TPqDataSourceStub::Match(node)) { - return false; - } - - if (node->Head().Content() != PqProviderName) { - return false; - } - - return true; - } -}; - + +class TPqDataSource: public NGenerated::TPqDataSourceStub<TExprBase, TCallable, TCoAtom> { +public: + explicit TPqDataSource(const TExprNode* node) + : TPqDataSourceStub(node) + { + } + + explicit TPqDataSource(const TExprNode::TPtr& node) + : TPqDataSourceStub(node) + { + } + + static bool Match(const TExprNode* node) { + if (!TPqDataSourceStub::Match(node)) { + return false; + } + + if (node->Head().Content() != PqProviderName) { + return false; + } + + return true; + } +}; + class TPqDataSink : public NGenerated::TPqDataSinkStub<TExprBase, TCallable, TCoAtom> { public: explicit TPqDataSink(const TExprNode* node) @@ -56,6 +56,6 @@ public: }; #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.defs.inl.h> - -} // namespace NNodes -} // namespace NYql + +} // namespace NNodes +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.json b/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.json index bad47e9d70e..e763af9ba22 100644 --- a/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.json +++ b/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.json @@ -1,21 +1,21 @@ -{ - "NodeRootType": "TExprBase", - "NodeBuilderBase": "TNodeBuilderBase", - "ListBuilderBase": "TListBuilderBase", - "FreeArgCallableBase": "TFreeArgCallable", - "FreeArgBuilderBase": "TFreeArgCallableBuilderBase", - "Nodes": [ - { - "Name": "TPqDataSource", - "Base": "TCallable", - "Definition": "Custom", - "Match": {"Type": "Callable", "Name": "DataSource"}, - "Children": [ - {"Index": 0, "Name": "Category", "Type": "TCoAtom"}, - {"Index": 1, "Name": "Cluster", "Type": "TCoAtom"} - ] - }, - { +{ + "NodeRootType": "TExprBase", + "NodeBuilderBase": "TNodeBuilderBase", + "ListBuilderBase": "TListBuilderBase", + "FreeArgCallableBase": "TFreeArgCallable", + "FreeArgBuilderBase": "TFreeArgCallableBuilderBase", + "Nodes": [ + { + "Name": "TPqDataSource", + "Base": "TCallable", + "Definition": 
"Custom", + "Match": {"Type": "Callable", "Name": "DataSource"}, + "Children": [ + {"Index": 0, "Name": "Category", "Type": "TCoAtom"}, + {"Index": 1, "Name": "Cluster", "Type": "TCoAtom"} + ] + }, + { "Name": "TPqDataSink", "Base": "TCallable", "Definition": "Custom", @@ -26,79 +26,79 @@ ] }, { - "Name": "TPqTopic", - "Base": "TCallable", - "Match": {"Type": "Callable", "Name": "PqTopic"}, - "Children": [ - {"Index": 0, "Name": "Cluster", "Type": "TCoAtom"}, - {"Index": 1, "Name": "Database", "Type": "TCoAtom"}, - {"Index": 2, "Name": "Path", "Type": "TCoAtom"}, - {"Index": 3, "Name": "Props", "Type": "TCoNameValueTupleList"}, + "Name": "TPqTopic", + "Base": "TCallable", + "Match": {"Type": "Callable", "Name": "PqTopic"}, + "Children": [ + {"Index": 0, "Name": "Cluster", "Type": "TCoAtom"}, + {"Index": 1, "Name": "Database", "Type": "TCoAtom"}, + {"Index": 2, "Name": "Path", "Type": "TCoAtom"}, + {"Index": 3, "Name": "Props", "Type": "TCoNameValueTupleList"}, {"Index": 4, "Name": "Metadata", "Type": "TCoNameValueTupleList"}, {"Index": 5, "Name": "RowSpec", "Type": "TExprBase"} - ] - }, - { - "Name": "TPqRead", - "Base": "TFreeArgCallable", - "Match": {"Type": "Callable", "Name": "Read!"}, - "Children": [ - {"Index": 0, "Name": "World", "Type": "TExprBase"}, - {"Index": 1, "Name": "DataSource", "Type": "TPqDataSource"} - ] - }, - { - "Name": "TPqReadTopic", - "Base": "TCallable", - "Match": {"Type": "Callable", "Name": "PqReadTopic!"}, - "Children": [ - {"Index": 0, "Name": "World", "Type": "TExprBase"}, - {"Index": 1, "Name": "DataSource", "Type": "TPqDataSource"}, - {"Index": 2, "Name": "Topic", "Type": "TPqTopic"}, - {"Index": 3, "Name": "Columns", "Type": "TExprBase"}, + ] + }, + { + "Name": "TPqRead", + "Base": "TFreeArgCallable", + "Match": {"Type": "Callable", "Name": "Read!"}, + "Children": [ + {"Index": 0, "Name": "World", "Type": "TExprBase"}, + {"Index": 1, "Name": "DataSource", "Type": "TPqDataSource"} + ] + }, + { + "Name": "TPqReadTopic", + "Base": "TCallable", + "Match": {"Type": "Callable", "Name": "PqReadTopic!"}, + "Children": [ + {"Index": 0, "Name": "World", "Type": "TExprBase"}, + {"Index": 1, "Name": "DataSource", "Type": "TPqDataSource"}, + {"Index": 2, "Name": "Topic", "Type": "TPqTopic"}, + {"Index": 3, "Name": "Columns", "Type": "TExprBase"}, {"Index": 4, "Name": "Format", "Type": "TCoAtom"}, {"Index": 5, "Name": "Compression", "Type": "TCoAtom"}, {"Index": 6, "Name": "LimitHint", "Type": "TExprBase", "Optional": true} - ] + ] }, { - "Name": "TDqPqTopicSource", - "Base": "TCallable", - "Match": {"Type": "Callable", "Name": "DqPqTopicSource"}, - "Children": [ - {"Index": 0, "Name": "Topic", "Type": "TPqTopic"}, - {"Index": 1, "Name": "Columns", "Type": "TExprBase"}, - {"Index": 2, "Name": "Settings", "Type": "TCoNameValueTupleList"}, + "Name": "TDqPqTopicSource", + "Base": "TCallable", + "Match": {"Type": "Callable", "Name": "DqPqTopicSource"}, + "Children": [ + {"Index": 0, "Name": "Topic", "Type": "TPqTopic"}, + {"Index": 1, "Name": "Columns", "Type": "TExprBase"}, + {"Index": 2, "Name": "Settings", "Type": "TCoNameValueTupleList"}, {"Index": 3, "Name": "Token", "Type": "TCoSecureParam"} - ] - }, - { - "Name": "TDqPqTopicSink", - "Base": "TCallable", - "Match": {"Type": "Callable", "Name": "DqPqTopicSink"}, - "Children": [ - {"Index": 0, "Name": "Topic", "Type": "TPqTopic"}, - {"Index": 1, "Name": "Settings", "Type": "TCoNameValueTupleList"}, + ] + }, + { + "Name": "TDqPqTopicSink", + "Base": "TCallable", + "Match": {"Type": "Callable", "Name": "DqPqTopicSink"}, 
+ "Children": [ + {"Index": 0, "Name": "Topic", "Type": "TPqTopic"}, + {"Index": 1, "Name": "Settings", "Type": "TCoNameValueTupleList"}, {"Index": 2, "Name": "Token", "Type": "TCoSecureParam"} - ] - }, - { - "Name": "TPqWrite", - "Base": "TFreeArgCallable", - "Match": {"Type": "Callable", "Name": "Write!"}, - "Children": [ - {"Index": 0, "Name": "World", "Type": "TExprBase"}, - {"Index": 1, "Name": "DataSink", "Type": "TPqDataSink"} - ] - }, - { + ] + }, + { + "Name": "TPqWrite", + "Base": "TFreeArgCallable", + "Match": {"Type": "Callable", "Name": "Write!"}, + "Children": [ + {"Index": 0, "Name": "World", "Type": "TExprBase"}, + {"Index": 1, "Name": "DataSink", "Type": "TPqDataSink"} + ] + }, + { "Name": "TPqWriteTopic", "Base": "TCallable", "Match": {"Type": "Callable", "Name": "PqWriteTopic!"}, "Children": [ {"Index": 0, "Name": "World", "Type": "TExprBase"}, {"Index": 1, "Name": "DataSink", "Type": "TPqDataSink"}, - {"Index": 2, "Name": "Topic", "Type": "TPqTopic"}, + {"Index": 2, "Name": "Topic", "Type": "TPqTopic"}, {"Index": 3, "Name": "Input", "Type": "TExprBase"}, {"Index": 4, "Name": "Mode", "Type": "TCoAtom"}, {"Index": 5, "Name": "Settings", "Type": "TCoNameValueTupleList"} @@ -112,6 +112,6 @@ {"Index": 0, "Name": "Endpoint", "Type": "TCoAtom"}, {"Index": 1, "Name": "TvmId", "Type": "TCoAtom"} ] - } - ] -} + } + ] +} diff --git a/ydb/library/yql/providers/pq/gateway/dummy/ya.make b/ydb/library/yql/providers/pq/gateway/dummy/ya.make index 19ae8146174..e5345065408 100644 --- a/ydb/library/yql/providers/pq/gateway/dummy/ya.make +++ b/ydb/library/yql/providers/pq/gateway/dummy/ya.make @@ -1,17 +1,17 @@ -LIBRARY() - -OWNER( - galaxycrab - g:yq - g:yql -) - -SRCS( - yql_pq_dummy_gateway.cpp -) - -PEERDIR( +LIBRARY() + +OWNER( + galaxycrab + g:yq + g:yql +) + +SRCS( + yql_pq_dummy_gateway.cpp +) + +PEERDIR( ydb/library/yql/providers/pq/provider -) - -END() +) + +END() diff --git a/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.cpp b/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.cpp index b1d64befb3a..99bb0b52a60 100644 --- a/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.cpp +++ b/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.cpp @@ -1,57 +1,57 @@ -#include "yql_pq_dummy_gateway.h" - -#include <util/generic/is_in.h> -#include <util/generic/yexception.h> - -#include <exception> - -namespace NYql { - +#include "yql_pq_dummy_gateway.h" + +#include <util/generic/is_in.h> +#include <util/generic/yexception.h> + +#include <exception> + +namespace NYql { + NThreading::TFuture<void> TDummyPqGateway::OpenSession(const TString& sessionId, const TString& username) { - with_lock (Mutex) { - Y_ENSURE(sessionId); - Y_ENSURE(username); - - Y_ENSURE(!IsIn(OpenedSessions, sessionId), "Session " << sessionId << " is already opened in pq gateway"); - OpenedSessions.insert(sessionId); - } - return NThreading::MakeFuture(); -} - -void TDummyPqGateway::CloseSession(const TString& sessionId) { - with_lock (Mutex) { - Y_ENSURE(IsIn(OpenedSessions, sessionId), "Session " << sessionId << " is not opened in pq gateway"); - OpenedSessions.erase(sessionId); - } -} - + with_lock (Mutex) { + Y_ENSURE(sessionId); + Y_ENSURE(username); + + Y_ENSURE(!IsIn(OpenedSessions, sessionId), "Session " << sessionId << " is already opened in pq gateway"); + OpenedSessions.insert(sessionId); + } + return NThreading::MakeFuture(); +} + +void TDummyPqGateway::CloseSession(const TString& sessionId) { + with_lock (Mutex) { + Y_ENSURE(IsIn(OpenedSessions, 
sessionId), "Session " << sessionId << " is not opened in pq gateway"); + OpenedSessions.erase(sessionId); + } +} + NPq::NConfigurationManager::TAsyncDescribePathResult TDummyPqGateway::DescribePath(const TString& sessionId, const TString& cluster, const TString& database, const TString& path, const TString& token) { - Y_UNUSED(database); + Y_UNUSED(database); Y_UNUSED(token); - with_lock (Mutex) { - Y_ENSURE(IsIn(OpenedSessions, sessionId), "Session " << sessionId << " is not opened in pq gateway"); - const auto key = std::make_pair(cluster, path); - if (const auto* topic = Topics.FindPtr(key)) { - NPq::NConfigurationManager::TTopicDescription desc(path); - desc.PartitionsCount = topic->PartitionsCount; - return NThreading::MakeFuture<NPq::NConfigurationManager::TDescribePathResult>( - NPq::NConfigurationManager::TDescribePathResult::Make<NPq::NConfigurationManager::TTopicDescription>(desc)); - } - return NThreading::MakeErrorFuture<NPq::NConfigurationManager::TDescribePathResult>( - std::make_exception_ptr(NPq::NConfigurationManager::TException{NPq::NConfigurationManager::EStatus::NOT_FOUND} << "Topic " << path << " is not found on cluster " << cluster)); - } -} - -TDummyPqGateway& TDummyPqGateway::AddDummyTopic(const TDummyTopic& topic) { - with_lock (Mutex) { - Y_ENSURE(topic.Cluster); - Y_ENSURE(topic.Path); - const auto key = std::make_pair(topic.Cluster, topic.Path); - Y_ENSURE(Topics.emplace(key, topic).second, "Already inserted dummy topic {" << topic.Cluster << ", " << topic.Path << "}"); - return *this; - } -} - + with_lock (Mutex) { + Y_ENSURE(IsIn(OpenedSessions, sessionId), "Session " << sessionId << " is not opened in pq gateway"); + const auto key = std::make_pair(cluster, path); + if (const auto* topic = Topics.FindPtr(key)) { + NPq::NConfigurationManager::TTopicDescription desc(path); + desc.PartitionsCount = topic->PartitionsCount; + return NThreading::MakeFuture<NPq::NConfigurationManager::TDescribePathResult>( + NPq::NConfigurationManager::TDescribePathResult::Make<NPq::NConfigurationManager::TTopicDescription>(desc)); + } + return NThreading::MakeErrorFuture<NPq::NConfigurationManager::TDescribePathResult>( + std::make_exception_ptr(NPq::NConfigurationManager::TException{NPq::NConfigurationManager::EStatus::NOT_FOUND} << "Topic " << path << " is not found on cluster " << cluster)); + } +} + +TDummyPqGateway& TDummyPqGateway::AddDummyTopic(const TDummyTopic& topic) { + with_lock (Mutex) { + Y_ENSURE(topic.Cluster); + Y_ENSURE(topic.Path); + const auto key = std::make_pair(topic.Cluster, topic.Path); + Y_ENSURE(Topics.emplace(key, topic).second, "Already inserted dummy topic {" << topic.Cluster << ", " << topic.Path << "}"); + return *this; + } +} + void TDummyPqGateway::UpdateClusterConfigs( const TString& clusterName, const TString& endpoint, @@ -64,4 +64,4 @@ void TDummyPqGateway::UpdateClusterConfigs( Y_UNUSED(secure); } -} // namespace NYql +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.h b/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.h index 753c849c201..79be2aa840d 100644 --- a/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.h +++ b/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.h @@ -1,51 +1,51 @@ -#pragma once +#pragma once #include <ydb/library/yql/providers/pq/provider/yql_pq_gateway.h> - -#include <util/generic/hash.h> -#include <util/generic/hash_set.h> -#include <util/generic/string.h> -#include <util/system/mutex.h> - -namespace NYql { - -struct 
TDummyTopic { - TDummyTopic(const TString& cluster, const TString& path) - : Cluster(cluster) - , Path(path) - { - } - - TDummyTopic& SetPartitionsCount(size_t count) { - PartitionsCount = count; - return *this; - } - - TString Cluster; - TString Path; - size_t PartitionsCount = 1; -}; - -// Dummy Pq gateway for tests. -class TDummyPqGateway : public IPqGateway { -public: - TDummyPqGateway& AddDummyTopic(const TDummyTopic& topic); - -public: + +#include <util/generic/hash.h> +#include <util/generic/hash_set.h> +#include <util/generic/string.h> +#include <util/system/mutex.h> + +namespace NYql { + +struct TDummyTopic { + TDummyTopic(const TString& cluster, const TString& path) + : Cluster(cluster) + , Path(path) + { + } + + TDummyTopic& SetPartitionsCount(size_t count) { + PartitionsCount = count; + return *this; + } + + TString Cluster; + TString Path; + size_t PartitionsCount = 1; +}; + +// Dummy Pq gateway for tests. +class TDummyPqGateway : public IPqGateway { +public: + TDummyPqGateway& AddDummyTopic(const TDummyTopic& topic); + +public: NThreading::TFuture<void> OpenSession(const TString& sessionId, const TString& username) override; - void CloseSession(const TString& sessionId) override; - + void CloseSession(const TString& sessionId) override; + NPq::NConfigurationManager::TAsyncDescribePathResult DescribePath(const TString& sessionId, const TString& cluster, const TString& database, const TString& path, const TString& token) override; - + void UpdateClusterConfigs( const TString& clusterName, const TString& endpoint, const TString& database, bool secure) override; -private: +private: mutable TMutex Mutex; - THashMap<std::pair<TString, TString>, TDummyTopic> Topics; - THashSet<TString> OpenedSessions; -}; - -} // namespace NYql + THashMap<std::pair<TString, TString>, TDummyTopic> Topics; + THashSet<TString> OpenedSessions; +}; + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/gateway/native/ya.make b/ydb/library/yql/providers/pq/gateway/native/ya.make index 9d9ed20770d..f6a2aca00ac 100644 --- a/ydb/library/yql/providers/pq/gateway/native/ya.make +++ b/ydb/library/yql/providers/pq/gateway/native/ya.make @@ -1,17 +1,17 @@ -LIBRARY() - -OWNER( - galaxycrab - g:yq - g:yql -) - -SRCS( - yql_pq_gateway.cpp - yql_pq_session.cpp -) - -PEERDIR( +LIBRARY() + +OWNER( + galaxycrab + g:yq + g:yql +) + +SRCS( + yql_pq_gateway.cpp + yql_pq_session.cpp +) + +PEERDIR( ydb/library/yql/providers/common/token_accessor/client ydb/library/yql/utils ydb/public/sdk/cpp/client/ydb_driver @@ -20,6 +20,6 @@ PEERDIR( ydb/library/yql/providers/common/proto ydb/library/yql/providers/pq/cm_client/interface ydb/library/yql/providers/pq/provider -) - -END() +) + +END() diff --git a/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.cpp b/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.cpp index bd89565050f..c0bd16ef248 100644 --- a/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.cpp +++ b/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.cpp @@ -1,24 +1,24 @@ -#include "yql_pq_gateway.h" -#include "yql_pq_session.h" - +#include "yql_pq_gateway.h" +#include "yql_pq_session.h" + #include <ydb/library/yql/utils/log/context.h> - + #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h> - -#include <util/system/mutex.h> - -#include <memory> - -namespace NYql { - -class TPqNativeGateway : public IPqGateway { -public: - explicit TPqNativeGateway(const TPqGatewayServices& services); - ~TPqNativeGateway(); - + +#include <util/system/mutex.h> + +#include <memory> + 
+namespace NYql { + +class TPqNativeGateway : public IPqGateway { +public: + explicit TPqNativeGateway(const TPqGatewayServices& services); + ~TPqNativeGateway(); + NThreading::TFuture<void> OpenSession(const TString& sessionId, const TString& username) override; - void CloseSession(const TString& sessionId) override; - + void CloseSession(const TString& sessionId) override; + NPq::NConfigurationManager::TAsyncDescribePathResult DescribePath( const TString& sessionId, const TString& cluster, @@ -32,42 +32,42 @@ public: const TString& database, bool secure) override; -private: - void InitClusterConfigs(); +private: + void InitClusterConfigs(); TPqSession::TPtr GetExistingSession(const TString& sessionId) const; - -private: + +private: mutable TMutex Mutex; - const NKikimr::NMiniKQL::IFunctionRegistry* FunctionRegistry = nullptr; - TPqGatewayConfigPtr Config; - IMetricsRegistryPtr Metrics; + const NKikimr::NMiniKQL::IFunctionRegistry* FunctionRegistry = nullptr; + TPqGatewayConfigPtr Config; + IMetricsRegistryPtr Metrics; ISecuredServiceAccountCredentialsFactory::TPtr CredentialsFactory; - ::NPq::NConfigurationManager::IConnections::TPtr CmConnections; - NYdb::TDriver YdbDriver; - TPqClusterConfigsMapPtr ClusterConfigs; - THashMap<TString, TPqSession::TPtr> Sessions; -}; - -TPqNativeGateway::TPqNativeGateway(const TPqGatewayServices& services) - : FunctionRegistry(services.FunctionRegistry) - , Config(services.Config) - , Metrics(services.Metrics) + ::NPq::NConfigurationManager::IConnections::TPtr CmConnections; + NYdb::TDriver YdbDriver; + TPqClusterConfigsMapPtr ClusterConfigs; + THashMap<TString, TPqSession::TPtr> Sessions; +}; + +TPqNativeGateway::TPqNativeGateway(const TPqGatewayServices& services) + : FunctionRegistry(services.FunctionRegistry) + , Config(services.Config) + , Metrics(services.Metrics) , CredentialsFactory(services.CredentialsFactory) - , CmConnections(services.CmConnections) - , YdbDriver(services.YdbDriver) -{ - Y_UNUSED(FunctionRegistry); - InitClusterConfigs(); -} - -void TPqNativeGateway::InitClusterConfigs() { - ClusterConfigs = std::make_shared<TPqClusterConfigsMap>(); - for (const auto& cfg : Config->GetClusterMapping()) { - auto& config = (*ClusterConfigs)[cfg.GetName()]; - config = cfg; - } -} - + , CmConnections(services.CmConnections) + , YdbDriver(services.YdbDriver) +{ + Y_UNUSED(FunctionRegistry); + InitClusterConfigs(); +} + +void TPqNativeGateway::InitClusterConfigs() { + ClusterConfigs = std::make_shared<TPqClusterConfigsMap>(); + for (const auto& cfg : Config->GetClusterMapping()) { + auto& config = (*ClusterConfigs)[cfg.GetName()]; + config = cfg; + } +} + void TPqNativeGateway::UpdateClusterConfigs( const TString& clusterName, const TString& endpoint, @@ -85,47 +85,47 @@ void TPqNativeGateway::UpdateClusterConfigs( } NThreading::TFuture<void> TPqNativeGateway::OpenSession(const TString& sessionId, const TString& username) { - with_lock (Mutex) { - auto [sessionIt, isNewSession] = Sessions.emplace(sessionId, - MakeIntrusive<TPqSession>(sessionId, - username, - CmConnections, - YdbDriver, - ClusterConfigs, + with_lock (Mutex) { + auto [sessionIt, isNewSession] = Sessions.emplace(sessionId, + MakeIntrusive<TPqSession>(sessionId, + username, + CmConnections, + YdbDriver, + ClusterConfigs, CredentialsFactory)); - if (!isNewSession) { - YQL_LOG_CTX_THROW yexception() << "Session already exists: " << sessionId; - } - } - return NThreading::MakeFuture(); -} - -void TPqNativeGateway::CloseSession(const TString& sessionId) { - with_lock (Mutex) { - 
Sessions.erase(sessionId); - } + if (!isNewSession) { + YQL_LOG_CTX_THROW yexception() << "Session already exists: " << sessionId; + } + } + return NThreading::MakeFuture(); +} + +void TPqNativeGateway::CloseSession(const TString& sessionId) { + with_lock (Mutex) { + Sessions.erase(sessionId); + } } TPqSession::TPtr TPqNativeGateway::GetExistingSession(const TString& sessionId) const { - with_lock (Mutex) { - auto sessionIt = Sessions.find(sessionId); - if (sessionIt == Sessions.end()) { - YQL_LOG_CTX_THROW yexception() << "Pq gateway session was not found: " << sessionId; - } - return sessionIt->second; + with_lock (Mutex) { + auto sessionIt = Sessions.find(sessionId); + if (sessionIt == Sessions.end()) { + YQL_LOG_CTX_THROW yexception() << "Pq gateway session was not found: " << sessionId; + } + return sessionIt->second; } -} - +} + NPq::NConfigurationManager::TAsyncDescribePathResult TPqNativeGateway::DescribePath(const TString& sessionId, const TString& cluster, const TString& database, const TString& path, const TString& token) { return GetExistingSession(sessionId)->DescribePath(cluster, database, path, token); -} - -IPqGateway::TPtr CreatePqNativeGateway(const TPqGatewayServices& services) { - return MakeIntrusive<TPqNativeGateway>(services); -} - -TPqNativeGateway::~TPqNativeGateway() { - Sessions.clear(); -} - -} // namespace NYql +} + +IPqGateway::TPtr CreatePqNativeGateway(const TPqGatewayServices& services) { + return MakeIntrusive<TPqNativeGateway>(services); +} + +TPqNativeGateway::~TPqNativeGateway() { + Sessions.clear(); +} + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.h b/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.h index 29f05d1e95d..e3e0c58268c 100644 --- a/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.h +++ b/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.h @@ -1,49 +1,49 @@ -#pragma once +#pragma once #include <ydb/library/yql/providers/common/metrics/metrics_registry.h> #include <ydb/library/yql/providers/pq/cm_client/interface/client.h> #include <ydb/library/yql/providers/pq/provider/yql_pq_gateway.h> - + #include <ydb/library/yql/providers/common/token_accessor/client/factory.h> #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h> - -#include <util/generic/ptr.h> - -namespace NKikimr::NMiniKQL { -class IFunctionRegistry; -} // namespace NKikimr::NMiniKQL - -namespace NYql { - -class TPqGatewayConfig; + +#include <util/generic/ptr.h> + +namespace NKikimr::NMiniKQL { +class IFunctionRegistry; +} // namespace NKikimr::NMiniKQL + +namespace NYql { + +class TPqGatewayConfig; using TPqGatewayConfigPtr = std::shared_ptr<TPqGatewayConfig>; - -struct TPqGatewayServices { - const NKikimr::NMiniKQL::IFunctionRegistry* FunctionRegistry = nullptr; - TPqGatewayConfigPtr Config; - IMetricsRegistryPtr Metrics; + +struct TPqGatewayServices { + const NKikimr::NMiniKQL::IFunctionRegistry* FunctionRegistry = nullptr; + TPqGatewayConfigPtr Config; + IMetricsRegistryPtr Metrics; ISecuredServiceAccountCredentialsFactory::TPtr CredentialsFactory; - ::NPq::NConfigurationManager::IConnections::TPtr CmConnections; - NYdb::TDriver YdbDriver; - - TPqGatewayServices( - NYdb::TDriver driver, - ::NPq::NConfigurationManager::IConnections::TPtr cmConnections, - ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory, - TPqGatewayConfigPtr config, - const NKikimr::NMiniKQL::IFunctionRegistry* functionRegistry, - IMetricsRegistryPtr metrics = nullptr) - : FunctionRegistry(functionRegistry) - , 
Config(std::move(config)) - , Metrics(std::move(metrics)) - , CredentialsFactory(std::move(credentialsFactory)) - , CmConnections(std::move(cmConnections)) - , YdbDriver(std::move(driver)) - { - } -}; - -IPqGateway::TPtr CreatePqNativeGateway(const TPqGatewayServices& services); - -} // namespace NYql + ::NPq::NConfigurationManager::IConnections::TPtr CmConnections; + NYdb::TDriver YdbDriver; + + TPqGatewayServices( + NYdb::TDriver driver, + ::NPq::NConfigurationManager::IConnections::TPtr cmConnections, + ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory, + TPqGatewayConfigPtr config, + const NKikimr::NMiniKQL::IFunctionRegistry* functionRegistry, + IMetricsRegistryPtr metrics = nullptr) + : FunctionRegistry(functionRegistry) + , Config(std::move(config)) + , Metrics(std::move(metrics)) + , CredentialsFactory(std::move(credentialsFactory)) + , CmConnections(std::move(cmConnections)) + , YdbDriver(std::move(driver)) + { + } +}; + +IPqGateway::TPtr CreatePqNativeGateway(const TPqGatewayServices& services); + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.cpp b/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.cpp index cd536762e4a..b4519821a3f 100644 --- a/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.cpp +++ b/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.cpp @@ -1,81 +1,81 @@ -#include "yql_pq_session.h" - +#include "yql_pq_session.h" + #include <ydb/library/yql/utils/yql_panic.h> - -namespace NYql { - + +namespace NYql { + namespace { NPq::NConfigurationManager::TClientOptions GetCmClientOptions(const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory) { - NPq::NConfigurationManager::TClientOptions opts; + NPq::NConfigurationManager::TClientOptions opts; opts .SetEndpoint(cfg.GetConfigManagerEndpoint()) .SetCredentialsProviderFactory(credentialsProviderFactory) .SetEnableSsl(cfg.GetUseSsl()); - return opts; -} - + return opts; +} + NYdb::NPersQueue::TPersQueueClientSettings GetYdbPqClientOptions(const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory) { - NYdb::NPersQueue::TPersQueueClientSettings opts; - opts - .DiscoveryEndpoint(cfg.GetEndpoint()) + NYdb::NPersQueue::TPersQueueClientSettings opts; + opts + .DiscoveryEndpoint(cfg.GetEndpoint()) .Database(database) .EnableSsl(cfg.GetUseSsl()) .CredentialsProviderFactory(credentialsProviderFactory); - return opts; + return opts; +} } -} - + const NPq::NConfigurationManager::IClient::TPtr& TPqSession::GetConfigManagerClient(const TString& cluster, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory) { auto& client = ClusterCmClients[cluster]; - if (!client && CmConnections) { + if (!client && CmConnections) { client = CmConnections->GetClient(GetCmClientOptions(cfg, credentialsProviderFactory)); } return client; } NYdb::NPersQueue::TPersQueueClient& TPqSession::GetYdbPqClient(const TString& cluster, const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory) { - const auto clientIt = ClusterYdbPqClients.find(cluster); - if (clientIt != ClusterYdbPqClients.end()) { - return clientIt->second; - } - return ClusterYdbPqClients.emplace(cluster, NYdb::NPersQueue::TPersQueueClient(YdbDriver, GetYdbPqClientOptions(database, cfg, credentialsProviderFactory))).first->second; -} - + 
const auto clientIt = ClusterYdbPqClients.find(cluster); + if (clientIt != ClusterYdbPqClients.end()) { + return clientIt->second; + } + return ClusterYdbPqClients.emplace(cluster, NYdb::NPersQueue::TPersQueueClient(YdbDriver, GetYdbPqClientOptions(database, cfg, credentialsProviderFactory))).first->second; +} + NPq::NConfigurationManager::TAsyncDescribePathResult TPqSession::DescribePath(const TString& cluster, const TString& database, const TString& path, const TString& token) { const auto* config = ClusterConfigs->FindPtr(cluster); if (!config) { - ythrow yexception() << "Pq cluster `" << cluster << "` does not exist"; + ythrow yexception() << "Pq cluster `" << cluster << "` does not exist"; } - YQL_ENSURE(config->GetEndpoint(), "Can't describe topic `" << cluster << "`.`" << path << "`: no endpoint"); - + YQL_ENSURE(config->GetEndpoint(), "Can't describe topic `" << cluster << "`.`" << path << "`: no endpoint"); + std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory = CreateCredentialsProviderFactoryForStructuredToken(CredentialsFactory, token, config->GetAddBearerToToken()); - with_lock (Mutex) { + with_lock (Mutex) { if (config->GetClusterType() == TPqClusterConfig::CT_PERS_QUEUE) { - const NPq::NConfigurationManager::IClient::TPtr& client = GetConfigManagerClient(cluster, *config, credentialsProviderFactory); - if (!client) { - NThreading::TPromise<::NPq::NConfigurationManager::TDescribePathResult> result = NThreading::NewPromise<::NPq::NConfigurationManager::TDescribePathResult>(); - result.SetException( - std::make_exception_ptr( - NPq::NConfigurationManager::TException(NPq::NConfigurationManager::EStatus::INTERNAL_ERROR) - << "Pq configuration manager is not supported")); - return result; - } - return client->DescribePath(path); - } + const NPq::NConfigurationManager::IClient::TPtr& client = GetConfigManagerClient(cluster, *config, credentialsProviderFactory); + if (!client) { + NThreading::TPromise<::NPq::NConfigurationManager::TDescribePathResult> result = NThreading::NewPromise<::NPq::NConfigurationManager::TDescribePathResult>(); + result.SetException( + std::make_exception_ptr( + NPq::NConfigurationManager::TException(NPq::NConfigurationManager::EStatus::INTERNAL_ERROR) + << "Pq configuration manager is not supported")); + return result; + } + return client->DescribePath(path); + } - return GetYdbPqClient(cluster, database, *config, credentialsProviderFactory).DescribeTopic(path).Apply([cluster, path](const NYdb::NPersQueue::TAsyncDescribeTopicResult& describeTopicResultFuture) { + return GetYdbPqClient(cluster, database, *config, credentialsProviderFactory).DescribeTopic(path).Apply([cluster, path](const NYdb::NPersQueue::TAsyncDescribeTopicResult& describeTopicResultFuture) { const NYdb::NPersQueue::TDescribeTopicResult& describeTopicResult = describeTopicResultFuture.GetValue(); if (!describeTopicResult.IsSuccess()) { - throw yexception() << "Failed to describe topic `" << cluster << "`.`" << path << "`: " << describeTopicResult.GetIssues().ToString(); - } + throw yexception() << "Failed to describe topic `" << cluster << "`.`" << path << "`: " << describeTopicResult.GetIssues().ToString(); + } NPq::NConfigurationManager::TTopicDescription desc(path); desc.PartitionsCount = describeTopicResult.TopicSettings().PartitionsCount(); return NPq::NConfigurationManager::TDescribePathResult::Make<NPq::NConfigurationManager::TTopicDescription>(std::move(desc)); }); - } -} - -} // namespace NYql + } +} + +} // namespace NYql diff --git 
a/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.h b/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.h index 23129837f01..c506cdb5920 100644 --- a/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.h +++ b/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.h @@ -1,55 +1,55 @@ -#pragma once +#pragma once #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h> #include <ydb/library/yql/providers/common/token_accessor/client/factory.h> #include <ydb/library/yql/providers/common/proto/gateways_config.pb.h> #include <ydb/library/yql/providers/pq/cm_client/interface/client.h> - -#include <util/generic/ptr.h> -#include <util/system/mutex.h> - -namespace NYql { - -using TPqClusterConfigsMap = THashMap<TString, NYql::TPqClusterConfig>; -using TPqClusterConfigsMapPtr = std::shared_ptr<TPqClusterConfigsMap>; - -class TPqSession : public TThrRefBase { -public: - using TPtr = TIntrusivePtr<TPqSession>; - - explicit TPqSession(const TString& sessionId, - const TString& username, - const NPq::NConfigurationManager::IConnections::TPtr& cmConnections, - const NYdb::TDriver& ydbDriver, - const TPqClusterConfigsMapPtr& clusterConfigs, + +#include <util/generic/ptr.h> +#include <util/system/mutex.h> + +namespace NYql { + +using TPqClusterConfigsMap = THashMap<TString, NYql::TPqClusterConfig>; +using TPqClusterConfigsMapPtr = std::shared_ptr<TPqClusterConfigsMap>; + +class TPqSession : public TThrRefBase { +public: + using TPtr = TIntrusivePtr<TPqSession>; + + explicit TPqSession(const TString& sessionId, + const TString& username, + const NPq::NConfigurationManager::IConnections::TPtr& cmConnections, + const NYdb::TDriver& ydbDriver, + const TPqClusterConfigsMapPtr& clusterConfigs, ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory) - : SessionId(sessionId) - , UserName(username) - , CmConnections(cmConnections) - , YdbDriver(ydbDriver) - , ClusterConfigs(clusterConfigs) + : SessionId(sessionId) + , UserName(username) + , CmConnections(cmConnections) + , YdbDriver(ydbDriver) + , ClusterConfigs(clusterConfigs) , CredentialsFactory(credentialsFactory) - { - } - + { + } + NPq::NConfigurationManager::TAsyncDescribePathResult DescribePath(const TString& cluster, const TString& database, const TString& path, const TString& token); - -private: + +private: const NPq::NConfigurationManager::IClient::TPtr& GetConfigManagerClient(const TString& cluster, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory); NYdb::NPersQueue::TPersQueueClient& GetYdbPqClient(const TString& cluster, const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory); - -private: - const TString SessionId; - const TString UserName; - const NPq::NConfigurationManager::IConnections::TPtr CmConnections; - const NYdb::TDriver YdbDriver; - const TPqClusterConfigsMapPtr ClusterConfigs; + +private: + const TString SessionId; + const TString UserName; + const NPq::NConfigurationManager::IConnections::TPtr CmConnections; + const NYdb::TDriver YdbDriver; + const TPqClusterConfigsMapPtr ClusterConfigs; const ISecuredServiceAccountCredentialsFactory::TPtr CredentialsFactory; TMutex Mutex; - THashMap<TString, NPq::NConfigurationManager::IClient::TPtr> ClusterCmClients; // Cluster -> CM Client. + THashMap<TString, NPq::NConfigurationManager::IClient::TPtr> ClusterCmClients; // Cluster -> CM Client. 
THashMap<TString, NYdb::NPersQueue::TPersQueueClient> ClusterYdbPqClients; // Cluster -> PQ Client. -}; - -} // namespace NYql +}; + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/gateway/ya.make b/ydb/library/yql/providers/pq/gateway/ya.make index dfd51aabe8b..38f46d023e8 100644 --- a/ydb/library/yql/providers/pq/gateway/ya.make +++ b/ydb/library/yql/providers/pq/gateway/ya.make @@ -1,10 +1,10 @@ -OWNER( - galaxycrab - g:yq - g:yql -) - -RECURSE( - dummy - native -) +OWNER( + galaxycrab + g:yq + g:yql +) + +RECURSE( + dummy + native +) diff --git a/ydb/library/yql/providers/pq/proto/dq_io.proto b/ydb/library/yql/providers/pq/proto/dq_io.proto index e87eb72755c..55e5aaeeaba 100644 --- a/ydb/library/yql/providers/pq/proto/dq_io.proto +++ b/ydb/library/yql/providers/pq/proto/dq_io.proto @@ -21,9 +21,9 @@ message TDqPqTopicSource { TToken Token = 4; string Database = 5; EClusterType ClusterType = 6; - bool UseSsl = 7; + bool UseSsl = 7; bool AddBearerToToken = 8; - string DatabaseId = 9; + string DatabaseId = 9; } message TDqPqTopicSink { @@ -31,7 +31,7 @@ message TDqPqTopicSink { string Endpoint = 2; TToken Token = 3; string Database = 4; - bool UseSsl = 5; + bool UseSsl = 5; bool AddBearerToToken = 6; EClusterType ClusterType = 7; } diff --git a/ydb/library/yql/providers/pq/proto/dq_io_state.proto b/ydb/library/yql/providers/pq/proto/dq_io_state.proto index 73861eb78c8..b9b729a4c3d 100644 --- a/ydb/library/yql/providers/pq/proto/dq_io_state.proto +++ b/ydb/library/yql/providers/pq/proto/dq_io_state.proto @@ -1,29 +1,29 @@ -syntax = "proto3"; -option cc_enable_arenas = true; - -package NYql.NPq.NProto; - -message TDqPqTopicSourceState { - repeated TTopicDescription Topics = 1; - repeated TPartitionReadState Partitions = 2; - uint64 StartingMessageTimestampMs = 3; // StartingMessageTimestamp in ms for ReadSession settings - - message TTopicDescription { - string DatabaseId = 1; - string Endpoint = 2; - string Database = 3; - string TopicPath = 4; - } - - message TPartitionReadState { - uint64 TopicIndex = 1; // Index in Topics array - string Cluster = 2; - uint64 Partition = 3; - uint64 Offset = 4; - } -} - -message TDqPqTopicSinkState { - string SourceId = 1; - uint64 ConfirmedSeqNo = 2; -} +syntax = "proto3"; +option cc_enable_arenas = true; + +package NYql.NPq.NProto; + +message TDqPqTopicSourceState { + repeated TTopicDescription Topics = 1; + repeated TPartitionReadState Partitions = 2; + uint64 StartingMessageTimestampMs = 3; // StartingMessageTimestamp in ms for ReadSession settings + + message TTopicDescription { + string DatabaseId = 1; + string Endpoint = 2; + string Database = 3; + string TopicPath = 4; + } + + message TPartitionReadState { + uint64 TopicIndex = 1; // Index in Topics array + string Cluster = 2; + uint64 Partition = 3; + uint64 Offset = 4; + } +} + +message TDqPqTopicSinkState { + string SourceId = 1; + uint64 ConfirmedSeqNo = 2; +} diff --git a/ydb/library/yql/providers/pq/proto/dq_task_params.proto b/ydb/library/yql/providers/pq/proto/dq_task_params.proto index a4181a85a29..11a0b7d596c 100644 --- a/ydb/library/yql/providers/pq/proto/dq_task_params.proto +++ b/ydb/library/yql/providers/pq/proto/dq_task_params.proto @@ -1,17 +1,17 @@ -syntax = "proto3"; -option cc_enable_arenas = true; - -package NYql.NPq.NProto; - -message TDqReadTaskParams { - message TPartitioningParams { - // Total topic partitions count. 
- uint64 TopicPartitionsCount = 1; - - // Take each EachTopicPartitionGroupId, DqPartitionsCount + EachTopicPartitionGroupId, 2 * DqPartitionsCount + EachTopicPartitionGroupId, etc. - uint64 EachTopicPartitionGroupId = 2; // Zero-based. - uint64 DqPartitionsCount = 3; - } - - TPartitioningParams PartitioningParams = 1; -} +syntax = "proto3"; +option cc_enable_arenas = true; + +package NYql.NPq.NProto; + +message TDqReadTaskParams { + message TPartitioningParams { + // Total topic partitions count. + uint64 TopicPartitionsCount = 1; + + // Take each EachTopicPartitionGroupId, DqPartitionsCount + EachTopicPartitionGroupId, 2 * DqPartitionsCount + EachTopicPartitionGroupId, etc. + uint64 EachTopicPartitionGroupId = 2; // Zero-based. + uint64 DqPartitionsCount = 3; + } + + TPartitioningParams PartitioningParams = 1; +} diff --git a/ydb/library/yql/providers/pq/proto/ya.make b/ydb/library/yql/providers/pq/proto/ya.make index 3428b641c01..d63dae258f1 100644 --- a/ydb/library/yql/providers/pq/proto/ya.make +++ b/ydb/library/yql/providers/pq/proto/ya.make @@ -1,17 +1,17 @@ -PROTO_LIBRARY() - -OWNER( - galaxycrab - g:yq - g:yql -) - -SRCS( - dq_io.proto - dq_io_state.proto - dq_task_params.proto -) - -EXCLUDE_TAGS(GO_PROTO) - -END() +PROTO_LIBRARY() + +OWNER( + galaxycrab + g:yq + g:yql +) + +SRCS( + dq_io.proto + dq_io_state.proto + dq_task_params.proto +) + +EXCLUDE_TAGS(GO_PROTO) + +END() diff --git a/ydb/library/yql/providers/pq/provider/ya.make b/ydb/library/yql/providers/pq/provider/ya.make index ead04a48fc5..d046768c3af 100644 --- a/ydb/library/yql/providers/pq/provider/ya.make +++ b/ydb/library/yql/providers/pq/provider/ya.make @@ -1,32 +1,32 @@ -LIBRARY() - -OWNER( - galaxycrab - g:yq - g:yql -) - -SRCS( - yql_pq_datasink.cpp - yql_pq_datasink_execution.cpp +LIBRARY() + +OWNER( + galaxycrab + g:yq + g:yql +) + +SRCS( + yql_pq_datasink.cpp + yql_pq_datasink_execution.cpp yql_pq_datasink_io_discovery.cpp - yql_pq_datasink_type_ann.cpp - yql_pq_datasource.cpp - yql_pq_datasource_type_ann.cpp - yql_pq_dq_integration.cpp + yql_pq_datasink_type_ann.cpp + yql_pq_datasource.cpp + yql_pq_datasource_type_ann.cpp + yql_pq_dq_integration.cpp yql_pq_io_discovery.cpp - yql_pq_load_meta.cpp - yql_pq_logical_opt.cpp - yql_pq_mkql_compiler.cpp - yql_pq_physical_optimize.cpp - yql_pq_provider.cpp + yql_pq_load_meta.cpp + yql_pq_logical_opt.cpp + yql_pq_mkql_compiler.cpp + yql_pq_physical_optimize.cpp + yql_pq_provider.cpp yql_pq_provider_impl.cpp - yql_pq_settings.cpp - yql_pq_topic_key_parser.cpp - yql_pq_helpers.cpp -) - -PEERDIR( + yql_pq_settings.cpp + yql_pq_topic_key_parser.cpp + yql_pq_helpers.cpp +) + +PEERDIR( library/cpp/random_provider library/cpp/time_provider ydb/core/yq/libs/db_resolver @@ -55,11 +55,11 @@ PEERDIR( ydb/library/yql/providers/pq/expr_nodes ydb/library/yql/providers/pq/proto ydb/library/yql/providers/result/expr_nodes -) - -YQL_LAST_ABI_VERSION() - -END() +) + +YQL_LAST_ABI_VERSION() + +END() IF (NOT OPENSOURCE) RECURSE_FOR_TESTS( diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_datasink.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_datasink.cpp index 380df33d529..4e6c5aede81 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_datasink.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_datasink.cpp @@ -1,22 +1,22 @@ -#include "yql_pq_provider_impl.h" -#include "yql_pq_helpers.h" - +#include "yql_pq_provider_impl.h" +#include "yql_pq_helpers.h" + #include <ydb/library/yql/core/expr_nodes/yql_expr_nodes.h> #include 
<ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h> #include <ydb/library/yql/providers/pq/provider/yql_pq_topic_key_parser.h> - + #include <ydb/library/yql/providers/common/provider/yql_provider.h> #include <ydb/library/yql/providers/common/provider/yql_provider_names.h> #include <ydb/library/yql/providers/common/provider/yql_data_provider_impl.h> - + #include <ydb/library/yql/utils/log/log.h> - -namespace NYql { - -using namespace NNodes; - -namespace { - + +namespace NYql { + +using namespace NNodes; + +namespace { + void ScanPlanDependencies(const TExprNode::TPtr& input, TExprNode::TListType& children) { VisitExpr(input, [&children](const TExprNode::TPtr& node) { if (node->IsCallable("DqCnResult")) { @@ -29,103 +29,103 @@ void ScanPlanDependencies(const TExprNode::TPtr& input, TExprNode::TListType& ch } class TPqDataSinkProvider : public TDataProviderBase { -public: +public: TPqDataSinkProvider(TPqState::TPtr state, IPqGateway::TPtr gateway) - : State_(state) + : State_(state) , Gateway_(gateway) , IODiscoveryTransformer_(CreatePqDataSinkIODiscoveryTransformer(State_)) - , TypeAnnotationTransformer_(CreatePqDataSinkTypeAnnotationTransformer(State_)) - , ExecutionTransformer_(CreatePqDataSinkExecTransformer(State_)) - , LogicalOptProposalTransformer_(CreatePqLogicalOptProposalTransformer(State_)) - , PhysicalOptProposalTransformer_(CreatePqPhysicalOptProposalTransformer(State_)) - { - } - - TStringBuf GetName() const override { - return PqProviderName; - } - - bool CanParse(const TExprNode& node) override { + , TypeAnnotationTransformer_(CreatePqDataSinkTypeAnnotationTransformer(State_)) + , ExecutionTransformer_(CreatePqDataSinkExecTransformer(State_)) + , LogicalOptProposalTransformer_(CreatePqLogicalOptProposalTransformer(State_)) + , PhysicalOptProposalTransformer_(CreatePqPhysicalOptProposalTransformer(State_)) + { + } + + TStringBuf GetName() const override { + return PqProviderName; + } + + bool CanParse(const TExprNode& node) override { if (node.IsCallable(TCoWrite::CallableName())) { - return TPqDataSink::Match(node.Child(1)); + return TPqDataSink::Match(node.Child(1)); } - return TypeAnnotationTransformer_->CanParse(node); - } - + return TypeAnnotationTransformer_->CanParse(node); + } + IGraphTransformer& GetIODiscoveryTransformer() override { return *IODiscoveryTransformer_; } - IGraphTransformer& GetTypeAnnotationTransformer(bool instantOnly) override { - Y_UNUSED(instantOnly); - return *TypeAnnotationTransformer_; - } - - IGraphTransformer& GetCallableExecutionTransformer() override { - return *ExecutionTransformer_; - } - - bool CanExecute(const TExprNode& node) override { - return ExecutionTransformer_->CanExec(node); - } - - bool ValidateParameters(TExprNode& node, TExprContext& ctx, TMaybe<TString>& cluster) override { - if (node.IsCallable(TCoDataSink::CallableName())) { - if (node.Head().Content() == PqProviderName) { - const auto& clusterSettings = State_->Configuration->ClustersConfigurationSettings; - if (const auto clusterName = node.Child(1)->Content(); clusterSettings.FindPtr(clusterName)) { - cluster = clusterName; - return true; - } else { - ctx.AddError(TIssue(ctx.GetPosition(node.Child(1)->Pos()), TStringBuilder() << "Unknown cluster name: " << clusterName)); - return false; - } - } - } - ctx.AddError(TIssue(ctx.GetPosition(node.Pos()), "Invalid Pq DataSink parameters")); - return false; - } - - IGraphTransformer& GetLogicalOptProposalTransformer() override { - return *LogicalOptProposalTransformer_; - } - - IGraphTransformer& 
GetPhysicalOptProposalTransformer() override { - return *PhysicalOptProposalTransformer_; - } - + IGraphTransformer& GetTypeAnnotationTransformer(bool instantOnly) override { + Y_UNUSED(instantOnly); + return *TypeAnnotationTransformer_; + } + + IGraphTransformer& GetCallableExecutionTransformer() override { + return *ExecutionTransformer_; + } + + bool CanExecute(const TExprNode& node) override { + return ExecutionTransformer_->CanExec(node); + } + + bool ValidateParameters(TExprNode& node, TExprContext& ctx, TMaybe<TString>& cluster) override { + if (node.IsCallable(TCoDataSink::CallableName())) { + if (node.Head().Content() == PqProviderName) { + const auto& clusterSettings = State_->Configuration->ClustersConfigurationSettings; + if (const auto clusterName = node.Child(1)->Content(); clusterSettings.FindPtr(clusterName)) { + cluster = clusterName; + return true; + } else { + ctx.AddError(TIssue(ctx.GetPosition(node.Child(1)->Pos()), TStringBuilder() << "Unknown cluster name: " << clusterName)); + return false; + } + } + } + ctx.AddError(TIssue(ctx.GetPosition(node.Pos()), "Invalid Pq DataSink parameters")); + return false; + } + + IGraphTransformer& GetLogicalOptProposalTransformer() override { + return *LogicalOptProposalTransformer_; + } + + IGraphTransformer& GetPhysicalOptProposalTransformer() override { + return *PhysicalOptProposalTransformer_; + } + TExprNode::TPtr RewriteIO(const TExprNode::TPtr& node, TExprContext& ctx) override { - auto maybePqWrite = TMaybeNode<TPqWrite>(node); - YQL_ENSURE(maybePqWrite.DataSink(), "Expected Write!, got: " << node->Content()); - + auto maybePqWrite = TMaybeNode<TPqWrite>(node); + YQL_ENSURE(maybePqWrite.DataSink(), "Expected Write!, got: " << node->Content()); + YQL_CLOG(INFO, ProviderPq) << "Rewrite " << node->Content(); const TCoWrite write(node); TTopicKeyParser key; YQL_ENSURE(key.Parse(*node->Child(2), nullptr, ctx), "Failed to extract topic name."); const auto settings = NCommon::ParseWriteTableSettings(TExprList(node->Child(4)), ctx); - YQL_ENSURE(settings.Mode.Cast() == "append", "Only append write mode is supported for writing into topic"); - - const auto cluster = TString(maybePqWrite.Cast().DataSink().Cluster().Value()); - const auto* found = State_->FindTopicMeta(cluster, key.GetTopicPath()); - if (!found) { - ctx.AddError(TIssue(ctx.GetPosition(write.Pos()), TStringBuilder() << "Unknown topic `" << cluster << "`.`" << key.GetTopicPath() << "`")); - return nullptr; - } - - auto topicNode = Build<TPqTopic>(ctx, write.Pos()) - .Cluster().Value(cluster).Build() - .Database().Value(State_->Configuration->GetDatabaseForTopic(cluster)).Build() + YQL_ENSURE(settings.Mode.Cast() == "append", "Only append write mode is supported for writing into topic"); + + const auto cluster = TString(maybePqWrite.Cast().DataSink().Cluster().Value()); + const auto* found = State_->FindTopicMeta(cluster, key.GetTopicPath()); + if (!found) { + ctx.AddError(TIssue(ctx.GetPosition(write.Pos()), TStringBuilder() << "Unknown topic `" << cluster << "`.`" << key.GetTopicPath() << "`")); + return nullptr; + } + + auto topicNode = Build<TPqTopic>(ctx, write.Pos()) + .Cluster().Value(cluster).Build() + .Database().Value(State_->Configuration->GetDatabaseForTopic(cluster)).Build() .RowSpec(found->RowSpec) - .Path().Value(key.GetTopicPath()).Build() - .Props(BuildTopicPropsList(*found, write.Pos(), ctx)) - .Metadata().Build() - .Done(); - - return Build<TPqWriteTopic>(ctx, node->Pos()) + .Path().Value(key.GetTopicPath()).Build() + .Props(BuildTopicPropsList(*found, 
write.Pos(), ctx)) + .Metadata().Build() + .Done(); + + return Build<TPqWriteTopic>(ctx, node->Pos()) .World(write.World()) .DataSink(write.DataSink().Ptr()) - .Topic(topicNode) + .Topic(topicNode) .Input(node->Child(3)) .Mode(settings.Mode.Cast()) .Settings(settings.Other) @@ -133,15 +133,15 @@ public: } TExprNode::TPtr GetClusterInfo(const TString& cluster, TExprContext& ctx) override { - const auto* config = State_->Configuration->ClustersConfigurationSettings.FindPtr(cluster); + const auto* config = State_->Configuration->ClustersConfigurationSettings.FindPtr(cluster); if (!config) { return {}; } TPositionHandle pos; return Build<NNodes::TPqClusterConfig>(ctx, pos) - .Endpoint<TCoAtom>().Build(config->Endpoint) - .TvmId<TCoAtom>().Build(ToString(config->TvmId)) + .Endpoint<TCoAtom>().Build(config->Endpoint) + .TvmId<TCoAtom>().Build(ToString(config->TvmId)) .Done().Ptr(); } @@ -167,24 +167,24 @@ public: return false; } - IDqIntegration* GetDqIntegration() override { - return State_->DqIntegration.Get(); - } - -private: - TPqState::TPtr State_; + IDqIntegration* GetDqIntegration() override { + return State_->DqIntegration.Get(); + } + +private: + TPqState::TPtr State_; IPqGateway::TPtr Gateway_; THolder<IGraphTransformer> IODiscoveryTransformer_; - THolder<TVisitorTransformerBase> TypeAnnotationTransformer_; - THolder<TExecTransformerBase> ExecutionTransformer_; - THolder<IGraphTransformer> LogicalOptProposalTransformer_; - THolder<IGraphTransformer> PhysicalOptProposalTransformer_; -}; - -} - + THolder<TVisitorTransformerBase> TypeAnnotationTransformer_; + THolder<TExecTransformerBase> ExecutionTransformer_; + THolder<IGraphTransformer> LogicalOptProposalTransformer_; + THolder<IGraphTransformer> PhysicalOptProposalTransformer_; +}; + +} + TIntrusivePtr<IDataProvider> CreatePqDataSink(TPqState::TPtr state, IPqGateway::TPtr gateway) { return new TPqDataSinkProvider(state, gateway); -} - -} // namespace NYql +} + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_datasink_execution.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_datasink_execution.cpp index da27cb195ae..39adc23294a 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_datasink_execution.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_datasink_execution.cpp @@ -1,128 +1,128 @@ -#include "yql_pq_provider_impl.h" - +#include "yql_pq_provider_impl.h" + #include <ydb/library/yql/core/expr_nodes/yql_expr_nodes.h> #include <ydb/library/yql/dq/expr_nodes/dq_expr_nodes.h> #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h> - + #include <ydb/library/yql/providers/common/provider/yql_provider.h> #include <ydb/library/yql/providers/common/provider/yql_provider_names.h> #include <ydb/library/yql/providers/common/provider/yql_data_provider_impl.h> #include <ydb/library/yql/providers/result/expr_nodes/yql_res_expr_nodes.h> - + #include <ydb/library/yql/utils/log/log.h> - -namespace NYql { - -using namespace NNodes; - -namespace { - -class TPqDataSinkExecTransformer : public TExecTransformerBase { -public: - explicit TPqDataSinkExecTransformer(TPqState::TPtr state) - : State_(state) - { - AddHandler({TCoCommit::CallableName()}, RequireFirst(), Hndl(&TPqDataSinkExecTransformer::HandleCommit)); - } - - TStatusCallbackPair HandleCommit(const TExprNode::TPtr& input, TExprContext& ctx) { - if (TDqQuery::Match(input->Child(TCoCommit::idx_World))) { - return DelegateExecutionToDqProvider(input->ChildPtr(TCoCommit::idx_World), input, ctx); - } else { // Pass - 
input->SetState(TExprNode::EState::ExecutionComplete); - input->SetResult(ctx.NewWorld(input->Pos())); - return SyncOk(); - } - } - - TStatusCallbackPair DelegateExecutionToDqProvider(const TExprNode::TPtr& input, const TExprNode::TPtr& originInput, TExprContext& ctx) { - YQL_CLOG(INFO, ProviderPq) << "Delegate execution of " << input->Content() << " to dq provider"; - auto delegatedNode = Build<TPull>(ctx, input->Pos()) - .Input(input) - .BytesLimit() - .Value(TString()) - .Build() - .RowsLimit() - .Value(TString("0")) - .Build() - .FormatDetails() + +namespace NYql { + +using namespace NNodes; + +namespace { + +class TPqDataSinkExecTransformer : public TExecTransformerBase { +public: + explicit TPqDataSinkExecTransformer(TPqState::TPtr state) + : State_(state) + { + AddHandler({TCoCommit::CallableName()}, RequireFirst(), Hndl(&TPqDataSinkExecTransformer::HandleCommit)); + } + + TStatusCallbackPair HandleCommit(const TExprNode::TPtr& input, TExprContext& ctx) { + if (TDqQuery::Match(input->Child(TCoCommit::idx_World))) { + return DelegateExecutionToDqProvider(input->ChildPtr(TCoCommit::idx_World), input, ctx); + } else { // Pass + input->SetState(TExprNode::EState::ExecutionComplete); + input->SetResult(ctx.NewWorld(input->Pos())); + return SyncOk(); + } + } + + TStatusCallbackPair DelegateExecutionToDqProvider(const TExprNode::TPtr& input, const TExprNode::TPtr& originInput, TExprContext& ctx) { + YQL_CLOG(INFO, ProviderPq) << "Delegate execution of " << input->Content() << " to dq provider"; + auto delegatedNode = Build<TPull>(ctx, input->Pos()) + .Input(input) + .BytesLimit() + .Value(TString()) + .Build() + .RowsLimit() + .Value(TString("0")) + .Build() + .FormatDetails() .Value(ToString((ui32)NYson::EYsonFormat::Binary)) - .Build() - .Settings() - .Build() - .Format() - .Value(ToString("0")) - .Build() - .PublicId() - .Value("id") - .Build() - .Discard() - .Value(ToString(true)) - .Build() - .Origin(originInput) - .Done() - .Ptr(); - - auto atomType = ctx.MakeType<TUnitExprType>(); - - for (auto idx: {TResOrPullBase::idx_BytesLimit, TResOrPullBase::idx_RowsLimit, TResOrPullBase::idx_FormatDetails, - TResOrPullBase::idx_Format, TResOrPullBase::idx_PublicId, TResOrPullBase::idx_Discard }) { - delegatedNode->Child(idx)->SetTypeAnn(atomType); - delegatedNode->Child(idx)->SetState(TExprNode::EState::ConstrComplete); - } - - delegatedNode->SetTypeAnn(originInput->GetTypeAnn()); - delegatedNode->SetState(TExprNode::EState::ConstrComplete); - originInput->SetState(TExprNode::EState::ExecutionInProgress); - - auto dqProvider = State_->Types->DataSourceMap.FindPtr(DqProviderName); - YQL_ENSURE(dqProvider); - - TExprNode::TPtr delegatedNodeOutput; - auto status = dqProvider->Get()->GetCallableExecutionTransformer().Transform(delegatedNode, delegatedNodeOutput, ctx); - - if (status.Level != TStatus::Async) { - YQL_ENSURE(status.Level != TStatus::Ok, "Asynchronous execution is expected in a happy path."); - return SyncStatus(status); - } - - auto dqFuture = dqProvider->Get()->GetCallableExecutionTransformer().GetAsyncFuture(*delegatedNode); - - TAsyncTransformCallbackFuture callbackFuture = dqFuture.Apply( - [dqProvider, delegatedNode](const NThreading::TFuture<void>& completedFuture) { - return TAsyncTransformCallback( - [completedFuture, dqProvider, delegatedNode](const TExprNode::TPtr& input, TExprNode::TPtr& output, TExprContext& ctx) { - completedFuture.GetValue(); - TExprNode::TPtr delegatedNodeOutput; - auto dqWriteStatus = dqProvider->Get()->GetCallableExecutionTransformer() - 
.ApplyAsyncChanges(delegatedNode, delegatedNodeOutput, ctx); - - YQL_ENSURE(dqWriteStatus != TStatus::Async, "ApplyAsyncChanges should not return Async."); - - if (dqWriteStatus != TStatus::Ok) { - output = input; - return dqWriteStatus; - } - - input->SetState(TExprNode::EState::ExecutionComplete); - output = ctx.ShallowCopy(*input); - output->SetResult(ctx.NewAtom(input->Pos(), "DQ_completed")); - - return IGraphTransformer::TStatus(IGraphTransformer::TStatus::Repeat, true); - }); - }); - - return std::make_pair(IGraphTransformer::TStatus::Async, callbackFuture); - } - -private: - TPqState::TPtr State_; -}; - -} - -THolder<TExecTransformerBase> CreatePqDataSinkExecTransformer(TPqState::TPtr state) { - return THolder(new TPqDataSinkExecTransformer(state)); -} - -} // namespace NYql + .Build() + .Settings() + .Build() + .Format() + .Value(ToString("0")) + .Build() + .PublicId() + .Value("id") + .Build() + .Discard() + .Value(ToString(true)) + .Build() + .Origin(originInput) + .Done() + .Ptr(); + + auto atomType = ctx.MakeType<TUnitExprType>(); + + for (auto idx: {TResOrPullBase::idx_BytesLimit, TResOrPullBase::idx_RowsLimit, TResOrPullBase::idx_FormatDetails, + TResOrPullBase::idx_Format, TResOrPullBase::idx_PublicId, TResOrPullBase::idx_Discard }) { + delegatedNode->Child(idx)->SetTypeAnn(atomType); + delegatedNode->Child(idx)->SetState(TExprNode::EState::ConstrComplete); + } + + delegatedNode->SetTypeAnn(originInput->GetTypeAnn()); + delegatedNode->SetState(TExprNode::EState::ConstrComplete); + originInput->SetState(TExprNode::EState::ExecutionInProgress); + + auto dqProvider = State_->Types->DataSourceMap.FindPtr(DqProviderName); + YQL_ENSURE(dqProvider); + + TExprNode::TPtr delegatedNodeOutput; + auto status = dqProvider->Get()->GetCallableExecutionTransformer().Transform(delegatedNode, delegatedNodeOutput, ctx); + + if (status.Level != TStatus::Async) { + YQL_ENSURE(status.Level != TStatus::Ok, "Asynchronous execution is expected in a happy path."); + return SyncStatus(status); + } + + auto dqFuture = dqProvider->Get()->GetCallableExecutionTransformer().GetAsyncFuture(*delegatedNode); + + TAsyncTransformCallbackFuture callbackFuture = dqFuture.Apply( + [dqProvider, delegatedNode](const NThreading::TFuture<void>& completedFuture) { + return TAsyncTransformCallback( + [completedFuture, dqProvider, delegatedNode](const TExprNode::TPtr& input, TExprNode::TPtr& output, TExprContext& ctx) { + completedFuture.GetValue(); + TExprNode::TPtr delegatedNodeOutput; + auto dqWriteStatus = dqProvider->Get()->GetCallableExecutionTransformer() + .ApplyAsyncChanges(delegatedNode, delegatedNodeOutput, ctx); + + YQL_ENSURE(dqWriteStatus != TStatus::Async, "ApplyAsyncChanges should not return Async."); + + if (dqWriteStatus != TStatus::Ok) { + output = input; + return dqWriteStatus; + } + + input->SetState(TExprNode::EState::ExecutionComplete); + output = ctx.ShallowCopy(*input); + output->SetResult(ctx.NewAtom(input->Pos(), "DQ_completed")); + + return IGraphTransformer::TStatus(IGraphTransformer::TStatus::Repeat, true); + }); + }); + + return std::make_pair(IGraphTransformer::TStatus::Async, callbackFuture); + } + +private: + TPqState::TPtr State_; +}; + +} + +THolder<TExecTransformerBase> CreatePqDataSinkExecTransformer(TPqState::TPtr state) { + return THolder(new TPqDataSinkExecTransformer(state)); +} + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_datasink_type_ann.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_datasink_type_ann.cpp index 6109bbe42e8..e622108627d 
100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_datasink_type_ann.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_datasink_type_ann.cpp @@ -1,20 +1,20 @@ -#include "yql_pq_provider_impl.h" - +#include "yql_pq_provider_impl.h" + #include <ydb/library/yql/core/expr_nodes/yql_expr_nodes.h> #include <ydb/library/yql/core/yql_opt_utils.h> #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h> - + #include <ydb/library/yql/providers/common/provider/yql_provider.h> #include <ydb/library/yql/providers/common/provider/yql_provider_names.h> #include <ydb/library/yql/providers/common/provider/yql_data_provider_impl.h> - + #include <ydb/library/yql/utils/log/log.h> - -namespace NYql { - -using namespace NNodes; - -namespace { + +namespace NYql { + +using namespace NNodes; + +namespace { bool EnsureStructTypeWithSingleStringMember(const TTypeAnnotationNode* input, TPositionHandle pos, TExprContext& ctx) { YQL_ENSURE(input); auto itemSchema = input->Cast<TListExprType>()->GetItemType()->Cast<TStructExprType>(); @@ -22,7 +22,7 @@ bool EnsureStructTypeWithSingleStringMember(const TTypeAnnotationNode* input, TP ctx.AddError(TIssue(ctx.GetPosition(pos), TStringBuilder() << "only struct with single string, yson or json field is accepted, but has struct with " << itemSchema->GetSize() << " members")); return false; } - + auto column = itemSchema->GetItems()[0]; auto columnType = column->GetItemType(); if (columnType->GetKind() != ETypeAnnotationKind::Data) { @@ -42,25 +42,25 @@ bool EnsureStructTypeWithSingleStringMember(const TTypeAnnotationNode* input, TP return true; } -class TPqDataSinkTypeAnnotationTransformer : public TVisitorTransformerBase { -public: - TPqDataSinkTypeAnnotationTransformer(TPqState::TPtr state) - : TVisitorTransformerBase(true) - , State_(state) - { - using TSelf = TPqDataSinkTypeAnnotationTransformer; - AddHandler({TCoCommit::CallableName()}, Hndl(&TSelf::HandleCommit)); +class TPqDataSinkTypeAnnotationTransformer : public TVisitorTransformerBase { +public: + TPqDataSinkTypeAnnotationTransformer(TPqState::TPtr state) + : TVisitorTransformerBase(true) + , State_(state) + { + using TSelf = TPqDataSinkTypeAnnotationTransformer; + AddHandler({TCoCommit::CallableName()}, Hndl(&TSelf::HandleCommit)); AddHandler({TPqWriteTopic::CallableName() }, Hndl(&TSelf::HandleWriteTopic)); AddHandler({NNodes::TPqClusterConfig::CallableName() }, Hndl(&TSelf::HandleClusterConfig)); - AddHandler({TDqPqTopicSink::CallableName()}, Hndl(&TSelf::HandleDqPqTopicSink)); - } - - TStatus HandleCommit(TExprBase input, TExprContext&) { - const auto commit = input.Cast<TCoCommit>(); - input.Ptr()->SetTypeAnn(commit.World().Ref().GetTypeAnn()); - return TStatus::Ok; - } - + AddHandler({TDqPqTopicSink::CallableName()}, Hndl(&TSelf::HandleDqPqTopicSink)); + } + + TStatus HandleCommit(TExprBase input, TExprContext&) { + const auto commit = input.Cast<TCoCommit>(); + input.Ptr()->SetTypeAnn(commit.World().Ref().GetTypeAnn()); + return TStatus::Ok; + } + TStatus HandleWriteTopic(TExprBase input, TExprContext& ctx) { const auto write = input.Cast<TPqWriteTopic>(); const auto& writeInput = write.Input().Ref(); @@ -86,22 +86,22 @@ public: return TStatus::Ok; } - TStatus HandleDqPqTopicSink(const TExprNode::TPtr& input, TExprContext& ctx) { + TStatus HandleDqPqTopicSink(const TExprNode::TPtr& input, TExprContext& ctx) { if (!EnsureArgsCount(*input, 3, ctx)) { - return TStatus::Error; - } - input->SetTypeAnn(ctx.MakeType<TVoidExprType>()); - return TStatus::Ok; - } - -private: - TPqState::TPtr 
State_; -}; - -} - -THolder<TVisitorTransformerBase> CreatePqDataSinkTypeAnnotationTransformer(TPqState::TPtr state) { - return MakeHolder<TPqDataSinkTypeAnnotationTransformer>(state); -} - -} // namespace NYql + return TStatus::Error; + } + input->SetTypeAnn(ctx.MakeType<TVoidExprType>()); + return TStatus::Ok; + } + +private: + TPqState::TPtr State_; +}; + +} + +THolder<TVisitorTransformerBase> CreatePqDataSinkTypeAnnotationTransformer(TPqState::TPtr state) { + return MakeHolder<TPqDataSinkTypeAnnotationTransformer>(state); +} + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_datasource.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_datasource.cpp index 4d43adc7695..931fc91cdd9 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_datasource.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_datasource.cpp @@ -1,7 +1,7 @@ -#include "yql_pq_provider_impl.h" -#include "yql_pq_topic_key_parser.h" -#include "yql_pq_helpers.h" - +#include "yql_pq_provider_impl.h" +#include "yql_pq_topic_key_parser.h" +#include "yql_pq_helpers.h" + #include <ydb/library/yql/core/expr_nodes/yql_expr_nodes.h> #include <ydb/library/yql/dq/expr_nodes/dq_expr_nodes.h> #include <ydb/library/yql/dq/opt/dq_opt.h> @@ -12,168 +12,168 @@ #include <ydb/library/yql/providers/common/provider/yql_provider_names.h> #include <ydb/library/yql/providers/common/provider/yql_data_provider_impl.h> #include <ydb/library/yql/providers/common/transform/yql_lazy_init.h> - + #include <ydb/library/yql/utils/log/log.h> - -namespace NYql { - -using namespace NNodes; - + +namespace NYql { + +using namespace NNodes; + class TPqDataSourceProvider : public TDataProviderBase { -public: +public: TPqDataSourceProvider(TPqState::TPtr state, IPqGateway::TPtr gateway) - : State_(state) + : State_(state) , Gateway_(gateway) - , ConfigurationTransformer_([this]() { + , ConfigurationTransformer_([this]() { return MakeHolder<NCommon::TProviderConfigurationTransformer>(State_->Configuration, *State_->Types, TString{ PqProviderName }); }) - , LoadMetaDataTransformer_(CreatePqLoadTopicMetadataTransformer(State_)) - , TypeAnnotationTransformer_(CreatePqDataSourceTypeAnnotationTransformer(State_)) + , LoadMetaDataTransformer_(CreatePqLoadTopicMetadataTransformer(State_)) + , TypeAnnotationTransformer_(CreatePqDataSourceTypeAnnotationTransformer(State_)) , IODiscoveryTransformer_(CreatePqIODiscoveryTransformer(State_)) { - } - - TStringBuf GetName() const override { - return PqProviderName; - } - - bool ValidateParameters(TExprNode& node, TExprContext& ctx, TMaybe<TString>& cluster) override { - if (node.IsCallable(TCoDataSource::CallableName())) { - if (node.Child(0)->Content() == PqProviderName) { - auto clusterName = node.Child(1)->Content(); - const auto& clusterSettings = State_->Configuration->ClustersConfigurationSettings; - if (clusterName != NCommon::ALL_CLUSTERS && !clusterSettings.FindPtr(clusterName)) { - ctx.AddError(TIssue(ctx.GetPosition(node.Child(1)->Pos()), TStringBuilder() << + } + + TStringBuf GetName() const override { + return PqProviderName; + } + + bool ValidateParameters(TExprNode& node, TExprContext& ctx, TMaybe<TString>& cluster) override { + if (node.IsCallable(TCoDataSource::CallableName())) { + if (node.Child(0)->Content() == PqProviderName) { + auto clusterName = node.Child(1)->Content(); + const auto& clusterSettings = State_->Configuration->ClustersConfigurationSettings; + if (clusterName != NCommon::ALL_CLUSTERS && !clusterSettings.FindPtr(clusterName)) { + 
ctx.AddError(TIssue(ctx.GetPosition(node.Child(1)->Pos()), TStringBuilder() << "Unknown cluster name: " << clusterName)); - return false; - } - cluster = clusterName; - return true; - } - } - ctx.AddError(TIssue(ctx.GetPosition(node.Pos()), "Invalid Pq DataSource parameters")); - return false; - } - - bool CanParse(const TExprNode& node) override { - if (node.IsCallable(TCoRead::CallableName())) { - return TPqDataSource::Match(node.Child(1)); - } - return TypeAnnotationTransformer_->CanParse(node); - } - + return false; + } + cluster = clusterName; + return true; + } + } + ctx.AddError(TIssue(ctx.GetPosition(node.Pos()), "Invalid Pq DataSource parameters")); + return false; + } + + bool CanParse(const TExprNode& node) override { + if (node.IsCallable(TCoRead::CallableName())) { + return TPqDataSource::Match(node.Child(1)); + } + return TypeAnnotationTransformer_->CanParse(node); + } + IGraphTransformer& GetIODiscoveryTransformer() override { return *IODiscoveryTransformer_; } - IGraphTransformer& GetConfigurationTransformer() override { - return *ConfigurationTransformer_; - } - - IGraphTransformer& GetLoadTableMetadataTransformer() override { - return *LoadMetaDataTransformer_; - } - - IGraphTransformer& GetTypeAnnotationTransformer(bool instantOnly) override { - Y_UNUSED(instantOnly); - return *TypeAnnotationTransformer_; - } - - bool EnableDqSource() const { - return !State_->IsRtmrMode(); - } - - TExprNode::TPtr RewriteIO(const TExprNode::TPtr& node, TExprContext& ctx) override { + IGraphTransformer& GetConfigurationTransformer() override { + return *ConfigurationTransformer_; + } + + IGraphTransformer& GetLoadTableMetadataTransformer() override { + return *LoadMetaDataTransformer_; + } + + IGraphTransformer& GetTypeAnnotationTransformer(bool instantOnly) override { + Y_UNUSED(instantOnly); + return *TypeAnnotationTransformer_; + } + + bool EnableDqSource() const { + return !State_->IsRtmrMode(); + } + + TExprNode::TPtr RewriteIO(const TExprNode::TPtr& node, TExprContext& ctx) override { Y_UNUSED(ctx); - YQL_CLOG(INFO, ProviderPq) << "RewriteIO"; - if (auto left = TMaybeNode<TCoLeft>(node)) { - return left.Input().Maybe<TPqRead>().World().Cast().Ptr(); - } - - auto read = TCoRight(node).Input().Cast<TPqRead>(); - TIssueScopeGuard issueScopeRead(ctx.IssueManager, [&]() { - return MakeIntrusive<TIssue>(ctx.GetPosition(read.Pos()), TStringBuilder() << "At function: " << TCoRead::CallableName()); - }); - + YQL_CLOG(INFO, ProviderPq) << "RewriteIO"; + if (auto left = TMaybeNode<TCoLeft>(node)) { + return left.Input().Maybe<TPqRead>().World().Cast().Ptr(); + } + + auto read = TCoRight(node).Input().Cast<TPqRead>(); + TIssueScopeGuard issueScopeRead(ctx.IssueManager, [&]() { + return MakeIntrusive<TIssue>(ctx.GetPosition(read.Pos()), TStringBuilder() << "At function: " << TCoRead::CallableName()); + }); + TTopicKeyParser topicKeyParser(read.FreeArgs().Get(2).Ref(), read.Ref().Child(4), ctx); - const TString cluster(read.DataSource().Cluster().Value()); - const auto* topicMeta = State_->FindTopicMeta(cluster, topicKeyParser.GetTopicPath()); - if (!topicMeta) { - ctx.AddError(TIssue(ctx.GetPosition(read.Pos()), TStringBuilder() << "Unknown topic `" << cluster << "`.`" << topicKeyParser.GetTopicPath() << "`")); - return nullptr; - } - - auto topicNode = Build<TPqTopic>(ctx, read.Pos()) - .Cluster().Value(cluster).Build() - .Database().Value(State_->Configuration->GetDatabaseForTopic(cluster)).Build() - .Path().Value(topicKeyParser.GetTopicPath()).Build() + const TString 
cluster(read.DataSource().Cluster().Value()); + const auto* topicMeta = State_->FindTopicMeta(cluster, topicKeyParser.GetTopicPath()); + if (!topicMeta) { + ctx.AddError(TIssue(ctx.GetPosition(read.Pos()), TStringBuilder() << "Unknown topic `" << cluster << "`.`" << topicKeyParser.GetTopicPath() << "`")); + return nullptr; + } + + auto topicNode = Build<TPqTopic>(ctx, read.Pos()) + .Cluster().Value(cluster).Build() + .Database().Value(State_->Configuration->GetDatabaseForTopic(cluster)).Build() + .Path().Value(topicKeyParser.GetTopicPath()).Build() .RowSpec(topicMeta->RowSpec) - .Props(BuildTopicPropsList(*topicMeta, read.Pos(), ctx)) - .Metadata().Build() - .Done(); - + .Props(BuildTopicPropsList(*topicMeta, read.Pos(), ctx)) + .Metadata().Build() + .Done(); + auto builder = Build<TPqReadTopic>(ctx, read.Pos()) .World(read.World()) .DataSource(read.DataSource()) .Topic(std::move(topicNode)) .Format().Value(topicKeyParser.GetFormat()).Build() .Compression().Value(topicKeyParser.GetCompression()).Build(); - + if (topicKeyParser.GetColumnOrder()) { builder.Columns(topicKeyParser.GetColumnOrder()); - } else { + } else { builder.Columns<TCoVoid>().Build(); } - + return Build<TCoRight>(ctx, read.Pos()) .Input(builder.Done()) .Done().Ptr(); - } - - const THashMap<TString, TString>* GetClusterTokens() override { - return &State_->Configuration->Tokens; - } - - bool GetDependencies(const TExprNode& node, TExprNode::TListType& children, bool compact) override { - Y_UNUSED(compact); - - for (auto& child : node.Children()) { - children.push_back(child.Get()); - } - - if (TMaybeNode<TPqReadTopic>(&node)) { - return true; - } - return false; - } - - void GetInputs(const TExprNode& node, TVector<TPinInfo>& inputs) override { - if (auto maybeRead = TMaybeNode<TPqReadTopic>(&node)) { - if (auto maybeTopic = maybeRead.Topic()) { + } + + const THashMap<TString, TString>* GetClusterTokens() override { + return &State_->Configuration->Tokens; + } + + bool GetDependencies(const TExprNode& node, TExprNode::TListType& children, bool compact) override { + Y_UNUSED(compact); + + for (auto& child : node.Children()) { + children.push_back(child.Get()); + } + + if (TMaybeNode<TPqReadTopic>(&node)) { + return true; + } + return false; + } + + void GetInputs(const TExprNode& node, TVector<TPinInfo>& inputs) override { + if (auto maybeRead = TMaybeNode<TPqReadTopic>(&node)) { + if (auto maybeTopic = maybeRead.Topic()) { TStringBuf cluster; - if (auto dataSource = maybeRead.DataSource().Maybe<TPqDataSource>()) { + if (auto dataSource = maybeRead.DataSource().Maybe<TPqDataSource>()) { cluster = dataSource.Cast().Cluster().Value(); - } + } auto topicDisplayName = MakeTopicDisplayName(cluster, maybeTopic.Cast().Path().Value()); inputs.push_back(TPinInfo(maybeRead.DataSource().Raw(), nullptr, maybeTopic.Cast().Raw(), topicDisplayName, false)); - } - } - } - - IDqIntegration* GetDqIntegration() override { - return State_->DqIntegration.Get(); - } - -private: - TPqState::TPtr State_; + } + } + } + + IDqIntegration* GetDqIntegration() override { + return State_->DqIntegration.Get(); + } + +private: + TPqState::TPtr State_; IPqGateway::TPtr Gateway_; - TLazyInitHolder<IGraphTransformer> ConfigurationTransformer_; - THolder<IGraphTransformer> LoadMetaDataTransformer_; - THolder<TVisitorTransformerBase> TypeAnnotationTransformer_; + TLazyInitHolder<IGraphTransformer> ConfigurationTransformer_; + THolder<IGraphTransformer> LoadMetaDataTransformer_; + THolder<TVisitorTransformerBase> TypeAnnotationTransformer_; 
THolder<IGraphTransformer> IODiscoveryTransformer_; -}; - +}; + TIntrusivePtr<IDataProvider> CreatePqDataSource(TPqState::TPtr state, IPqGateway::TPtr gateway) { return new TPqDataSourceProvider(state, gateway); -} - -} // namespace NYql +} + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_datasource_type_ann.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_datasource_type_ann.cpp index 1f884336ec7..3a4aaa10e66 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_datasource_type_ann.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_datasource_type_ann.cpp @@ -1,143 +1,143 @@ -#include "yql_pq_provider_impl.h" - +#include "yql_pq_provider_impl.h" + #include <ydb/library/yql/core/expr_nodes/yql_expr_nodes.h> #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h> - + #include <ydb/library/yql/providers/common/provider/yql_provider.h> #include <ydb/library/yql/providers/common/provider/yql_provider_names.h> #include <ydb/library/yql/providers/common/provider/yql_data_provider_impl.h> - + #include <ydb/library/yql/utils/log/log.h> - -namespace NYql { - -using namespace NNodes; - -namespace { - -class TPqDataSourceTypeAnnotationTransformer : public TVisitorTransformerBase { -public: + +namespace NYql { + +using namespace NNodes; + +namespace { + +class TPqDataSourceTypeAnnotationTransformer : public TVisitorTransformerBase { +public: explicit TPqDataSourceTypeAnnotationTransformer(TPqState::TPtr state) - : TVisitorTransformerBase(true) - , State_(state) - { - using TSelf = TPqDataSourceTypeAnnotationTransformer; - AddHandler({TCoConfigure::CallableName()}, Hndl(&TSelf::HandleConfigure)); - AddHandler({TPqReadTopic::CallableName()}, Hndl(&TSelf::HandleReadTopic)); - AddHandler({TPqTopic::CallableName()}, Hndl(&TSelf::HandleTopic)); - AddHandler({TDqPqTopicSource::CallableName()}, Hndl(&TSelf::HandleDqTopicSource)); - } - - TStatus HandleConfigure(const TExprNode::TPtr& input, TExprContext& ctx) { - if (!EnsureMinArgsCount(*input, 2, ctx)) { - return TStatus::Error; - } - - if (!EnsureWorldType(*input->Child(TCoConfigure::idx_World), ctx)) { - return TStatus::Error; - } - - if (!EnsureSpecificDataSource(*input->Child(TCoConfigure::idx_DataSource), PqProviderName, ctx)) { - return TStatus::Error; - } - - input->SetTypeAnn(input->Child(TCoConfigure::idx_World)->GetTypeAnn()); - return TStatus::Ok; - } - + : TVisitorTransformerBase(true) + , State_(state) + { + using TSelf = TPqDataSourceTypeAnnotationTransformer; + AddHandler({TCoConfigure::CallableName()}, Hndl(&TSelf::HandleConfigure)); + AddHandler({TPqReadTopic::CallableName()}, Hndl(&TSelf::HandleReadTopic)); + AddHandler({TPqTopic::CallableName()}, Hndl(&TSelf::HandleTopic)); + AddHandler({TDqPqTopicSource::CallableName()}, Hndl(&TSelf::HandleDqTopicSource)); + } + + TStatus HandleConfigure(const TExprNode::TPtr& input, TExprContext& ctx) { + if (!EnsureMinArgsCount(*input, 2, ctx)) { + return TStatus::Error; + } + + if (!EnsureWorldType(*input->Child(TCoConfigure::idx_World), ctx)) { + return TStatus::Error; + } + + if (!EnsureSpecificDataSource(*input->Child(TCoConfigure::idx_DataSource), PqProviderName, ctx)) { + return TStatus::Error; + } + + input->SetTypeAnn(input->Child(TCoConfigure::idx_World)->GetTypeAnn()); + return TStatus::Ok; + } + const TTypeAnnotationNode* GetReadTopicSchema(TPqTopic topic, TMaybeNode<TCoAtomList> columns, TExprBase input, TExprContext& ctx, TVector<TString>& columnOrder) { - auto schema = topic.Ref().GetTypeAnn(); - if (columns) { - TVector<const 
TItemExprType*> items; - items.reserve(columns.Cast().Ref().ChildrenSize()); + auto schema = topic.Ref().GetTypeAnn(); + if (columns) { + TVector<const TItemExprType*> items; + items.reserve(columns.Cast().Ref().ChildrenSize()); columnOrder.reserve(items.capacity()); - - auto itemSchema = topic.Ref().GetTypeAnn()->Cast<TListExprType>()->GetItemType()->Cast<TStructExprType>(); - for (auto c : columns.Cast().Ref().ChildrenList()) { - if (!EnsureAtom(*c, ctx)) { - return nullptr; - } - auto index = itemSchema->FindItem(c->Content()); - if (!index) { - ctx.AddError(TIssue(ctx.GetPosition(input.Pos()), TStringBuilder() << "Unable to find column: " << c->Content())); - return nullptr; - } + + auto itemSchema = topic.Ref().GetTypeAnn()->Cast<TListExprType>()->GetItemType()->Cast<TStructExprType>(); + for (auto c : columns.Cast().Ref().ChildrenList()) { + if (!EnsureAtom(*c, ctx)) { + return nullptr; + } + auto index = itemSchema->FindItem(c->Content()); + if (!index) { + ctx.AddError(TIssue(ctx.GetPosition(input.Pos()), TStringBuilder() << "Unable to find column: " << c->Content())); + return nullptr; + } columnOrder.push_back(TString(c->Content())); - items.push_back(itemSchema->GetItems()[*index]); - } - schema = ctx.MakeType<TListExprType>(ctx.MakeType<TStructExprType>(items)); - } - return schema; - } - - TStatus HandleReadTopic(TExprBase input, TExprContext& ctx) { + items.push_back(itemSchema->GetItems()[*index]); + } + schema = ctx.MakeType<TListExprType>(ctx.MakeType<TStructExprType>(items)); + } + return schema; + } + + TStatus HandleReadTopic(TExprBase input, TExprContext& ctx) { if (!EnsureMinMaxArgsCount(input.Ref(), 6, 7, ctx)) { - return TStatus::Error; - } - - TPqReadTopic read = input.Cast<TPqReadTopic>(); - - if (!EnsureWorldType(read.World().Ref(), ctx)) { - return TStatus::Error; - } - - if (!EnsureSpecificDataSource(read.DataSource().Ref(), PqProviderName, ctx)) { - return TStatus::Error; - } - - TPqTopic topic = read.Topic(); - if (!EnsureCallable(topic.Ref(), ctx)) { - return TStatus::Error; - } - + return TStatus::Error; + } + + TPqReadTopic read = input.Cast<TPqReadTopic>(); + + if (!EnsureWorldType(read.World().Ref(), ctx)) { + return TStatus::Error; + } + + if (!EnsureSpecificDataSource(read.DataSource().Ref(), PqProviderName, ctx)) { + return TStatus::Error; + } + + TPqTopic topic = read.Topic(); + if (!EnsureCallable(topic.Ref(), ctx)) { + return TStatus::Error; + } + TVector<TString> columnOrder; auto schema = GetReadTopicSchema(topic, read.Columns().Maybe<TCoAtomList>(), input, ctx, columnOrder); - if (!schema) { - return TStatus::Error; + if (!schema) { + return TStatus::Error; } - input.Ptr()->SetTypeAnn(ctx.MakeType<TTupleExprType>(TTypeAnnotationNode::TListType{ - read.World().Ref().GetTypeAnn(), + input.Ptr()->SetTypeAnn(ctx.MakeType<TTupleExprType>(TTypeAnnotationNode::TListType{ + read.World().Ref().GetTypeAnn(), schema - })); + })); return State_->Types->SetColumnOrder(input.Ref(), columnOrder, ctx); - } - - TStatus HandleDqTopicSource(TExprBase input, TExprContext& ctx) { + } + + TStatus HandleDqTopicSource(TExprBase input, TExprContext& ctx) { if (!EnsureArgsCount(input.Ref(), 4, ctx)) { - return TStatus::Error; - } - - TDqPqTopicSource topicSource = input.Cast<TDqPqTopicSource>(); - TPqTopic topic = topicSource.Topic(); - - if (!EnsureCallable(topic.Ref(), ctx)) { - return TStatus::Error; - } - + return TStatus::Error; + } + + TDqPqTopicSource topicSource = input.Cast<TDqPqTopicSource>(); + TPqTopic topic = topicSource.Topic(); + + if 
(!EnsureCallable(topic.Ref(), ctx)) { + return TStatus::Error; + } + const auto cluster = TString(topic.Cluster().Value()); const auto topicPath = TString(topic.Path().Value()); const auto* meta = State_->FindTopicMeta(cluster, topicPath); if (!meta) { ctx.AddError(TIssue(ctx.GetPosition(input.Pos()), TStringBuilder() << "Unknown topic `" << cluster << "`.`" << topicPath << "`")); - return TStatus::Error; - } - + return TStatus::Error; + } + input.Ptr()->SetTypeAnn(ctx.MakeType<TStreamExprType>(ctx.MakeType<TDataExprType>(EDataSlot::String))); - return TStatus::Ok; - } - + return TStatus::Ok; + } + TStatus HandleTopic(const TExprNode::TPtr& input, TExprContext& ctx) { - if (State_->IsRtmrMode()) { + if (State_->IsRtmrMode()) { return HandleTopicInRtmrMode(input, ctx); } - TPqTopic topic(input); + TPqTopic topic(input); input->SetTypeAnn(ctx.MakeType<TListExprType>(topic.RowSpec().Ref().GetTypeAnn()->Cast<TTypeExprType>()->GetType()->Cast<TStructExprType>())); - return TStatus::Ok; - } - -private: + return TStatus::Ok; + } + +private: TStatus HandleTopicInRtmrMode(const TExprNode::TPtr& input, TExprContext& ctx) { TVector<const TItemExprType*> items; auto stringType = ctx.MakeType<TDataExprType>(EDataSlot::String); @@ -151,13 +151,13 @@ private: } private: - TPqState::TPtr State_; -}; - -} - -THolder<TVisitorTransformerBase> CreatePqDataSourceTypeAnnotationTransformer(TPqState::TPtr state) { - return MakeHolder<TPqDataSourceTypeAnnotationTransformer>(state); -} - -} // namespace NYql + TPqState::TPtr State_; +}; + +} + +THolder<TVisitorTransformerBase> CreatePqDataSourceTypeAnnotationTransformer(TPqState::TPtr state) { + return MakeHolder<TPqDataSourceTypeAnnotationTransformer>(state); +} + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.cpp index ea67ffd422c..1924dd03720 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.cpp @@ -1,7 +1,7 @@ -#include "yql_pq_dq_integration.h" +#include "yql_pq_dq_integration.h" #include "yql_pq_helpers.h" -#include "yql_pq_mkql_compiler.h" - +#include "yql_pq_mkql_compiler.h" + #include <ydb/library/yql/ast/yql_expr.h> #include <ydb/library/yql/dq/expr_nodes/dq_expr_nodes.h> #include <ydb/library/yql/utils/log/log.h> @@ -12,62 +12,62 @@ #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h> #include <ydb/library/yql/providers/pq/proto/dq_io.pb.h> #include <ydb/library/yql/providers/pq/proto/dq_task_params.pb.h> - -#include <util/string/builder.h> - -namespace NYql { - -using namespace NNodes; - -namespace { - -class TPqDqIntegration: public TDqIntegrationBase { -public: + +#include <util/string/builder.h> + +namespace NYql { + +using namespace NNodes; + +namespace { + +class TPqDqIntegration: public TDqIntegrationBase { +public: explicit TPqDqIntegration(const TPqState::TPtr& state) - : State_(state.Get()) - { - } - - ui64 PartitionTopicRead(const TPqTopic& topic, size_t maxPartitions, TVector<TString>& partitions) { - size_t topicPartitionsCount = 0; - for (auto kv : topic.Props()) { - auto key = kv.Name().Value(); - if (key == PartitionsCountProp) { - topicPartitionsCount = FromString(kv.Value().Ref().Content()); - } - } - YQL_ENSURE(topicPartitionsCount > 0); - - const size_t tasks = Min(maxPartitions, topicPartitionsCount); - partitions.reserve(tasks); - for (size_t i = 0; i < tasks; ++i) { - NPq::NProto::TDqReadTaskParams params; - 
auto* partitioninigParams = params.MutablePartitioningParams(); - partitioninigParams->SetTopicPartitionsCount(topicPartitionsCount); - partitioninigParams->SetEachTopicPartitionGroupId(i); - partitioninigParams->SetDqPartitionsCount(tasks); - YQL_CLOG(DEBUG, ProviderPq) << "Create DQ reading partition " << params; - - TString serializedParams; - YQL_ENSURE(params.SerializeToString(&serializedParams)); - partitions.emplace_back(std::move(serializedParams)); - } - return 0; - } - + : State_(state.Get()) + { + } + + ui64 PartitionTopicRead(const TPqTopic& topic, size_t maxPartitions, TVector<TString>& partitions) { + size_t topicPartitionsCount = 0; + for (auto kv : topic.Props()) { + auto key = kv.Name().Value(); + if (key == PartitionsCountProp) { + topicPartitionsCount = FromString(kv.Value().Ref().Content()); + } + } + YQL_ENSURE(topicPartitionsCount > 0); + + const size_t tasks = Min(maxPartitions, topicPartitionsCount); + partitions.reserve(tasks); + for (size_t i = 0; i < tasks; ++i) { + NPq::NProto::TDqReadTaskParams params; + auto* partitioninigParams = params.MutablePartitioningParams(); + partitioninigParams->SetTopicPartitionsCount(topicPartitionsCount); + partitioninigParams->SetEachTopicPartitionGroupId(i); + partitioninigParams->SetDqPartitionsCount(tasks); + YQL_CLOG(DEBUG, ProviderPq) << "Create DQ reading partition " << params; + + TString serializedParams; + YQL_ENSURE(params.SerializeToString(&serializedParams)); + partitions.emplace_back(std::move(serializedParams)); + } + return 0; + } + ui64 Partition(const TDqSettings&, size_t maxPartitions, const TExprNode& node, TVector<TString>& partitions, TString*, TExprContext&, bool) override { if (auto maybePqRead = TMaybeNode<TPqReadTopic>(&node)) { return PartitionTopicRead(maybePqRead.Cast().Topic(), maxPartitions, partitions); } - if (auto maybeDqSource = TMaybeNode<TDqSource>(&node)) { - auto settings = maybeDqSource.Cast().Settings(); - if (auto topicSource = TMaybeNode<TDqPqTopicSource>(settings.Raw())) { - return PartitionTopicRead(topicSource.Cast().Topic(), maxPartitions, partitions); - } - } + if (auto maybeDqSource = TMaybeNode<TDqSource>(&node)) { + auto settings = maybeDqSource.Cast().Settings(); + if (auto topicSource = TMaybeNode<TDqPqTopicSource>(settings.Raw())) { + return PartitionTopicRead(topicSource.Cast().Topic(), maxPartitions, partitions); + } + } return 0; - } - + } + TExprNode::TPtr WrapRead(const TDqSettings&, const TExprNode::TPtr& read, TExprContext& ctx) override { if (const auto& maybePqReadTopic = TMaybeNode<TPqReadTopic>(read)) { const auto& pqReadTopic = maybePqReadTopic.Cast(); @@ -106,132 +106,132 @@ public: .Settings(settings) .Done().Ptr(); } - return read; - } - + return read; + } + TMaybe<bool> CanWrite(const TDqSettings&, const TExprNode&, TExprContext&) override { YQL_ENSURE(false, "Unimplemented"); - } - - void RegisterMkqlCompiler(NCommon::TMkqlCallableCompilerBase& compiler) override { + } + + void RegisterMkqlCompiler(NCommon::TMkqlCallableCompilerBase& compiler) override { RegisterDqPqMkqlCompilers(compiler); - } - - static TStringBuf Name(const TCoNameValueTuple& nameValue) { - return nameValue.Name().Value(); - } - - static TStringBuf Value(const TCoNameValueTuple& nameValue) { - if (TMaybeNode<TExprBase> maybeValue = nameValue.Value()) { - const TExprNode& value = maybeValue.Cast().Ref(); - YQL_ENSURE(value.IsAtom()); - return value.Content(); - } + } + + static TStringBuf Name(const TCoNameValueTuple& nameValue) { + return nameValue.Name().Value(); + } + + static TStringBuf 
Value(const TCoNameValueTuple& nameValue) { + if (TMaybeNode<TExprBase> maybeValue = nameValue.Value()) { + const TExprNode& value = maybeValue.Cast().Ref(); + YQL_ENSURE(value.IsAtom()); + return value.Content(); + } return {}; - } - + } + static NPq::NProto::EClusterType ToClusterType(NYql::TPqClusterConfig::EClusterType t) { - switch (t) { + switch (t) { case NYql::TPqClusterConfig::CT_UNSPECIFIED: return NPq::NProto::Unspecified; case NYql::TPqClusterConfig::CT_PERS_QUEUE: return NPq::NProto::PersQueue; case NYql::TPqClusterConfig::CT_DATA_STREAMS: return NPq::NProto::DataStreams; - } - } - - void FillSourceSettings(const TExprNode& node, ::google::protobuf::Any& protoSettings, TString& sourceType) override { - if (auto maybeDqSource = TMaybeNode<TDqSource>(&node)) { - auto settings = maybeDqSource.Cast().Settings(); - if (auto maybeTopicSource = TMaybeNode<TDqPqTopicSource>(settings.Raw())) { - NPq::NProto::TDqPqTopicSource srcDesc; - TDqPqTopicSource topicSource = maybeTopicSource.Cast(); - - TPqTopic topic = topicSource.Topic(); - srcDesc.SetTopicPath(TString(topic.Path().Value())); - srcDesc.SetDatabase(TString(topic.Database().Value())); - const TStringBuf cluster = topic.Cluster().Value(); - const auto* clusterDesc = State_->Configuration->ClustersConfigurationSettings.FindPtr(cluster); - YQL_ENSURE(clusterDesc, "Unknown cluster " << cluster); - srcDesc.SetClusterType(ToClusterType(clusterDesc->ClusterType)); - srcDesc.SetDatabaseId(clusterDesc->DatabaseId); - - size_t const settingsCount = topicSource.Settings().Size(); - for (size_t i = 0; i < settingsCount; ++i) { - TCoNameValueTuple setting = topicSource.Settings().Item(i); - const TStringBuf name = Name(setting); - if (name == ConsumerSetting) { - srcDesc.SetConsumerName(TString(Value(setting))); - } else if (name == EndpointSetting) { - srcDesc.SetEndpoint(TString(Value(setting))); - } else if (name == UseSslSetting) { - srcDesc.SetUseSsl(FromString<bool>(Value(setting))); + } + } + + void FillSourceSettings(const TExprNode& node, ::google::protobuf::Any& protoSettings, TString& sourceType) override { + if (auto maybeDqSource = TMaybeNode<TDqSource>(&node)) { + auto settings = maybeDqSource.Cast().Settings(); + if (auto maybeTopicSource = TMaybeNode<TDqPqTopicSource>(settings.Raw())) { + NPq::NProto::TDqPqTopicSource srcDesc; + TDqPqTopicSource topicSource = maybeTopicSource.Cast(); + + TPqTopic topic = topicSource.Topic(); + srcDesc.SetTopicPath(TString(topic.Path().Value())); + srcDesc.SetDatabase(TString(topic.Database().Value())); + const TStringBuf cluster = topic.Cluster().Value(); + const auto* clusterDesc = State_->Configuration->ClustersConfigurationSettings.FindPtr(cluster); + YQL_ENSURE(clusterDesc, "Unknown cluster " << cluster); + srcDesc.SetClusterType(ToClusterType(clusterDesc->ClusterType)); + srcDesc.SetDatabaseId(clusterDesc->DatabaseId); + + size_t const settingsCount = topicSource.Settings().Size(); + for (size_t i = 0; i < settingsCount; ++i) { + TCoNameValueTuple setting = topicSource.Settings().Item(i); + const TStringBuf name = Name(setting); + if (name == ConsumerSetting) { + srcDesc.SetConsumerName(TString(Value(setting))); + } else if (name == EndpointSetting) { + srcDesc.SetEndpoint(TString(Value(setting))); + } else if (name == UseSslSetting) { + srcDesc.SetUseSsl(FromString<bool>(Value(setting))); } else if (name == AddBearerToTokenSetting) { srcDesc.SetAddBearerToToken(FromString<bool>(Value(setting))); - } - } - + } + } + if (auto maybeToken = 
TMaybeNode<TCoSecureParam>(topicSource.Token().Raw())) { - srcDesc.MutableToken()->SetName(TString(maybeToken.Cast().Name().Value())); - } - - if (clusterDesc->ClusterType == NYql::TPqClusterConfig::CT_PERS_QUEUE) { - YQL_ENSURE(srcDesc.GetConsumerName(), "No consumer specified for PersQueue cluster"); - } - - protoSettings.PackFrom(srcDesc); - sourceType = "PqSource"; - } - } - } - - void FillSinkSettings(const TExprNode& node, ::google::protobuf::Any& protoSettings, TString& sinkType) override { - if (auto maybeDqSink = TMaybeNode<TDqSink>(&node)) { - auto settings = maybeDqSink.Cast().Settings(); - if (auto maybeTopicSink = TMaybeNode<TDqPqTopicSink>(settings.Raw())) { - NPq::NProto::TDqPqTopicSink sinkDesc; - TDqPqTopicSink topicSink = maybeTopicSink.Cast(); - - TPqTopic topic = topicSink.Topic(); - const TStringBuf cluster = topic.Cluster().Value(); - const auto* clusterDesc = State_->Configuration->ClustersConfigurationSettings.FindPtr(cluster); - YQL_ENSURE(clusterDesc, "Unknown cluster " << cluster); - sinkDesc.SetClusterType(ToClusterType(clusterDesc->ClusterType)); - sinkDesc.SetTopicPath(TString(topic.Path().Value())); - sinkDesc.SetDatabase(TString(topic.Database().Value())); - - size_t const settingsCount = topicSink.Settings().Size(); - for (size_t i = 0; i < settingsCount; ++i) { - TCoNameValueTuple setting = topicSink.Settings().Item(i); - const TStringBuf name = Name(setting); - if (name == EndpointSetting) { - sinkDesc.SetEndpoint(TString(Value(setting))); - } else if (name == UseSslSetting) { - sinkDesc.SetUseSsl(FromString<bool>(Value(setting))); + srcDesc.MutableToken()->SetName(TString(maybeToken.Cast().Name().Value())); + } + + if (clusterDesc->ClusterType == NYql::TPqClusterConfig::CT_PERS_QUEUE) { + YQL_ENSURE(srcDesc.GetConsumerName(), "No consumer specified for PersQueue cluster"); + } + + protoSettings.PackFrom(srcDesc); + sourceType = "PqSource"; + } + } + } + + void FillSinkSettings(const TExprNode& node, ::google::protobuf::Any& protoSettings, TString& sinkType) override { + if (auto maybeDqSink = TMaybeNode<TDqSink>(&node)) { + auto settings = maybeDqSink.Cast().Settings(); + if (auto maybeTopicSink = TMaybeNode<TDqPqTopicSink>(settings.Raw())) { + NPq::NProto::TDqPqTopicSink sinkDesc; + TDqPqTopicSink topicSink = maybeTopicSink.Cast(); + + TPqTopic topic = topicSink.Topic(); + const TStringBuf cluster = topic.Cluster().Value(); + const auto* clusterDesc = State_->Configuration->ClustersConfigurationSettings.FindPtr(cluster); + YQL_ENSURE(clusterDesc, "Unknown cluster " << cluster); + sinkDesc.SetClusterType(ToClusterType(clusterDesc->ClusterType)); + sinkDesc.SetTopicPath(TString(topic.Path().Value())); + sinkDesc.SetDatabase(TString(topic.Database().Value())); + + size_t const settingsCount = topicSink.Settings().Size(); + for (size_t i = 0; i < settingsCount; ++i) { + TCoNameValueTuple setting = topicSink.Settings().Item(i); + const TStringBuf name = Name(setting); + if (name == EndpointSetting) { + sinkDesc.SetEndpoint(TString(Value(setting))); + } else if (name == UseSslSetting) { + sinkDesc.SetUseSsl(FromString<bool>(Value(setting))); } else if (name == AddBearerToTokenSetting) { sinkDesc.SetAddBearerToToken(FromString<bool>(Value(setting))); - } - } - + } + } + if (auto maybeToken = TMaybeNode<TCoSecureParam>(topicSink.Token().Raw())) { - sinkDesc.MutableToken()->SetName(TString(maybeToken.Cast().Name().Value())); - } - - protoSettings.PackFrom(sinkDesc); - sinkType = "PqSink"; - } - } - } - + 
sinkDesc.MutableToken()->SetName(TString(maybeToken.Cast().Name().Value())); + } + + protoSettings.PackFrom(sinkDesc); + sinkType = "PqSink"; + } + } + } + NNodes::TCoNameValueTupleList BuildTopicReadSettings(const TString& cluster, TPositionHandle pos, TExprContext& ctx) const { TVector<TCoNameValueTuple> props; { TMaybe<TString> consumer = State_->Configuration->Consumer.Get(); - if (consumer) { - Add(props, ConsumerSetting, *consumer, pos, ctx); + if (consumer) { + Add(props, ConsumerSetting, *consumer, pos, ctx); } } @@ -254,14 +254,14 @@ public: .Done(); } -private: - TPqState* State_; // State owns dq integration, so back reference must be not smart. -}; - -} - -THolder<IDqIntegration> CreatePqDqIntegration(const TPqState::TPtr& state) { +private: + TPqState* State_; // State owns dq integration, so back reference must be not smart. +}; + +} + +THolder<IDqIntegration> CreatePqDqIntegration(const TPqState::TPtr& state) { return MakeHolder<TPqDqIntegration>(state); -} - -} +} + +} diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.h b/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.h index 81236079cb3..b76dac7813f 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.h +++ b/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.h @@ -1,13 +1,13 @@ -#pragma once - -#include "yql_pq_provider.h" - +#pragma once + +#include "yql_pq_provider.h" + #include <ydb/library/yql/providers/dq/interface/yql_dq_integration.h> - -#include <util/generic/ptr.h> - -namespace NYql { - -THolder<IDqIntegration> CreatePqDqIntegration(const TPqState::TPtr& state); - -} + +#include <util/generic/ptr.h> + +namespace NYql { + +THolder<IDqIntegration> CreatePqDqIntegration(const TPqState::TPtr& state); + +} diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_gateway.h b/ydb/library/yql/providers/pq/provider/yql_pq_gateway.h index 118b37c012b..5e36b421948 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_gateway.h +++ b/ydb/library/yql/providers/pq/provider/yql_pq_gateway.h @@ -1,21 +1,21 @@ -#pragma once +#pragma once #include <ydb/library/yql/providers/common/proto/gateways_config.pb.h> #include <ydb/library/yql/providers/pq/cm_client/interface/client.h> - -#include <library/cpp/threading/future/core/future.h> - -#include <util/generic/ptr.h> -#include <util/generic/strbuf.h> - -namespace NYql { - -struct IPqGateway : public TThrRefBase { - using TPtr = TIntrusivePtr<IPqGateway>; - + +#include <library/cpp/threading/future/core/future.h> + +#include <util/generic/ptr.h> +#include <util/generic/strbuf.h> + +namespace NYql { + +struct IPqGateway : public TThrRefBase { + using TPtr = TIntrusivePtr<IPqGateway>; + virtual NThreading::TFuture<void> OpenSession(const TString& sessionId, const TString& username) = 0; - virtual void CloseSession(const TString& sessionId) = 0; - - // CM API. + virtual void CloseSession(const TString& sessionId) = 0; + + // CM API. 
virtual ::NPq::NConfigurationManager::TAsyncDescribePathResult DescribePath(const TString& sessionId, const TString& cluster, const TString& database, const TString& path, const TString& token) = 0; virtual void UpdateClusterConfigs( @@ -23,6 +23,6 @@ struct IPqGateway : public TThrRefBase { const TString& endpoint, const TString& database, bool secure) = 0; -}; - -} // namespace NYql +}; + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_helpers.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_helpers.cpp index 47a21341479..fbaa22902aa 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_helpers.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_helpers.cpp @@ -1,31 +1,31 @@ -#include "yql_pq_helpers.h" - +#include "yql_pq_helpers.h" + #include "yql_pq_provider_impl.h" #include <ydb/library/yql/core/yql_expr_optimize.h> #include <ydb/library/yql/utils/log/log.h> #include <ydb/library/yql/providers/pq/common/yql_names.h> - -namespace NYql { - -using namespace NNodes; - -void Add(TVector<TCoNameValueTuple>& settings, TStringBuf name, TStringBuf value, TPositionHandle pos, TExprContext& ctx) { - settings.push_back(Build<TCoNameValueTuple>(ctx, pos) - .Name().Build(name) - .Value<TCoAtom>().Build(value) - .Done()); -} - -TCoNameValueTupleList BuildTopicPropsList(const TPqState::TTopicMeta& meta, TPositionHandle pos, TExprContext& ctx) { - TVector<TCoNameValueTuple> props; - - Add(props, PartitionsCountProp, ToString(meta.Description->PartitionsCount), pos, ctx); - - return Build<TCoNameValueTupleList>(ctx, pos) - .Add(props) - .Done(); -} - + +namespace NYql { + +using namespace NNodes; + +void Add(TVector<TCoNameValueTuple>& settings, TStringBuf name, TStringBuf value, TPositionHandle pos, TExprContext& ctx) { + settings.push_back(Build<TCoNameValueTuple>(ctx, pos) + .Name().Build(name) + .Value<TCoAtom>().Build(value) + .Done()); +} + +TCoNameValueTupleList BuildTopicPropsList(const TPqState::TTopicMeta& meta, TPositionHandle pos, TExprContext& ctx) { + TVector<TCoNameValueTuple> props; + + Add(props, PartitionsCountProp, ToString(meta.Description->PartitionsCount), pos, ctx); + + return Build<TCoNameValueTupleList>(ctx, pos) + .Add(props) + .Done(); +} + void FindYdsDbIdsForResolving( const TPqState::TPtr& state, TExprNode::TPtr input, @@ -102,4 +102,4 @@ void FillSettingsWithResolvedYdsIds( } -} // namespace NYql +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_helpers.h b/ydb/library/yql/providers/pq/provider/yql_pq_helpers.h index 628ab2f65d9..40a04f87f00 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_helpers.h +++ b/ydb/library/yql/providers/pq/provider/yql_pq_helpers.h @@ -1,17 +1,17 @@ -#pragma once - +#pragma once + #include "yql_pq_provider_impl.h" #include <ydb/library/yql/ast/yql_expr.h> #include <ydb/library/yql/ast/yql_pos_handle.h> #include <ydb/library/yql/core/expr_nodes/yql_expr_nodes.h> #include <ydb/library/yql/core/yql_expr_optimize.h> - -namespace NYql { - -void Add(TVector<NNodes::TCoNameValueTuple>& settings, TStringBuf name, TStringBuf value, TPositionHandle pos, TExprContext& ctx); - -NNodes::TCoNameValueTupleList BuildTopicPropsList(const TPqState::TTopicMeta& meta, TPositionHandle pos, TExprContext& ctx); - + +namespace NYql { + +void Add(TVector<NNodes::TCoNameValueTuple>& settings, TStringBuf name, TStringBuf value, TPositionHandle pos, TExprContext& ctx); + +NNodes::TCoNameValueTupleList BuildTopicPropsList(const TPqState::TTopicMeta& meta, TPositionHandle pos, TExprContext& ctx); 
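// Editor's note: illustrative sketch, not part of the commit. It shows how callers combine the
// Add() helper declared above with Build<TCoNameValueTupleList>, mirroring BuildTopicWriteSettings
// in yql_pq_physical_optimize.cpp further down; the endpoint value is a hypothetical placeholder
// and the snippet assumes `using namespace NNodes;` as in the .cpp above.
//
//     TVector<TCoNameValueTuple> props;
//     Add(props, EndpointSetting, "localhost:2135", pos, ctx); // each setting is stored as a string atom
//     Add(props, UseSslSetting, "1", pos, ctx);                // booleans are encoded as "1"
//     TCoNameValueTupleList settings = Build<TCoNameValueTupleList>(ctx, pos)
//         .Add(props)
//         .Done();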
+ void FindYdsDbIdsForResolving( const TPqState::TPtr& state, TExprNode::TPtr input, @@ -21,4 +21,4 @@ void FillSettingsWithResolvedYdsIds( const TPqState::TPtr& state, const THashMap<std::pair<TString, NYq::DatabaseType>, NYq::TEvents::TEvEndpointResponse::TEndpoint>& fullResolvedIds); -} // namespace NYql +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_load_meta.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_load_meta.cpp index 51f2a1ce172..1aa26ae283a 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_load_meta.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_load_meta.cpp @@ -1,34 +1,34 @@ -#include "yql_pq_provider_impl.h" -#include "yql_pq_topic_key_parser.h" - +#include "yql_pq_provider_impl.h" +#include "yql_pq_topic_key_parser.h" + #include <ydb/library/yql/providers/common/provider/yql_provider_names.h> #include <ydb/library/yql/core/expr_nodes/yql_expr_nodes.h> #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h> - + #include <ydb/library/yql/ast/yql_expr.h> #include <ydb/library/yql/core/yql_expr_optimize.h> #include <ydb/library/yql/core/yql_graph_transformer.h> #include <ydb/library/yql/utils/log/log.h> #include <ydb/library/yql/public/udf/udf_types.h> - -namespace NYql { - -using namespace NNodes; - -namespace { - -class TPqLoadTopicMetadataTransformer : public TGraphTransformerBase { -public: + +namespace NYql { + +using namespace NNodes; + +namespace { + +class TPqLoadTopicMetadataTransformer : public TGraphTransformerBase { +public: explicit TPqLoadTopicMetadataTransformer(TPqState::TPtr state) - : State_(std::move(state)) - {} - + : State_(std::move(state)) + {} + void AddToPendingTopics(const TString& cluster, const TString& topicPath, TPositionHandle pos, TExprNode::TPtr rowSpec, TExprNode::TPtr columnOrder) { - const auto topicKey = std::make_pair(cluster, topicPath); - const auto found = State_->Topics.FindPtr(topicKey); + const auto topicKey = std::make_pair(cluster, topicPath); + const auto found = State_->Topics.FindPtr(topicKey); if (found) { return; - } + } YQL_CLOG(INFO, ProviderPq) << "Load topic meta for: `" << cluster << "`.`" << topicPath << "`"; TPqState::TTopicMeta m; @@ -36,81 +36,81 @@ public: m.RowSpec = rowSpec; m.ColumnOrder = columnOrder; PendingTopics_.emplace(topicKey, m); - } - - TStatus DoTransform(TExprNode::TPtr input, TExprNode::TPtr& output, TExprContext& ctx) final { - output = input; - - if (ctx.Step.IsDone(TExprStep::LoadTablesMetadata)) { - return TStatus::Ok; - } - - VisitExpr(input, [&](const TExprNode::TPtr& node) { - if (auto maybePqRead = TMaybeNode<TPqRead>(node)) { - TPqRead read = maybePqRead.Cast(); + } + + TStatus DoTransform(TExprNode::TPtr input, TExprNode::TPtr& output, TExprContext& ctx) final { + output = input; + + if (ctx.Step.IsDone(TExprStep::LoadTablesMetadata)) { + return TStatus::Ok; + } + + VisitExpr(input, [&](const TExprNode::TPtr& node) { + if (auto maybePqRead = TMaybeNode<TPqRead>(node)) { + TPqRead read = maybePqRead.Cast(); if (read.DataSource().Category().Value() != PqProviderName) { return true; - } + } TTopicKeyParser topicParser(read.Arg(2).Ref(), read.Ref().Child(4), ctx); AddToPendingTopics(read.DataSource().Cluster().StringValue(), topicParser.GetTopicPath(), node->Pos(), topicParser.GetUserSchema(), topicParser.GetColumnOrder()); - } else if (auto maybePqWrite = TMaybeNode<TPqWrite>(node)) { - TPqWrite write = maybePqWrite.Cast(); - if (write.DataSink().Category().Value() == PqProviderName) { + } else if (auto maybePqWrite = 
TMaybeNode<TPqWrite>(node)) { + TPqWrite write = maybePqWrite.Cast(); + if (write.DataSink().Category().Value() == PqProviderName) { TTopicKeyParser topicParser(write.Arg(2).Ref(), nullptr, ctx); AddToPendingTopics(write.DataSink().Cluster().StringValue(), topicParser.GetTopicPath(), node->Pos(), {}, {}); - } - } - return true; - }); - + } + } + return true; + }); + for (auto& [x, meta] : PendingTopics_) { auto itemType = LoadTopicMeta(x.first, x.second, ctx, meta); if (!itemType) { - return TStatus::Error; - } - + return TStatus::Error; + } + meta.RawFormat = (meta.RowSpec == nullptr); if (!meta.RowSpec) { meta.RowSpec = ExpandType(meta.Pos, *itemType, ctx); } - State_->Topics.emplace(x, meta); - } - - PendingTopics_.clear(); - return TStatus::Ok; - } - - NThreading::TFuture<void> DoGetAsyncFuture(const TExprNode& input) final { - Y_UNUSED(input); - return AsyncFuture_; - } - - TStatus DoApplyAsyncChanges(TExprNode::TPtr input, TExprNode::TPtr& output, TExprContext& ctx) final { - Y_UNUSED(ctx); - YQL_ENSURE(AsyncFuture_.HasValue()); - output = input; - return TStatus::Ok; - } - -private: + State_->Topics.emplace(x, meta); + } + + PendingTopics_.clear(); + return TStatus::Ok; + } + + NThreading::TFuture<void> DoGetAsyncFuture(const TExprNode& input) final { + Y_UNUSED(input); + return AsyncFuture_; + } + + TStatus DoApplyAsyncChanges(TExprNode::TPtr input, TExprNode::TPtr& output, TExprContext& ctx) final { + Y_UNUSED(ctx); + YQL_ENSURE(AsyncFuture_.HasValue()); + output = input; + return TStatus::Ok; + } + +private: static const TStructExprType* CreateDefaultItemType(TExprContext& ctx) { - // Schema for topic: - // { - // Data:String - // } - TVector<const TItemExprType*> items; - items.reserve(1); - - // Data column. - { - const TTypeAnnotationNode* typeNode = ctx.MakeType<TDataExprType>(NYql::NUdf::EDataSlot::String); - items.push_back(ctx.MakeType<TItemExprType>(ctx.AppendString("Data"), typeNode)); - } - - return ctx.MakeType<TStructExprType>(items); - } - + // Schema for topic: + // { + // Data:String + // } + TVector<const TItemExprType*> items; + items.reserve(1); + + // Data column. 
+ { + const TTypeAnnotationNode* typeNode = ctx.MakeType<TDataExprType>(NYql::NUdf::EDataSlot::String); + items.push_back(ctx.MakeType<TItemExprType>(ctx.AppendString("Data"), typeNode)); + } + + return ctx.MakeType<TStructExprType>(items); + } + const TStructExprType* LoadTopicMeta(const TString& cluster, const TString& topic, TExprContext& ctx, TPqState::TTopicMeta& meta) { // todo: return TFuture try { @@ -122,25 +122,25 @@ private: } meta.Description = description.GetTopicDescription(); return CreateDefaultItemType(ctx); - } catch (const std::exception& ex) { - TIssues issues; - issues.AddIssue(ex.what()); - ctx.IssueManager.AddIssues(issues); + } catch (const std::exception& ex) { + TIssues issues; + issues.AddIssue(ex.what()); + ctx.IssueManager.AddIssues(issues); return nullptr; } } -private: - TPqState::TPtr State_; +private: + TPqState::TPtr State_; // (cluster, topic) -> meta THashMap<std::pair<TString, TString>, TPqState::TTopicMeta> PendingTopics_; - NThreading::TFuture<void> AsyncFuture_; -}; - -} - -THolder<IGraphTransformer> CreatePqLoadTopicMetadataTransformer(TPqState::TPtr state) { - return MakeHolder<TPqLoadTopicMetadataTransformer>(state); -} - -} // namespace NYql + NThreading::TFuture<void> AsyncFuture_; +}; + +} + +THolder<IGraphTransformer> CreatePqLoadTopicMetadataTransformer(TPqState::TPtr state) { + return MakeHolder<TPqLoadTopicMetadataTransformer>(state); +} + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_logical_opt.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_logical_opt.cpp index 6e8a6462d96..1db26ff9c20 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_logical_opt.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_logical_opt.cpp @@ -1,5 +1,5 @@ -#include "yql_pq_provider_impl.h" - +#include "yql_pq_provider_impl.h" + #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h> #include <ydb/library/yql/providers/dq/expr_nodes/dqs_expr_nodes.h> #include <ydb/library/yql/providers/common/provider/yql_provider.h> @@ -8,81 +8,81 @@ #include <ydb/library/yql/providers/common/transform/yql_optimize.h> #include <ydb/library/yql/core/expr_nodes/yql_expr_nodes.h> #include <ydb/library/yql/utils/log/log.h> - - -namespace NYql { - -using namespace NNodes; - -namespace { - -class TPqLogicalOptProposalTransformer : public TOptimizeTransformerBase { -public: - TPqLogicalOptProposalTransformer(TPqState::TPtr state) - : TOptimizeTransformerBase(state->Types, NLog::EComponent::ProviderPq, {}) - , State_(state) - { -#define HNDL(name) "LogicalOptimizer-"#name, Hndl(&TPqLogicalOptProposalTransformer::name) - AddHandler(0, &TCoLeft::Match, HNDL(TrimReadWorld)); + + +namespace NYql { + +using namespace NNodes; + +namespace { + +class TPqLogicalOptProposalTransformer : public TOptimizeTransformerBase { +public: + TPqLogicalOptProposalTransformer(TPqState::TPtr state) + : TOptimizeTransformerBase(state->Types, NLog::EComponent::ProviderPq, {}) + , State_(state) + { +#define HNDL(name) "LogicalOptimizer-"#name, Hndl(&TPqLogicalOptProposalTransformer::name) + AddHandler(0, &TCoLeft::Match, HNDL(TrimReadWorld)); // AddHandler(0, &TCoExtractMembers::Match, HNDL(ExtractMembers)); // AddHandler(0, &TCoExtractMembers::Match, HNDL(ExtractMembersOverDqWrap)); - #undef HNDL - } - - TMaybeNode<TExprBase> TrimReadWorld(TExprBase node, TExprContext& ctx) const { - const auto& maybeRead = node.Cast<TCoLeft>().Input().Maybe<TPqReadTopic>(); - if (!maybeRead) { - return node; - } - - return TExprBase(ctx.NewWorld(node.Pos())); - } + 
#undef HNDL + } + + TMaybeNode<TExprBase> TrimReadWorld(TExprBase node, TExprContext& ctx) const { + const auto& maybeRead = node.Cast<TCoLeft>().Input().Maybe<TPqReadTopic>(); + if (!maybeRead) { + return node; + } + + return TExprBase(ctx.NewWorld(node.Pos())); + } /* - TMaybeNode<TExprBase> ExtractMembers(TExprBase node, TExprContext& ctx) const { - const auto& extract = node.Cast<TCoExtractMembers>(); - const auto& input = extract.Input(); - const auto& read = input.Maybe<TCoRight>().Input().Maybe<TPqReadTopic>(); - if (!read) { - return node; - } - - const auto& cast = read.Cast(); - return Build<TCoRight>(ctx, extract.Pos()) - .Input<TPqReadTopic>() - .World(cast.World()) - .DataSource(cast.DataSource()) - .Topic(cast.Topic()) - .Columns(extract.Members()) - .Build() - .Done(); - } - - TMaybeNode<TExprBase> ExtractMembersOverDqWrap(TExprBase node, TExprContext& ctx) const { - const auto& extract = node.Cast<TCoExtractMembers>(); - const auto& input = extract.Input(); - const auto& read = input.Maybe<TDqReadWrap>().Input().Maybe<TPqReadTopic>(); - if (!read) { - return node; - } - - const auto& cast = read.Cast(); - return Build<TDqReadWrap>(ctx, node.Pos()) - .InitFrom(input.Cast<TDqReadWrap>()) - .Input<TPqReadTopic>() - .InitFrom(cast) - .Columns(extract.Members()) - .Build() - .Done(); + TMaybeNode<TExprBase> ExtractMembers(TExprBase node, TExprContext& ctx) const { + const auto& extract = node.Cast<TCoExtractMembers>(); + const auto& input = extract.Input(); + const auto& read = input.Maybe<TCoRight>().Input().Maybe<TPqReadTopic>(); + if (!read) { + return node; + } + + const auto& cast = read.Cast(); + return Build<TCoRight>(ctx, extract.Pos()) + .Input<TPqReadTopic>() + .World(cast.World()) + .DataSource(cast.DataSource()) + .Topic(cast.Topic()) + .Columns(extract.Members()) + .Build() + .Done(); + } + + TMaybeNode<TExprBase> ExtractMembersOverDqWrap(TExprBase node, TExprContext& ctx) const { + const auto& extract = node.Cast<TCoExtractMembers>(); + const auto& input = extract.Input(); + const auto& read = input.Maybe<TDqReadWrap>().Input().Maybe<TPqReadTopic>(); + if (!read) { + return node; + } + + const auto& cast = read.Cast(); + return Build<TDqReadWrap>(ctx, node.Pos()) + .InitFrom(input.Cast<TDqReadWrap>()) + .Input<TPqReadTopic>() + .InitFrom(cast) + .Columns(extract.Members()) + .Build() + .Done(); }*/ - -private: - TPqState::TPtr State_; -}; - -} - -THolder<IGraphTransformer> CreatePqLogicalOptProposalTransformer(TPqState::TPtr state) { - return MakeHolder<TPqLogicalOptProposalTransformer>(state); -} - -} // namespace NYql + +private: + TPqState::TPtr State_; +}; + +} + +THolder<IGraphTransformer> CreatePqLogicalOptProposalTransformer(TPqState::TPtr state) { + return MakeHolder<TPqLogicalOptProposalTransformer>(state); +} + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_mkql_compiler.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_mkql_compiler.cpp index 8886ddb83e7..7104c207bdb 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_mkql_compiler.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_mkql_compiler.cpp @@ -1,14 +1,14 @@ -#include "yql_pq_mkql_compiler.h" - +#include "yql_pq_mkql_compiler.h" + #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h> #include <ydb/library/yql/providers/dq/expr_nodes/dqs_expr_nodes.h> #include <ydb/library/yql/providers/common/mkql/parser.h> - -namespace NYql { - -using namespace NKikimr::NMiniKQL; -using namespace NNodes; - + +namespace NYql { + +using namespace 
NKikimr::NMiniKQL; +using namespace NNodes; + void RegisterDqPqMkqlCompilers(NCommon::TMkqlCallableCompilerBase& compiler) { compiler.ChainCallable(TDqSourceWideWrap::CallableName(), [](const TExprNode& node, NCommon::TMkqlBuildContext& ctx) { diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_mkql_compiler.h b/ydb/library/yql/providers/pq/provider/yql_pq_mkql_compiler.h index 5cebebcca71..2d36e38f8df 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_mkql_compiler.h +++ b/ydb/library/yql/providers/pq/provider/yql_pq_mkql_compiler.h @@ -1,11 +1,11 @@ -#pragma once - -#include "yql_pq_provider.h" - +#pragma once + +#include "yql_pq_provider.h" + #include <ydb/library/yql/providers/common/mkql/yql_provider_mkql.h> - -namespace NYql { - + +namespace NYql { + void RegisterDqPqMkqlCompilers(NCommon::TMkqlCallableCompilerBase& compiler); - -} + +} diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_physical_optimize.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_physical_optimize.cpp index a9351eb654f..e1fbeac29d4 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_physical_optimize.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_physical_optimize.cpp @@ -1,6 +1,6 @@ -#include "yql_pq_provider_impl.h" -#include "yql_pq_helpers.h" - +#include "yql_pq_provider_impl.h" +#include "yql_pq_helpers.h" + #include <ydb/library/yql/core/yql_opt_utils.h> #include <ydb/library/yql/dq/expr_nodes/dq_expr_nodes.h> #include <ydb/library/yql/dq/opt/dq_opt.h> @@ -11,115 +11,115 @@ #include <ydb/library/yql/providers/pq/common/yql_names.h> #include <ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h> #include <ydb/library/yql/providers/result/expr_nodes/yql_res_expr_nodes.h> - -namespace NYql { - -namespace { - -using namespace NNodes; -using namespace NDq; - -class TPqPhysicalOptProposalTransformer : public TOptimizeTransformerBase { -public: - explicit TPqPhysicalOptProposalTransformer(TPqState::TPtr state) - : TOptimizeTransformerBase(state->Types, NLog::EComponent::ProviderPq, {}) - , State_(std::move(state)) - { -#define HNDL(name) "PhysicalOptimizer-"#name, Hndl(&TPqPhysicalOptProposalTransformer::name) - AddHandler(0, &TPqWriteTopic::Match, HNDL(PqWriteTopic)); -#undef HNDL - - SetGlobal(0); // Stage 0 of this optimizer is global => we can remap nodes. - } - - NNodes::TCoNameValueTupleList BuildTopicWriteSettings(const TString& cluster, TPositionHandle pos, TExprContext& ctx) const { - TVector<TCoNameValueTuple> props; - + +namespace NYql { + +namespace { + +using namespace NNodes; +using namespace NDq; + +class TPqPhysicalOptProposalTransformer : public TOptimizeTransformerBase { +public: + explicit TPqPhysicalOptProposalTransformer(TPqState::TPtr state) + : TOptimizeTransformerBase(state->Types, NLog::EComponent::ProviderPq, {}) + , State_(std::move(state)) + { +#define HNDL(name) "PhysicalOptimizer-"#name, Hndl(&TPqPhysicalOptProposalTransformer::name) + AddHandler(0, &TPqWriteTopic::Match, HNDL(PqWriteTopic)); +#undef HNDL + + SetGlobal(0); // Stage 0 of this optimizer is global => we can remap nodes. 
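        // Editor's note: explanatory comment, not part of the commit. With the HNDL macro defined
        // above, the registration
        //     AddHandler(0, &TPqWriteTopic::Match, HNDL(PqWriteTopic));
        // expands to roughly
        //     AddHandler(0, &TPqWriteTopic::Match,
        //                "PhysicalOptimizer-PqWriteTopic",
        //                Hndl(&TPqPhysicalOptProposalTransformer::PqWriteTopic));
        // Stage 0 is marked global because the PqWriteTopic handler below does not only return a
        // local replacement for the matched node: it also rewrites the producing stage in place via
        // optCtx.RemapNode(), which is what the comment on SetGlobal(0) refers to.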
+ } + + NNodes::TCoNameValueTupleList BuildTopicWriteSettings(const TString& cluster, TPositionHandle pos, TExprContext& ctx) const { + TVector<TCoNameValueTuple> props; + auto clusterConfiguration = State_->Configuration->ClustersConfigurationSettings.FindPtr(cluster); if (!clusterConfiguration) { - ythrow yexception() << "Unknown pq cluster \"" << cluster << "\""; - } - + ythrow yexception() << "Unknown pq cluster \"" << cluster << "\""; + } + Add(props, EndpointSetting, clusterConfiguration->Endpoint, pos, ctx); if (clusterConfiguration->UseSsl) { - Add(props, UseSslSetting, "1", pos, ctx); - } - + Add(props, UseSslSetting, "1", pos, ctx); + } + if (clusterConfiguration->AddBearerToToken) { Add(props, AddBearerToTokenSetting, "1", pos, ctx); } - return Build<TCoNameValueTupleList>(ctx, pos) - .Add(props) - .Done(); - } - - TMaybeNode<TExprBase> PqWriteTopic(TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx, const TGetParents& getParents) const { - auto write = node.Cast<TPqWriteTopic>(); - if (!TDqCnUnionAll::Match(write.Input().Raw())) { // => this code is not for RTMR mode. - return node; - } - - const auto& topicNode = write.Topic(); - const TString cluster(topicNode.Cluster().Value()); - - const TParentsMap* parentsMap = getParents(); - auto dqUnion = write.Input().Cast<TDqCnUnionAll>(); - if (!NDq::IsSingleConsumerConnection(dqUnion, *parentsMap)) { - return node; - } - + return Build<TCoNameValueTupleList>(ctx, pos) + .Add(props) + .Done(); + } + + TMaybeNode<TExprBase> PqWriteTopic(TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx, const TGetParents& getParents) const { + auto write = node.Cast<TPqWriteTopic>(); + if (!TDqCnUnionAll::Match(write.Input().Raw())) { // => this code is not for RTMR mode. + return node; + } + + const auto& topicNode = write.Topic(); + const TString cluster(topicNode.Cluster().Value()); + + const TParentsMap* parentsMap = getParents(); + auto dqUnion = write.Input().Cast<TDqCnUnionAll>(); + if (!NDq::IsSingleConsumerConnection(dqUnion, *parentsMap)) { + return node; + } + const auto* topicMeta = State_->FindTopicMeta(topicNode); if (!topicMeta) { ctx.AddError(TIssue(ctx.GetPosition(write.Pos()), TStringBuilder() << "Unknown topic `" << topicNode.Cluster().StringValue() << "`.`" << topicNode.Path().StringValue() << "`")); return nullptr; - } - - YQL_CLOG(INFO, ProviderPq) << "Optimize PqWriteTopic `" << topicNode.Cluster().StringValue() << "`.`" << topicNode.Path().StringValue() << "`"; - - auto dqPqTopicSinkSettingsBuilder = Build<TDqPqTopicSink>(ctx, write.Pos()); - dqPqTopicSinkSettingsBuilder.Topic(topicNode); - dqPqTopicSinkSettingsBuilder.Settings(BuildTopicWriteSettings(cluster, write.Pos(), ctx)); - dqPqTopicSinkSettingsBuilder.Token<TCoSecureParam>().Name().Build("cluster:default_" + cluster).Build(); - auto dqPqTopicSinkSettings = dqPqTopicSinkSettingsBuilder.Done(); - - auto dqSink = Build<TDqSink>(ctx, write.Pos()) - .DataSink(write.DataSink()) - .Settings(dqPqTopicSinkSettings) - .Index(dqUnion.Output().Index()) - .Done(); - - TDqStage inputStage = dqUnion.Output().Stage().Cast<TDqStage>(); - - auto sinksBuilder = Build<TDqSinksList>(ctx, topicNode.Pos()); - if (inputStage.Sinks()) { - sinksBuilder.InitFrom(inputStage.Sinks().Cast()); - } - sinksBuilder.Add(dqSink); - - auto dqStageWithSink = Build<TDqStage>(ctx, inputStage.Pos()) - .InitFrom(inputStage) - .Sinks(sinksBuilder.Done()) - .Done(); - - auto dqQueryBuilder = Build<TDqQuery>(ctx, write.Pos()); - dqQueryBuilder.World(write.World()); - 
dqQueryBuilder.SinkStages().Add(dqStageWithSink).Build(); - - optCtx.RemapNode(inputStage.Ref(), dqStageWithSink.Ptr()); - - return dqQueryBuilder.Done(); - } - -private: - TPqState::TPtr State_; -}; - -} // namespace - -THolder<IGraphTransformer> CreatePqPhysicalOptProposalTransformer(TPqState::TPtr state) { - return MakeHolder<TPqPhysicalOptProposalTransformer>(std::move(state)); -} - -} // namespace NYql + } + + YQL_CLOG(INFO, ProviderPq) << "Optimize PqWriteTopic `" << topicNode.Cluster().StringValue() << "`.`" << topicNode.Path().StringValue() << "`"; + + auto dqPqTopicSinkSettingsBuilder = Build<TDqPqTopicSink>(ctx, write.Pos()); + dqPqTopicSinkSettingsBuilder.Topic(topicNode); + dqPqTopicSinkSettingsBuilder.Settings(BuildTopicWriteSettings(cluster, write.Pos(), ctx)); + dqPqTopicSinkSettingsBuilder.Token<TCoSecureParam>().Name().Build("cluster:default_" + cluster).Build(); + auto dqPqTopicSinkSettings = dqPqTopicSinkSettingsBuilder.Done(); + + auto dqSink = Build<TDqSink>(ctx, write.Pos()) + .DataSink(write.DataSink()) + .Settings(dqPqTopicSinkSettings) + .Index(dqUnion.Output().Index()) + .Done(); + + TDqStage inputStage = dqUnion.Output().Stage().Cast<TDqStage>(); + + auto sinksBuilder = Build<TDqSinksList>(ctx, topicNode.Pos()); + if (inputStage.Sinks()) { + sinksBuilder.InitFrom(inputStage.Sinks().Cast()); + } + sinksBuilder.Add(dqSink); + + auto dqStageWithSink = Build<TDqStage>(ctx, inputStage.Pos()) + .InitFrom(inputStage) + .Sinks(sinksBuilder.Done()) + .Done(); + + auto dqQueryBuilder = Build<TDqQuery>(ctx, write.Pos()); + dqQueryBuilder.World(write.World()); + dqQueryBuilder.SinkStages().Add(dqStageWithSink).Build(); + + optCtx.RemapNode(inputStage.Ref(), dqStageWithSink.Ptr()); + + return dqQueryBuilder.Done(); + } + +private: + TPqState::TPtr State_; +}; + +} // namespace + +THolder<IGraphTransformer> CreatePqPhysicalOptProposalTransformer(TPqState::TPtr state) { + return MakeHolder<TPqPhysicalOptProposalTransformer>(std::move(state)); +} + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_provider.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_provider.cpp index b3f700c1475..b2afa0e415c 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_provider.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_provider.cpp @@ -1,73 +1,73 @@ -#include "yql_pq_provider.h" -#include "yql_pq_provider_impl.h" -#include "yql_pq_dq_integration.h" - +#include "yql_pq_provider.h" +#include "yql_pq_provider_impl.h" +#include "yql_pq_dq_integration.h" + #include <ydb/library/yql/core/yql_type_annotation.h> #include <ydb/library/yql/utils/log/context.h> #include <ydb/library/yql/providers/common/proto/gateways_config.pb.h> #include <ydb/library/yql/providers/common/provider/yql_provider_names.h> - -namespace NYql { - + +namespace NYql { + TDataProviderInitializer GetPqDataProviderInitializer( IPqGateway::TPtr gateway, bool supportRtmrMode, std::shared_ptr<NYq::TDatabaseAsyncResolverWithMeta> dbResolverWithMeta) { return [gateway, supportRtmrMode, dbResolverWithMeta] ( - const TString& userName, - const TString& sessionId, - const TGatewaysConfig* gatewaysConfig, - const NKikimr::NMiniKQL::IFunctionRegistry* functionRegistry, - TIntrusivePtr<IRandomProvider> randomProvider, - TIntrusivePtr<TTypeAnnotationContext> typeCtx, - const TOperationProgressWriter& progressWriter, - const TYqlOperationOptions& operationOptions) - { - Y_UNUSED(userName); - Y_UNUSED(functionRegistry); - Y_UNUSED(randomProvider); - Y_UNUSED(progressWriter); - 
Y_UNUSED(operationOptions); - - auto state = MakeIntrusive<TPqState>(sessionId); + const TString& userName, + const TString& sessionId, + const TGatewaysConfig* gatewaysConfig, + const NKikimr::NMiniKQL::IFunctionRegistry* functionRegistry, + TIntrusivePtr<IRandomProvider> randomProvider, + TIntrusivePtr<TTypeAnnotationContext> typeCtx, + const TOperationProgressWriter& progressWriter, + const TYqlOperationOptions& operationOptions) + { + Y_UNUSED(userName); + Y_UNUSED(functionRegistry); + Y_UNUSED(randomProvider); + Y_UNUSED(progressWriter); + Y_UNUSED(operationOptions); + + auto state = MakeIntrusive<TPqState>(sessionId); state->SupportRtmrMode = supportRtmrMode; - state->Types = typeCtx.Get(); - state->FunctionRegistry = functionRegistry; + state->Types = typeCtx.Get(); + state->FunctionRegistry = functionRegistry; state->DbResolver = dbResolverWithMeta; - if (gatewaysConfig) { + if (gatewaysConfig) { state->Configuration->Init(gatewaysConfig->GetPq(), typeCtx, dbResolverWithMeta, state->DatabaseIds); - } - state->Gateway = gateway; - state->DqIntegration = CreatePqDqIntegration(state); - - TDataProviderInfo info; - - info.Names.insert({TString{PqProviderName}}); + } + state->Gateway = gateway; + state->DqIntegration = CreatePqDqIntegration(state); + + TDataProviderInfo info; + + info.Names.insert({TString{PqProviderName}}); info.Source = CreatePqDataSource(state, gateway); info.Sink = CreatePqDataSink(state, gateway); - + info.OpenSession = [gateway](const TString& sessionId, const TString& username, - const TOperationProgressWriter& progressWriter, const TYqlOperationOptions& operationOptions, - TIntrusivePtr<IRandomProvider> randomProvider, TIntrusivePtr<ITimeProvider> timeProvider) { - Y_UNUSED(progressWriter); - Y_UNUSED(operationOptions); - Y_UNUSED(randomProvider); - Y_UNUSED(timeProvider); - + const TOperationProgressWriter& progressWriter, const TYqlOperationOptions& operationOptions, + TIntrusivePtr<IRandomProvider> randomProvider, TIntrusivePtr<ITimeProvider> timeProvider) { + Y_UNUSED(progressWriter); + Y_UNUSED(operationOptions); + Y_UNUSED(randomProvider); + Y_UNUSED(timeProvider); + return gateway->OpenSession(sessionId, username); - }; - - info.CloseSession = [gateway](const TString& sessionId) { - gateway->CloseSession(sessionId); - }; - - return info; - }; -} - -const TPqState::TTopicMeta* TPqState::FindTopicMeta(const TString& cluster, const TString& topicPath) const { - const auto topicKey = std::make_pair(cluster, topicPath); - return Topics.FindPtr(topicKey); -} - -} // namespace NYql + }; + + info.CloseSession = [gateway](const TString& sessionId) { + gateway->CloseSession(sessionId); + }; + + return info; + }; +} + +const TPqState::TTopicMeta* TPqState::FindTopicMeta(const TString& cluster, const TString& topicPath) const { + const auto topicKey = std::make_pair(cluster, topicPath); + return Topics.FindPtr(topicKey); +} + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_provider.h b/ydb/library/yql/providers/pq/provider/yql_pq_provider.h index a890e32b69b..2e5e8132ce3 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_provider.h +++ b/ydb/library/yql/providers/pq/provider/yql_pq_provider.h @@ -1,65 +1,65 @@ -#pragma once -#include "yql_pq_settings.h" -#include "yql_pq_gateway.h" - +#pragma once +#include "yql_pq_settings.h" +#include "yql_pq_gateway.h" + #include <ydb/library/yql/core/yql_data_provider.h> #include <ydb/library/yql/providers/dq/interface/yql_dq_integration.h> #include 
<ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.h> #include <ydb/core/yq/libs/db_resolver/db_async_resolver_with_meta.h> - -namespace NKikimr::NMiniKQL { -class IFunctionRegistry; -} - -namespace NYql { - -struct TPqState : public TThrRefBase { - using TPtr = TIntrusivePtr<TPqState>; - - struct TTopicMeta { + +namespace NKikimr::NMiniKQL { +class IFunctionRegistry; +} + +namespace NYql { + +struct TPqState : public TThrRefBase { + using TPtr = TIntrusivePtr<TPqState>; + + struct TTopicMeta { TPositionHandle Pos; bool RawFormat = true; TExprNode::TPtr RowSpec; TExprNode::TPtr ColumnOrder; - TMaybe<::NPq::NConfigurationManager::TTopicDescription> Description; - }; - -public: - explicit TPqState(const TString& sessionId) - : SessionId(sessionId) - { - } - - const TTopicMeta* FindTopicMeta(const TString& cluster, const TString& topicPath) const; - const TTopicMeta* FindTopicMeta(const NNodes::TPqTopic& topic) const { - return FindTopicMeta(topic.Cluster().StringValue(), topic.Path().StringValue()); - } - - bool IsRtmrMode() const { - if (!SupportRtmrMode) { - return false; - } - return Configuration->PqReadByRtmrCluster_.Get() != "dq"; - } - + TMaybe<::NPq::NConfigurationManager::TTopicDescription> Description; + }; + +public: + explicit TPqState(const TString& sessionId) + : SessionId(sessionId) + { + } + + const TTopicMeta* FindTopicMeta(const TString& cluster, const TString& topicPath) const; + const TTopicMeta* FindTopicMeta(const NNodes::TPqTopic& topic) const { + return FindTopicMeta(topic.Cluster().StringValue(), topic.Path().StringValue()); + } + + bool IsRtmrMode() const { + if (!SupportRtmrMode) { + return false; + } + return Configuration->PqReadByRtmrCluster_.Get() != "dq"; + } + public: bool SupportRtmrMode = false; const TString SessionId; THashMap<std::pair<TString, TString>, TTopicMeta> Topics; - TTypeAnnotationContext* Types = nullptr; - TPqConfiguration::TPtr Configuration = MakeIntrusive<TPqConfiguration>(); - const NKikimr::NMiniKQL::IFunctionRegistry* FunctionRegistry = nullptr; - IPqGateway::TPtr Gateway; - THolder<IDqIntegration> DqIntegration; + TTypeAnnotationContext* Types = nullptr; + TPqConfiguration::TPtr Configuration = MakeIntrusive<TPqConfiguration>(); + const NKikimr::NMiniKQL::IFunctionRegistry* FunctionRegistry = nullptr; + IPqGateway::TPtr Gateway; + THolder<IDqIntegration> DqIntegration; THashMap<std::pair<TString, NYq::DatabaseType>, NYq::TEvents::TDatabaseAuth> DatabaseIds; std::shared_ptr<NYq::TDatabaseAsyncResolverWithMeta> DbResolver; -}; - +}; + TDataProviderInitializer GetPqDataProviderInitializer( IPqGateway::TPtr gateway, bool supportRtmrMode = false, std::shared_ptr<NYq::TDatabaseAsyncResolverWithMeta> dbResolverWithMeta = nullptr ); - -} // namespace NYql + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_provider_impl.h b/ydb/library/yql/providers/pq/provider/yql_pq_provider_impl.h index 619eb468e29..4213dd54fad 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_provider_impl.h +++ b/ydb/library/yql/providers/pq/provider/yql_pq_provider_impl.h @@ -1,32 +1,32 @@ -#pragma once - -#include "yql_pq_provider.h" - +#pragma once + +#include "yql_pq_provider.h" + #include <ydb/library/yql/core/yql_graph_transformer.h> #include <ydb/library/yql/providers/common/transform/yql_exec.h> #include <ydb/library/yql/providers/common/transform/yql_visit.h> - -#include <util/generic/ptr.h> - -namespace NYql { - + +#include <util/generic/ptr.h> + +namespace NYql { + TIntrusivePtr<IDataProvider> 
CreatePqDataSource(TPqState::TPtr state, IPqGateway::TPtr gateway); TIntrusivePtr<IDataProvider> CreatePqDataSink(TPqState::TPtr state, IPqGateway::TPtr gateway); - -THolder<IGraphTransformer> CreatePqLoadTopicMetadataTransformer(TPqState::TPtr state); - + +THolder<IGraphTransformer> CreatePqLoadTopicMetadataTransformer(TPqState::TPtr state); + THolder<IGraphTransformer> CreatePqDataSinkIODiscoveryTransformer(TPqState::TPtr state); -THolder<TVisitorTransformerBase> CreatePqDataSourceTypeAnnotationTransformer(TPqState::TPtr state); -THolder<TVisitorTransformerBase> CreatePqDataSinkTypeAnnotationTransformer(TPqState::TPtr state); - -THolder<TExecTransformerBase> CreatePqDataSinkExecTransformer(TPqState::TPtr state); - -THolder<IGraphTransformer> CreatePqLogicalOptProposalTransformer(TPqState::TPtr state); - -THolder<IGraphTransformer> CreatePqPhysicalOptProposalTransformer(TPqState::TPtr state); - +THolder<TVisitorTransformerBase> CreatePqDataSourceTypeAnnotationTransformer(TPqState::TPtr state); +THolder<TVisitorTransformerBase> CreatePqDataSinkTypeAnnotationTransformer(TPqState::TPtr state); + +THolder<TExecTransformerBase> CreatePqDataSinkExecTransformer(TPqState::TPtr state); + +THolder<IGraphTransformer> CreatePqLogicalOptProposalTransformer(TPqState::TPtr state); + +THolder<IGraphTransformer> CreatePqPhysicalOptProposalTransformer(TPqState::TPtr state); + THolder<IGraphTransformer> CreatePqIODiscoveryTransformer(TPqState::TPtr state); TString MakeTopicDisplayName(TStringBuf cluster, TStringBuf path); -} // namespace NYql +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_settings.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_settings.cpp index 278db980f41..a22c711c96e 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_settings.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_settings.cpp @@ -1,19 +1,19 @@ -#include "yql_pq_settings.h" - -namespace NYql { - -using namespace NCommon; - -TPqConfiguration::TPqConfiguration() { - REGISTER_SETTING(*this, Consumer); - REGISTER_SETTING(*this, Database); +#include "yql_pq_settings.h" + +namespace NYql { + +using namespace NCommon; + +TPqConfiguration::TPqConfiguration() { + REGISTER_SETTING(*this, Consumer); + REGISTER_SETTING(*this, Database); REGISTER_SETTING(*this, PqReadByRtmrCluster_); -} - -TPqSettings::TConstPtr TPqConfiguration::Snapshot() const { +} + +TPqSettings::TConstPtr TPqConfiguration::Snapshot() const { return std::make_shared<const TPqSettings>(*this); -} - +} + void TPqConfiguration::Init( const TPqGatewayConfig& config, TIntrusivePtr<TTypeAnnotationContext> typeCtx, @@ -69,4 +69,4 @@ TString TPqConfiguration::GetDatabaseForTopic(const TString& cluster) const { return clusterSetting->Database; } -} // NYql +} // NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_settings.h b/ydb/library/yql/providers/pq/provider/yql_pq_settings.h index 5ed40d07d92..aadf99d1c1a 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_settings.h +++ b/ydb/library/yql/providers/pq/provider/yql_pq_settings.h @@ -1,56 +1,56 @@ -#pragma once +#pragma once #include <ydb/library/yql/utils/log/log.h> #include <ydb/library/yql/providers/common/structured_token/yql_token_builder.h> #include <ydb/library/yql/providers/common/config/yql_dispatch.h> #include <ydb/library/yql/providers/common/config/yql_setting.h> #include <ydb/library/yql/providers/common/proto/gateways_config.pb.h> - + #include <ydb/core/yq/libs/events/events.h> #include <ydb/core/yq/libs/db_resolver/db_async_resolver_with_meta.h> 
#include <ydb/core/yq/libs/common/database_token_builder.h> -namespace NYql { - -struct TPqSettings { +namespace NYql { + +struct TPqSettings { using TConstPtr = std::shared_ptr<const TPqSettings>; - - NCommon::TConfSetting<TString, false> Consumer; - NCommon::TConfSetting<TString, false> Database; // It is needed in case of Cloud.LB for external users, but can be taken from config for internal LB. + + NCommon::TConfSetting<TString, false> Consumer; + NCommon::TConfSetting<TString, false> Database; // It is needed in case of Cloud.LB for external users, but can be taken from config for internal LB. NCommon::TConfSetting<TString, false> PqReadByRtmrCluster_; -}; - -struct TPqClusterConfigurationSettings { - TString ClusterName; +}; + +struct TPqClusterConfigurationSettings { + TString ClusterName; NYql::TPqClusterConfig::EClusterType ClusterType = NYql::TPqClusterConfig::CT_UNSPECIFIED; - TString Endpoint; - TString ConfigManagerEndpoint; + TString Endpoint; + TString ConfigManagerEndpoint; bool UseSsl = false; - TString Database; - TString DatabaseId; - ui32 TvmId = 0; + TString Database; + TString DatabaseId; + ui32 TvmId = 0; TString AuthToken; bool AddBearerToToken = false; -}; - -struct TPqConfiguration : public TPqSettings, public NCommon::TSettingDispatcher { - using TPtr = TIntrusivePtr<TPqConfiguration>; - - TPqConfiguration(); - TPqConfiguration(const TPqConfiguration&) = delete; - +}; + +struct TPqConfiguration : public TPqSettings, public NCommon::TSettingDispatcher { + using TPtr = TIntrusivePtr<TPqConfiguration>; + + TPqConfiguration(); + TPqConfiguration(const TPqConfiguration&) = delete; + void Init( const TPqGatewayConfig& config, TIntrusivePtr<TTypeAnnotationContext> typeCtx, const std::shared_ptr<NYq::TDatabaseAsyncResolverWithMeta> dbResolver, THashMap<std::pair<TString, NYq::DatabaseType>, NYq::TEvents::TDatabaseAuth>& databaseIds); - + TString GetDatabaseForTopic(const TString& cluster) const; - - TPqSettings::TConstPtr Snapshot() const; - THashMap<TString, TPqClusterConfigurationSettings> ClustersConfigurationSettings; - THashMap<TString, TString> Tokens; + + TPqSettings::TConstPtr Snapshot() const; + THashMap<TString, TPqClusterConfigurationSettings> ClustersConfigurationSettings; + THashMap<TString, TString> Tokens; THashMap<TString, TVector<TString>> DbId2Clusters; // DatabaseId -> ClusterNames -}; - -} // NYql +}; + +} // NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_topic_key_parser.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_topic_key_parser.cpp index 2b99e52484a..4014ea331fb 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_topic_key_parser.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_topic_key_parser.cpp @@ -1,9 +1,9 @@ -#include "yql_pq_topic_key_parser.h" - +#include "yql_pq_topic_key_parser.h" + #include <ydb/library/yql/core/expr_nodes/yql_expr_nodes.h> - -namespace NYql { - + +namespace NYql { + namespace { std::pair<TExprNode::TPtr, TExprNode::TPtr> GetSchema(const TExprNode& settings) { for (auto i = 0U; i < settings.ChildrenSize(); ++i) { @@ -13,9 +13,9 @@ std::pair<TExprNode::TPtr, TExprNode::TPtr> GetSchema(const TExprNode& settings) } return {}; +} } -} - + TTopicKeyParser::TTopicKeyParser(const TExprNode& expr, TExprNode::TPtr readSettings, TExprContext& ctx) { YQL_ENSURE(Parse(expr, readSettings, ctx), "Failed to parse topic info"); } @@ -23,7 +23,7 @@ TTopicKeyParser::TTopicKeyParser(const TExprNode& expr, TExprNode::TPtr readSett bool TTopicKeyParser::Parse(const TExprNode& expr, TExprNode::TPtr 
readSettings, TExprContext& ctx) { if (expr.IsCallable("MrTableConcat")) { return TryParseKey(expr.Head(), ctx); - } + } if (expr.IsCallable(NNodes::TCoKey::CallableName())) { return TryParseKey(expr, ctx); @@ -34,26 +34,26 @@ bool TTopicKeyParser::Parse(const TExprNode& expr, TExprNode::TPtr readSettings, } ctx.AddError(TIssue(ctx.GetPosition(expr.Pos()), "Expected MrTableConcat or Key or MrObject")); return false; -} - +} + bool TTopicKeyParser::TryParseKey(const TExprNode& expr, TExprContext& ctx) { const auto maybeKey = NNodes::TExprBase(&expr).Maybe<NNodes::TCoKey>(); - if (!maybeKey) { - ctx.AddError(TIssue(ctx.GetPosition(expr.Pos()), "Expected Key")); - return false; - } - - const auto& keyArg = maybeKey.Cast().Ref().Head(); - if (!keyArg.IsList() || keyArg.ChildrenSize() != 2 || - !keyArg.Head().IsAtom("table") || !keyArg.Child(1)->IsCallable(NNodes::TCoString::CallableName())) { - ctx.AddError(TIssue(ctx.GetPosition(keyArg.Pos()), "Expected single table name")); - return false; - } - - TopicPath = TString(keyArg.Child(1)->Child(0)->Content()); - return true; -} - + if (!maybeKey) { + ctx.AddError(TIssue(ctx.GetPosition(expr.Pos()), "Expected Key")); + return false; + } + + const auto& keyArg = maybeKey.Cast().Ref().Head(); + if (!keyArg.IsList() || keyArg.ChildrenSize() != 2 || + !keyArg.Head().IsAtom("table") || !keyArg.Child(1)->IsCallable(NNodes::TCoString::CallableName())) { + ctx.AddError(TIssue(ctx.GetPosition(keyArg.Pos()), "Expected single table name")); + return false; + } + + TopicPath = TString(keyArg.Child(1)->Child(0)->Content()); + return true; +} + bool TTopicKeyParser::TryParseObject(const TExprNode& expr, TExprNode::TPtr readSettings) { std::tie(UserSchema, ColumnOrder) = GetSchema(*readSettings); TopicPath = TString(expr.Child(0)->Content()); @@ -61,4 +61,4 @@ bool TTopicKeyParser::TryParseObject(const TExprNode& expr, TExprNode::TPtr read Compression = TString(expr.Child(2)->Content()); return true; } -} // namespace NYql +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_topic_key_parser.h b/ydb/library/yql/providers/pq/provider/yql_pq_topic_key_parser.h index b22f61125fd..6dadcde1bc3 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_topic_key_parser.h +++ b/ydb/library/yql/providers/pq/provider/yql_pq_topic_key_parser.h @@ -1,23 +1,23 @@ -#pragma once +#pragma once #include <ydb/library/yql/ast/yql_expr.h> - -#include <util/generic/string.h> - -namespace NYql { - -class TTopicKeyParser { -public: + +#include <util/generic/string.h> + +namespace NYql { + +class TTopicKeyParser { +public: TTopicKeyParser() {} TTopicKeyParser(const TExprNode& expr, TExprNode::TPtr readSettings, TExprContext& ctx); - - const TString& GetTopicPath() const { - return TopicPath; - } - + + const TString& GetTopicPath() const { + return TopicPath; + } + TExprNode::TPtr GetUserSchema() const { return UserSchema; } - + TExprNode::TPtr GetColumnOrder() const { return ColumnOrder; } @@ -32,16 +32,16 @@ public: bool Parse(const TExprNode& expr, TExprNode::TPtr readSettings, TExprContext& ctx); -private: +private: bool TryParseKey(const TExprNode& expr, TExprContext& ctx); bool TryParseObject(const TExprNode& expr, TExprNode::TPtr readSettings); - -private: - TString TopicPath; + +private: + TString TopicPath; TString Format; TString Compression; TExprNode::TPtr UserSchema; TExprNode::TPtr ColumnOrder; -}; - -} // namespace NYql +}; + +} // namespace NYql diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_ut.cpp 
b/ydb/library/yql/providers/pq/provider/yql_pq_ut.cpp index 4127452d120..58023933937 100644 --- a/ydb/library/yql/providers/pq/provider/yql_pq_ut.cpp +++ b/ydb/library/yql/providers/pq/provider/yql_pq_ut.cpp @@ -108,8 +108,8 @@ bool RunPqProgram( NYql::CreateCommonDqTaskTransformFactory() }); - const auto driverConfig = NYdb::TDriverConfig().SetLog(CreateLogBackend("cerr")); - NYdb::TDriver driver(driverConfig); + const auto driverConfig = NYdb::TDriverConfig().SetLog(CreateLogBackend("cerr")); + NYdb::TDriver driver(driverConfig); auto dqGateway = CreateLocalDqGateway(driver, functionRegistry.Get(), dqCompFactory, dqTaskTransformFactory, {}); auto storage = NYql::CreateFileStorage({}); @@ -169,8 +169,8 @@ bool RunPqProgram( return false; } - driver.Stop(true); - + driver.Stop(true); + Cerr << "Done." << Endl; return true; } diff --git a/ydb/library/yql/providers/pq/task_meta/task_meta.cpp b/ydb/library/yql/providers/pq/task_meta/task_meta.cpp index 4953489dd08..da0ca25656a 100644 --- a/ydb/library/yql/providers/pq/task_meta/task_meta.cpp +++ b/ydb/library/yql/providers/pq/task_meta/task_meta.cpp @@ -1,27 +1,27 @@ -#include "task_meta.h" - +#include "task_meta.h" + #include <ydb/library/yql/providers/dq/api/protos/service.pb.h> #include <ydb/library/yql/providers/pq/proto/dq_task_params.pb.h> - -namespace NYql::NPq { - -TMaybe<TTopicPartitionsSet> GetTopicPartitionsSet(const google::protobuf::Any& dqTaskMeta) { - if (dqTaskMeta.Is<Yql::DqsProto::TTaskMeta>()) { - Yql::DqsProto::TTaskMeta meta; - if (dqTaskMeta.UnpackTo(&meta)) { - auto pqReaderParams = meta.GetTaskParams().find("pq"); - if (pqReaderParams != meta.GetTaskParams().end()) { - NYql::NPq::NProto::TDqReadTaskParams readTaskParams; - if (readTaskParams.ParseFromString(pqReaderParams->second)) { - return TTopicPartitionsSet{ - readTaskParams.GetPartitioningParams().GetEachTopicPartitionGroupId(), - readTaskParams.GetPartitioningParams().GetDqPartitionsCount(), - readTaskParams.GetPartitioningParams().GetTopicPartitionsCount()}; - } - } - } - } - return Nothing(); -} - -} // namespace NYql::NPq + +namespace NYql::NPq { + +TMaybe<TTopicPartitionsSet> GetTopicPartitionsSet(const google::protobuf::Any& dqTaskMeta) { + if (dqTaskMeta.Is<Yql::DqsProto::TTaskMeta>()) { + Yql::DqsProto::TTaskMeta meta; + if (dqTaskMeta.UnpackTo(&meta)) { + auto pqReaderParams = meta.GetTaskParams().find("pq"); + if (pqReaderParams != meta.GetTaskParams().end()) { + NYql::NPq::NProto::TDqReadTaskParams readTaskParams; + if (readTaskParams.ParseFromString(pqReaderParams->second)) { + return TTopicPartitionsSet{ + readTaskParams.GetPartitioningParams().GetEachTopicPartitionGroupId(), + readTaskParams.GetPartitioningParams().GetDqPartitionsCount(), + readTaskParams.GetPartitioningParams().GetTopicPartitionsCount()}; + } + } + } + } + return Nothing(); +} + +} // namespace NYql::NPq diff --git a/ydb/library/yql/providers/pq/task_meta/task_meta.h b/ydb/library/yql/providers/pq/task_meta/task_meta.h index 230c65eb228..4aecf23f861 100644 --- a/ydb/library/yql/providers/pq/task_meta/task_meta.h +++ b/ydb/library/yql/providers/pq/task_meta/task_meta.h @@ -1,21 +1,21 @@ -#pragma once -#include <google/protobuf/any.pb.h> - -#include <util/generic/maybe.h> - -namespace NYql::NPq { - -// Partitioning parameters for topic -struct TTopicPartitionsSet { - ui64 EachTopicPartitionGroupId; - ui64 DqPartitionsCount; - ui64 TopicPartitionsCount; - - bool Intersects(const TTopicPartitionsSet& other) const { - return DqPartitionsCount != other.DqPartitionsCount || 
EachTopicPartitionGroupId == other.EachTopicPartitionGroupId; - } -}; - -TMaybe<TTopicPartitionsSet> GetTopicPartitionsSet(const google::protobuf::Any& dqTaskMeta); - -} // namespace NYql::NPq +#pragma once +#include <google/protobuf/any.pb.h> + +#include <util/generic/maybe.h> + +namespace NYql::NPq { + +// Partitioning parameters for topic +struct TTopicPartitionsSet { + ui64 EachTopicPartitionGroupId; + ui64 DqPartitionsCount; + ui64 TopicPartitionsCount; + + bool Intersects(const TTopicPartitionsSet& other) const { + return DqPartitionsCount != other.DqPartitionsCount || EachTopicPartitionGroupId == other.EachTopicPartitionGroupId; + } +}; + +TMaybe<TTopicPartitionsSet> GetTopicPartitionsSet(const google::protobuf::Any& dqTaskMeta); + +} // namespace NYql::NPq diff --git a/ydb/library/yql/providers/pq/task_meta/ya.make b/ydb/library/yql/providers/pq/task_meta/ya.make index 02f3ca3c9e4..04955ee106a 100644 --- a/ydb/library/yql/providers/pq/task_meta/ya.make +++ b/ydb/library/yql/providers/pq/task_meta/ya.make @@ -1,20 +1,20 @@ -OWNER( - galaxycrab - g:yq - g:yql -) - -LIBRARY() - -SRCS( - task_meta.cpp -) - -PEERDIR( +OWNER( + galaxycrab + g:yq + g:yql +) + +LIBRARY() + +SRCS( + task_meta.cpp +) + +PEERDIR( ydb/library/yql/providers/dq/api/protos ydb/library/yql/providers/pq/proto -) - -YQL_LAST_ABI_VERSION() - -END() +) + +YQL_LAST_ABI_VERSION() + +END() diff --git a/ydb/library/yql/providers/pq/ya.make b/ydb/library/yql/providers/pq/ya.make index 840d37c7d53..120e92e6dcf 100644 --- a/ydb/library/yql/providers/pq/ya.make +++ b/ydb/library/yql/providers/pq/ya.make @@ -1,16 +1,16 @@ -OWNER( - galaxycrab - g:yq - g:yql -) - -RECURSE( +OWNER( + galaxycrab + g:yq + g:yql +) + +RECURSE( async_io - common - cm_client - expr_nodes - gateway - proto - provider - task_meta -) + common + cm_client + expr_nodes + gateway + proto + provider + task_meta +) diff --git a/ydb/library/yql/providers/s3/actors/yql_s3_read_actor.cpp b/ydb/library/yql/providers/s3/actors/yql_s3_read_actor.cpp index eb5410a24e3..7f6b1f6114e 100644 --- a/ydb/library/yql/providers/s3/actors/yql_s3_read_actor.cpp +++ b/ydb/library/yql/providers/s3/actors/yql_s3_read_actor.cpp @@ -123,8 +123,8 @@ using TPathList = std::vector<TPath>; static constexpr char ActorName[] = "S3_READ_ACTOR"; private: - void SaveState(const NDqProto::TCheckpoint&, NDqProto::TSourceState&) final {} - void LoadState(const NDqProto::TSourceState&) final {} + void SaveState(const NDqProto::TCheckpoint&, NDqProto::TSourceState&) final {} + void LoadState(const NDqProto::TSourceState&) final {} void CommitState(const NDqProto::TCheckpoint&) final {} ui64 GetInputIndex() const final { return InputIndex; } @@ -193,11 +193,11 @@ private: Callbacks->OnSourceError(InputIndex, result->Get()->Error, true); } - // IActor & IDqSourceActor - void PassAway() override { // Is called from Compute Actor - TActorBootstrapped<TS3ReadActor>::PassAway(); - } - + // IActor & IDqSourceActor + void PassAway() override { // Is called from Compute Actor + TActorBootstrapped<TS3ReadActor>::PassAway(); + } + static IHTTPGateway::THeaders MakeHeader(const TString& token) { return token.empty() ? 
IHTTPGateway::THeaders() : IHTTPGateway::THeaders{TString("X-YaCloud-SubjectToken:") += token}; } diff --git a/ydb/library/yql/providers/solomon/async_io/dq_solomon_write_actor.cpp b/ydb/library/yql/providers/solomon/async_io/dq_solomon_write_actor.cpp index fc07635176f..c30b0edfcc0 100644 --- a/ydb/library/yql/providers/solomon/async_io/dq_solomon_write_actor.cpp +++ b/ydb/library/yql/providers/solomon/async_io/dq_solomon_write_actor.cpp @@ -137,7 +137,7 @@ public: } }; - void LoadState(const NDqProto::TSinkState&) override { } + void LoadState(const NDqProto::TSinkState&) override { } void CommitState(const NDqProto::TCheckpoint&) override { } @@ -228,8 +228,8 @@ private: while (TryToSendNextBatch()) {} } - // IActor & IDqSinkActor - void PassAway() override { // Is called from Compute Actor + // IActor & IDqSinkActor + void PassAway() override { // Is called from Compute Actor for (const auto& [_, metricsInflight] : InflightBuffer) { Send(metricsInflight.HttpSenderId, new TEvents::TEvPoison()); } @@ -238,11 +238,11 @@ private: Send(HttpProxyId, new TEvents::TEvPoison()); } - TActor<TDqSolomonWriteActor>::PassAway(); + TActor<TDqSolomonWriteActor>::PassAway(); } private: - NDqProto::TSinkState BuildState() { return {}; } + NDqProto::TSinkState BuildState() { return {}; } TString GetUrl() const { TStringBuilder builder; @@ -410,7 +410,7 @@ private: } void DoCheckpoint() { - Callbacks->OnSinkStateSaved(BuildState(), OutputIndex, *CheckpointInProgress); + Callbacks->OnSinkStateSaved(BuildState(), OutputIndex, *CheckpointInProgress); CheckpointInProgress = std::nullopt; } diff --git a/ydb/library/yql/providers/solomon/async_io/ut/ya.make b/ydb/library/yql/providers/solomon/async_io/ut/ya.make index 110566a0027..e3726ca1e5e 100644 --- a/ydb/library/yql/providers/solomon/async_io/ut/ya.make +++ b/ydb/library/yql/providers/solomon/async_io/ut/ya.make @@ -2,11 +2,11 @@ UNITTEST_FOR(ydb/library/yql/providers/solomon/async_io) OWNER( d-mokhnatkin - g:yq + g:yq g:yql ) -INCLUDE(${ARCADIA_ROOT}/kikimr/yq/tools/solomon_emulator/recipe/recipe.inc) +INCLUDE(${ARCADIA_ROOT}/kikimr/yq/tools/solomon_emulator/recipe/recipe.inc) SRCS( dq_solomon_write_actor_ut.cpp diff --git a/ydb/library/yql/providers/solomon/async_io/ya.make b/ydb/library/yql/providers/solomon/async_io/ya.make index ea04c723e73..d577ade9907 100644 --- a/ydb/library/yql/providers/solomon/async_io/ya.make +++ b/ydb/library/yql/providers/solomon/async_io/ya.make @@ -1,7 +1,7 @@ LIBRARY() OWNER( - g:yq + g:yq g:yql ) diff --git a/ydb/library/yql/providers/solomon/proto/ya.make b/ydb/library/yql/providers/solomon/proto/ya.make index b30b1d5cfd0..70ea94efa3f 100644 --- a/ydb/library/yql/providers/solomon/proto/ya.make +++ b/ydb/library/yql/providers/solomon/proto/ya.make @@ -1,7 +1,7 @@ PROTO_LIBRARY() OWNER( - g:yq + g:yq g:yql ) diff --git a/ydb/library/yql/providers/solomon/provider/yql_solomon_physical_optimize.cpp b/ydb/library/yql/providers/solomon/provider/yql_solomon_physical_optimize.cpp index 226205cce20..c5336fc157e 100644 --- a/ydb/library/yql/providers/solomon/provider/yql_solomon_physical_optimize.cpp +++ b/ydb/library/yql/providers/solomon/provider/yql_solomon_physical_optimize.cpp @@ -64,8 +64,8 @@ public: #define HNDL(name) "PhysicalOptimizer-"#name, Hndl(&TSoPhysicalOptProposalTransformer::name) AddHandler(0, &TSoWriteToShard::Match, HNDL(SoWriteToShard)); #undef HNDL - - SetGlobal(0); // Stage 0 of this optimizer is global => we can remap nodes. + + SetGlobal(0); // Stage 0 of this optimizer is global => we can remap nodes. 
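        // Editor's note: explanatory comment, not part of the commit. As in the Pq optimizer above,
        // the SoWriteToShard handler below attaches a TDqSink by rebuilding the producing TDqStage
        // (keeping any sinks it already had via InitFrom) and then redirects all consumers of the old
        // stage with optCtx.RemapNode(inputStage.Ref(), dqStageWithSink.Ptr()); remapping is only
        // possible because stage 0 of this transformer is global, which is why SetGlobal(0) is
        // restored here as well.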
} TMaybeNode<TExprBase> SoWriteToShard(TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx, const TGetParents& getParents) const { @@ -98,23 +98,23 @@ public: TDqStage inputStage = dqUnion.Output().Stage().Cast<TDqStage>(); - auto sinksBuilder = Build<TDqSinksList>(ctx, inputStage.Pos()); - if (inputStage.Sinks()) { - sinksBuilder.InitFrom(inputStage.Sinks().Cast()); - } - sinksBuilder.Add(dqSink); + auto sinksBuilder = Build<TDqSinksList>(ctx, inputStage.Pos()); + if (inputStage.Sinks()) { + sinksBuilder.InitFrom(inputStage.Sinks().Cast()); + } + sinksBuilder.Add(dqSink); - auto dqStageWithSink = Build<TDqStage>(ctx, inputStage.Pos()) - .InitFrom(inputStage) - .Sinks(sinksBuilder.Done()) - .Done(); + auto dqStageWithSink = Build<TDqStage>(ctx, inputStage.Pos()) + .InitFrom(inputStage) + .Sinks(sinksBuilder.Done()) + .Done(); auto dqQueryBuilder = Build<TDqQuery>(ctx, write.Pos()); dqQueryBuilder.World(write.World()); dqQueryBuilder.SinkStages().Add(dqStageWithSink).Build(); - - optCtx.RemapNode(inputStage.Ref(), dqStageWithSink.Ptr()); - + + optCtx.RemapNode(inputStage.Ref(), dqStageWithSink.Ptr()); + return dqQueryBuilder.Done(); } diff --git a/ydb/library/yql/providers/ydb/actors/yql_ydb_read_actor.cpp b/ydb/library/yql/providers/ydb/actors/yql_ydb_read_actor.cpp index 7ab55cfcfc9..c9bb0090a5e 100644 --- a/ydb/library/yql/providers/ydb/actors/yql_ydb_read_actor.cpp +++ b/ydb/library/yql/providers/ydb/actors/yql_ydb_read_actor.cpp @@ -106,8 +106,8 @@ public: static constexpr char ActorName[] = "YQL_YDB_READ_ACTOR"; private: - void SaveState(const NDqProto::TCheckpoint&, NDqProto::TSourceState&) final {} - void LoadState(const NDqProto::TSourceState&) final {} + void SaveState(const NDqProto::TCheckpoint&, NDqProto::TSourceState&) final {} + void LoadState(const NDqProto::TSourceState&) final {} void CommitState(const NDqProto::TCheckpoint&) final {} ui64 GetInputIndex() const final { return InputIndex; } @@ -120,10 +120,10 @@ private: SendRequest(); } - // IActor & IDqSourceActor - void PassAway() override { // Is called from Compute Actor + // IActor & IDqSourceActor + void PassAway() override { // Is called from Compute Actor RequestsDone = true; - TActorBootstrapped<TYdbReadActor>::PassAway(); + TActorBootstrapped<TYdbReadActor>::PassAway(); } i64 GetSourceData(NKikimr::NMiniKQL::TUnboxedValueVector& buffer, bool& finished, i64 freeSpace) final { diff --git a/ydb/library/yql/public/issue/yql_issue.cpp b/ydb/library/yql/public/issue/yql_issue.cpp index 3b89156874b..378792b0424 100644 --- a/ydb/library/yql/public/issue/yql_issue.cpp +++ b/ydb/library/yql/public/issue/yql_issue.cpp @@ -5,10 +5,10 @@ #include <library/cpp/colorizer/output.h> -#include <util/string/ascii.h> +#include <util/string/ascii.h> #include <util/string/split.h> #include <util/string/strip.h> -#include <util/string/subst.h> +#include <util/string/subst.h> #include <util/system/compiler.h> #include <util/generic/map.h> #include <util/generic/stack.h> @@ -52,53 +52,53 @@ TTextWalker& TTextWalker::Advance(char c) { return *this; } -void TIssue::PrintTo(IOutputStream& out, bool oneLine) const { - out << Range() << ": " << SeverityToString(GetSeverity()) << ": "; - if (oneLine) { - TString message = StripString(Message); - SubstGlobal(message, '\n', ' '); - out << message; - } else { - out << Message; - } - if (GetCode()) { - out << ", code: " << GetCode(); - } -} - -void WalkThroughIssues(const TIssue& topIssue, bool leafOnly, std::function<void(const TIssue&, ui16 level)> fn, std::function<void(const 
TIssue&, ui16 level)> afterChildrenFn) { - enum class EFnType { - Main, - AfterChildren, - }; - - const bool hasAfterChildrenFn = bool(afterChildrenFn); - TStack<std::tuple<ui16, const TIssue*, EFnType>> issuesStack; - if (hasAfterChildrenFn) { - issuesStack.push(std::make_tuple(0, &topIssue, EFnType::AfterChildren)); - } - issuesStack.push(std::make_tuple(0, &topIssue, EFnType::Main)); +void TIssue::PrintTo(IOutputStream& out, bool oneLine) const { + out << Range() << ": " << SeverityToString(GetSeverity()) << ": "; + if (oneLine) { + TString message = StripString(Message); + SubstGlobal(message, '\n', ' '); + out << message; + } else { + out << Message; + } + if (GetCode()) { + out << ", code: " << GetCode(); + } +} + +void WalkThroughIssues(const TIssue& topIssue, bool leafOnly, std::function<void(const TIssue&, ui16 level)> fn, std::function<void(const TIssue&, ui16 level)> afterChildrenFn) { + enum class EFnType { + Main, + AfterChildren, + }; + + const bool hasAfterChildrenFn = bool(afterChildrenFn); + TStack<std::tuple<ui16, const TIssue*, EFnType>> issuesStack; + if (hasAfterChildrenFn) { + issuesStack.push(std::make_tuple(0, &topIssue, EFnType::AfterChildren)); + } + issuesStack.push(std::make_tuple(0, &topIssue, EFnType::Main)); while (!issuesStack.empty()) { - auto level = std::get<0>(issuesStack.top()); - const auto& curIssue = *std::get<1>(issuesStack.top()); - const EFnType fnType = std::get<2>(issuesStack.top()); + auto level = std::get<0>(issuesStack.top()); + const auto& curIssue = *std::get<1>(issuesStack.top()); + const EFnType fnType = std::get<2>(issuesStack.top()); issuesStack.pop(); if (!leafOnly || curIssue.GetSubIssues().empty()) { - if (fnType == EFnType::Main) { - fn(curIssue, level); - } else { - afterChildrenFn(curIssue, level); - } + if (fnType == EFnType::Main) { + fn(curIssue, level); + } else { + afterChildrenFn(curIssue, level); + } } - if (fnType == EFnType::Main) { - level++; - const auto& subIssues = curIssue.GetSubIssues(); - for (int i = subIssues.size() - 1; i >= 0; i--) { - if (hasAfterChildrenFn) { - issuesStack.push(std::make_tuple(level, subIssues[i].Get(), EFnType::AfterChildren)); - } - issuesStack.push(std::make_tuple(level, subIssues[i].Get(), EFnType::Main)); - } + if (fnType == EFnType::Main) { + level++; + const auto& subIssues = curIssue.GetSubIssues(); + for (int i = subIssues.size() - 1; i >= 0; i--) { + if (hasAfterChildrenFn) { + issuesStack.push(std::make_tuple(level, subIssues[i].Get(), EFnType::AfterChildren)); + } + issuesStack.push(std::make_tuple(level, subIssues[i].Get(), EFnType::Main)); + } } } } @@ -143,38 +143,38 @@ void ProgramLinesWithErrors( } // namspace -void TIssues::PrintTo(IOutputStream& out, bool oneLine) const +void TIssues::PrintTo(IOutputStream& out, bool oneLine) const { - if (oneLine) { - bool printWithSpace = false; - if (Issues_.size() > 1) { - printWithSpace = true; - out << "["; - } - for (const auto& topIssue: Issues_) { - WalkThroughIssues(topIssue, false, [&](const TIssue& issue, ui16 level) { - if (level > 0) { - out << " subissue: { "; - } else { - out << (printWithSpace ? 
" { " : "{ "); - } - issue.PrintTo(out, true); - }, - [&](const TIssue&, ui16) { - out << " }"; - }); - } - if (Issues_.size() > 1) { - out << " ]"; - } - } else { - for (const auto& topIssue: Issues_) { - WalkThroughIssues(topIssue, false, [&](const TIssue& issue, ui16 level) { - auto shift = level * 4; - Indent(out, shift); - out << issue << Endl; - }); - } + if (oneLine) { + bool printWithSpace = false; + if (Issues_.size() > 1) { + printWithSpace = true; + out << "["; + } + for (const auto& topIssue: Issues_) { + WalkThroughIssues(topIssue, false, [&](const TIssue& issue, ui16 level) { + if (level > 0) { + out << " subissue: { "; + } else { + out << (printWithSpace ? " { " : "{ "); + } + issue.PrintTo(out, true); + }, + [&](const TIssue&, ui16) { + out << " }"; + }); + } + if (Issues_.size() > 1) { + out << " ]"; + } + } else { + for (const auto& topIssue: Issues_) { + WalkThroughIssues(topIssue, false, [&](const TIssue& issue, ui16 level) { + auto shift = level * 4; + Indent(out, shift); + out << issue << Endl; + }); + } } } @@ -283,5 +283,5 @@ void Out<NYql::TRange>(IOutputStream & out, const NYql::TRange & range) { template <> void Out<NYql::TIssue>(IOutputStream& out, const NYql::TIssue& error) { - error.PrintTo(out); + error.PrintTo(out); } diff --git a/ydb/library/yql/public/issue/yql_issue.h b/ydb/library/yql/public/issue/yql_issue.h index 7b97674c9df..8b44777e578 100644 --- a/ydb/library/yql/public/issue/yql_issue.h +++ b/ydb/library/yql/public/issue/yql_issue.h @@ -186,17 +186,17 @@ public: const TVector<TIntrusivePtr<TIssue>>& GetSubIssues() const { return Children_; } - - void PrintTo(IOutputStream& out, bool oneLine = false) const; - - TString ToString(bool oneLine = false) const { - TStringStream out; - PrintTo(out, oneLine); - return out.Str(); - } + + void PrintTo(IOutputStream& out, bool oneLine = false) const; + + TString ToString(bool oneLine = false) const { + TStringStream out; + PrintTo(out, oneLine); + return out.Str(); + } }; -void WalkThroughIssues(const TIssue& topIssue, bool leafOnly, std::function<void(const TIssue&, ui16 level)> fn, std::function<void(const TIssue&, ui16 level)> afterChildrenFn = {}); +void WalkThroughIssues(const TIssue& topIssue, bool leafOnly, std::function<void(const TIssue&, ui16 level)> fn, std::function<void(const TIssue&, ui16 level)> afterChildrenFn = {}); /////////////////////////////////////////////////////////////////////////////// // TIssues @@ -278,22 +278,22 @@ public: return Issues_.size(); } - void PrintTo(IOutputStream& out, bool oneLine = false) const; + void PrintTo(IOutputStream& out, bool oneLine = false) const; void PrintWithProgramTo( IOutputStream& out, const TString& programFilename, const TString& programText) const; - inline TString ToString(bool oneLine = false) const { + inline TString ToString(bool oneLine = false) const { TStringStream out; - PrintTo(out, oneLine); + PrintTo(out, oneLine); return out.Str(); } - TString ToOneLineString() const { - return ToString(true); - } - + TString ToOneLineString() const { + return ToString(true); + } + inline void Clear() { Issues_.clear(); } diff --git a/ydb/library/yql/public/issue/yql_issue_ut.cpp b/ydb/library/yql/public/issue/yql_issue_ut.cpp index 87b417da391..30e4c3977a6 100644 --- a/ydb/library/yql/public/issue/yql_issue_ut.cpp +++ b/ydb/library/yql/public/issue/yql_issue_ut.cpp @@ -109,31 +109,31 @@ Y_UNIT_TEST_SUITE(TextWalkerTest) { UNIT_ASSERT_VALUES_EQUAL(pos, TPosition(1, 3)); } } - -Y_UNIT_TEST_SUITE(ToOneLineStringTest) { - Y_UNIT_TEST(OneMessageTest) { - 
TIssues issues; - issues.AddIssue(TPosition(12, 34, "file.abc"), "error"); - UNIT_ASSERT_STRINGS_EQUAL(issues.ToOneLineString(), "{ file.abc:34:12: Error: error }"); - } - - Y_UNIT_TEST(SubIssuesTest) { - TIssue issue(TPosition(12, 34, "file.abc"), "error"); - TIssue subissue("suberror"); - subissue.AddSubIssue(MakeIntrusive<TIssue>("subsuberror")); - issue.AddSubIssue(MakeIntrusive<TIssue>(subissue)); - - TIssues issues; - issues.AddIssue(issue); - UNIT_ASSERT_STRINGS_EQUAL(issues.ToOneLineString(), "{ file.abc:34:12: Error: error subissue: { <main>: Error: suberror subissue: { <main>: Error: subsuberror } } }"); - } - - Y_UNIT_TEST(ManyIssuesTest) { - TIssue issue(TPosition(12, 34, "file.abc"), "error\n"); - issue.AddSubIssue(MakeIntrusive<TIssue>("suberror")); - TIssues issues; - issues.AddIssue(issue); - issues.AddIssue(TPosition(100, 2, "abc.file"), "my\nmessage"); - UNIT_ASSERT_STRINGS_EQUAL(issues.ToOneLineString(), "[ { file.abc:34:12: Error: error subissue: { <main>: Error: suberror } } { abc.file:2:100: Error: my message } ]"); - } -} + +Y_UNIT_TEST_SUITE(ToOneLineStringTest) { + Y_UNIT_TEST(OneMessageTest) { + TIssues issues; + issues.AddIssue(TPosition(12, 34, "file.abc"), "error"); + UNIT_ASSERT_STRINGS_EQUAL(issues.ToOneLineString(), "{ file.abc:34:12: Error: error }"); + } + + Y_UNIT_TEST(SubIssuesTest) { + TIssue issue(TPosition(12, 34, "file.abc"), "error"); + TIssue subissue("suberror"); + subissue.AddSubIssue(MakeIntrusive<TIssue>("subsuberror")); + issue.AddSubIssue(MakeIntrusive<TIssue>(subissue)); + + TIssues issues; + issues.AddIssue(issue); + UNIT_ASSERT_STRINGS_EQUAL(issues.ToOneLineString(), "{ file.abc:34:12: Error: error subissue: { <main>: Error: suberror subissue: { <main>: Error: subsuberror } } }"); + } + + Y_UNIT_TEST(ManyIssuesTest) { + TIssue issue(TPosition(12, 34, "file.abc"), "error\n"); + issue.AddSubIssue(MakeIntrusive<TIssue>("suberror")); + TIssues issues; + issues.AddIssue(issue); + issues.AddIssue(TPosition(100, 2, "abc.file"), "my\nmessage"); + UNIT_ASSERT_STRINGS_EQUAL(issues.ToOneLineString(), "[ { file.abc:34:12: Error: error subissue: { <main>: Error: suberror } } { abc.file:2:100: Error: my message } ]"); + } +} diff --git a/ydb/library/yql/utils/actors/rich_actor.h b/ydb/library/yql/utils/actors/rich_actor.h index a0f2ddd94b9..513a16506a4 100644 --- a/ydb/library/yql/utils/actors/rich_actor.h +++ b/ydb/library/yql/utils/actors/rich_actor.h @@ -14,10 +14,10 @@ public: : NActors::TActor<TDerived>(func) { } - ~TRichActor() { - ForgetChildren(); - } - + ~TRichActor() { + ForgetChildren(); + } + virtual void DoPassAway() { } void PassAway() final { @@ -37,13 +37,13 @@ public: Children.clear(); } - void ForgetChildren() { // Free memory - for (auto&& [child, killEvent] : Children) { - delete killEvent; - } - Children.clear(); - } - + void ForgetChildren() { // Free memory + for (auto&& [child, killEvent] : Children) { + delete killEvent; + } + Children.clear(); + } + void UnsubscribeAll() { auto copy = Subscriptions; for (auto id : copy) { @@ -81,10 +81,10 @@ public: } } - void AddChild(NActors::TActorId id, NActors::IEventBase* killEvent = nullptr) { - Children.insert(std::make_pair(id, killEvent)); - } - + void AddChild(NActors::TActorId id, NActors::IEventBase* killEvent = nullptr) { + Children.insert(std::make_pair(id, killEvent)); + } + void Subscribe(ui32 nodeId) { Subscriptions.insert(nodeId); } diff --git a/ydb/library/yql/utils/log/log_component.h b/ydb/library/yql/utils/log/log_component.h index d7a1e10ca16..94c1003de71 100644 --- 
a/ydb/library/yql/utils/log/log_component.h +++ b/ydb/library/yql/utils/log/log_component.h @@ -29,7 +29,7 @@ enum class EComponent { ProviderDq, ProviderClickHouse, ProviderYdb, - ProviderPq, + ProviderPq, ProviderS3, CoreDq, // <--- put other log components here @@ -70,7 +70,7 @@ struct EComponentHelpers { case EComponent::ProviderDq: return TStringBuf("DQ"); case EComponent::ProviderClickHouse: return TStringBuf("CLICKHOUSE"); case EComponent::ProviderYdb: return TStringBuf("YDB"); - case EComponent::ProviderPq: return TStringBuf("PQ"); + case EComponent::ProviderPq: return TStringBuf("PQ"); case EComponent::ProviderS3: return TStringBuf("S3"); case EComponent::CoreDq: return TStringBuf("core dq"); default: @@ -100,7 +100,7 @@ struct EComponentHelpers { if (str == TStringBuf("DQ")) return EComponent::ProviderDq; if (str == TStringBuf("CLICKHOUSE")) return EComponent::ProviderClickHouse; if (str == TStringBuf("YDB")) return EComponent::ProviderYdb; - if (str == TStringBuf("PQ")) return EComponent::ProviderPq; + if (str == TStringBuf("PQ")) return EComponent::ProviderPq; if (str == TStringBuf("S3")) return EComponent::ProviderS3; if (str == TStringBuf("core dq")) return EComponent::CoreDq; ythrow yexception() << "unknown log component: '" << str << '\''; diff --git a/ydb/public/api/grpc/draft/ydb_persqueue_v1.proto b/ydb/public/api/grpc/draft/ydb_persqueue_v1.proto index 873ee38b3cf..587ffc5ab7a 100644 --- a/ydb/public/api/grpc/draft/ydb_persqueue_v1.proto +++ b/ydb/public/api/grpc/draft/ydb_persqueue_v1.proto @@ -67,7 +67,7 @@ service PersQueueService { * issue(description, ...) * <---------------- */ - rpc MigrationStreamingRead(stream MigrationStreamingReadClientMessage) returns (stream MigrationStreamingReadServerMessage); + rpc MigrationStreamingRead(stream MigrationStreamingReadClientMessage) returns (stream MigrationStreamingReadServerMessage); // Get information about reading rpc GetReadSessionsInfo(ReadInfoRequest) returns (ReadInfoResponse); diff --git a/ydb/public/api/grpc/ya.make b/ydb/public/api/grpc/ya.make index 24173a0264d..5271d04bac8 100644 --- a/ydb/public/api/grpc/ya.make +++ b/ydb/public/api/grpc/ya.make @@ -20,7 +20,7 @@ SRCS( ydb_monitoring_v1.proto ydb_operation_v1.proto ydb_cms_v1.proto - ydb_rate_limiter_v1.proto + ydb_rate_limiter_v1.proto ydb_scheme_v1.proto ydb_scripting_v1.proto ydb_table_v1.proto diff --git a/ydb/public/api/grpc/ydb_rate_limiter_v1.proto b/ydb/public/api/grpc/ydb_rate_limiter_v1.proto index be477d1dd59..b9641ed555d 100644 --- a/ydb/public/api/grpc/ydb_rate_limiter_v1.proto +++ b/ydb/public/api/grpc/ydb_rate_limiter_v1.proto @@ -1,35 +1,35 @@ -syntax = "proto3"; - -package Ydb.RateLimiter.V1; - -option java_package = "com.yandex.ydb.rate_limiter.v1"; -option java_outer_classname = "RateLimiterGrpc"; -option java_multiple_files = true; - +syntax = "proto3"; + +package Ydb.RateLimiter.V1; + +option java_package = "com.yandex.ydb.rate_limiter.v1"; +option java_outer_classname = "RateLimiterGrpc"; +option java_multiple_files = true; + import "ydb/public/api/protos/ydb_rate_limiter.proto"; - -// Service that implements distributed rate limiting. -// -// To use rate limiter functionality you need an existing coordination node. - -service RateLimiterService { - // Control plane API - - // Create a new resource in existing coordination node. - rpc CreateResource(CreateResourceRequest) returns (CreateResourceResponse); - - // Update a resource in coordination node. 
- rpc AlterResource(AlterResourceRequest) returns (AlterResourceResponse); - - // Delete a resource from coordination node. - rpc DropResource(DropResourceRequest) returns (DropResourceResponse); - - // List resources in given coordination node. - rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse); - - // Describe properties of resource in coordination node. - rpc DescribeResource(DescribeResourceRequest) returns (DescribeResourceResponse); + +// Service that implements distributed rate limiting. +// +// To use rate limiter functionality you need an existing coordination node. + +service RateLimiterService { + // Control plane API + + // Create a new resource in existing coordination node. + rpc CreateResource(CreateResourceRequest) returns (CreateResourceResponse); + + // Update a resource in coordination node. + rpc AlterResource(AlterResourceRequest) returns (AlterResourceResponse); + + // Delete a resource from coordination node. + rpc DropResource(DropResourceRequest) returns (DropResourceResponse); + + // List resources in given coordination node. + rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse); + + // Describe properties of resource in coordination node. + rpc DescribeResource(DescribeResourceRequest) returns (DescribeResourceResponse); // Take units for usage of a resource in coordination node. rpc AcquireResource(AcquireResourceRequest) returns (AcquireResourceResponse); -} +} diff --git a/ydb/public/api/protos/draft/yq_private.proto b/ydb/public/api/protos/draft/yq_private.proto index 2af1c2f3edc..3e47cd4a93e 100644 --- a/ydb/public/api/protos/draft/yq_private.proto +++ b/ydb/public/api/protos/draft/yq_private.proto @@ -25,17 +25,17 @@ message SignedIdentity { string signature = 2; } -message TopicConsumer { - string database_id = 1; - string database = 2; - string topic_path = 3; - string consumer_name = 4; - string cluster_endpoint = 5; - bool use_ssl = 6; - string token_name = 7; - bool add_bearer_to_token = 8; -} - +message TopicConsumer { + string database_id = 1; + string database = 2; + string topic_path = 3; + string consumer_name = 4; + string cluster_endpoint = 5; + bool use_ssl = 6; + string token_name = 7; + bool add_bearer_to_token = 8; +} + message GetTaskResult { message Task { // come back later in 10 sec ? 
@@ -67,7 +67,7 @@ message GetTaskResult { bool automatic = 22; string query_name = 23; google.protobuf.Timestamp deadline = 24; - YandexQuery.StreamingDisposition disposition = 25; + YandexQuery.StreamingDisposition disposition = 25; } repeated Task tasks = 1; } @@ -93,9 +93,9 @@ message PingTaskRequest { string ast = 12; string plan = 13; bool resign_query = 14; - repeated TopicConsumer created_topic_consumers = 17; - YandexQuery.StateLoadMode state_load_mode = 18; - YandexQuery.StreamingDisposition disposition = 19; + repeated TopicConsumer created_topic_consumers = 17; + YandexQuery.StateLoadMode state_load_mode = 18; + YandexQuery.StreamingDisposition disposition = 19; Ydb.Operations.OperationParams operation_params = 15; string scope = 100; //TODO remove google.protobuf.Timestamp started_at = 101; diff --git a/ydb/public/api/protos/ya.make b/ydb/public/api/protos/ya.make index 8a156403a3d..56fe0b7759a 100644 --- a/ydb/public/api/protos/ya.make +++ b/ydb/public/api/protos/ya.make @@ -37,7 +37,7 @@ SRCS( ydb_monitoring.proto ydb_operation.proto ydb_query_stats.proto - ydb_rate_limiter.proto + ydb_rate_limiter.proto ydb_scheme.proto ydb_scripting.proto ydb_status_codes.proto diff --git a/ydb/public/api/protos/ydb_coordination.proto b/ydb/public/api/protos/ydb_coordination.proto index 378045e78e8..e615dbacfa0 100644 --- a/ydb/public/api/protos/ydb_coordination.proto +++ b/ydb/public/api/protos/ydb_coordination.proto @@ -34,20 +34,20 @@ enum ConsistencyMode { } /** - * Counters mode - */ -enum RateLimiterCountersMode { - // The default or current value - RATE_LIMITER_COUNTERS_MODE_UNSET = 0; - - // Aggregated counters for resource tree - RATE_LIMITER_COUNTERS_MODE_AGGREGATED = 1; - - // Counters on every resource - RATE_LIMITER_COUNTERS_MODE_DETAILED = 2; -} - -/** + * Counters mode + */ +enum RateLimiterCountersMode { + // The default or current value + RATE_LIMITER_COUNTERS_MODE_UNSET = 0; + + // Aggregated counters for resource tree + RATE_LIMITER_COUNTERS_MODE_AGGREGATED = 1; + + // Counters on every resource + RATE_LIMITER_COUNTERS_MODE_DETAILED = 2; +} + +/** * Configuration settings for a coordination node */ message Config { @@ -65,9 +65,9 @@ message Config { // Consistency mode for attach operations ConsistencyMode attach_consistency_mode = 5; - - // Rate limiter counters mode - RateLimiterCountersMode rate_limiter_counters_mode = 6; + + // Rate limiter counters mode + RateLimiterCountersMode rate_limiter_counters_mode = 6; } /** diff --git a/ydb/public/api/protos/ydb_persqueue_v1.proto b/ydb/public/api/protos/ydb_persqueue_v1.proto index 93a7fb6c79d..f3bc911d662 100644 --- a/ydb/public/api/protos/ydb_persqueue_v1.proto +++ b/ydb/public/api/protos/ydb_persqueue_v1.proto @@ -16,14 +16,14 @@ option cc_enable_arenas = true; // packed encoding ('message' types can't be packed encoded). In non-repeated fields we use 'ms' for consistency. // * Any message with non-empty 'issues' property leads to streaming RPC termination. -enum Codec { - CODEC_UNSPECIFIED = 0; - CODEC_RAW = 1; - CODEC_GZIP = 2; - CODEC_LZOP = 3; - CODEC_ZSTD = 4; -} - +enum Codec { + CODEC_UNSPECIFIED = 0; + CODEC_RAW = 1; + CODEC_GZIP = 2; + CODEC_LZOP = 3; + CODEC_ZSTD = 4; +} + message SessionMetaValue { map<string, string> value = 1; } @@ -95,7 +95,7 @@ message StreamingWriteClientMessage { // If message is bigger than max_block_size - it will be transferred as SIZE/max_block_size blocks. 
All of // this blocks will be with block_count = 0 but not the last one - last one's block_count will be greater than 0; // Blocks can be reordered upto max_flush_window_size of uncompressed data. - // Each block contains concatenated client messages, compressed by chosen codec. + // Each block contains concatenated client messages, compressed by chosen codec. // If there is not full client message inside block, then all block contains only this part of message. // blocks: A A A B B B BCDE // offset: 1 1 1 2 2 2 2 @@ -152,8 +152,8 @@ message StreamingWriteServerMessage { // Block (see StreamingWriteClientMessage.WriteRequest.blocks_data) format version supported by server or configured for a topic. Client must write data only with them. int64 block_format_version = 6; - // Client can only use compression codecs from this set to write messages to topic, session will be closed with BAD_REQUEST otherwise. - repeated Codec supported_codecs = 10; + // Client can only use compression codecs from this set to write messages to topic, session will be closed with BAD_REQUEST otherwise. + repeated Codec supported_codecs = 10; // Maximal flush window size choosed by server. Size of uncompressed data not sended to server must not be bigger than flush window size. // In other words, this is maximal size of gap inside uncompressed data, which is not sended to server yet. @@ -235,428 +235,428 @@ message CommitOffsetRange { uint64 end_offset = 3; } -// TODO: replace with it actual protocol client message -/** - * Request for read session. Contains one of: - * InitRequest - handshake request. - * ReadRequest - request for data. - * CommitRequest - request for commit of some read data. - * CreatePartitionStreamResponse - signal for server that client is ready to get data from partition. - * DestroyPartitionStreamResponse - signal for server that client finished working with partition. Must be sent only after corresponding Release request from server. - * StopReadRequest - signal for server that client is not ready to get more data from this partition. - * ResumeReadRequest - signal for server that client is ready to get more data from this partition. - */ -message StreamingReadClientMessageNew { - oneof client_message { - InitRequest init_request = 1; - ReadRequest read_request = 2; - CreatePartitionStreamResponse create_partition_stream_response = 3; - CommitRequest commit_request = 4; - DestroyPartitionStreamResponse destroy_partition_stream_response = 5; - StopReadRequest stop_read_request = 6; - ResumeReadRequest resume_read_request = 7; - PartitionStreamStatusRequest partition_stream_status_request = 8; - AddTopicRequest add_topic_request = 9; - RemoveTopicRequest remove_topic_request = 10; - } - - // User credentials if update is needed or empty string. - string token = 20; - - // Handshake request. - message InitRequest { - // Message that describes topic to read. - // Topics that will be read by this session. - repeated TopicReadSettings topics_read_settings = 1; - // Flag that indicates reading only of original topics in cluster or all including mirrored. - bool read_only_original = 2; - // Path of consumer that is used for reading by this session. - string consumer = 3; - - // Skip all messages that has write timestamp smaller than now - max_time_lag_ms. - int64 max_lag_duration_ms = 4; - // Read data only after this timestamp from all topics. - int64 start_from_written_at_ms = 5; - - // Maximum block format version supported by the client. 
Server will asses this parameter and return actual data blocks version in - // StreamingReadServerMessage.InitResponse.block_format_version_by_topic (and StreamingReadServerMessage.AddTopicResponse.block_format_version) - // or error if client will not be able to read data. - int64 max_supported_block_format_version = 6; - - // Maximal size of client cache for message_group_id, ip and meta, per partition. - // There is separate caches for each partition partition streams. - // There is separate caches for message group identifiers, ip and meta inside one partition partition stream. - int64 max_meta_cache_size = 10; - - // State of client read session. Could be provided to server for retries. - message State { - message PartitionStreamState { - enum Status { - // Not used state. - STATUS_UNSPECIFIED = 0; - // Client seen Create message but not yet responded to server with Created message. - CREATING = 1; - // Client seen Destroy message but not yet responded with Released message. - DESTROYING = 2; - // Client sent Created or ResumeReadRequest message to server for this partition stream. - READING = 3; - // Client sent StopReadRequest for this partition stream. - STOPPED = 4; - } - // Partition partition stream. - PartitionStream partition_stream = 1; - // Current read offset if has one. Actual for states DESTROYING, READING and STOPPED. - int64 read_offset = 2; - // Ranges of committed by client offsets. - repeated OffsetsRange offset_ranges = 3; - // Status of partition stream. - Status status = 4; - } - repeated PartitionStreamState partition_streams_states = 1; - } - - // Session identifier for retries. Must be the same as session_id from Inited server response. If this is first connect, not retry - do not use this field. - string session_id = 100; - // 0 for first init message and incremental value for connect retries. - int64 connection_attempt = 101; - // Formed state for retries. If not retry - do not use this field. - State state = 102; - - int64 idle_timeout_ms = 200; - } - - // TODO: add topics/groups and remove them from reading - - // Message that represents client readiness for receiving more data. - message ReadRequest { - // Client acquired this amount of free bytes more for buffer. Server can send more data at most of this uncompressed size. - // Subsequent messages with 5 and 10 request_uncompressed_size are treated by server that it can send messages for at most 15 bytes. - int64 request_uncompressed_size = 1; - } - - // Signal for server that cient is ready to recive data for partition. - message CreatePartitionStreamResponse { - // Partition stream identifier of partition to start read. - int64 partition_stream_id = 1; - - // Start reading from partition offset that is not less than read_offset. - // Init.max_time_lag_ms and Init.read_timestamp_ms could lead to skip of more messages. - // The same with actual committed offset. Regardless of set read_offset server will return data from maximal offset from read_offset, actual committed offset - // and offsets calculated from Init.max_time_lag_ms and Init.read_timestamp_ms. - int64 read_offset = 2; - // All messages with offset less than commit_offset are processed by client. Server will commit this position if this is not done yet. - int64 commit_offset = 3; - - // This option will enable sanity check on server for read_offset. Server will verify that read_offset is no less that actual committed offset. - // If verification will fail then server will kill this read session and client will find out error in reading logic. 
- // If client is not setting read_offset, sanity check will fail so do not set verify_read_offset if you not setting correct read_offset. - bool verify_read_offset = 4; - - } - - // Signal for server that client finished working with this partition. Must be sent only after corresponding Release request from server. - // Server will give this partition to other read session only after Released signal. - message DestroyPartitionStreamResponse { - // Partition stream identifier of partition partition stream that is released by client. - int64 partition_stream_id = 1; - } - - // Signal for server that client is not ready to recieve more data from this partition. - message StopReadRequest { - repeated int64 partition_stream_ids = 1; - } - - // Signal for server that client is ready to receive more data from this partition. - message ResumeReadRequest { - repeated int64 partition_stream_ids = 1; - - // Offset to start reading - may be smaller than known one in case of dropping of read-ahead in client lib. - repeated int64 read_offsets = 2; - - // Cookie for matching data from PartitionStream after resuming. Must be greater than zero. - repeated int64 resume_cookies = 3; - } - - // Signal for server that client processed some read data. - message CommitRequest { - // Partition offsets that indicates processed data. - repeated PartitionCommit commits = 1; - } - - message PartitionStreamStatusRequest { - int64 partition_stream_id = 1; - } - - // Add topic to current read session - message AddTopicRequest { - TopicReadSettings topic_read_settings = 1; - } - - // Remove topic from current read session - message RemoveTopicRequest { - string topic = 1; - } - - message TopicReadSettings { - // Topic path. - string topic = 1; - // Partition groups that will be read by this session. - // If list is empty - then session will read all partition groups. - repeated int64 partition_group_ids = 2; - // Read data only after this timestamp from this topic. - int64 start_from_written_at_ms = 3; - } - - /** - * Message that is used for describing commit. - */ - message PartitionCommit { - // Identifier of partition stream with data to commit. - int64 partition_stream_id = 1; - // Processed ranges. - repeated OffsetsRange offsets = 2; - } -} - -// TODO: replace with it actual protocol server message +// TODO: replace with it actual protocol client message /** - * Response for read session. Contains one of : - * InitResponse - handshake response from server. - * BatchReadResponse - portion of data. - * CommitResponse - acknowledgment for commit. - * CreatePartitionStreamRequest - command from server to create a partition partition stream. - * DestroyPartitionStreamRequest - command from server to destroy a partition partition stream. - */ -message StreamingReadServerMessageNew { - oneof server_message { - InitResponse init_response = 3; - BatchReadResponse batch_read_response = 4; - CreatePartitionStreamRequest create_partition_stream_request = 5; - DestroyPartitionStreamRequest destroy_partition_stream_request = 6; - CommitResponse commit_response = 7; - PartitionStreamStatusResponse partition_stream_status_response = 8; - StopReadResponse stop_read_response = 9; - ResumeReadResponse resume_read_response = 10; - AddTopicResponse add_topic_response = 11; - RemoveTopicResponse remove_topic_response = 12; - } - - Ydb.StatusIds.StatusCode status = 1; - - repeated Ydb.Issue.IssueMessage issues = 2; - - // Handshake response. - message InitResponse { - // Read session identifier for debug purposes. 
- string session_id = 1; - // Block format version of data client will receive from topics. - map<string, int64> block_format_version_by_topic = 2; - - // Choosed maximan cache size by server. - // Client must use cache of this size. Could change on retries - reduce size of cache in this case. - int64 max_meta_cache_size = 10; - } - - // Command to create a partition partition stream. - // Client must react on this signal by sending StartRead when ready recieve data from this partition. - message CreatePartitionStreamRequest { - // Partition partition stream description. - PartitionStream partition_stream = 1; - - // Actual committed offset. - int64 committed_offset = 2; - // Offset of first not existing message in partition till now. - int64 end_offset = 3; - - } - - // Command to destroy concrete partition stream. - message DestroyPartitionStreamRequest { - // Identifier of partition partition stream that is ready to be closed by server. - int64 partition_stream_id = 1; - - // Flag of gracefull or not destroy. - // If True then server is waiting for Destroyed signal from client before giving of this partition for other read session. - // Server will not send more data from this partition. - // Client can process all received data and wait for commit and only after send Destroyed signal. - // If False then server gives partition for other session right now. - // All futher commits for this PartitionStream has no effect. Server is not waiting for Destroyed signal. - bool graceful = 2; - - // Last known committed offset. - int64 committed_offset = 3; - } - - // Acknowledgement for commits. - message CommitResponse { - // Per-partition commit representation. - message PartitionCommittedOffset { - // Partition partition stream identifier. - int64 partition_stream_id = 1; - // Last committed offset. - int64 committed_offset = 2; - } - // Partitions with progress. - repeated PartitionCommittedOffset partitions_committed_offsets = 1; - } - - // Readed data. - message BatchReadResponse { - // One client message representation. - // Client lib must send commit right now for all skipped data (from it's read offset till first offset in range). - message PartitionData { - // Data inside this message is from partition stream with this identifier. - int64 partition_stream_id = 1; - - // Offsets in partition that assigned for messages. - // Unique value for clientside deduplication - (topic, cluster, partition_id, offset). - repeated int64 offsets = 2; - // Sequence numbers that provided with messages on write from client. - // Same size as offsets. - // Unique value for clientside deduplication - (topic, cluster, message_group_id, sequence_number). - repeated int64 sequence_numbers = 3; - // Timestamps of creation of messages provided on write from client. - // Same size as offsets. - repeated int64 created_at_ms = 4; - // Timestamps of writing in partition for client messages. - // Same size as offsets. - repeated int64 written_at_ms = 5; - - // New messageGroupIds for updating cache. - // Size of vector is the same as number of negative values in message_group_id_indexes. - repeated string message_group_ids = 6; - // Indexes of messageGroupIds. - // same size as offsets. - // Negative values (-X) means - put first not used messageGroupId from message_group_ids on index X in cache and use it for this client message. - // Positive values (X) means -use element by index X from cache for this client message. Do not change state of cache. 
- // Assumptions: - // - Server will use positive values only for proposed before indexes. - // - Each value is from 1 to max_meta_cache_size by abs. - // - Do not make assumptions about choosing algorihm. - // - There is separate caches of size max_meta_cache_size for different partition and different metadata fileds - message_group_id, ip and session_meta_data. - // - Number of negative values in message_group_id_indexes vector is the same as length of message_group_ids vector. - // Example: - // max_meta_cache_size : 2 - // Cache indexes : 1 2 - // Cache state before processing : s0,? // ? means not set yet. - // - // message_group_ids : s1 s2 s3 s1 - // message_group_id_indexes : -1 -2 1 2 1 1 -1 2 -2 - // cache state : s1,? s1,s2 s1,s2 s1,s2 s1,s2 s1,s2 s3,s2 s3,s2 s3,s1 - // real message group ids : s1 s2 s1 s2 s1 s1 s3 s2 s1 - // Cache indexes : 1 2 - // Cache state after processing : s3,s1 - repeated sint64 message_group_id_indexes = 7; - - // New ips for updating ip cache. - repeated string ips = 8; - // Same as message_group_id_indexes but for ips. - repeated sint64 ip_indexes = 9; - - // New session meta datas for updating cache. - repeated SessionMetaValue message_session_meta = 10; - // Same as message_group_id_indexes but for session meta data. - repeated sint64 message_session_meta_indexes = 11; - - // Client messages sizes. - // Same size as offsets. - repeated int64 message_sizes = 12; - - // Block must contain whole client message when it's size is not bigger that max_block_size. - // If message is bigger than max_block_size - it will be transferred as SIZE/max_block_size blocks. All of this blocks will be with block_count = 0 but not the last one - last one's block_count will be 0; - // Blocks can be reordered upto provided by client uncompressed free buffer size. - // blocks: A A A B B B CDE - // offset: 1 1 1 4 4 4 6 - // part_number: 0 1 2 0 1 2 0 - // count: 0 0 1 0 0 1 3 - // Offset will be the same as in Offsets. - repeated int64 blocks_offsets = 13; - repeated int64 blocks_part_numbers = 14; - // How many complete messages and imcomplete messages end parts (one at most) this block contains - repeated int64 blocks_message_counts = 15; - repeated int64 blocks_uncompressed_sizes = 16; + * Request for read session. Contains one of: + * InitRequest - handshake request. + * ReadRequest - request for data. + * CommitRequest - request for commit of some read data. + * CreatePartitionStreamResponse - signal for server that client is ready to get data from partition. + * DestroyPartitionStreamResponse - signal for server that client finished working with partition. Must be sent only after corresponding Release request from server. + * StopReadRequest - signal for server that client is not ready to get more data from this partition. + * ResumeReadRequest - signal for server that client is ready to get more data from this partition. + */ +message StreamingReadClientMessageNew { + oneof client_message { + InitRequest init_request = 1; + ReadRequest read_request = 2; + CreatePartitionStreamResponse create_partition_stream_response = 3; + CommitRequest commit_request = 4; + DestroyPartitionStreamResponse destroy_partition_stream_response = 5; + StopReadRequest stop_read_request = 6; + ResumeReadRequest resume_read_request = 7; + PartitionStreamStatusRequest partition_stream_status_request = 8; + AddTopicRequest add_topic_request = 9; + RemoveTopicRequest remove_topic_request = 10; + } + + // User credentials if update is needed or empty string. 
+ string token = 20; + + // Handshake request. + message InitRequest { + // Message that describes topic to read. + // Topics that will be read by this session. + repeated TopicReadSettings topics_read_settings = 1; + // Flag that indicates reading only of original topics in cluster or all including mirrored. + bool read_only_original = 2; + // Path of consumer that is used for reading by this session. + string consumer = 3; + + // Skip all messages that has write timestamp smaller than now - max_time_lag_ms. + int64 max_lag_duration_ms = 4; + // Read data only after this timestamp from all topics. + int64 start_from_written_at_ms = 5; + + // Maximum block format version supported by the client. Server will asses this parameter and return actual data blocks version in + // StreamingReadServerMessage.InitResponse.block_format_version_by_topic (and StreamingReadServerMessage.AddTopicResponse.block_format_version) + // or error if client will not be able to read data. + int64 max_supported_block_format_version = 6; + + // Maximal size of client cache for message_group_id, ip and meta, per partition. + // There is separate caches for each partition partition streams. + // There is separate caches for message group identifiers, ip and meta inside one partition partition stream. + int64 max_meta_cache_size = 10; + + // State of client read session. Could be provided to server for retries. + message State { + message PartitionStreamState { + enum Status { + // Not used state. + STATUS_UNSPECIFIED = 0; + // Client seen Create message but not yet responded to server with Created message. + CREATING = 1; + // Client seen Destroy message but not yet responded with Released message. + DESTROYING = 2; + // Client sent Created or ResumeReadRequest message to server for this partition stream. + READING = 3; + // Client sent StopReadRequest for this partition stream. + STOPPED = 4; + } + // Partition partition stream. + PartitionStream partition_stream = 1; + // Current read offset if has one. Actual for states DESTROYING, READING and STOPPED. + int64 read_offset = 2; + // Ranges of committed by client offsets. + repeated OffsetsRange offset_ranges = 3; + // Status of partition stream. + Status status = 4; + } + repeated PartitionStreamState partition_streams_states = 1; + } + + // Session identifier for retries. Must be the same as session_id from Inited server response. If this is first connect, not retry - do not use this field. + string session_id = 100; + // 0 for first init message and incremental value for connect retries. + int64 connection_attempt = 101; + // Formed state for retries. If not retry - do not use this field. + State state = 102; + + int64 idle_timeout_ms = 200; + } + + // TODO: add topics/groups and remove them from reading + + // Message that represents client readiness for receiving more data. + message ReadRequest { + // Client acquired this amount of free bytes more for buffer. Server can send more data at most of this uncompressed size. + // Subsequent messages with 5 and 10 request_uncompressed_size are treated by server that it can send messages for at most 15 bytes. + int64 request_uncompressed_size = 1; + } + + // Signal for server that cient is ready to recive data for partition. + message CreatePartitionStreamResponse { + // Partition stream identifier of partition to start read. + int64 partition_stream_id = 1; + + // Start reading from partition offset that is not less than read_offset. 
+ // Init.max_time_lag_ms and Init.read_timestamp_ms could lead to skip of more messages. + // The same with actual committed offset. Regardless of set read_offset server will return data from maximal offset from read_offset, actual committed offset + // and offsets calculated from Init.max_time_lag_ms and Init.read_timestamp_ms. + int64 read_offset = 2; + // All messages with offset less than commit_offset are processed by client. Server will commit this position if this is not done yet. + int64 commit_offset = 3; + + // This option will enable sanity check on server for read_offset. Server will verify that read_offset is no less that actual committed offset. + // If verification will fail then server will kill this read session and client will find out error in reading logic. + // If client is not setting read_offset, sanity check will fail so do not set verify_read_offset if you not setting correct read_offset. + bool verify_read_offset = 4; + + } + + // Signal for server that client finished working with this partition. Must be sent only after corresponding Release request from server. + // Server will give this partition to other read session only after Released signal. + message DestroyPartitionStreamResponse { + // Partition stream identifier of partition partition stream that is released by client. + int64 partition_stream_id = 1; + } + + // Signal for server that client is not ready to recieve more data from this partition. + message StopReadRequest { + repeated int64 partition_stream_ids = 1; + } + + // Signal for server that client is ready to receive more data from this partition. + message ResumeReadRequest { + repeated int64 partition_stream_ids = 1; + + // Offset to start reading - may be smaller than known one in case of dropping of read-ahead in client lib. + repeated int64 read_offsets = 2; + + // Cookie for matching data from PartitionStream after resuming. Must be greater than zero. + repeated int64 resume_cookies = 3; + } + + // Signal for server that client processed some read data. + message CommitRequest { + // Partition offsets that indicates processed data. + repeated PartitionCommit commits = 1; + } + + message PartitionStreamStatusRequest { + int64 partition_stream_id = 1; + } + + // Add topic to current read session + message AddTopicRequest { + TopicReadSettings topic_read_settings = 1; + } + + // Remove topic from current read session + message RemoveTopicRequest { + string topic = 1; + } + + message TopicReadSettings { + // Topic path. + string topic = 1; + // Partition groups that will be read by this session. + // If list is empty - then session will read all partition groups. + repeated int64 partition_group_ids = 2; + // Read data only after this timestamp from this topic. + int64 start_from_written_at_ms = 3; + } + + /** + * Message that is used for describing commit. + */ + message PartitionCommit { + // Identifier of partition stream with data to commit. + int64 partition_stream_id = 1; + // Processed ranges. + repeated OffsetsRange offsets = 2; + } +} + +// TODO: replace with it actual protocol server message +/** + * Response for read session. Contains one of : + * InitResponse - handshake response from server. + * BatchReadResponse - portion of data. + * CommitResponse - acknowledgment for commit. + * CreatePartitionStreamRequest - command from server to create a partition partition stream. + * DestroyPartitionStreamRequest - command from server to destroy a partition partition stream. 
+ */ +message StreamingReadServerMessageNew { + oneof server_message { + InitResponse init_response = 3; + BatchReadResponse batch_read_response = 4; + CreatePartitionStreamRequest create_partition_stream_request = 5; + DestroyPartitionStreamRequest destroy_partition_stream_request = 6; + CommitResponse commit_response = 7; + PartitionStreamStatusResponse partition_stream_status_response = 8; + StopReadResponse stop_read_response = 9; + ResumeReadResponse resume_read_response = 10; + AddTopicResponse add_topic_response = 11; + RemoveTopicResponse remove_topic_response = 12; + } + + Ydb.StatusIds.StatusCode status = 1; + + repeated Ydb.Issue.IssueMessage issues = 2; + + // Handshake response. + message InitResponse { + // Read session identifier for debug purposes. + string session_id = 1; + // Block format version of data client will receive from topics. + map<string, int64> block_format_version_by_topic = 2; + + // Choosed maximan cache size by server. + // Client must use cache of this size. Could change on retries - reduce size of cache in this case. + int64 max_meta_cache_size = 10; + } + + // Command to create a partition partition stream. + // Client must react on this signal by sending StartRead when ready recieve data from this partition. + message CreatePartitionStreamRequest { + // Partition partition stream description. + PartitionStream partition_stream = 1; + + // Actual committed offset. + int64 committed_offset = 2; + // Offset of first not existing message in partition till now. + int64 end_offset = 3; + + } + + // Command to destroy concrete partition stream. + message DestroyPartitionStreamRequest { + // Identifier of partition partition stream that is ready to be closed by server. + int64 partition_stream_id = 1; + + // Flag of gracefull or not destroy. + // If True then server is waiting for Destroyed signal from client before giving of this partition for other read session. + // Server will not send more data from this partition. + // Client can process all received data and wait for commit and only after send Destroyed signal. + // If False then server gives partition for other session right now. + // All futher commits for this PartitionStream has no effect. Server is not waiting for Destroyed signal. + bool graceful = 2; + + // Last known committed offset. + int64 committed_offset = 3; + } + + // Acknowledgement for commits. + message CommitResponse { + // Per-partition commit representation. + message PartitionCommittedOffset { + // Partition partition stream identifier. + int64 partition_stream_id = 1; + // Last committed offset. + int64 committed_offset = 2; + } + // Partitions with progress. + repeated PartitionCommittedOffset partitions_committed_offsets = 1; + } + + // Readed data. + message BatchReadResponse { + // One client message representation. + // Client lib must send commit right now for all skipped data (from it's read offset till first offset in range). + message PartitionData { + // Data inside this message is from partition stream with this identifier. + int64 partition_stream_id = 1; + + // Offsets in partition that assigned for messages. + // Unique value for clientside deduplication - (topic, cluster, partition_id, offset). + repeated int64 offsets = 2; + // Sequence numbers that provided with messages on write from client. + // Same size as offsets. + // Unique value for clientside deduplication - (topic, cluster, message_group_id, sequence_number). 
+ repeated int64 sequence_numbers = 3; + // Timestamps of creation of messages provided on write from client. + // Same size as offsets. + repeated int64 created_at_ms = 4; + // Timestamps of writing in partition for client messages. + // Same size as offsets. + repeated int64 written_at_ms = 5; + + // New messageGroupIds for updating cache. + // Size of vector is the same as number of negative values in message_group_id_indexes. + repeated string message_group_ids = 6; + // Indexes of messageGroupIds. + // same size as offsets. + // Negative values (-X) means - put first not used messageGroupId from message_group_ids on index X in cache and use it for this client message. + // Positive values (X) means -use element by index X from cache for this client message. Do not change state of cache. + // Assumptions: + // - Server will use positive values only for proposed before indexes. + // - Each value is from 1 to max_meta_cache_size by abs. + // - Do not make assumptions about choosing algorihm. + // - There is separate caches of size max_meta_cache_size for different partition and different metadata fileds - message_group_id, ip and session_meta_data. + // - Number of negative values in message_group_id_indexes vector is the same as length of message_group_ids vector. + // Example: + // max_meta_cache_size : 2 + // Cache indexes : 1 2 + // Cache state before processing : s0,? // ? means not set yet. + // + // message_group_ids : s1 s2 s3 s1 + // message_group_id_indexes : -1 -2 1 2 1 1 -1 2 -2 + // cache state : s1,? s1,s2 s1,s2 s1,s2 s1,s2 s1,s2 s3,s2 s3,s2 s3,s1 + // real message group ids : s1 s2 s1 s2 s1 s1 s3 s2 s1 + // Cache indexes : 1 2 + // Cache state after processing : s3,s1 + repeated sint64 message_group_id_indexes = 7; + + // New ips for updating ip cache. + repeated string ips = 8; + // Same as message_group_id_indexes but for ips. + repeated sint64 ip_indexes = 9; + + // New session meta datas for updating cache. + repeated SessionMetaValue message_session_meta = 10; + // Same as message_group_id_indexes but for session meta data. + repeated sint64 message_session_meta_indexes = 11; + + // Client messages sizes. + // Same size as offsets. + repeated int64 message_sizes = 12; + + // Block must contain whole client message when it's size is not bigger that max_block_size. + // If message is bigger than max_block_size - it will be transferred as SIZE/max_block_size blocks. All of this blocks will be with block_count = 0 but not the last one - last one's block_count will be 0; + // Blocks can be reordered upto provided by client uncompressed free buffer size. + // blocks: A A A B B B CDE + // offset: 1 1 1 4 4 4 6 + // part_number: 0 1 2 0 1 2 0 + // count: 0 0 1 0 0 1 3 + // Offset will be the same as in Offsets. + repeated int64 blocks_offsets = 13; + repeated int64 blocks_part_numbers = 14; + // How many complete messages and imcomplete messages end parts (one at most) this block contains + repeated int64 blocks_message_counts = 15; + repeated int64 blocks_uncompressed_sizes = 16; // In block format version 0 each byte contains only block codec identifier repeated bytes blocks_headers = 17; repeated bytes blocks_data = 18; - - // Zero if this is not first portion of data after resume or provided by client cookie otherwise. 
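
An aside on the message_group_id / ip / session-meta cache encoding documented in the comments above: a negative index -X consumes the next unused value from the companion list, stores it in 1-based cache slot X and uses it for the current message, while a positive index X reuses slot X without touching the cache. Below is a minimal standalone decoder that reproduces the worked example from those comments; the function and variable names are illustrative and not part of any SDK.

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Decodes per-message metadata values from the delta encoding described above.
// `cache` has max_meta_cache_size slots; an empty string stands for a slot
// that is not set yet ("?" in the comments).
std::vector<std::string> DecodeCachedMeta(const std::vector<int64_t>& indexes,
                                          const std::vector<std::string>& newValues,
                                          std::vector<std::string>& cache)
{
    std::vector<std::string> result;
    result.reserve(indexes.size());
    size_t nextNew = 0;
    for (const int64_t idx : indexes) {
        if (idx < 0) {
            const size_t slot = static_cast<size_t>(-idx) - 1; // 1-based slot -> 0-based
            cache[slot] = newValues.at(nextNew++);
            result.push_back(cache[slot]);
        } else {
            result.push_back(cache[static_cast<size_t>(idx) - 1]);
        }
    }
    return result;
}

int main() {
    // The worked example from the comments: max_meta_cache_size = 2,
    // initial cache state "s0,?".
    std::vector<std::string> cache = {"s0", ""};
    const std::vector<std::string> newIds = {"s1", "s2", "s3", "s1"};
    const std::vector<int64_t> indexes = {-1, -2, 1, 2, 1, 1, -1, 2, -2};

    const std::vector<std::string> expected = {"s1", "s2", "s1", "s2", "s1", "s1", "s3", "s2", "s1"};
    assert(DecodeCachedMeta(indexes, newIds, cache) == expected);
    assert(cache == (std::vector<std::string>{"s3", "s1"})); // "Cache state after processing: s3,s1"
    return 0;
}
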
- int64 resume_cookie = 50; - - message ReadStatistics { - int64 blobs_from_cache = 1; - int64 blobs_from_disk = 2; - int64 bytes_from_head = 3; - int64 bytes_from_cache = 4; - int64 bytes_from_disk = 5; - int64 repack_duration_ms = 6; - } - ReadStatistics read_statistics = 100; - } - - message SkipRange { - // Partition Stream identifier. - int64 partition_stream_id = 1; - - // When some data is skipped by client parameters (read_timestamp_ms for example) then range of skipped offsets is sended to client. - // Client lib must commit this range and change read_offset to end of this range. - OffsetsRange skip_range = 2; - } - - repeated SkipRange skip_range = 1; - - // Per-partition data. - repeated PartitionData partitions = 2; - - } - - // Response for status requst. - message PartitionStreamStatusResponse { - // Identifier of partition partition stream that is ready to be closed by server. - int64 partition_stream_id = 1; - - int64 committed_offset = 2; - int64 end_offset = 3; - - // WriteTimestamp of next message (and end_offset) will be not less that WriteWatermarkMs. - int64 written_at_watermark_ms = 4; - } - - message StopReadResponse { - } - - message ResumeReadResponse { - } - - message AddTopicResponse { - // Block format version of data client will receive from the topic. - int64 block_format_version = 1; - } - - message RemoveTopicResponse { - } -} - -/** - * Message that represens concrete partition partition stream. - */ -message PartitionStream { - // Topic path of partition. - string topic = 1; - // Cluster of topic instance. - string cluster = 2; - // Partition identifier. Explicit only for debug purposes. - int64 partition_id = 3; - // Partition group identifier. Explicit only for debug purposes. - int64 partition_group_id = 4; - - // Identitifier of partition stream. Unique inside one RPC call. - int64 partition_stream_id = 6; - - // Opaque blob. Provide it with partition stream in state for session reconnects. - bytes connection_meta = 7; -} - -/** + + // Zero if this is not first portion of data after resume or provided by client cookie otherwise. + int64 resume_cookie = 50; + + message ReadStatistics { + int64 blobs_from_cache = 1; + int64 blobs_from_disk = 2; + int64 bytes_from_head = 3; + int64 bytes_from_cache = 4; + int64 bytes_from_disk = 5; + int64 repack_duration_ms = 6; + } + ReadStatistics read_statistics = 100; + } + + message SkipRange { + // Partition Stream identifier. + int64 partition_stream_id = 1; + + // When some data is skipped by client parameters (read_timestamp_ms for example) then range of skipped offsets is sended to client. + // Client lib must commit this range and change read_offset to end of this range. + OffsetsRange skip_range = 2; + } + + repeated SkipRange skip_range = 1; + + // Per-partition data. + repeated PartitionData partitions = 2; + + } + + // Response for status requst. + message PartitionStreamStatusResponse { + // Identifier of partition partition stream that is ready to be closed by server. + int64 partition_stream_id = 1; + + int64 committed_offset = 2; + int64 end_offset = 3; + + // WriteTimestamp of next message (and end_offset) will be not less that WriteWatermarkMs. + int64 written_at_watermark_ms = 4; + } + + message StopReadResponse { + } + + message ResumeReadResponse { + } + + message AddTopicResponse { + // Block format version of data client will receive from the topic. 
+ int64 block_format_version = 1; + } + + message RemoveTopicResponse { + } +} + +/** + * Message that represens concrete partition partition stream. + */ +message PartitionStream { + // Topic path of partition. + string topic = 1; + // Cluster of topic instance. + string cluster = 2; + // Partition identifier. Explicit only for debug purposes. + int64 partition_id = 3; + // Partition group identifier. Explicit only for debug purposes. + int64 partition_group_id = 4; + + // Identitifier of partition stream. Unique inside one RPC call. + int64 partition_stream_id = 6; + + // Opaque blob. Provide it with partition stream in state for session reconnects. + bytes connection_meta = 7; +} + +/** * Request for read session. Contains one of : * Init - handshake request. * Read - request for data. @@ -665,86 +665,86 @@ message PartitionStream { * Released - signal for server that client finished working with partition. Must be sent only after corresponding Release request from server. */ -message MigrationStreamingReadClientMessage { - message TopicReadSettings { - // Topic path. - string topic = 1; - // Partition groups that will be read by this session. - // If list is empty - then session will read all partition groups. - repeated int64 partition_group_ids = 2; - // Read data only after this timestamp from this topic. - int64 start_from_written_at_ms = 3; - } - +message MigrationStreamingReadClientMessage { + message TopicReadSettings { + // Topic path. + string topic = 1; + // Partition groups that will be read by this session. + // If list is empty - then session will read all partition groups. + repeated int64 partition_group_ids = 2; + // Read data only after this timestamp from this topic. + int64 start_from_written_at_ms = 3; + } + // Handshake request. - message InitRequest { - // Message that describes topic to read. + message InitRequest { + // Message that describes topic to read. // Topics that will be read by this session. - repeated TopicReadSettings topics_read_settings = 1; + repeated TopicReadSettings topics_read_settings = 1; // Flag that indicates reading only of original topics in cluster or all including mirrored. bool read_only_original = 2; - // Path of consumer that is used for reading by this session. - string consumer = 3; - - // Skip all messages that has write timestamp smaller than now - max_time_lag_ms. - int64 max_lag_duration_ms = 4; - // Read data only after this timestamp from all topics. - int64 start_from_written_at_ms = 5; - - // Maximum block format version supported by the client. Server will asses this parameter and return actual data blocks version in - // StreamingReadServerMessage.InitResponse.block_format_version_by_topic (and StreamingReadServerMessage.AddTopicResponse.block_format_version) - // or error if client will not be able to read data. - int64 max_supported_block_format_version = 6; - - // Maximal size of client cache for message_group_id, ip and meta, per partition. - // There is separate caches for each partition partition streams. - // There is separate caches for message group identifiers, ip and meta inside one partition partition stream. - int64 max_meta_cache_size = 10; - - // State of client read session. Could be provided to server for retries. - message State { - message PartitionStreamState { - enum Status { - // Not used state. - STATUS_UNSPECIFIED = 0; - // Client seen Create message but not yet responded to server with Created message. - CREATING = 1; - // Client seen Destroy message but not yet responded with Released message. 
- DESTROYING = 2; - // Client sent Created or ResumeReadRequest message to server for this partition stream. - READING = 3; - // Client sent StopReadRequest for this partition stream. - STOPPED = 4; - } - // Partition partition stream. - PartitionStream partition_stream = 1; - // Current read offset if has one. Actual for states DESTROYING, READING and STOPPED. - int64 read_offset = 2; - // Ranges of committed by client offsets. - repeated OffsetsRange offset_ranges = 3; - // Status of partition stream. - Status status = 4; - } - repeated PartitionStreamState partition_streams_states = 1; - } - - // Session identifier for retries. Must be the same as session_id from Inited server response. If this is first connect, not retry - do not use this field. - string session_id = 100; - // 0 for first init message and incremental value for connect retries. - int64 connection_attempt = 101; - // Formed state for retries. If not retry - do not use this field. - State state = 102; - - int64 idle_timeout_ms = 200; - - - //////////////////////////////////////////////////////////////////////////////////////////////////////////// - // TODO: remove after refactoring - // Single read request params. - ReadParams read_params = 42; + // Path of consumer that is used for reading by this session. + string consumer = 3; + + // Skip all messages that has write timestamp smaller than now - max_time_lag_ms. + int64 max_lag_duration_ms = 4; + // Read data only after this timestamp from all topics. + int64 start_from_written_at_ms = 5; + + // Maximum block format version supported by the client. Server will asses this parameter and return actual data blocks version in + // StreamingReadServerMessage.InitResponse.block_format_version_by_topic (and StreamingReadServerMessage.AddTopicResponse.block_format_version) + // or error if client will not be able to read data. + int64 max_supported_block_format_version = 6; + + // Maximal size of client cache for message_group_id, ip and meta, per partition. + // There is separate caches for each partition partition streams. + // There is separate caches for message group identifiers, ip and meta inside one partition partition stream. + int64 max_meta_cache_size = 10; + + // State of client read session. Could be provided to server for retries. + message State { + message PartitionStreamState { + enum Status { + // Not used state. + STATUS_UNSPECIFIED = 0; + // Client seen Create message but not yet responded to server with Created message. + CREATING = 1; + // Client seen Destroy message but not yet responded with Released message. + DESTROYING = 2; + // Client sent Created or ResumeReadRequest message to server for this partition stream. + READING = 3; + // Client sent StopReadRequest for this partition stream. + STOPPED = 4; + } + // Partition partition stream. + PartitionStream partition_stream = 1; + // Current read offset if has one. Actual for states DESTROYING, READING and STOPPED. + int64 read_offset = 2; + // Ranges of committed by client offsets. + repeated OffsetsRange offset_ranges = 3; + // Status of partition stream. + Status status = 4; + } + repeated PartitionStreamState partition_streams_states = 1; + } + + // Session identifier for retries. Must be the same as session_id from Inited server response. If this is first connect, not retry - do not use this field. + string session_id = 100; + // 0 for first init message and incremental value for connect retries. + int64 connection_attempt = 101; + // Formed state for retries. If not retry - do not use this field. 
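[Editor's note] A minimal sketch of filling the InitRequest described above with the generated protobuf C++ API. Assumptions: the message lives in the Ydb.PersQueue.V1 package, as the other messages referenced in this diff do, and the generated header path is a guess; setter names follow standard protoc naming for the fields shown above.

    // Assumption: generated header for this proto file; the real path may differ.
    #include <ydb/public/api/protos/ydb_persqueue_v1.pb.h>

    Ydb::PersQueue::V1::MigrationStreamingReadClientMessage MakeInitRequest() {
        Ydb::PersQueue::V1::MigrationStreamingReadClientMessage msg;
        auto* init = msg.mutable_init_request();

        auto* topic = init->add_topics_read_settings();
        topic->set_topic("/my/topic");            // hypothetical topic path
        topic->add_partition_group_ids(1);        // an empty list would mean "all groups"

        init->set_consumer("/my/consumer");       // hypothetical consumer path
        init->set_read_only_original(true);
        init->set_max_lag_duration_ms(60 * 1000);
        init->set_max_supported_block_format_version(1);
        init->set_max_meta_cache_size(1024);

        // First connect: session_id, connection_attempt and state stay unset;
        // per the comments above they are only filled in when retrying a session.
        return msg;
    }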
+ State state = 102; + + int64 idle_timeout_ms = 200; + + + //////////////////////////////////////////////////////////////////////////////////////////////////////////// + // TODO: remove after refactoring + // Single read request params. + ReadParams read_params = 42; bool ranges_mode = 442; - //////////////////////////////////////////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////////////////////////////////////////// } // Request of single read. @@ -812,7 +812,7 @@ message MigrationStreamingReadClientMessage { } oneof request { - InitRequest init_request = 1; + InitRequest init_request = 1; Read read = 2; StartRead start_read = 3; Commit commit = 4; @@ -835,17 +835,17 @@ message MigrationStreamingReadClientMessage { * Release - signal from server for releasing of partition. */ -message MigrationStreamingReadServerMessage { +message MigrationStreamingReadServerMessage { // Handshake response. - message InitResponse { + message InitResponse { // Read session identifier for debug purposes. - string session_id = 1; - // Block format version of data client will receive from topics. - map<string, int64> block_format_version_by_topic = 2; - - // Choosed maximan cache size by server. - // Client must use cache of this size. Could change on retries - reduce size of cache in this case. - int64 max_meta_cache_size = 10; + string session_id = 1; + // Block format version of data client will receive from topics. + map<string, int64> block_format_version_by_topic = 2; + + // Choosed maximan cache size by server. + // Client must use cache of this size. Could change on retries - reduce size of cache in this case. + int64 max_meta_cache_size = 10; } // Signal that partition is assigned to this read session. Client must react on this signal by sending StartRead when ready. @@ -905,7 +905,7 @@ message MigrationStreamingReadServerMessage { // Timestamp of creation of message provided on write from client. uint64 create_timestamp_ms = 3; // Codec that is used for data compressing. - Codec codec = 4; + Codec codec = 4; // Compressed client message body. bytes data = 5; // Uncompressed size of client message body. @@ -974,7 +974,7 @@ message MigrationStreamingReadServerMessage { repeated Ydb.Issue.IssueMessage issues = 2; oneof response { - InitResponse init_response = 3; + InitResponse init_response = 3; DataBatch data_batch = 4; Assigned assigned = 5; Release release = 6; diff --git a/ydb/public/api/protos/ydb_rate_limiter.proto b/ydb/public/api/protos/ydb_rate_limiter.proto index c18430c32b6..cbf58bb6dc0 100644 --- a/ydb/public/api/protos/ydb_rate_limiter.proto +++ b/ydb/public/api/protos/ydb_rate_limiter.proto @@ -1,37 +1,37 @@ -syntax = "proto3"; -option cc_enable_arenas = true; - -package Ydb.RateLimiter; - -option java_package = "com.yandex.ydb.rate_limiter"; -option java_outer_classname = "RateLimiterProtos"; -option java_multiple_files = true; - +syntax = "proto3"; +option cc_enable_arenas = true; + +package Ydb.RateLimiter; + +option java_package = "com.yandex.ydb.rate_limiter"; +option java_outer_classname = "RateLimiterProtos"; +option java_multiple_files = true; + import "ydb/public/api/protos/ydb_operation.proto"; - -// -// Rate Limiter control API. -// - -// -// Resource properties. -// - -// Settings for hierarchical deficit round robin (HDRR) algorithm. -message HierarchicalDrrSettings { - // Resource consumption speed limit. - // Value is required for root resource. - // 0 is equivalent to not set. 
- // Must be nonnegative. - double max_units_per_second = 1; - - // Maximum burst size of resource consumption across the whole cluster - // divided by max_units_per_second. - // Default value is 1. - // This means that maximum burst size might be equal to max_units_per_second. - // 0 is equivalent to not set. - // Must be nonnegative. - double max_burst_size_coefficient = 2; + +// +// Rate Limiter control API. +// + +// +// Resource properties. +// + +// Settings for hierarchical deficit round robin (HDRR) algorithm. +message HierarchicalDrrSettings { + // Resource consumption speed limit. + // Value is required for root resource. + // 0 is equivalent to not set. + // Must be nonnegative. + double max_units_per_second = 1; + + // Maximum burst size of resource consumption across the whole cluster + // divided by max_units_per_second. + // Default value is 1. + // This means that maximum burst size might be equal to max_units_per_second. + // 0 is equivalent to not set. + // Must be nonnegative. + double max_burst_size_coefficient = 2; // Prefetch in local bucket up to prefetch_coefficient*max_units_per_second units (full size). // Default value is inherited from parent or 0.2 for root. @@ -43,138 +43,138 @@ message HierarchicalDrrSettings { // Default value is inherited from parent or 0.75 for root. // Must be nonnegative and less than or equal to 1. double prefetch_watermark = 4; -} - -// Rate limiter resource description. -message Resource { - // Resource path. Elements are separated by slash. - // The first symbol is not slash. - // The first element is root resource name. - // Resource path is the path of resource inside coordination node. - string resource_path = 1; - - oneof type { - // Settings for Hierarchical DRR algorithm. - HierarchicalDrrSettings hierarchical_drr = 2; - } -} - -// -// CreateResource method. -// - -message CreateResourceRequest { - Ydb.Operations.OperationParams operation_params = 1; - - // Path of a coordination node. - string coordination_node_path = 2; - - // Resource properties. - Resource resource = 3; -} - -message CreateResourceResponse { - // Holds CreateResourceResult in case of successful call. - Ydb.Operations.Operation operation = 1; -} - -message CreateResourceResult { -} - -// -// AlterResource method. -// - -message AlterResourceRequest { - Ydb.Operations.OperationParams operation_params = 1; - - // Path of a coordination node. - string coordination_node_path = 2; - - // New resource properties. - Resource resource = 3; -} - -message AlterResourceResponse { - // Holds AlterResourceResult in case of successful call. - Ydb.Operations.Operation operation = 1; -} - -message AlterResourceResult { -} - -// -// DropResource method. -// - -message DropResourceRequest { - Ydb.Operations.OperationParams operation_params = 1; - - // Path of a coordination node. - string coordination_node_path = 2; - - // Path of resource inside a coordination node. - string resource_path = 3; -} - -message DropResourceResponse { - // Holds DropResourceResult in case of successful call. - Ydb.Operations.Operation operation = 1; -} - -message DropResourceResult { -} - -// -// ListResources method. -// - -message ListResourcesRequest { - Ydb.Operations.OperationParams operation_params = 1; - - // Path of a coordination node. - string coordination_node_path = 2; - - // Path of resource inside a coordination node. - // May be empty. - // In that case all root resources will be listed. - string resource_path = 3; - - // List resources recursively. 
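[Editor's note] To make the relationships between the HierarchicalDrrSettings fields above concrete, here is a small back-of-the-envelope sketch. It is not YDB code; the struct merely mirrors the proto fields and the defaults mentioned in their comments.

    #include <iostream>

    struct THdrrSketch {
        double MaxUnitsPerSecond = 0;
        double MaxBurstSizeCoefficient = 1;   // default 1 per the comment
        double PrefetchCoefficient = 0.2;     // root default per the comment
        double PrefetchWatermark = 0.75;      // root default per the comment
    };

    int main() {
        THdrrSketch s;
        s.MaxUnitsPerSecond = 100.0;
        // Cluster-wide burst allowance, in resource units.
        const double maxBurstUnits = s.MaxBurstSizeCoefficient * s.MaxUnitsPerSecond; // 100
        // Full size of the local prefetch bucket, in resource units.
        const double prefetchUnits = s.PrefetchCoefficient * s.MaxUnitsPerSecond;     // 20
        // Watermark of the local bucket expressed in units (0.75 of the full prefetch size).
        const double watermarkUnits = s.PrefetchWatermark * prefetchUnits;            // 15
        std::cout << maxBurstUnits << ' ' << prefetchUnits << ' ' << watermarkUnits << '\n';
    }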
- bool recursive = 4; -} - -message ListResourcesResponse { - // Holds ListResourcesResult in case of successful call. - Ydb.Operations.Operation operation = 1; -} - -message ListResourcesResult { - repeated string resource_paths = 1; -} - -// -// DescribeResource method. -// - -message DescribeResourceRequest { - Ydb.Operations.OperationParams operation_params = 1; - - // Path of a coordination node. - string coordination_node_path = 2; - - // Path of resource inside a coordination node. - string resource_path = 3; -} - -message DescribeResourceResponse { - // Holds DescribeResourceResult in case of successful call. - Ydb.Operations.Operation operation = 1; -} - -message DescribeResourceResult { - Resource resource = 1; -} +} + +// Rate limiter resource description. +message Resource { + // Resource path. Elements are separated by slash. + // The first symbol is not slash. + // The first element is root resource name. + // Resource path is the path of resource inside coordination node. + string resource_path = 1; + + oneof type { + // Settings for Hierarchical DRR algorithm. + HierarchicalDrrSettings hierarchical_drr = 2; + } +} + +// +// CreateResource method. +// + +message CreateResourceRequest { + Ydb.Operations.OperationParams operation_params = 1; + + // Path of a coordination node. + string coordination_node_path = 2; + + // Resource properties. + Resource resource = 3; +} + +message CreateResourceResponse { + // Holds CreateResourceResult in case of successful call. + Ydb.Operations.Operation operation = 1; +} + +message CreateResourceResult { +} + +// +// AlterResource method. +// + +message AlterResourceRequest { + Ydb.Operations.OperationParams operation_params = 1; + + // Path of a coordination node. + string coordination_node_path = 2; + + // New resource properties. + Resource resource = 3; +} + +message AlterResourceResponse { + // Holds AlterResourceResult in case of successful call. + Ydb.Operations.Operation operation = 1; +} + +message AlterResourceResult { +} + +// +// DropResource method. +// + +message DropResourceRequest { + Ydb.Operations.OperationParams operation_params = 1; + + // Path of a coordination node. + string coordination_node_path = 2; + + // Path of resource inside a coordination node. + string resource_path = 3; +} + +message DropResourceResponse { + // Holds DropResourceResult in case of successful call. + Ydb.Operations.Operation operation = 1; +} + +message DropResourceResult { +} + +// +// ListResources method. +// + +message ListResourcesRequest { + Ydb.Operations.OperationParams operation_params = 1; + + // Path of a coordination node. + string coordination_node_path = 2; + + // Path of resource inside a coordination node. + // May be empty. + // In that case all root resources will be listed. + string resource_path = 3; + + // List resources recursively. + bool recursive = 4; +} + +message ListResourcesResponse { + // Holds ListResourcesResult in case of successful call. + Ydb.Operations.Operation operation = 1; +} + +message ListResourcesResult { + repeated string resource_paths = 1; +} + +// +// DescribeResource method. +// + +message DescribeResourceRequest { + Ydb.Operations.OperationParams operation_params = 1; + + // Path of a coordination node. + string coordination_node_path = 2; + + // Path of resource inside a coordination node. + string resource_path = 3; +} + +message DescribeResourceResponse { + // Holds DescribeResourceResult in case of successful call. 
+ Ydb.Operations.Operation operation = 1; +} + +message DescribeResourceResult { + Resource resource = 1; +} // // AcquireResource method. diff --git a/ydb/public/api/protos/yq.proto b/ydb/public/api/protos/yq.proto index 6aa7169a890..c90610c15e5 100644 --- a/ydb/public/api/protos/yq.proto +++ b/ydb/public/api/protos/yq.proto @@ -80,20 +80,20 @@ message StreamingDisposition { google.protobuf.Duration duration = 1; } - message FromLastCheckpoint { - // By default if new query streams set doesn't equal to old query streams set, - // error will occur and query won't be allowed to load offsets for streams for the last checkpoint. - // If this flag is set all offsets that can be matched with previous query checkpoint will be matched. - // Others will use "fresh" streaming disposition. - bool force = 1; - } - + message FromLastCheckpoint { + // By default if new query streams set doesn't equal to old query streams set, + // error will occur and query won't be allowed to load offsets for streams for the last checkpoint. + // If this flag is set all offsets that can be matched with previous query checkpoint will be matched. + // Others will use "fresh" streaming disposition. + bool force = 1; + } + oneof disposition { google.protobuf.Empty oldest = 1; // Start processing with the oldest offset google.protobuf.Empty fresh = 2; // Start processing with the fresh offset FromTime from_time = 3; // Start processing with offset from the specified time TimeAgo time_ago = 4; // Start processing with offset some time ago - FromLastCheckpoint from_last_checkpoint = 5; // Start processing with offset which corresponds to the last checkpoint + FromLastCheckpoint from_last_checkpoint = 5; // Start processing with offset which corresponds to the last checkpoint } } @@ -133,10 +133,10 @@ message QueryMeta { RESUMING = 6; // Resuming query execution from PAUSED status RUNNING = 7; // Query started for execution COMPLETED = 8; // Query completed successfully - COMPLETING = 12; // Finalizing query before become COMPLETED + COMPLETING = 12; // Finalizing query before become COMPLETED FAILED = 9; // Query completed with errors - FAILING = 13; // Finalizing query before become FAILED - PAUSED = 11; // Query paused + FAILING = 13; // Finalizing query before become FAILED + PAUSED = 11; // Query paused PAUSING = 10; // Query starts pausing } @@ -154,9 +154,9 @@ message QueryMeta { string aborted_by = 11; string paused_by = 12; } - // One of the versions of this query has fully saved checkpoint. - // If this flag is not set streaming disposition mode "from last checkpoint" can't be used. - bool has_saved_checkpoints = 13; + // One of the versions of this query has fully saved checkpoint. + // If this flag is not set streaming disposition mode "from last checkpoint" can't be used. 
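[Editor's note] The force flag semantics described above for FromLastCheckpoint can be illustrated with a small sketch (invented types and names; this is not the actual query service logic): streams that match the previous checkpoint resume from it, and unmatched streams either fall back to a "fresh" start when force is set or make the operation fail.

    #include <cstdint>
    #include <map>
    #include <optional>
    #include <set>
    #include <stdexcept>
    #include <string>

    // Offsets to start from: a value for matched streams, std::nullopt meaning "fresh".
    std::map<std::string, std::optional<int64_t>> ResolveStartOffsets(
            const std::set<std::string>& newQueryStreams,
            const std::map<std::string, int64_t>& checkpointOffsets,
            bool force)
    {
        if (!force) {
            // Default behaviour: any difference between the stream sets is an error.
            std::set<std::string> oldStreams;
            for (const auto& kv : checkpointOffsets) {
                oldStreams.insert(kv.first);
            }
            if (oldStreams != newQueryStreams) {
                throw std::runtime_error("stream set changed since the last checkpoint");
            }
        }
        std::map<std::string, std::optional<int64_t>> result;
        for (const auto& stream : newQueryStreams) {
            auto it = checkpointOffsets.find(stream);
            if (it != checkpointOffsets.end()) {
                result[stream] = it->second;   // matched: continue from the checkpoint
            } else {
                result[stream] = std::nullopt; // only reachable with force: start fresh
            }
        }
        return result;
    }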
+ bool has_saved_checkpoints = 13; } message BriefQuery { @@ -201,7 +201,7 @@ message CreateQueryRequest { Ydb.Operations.OperationParams operation_params = 1; QueryContent content = 2; ExecuteMode execute_mode = 3; - StreamingDisposition disposition = 4; + StreamingDisposition disposition = 4; string idempotency_key = 5 [(Ydb.length).le = 1024]; } @@ -297,7 +297,7 @@ message ModifyQueryRequest { QueryContent content = 3; ExecuteMode execute_mode = 4; StreamingDisposition disposition = 5; - StateLoadMode state_load_mode = 6; + StateLoadMode state_load_mode = 6; int64 previous_revision = 7 [(Ydb.value) = ">= 0"]; string idempotency_key = 8 [(Ydb.length).le = 1024]; } diff --git a/ydb/public/lib/value/value.h b/ydb/public/lib/value/value.h index 8ae368ac027..a54d7fdadfb 100644 --- a/ydb/public/lib/value/value.h +++ b/ydb/public/lib/value/value.h @@ -4,7 +4,7 @@ #include <ydb/public/lib/scheme_types/scheme_type_id.h> #include <util/generic/vector.h> -#include <util/string/builder.h> +#include <util/string/builder.h> #include <google/protobuf/text_format.h> @@ -100,20 +100,20 @@ public: // returns member index by name int GetMemberIndex(TStringBuf name) const; - TString DumpToString() const { - TStringBuilder dump; + TString DumpToString() const { + TStringBuilder dump; TString res; ::google::protobuf::TextFormat::PrintToString(Type, &res); - dump << "Type:" << Endl << res << Endl; + dump << "Type:" << Endl << res << Endl; ::google::protobuf::TextFormat::PrintToString(Value, &res); - dump << "Value:" << Endl << res << Endl; - return std::move(dump); - } - - void DumpValue() const { - Cerr << DumpToString(); + dump << "Value:" << Endl << res << Endl; + return std::move(dump); } + void DumpValue() const { + Cerr << DumpToString(); + } + const NKikimrMiniKQL::TType& GetType() const { return Type; }; const NKikimrMiniKQL::TValue& GetValue() const { return Value; }; diff --git a/ydb/public/sdk/cpp/client/ydb_coordination/coordination.cpp b/ydb/public/sdk/cpp/client/ydb_coordination/coordination.cpp index 7a99c95214b..4be0af1e315 100644 --- a/ydb/public/sdk/cpp/client/ydb_coordination/coordination.cpp +++ b/ydb/public/sdk/cpp/client/ydb_coordination/coordination.cpp @@ -50,9 +50,9 @@ void ConvertSettingsToProtoConfig( if (settings.AttachConsistencyMode_ != EConsistencyMode::UNSET) { config->set_attach_consistency_mode(static_cast<Ydb::Coordination::ConsistencyMode>(settings.AttachConsistencyMode_)); } - if (settings.RateLimiterCountersMode_ != ERateLimiterCountersMode::UNSET) { - config->set_rate_limiter_counters_mode(static_cast<Ydb::Coordination::RateLimiterCountersMode>(settings.RateLimiterCountersMode_)); - } + if (settings.RateLimiterCountersMode_ != ERateLimiterCountersMode::UNSET) { + config->set_rate_limiter_counters_mode(static_cast<Ydb::Coordination::RateLimiterCountersMode>(settings.RateLimiterCountersMode_)); + } } //////////////////////////////////////////////////////////////////////////////// @@ -81,7 +81,7 @@ struct TNodeDescription::TImpl { } ReadConsistencyMode_ = static_cast<EConsistencyMode>(config.read_consistency_mode()); AttachConsistencyMode_ = static_cast<EConsistencyMode>(config.attach_consistency_mode()); - RateLimiterCountersMode_ = static_cast<ERateLimiterCountersMode>(config.rate_limiter_counters_mode()); + RateLimiterCountersMode_ = static_cast<ERateLimiterCountersMode>(config.rate_limiter_counters_mode()); Owner_ = desc.self().owner(); PermissionToSchemeEntry(desc.self().effective_permissions(), &EffectivePermissions_); } @@ -90,7 +90,7 @@ struct 
TNodeDescription::TImpl { TMaybe<TDuration> SessionGracePeriod_; EConsistencyMode ReadConsistencyMode_; EConsistencyMode AttachConsistencyMode_; - ERateLimiterCountersMode RateLimiterCountersMode_; + ERateLimiterCountersMode RateLimiterCountersMode_; TString Owner_; TVector<NScheme::TPermissions> EffectivePermissions_; }; @@ -116,10 +116,10 @@ EConsistencyMode TNodeDescription::GetAttachConsistencyMode() const { return Impl_->AttachConsistencyMode_; } -ERateLimiterCountersMode TNodeDescription::GetRateLimiterCountersMode() const { - return Impl_->RateLimiterCountersMode_; -} - +ERateLimiterCountersMode TNodeDescription::GetRateLimiterCountersMode() const { + return Impl_->RateLimiterCountersMode_; +} + const TString& TNodeDescription::GetOwner() const { return Impl_->Owner_; } diff --git a/ydb/public/sdk/cpp/client/ydb_coordination/coordination.h b/ydb/public/sdk/cpp/client/ydb_coordination/coordination.h index fa8ab572d0d..ccc7146a7ea 100644 --- a/ydb/public/sdk/cpp/client/ydb_coordination/coordination.h +++ b/ydb/public/sdk/cpp/client/ydb_coordination/coordination.h @@ -88,12 +88,12 @@ enum class EConsistencyMode { RELAXED_MODE = 2, }; -enum class ERateLimiterCountersMode { - UNSET = 0, - AGGREGATED = 1, - DETAILED = 2, -}; - +enum class ERateLimiterCountersMode { + UNSET = 0, + AGGREGATED = 1, + DETAILED = 2, +}; + //! Represents coordination node description class TNodeDescription { public: @@ -103,7 +103,7 @@ public: const TMaybe<TDuration>& GetSessionGracePeriod() const; EConsistencyMode GetReadConsistencyMode() const; EConsistencyMode GetAttachConsistencyMode() const; - ERateLimiterCountersMode GetRateLimiterCountersMode() const; + ERateLimiterCountersMode GetRateLimiterCountersMode() const; const TString& GetOwner() const; const TVector<NScheme::TPermissions>& GetEffectivePermissions() const; @@ -186,8 +186,8 @@ struct TNodeSettings : public TOperationRequestSettings<TDerived> { FLUENT_SETTING_DEFAULT(EConsistencyMode, ReadConsistencyMode, EConsistencyMode::UNSET); FLUENT_SETTING_DEFAULT(EConsistencyMode, AttachConsistencyMode, EConsistencyMode::UNSET); - - FLUENT_SETTING_DEFAULT(ERateLimiterCountersMode, RateLimiterCountersMode, ERateLimiterCountersMode::UNSET); + + FLUENT_SETTING_DEFAULT(ERateLimiterCountersMode, RateLimiterCountersMode, ERateLimiterCountersMode::UNSET); }; struct TCreateNodeSettings : public TNodeSettings<TCreateNodeSettings> { }; diff --git a/ydb/public/sdk/cpp/client/ydb_driver/driver.cpp b/ydb/public/sdk/cpp/client/ydb_driver/driver.cpp index de17c6e679c..99e0f0d7936 100644 --- a/ydb/public/sdk/cpp/client/ydb_driver/driver.cpp +++ b/ydb/public/sdk/cpp/client/ydb_driver/driver.cpp @@ -6,12 +6,12 @@ #include <ydb/public/sdk/cpp/client/impl/ydb_internal/logger/log.h> #undef INCLUDE_YDB_INTERNAL_H -#include <library/cpp/logger/log.h> +#include <library/cpp/logger/log.h> #include <ydb/public/sdk/cpp/client/impl/ydb_internal/common/parser.h> #include <ydb/public/sdk/cpp/client/impl/ydb_internal/common/getenv.h> #include <util/stream/file.h> #include <ydb/public/sdk/cpp/client/resources/ydb_ca.h> - + namespace NYdb { using NGrpc::TGRpcClientLow; @@ -45,7 +45,7 @@ public: bool GetGRpcKeepAlivePermitWithoutCalls() const override { return GRpcKeepAlivePermitWithoutCalls; } TDuration GetSocketIdleTimeout() const override { return SocketIdleTimeout; } ui64 GetMemoryQuota() const override { return MemoryQuota; } - const TLog& GetLog() const override { return Log; } + const TLog& GetLog() const override { return Log; } TStringType Endpoint; size_t NetworkThreadsNum = 2; @@ 
-70,7 +70,7 @@ public: bool GRpcKeepAlivePermitWithoutCalls = false; TDuration SocketIdleTimeout = TDuration::Minutes(6); ui64 MemoryQuota = 0; - TLog Log; // Null by default. + TLog Log; // Null by default. }; TDriverConfig::TDriverConfig(const TStringType& connectionString) @@ -115,7 +115,7 @@ TDriverConfig& TDriverConfig::SetAuthToken(const TStringType& token) { TDriverConfig& TDriverConfig::SetDatabase(const TStringType& database) { Impl_->Database = database; - Impl_->Log.SetFormatter(GetPrefixLogFormatter(GetDatabaseLogPrefix(Impl_->Database))); + Impl_->Log.SetFormatter(GetPrefixLogFormatter(GetDatabaseLogPrefix(Impl_->Database))); return *this; } @@ -172,11 +172,11 @@ TDriverConfig& TDriverConfig::SetSocketIdleTimeout(TDuration timeout) { return *this; } -TDriverConfig& TDriverConfig::SetLog(THolder<TLogBackend> log) { - Impl_->Log.ResetBackend(std::move(log)); - return *this; -} - +TDriverConfig& TDriverConfig::SetLog(THolder<TLogBackend> log) { + Impl_->Log.ResetBackend(std::move(log)); + return *this; +} + //////////////////////////////////////////////////////////////////////////////// std::shared_ptr<TGRpcConnectionsImpl> CreateInternalInterface(const TDriver connection) { diff --git a/ydb/public/sdk/cpp/client/ydb_driver/driver.h b/ydb/public/sdk/cpp/client/ydb_driver/driver.h index 39c91d8852d..428f1131a3c 100644 --- a/ydb/public/sdk/cpp/client/ydb_driver/driver.h +++ b/ydb/public/sdk/cpp/client/ydb_driver/driver.h @@ -7,7 +7,7 @@ #include <ydb/public/sdk/cpp/client/ydb_types/request_settings.h> #include <ydb/public/sdk/cpp/client/ydb_types/status/status.h> -#include <library/cpp/logger/backend.h> +#include <library/cpp/logger/backend.h> //////////////////////////////////////////////////////////////////////////////// @@ -35,7 +35,7 @@ public: //! Set number of client pool threads, if 0 adaptive thread pool will be used. //! NOTE: in case of no zero value it is possible to get deadlock if all threads //! of this pool is blocked somewhere in user code. - //! default: 0 + //! default: 0 TDriverConfig& SetClientThreadsNum(size_t sz); //! Warning: not recommended to change //! Set max number of queued responses. 0 - no limit @@ -65,10 +65,10 @@ public: //! default: 100 TDriverConfig& SetMaxQueuedRequests(size_t sz); //! Limit using of memory for grpc buffer pool. 0 means disabled. - //! If enabled the size must be greater than size of recieved message. + //! If enabled the size must be greater than size of recieved message. //! default: 0 TDriverConfig& SetGrpcMemoryQuota(ui64 bytes); - //! Specify tcp keep alive settings + //! Specify tcp keep alive settings //! This option allows to adjust tcp keep alive settings, useful to work //! with balancers or to detect unexpected connectivity problem. //! enable - if true enable tcp keep alive and use following settings @@ -104,8 +104,8 @@ public: //! default: 6 minutes TDriverConfig& SetSocketIdleTimeout(TDuration timeout); - //! Log backend. - TDriverConfig& SetLog(THolder<TLogBackend> log); + //! Log backend. 
+ TDriverConfig& SetLog(THolder<TLogBackend> log); private: class TImpl; std::shared_ptr<TImpl> Impl_; diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.cpp b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.cpp index dd42c8c4ed9..cd904e05495 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.cpp +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.cpp @@ -1,46 +1,46 @@ #include "common.h" - -#include <util/charset/unidata.h> - + +#include <util/charset/unidata.h> + namespace NYdb::NPersQueue { -IRetryPolicy::ERetryErrorClass GetRetryErrorClass(EStatus status) { - switch (status) { - case EStatus::SUCCESS: - case EStatus::INTERNAL_ERROR: - case EStatus::ABORTED: - case EStatus::UNAVAILABLE: - case EStatus::GENERIC_ERROR: - case EStatus::BAD_SESSION: - case EStatus::SESSION_EXPIRED: - case EStatus::CANCELLED: - case EStatus::UNDETERMINED: - case EStatus::SESSION_BUSY: - case EStatus::CLIENT_INTERNAL_ERROR: - case EStatus::CLIENT_CANCELLED: - case EStatus::CLIENT_OUT_OF_RANGE: - return IRetryPolicy::ERetryErrorClass::ShortRetry; - - case EStatus::OVERLOADED: - case EStatus::TIMEOUT: - case EStatus::TRANSPORT_UNAVAILABLE: - case EStatus::CLIENT_RESOURCE_EXHAUSTED: - case EStatus::CLIENT_DEADLINE_EXCEEDED: - case EStatus::CLIENT_LIMITS_REACHED: - case EStatus::CLIENT_DISCOVERY_FAILED: - return IRetryPolicy::ERetryErrorClass::LongRetry; - - case EStatus::SCHEME_ERROR: - case EStatus::STATUS_UNDEFINED: - case EStatus::BAD_REQUEST: - case EStatus::UNAUTHORIZED: - case EStatus::PRECONDITION_FAILED: - case EStatus::UNSUPPORTED: - case EStatus::ALREADY_EXISTS: - case EStatus::NOT_FOUND: - case EStatus::CLIENT_UNAUTHENTICATED: - case EStatus::CLIENT_CALL_UNIMPLEMENTED: - return IRetryPolicy::ERetryErrorClass::NoRetry; +IRetryPolicy::ERetryErrorClass GetRetryErrorClass(EStatus status) { + switch (status) { + case EStatus::SUCCESS: + case EStatus::INTERNAL_ERROR: + case EStatus::ABORTED: + case EStatus::UNAVAILABLE: + case EStatus::GENERIC_ERROR: + case EStatus::BAD_SESSION: + case EStatus::SESSION_EXPIRED: + case EStatus::CANCELLED: + case EStatus::UNDETERMINED: + case EStatus::SESSION_BUSY: + case EStatus::CLIENT_INTERNAL_ERROR: + case EStatus::CLIENT_CANCELLED: + case EStatus::CLIENT_OUT_OF_RANGE: + return IRetryPolicy::ERetryErrorClass::ShortRetry; + + case EStatus::OVERLOADED: + case EStatus::TIMEOUT: + case EStatus::TRANSPORT_UNAVAILABLE: + case EStatus::CLIENT_RESOURCE_EXHAUSTED: + case EStatus::CLIENT_DEADLINE_EXCEEDED: + case EStatus::CLIENT_LIMITS_REACHED: + case EStatus::CLIENT_DISCOVERY_FAILED: + return IRetryPolicy::ERetryErrorClass::LongRetry; + + case EStatus::SCHEME_ERROR: + case EStatus::STATUS_UNDEFINED: + case EStatus::BAD_REQUEST: + case EStatus::UNAUTHORIZED: + case EStatus::PRECONDITION_FAILED: + case EStatus::UNSUPPORTED: + case EStatus::ALREADY_EXISTS: + case EStatus::NOT_FOUND: + case EStatus::CLIENT_UNAUTHENTICATED: + case EStatus::CLIENT_CALL_UNIMPLEMENTED: + return IRetryPolicy::ERetryErrorClass::NoRetry; } } @@ -71,7 +71,7 @@ NYql::TIssues MakeIssueWithSubIssues(const TString& description, const NYql::TIs } size_t CalcDataSize(const TReadSessionEvent::TEvent& event) { - if (const TReadSessionEvent::TDataReceivedEvent* dataEvent = std::get_if<TReadSessionEvent::TDataReceivedEvent>(&event)) { + if (const TReadSessionEvent::TDataReceivedEvent* dataEvent = std::get_if<TReadSessionEvent::TDataReceivedEvent>(&event)) { size_t len = 0; if (dataEvent->IsCompressedMessages()) { for (const auto& msg : 
dataEvent->GetCompressedMessages()) { @@ -90,43 +90,43 @@ size_t CalcDataSize(const TReadSessionEvent::TEvent& event) { } } -static TStringBuf SplitPort(TStringBuf endpoint) { - for (int i = endpoint.Size() - 1; i >= 0; --i) { - if (endpoint[i] == ':') { - return endpoint.SubString(i + 1, TStringBuf::npos); - } - if (!IsDigit(endpoint[i])) { - return TStringBuf(); // empty - } - } - return TStringBuf(); // empty -} - -TString ApplyClusterEndpoint(TStringBuf driverEndpoint, const TString& clusterDiscoveryEndpoint) { - const TStringBuf clusterDiscoveryPort = SplitPort(clusterDiscoveryEndpoint); - if (!clusterDiscoveryPort.Empty()) { - return clusterDiscoveryEndpoint; - } - - const TStringBuf driverPort = SplitPort(driverEndpoint); - if (driverPort.Empty()) { - return clusterDiscoveryEndpoint; - } - - const bool hasColon = clusterDiscoveryEndpoint.find(':') != TString::npos; - if (hasColon) { - return TStringBuilder() << '[' << clusterDiscoveryEndpoint << "]:" << driverPort; - } else { - return TStringBuilder() << clusterDiscoveryEndpoint << ':' << driverPort; - } -} - +static TStringBuf SplitPort(TStringBuf endpoint) { + for (int i = endpoint.Size() - 1; i >= 0; --i) { + if (endpoint[i] == ':') { + return endpoint.SubString(i + 1, TStringBuf::npos); + } + if (!IsDigit(endpoint[i])) { + return TStringBuf(); // empty + } + } + return TStringBuf(); // empty +} + +TString ApplyClusterEndpoint(TStringBuf driverEndpoint, const TString& clusterDiscoveryEndpoint) { + const TStringBuf clusterDiscoveryPort = SplitPort(clusterDiscoveryEndpoint); + if (!clusterDiscoveryPort.Empty()) { + return clusterDiscoveryEndpoint; + } + + const TStringBuf driverPort = SplitPort(driverEndpoint); + if (driverPort.Empty()) { + return clusterDiscoveryEndpoint; + } + + const bool hasColon = clusterDiscoveryEndpoint.find(':') != TString::npos; + if (hasColon) { + return TStringBuilder() << '[' << clusterDiscoveryEndpoint << "]:" << driverPort; + } else { + return TStringBuilder() << clusterDiscoveryEndpoint << ':' << driverPort; + } +} + void IAsyncExecutor::Post(TFunction&& f) { PostImpl(std::move(f)); } IAsyncExecutor::TPtr CreateDefaultExecutor() { - return CreateThreadPoolExecutor(1); + return CreateThreadPoolExecutor(1); } void TThreadPoolExecutor::PostImpl(TVector<TFunction>&& fs) { @@ -140,8 +140,8 @@ void TThreadPoolExecutor::PostImpl(TFunction&& f) { } TSerialExecutor::TSerialExecutor(IAsyncExecutor::TPtr executor) - : Executor(executor) -{ + : Executor(executor) +{ Y_VERIFY(executor); } @@ -181,30 +181,30 @@ void TSerialExecutor::PostNext() { ExecutionQueue.pop(); Busy = true; } - -IExecutor::TPtr CreateThreadPoolExecutor(size_t threads) { - return MakeIntrusive<TThreadPoolExecutor>(threads); + +IExecutor::TPtr CreateThreadPoolExecutor(size_t threads) { + return MakeIntrusive<TThreadPoolExecutor>(threads); } -IExecutor::TPtr CreateGenericExecutor() { - return CreateThreadPoolExecutor(1); +IExecutor::TPtr CreateGenericExecutor() { + return CreateThreadPoolExecutor(1); } - -IExecutor::TPtr CreateThreadPoolExecutorAdapter(std::shared_ptr<IThreadPool> threadPool) { - return MakeIntrusive<TThreadPoolExecutor>(std::move(threadPool)); -} - -TThreadPoolExecutor::TThreadPoolExecutor(std::shared_ptr<IThreadPool> threadPool) - : ThreadPool(std::move(threadPool)) -{ - IsFakeThreadPool = dynamic_cast<TFakeThreadPool*>(ThreadPool.get()) != nullptr; + +IExecutor::TPtr CreateThreadPoolExecutorAdapter(std::shared_ptr<IThreadPool> threadPool) { + return MakeIntrusive<TThreadPoolExecutor>(std::move(threadPool)); } 
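[Editor's note] Based on reading the SplitPort()/ApplyClusterEndpoint() implementation above, these are the results one would expect for a few made-up endpoints. This is a hedged sketch, not a real test from the repository; the include path is the header from this diff.

    #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.h>
    #include <util/system/yassert.h>

    void ApplyClusterEndpointExamples() {
        using NYdb::NPersQueue::ApplyClusterEndpoint;
        // Cluster discovery returned an explicit port: it wins.
        Y_VERIFY(ApplyClusterEndpoint("driver.example.net:2135", "cluster.example.net:2136")
                 == "cluster.example.net:2136");
        // No port from cluster discovery: reuse the driver's port.
        Y_VERIFY(ApplyClusterEndpoint("driver.example.net:2135", "cluster.example.net")
                 == "cluster.example.net:2135");
        // Neither endpoint carries a port: the discovered endpoint is returned unchanged.
        Y_VERIFY(ApplyClusterEndpoint("driver.example.net", "cluster.example.net")
                 == "cluster.example.net");
    }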
-TThreadPoolExecutor::TThreadPoolExecutor(size_t threadsCount) - : TThreadPoolExecutor(CreateThreadPool(threadsCount)) -{ - Y_VERIFY(threadsCount > 0); - ThreadsCount = threadsCount; +TThreadPoolExecutor::TThreadPoolExecutor(std::shared_ptr<IThreadPool> threadPool) + : ThreadPool(std::move(threadPool)) +{ + IsFakeThreadPool = dynamic_cast<TFakeThreadPool*>(ThreadPool.get()) != nullptr; } +TThreadPoolExecutor::TThreadPoolExecutor(size_t threadsCount) + : TThreadPoolExecutor(CreateThreadPool(threadsCount)) +{ + Y_VERIFY(threadsCount > 0); + ThreadsCount = threadsCount; } + +} diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.h b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.h index 7287b698940..d3355f20205 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.h +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.h @@ -1,17 +1,17 @@ -#pragma once - +#pragma once + #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h> #include <ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h> - + #include <util/generic/queue.h> -#include <util/system/condvar.h> +#include <util/system/condvar.h> #include <util/thread/pool.h> - -#include <queue> - -namespace NYdb::NPersQueue { - -IRetryPolicy::ERetryErrorClass GetRetryErrorClass(EStatus status); + +#include <queue> + +namespace NYdb::NPersQueue { + +IRetryPolicy::ERetryErrorClass GetRetryErrorClass(EStatus status); IRetryPolicy::ERetryErrorClass GetRetryErrorClassV2(EStatus status); void Cancel(NGrpc::IQueueClientContextPtr& context); @@ -35,175 +35,175 @@ TPlainStatus MakeErrorFromProto(const TMessage& serverMessage) { return TPlainStatus(static_cast<EStatus>(serverMessage.status()), std::move(issues)); } -// Gets source endpoint for the whole driver (or persqueue client) -// and endpoint that was given us by the cluster discovery service -// and gives endpoint for the current LB cluster. -// For examples see tests. -TString ApplyClusterEndpoint(TStringBuf driverEndpoint, const TString& clusterDiscoveryEndpoint); - -// Factory for IStreamRequestReadWriteProcessor -// It is created in order to separate grpc transport logic from -// the logic of session. -// So there is grpc factory implementation to use in SDK -// and another one to use in tests for testing only session logic -// without transport stuff. -template <class TRequest, class TResponse> -struct ISessionConnectionProcessorFactory { - using IProcessor = NGrpc::IStreamRequestReadWriteProcessor<TRequest, TResponse>; - using TConnectedCallback = std::function<void(TPlainStatus&&, typename IProcessor::TPtr&&)>; - using TConnectTimeoutCallback = std::function<void(bool ok)>; - - virtual ~ISessionConnectionProcessorFactory() = default; - - // Creates processor - virtual void CreateProcessor( - // Params for connect. - TConnectedCallback callback, - const TRpcRequestSettings& requestSettings, - NGrpc::IQueueClientContextPtr connectContext, - // Params for timeout and its cancellation. - TDuration connectTimeout, - NGrpc::IQueueClientContextPtr connectTimeoutContext, - TConnectTimeoutCallback connectTimeoutCallback, - // Params for delay before reconnect and its cancellation. 
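[Editor's note] As a sketch only (the delay values and the helper name are invented), the status classification declared above could drive a retry policy along these lines:

    #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.h>
    #include <util/datetime/base.h>

    // Maps an operation status to a suggested backoff; TDuration::Max() means "do not retry".
    TDuration SuggestRetryDelay(NYdb::EStatus status, TDuration shortDelay, TDuration longDelay) {
        using namespace NYdb::NPersQueue;
        switch (GetRetryErrorClass(status)) {
            case IRetryPolicy::ERetryErrorClass::ShortRetry:
                return shortDelay;            // transient failures, retry quickly
            case IRetryPolicy::ERetryErrorClass::LongRetry:
                return longDelay;             // e.g. OVERLOADED or TIMEOUT, back off longer
            case IRetryPolicy::ERetryErrorClass::NoRetry:
                return TDuration::Max();      // permanent errors, give up
        }
        return TDuration::Max();
    }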
- TDuration connectDelay = TDuration::Zero(), - NGrpc::IQueueClientContextPtr connectDelayOperationContext = nullptr) = 0; -}; - -template <class TService, class TRequest, class TResponse> -class TSessionConnectionProcessorFactory : public ISessionConnectionProcessorFactory<TRequest, TResponse>, - public std::enable_shared_from_this<TSessionConnectionProcessorFactory<TService, TRequest, TResponse>> -{ -public: - using TConnectedCallback = typename ISessionConnectionProcessorFactory<TRequest, TResponse>::TConnectedCallback; - using TConnectTimeoutCallback = typename ISessionConnectionProcessorFactory<TRequest, TResponse>::TConnectTimeoutCallback; - TSessionConnectionProcessorFactory( - TGRpcConnectionsImpl::TStreamRpc<TService, TRequest, TResponse, NGrpc::TStreamRequestReadWriteProcessor> rpc, - std::shared_ptr<TGRpcConnectionsImpl> connections, - TDbDriverStatePtr dbState - ) - : Rpc(rpc) - , Connections(std::move(connections)) - , DbDriverState(dbState) - { +// Gets source endpoint for the whole driver (or persqueue client) +// and endpoint that was given us by the cluster discovery service +// and gives endpoint for the current LB cluster. +// For examples see tests. +TString ApplyClusterEndpoint(TStringBuf driverEndpoint, const TString& clusterDiscoveryEndpoint); + +// Factory for IStreamRequestReadWriteProcessor +// It is created in order to separate grpc transport logic from +// the logic of session. +// So there is grpc factory implementation to use in SDK +// and another one to use in tests for testing only session logic +// without transport stuff. +template <class TRequest, class TResponse> +struct ISessionConnectionProcessorFactory { + using IProcessor = NGrpc::IStreamRequestReadWriteProcessor<TRequest, TResponse>; + using TConnectedCallback = std::function<void(TPlainStatus&&, typename IProcessor::TPtr&&)>; + using TConnectTimeoutCallback = std::function<void(bool ok)>; + + virtual ~ISessionConnectionProcessorFactory() = default; + + // Creates processor + virtual void CreateProcessor( + // Params for connect. + TConnectedCallback callback, + const TRpcRequestSettings& requestSettings, + NGrpc::IQueueClientContextPtr connectContext, + // Params for timeout and its cancellation. + TDuration connectTimeout, + NGrpc::IQueueClientContextPtr connectTimeoutContext, + TConnectTimeoutCallback connectTimeoutCallback, + // Params for delay before reconnect and its cancellation. 
+ TDuration connectDelay = TDuration::Zero(), + NGrpc::IQueueClientContextPtr connectDelayOperationContext = nullptr) = 0; +}; + +template <class TService, class TRequest, class TResponse> +class TSessionConnectionProcessorFactory : public ISessionConnectionProcessorFactory<TRequest, TResponse>, + public std::enable_shared_from_this<TSessionConnectionProcessorFactory<TService, TRequest, TResponse>> +{ +public: + using TConnectedCallback = typename ISessionConnectionProcessorFactory<TRequest, TResponse>::TConnectedCallback; + using TConnectTimeoutCallback = typename ISessionConnectionProcessorFactory<TRequest, TResponse>::TConnectTimeoutCallback; + TSessionConnectionProcessorFactory( + TGRpcConnectionsImpl::TStreamRpc<TService, TRequest, TResponse, NGrpc::TStreamRequestReadWriteProcessor> rpc, + std::shared_ptr<TGRpcConnectionsImpl> connections, + TDbDriverStatePtr dbState + ) + : Rpc(rpc) + , Connections(std::move(connections)) + , DbDriverState(dbState) + { + } + + void CreateProcessor( + TConnectedCallback callback, + const TRpcRequestSettings& requestSettings, + NGrpc::IQueueClientContextPtr connectContext, + TDuration connectTimeout, + NGrpc::IQueueClientContextPtr connectTimeoutContext, + TConnectTimeoutCallback connectTimeoutCallback, + TDuration connectDelay, + NGrpc::IQueueClientContextPtr connectDelayOperationContext) override + { + Y_ASSERT(connectContext); + Y_ASSERT(connectTimeoutContext); + Y_ASSERT((connectDelay == TDuration::Zero()) == !connectDelayOperationContext); + if (connectDelay == TDuration::Zero()) { + Connect(std::move(callback), + requestSettings, + std::move(connectContext), + connectTimeout, + std::move(connectTimeoutContext), + std::move(connectTimeoutCallback)); + } else { + auto connect = [ + weakThis = this->weak_from_this(), + callback = std::move(callback), + requestSettings, + connectContext = std::move(connectContext), + connectTimeout, + connectTimeoutContext = std::move(connectTimeoutContext), + connectTimeoutCallback = std::move(connectTimeoutCallback) + ] (bool ok) + { + if (!ok) { + return; + } + + if (auto sharedThis = weakThis.lock()) { + sharedThis->Connect( + std::move(callback), + requestSettings, + std::move(connectContext), + connectTimeout, + std::move(connectTimeoutContext), + std::move(connectTimeoutCallback) + ); + } + }; + + Connections->ScheduleCallback( + connectDelay, + std::move(connect), + std::move(connectDelayOperationContext) + ); + } + } + +private: + void Connect( + TConnectedCallback callback, + const TRpcRequestSettings& requestSettings, + NGrpc::IQueueClientContextPtr connectContext, + TDuration connectTimeout, + NGrpc::IQueueClientContextPtr connectTimeoutContext, + TConnectTimeoutCallback connectTimeoutCallback) + { + Connections->StartBidirectionalStream<TService, TRequest, TResponse>( + std::move(callback), + Rpc, + DbDriverState, + requestSettings, + std::move(connectContext) + ); + + Connections->ScheduleCallback( + connectTimeout, + std::move(connectTimeoutCallback), + std::move(connectTimeoutContext) + ); + } + +private: + TGRpcConnectionsImpl::TStreamRpc<TService, TRequest, TResponse, NGrpc::TStreamRequestReadWriteProcessor> Rpc; + std::shared_ptr<TGRpcConnectionsImpl> Connections; + TDbDriverStatePtr DbDriverState; +}; + +template <class TService, class TRequest, class TResponse> +std::shared_ptr<ISessionConnectionProcessorFactory<TRequest, TResponse>> + CreateConnectionProcessorFactory( + TGRpcConnectionsImpl::TStreamRpc<TService, TRequest, TResponse, NGrpc::TStreamRequestReadWriteProcessor> rpc, + 
std::shared_ptr<TGRpcConnectionsImpl> connections, + TDbDriverStatePtr dbState + ) +{ + return std::make_shared<TSessionConnectionProcessorFactory<TService, TRequest, TResponse>>(rpc, std::move(connections), std::move(dbState)); +} + + + +template <class TEvent_> +struct TBaseEventInfo { + using TEvent = TEvent_; + + TEvent Event; + + TEvent& GetEvent() { + return Event; } - void CreateProcessor( - TConnectedCallback callback, - const TRpcRequestSettings& requestSettings, - NGrpc::IQueueClientContextPtr connectContext, - TDuration connectTimeout, - NGrpc::IQueueClientContextPtr connectTimeoutContext, - TConnectTimeoutCallback connectTimeoutCallback, - TDuration connectDelay, - NGrpc::IQueueClientContextPtr connectDelayOperationContext) override - { - Y_ASSERT(connectContext); - Y_ASSERT(connectTimeoutContext); - Y_ASSERT((connectDelay == TDuration::Zero()) == !connectDelayOperationContext); - if (connectDelay == TDuration::Zero()) { - Connect(std::move(callback), - requestSettings, - std::move(connectContext), - connectTimeout, - std::move(connectTimeoutContext), - std::move(connectTimeoutCallback)); - } else { - auto connect = [ - weakThis = this->weak_from_this(), - callback = std::move(callback), - requestSettings, - connectContext = std::move(connectContext), - connectTimeout, - connectTimeoutContext = std::move(connectTimeoutContext), - connectTimeoutCallback = std::move(connectTimeoutCallback) - ] (bool ok) - { - if (!ok) { - return; - } - - if (auto sharedThis = weakThis.lock()) { - sharedThis->Connect( - std::move(callback), - requestSettings, - std::move(connectContext), - connectTimeout, - std::move(connectTimeoutContext), - std::move(connectTimeoutCallback) - ); - } - }; - - Connections->ScheduleCallback( - connectDelay, - std::move(connect), - std::move(connectDelayOperationContext) - ); - } - } - -private: - void Connect( - TConnectedCallback callback, - const TRpcRequestSettings& requestSettings, - NGrpc::IQueueClientContextPtr connectContext, - TDuration connectTimeout, - NGrpc::IQueueClientContextPtr connectTimeoutContext, - TConnectTimeoutCallback connectTimeoutCallback) - { - Connections->StartBidirectionalStream<TService, TRequest, TResponse>( - std::move(callback), - Rpc, - DbDriverState, - requestSettings, - std::move(connectContext) - ); - - Connections->ScheduleCallback( - connectTimeout, - std::move(connectTimeoutCallback), - std::move(connectTimeoutContext) - ); + void OnUserRetrievedEvent() { } -private: - TGRpcConnectionsImpl::TStreamRpc<TService, TRequest, TResponse, NGrpc::TStreamRequestReadWriteProcessor> Rpc; - std::shared_ptr<TGRpcConnectionsImpl> Connections; - TDbDriverStatePtr DbDriverState; -}; - -template <class TService, class TRequest, class TResponse> -std::shared_ptr<ISessionConnectionProcessorFactory<TRequest, TResponse>> - CreateConnectionProcessorFactory( - TGRpcConnectionsImpl::TStreamRpc<TService, TRequest, TResponse, NGrpc::TStreamRequestReadWriteProcessor> rpc, - std::shared_ptr<TGRpcConnectionsImpl> connections, - TDbDriverStatePtr dbState - ) -{ - return std::make_shared<TSessionConnectionProcessorFactory<TService, TRequest, TResponse>>(rpc, std::move(connections), std::move(dbState)); -} - - - -template <class TEvent_> -struct TBaseEventInfo { - using TEvent = TEvent_; - - TEvent Event; - - TEvent& GetEvent() { - return Event; - } - - void OnUserRetrievedEvent() { - } - - template <class T> - TBaseEventInfo(T&& event) - : Event(std::forward<T>(event)) - {} -}; + template <class T> + TBaseEventInfo(T&& event) + : 
Event(std::forward<T>(event)) + {} +}; class ISignalable { @@ -265,94 +265,94 @@ private: -// Class that is responsible for: -// - events queue; -// - signalling futures that wait for events; -// - packing events for waiters; -// - waking up waiters. -// Thread safe. -template <class TSettings_, class TEvent_, class TEventInfo_ = TBaseEventInfo<TEvent_>> +// Class that is responsible for: +// - events queue; +// - signalling futures that wait for events; +// - packing events for waiters; +// - waking up waiters. +// Thread safe. +template <class TSettings_, class TEvent_, class TEventInfo_ = TBaseEventInfo<TEvent_>> class TBaseSessionEventsQueue : public ISignalable { -protected: - using TSelf = TBaseSessionEventsQueue<TSettings_, TEvent_, TEventInfo_>; - using TSettings = TSettings_; - using TEvent = TEvent_; - using TEventInfo = TEventInfo_; +protected: + using TSelf = TBaseSessionEventsQueue<TSettings_, TEvent_, TEventInfo_>; + using TSettings = TSettings_; + using TEvent = TEvent_; + using TEventInfo = TEventInfo_; - // Template for visitor implementation. - struct TBaseHandlersVisitor { + // Template for visitor implementation. + struct TBaseHandlersVisitor { TBaseHandlersVisitor(const TSettings& settings, TEventInfo& eventInfo) - : Settings(settings) - , EventInfo(eventInfo) + : Settings(settings) + , EventInfo(eventInfo) {} - template <class TEventType, class TFunc, class TCommonFunc> - bool PushHandler(TEventInfo&& eventInfo, const TFunc& specific, const TCommonFunc& common) { - if (specific) { - PushSpecificHandler<TEventType>(std::move(eventInfo), specific); - return true; - } - if (common) { - PushCommonHandler(std::move(eventInfo), common); - return true; + template <class TEventType, class TFunc, class TCommonFunc> + bool PushHandler(TEventInfo&& eventInfo, const TFunc& specific, const TCommonFunc& common) { + if (specific) { + PushSpecificHandler<TEventType>(std::move(eventInfo), specific); + return true; } - return false; - } - - template <class TEventType, class TFunc> - void PushSpecificHandler(TEventInfo&& eventInfo, const TFunc& f) { - Post(Settings.EventHandlers_.HandlersExecutor_, [func = f, event = std::move(eventInfo)]() mutable { - event.OnUserRetrievedEvent(); - func(std::get<TEventType>(event.GetEvent())); - }); + if (common) { + PushCommonHandler(std::move(eventInfo), common); + return true; + } + return false; } - - template <class TFunc> - void PushCommonHandler(TEventInfo&& eventInfo, const TFunc& f) { - Post(Settings.EventHandlers_.HandlersExecutor_, [func = f, event = std::move(eventInfo)]() mutable { - event.OnUserRetrievedEvent(); - func(event.GetEvent()); - }); - } - + + template <class TEventType, class TFunc> + void PushSpecificHandler(TEventInfo&& eventInfo, const TFunc& f) { + Post(Settings.EventHandlers_.HandlersExecutor_, [func = f, event = std::move(eventInfo)]() mutable { + event.OnUserRetrievedEvent(); + func(std::get<TEventType>(event.GetEvent())); + }); + } + + template <class TFunc> + void PushCommonHandler(TEventInfo&& eventInfo, const TFunc& f) { + Post(Settings.EventHandlers_.HandlersExecutor_, [func = f, event = std::move(eventInfo)]() mutable { + event.OnUserRetrievedEvent(); + func(event.GetEvent()); + }); + } + virtual void Post(const IExecutor::TPtr& executor, IExecutor::TFunction&& f) { - executor->Post(std::move(f)); - } - + executor->Post(std::move(f)); + } + const TSettings& Settings; TEventInfo& EventInfo; }; -public: - TBaseSessionEventsQueue(const TSettings& settings) - : Settings(settings) +public: + 
TBaseSessionEventsQueue(const TSettings& settings) + : Settings(settings) , Waiter(NThreading::NewPromise<void>(), this) - {} - - virtual ~TBaseSessionEventsQueue() = default; + {} + virtual ~TBaseSessionEventsQueue() = default; + void Signal() override { CondVar.Signal(); } protected: - virtual bool HasEventsImpl() const { // Assumes that we're under lock. + virtual bool HasEventsImpl() const { // Assumes that we're under lock. return !Events.empty() || CloseEvent; } - TWaiter PopWaiterImpl() { // Assumes that we're under lock. + TWaiter PopWaiterImpl() { // Assumes that we're under lock. TWaiter waiter(Waiter.ExtractPromise(), this); return std::move(waiter); - } - - void WaitEventsImpl() { // Assumes that we're under lock. Posteffect: HasEventsImpl() is true. - while (!HasEventsImpl()) { - CondVar.WaitI(Mutex); - } - } + } + + void WaitEventsImpl() { // Assumes that we're under lock. Posteffect: HasEventsImpl() is true. + while (!HasEventsImpl()) { + CondVar.WaitI(Mutex); + } + } void RenewWaiterImpl() { if (Events.empty() && Waiter.GetFuture().HasValue()) { @@ -360,19 +360,19 @@ protected: } } -public: - NThreading::TFuture<void> WaitEvent() { - with_lock (Mutex) { - if (HasEventsImpl()) { - return NThreading::MakeFuture(); // Signalled - } else { +public: + NThreading::TFuture<void> WaitEvent() { + with_lock (Mutex) { + if (HasEventsImpl()) { + return NThreading::MakeFuture(); // Signalled + } else { Y_VERIFY(Waiter.Valid()); auto res = Waiter.GetFuture(); return res; - } - } - } - + } + } + } + protected: const TSettings& Settings; TWaiter Waiter; @@ -383,48 +383,48 @@ protected: std::atomic<bool> Closed = false; }; -class IAsyncExecutor : public IExecutor { +class IAsyncExecutor : public IExecutor { private: virtual void PostImpl(TVector<std::function<void()>>&&) = 0; virtual void PostImpl(std::function<void()>&&) = 0; public: - bool IsAsync() const override { + bool IsAsync() const override { return true; } // Post Implementation MUST NOT run f before it returns void Post(TFunction&& f) final; }; -IExecutor::TPtr CreateDefaultExecutor(); +IExecutor::TPtr CreateDefaultExecutor(); class TThreadPoolExecutor : public IAsyncExecutor { private: - std::shared_ptr<IThreadPool> ThreadPool; + std::shared_ptr<IThreadPool> ThreadPool; public: - TThreadPoolExecutor(std::shared_ptr<IThreadPool> threadPool); - TThreadPoolExecutor(size_t threadsCount); + TThreadPoolExecutor(std::shared_ptr<IThreadPool> threadPool); + TThreadPoolExecutor(size_t threadsCount); ~TThreadPoolExecutor() = default; - bool IsAsync() const override { - return !IsFakeThreadPool; - } - - void DoStart() override { - if (ThreadsCount) { - ThreadPool->Start(ThreadsCount); - } - } - + bool IsAsync() const override { + return !IsFakeThreadPool; + } + + void DoStart() override { + if (ThreadsCount) { + ThreadPool->Start(ThreadsCount); + } + } + private: void PostImpl(TVector<TFunction>&& fs) override; void PostImpl(TFunction&& f) override; - -private: - bool IsFakeThreadPool = false; - size_t ThreadsCount = 0; + +private: + bool IsFakeThreadPool = false; + size_t ThreadsCount = 0; }; class TSerialExecutor : public IAsyncExecutor, public std::enable_shared_from_this<TSerialExecutor> { @@ -444,18 +444,18 @@ private: void PostNext(); }; -class TSyncExecutor : public IExecutor { +class TSyncExecutor : public IExecutor { public: void Post(TFunction&& f) final { return f(); } - bool IsAsync() const final { + bool IsAsync() const final { return false; } - void DoStart() override { - } + void DoStart() override { + } }; -IExecutor::TPtr 
CreateGenericExecutor(); +IExecutor::TPtr CreateGenericExecutor(); -} // namespace NYdb::NPersQueue +} // namespace NYdb::NPersQueue diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue.cpp b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue.cpp index a22ed23cead..aeb2be286f4 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue.cpp +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue.cpp @@ -1,163 +1,163 @@ - + #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h> #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue_impl.h> #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/read_session.h> - + #include <ydb/library/persqueue/obfuscate/obfuscate.h> - -#include <util/random/random.h> -#include <util/string/cast.h> -#include <util/string/subst.h> - -namespace NYdb::NPersQueue { - -const TVector<ECodec>& GetDefaultCodecs() { - static const TVector<ECodec> codecs = {ECodec::RAW, ECodec::GZIP, ECodec::LZOP}; - return codecs; -} - -using TTopicSettingsCreate = TTopicSettings<TCreateTopicSettings>; -using TTopicSettingsAlter = TTopicSettings<TAlterTopicSettings>; - -TCredentials::TCredentials(const Ydb::PersQueue::V1::Credentials& settings) - : Credentials_(settings) -{ - switch (Credentials_.credentials_case()) { - case Ydb::PersQueue::V1::Credentials::kOauthToken: { - Mode_ = EMode::OAUTH_TOKEN; - break; - } - case Ydb::PersQueue::V1::Credentials::kJwtParams: { - Mode_ = EMode::JWT_PARAMS; - break; - } - case Ydb::PersQueue::V1::Credentials::kIam: { - Mode_ = EMode::IAM; - break; - } - case Ydb::PersQueue::V1::Credentials::CREDENTIALS_NOT_SET: { - Mode_ = EMode::NOT_SET; - break; - } - default: { - ythrow yexception() << "unsupported credentials type " << ::NPersQueue::ObfuscateString(ToString(Credentials_)); - } - } -} - -TCredentials::EMode TCredentials::GetMode() const { - return Mode_; -} - -TString TCredentials::GetOauthToken() const { - Y_ENSURE(GetMode() == EMode::OAUTH_TOKEN); - return Credentials_.oauth_token(); -} - -TString TCredentials::GetJwtParams() const { - Y_ENSURE(GetMode() == EMode::JWT_PARAMS); - return Credentials_.jwt_params(); -} - -TString TCredentials::GetIamEndpoint() const { - Y_ENSURE(GetMode() == EMode::IAM); - return Credentials_.iam().endpoint(); -} - -TString TCredentials::GetIamServiceAccountKey() const { - Y_ENSURE(GetMode() == EMode::IAM); - return Credentials_.iam().service_account_key(); -} - -TDescribeTopicResult::TDescribeTopicResult(TStatus status, const Ydb::PersQueue::V1::DescribeTopicResult& result) - : TStatus(std::move(status)) - , TopicSettings_(result.settings()) + +#include <util/random/random.h> +#include <util/string/cast.h> +#include <util/string/subst.h> + +namespace NYdb::NPersQueue { + +const TVector<ECodec>& GetDefaultCodecs() { + static const TVector<ECodec> codecs = {ECodec::RAW, ECodec::GZIP, ECodec::LZOP}; + return codecs; +} + +using TTopicSettingsCreate = TTopicSettings<TCreateTopicSettings>; +using TTopicSettingsAlter = TTopicSettings<TAlterTopicSettings>; + +TCredentials::TCredentials(const Ydb::PersQueue::V1::Credentials& settings) + : Credentials_(settings) +{ + switch (Credentials_.credentials_case()) { + case Ydb::PersQueue::V1::Credentials::kOauthToken: { + Mode_ = EMode::OAUTH_TOKEN; + break; + } + case Ydb::PersQueue::V1::Credentials::kJwtParams: { + Mode_ = EMode::JWT_PARAMS; + break; + } + case Ydb::PersQueue::V1::Credentials::kIam: { + Mode_ = EMode::IAM; + break; + } + case 
Ydb::PersQueue::V1::Credentials::CREDENTIALS_NOT_SET: { + Mode_ = EMode::NOT_SET; + break; + } + default: { + ythrow yexception() << "unsupported credentials type " << ::NPersQueue::ObfuscateString(ToString(Credentials_)); + } + } +} + +TCredentials::EMode TCredentials::GetMode() const { + return Mode_; +} + +TString TCredentials::GetOauthToken() const { + Y_ENSURE(GetMode() == EMode::OAUTH_TOKEN); + return Credentials_.oauth_token(); +} + +TString TCredentials::GetJwtParams() const { + Y_ENSURE(GetMode() == EMode::JWT_PARAMS); + return Credentials_.jwt_params(); +} + +TString TCredentials::GetIamEndpoint() const { + Y_ENSURE(GetMode() == EMode::IAM); + return Credentials_.iam().endpoint(); +} + +TString TCredentials::GetIamServiceAccountKey() const { + Y_ENSURE(GetMode() == EMode::IAM); + return Credentials_.iam().service_account_key(); +} + +TDescribeTopicResult::TDescribeTopicResult(TStatus status, const Ydb::PersQueue::V1::DescribeTopicResult& result) + : TStatus(std::move(status)) + , TopicSettings_(result.settings()) , Proto_(result) -{ -} - -TDescribeTopicResult::TTopicSettings::TTopicSettings(const Ydb::PersQueue::V1::TopicSettings& settings) { - - PartitionsCount_ = settings.partitions_count(); - RetentionPeriod_ = TDuration::MilliSeconds(settings.retention_period_ms()); - SupportedFormat_ = static_cast<EFormat>(settings.supported_format()); - - for (const auto& codec : settings.supported_codecs()) { - SupportedCodecs_.push_back(static_cast<ECodec>(codec)); - } - MaxPartitionStorageSize_ = settings.max_partition_storage_size(); - MaxPartitionWriteSpeed_ = settings.max_partition_write_speed(); - MaxPartitionWriteBurst_ = settings.max_partition_write_burst(); - ClientWriteDisabled_ = settings.client_write_disabled(); +{ +} + +TDescribeTopicResult::TTopicSettings::TTopicSettings(const Ydb::PersQueue::V1::TopicSettings& settings) { + + PartitionsCount_ = settings.partitions_count(); + RetentionPeriod_ = TDuration::MilliSeconds(settings.retention_period_ms()); + SupportedFormat_ = static_cast<EFormat>(settings.supported_format()); + + for (const auto& codec : settings.supported_codecs()) { + SupportedCodecs_.push_back(static_cast<ECodec>(codec)); + } + MaxPartitionStorageSize_ = settings.max_partition_storage_size(); + MaxPartitionWriteSpeed_ = settings.max_partition_write_speed(); + MaxPartitionWriteBurst_ = settings.max_partition_write_burst(); + ClientWriteDisabled_ = settings.client_write_disabled(); AllowUnauthenticatedRead_ = AllowUnauthenticatedWrite_ = false; AbcId_ = 0; AbcSlug_ = ""; - for (auto& pair : settings.attributes()) { - if (pair.first == "_partitions_per_tablet") { - PartitionsPerTablet_ = FromString<ui32>(pair.second); - } else if (pair.first == "_allow_unauthenticated_read") { - AllowUnauthenticatedRead_ = FromString<bool>(pair.second); - } else if (pair.first == "_allow_unauthenticated_write") { - AllowUnauthenticatedWrite_ = FromString<bool>(pair.second); + for (auto& pair : settings.attributes()) { + if (pair.first == "_partitions_per_tablet") { + PartitionsPerTablet_ = FromString<ui32>(pair.second); + } else if (pair.first == "_allow_unauthenticated_read") { + AllowUnauthenticatedRead_ = FromString<bool>(pair.second); + } else if (pair.first == "_allow_unauthenticated_write") { + AllowUnauthenticatedWrite_ = FromString<bool>(pair.second); } else if (pair.first == "_abc_id") { AbcId_ = FromString<ui32>(pair.second); } else if (pair.first == "_abc_slug") { AbcSlug_ = pair.second; - } - } - for (const auto& readRule : settings.read_rules()) { - 
ReadRules_.emplace_back(readRule); - } - if (settings.has_remote_mirror_rule()) { - RemoteMirrorRule_ = settings.remote_mirror_rule(); - } -} - - -TDescribeTopicResult::TTopicSettings::TReadRule::TReadRule(const Ydb::PersQueue::V1::TopicSettings::ReadRule& settings) { - - ConsumerName_ = settings.consumer_name(); - Important_ = settings.important(); - StartingMessageTimestamp_ = TInstant::MilliSeconds(settings.starting_message_timestamp_ms()); - - SupportedFormat_ = static_cast<EFormat>(settings.supported_format()); - for (const auto& codec : settings.supported_codecs()) { - SupportedCodecs_.push_back(static_cast<ECodec>(codec)); - } - Version_ = settings.version(); + } + } + for (const auto& readRule : settings.read_rules()) { + ReadRules_.emplace_back(readRule); + } + if (settings.has_remote_mirror_rule()) { + RemoteMirrorRule_ = settings.remote_mirror_rule(); + } +} + + +TDescribeTopicResult::TTopicSettings::TReadRule::TReadRule(const Ydb::PersQueue::V1::TopicSettings::ReadRule& settings) { + + ConsumerName_ = settings.consumer_name(); + Important_ = settings.important(); + StartingMessageTimestamp_ = TInstant::MilliSeconds(settings.starting_message_timestamp_ms()); + + SupportedFormat_ = static_cast<EFormat>(settings.supported_format()); + for (const auto& codec : settings.supported_codecs()) { + SupportedCodecs_.push_back(static_cast<ECodec>(codec)); + } + Version_ = settings.version(); ServiceType_ = settings.service_type(); -} - -TDescribeTopicResult::TTopicSettings::TRemoteMirrorRule::TRemoteMirrorRule(const Ydb::PersQueue::V1::TopicSettings::RemoteMirrorRule& settings) - : Credentials_(settings.credentials()) -{ - Endpoint_ = settings.endpoint(); - TopicPath_ = settings.topic_path(); - ConsumerName_ = settings.consumer_name(); - StartingMessageTimestamp_ = TInstant::MilliSeconds(settings.starting_message_timestamp_ms()); - Database_ = settings.database(); -} - -TPersQueueClient::TPersQueueClient(const TDriver& driver, const TPersQueueClientSettings& settings) - : Impl_(std::make_shared<TImpl>(CreateInternalInterface(driver), settings)) -{ -} - - - -TAsyncStatus TPersQueueClient::CreateTopic(const TString& path, const TCreateTopicSettings& settings) { - return Impl_->CreateTopic(path, settings); -} - -TAsyncStatus TPersQueueClient::AlterTopic(const TString& path, const TAlterTopicSettings& settings) { - return Impl_->AlterTopic(path, settings); -} - -TAsyncStatus TPersQueueClient::DropTopic(const TString& path, const TDropTopicSettings& settings) { - return Impl_->DropTopic(path, settings); -} - +} + +TDescribeTopicResult::TTopicSettings::TRemoteMirrorRule::TRemoteMirrorRule(const Ydb::PersQueue::V1::TopicSettings::RemoteMirrorRule& settings) + : Credentials_(settings.credentials()) +{ + Endpoint_ = settings.endpoint(); + TopicPath_ = settings.topic_path(); + ConsumerName_ = settings.consumer_name(); + StartingMessageTimestamp_ = TInstant::MilliSeconds(settings.starting_message_timestamp_ms()); + Database_ = settings.database(); +} + +TPersQueueClient::TPersQueueClient(const TDriver& driver, const TPersQueueClientSettings& settings) + : Impl_(std::make_shared<TImpl>(CreateInternalInterface(driver), settings)) +{ +} + + + +TAsyncStatus TPersQueueClient::CreateTopic(const TString& path, const TCreateTopicSettings& settings) { + return Impl_->CreateTopic(path, settings); +} + +TAsyncStatus TPersQueueClient::AlterTopic(const TString& path, const TAlterTopicSettings& settings) { + return Impl_->AlterTopic(path, settings); +} + +TAsyncStatus TPersQueueClient::DropTopic(const TString& 
path, const TDropTopicSettings& settings) { + return Impl_->DropTopic(path, settings); +} + TAsyncStatus TPersQueueClient::AddReadRule(const TString& path, const TAddReadRuleSettings& settings) { return Impl_->AddReadRule(path, settings); } @@ -166,227 +166,227 @@ TAsyncStatus TPersQueueClient::RemoveReadRule(const TString& path, const TRemove return Impl_->RemoveReadRule(path, settings); } -TAsyncDescribeTopicResult TPersQueueClient::DescribeTopic(const TString& path, const TDescribeTopicSettings& settings) { - return Impl_->DescribeTopic(path, settings); -} - -namespace { - -struct TNoRetryState : IRetryState { - TMaybe<TDuration> GetNextRetryDelay(const TStatus&) override { - return Nothing(); - } -}; - -struct TNoRetryPolicy : IRetryPolicy { - IRetryState::TPtr CreateRetryState() const override { - return std::make_unique<TNoRetryState>(); - } -}; - -TDuration RandomizeDelay(TDuration baseDelay) { - const TDuration::TValue half = baseDelay.GetValue() / 2; - return TDuration::FromValue(half + RandomNumber<TDuration::TValue>(half)); -} - -struct TExponentialBackoffState : IRetryState { - TExponentialBackoffState(TDuration minDelay, - TDuration minLongRetryDelay, - TDuration maxDelay, - size_t maxRetries, - TDuration maxTime, - double scaleFactor, - std::function<IRetryPolicy::ERetryErrorClass(EStatus)> retryErrorClassFunction) - : MinLongRetryDelay(minLongRetryDelay) - , MaxDelay(maxDelay) - , MaxRetries(maxRetries) - , MaxTime(maxTime) - , ScaleFactor(scaleFactor) - , StartTime(maxTime != TDuration::Max() ? TInstant::Now() : TInstant::Zero()) - , CurrentDelay(minDelay) - , AttemptsDone(0) - , RetryErrorClassFunction(retryErrorClassFunction) - { - } - - TMaybe<TDuration> GetNextRetryDelay(const TStatus& status) override { - const IRetryPolicy::ERetryErrorClass errorClass = RetryErrorClassFunction(status.GetStatus()); - if (AttemptsDone >= MaxRetries || StartTime && TInstant::Now() - StartTime >= MaxTime || errorClass == IRetryPolicy::ERetryErrorClass::NoRetry) { - return Nothing(); - } - - if (errorClass == IRetryPolicy::ERetryErrorClass::LongRetry) { - CurrentDelay = Max(CurrentDelay, MinLongRetryDelay); - } - - const TDuration delay = RandomizeDelay(CurrentDelay); - - if (CurrentDelay < MaxDelay) { - CurrentDelay = Min(CurrentDelay * ScaleFactor, MaxDelay); - } - - ++AttemptsDone; - return delay; - } - - const TDuration MinLongRetryDelay; - const TDuration MaxDelay; - const size_t MaxRetries; - const TDuration MaxTime; - const double ScaleFactor; - const TInstant StartTime; - TDuration CurrentDelay; - size_t AttemptsDone; - std::function<IRetryPolicy::ERetryErrorClass(EStatus)> RetryErrorClassFunction; -}; - -struct TExponentialBackoffPolicy : IRetryPolicy { - TExponentialBackoffPolicy(TDuration minDelay, - TDuration minLongRetryDelay, - TDuration maxDelay, - size_t maxRetries, - TDuration maxTime, - double scaleFactor, - std::function<IRetryPolicy::ERetryErrorClass(EStatus)> customRetryClassFunction) - : MinDelay(minDelay) - , MinLongRetryDelay(minLongRetryDelay) - , MaxDelay(maxDelay) - , MaxRetries(maxRetries) - , MaxTime(maxTime) - , ScaleFactor(scaleFactor) - , RetryErrorClassFunction(customRetryClassFunction ? 
customRetryClassFunction : GetRetryErrorClass) - { - Y_ASSERT(MinDelay < MaxDelay); - Y_ASSERT(MinLongRetryDelay < MaxDelay); - Y_ASSERT(MinLongRetryDelay >= MinDelay); - Y_ASSERT(ScaleFactor > 1.0); - Y_ASSERT(MaxRetries > 0); - Y_ASSERT(MaxTime > MinDelay); - } - - IRetryState::TPtr CreateRetryState() const override { - return std::make_unique<TExponentialBackoffState>(MinDelay, MinLongRetryDelay, MaxDelay, MaxRetries, MaxTime, ScaleFactor, RetryErrorClassFunction); - } - - const TDuration MinDelay; - const TDuration MinLongRetryDelay; - const TDuration MaxDelay; - const size_t MaxRetries; - const TDuration MaxTime; - const double ScaleFactor; - std::function<IRetryPolicy::ERetryErrorClass(EStatus)> RetryErrorClassFunction; -}; - -struct TFixedIntervalState : IRetryState { - TFixedIntervalState(TDuration delay, - TDuration longRetryDelay, - size_t maxRetries, - TDuration maxTime, - std::function<IRetryPolicy::ERetryErrorClass(EStatus)> retryErrorClassFunction) - : Delay(delay) - , LongRetryDelay(longRetryDelay) - , MaxRetries(maxRetries) - , MaxTime(maxTime) - , StartTime(maxTime != TDuration::Max() ? TInstant::Now() : TInstant::Zero()) - , AttemptsDone(0) - , RetryErrorClassFunction(retryErrorClassFunction) - { - } - - TMaybe<TDuration> GetNextRetryDelay(const TStatus& status) override { - const IRetryPolicy::ERetryErrorClass errorClass = RetryErrorClassFunction(status.GetStatus()); - if (AttemptsDone >= MaxRetries || StartTime && TInstant::Now() - StartTime >= MaxTime || errorClass == IRetryPolicy::ERetryErrorClass::NoRetry) { - return Nothing(); - } - - const TDuration delay = RandomizeDelay(errorClass == IRetryPolicy::ERetryErrorClass::LongRetry ? LongRetryDelay : Delay); - - ++AttemptsDone; - return delay; - } - - const TDuration Delay; - const TDuration LongRetryDelay; - const size_t MaxRetries; - const TDuration MaxTime; - const TInstant StartTime; - size_t AttemptsDone; - std::function<IRetryPolicy::ERetryErrorClass(EStatus)> RetryErrorClassFunction; -}; - -struct TFixedIntervalPolicy : IRetryPolicy { - TFixedIntervalPolicy(TDuration delay, - TDuration longRetryDelay, - size_t maxRetries, - TDuration maxTime, - std::function<IRetryPolicy::ERetryErrorClass(EStatus)> customRetryClassFunction) - : Delay(delay) - , LongRetryDelay(longRetryDelay) - , MaxRetries(maxRetries) - , MaxTime(maxTime) - , RetryErrorClassFunction(customRetryClassFunction ? 
customRetryClassFunction : GetRetryErrorClass) - { - Y_ASSERT(MaxRetries > 0); - Y_ASSERT(MaxTime > Delay); - Y_ASSERT(MaxTime > LongRetryDelay); - Y_ASSERT(LongRetryDelay >= Delay); - } - - IRetryState::TPtr CreateRetryState() const override { - return std::make_unique<TFixedIntervalState>(Delay, LongRetryDelay, MaxRetries, MaxTime, RetryErrorClassFunction); - } - - const TDuration Delay; - const TDuration LongRetryDelay; - const size_t MaxRetries; - const TDuration MaxTime; - std::function<IRetryPolicy::ERetryErrorClass(EStatus)> RetryErrorClassFunction; -}; - -} // namespace - -IRetryPolicy::TPtr IRetryPolicy::GetDefaultPolicy() { - static IRetryPolicy::TPtr policy = GetExponentialBackoffPolicy(); - return policy; -} - -IRetryPolicy::TPtr IRetryPolicy::GetNoRetryPolicy() { - static IRetryPolicy::TPtr policy = std::make_shared<TNoRetryPolicy>(); - return policy; -} - -IRetryPolicy::TPtr IRetryPolicy::GetExponentialBackoffPolicy(TDuration minDelay, - TDuration minLongRetryDelay, - TDuration maxDelay, - size_t maxRetries, - TDuration maxTime, - double scaleFactor, - std::function<IRetryPolicy::ERetryErrorClass(EStatus)> customRetryClassFunction) -{ - return std::make_shared<TExponentialBackoffPolicy>(minDelay, minLongRetryDelay, maxDelay, maxRetries, maxTime, scaleFactor, customRetryClassFunction); -} - -IRetryPolicy::TPtr IRetryPolicy::GetFixedIntervalPolicy(TDuration delay, - TDuration longRetryDelay, - size_t maxRetries, - TDuration maxTime, - std::function<IRetryPolicy::ERetryErrorClass(EStatus)> customRetryClassFunction) -{ - return std::make_shared<TFixedIntervalPolicy>(delay, longRetryDelay, maxRetries, maxTime, customRetryClassFunction); -} - -std::shared_ptr<IReadSession> TPersQueueClient::CreateReadSession(const TReadSessionSettings& settings) { - return Impl_->CreateReadSession(settings); -} - -std::shared_ptr<IWriteSession> TPersQueueClient::CreateWriteSession(const TWriteSessionSettings& settings) { +TAsyncDescribeTopicResult TPersQueueClient::DescribeTopic(const TString& path, const TDescribeTopicSettings& settings) { + return Impl_->DescribeTopic(path, settings); +} + +namespace { + +struct TNoRetryState : IRetryState { + TMaybe<TDuration> GetNextRetryDelay(const TStatus&) override { + return Nothing(); + } +}; + +struct TNoRetryPolicy : IRetryPolicy { + IRetryState::TPtr CreateRetryState() const override { + return std::make_unique<TNoRetryState>(); + } +}; + +TDuration RandomizeDelay(TDuration baseDelay) { + const TDuration::TValue half = baseDelay.GetValue() / 2; + return TDuration::FromValue(half + RandomNumber<TDuration::TValue>(half)); +} + +struct TExponentialBackoffState : IRetryState { + TExponentialBackoffState(TDuration minDelay, + TDuration minLongRetryDelay, + TDuration maxDelay, + size_t maxRetries, + TDuration maxTime, + double scaleFactor, + std::function<IRetryPolicy::ERetryErrorClass(EStatus)> retryErrorClassFunction) + : MinLongRetryDelay(minLongRetryDelay) + , MaxDelay(maxDelay) + , MaxRetries(maxRetries) + , MaxTime(maxTime) + , ScaleFactor(scaleFactor) + , StartTime(maxTime != TDuration::Max() ? 
TInstant::Now() : TInstant::Zero()) + , CurrentDelay(minDelay) + , AttemptsDone(0) + , RetryErrorClassFunction(retryErrorClassFunction) + { + } + + TMaybe<TDuration> GetNextRetryDelay(const TStatus& status) override { + const IRetryPolicy::ERetryErrorClass errorClass = RetryErrorClassFunction(status.GetStatus()); + if (AttemptsDone >= MaxRetries || StartTime && TInstant::Now() - StartTime >= MaxTime || errorClass == IRetryPolicy::ERetryErrorClass::NoRetry) { + return Nothing(); + } + + if (errorClass == IRetryPolicy::ERetryErrorClass::LongRetry) { + CurrentDelay = Max(CurrentDelay, MinLongRetryDelay); + } + + const TDuration delay = RandomizeDelay(CurrentDelay); + + if (CurrentDelay < MaxDelay) { + CurrentDelay = Min(CurrentDelay * ScaleFactor, MaxDelay); + } + + ++AttemptsDone; + return delay; + } + + const TDuration MinLongRetryDelay; + const TDuration MaxDelay; + const size_t MaxRetries; + const TDuration MaxTime; + const double ScaleFactor; + const TInstant StartTime; + TDuration CurrentDelay; + size_t AttemptsDone; + std::function<IRetryPolicy::ERetryErrorClass(EStatus)> RetryErrorClassFunction; +}; + +struct TExponentialBackoffPolicy : IRetryPolicy { + TExponentialBackoffPolicy(TDuration minDelay, + TDuration minLongRetryDelay, + TDuration maxDelay, + size_t maxRetries, + TDuration maxTime, + double scaleFactor, + std::function<IRetryPolicy::ERetryErrorClass(EStatus)> customRetryClassFunction) + : MinDelay(minDelay) + , MinLongRetryDelay(minLongRetryDelay) + , MaxDelay(maxDelay) + , MaxRetries(maxRetries) + , MaxTime(maxTime) + , ScaleFactor(scaleFactor) + , RetryErrorClassFunction(customRetryClassFunction ? customRetryClassFunction : GetRetryErrorClass) + { + Y_ASSERT(MinDelay < MaxDelay); + Y_ASSERT(MinLongRetryDelay < MaxDelay); + Y_ASSERT(MinLongRetryDelay >= MinDelay); + Y_ASSERT(ScaleFactor > 1.0); + Y_ASSERT(MaxRetries > 0); + Y_ASSERT(MaxTime > MinDelay); + } + + IRetryState::TPtr CreateRetryState() const override { + return std::make_unique<TExponentialBackoffState>(MinDelay, MinLongRetryDelay, MaxDelay, MaxRetries, MaxTime, ScaleFactor, RetryErrorClassFunction); + } + + const TDuration MinDelay; + const TDuration MinLongRetryDelay; + const TDuration MaxDelay; + const size_t MaxRetries; + const TDuration MaxTime; + const double ScaleFactor; + std::function<IRetryPolicy::ERetryErrorClass(EStatus)> RetryErrorClassFunction; +}; + +struct TFixedIntervalState : IRetryState { + TFixedIntervalState(TDuration delay, + TDuration longRetryDelay, + size_t maxRetries, + TDuration maxTime, + std::function<IRetryPolicy::ERetryErrorClass(EStatus)> retryErrorClassFunction) + : Delay(delay) + , LongRetryDelay(longRetryDelay) + , MaxRetries(maxRetries) + , MaxTime(maxTime) + , StartTime(maxTime != TDuration::Max() ? TInstant::Now() : TInstant::Zero()) + , AttemptsDone(0) + , RetryErrorClassFunction(retryErrorClassFunction) + { + } + + TMaybe<TDuration> GetNextRetryDelay(const TStatus& status) override { + const IRetryPolicy::ERetryErrorClass errorClass = RetryErrorClassFunction(status.GetStatus()); + if (AttemptsDone >= MaxRetries || StartTime && TInstant::Now() - StartTime >= MaxTime || errorClass == IRetryPolicy::ERetryErrorClass::NoRetry) { + return Nothing(); + } + + const TDuration delay = RandomizeDelay(errorClass == IRetryPolicy::ERetryErrorClass::LongRetry ? 
LongRetryDelay : Delay); + + ++AttemptsDone; + return delay; + } + + const TDuration Delay; + const TDuration LongRetryDelay; + const size_t MaxRetries; + const TDuration MaxTime; + const TInstant StartTime; + size_t AttemptsDone; + std::function<IRetryPolicy::ERetryErrorClass(EStatus)> RetryErrorClassFunction; +}; + +struct TFixedIntervalPolicy : IRetryPolicy { + TFixedIntervalPolicy(TDuration delay, + TDuration longRetryDelay, + size_t maxRetries, + TDuration maxTime, + std::function<IRetryPolicy::ERetryErrorClass(EStatus)> customRetryClassFunction) + : Delay(delay) + , LongRetryDelay(longRetryDelay) + , MaxRetries(maxRetries) + , MaxTime(maxTime) + , RetryErrorClassFunction(customRetryClassFunction ? customRetryClassFunction : GetRetryErrorClass) + { + Y_ASSERT(MaxRetries > 0); + Y_ASSERT(MaxTime > Delay); + Y_ASSERT(MaxTime > LongRetryDelay); + Y_ASSERT(LongRetryDelay >= Delay); + } + + IRetryState::TPtr CreateRetryState() const override { + return std::make_unique<TFixedIntervalState>(Delay, LongRetryDelay, MaxRetries, MaxTime, RetryErrorClassFunction); + } + + const TDuration Delay; + const TDuration LongRetryDelay; + const size_t MaxRetries; + const TDuration MaxTime; + std::function<IRetryPolicy::ERetryErrorClass(EStatus)> RetryErrorClassFunction; +}; + +} // namespace + +IRetryPolicy::TPtr IRetryPolicy::GetDefaultPolicy() { + static IRetryPolicy::TPtr policy = GetExponentialBackoffPolicy(); + return policy; +} + +IRetryPolicy::TPtr IRetryPolicy::GetNoRetryPolicy() { + static IRetryPolicy::TPtr policy = std::make_shared<TNoRetryPolicy>(); + return policy; +} + +IRetryPolicy::TPtr IRetryPolicy::GetExponentialBackoffPolicy(TDuration minDelay, + TDuration minLongRetryDelay, + TDuration maxDelay, + size_t maxRetries, + TDuration maxTime, + double scaleFactor, + std::function<IRetryPolicy::ERetryErrorClass(EStatus)> customRetryClassFunction) +{ + return std::make_shared<TExponentialBackoffPolicy>(minDelay, minLongRetryDelay, maxDelay, maxRetries, maxTime, scaleFactor, customRetryClassFunction); +} + +IRetryPolicy::TPtr IRetryPolicy::GetFixedIntervalPolicy(TDuration delay, + TDuration longRetryDelay, + size_t maxRetries, + TDuration maxTime, + std::function<IRetryPolicy::ERetryErrorClass(EStatus)> customRetryClassFunction) +{ + return std::make_shared<TFixedIntervalPolicy>(delay, longRetryDelay, maxRetries, maxTime, customRetryClassFunction); +} + +std::shared_ptr<IReadSession> TPersQueueClient::CreateReadSession(const TReadSessionSettings& settings) { + return Impl_->CreateReadSession(settings); +} + +std::shared_ptr<IWriteSession> TPersQueueClient::CreateWriteSession(const TWriteSessionSettings& settings) { return Impl_->CreateWriteSession(settings); } - -std::shared_ptr<ISimpleBlockingWriteSession> TPersQueueClient::CreateSimpleBlockingWriteSession( + +std::shared_ptr<ISimpleBlockingWriteSession> TPersQueueClient::CreateSimpleBlockingWriteSession( const TWriteSessionSettings& settings ) { return Impl_->CreateSimpleWriteSession(settings); } - -} // namespace NYdb::NPersQueue + +} // namespace NYdb::NPersQueue diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue_impl.cpp b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue_impl.cpp index bd466fd6f3b..ca23acbca2d 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue_impl.cpp +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue_impl.cpp @@ -1,45 +1,45 @@ #include "persqueue_impl.h" #include "read_session.h" #include "write_session.h" - -namespace NYdb::NPersQueue { - 
-std::shared_ptr<IReadSession> TPersQueueClient::TImpl::CreateReadSession(const TReadSessionSettings& settings) { - TMaybe<TReadSessionSettings> maybeSettings; - if (!settings.DecompressionExecutor_ || !settings.EventHandlers_.HandlersExecutor_) { - maybeSettings = settings; - with_lock (Lock) { - if (!settings.DecompressionExecutor_) { - maybeSettings->DecompressionExecutor(Settings.DefaultCompressionExecutor_); - } - if (!settings.EventHandlers_.HandlersExecutor_) { - maybeSettings->EventHandlers_.HandlersExecutor(Settings.DefaultHandlersExecutor_); - } - } - } - auto session = std::make_shared<TReadSession>(maybeSettings.GetOrElse(settings), shared_from_this(), Connections_, DbDriverState_); + +namespace NYdb::NPersQueue { + +std::shared_ptr<IReadSession> TPersQueueClient::TImpl::CreateReadSession(const TReadSessionSettings& settings) { + TMaybe<TReadSessionSettings> maybeSettings; + if (!settings.DecompressionExecutor_ || !settings.EventHandlers_.HandlersExecutor_) { + maybeSettings = settings; + with_lock (Lock) { + if (!settings.DecompressionExecutor_) { + maybeSettings->DecompressionExecutor(Settings.DefaultCompressionExecutor_); + } + if (!settings.EventHandlers_.HandlersExecutor_) { + maybeSettings->EventHandlers_.HandlersExecutor(Settings.DefaultHandlersExecutor_); + } + } + } + auto session = std::make_shared<TReadSession>(maybeSettings.GetOrElse(settings), shared_from_this(), Connections_, DbDriverState_); session->Start(); - return std::move(session); -} - -std::shared_ptr<IWriteSession> TPersQueueClient::TImpl::CreateWriteSession( + return std::move(session); +} + +std::shared_ptr<IWriteSession> TPersQueueClient::TImpl::CreateWriteSession( const TWriteSessionSettings& settings ) { - TMaybe<TWriteSessionSettings> maybeSettings; + TMaybe<TWriteSessionSettings> maybeSettings; if (!settings.CompressionExecutor_ || !settings.EventHandlers_.HandlersExecutor_ || !settings.ClusterDiscoveryMode_) { - maybeSettings = settings; - with_lock (Lock) { - if (!settings.CompressionExecutor_) { - maybeSettings->CompressionExecutor(Settings.DefaultCompressionExecutor_); - } - if (!settings.EventHandlers_.HandlersExecutor_) { - maybeSettings->EventHandlers_.HandlersExecutor(Settings.DefaultHandlersExecutor_); - } + maybeSettings = settings; + with_lock (Lock) { + if (!settings.CompressionExecutor_) { + maybeSettings->CompressionExecutor(Settings.DefaultCompressionExecutor_); + } + if (!settings.EventHandlers_.HandlersExecutor_) { + maybeSettings->EventHandlers_.HandlersExecutor(Settings.DefaultHandlersExecutor_); + } if (!settings.ClusterDiscoveryMode_) { maybeSettings->ClusterDiscoveryMode(Settings.ClusterDiscoveryMode_); } - } - } + } + } auto session = std::make_shared<TWriteSession>( maybeSettings.GetOrElse(settings), shared_from_this(), Connections_, DbDriverState_ ); @@ -47,7 +47,7 @@ std::shared_ptr<IWriteSession> TPersQueueClient::TImpl::CreateWriteSession( return std::move(session); } -std::shared_ptr<ISimpleBlockingWriteSession> TPersQueueClient::TImpl::CreateSimpleWriteSession( +std::shared_ptr<ISimpleBlockingWriteSession> TPersQueueClient::TImpl::CreateSimpleWriteSession( const TWriteSessionSettings& settings ) { auto alteredSettings = settings; @@ -64,29 +64,29 @@ std::shared_ptr<ISimpleBlockingWriteSession> TPersQueueClient::TImpl::CreateSimp return std::move(session); } -std::shared_ptr<TPersQueueClient::TImpl> TPersQueueClient::TImpl::GetClientForEndpoint(const TString& clusterEndoint) { - with_lock (Lock) { - Y_VERIFY(!CustomEndpoint); - std::shared_ptr<TImpl>& client = 
Subclients[clusterEndoint]; - if (!client) { - client = std::make_shared<TImpl>(clusterEndoint, Connections_, Settings); - } - return client; - } -} - -std::shared_ptr<TPersQueueClient::TImpl::IReadSessionConnectionProcessorFactory> TPersQueueClient::TImpl::CreateReadSessionConnectionProcessorFactory() { - using TService = Ydb::PersQueue::V1::PersQueueService; - using TRequest = Ydb::PersQueue::V1::MigrationStreamingReadClientMessage; - using TResponse = Ydb::PersQueue::V1::MigrationStreamingReadServerMessage; - return CreateConnectionProcessorFactory<TService, TRequest, TResponse>(&TService::Stub::AsyncMigrationStreamingRead, Connections_, DbDriverState_); -} - -std::shared_ptr<TPersQueueClient::TImpl::IWriteSessionConnectionProcessorFactory> TPersQueueClient::TImpl::CreateWriteSessionConnectionProcessorFactory() { +std::shared_ptr<TPersQueueClient::TImpl> TPersQueueClient::TImpl::GetClientForEndpoint(const TString& clusterEndoint) { + with_lock (Lock) { + Y_VERIFY(!CustomEndpoint); + std::shared_ptr<TImpl>& client = Subclients[clusterEndoint]; + if (!client) { + client = std::make_shared<TImpl>(clusterEndoint, Connections_, Settings); + } + return client; + } +} + +std::shared_ptr<TPersQueueClient::TImpl::IReadSessionConnectionProcessorFactory> TPersQueueClient::TImpl::CreateReadSessionConnectionProcessorFactory() { + using TService = Ydb::PersQueue::V1::PersQueueService; + using TRequest = Ydb::PersQueue::V1::MigrationStreamingReadClientMessage; + using TResponse = Ydb::PersQueue::V1::MigrationStreamingReadServerMessage; + return CreateConnectionProcessorFactory<TService, TRequest, TResponse>(&TService::Stub::AsyncMigrationStreamingRead, Connections_, DbDriverState_); +} + +std::shared_ptr<TPersQueueClient::TImpl::IWriteSessionConnectionProcessorFactory> TPersQueueClient::TImpl::CreateWriteSessionConnectionProcessorFactory() { using TService = Ydb::PersQueue::V1::PersQueueService; using TRequest = Ydb::PersQueue::V1::StreamingWriteClientMessage; using TResponse = Ydb::PersQueue::V1::StreamingWriteServerMessage; return CreateConnectionProcessorFactory<TService, TRequest, TResponse>(&TService::Stub::AsyncStreamingWrite, Connections_, DbDriverState_); } -} // namespace NYdb::NPersQueue +} // namespace NYdb::NPersQueue diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue_impl.h b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue_impl.h index 57e9dd43bfa..6d57e796b30 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue_impl.h +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/persqueue_impl.h @@ -1,5 +1,5 @@ -#pragma once - +#pragma once + #include "common.h" #define INCLUDE_YDB_INTERNAL_H @@ -8,28 +8,28 @@ #include <ydb/public/api/grpc/draft/ydb_persqueue_v1.grpc.pb.h> #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h> - -namespace NYdb::NPersQueue { - -class TPersQueueClient::TImpl : public TClientImplCommon<TPersQueueClient::TImpl> { -public: - // Constructor for main client. - TImpl(std::shared_ptr<TGRpcConnectionsImpl> connections, const TPersQueueClientSettings& settings) - : TClientImplCommon(std::move(connections), settings) - , Settings(settings) - { - } - - // Constructor for subclients with endpoints discovered by cluster discovery. - // Async discovery mode is used because this client is created inside SDK threads. - // See YDB-1231 and YDB-1232. 
- TImpl(const TString& clusterEndpoint, std::shared_ptr<TGRpcConnectionsImpl> connections, const TPersQueueClientSettings& settings) + +namespace NYdb::NPersQueue { + +class TPersQueueClient::TImpl : public TClientImplCommon<TPersQueueClient::TImpl> { +public: + // Constructor for main client. + TImpl(std::shared_ptr<TGRpcConnectionsImpl> connections, const TPersQueueClientSettings& settings) + : TClientImplCommon(std::move(connections), settings) + , Settings(settings) + { + } + + // Constructor for subclients with endpoints discovered by cluster discovery. + // Async discovery mode is used because this client is created inside SDK threads. + // See YDB-1231 and YDB-1232. + TImpl(const TString& clusterEndpoint, std::shared_ptr<TGRpcConnectionsImpl> connections, const TPersQueueClientSettings& settings) : TClientImplCommon(std::move(connections), settings.Database_, clusterEndpoint, EDiscoveryMode::Async, settings.EnableSsl_, settings.CredentialsProviderFactory_) - , Settings(settings) - , CustomEndpoint(clusterEndpoint) - { - } - + , Settings(settings) + , CustomEndpoint(clusterEndpoint) + { + } + template<class TReadRule> static void ConvertToProtoReadRule(const TReadRule& readRule, Ydb::PersQueue::V1::TopicSettings::ReadRule& rrProps) { rrProps.set_consumer_name(readRule.ConsumerName_); @@ -43,103 +43,103 @@ public: rrProps.set_service_type(readRule.ServiceType_); } - template <class TRequest, class TSettings> - static TRequest MakePropsCreateOrAlterRequest(const TString& path, const TSettings& settings) { - TRequest request = MakeOperationRequest<TRequest>(settings); - request.set_path(path); - - Ydb::PersQueue::V1::TopicSettings& props = *request.mutable_settings(); - - props.set_partitions_count(settings.PartitionsCount_); - - props.set_retention_period_ms(settings.RetentionPeriod_.MilliSeconds()); - props.set_supported_format(static_cast<Ydb::PersQueue::V1::TopicSettings::Format>(settings.SupportedFormat_)); - for (const auto& codec : settings.SupportedCodecs_) { - props.add_supported_codecs((static_cast<Ydb::PersQueue::V1::Codec>(codec))); - } - props.set_max_partition_storage_size(settings.MaxPartitionStorageSize_); - props.set_max_partition_write_speed(settings.MaxPartitionWriteSpeed_); - props.set_max_partition_write_burst(settings.MaxPartitionWriteBurst_); - props.set_client_write_disabled(settings.ClientWriteDisabled_); - (*props.mutable_attributes())["_partitions_per_tablet"] = TStringBuilder() << settings.PartitionsPerTablet_; - (*props.mutable_attributes())["_allow_unauthenticated_read"] = settings.AllowUnauthenticatedRead_ ? "true" : "false"; - (*props.mutable_attributes())["_allow_unauthenticated_write"] = settings.AllowUnauthenticatedWrite_ ? 
"true" : "false"; + template <class TRequest, class TSettings> + static TRequest MakePropsCreateOrAlterRequest(const TString& path, const TSettings& settings) { + TRequest request = MakeOperationRequest<TRequest>(settings); + request.set_path(path); + + Ydb::PersQueue::V1::TopicSettings& props = *request.mutable_settings(); + + props.set_partitions_count(settings.PartitionsCount_); + + props.set_retention_period_ms(settings.RetentionPeriod_.MilliSeconds()); + props.set_supported_format(static_cast<Ydb::PersQueue::V1::TopicSettings::Format>(settings.SupportedFormat_)); + for (const auto& codec : settings.SupportedCodecs_) { + props.add_supported_codecs((static_cast<Ydb::PersQueue::V1::Codec>(codec))); + } + props.set_max_partition_storage_size(settings.MaxPartitionStorageSize_); + props.set_max_partition_write_speed(settings.MaxPartitionWriteSpeed_); + props.set_max_partition_write_burst(settings.MaxPartitionWriteBurst_); + props.set_client_write_disabled(settings.ClientWriteDisabled_); + (*props.mutable_attributes())["_partitions_per_tablet"] = TStringBuilder() << settings.PartitionsPerTablet_; + (*props.mutable_attributes())["_allow_unauthenticated_read"] = settings.AllowUnauthenticatedRead_ ? "true" : "false"; + (*props.mutable_attributes())["_allow_unauthenticated_write"] = settings.AllowUnauthenticatedWrite_ ? "true" : "false"; if (settings.AbcId_) (*props.mutable_attributes())["_abc_id"] = TStringBuilder() << *settings.AbcId_; if (settings.AbcSlug_) (*props.mutable_attributes())["_abc_slug"] = TStringBuilder() << *settings.AbcSlug_; - - for (const auto& readRule : settings.ReadRules_) { - - Ydb::PersQueue::V1::TopicSettings::ReadRule& rrProps = *props.add_read_rules(); - + + for (const auto& readRule : settings.ReadRules_) { + + Ydb::PersQueue::V1::TopicSettings::ReadRule& rrProps = *props.add_read_rules(); + ConvertToProtoReadRule(readRule, rrProps); - } - - if (settings.RemoteMirrorRule_) { - auto rmr = props.mutable_remote_mirror_rule(); - rmr->set_endpoint(settings.RemoteMirrorRule_.GetRef().Endpoint_); - rmr->set_topic_path(settings.RemoteMirrorRule_.GetRef().TopicPath_); - rmr->set_consumer_name(settings.RemoteMirrorRule_.GetRef().ConsumerName_); - rmr->set_starting_message_timestamp_ms(settings.RemoteMirrorRule_.GetRef().StartingMessageTimestamp_.MilliSeconds()); - const auto& credentials = settings.RemoteMirrorRule_.GetRef().Credentials_; - switch (credentials.GetMode()) { - case TCredentials::EMode::OAUTH_TOKEN: { - rmr->mutable_credentials()->set_oauth_token(credentials.GetOauthToken()); - break; - } - case TCredentials::EMode::JWT_PARAMS: { - rmr->mutable_credentials()->set_jwt_params(credentials.GetJwtParams()); - break; - } - case TCredentials::EMode::IAM: { - rmr->mutable_credentials()->mutable_iam()->set_endpoint(credentials.GetIamEndpoint()); - rmr->mutable_credentials()->mutable_iam()->set_service_account_key(credentials.GetIamServiceAccountKey()); - break; - } - case TCredentials::EMode::NOT_SET: { - break; - } - default: { - ythrow yexception() << "unsupported credentials type for remote mirror rule"; - } - } - rmr->set_database(settings.RemoteMirrorRule_.GetRef().Database_); - } - return request; - } - - TAsyncStatus CreateTopic(const TString& path, const TCreateTopicSettings& settings) { - - auto request = MakePropsCreateOrAlterRequest<Ydb::PersQueue::V1::CreateTopicRequest>(path, settings); - - return RunSimple<Ydb::PersQueue::V1::PersQueueService, Ydb::PersQueue::V1::CreateTopicRequest, Ydb::PersQueue::V1::CreateTopicResponse>( - std::move(request), - 
&Ydb::PersQueue::V1::PersQueueService::Stub::AsyncCreateTopic, - TRpcRequestSettings::Make(settings), - settings.ClientTimeout_); - } - - TAsyncStatus AlterTopic(const TString& path, const TAlterTopicSettings& settings) { - auto request = MakePropsCreateOrAlterRequest<Ydb::PersQueue::V1::AlterTopicRequest>(path, settings); - - return RunSimple<Ydb::PersQueue::V1::PersQueueService, Ydb::PersQueue::V1::AlterTopicRequest, Ydb::PersQueue::V1::AlterTopicResponse>( - std::move(request), - &Ydb::PersQueue::V1::PersQueueService::Stub::AsyncAlterTopic, - TRpcRequestSettings::Make(settings), - settings.ClientTimeout_); - } - - - TAsyncStatus DropTopic(const TString& path, const TDropTopicSettings& settings) { - auto request = MakeOperationRequest<Ydb::PersQueue::V1::DropTopicRequest>(settings); - request.set_path(path); - - return RunSimple<Ydb::PersQueue::V1::PersQueueService, Ydb::PersQueue::V1::DropTopicRequest, Ydb::PersQueue::V1::DropTopicResponse>( - std::move(request), - &Ydb::PersQueue::V1::PersQueueService::Stub::AsyncDropTopic, - TRpcRequestSettings::Make(settings), - settings.ClientTimeout_); - } - + } + + if (settings.RemoteMirrorRule_) { + auto rmr = props.mutable_remote_mirror_rule(); + rmr->set_endpoint(settings.RemoteMirrorRule_.GetRef().Endpoint_); + rmr->set_topic_path(settings.RemoteMirrorRule_.GetRef().TopicPath_); + rmr->set_consumer_name(settings.RemoteMirrorRule_.GetRef().ConsumerName_); + rmr->set_starting_message_timestamp_ms(settings.RemoteMirrorRule_.GetRef().StartingMessageTimestamp_.MilliSeconds()); + const auto& credentials = settings.RemoteMirrorRule_.GetRef().Credentials_; + switch (credentials.GetMode()) { + case TCredentials::EMode::OAUTH_TOKEN: { + rmr->mutable_credentials()->set_oauth_token(credentials.GetOauthToken()); + break; + } + case TCredentials::EMode::JWT_PARAMS: { + rmr->mutable_credentials()->set_jwt_params(credentials.GetJwtParams()); + break; + } + case TCredentials::EMode::IAM: { + rmr->mutable_credentials()->mutable_iam()->set_endpoint(credentials.GetIamEndpoint()); + rmr->mutable_credentials()->mutable_iam()->set_service_account_key(credentials.GetIamServiceAccountKey()); + break; + } + case TCredentials::EMode::NOT_SET: { + break; + } + default: { + ythrow yexception() << "unsupported credentials type for remote mirror rule"; + } + } + rmr->set_database(settings.RemoteMirrorRule_.GetRef().Database_); + } + return request; + } + + TAsyncStatus CreateTopic(const TString& path, const TCreateTopicSettings& settings) { + + auto request = MakePropsCreateOrAlterRequest<Ydb::PersQueue::V1::CreateTopicRequest>(path, settings); + + return RunSimple<Ydb::PersQueue::V1::PersQueueService, Ydb::PersQueue::V1::CreateTopicRequest, Ydb::PersQueue::V1::CreateTopicResponse>( + std::move(request), + &Ydb::PersQueue::V1::PersQueueService::Stub::AsyncCreateTopic, + TRpcRequestSettings::Make(settings), + settings.ClientTimeout_); + } + + TAsyncStatus AlterTopic(const TString& path, const TAlterTopicSettings& settings) { + auto request = MakePropsCreateOrAlterRequest<Ydb::PersQueue::V1::AlterTopicRequest>(path, settings); + + return RunSimple<Ydb::PersQueue::V1::PersQueueService, Ydb::PersQueue::V1::AlterTopicRequest, Ydb::PersQueue::V1::AlterTopicResponse>( + std::move(request), + &Ydb::PersQueue::V1::PersQueueService::Stub::AsyncAlterTopic, + TRpcRequestSettings::Make(settings), + settings.ClientTimeout_); + } + + + TAsyncStatus DropTopic(const TString& path, const TDropTopicSettings& settings) { + auto request = 
MakeOperationRequest<Ydb::PersQueue::V1::DropTopicRequest>(settings); + request.set_path(path); + + return RunSimple<Ydb::PersQueue::V1::PersQueueService, Ydb::PersQueue::V1::DropTopicRequest, Ydb::PersQueue::V1::DropTopicResponse>( + std::move(request), + &Ydb::PersQueue::V1::PersQueueService::Stub::AsyncDropTopic, + TRpcRequestSettings::Make(settings), + settings.ClientTimeout_); + } + TAsyncStatus AddReadRule(const TString& path, const TAddReadRuleSettings& settings) { auto request = MakeOperationRequest<Ydb::PersQueue::V1::AddReadRuleRequest>(settings); request.set_path(path); @@ -164,61 +164,61 @@ public: } - TAsyncDescribeTopicResult DescribeTopic(const TString& path, const TDescribeTopicSettings& settings) { - auto request = MakeOperationRequest<Ydb::PersQueue::V1::DescribeTopicRequest>(settings); - request.set_path(path); - - auto promise = NThreading::NewPromise<TDescribeTopicResult>(); - + TAsyncDescribeTopicResult DescribeTopic(const TString& path, const TDescribeTopicSettings& settings) { + auto request = MakeOperationRequest<Ydb::PersQueue::V1::DescribeTopicRequest>(settings); + request.set_path(path); + + auto promise = NThreading::NewPromise<TDescribeTopicResult>(); + auto extractor = [promise] - (google::protobuf::Any* any, TPlainStatus status) mutable { - Ydb::PersQueue::V1::DescribeTopicResult result; - if (any) { - any->UnpackTo(&result); - } - + (google::protobuf::Any* any, TPlainStatus status) mutable { + Ydb::PersQueue::V1::DescribeTopicResult result; + if (any) { + any->UnpackTo(&result); + } + TDescribeTopicResult val(TStatus(std::move(status)), result); - promise.SetValue(std::move(val)); - }; - - Connections_->RunDeferred<Ydb::PersQueue::V1::PersQueueService, Ydb::PersQueue::V1::DescribeTopicRequest, Ydb::PersQueue::V1::DescribeTopicResponse>( - std::move(request), - extractor, - &Ydb::PersQueue::V1::PersQueueService::Stub::AsyncDescribeTopic, - DbDriverState_, - INITIAL_DEFERRED_CALL_DELAY, - TRpcRequestSettings::Make(settings), - settings.ClientTimeout_); - - return promise.GetFuture(); - } - - // Runtime API. - std::shared_ptr<IReadSession> CreateReadSession(const TReadSessionSettings& settings); + promise.SetValue(std::move(val)); + }; + + Connections_->RunDeferred<Ydb::PersQueue::V1::PersQueueService, Ydb::PersQueue::V1::DescribeTopicRequest, Ydb::PersQueue::V1::DescribeTopicResponse>( + std::move(request), + extractor, + &Ydb::PersQueue::V1::PersQueueService::Stub::AsyncDescribeTopic, + DbDriverState_, + INITIAL_DEFERRED_CALL_DELAY, + TRpcRequestSettings::Make(settings), + settings.ClientTimeout_); + + return promise.GetFuture(); + } + + // Runtime API. 
+ std::shared_ptr<IReadSession> CreateReadSession(const TReadSessionSettings& settings); std::shared_ptr<ISimpleBlockingWriteSession> CreateSimpleWriteSession(const TWriteSessionSettings& settings); std::shared_ptr<IWriteSession> CreateWriteSession(const TWriteSessionSettings& settings); - - std::shared_ptr<TImpl> GetClientForEndpoint(const TString& clusterEndoint); - - using IReadSessionConnectionProcessorFactory = ISessionConnectionProcessorFactory<Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>; - - std::shared_ptr<IReadSessionConnectionProcessorFactory> CreateReadSessionConnectionProcessorFactory(); - + + std::shared_ptr<TImpl> GetClientForEndpoint(const TString& clusterEndoint); + + using IReadSessionConnectionProcessorFactory = ISessionConnectionProcessorFactory<Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>; + + std::shared_ptr<IReadSessionConnectionProcessorFactory> CreateReadSessionConnectionProcessorFactory(); + using IWriteSessionConnectionProcessorFactory = ISessionConnectionProcessorFactory< Ydb::PersQueue::V1::StreamingWriteClientMessage, Ydb::PersQueue::V1::StreamingWriteServerMessage>; std::shared_ptr<IWriteSessionConnectionProcessorFactory> CreateWriteSessionConnectionProcessorFactory(); - NGrpc::IQueueClientContextPtr CreateContext() { - return Connections_->CreateContext(); - } - -private: - const TPersQueueClientSettings Settings; - const TString CustomEndpoint; - TAdaptiveLock Lock; - THashMap<TString, std::shared_ptr<TImpl>> Subclients; // Endpoint -> Subclient. -}; - -} // namespace NYdb::NPersQueue + NGrpc::IQueueClientContextPtr CreateContext() { + return Connections_->CreateContext(); + } + +private: + const TPersQueueClientSettings Settings; + const TString CustomEndpoint; + TAdaptiveLock Lock; + THashMap<TString, std::shared_ptr<TImpl>> Subclients; // Endpoint -> Subclient. 
+}; + +} // namespace NYdb::NPersQueue diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/read_session.cpp b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/read_session.cpp index e7dd0a87e53..d39a592d7d3 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/read_session.cpp +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/read_session.cpp @@ -1,25 +1,25 @@ #include "persqueue_impl.h" #include "read_session.h" #include "common.h" - + #define INCLUDE_YDB_INTERNAL_H #include <ydb/public/sdk/cpp/client/impl/ydb_internal/logger/log.h> #undef INCLUDE_YDB_INTERNAL_H -#include <library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.h> -#include <util/generic/guid.h> -#include <util/generic/size_literals.h> -#include <util/generic/utility.h> -#include <util/generic/yexception.h> -#include <util/stream/mem.h> +#include <library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.h> +#include <util/generic/guid.h> +#include <util/generic/size_literals.h> +#include <util/generic/utility.h> +#include <util/generic/yexception.h> +#include <util/stream/mem.h> #include <util/system/env.h> - -#include <variant> - -namespace NYdb::NPersQueue { - -static const TString DRIVER_IS_STOPPING_DESCRIPTION = "Driver is stopping"; - + +#include <variant> + +namespace NYdb::NPersQueue { + +static const TString DRIVER_IS_STOPPING_DESCRIPTION = "Driver is stopping"; + static const bool RangesMode = !GetEnv("PQ_OFFSET_RANGES_MODE").empty(); std::pair<ui64, ui64> GetMessageOffsetRange(const TReadSessionEvent::TDataReceivedEvent& dataReceivedEvent, ui64 index) { @@ -31,141 +31,141 @@ std::pair<ui64, ui64> GetMessageOffsetRange(const TReadSessionEvent::TDataReceiv return {msg.GetOffset(), msg.GetOffset() + 1}; } -TString IssuesSingleLineString(const NYql::TIssues& issues) { - return SubstGlobalCopy(issues.ToString(), '\n', ' '); -} - -void MakeCountersNotNull(TReaderCounters& counters); -bool HasNullCounters(TReaderCounters& counters); - -class TErrorHandler : public IErrorHandler { -public: +TString IssuesSingleLineString(const NYql::TIssues& issues) { + return SubstGlobalCopy(issues.ToString(), '\n', ' '); +} + +void MakeCountersNotNull(TReaderCounters& counters); +bool HasNullCounters(TReaderCounters& counters); + +class TErrorHandler : public IErrorHandler { +public: TErrorHandler(std::weak_ptr<TReadSession> session) : Session(std::move(session)) - { - } - - void AbortSession(TSessionClosedEvent&& closeEvent) override; - -private: - std::weak_ptr<TReadSession> Session; -}; - -TReadSession::TReadSession(const TReadSessionSettings& settings, - std::shared_ptr<TPersQueueClient::TImpl> client, - std::shared_ptr<TGRpcConnectionsImpl> connections, - TDbDriverStatePtr dbDriverState) - : Settings(settings) - , SessionId(CreateGuidAsString()) - , Log(dbDriverState->Log) - , Client(std::move(client)) - , Connections(std::move(connections)) - , DbDriverState(std::move(dbDriverState)) -{ - if (!Settings.RetryPolicy_) { - Settings.RetryPolicy_ = IRetryPolicy::GetDefaultPolicy(); - } - - MakeCountersIfNeeded(); - - { - TStringBuilder logPrefix; - logPrefix << GetDatabaseLogPrefix(DbDriverState->Database) << "[" << SessionId << "] "; - Log.SetFormatter(GetPrefixLogFormatter(logPrefix)); - } -} - -TReadSession::~TReadSession() { - Abort(EStatus::ABORTED, "Aborted"); - WaitAllDecompressionTasks(); - ClearAllEvents(); -} - -Ydb::PersQueue::ClusterDiscovery::DiscoverClustersRequest TReadSession::MakeClusterDiscoveryRequest() const { - 
Ydb::PersQueue::ClusterDiscovery::DiscoverClustersRequest req; - for (const TTopicReadSettings& topic : Settings.Topics_) { - auto* params = req.add_read_sessions(); - params->set_topic(topic.Path_); - params->mutable_all_original(); // set all_original - } - return req; -} - + { + } + + void AbortSession(TSessionClosedEvent&& closeEvent) override; + +private: + std::weak_ptr<TReadSession> Session; +}; + +TReadSession::TReadSession(const TReadSessionSettings& settings, + std::shared_ptr<TPersQueueClient::TImpl> client, + std::shared_ptr<TGRpcConnectionsImpl> connections, + TDbDriverStatePtr dbDriverState) + : Settings(settings) + , SessionId(CreateGuidAsString()) + , Log(dbDriverState->Log) + , Client(std::move(client)) + , Connections(std::move(connections)) + , DbDriverState(std::move(dbDriverState)) +{ + if (!Settings.RetryPolicy_) { + Settings.RetryPolicy_ = IRetryPolicy::GetDefaultPolicy(); + } + + MakeCountersIfNeeded(); + + { + TStringBuilder logPrefix; + logPrefix << GetDatabaseLogPrefix(DbDriverState->Database) << "[" << SessionId << "] "; + Log.SetFormatter(GetPrefixLogFormatter(logPrefix)); + } +} + +TReadSession::~TReadSession() { + Abort(EStatus::ABORTED, "Aborted"); + WaitAllDecompressionTasks(); + ClearAllEvents(); +} + +Ydb::PersQueue::ClusterDiscovery::DiscoverClustersRequest TReadSession::MakeClusterDiscoveryRequest() const { + Ydb::PersQueue::ClusterDiscovery::DiscoverClustersRequest req; + for (const TTopicReadSettings& topic : Settings.Topics_) { + auto* params = req.add_read_sessions(); + params->set_topic(topic.Path_); + params->mutable_all_original(); // set all_original + } + return req; +} + void TReadSession::Start() { ErrorHandler = MakeIntrusive<TErrorHandler>(weak_from_this()); - EventsQueue = std::make_shared<TReadSessionEventsQueue>(Settings, weak_from_this()); - - if (!ValidateSettings()) { - return; - } - - Log << TLOG_INFO << "Starting read session"; + EventsQueue = std::make_shared<TReadSessionEventsQueue>(Settings, weak_from_this()); + + if (!ValidateSettings()) { + return; + } + + Log << TLOG_INFO << "Starting read session"; if (Settings.DisableClusterDiscovery_) { ProceedWithoutClusterDiscovery(); } else { StartClusterDiscovery(); } -} - -bool TReadSession::ValidateSettings() { - NYql::TIssues issues; - if (Settings.Topics_.empty()) { - issues.AddIssue("Empty topics list."); - } - - if (Settings.ConsumerName_.empty()) { - issues.AddIssue("No consumer specified."); - } - - if (Settings.MaxMemoryUsageBytes_ < 1024 * 1024) { - issues.AddIssue("Too small max memory usage. 
Valid values start from 1 megabyte."); - } - - if (issues) { - Abort(EStatus::BAD_REQUEST, MakeIssueWithSubIssues("Invalid read session settings", issues)); - return false; - } else { - return true; - } -} - -void TReadSession::StartClusterDiscovery() { - with_lock (Lock) { - if (Aborting) { - return; - } - - Log << TLOG_DEBUG << "Starting cluster discovery"; - ClusterDiscoveryDelayContext = nullptr; - } - - auto extractor = [errorHandler = ErrorHandler, self = weak_from_this()] - (google::protobuf::Any* any, TPlainStatus status) mutable { - auto selfShared = self.lock(); - if (!selfShared) { - return; - } - - Ydb::PersQueue::ClusterDiscovery::DiscoverClustersResult result; - if (any) { - any->UnpackTo(&result); - } +} + +bool TReadSession::ValidateSettings() { + NYql::TIssues issues; + if (Settings.Topics_.empty()) { + issues.AddIssue("Empty topics list."); + } + + if (Settings.ConsumerName_.empty()) { + issues.AddIssue("No consumer specified."); + } + + if (Settings.MaxMemoryUsageBytes_ < 1024 * 1024) { + issues.AddIssue("Too small max memory usage. Valid values start from 1 megabyte."); + } + + if (issues) { + Abort(EStatus::BAD_REQUEST, MakeIssueWithSubIssues("Invalid read session settings", issues)); + return false; + } else { + return true; + } +} + +void TReadSession::StartClusterDiscovery() { + with_lock (Lock) { + if (Aborting) { + return; + } + + Log << TLOG_DEBUG << "Starting cluster discovery"; + ClusterDiscoveryDelayContext = nullptr; + } + + auto extractor = [errorHandler = ErrorHandler, self = weak_from_this()] + (google::protobuf::Any* any, TPlainStatus status) mutable { + auto selfShared = self.lock(); + if (!selfShared) { + return; + } + + Ydb::PersQueue::ClusterDiscovery::DiscoverClustersResult result; + if (any) { + any->UnpackTo(&result); + } TStatus st(std::move(status)); - selfShared->OnClusterDiscovery(st, result); - }; - - Connections->RunDeferred<Ydb::PersQueue::V1::ClusterDiscoveryService, - Ydb::PersQueue::ClusterDiscovery::DiscoverClustersRequest, - Ydb::PersQueue::ClusterDiscovery::DiscoverClustersResponse>( - MakeClusterDiscoveryRequest(), - std::move(extractor), - &Ydb::PersQueue::V1::ClusterDiscoveryService::Stub::AsyncDiscoverClusters, - DbDriverState, - INITIAL_DEFERRED_CALL_DELAY, - TRpcRequestSettings::Make(Settings), - /*ClientTimeout_*/TDuration::Seconds(5)); // TODO: make client timeout setting -} - + selfShared->OnClusterDiscovery(st, result); + }; + + Connections->RunDeferred<Ydb::PersQueue::V1::ClusterDiscoveryService, + Ydb::PersQueue::ClusterDiscovery::DiscoverClustersRequest, + Ydb::PersQueue::ClusterDiscovery::DiscoverClustersResponse>( + MakeClusterDiscoveryRequest(), + std::move(extractor), + &Ydb::PersQueue::V1::ClusterDiscoveryService::Stub::AsyncDiscoverClusters, + DbDriverState, + INITIAL_DEFERRED_CALL_DELAY, + TRpcRequestSettings::Make(Settings), + /*ClientTimeout_*/TDuration::Seconds(5)); // TODO: make client timeout setting +} + void TReadSession::ProceedWithoutClusterDiscovery() { TDeferredActions deferred; @@ -221,417 +221,417 @@ void TReadSession::CreateClusterSessionsImpl() { } } -void TReadSession::OnClusterDiscovery(const TStatus& status, const Ydb::PersQueue::ClusterDiscovery::DiscoverClustersResult& result) { - TDeferredActions deferred; - with_lock (Lock) { - if (Aborting) { - return; - } - - if (!status.IsSuccess()) { - ++*Settings.Counters_->Errors; - if (!ClusterDiscoveryRetryState) { - ClusterDiscoveryRetryState = Settings.RetryPolicy_->CreateRetryState(); - } - TMaybe<TDuration> retryDelay = 
ClusterDiscoveryRetryState->GetNextRetryDelay(status); - if (retryDelay) { - Log << TLOG_INFO << "Cluster discovery request failed. Status: " << status.GetStatus() - << ". Issues: \"" << IssuesSingleLineString(status.GetIssues()) << "\""; - RestartClusterDiscoveryImpl(*retryDelay, deferred); - } else { - AbortImpl(status.GetStatus(), MakeIssueWithSubIssues("Failed to discover clusters", status.GetIssues()), deferred); - } - return; - } - - Log << TLOG_DEBUG << "Cluster discovery request succeeded"; - ClusterDiscoveryRetryState = nullptr; - - // Init ClusterSessions. - if (static_cast<size_t>(result.read_sessions_clusters_size()) != Settings.Topics_.size()) { - ++*Settings.Counters_->Errors; - AbortImpl(EStatus::INTERNAL_ERROR, TStringBuilder() << "Unexpected reply from cluster discovery. Sizes of topics arrays don't match: " - << result.read_sessions_clusters_size() << " vs " << Settings.Topics_.size(), deferred); - return; - } - - const bool explicitlySpecifiedClusters = !Settings.Clusters_.empty(); - if (explicitlySpecifiedClusters) { - for (const TString& cluster : Settings.Clusters_) { - TString normalizedName = cluster; - normalizedName.to_lower(); - ClusterSessions.emplace(normalizedName, normalizedName); - } - } - - NYql::TIssues issues; - EStatus errorStatus = EStatus::INTERNAL_ERROR; - for (size_t topicIndex = 0; topicIndex < Settings.Topics_.size(); ++topicIndex) { - const TTopicReadSettings& topicSettings = Settings.Topics_[topicIndex]; - const Ydb::PersQueue::ClusterDiscovery::ReadSessionClusters& readSessionClusters = result.read_sessions_clusters(topicIndex); - for (const Ydb::PersQueue::ClusterDiscovery::ClusterInfo& cluster : readSessionClusters.clusters()) { - TString normalizedName = cluster.name(); - normalizedName.to_lower(); - THashMap<TString, TClusterSessionInfo>::iterator clusterSessionInfoIter; - if (explicitlySpecifiedClusters) { - clusterSessionInfoIter = ClusterSessions.find(normalizedName); - if (clusterSessionInfoIter == ClusterSessions.end()) { // User hasn't specified this cluster, so it isn't in our interest. - continue; - } - } else { - clusterSessionInfoIter = ClusterSessions.emplace(normalizedName, normalizedName).first; - } - TClusterSessionInfo& clusterSessionInfo = clusterSessionInfoIter->second; - if (cluster.endpoint().empty()) { - issues.AddIssue(TStringBuilder() << "Unexpected reply from cluster discovery. Empty endpoint for cluster " - << normalizedName); - } - if (clusterSessionInfo.ClusterEndpoint && clusterSessionInfo.ClusterEndpoint != cluster.endpoint()) { - issues.AddIssue(TStringBuilder() << "Unexpected reply from cluster discovery. Different endpoints for one cluster name. Cluster: " - << normalizedName << ". \"" << clusterSessionInfo.ClusterEndpoint << "\" vs \"" - << cluster.endpoint() << "\""); - } - if (!clusterSessionInfo.ClusterEndpoint) { - clusterSessionInfo.ClusterEndpoint = ApplyClusterEndpoint(DbDriverState->DiscoveryEndpoint, cluster.endpoint()); - } - clusterSessionInfo.Topics.reserve(Settings.Topics_.size()); - clusterSessionInfo.Topics.push_back(topicSettings); - } - } - - // Check clusters. - for (const auto& [cluster, clusterInfo] : ClusterSessions) { - if (clusterInfo.Topics.empty()) { // If specified explicitly by user. 
- errorStatus = EStatus::BAD_REQUEST; - issues.AddIssue(TStringBuilder() << "Unsupported cluster: " << cluster); - } - } - - if (issues) { - ++*Settings.Counters_->Errors; - AbortImpl(errorStatus, std::move(issues), deferred); - return; - } - +void TReadSession::OnClusterDiscovery(const TStatus& status, const Ydb::PersQueue::ClusterDiscovery::DiscoverClustersResult& result) { + TDeferredActions deferred; + with_lock (Lock) { + if (Aborting) { + return; + } + + if (!status.IsSuccess()) { + ++*Settings.Counters_->Errors; + if (!ClusterDiscoveryRetryState) { + ClusterDiscoveryRetryState = Settings.RetryPolicy_->CreateRetryState(); + } + TMaybe<TDuration> retryDelay = ClusterDiscoveryRetryState->GetNextRetryDelay(status); + if (retryDelay) { + Log << TLOG_INFO << "Cluster discovery request failed. Status: " << status.GetStatus() + << ". Issues: \"" << IssuesSingleLineString(status.GetIssues()) << "\""; + RestartClusterDiscoveryImpl(*retryDelay, deferred); + } else { + AbortImpl(status.GetStatus(), MakeIssueWithSubIssues("Failed to discover clusters", status.GetIssues()), deferred); + } + return; + } + + Log << TLOG_DEBUG << "Cluster discovery request succeeded"; + ClusterDiscoveryRetryState = nullptr; + + // Init ClusterSessions. + if (static_cast<size_t>(result.read_sessions_clusters_size()) != Settings.Topics_.size()) { + ++*Settings.Counters_->Errors; + AbortImpl(EStatus::INTERNAL_ERROR, TStringBuilder() << "Unexpected reply from cluster discovery. Sizes of topics arrays don't match: " + << result.read_sessions_clusters_size() << " vs " << Settings.Topics_.size(), deferred); + return; + } + + const bool explicitlySpecifiedClusters = !Settings.Clusters_.empty(); + if (explicitlySpecifiedClusters) { + for (const TString& cluster : Settings.Clusters_) { + TString normalizedName = cluster; + normalizedName.to_lower(); + ClusterSessions.emplace(normalizedName, normalizedName); + } + } + + NYql::TIssues issues; + EStatus errorStatus = EStatus::INTERNAL_ERROR; + for (size_t topicIndex = 0; topicIndex < Settings.Topics_.size(); ++topicIndex) { + const TTopicReadSettings& topicSettings = Settings.Topics_[topicIndex]; + const Ydb::PersQueue::ClusterDiscovery::ReadSessionClusters& readSessionClusters = result.read_sessions_clusters(topicIndex); + for (const Ydb::PersQueue::ClusterDiscovery::ClusterInfo& cluster : readSessionClusters.clusters()) { + TString normalizedName = cluster.name(); + normalizedName.to_lower(); + THashMap<TString, TClusterSessionInfo>::iterator clusterSessionInfoIter; + if (explicitlySpecifiedClusters) { + clusterSessionInfoIter = ClusterSessions.find(normalizedName); + if (clusterSessionInfoIter == ClusterSessions.end()) { // User hasn't specified this cluster, so it isn't in our interest. + continue; + } + } else { + clusterSessionInfoIter = ClusterSessions.emplace(normalizedName, normalizedName).first; + } + TClusterSessionInfo& clusterSessionInfo = clusterSessionInfoIter->second; + if (cluster.endpoint().empty()) { + issues.AddIssue(TStringBuilder() << "Unexpected reply from cluster discovery. Empty endpoint for cluster " + << normalizedName); + } + if (clusterSessionInfo.ClusterEndpoint && clusterSessionInfo.ClusterEndpoint != cluster.endpoint()) { + issues.AddIssue(TStringBuilder() << "Unexpected reply from cluster discovery. Different endpoints for one cluster name. Cluster: " + << normalizedName << ". 
\"" << clusterSessionInfo.ClusterEndpoint << "\" vs \"" + << cluster.endpoint() << "\""); + } + if (!clusterSessionInfo.ClusterEndpoint) { + clusterSessionInfo.ClusterEndpoint = ApplyClusterEndpoint(DbDriverState->DiscoveryEndpoint, cluster.endpoint()); + } + clusterSessionInfo.Topics.reserve(Settings.Topics_.size()); + clusterSessionInfo.Topics.push_back(topicSettings); + } + } + + // Check clusters. + for (const auto& [cluster, clusterInfo] : ClusterSessions) { + if (clusterInfo.Topics.empty()) { // If specified explicitly by user. + errorStatus = EStatus::BAD_REQUEST; + issues.AddIssue(TStringBuilder() << "Unsupported cluster: " << cluster); + } + } + + if (issues) { + ++*Settings.Counters_->Errors; + AbortImpl(errorStatus, std::move(issues), deferred); + return; + } + CreateClusterSessionsImpl(); - } - ScheduleDumpCountersToLog(); -} - -void TReadSession::RestartClusterDiscoveryImpl(TDuration delay, TDeferredActions& deferred) { - Log << TLOG_DEBUG << "Restart cluster discovery in " << delay; - auto startCallback = [self = weak_from_this()](bool ok) { - if (ok) { - if (auto sharedSelf = self.lock()) { - sharedSelf->StartClusterDiscovery(); - } - } - }; - - ClusterDiscoveryDelayContext = Connections->CreateContext(); - if (!ClusterDiscoveryDelayContext) { - AbortImpl(EStatus::ABORTED, DRIVER_IS_STOPPING_DESCRIPTION, deferred); - return; - } - Connections->ScheduleCallback(delay, - std::move(startCallback), - ClusterDiscoveryDelayContext); -} - -bool TReadSession::Close(TDuration timeout) { - Log << TLOG_INFO << "Closing read session. Close timeout: " << timeout; + } + ScheduleDumpCountersToLog(); +} + +void TReadSession::RestartClusterDiscoveryImpl(TDuration delay, TDeferredActions& deferred) { + Log << TLOG_DEBUG << "Restart cluster discovery in " << delay; + auto startCallback = [self = weak_from_this()](bool ok) { + if (ok) { + if (auto sharedSelf = self.lock()) { + sharedSelf->StartClusterDiscovery(); + } + } + }; + + ClusterDiscoveryDelayContext = Connections->CreateContext(); + if (!ClusterDiscoveryDelayContext) { + AbortImpl(EStatus::ABORTED, DRIVER_IS_STOPPING_DESCRIPTION, deferred); + return; + } + Connections->ScheduleCallback(delay, + std::move(startCallback), + ClusterDiscoveryDelayContext); +} + +bool TReadSession::Close(TDuration timeout) { + Log << TLOG_INFO << "Closing read session. Close timeout: " << timeout; with_lock (Lock) { Cancel(ClusterDiscoveryDelayContext); Cancel(DumpCountersContext); } - // Log final counters. - DumpCountersToLog(); - - std::vector<TSingleClusterReadSessionImpl::TPtr> sessions; - NThreading::TPromise<bool> promise = NThreading::NewPromise<bool>(); - std::shared_ptr<std::atomic<size_t>> count = std::make_shared<std::atomic<size_t>>(0); - auto callback = [=]() mutable { - if (--*count == 0) { - promise.TrySetValue(true); - } - }; - - TDeferredActions deferred; - with_lock (Lock) { - if (Closing || Aborting) { - return false; - } - - if (!timeout) { - AbortImpl(EStatus::ABORTED, "Close with zero timeout", deferred); - return false; - } - - Closing = true; - sessions.reserve(ClusterSessions.size()); - for (auto& [cluster, sessionInfo] : ClusterSessions) { - if (sessionInfo.Session) { - sessions.emplace_back(sessionInfo.Session); - } - } - } - *count = sessions.size() + 1; - for (const auto& session : sessions) { - session->Close(callback); - } - - callback(); // For the case when there are no subsessions yet. 
- - auto timeoutCallback = [=](bool) mutable { - promise.TrySetValue(false); - }; - - auto timeoutContext = Connections->CreateContext(); - if (!timeoutContext) { - AbortImpl(EStatus::ABORTED, DRIVER_IS_STOPPING_DESCRIPTION, deferred); - return false; - } - Connections->ScheduleCallback(timeout, - std::move(timeoutCallback), - timeoutContext); - - // Wait. - NThreading::TFuture<bool> resultFuture = promise.GetFuture(); - const bool result = resultFuture.GetValueSync(); - if (result) { - Cancel(timeoutContext); - - NYql::TIssues issues; - issues.AddIssue("Session was gracefully closed"); + // Log final counters. + DumpCountersToLog(); + + std::vector<TSingleClusterReadSessionImpl::TPtr> sessions; + NThreading::TPromise<bool> promise = NThreading::NewPromise<bool>(); + std::shared_ptr<std::atomic<size_t>> count = std::make_shared<std::atomic<size_t>>(0); + auto callback = [=]() mutable { + if (--*count == 0) { + promise.TrySetValue(true); + } + }; + + TDeferredActions deferred; + with_lock (Lock) { + if (Closing || Aborting) { + return false; + } + + if (!timeout) { + AbortImpl(EStatus::ABORTED, "Close with zero timeout", deferred); + return false; + } + + Closing = true; + sessions.reserve(ClusterSessions.size()); + for (auto& [cluster, sessionInfo] : ClusterSessions) { + if (sessionInfo.Session) { + sessions.emplace_back(sessionInfo.Session); + } + } + } + *count = sessions.size() + 1; + for (const auto& session : sessions) { + session->Close(callback); + } + + callback(); // For the case when there are no subsessions yet. + + auto timeoutCallback = [=](bool) mutable { + promise.TrySetValue(false); + }; + + auto timeoutContext = Connections->CreateContext(); + if (!timeoutContext) { + AbortImpl(EStatus::ABORTED, DRIVER_IS_STOPPING_DESCRIPTION, deferred); + return false; + } + Connections->ScheduleCallback(timeout, + std::move(timeoutCallback), + timeoutContext); + + // Wait. + NThreading::TFuture<bool> resultFuture = promise.GetFuture(); + const bool result = resultFuture.GetValueSync(); + if (result) { + Cancel(timeoutContext); + + NYql::TIssues issues; + issues.AddIssue("Session was gracefully closed"); EventsQueue->Close(TSessionClosedEvent(EStatus::SUCCESS, std::move(issues)), deferred); - } else { - ++*Settings.Counters_->Errors; - for (const auto& session : sessions) { - session->Abort(); - } - - NYql::TIssues issues; - issues.AddIssue(TStringBuilder() << "Session was closed after waiting " << timeout); + } else { + ++*Settings.Counters_->Errors; + for (const auto& session : sessions) { + session->Abort(); + } + + NYql::TIssues issues; + issues.AddIssue(TStringBuilder() << "Session was closed after waiting " << timeout); EventsQueue->Close(TSessionClosedEvent(EStatus::TIMEOUT, std::move(issues)), deferred); - } - - with_lock (Lock) { - Aborting = true; // Set abort flag for doing nothing on destructor. - } - return result; -} - -void TReadSession::AbortImpl(TSessionClosedEvent&& closeEvent, TDeferredActions& deferred) { - if (!Aborting) { - Aborting = true; + } + + with_lock (Lock) { + Aborting = true; // Set abort flag for doing nothing on destructor. + } + return result; +} + +void TReadSession::AbortImpl(TSessionClosedEvent&& closeEvent, TDeferredActions& deferred) { + if (!Aborting) { + Aborting = true; Log << TLOG_NOTICE << "Aborting read session. 
Description: " << closeEvent.DebugString(); - Cancel(ClusterDiscoveryDelayContext); - Cancel(DumpCountersContext); - for (auto& [cluster, sessionInfo] : ClusterSessions) { - if (sessionInfo.Session) { - sessionInfo.Session->Abort(); - } - } - EventsQueue->Close(std::move(closeEvent), deferred); - } -} - -void TReadSession::AbortImpl(EStatus statusCode, NYql::TIssues&& issues, TDeferredActions& deferred) { + Cancel(ClusterDiscoveryDelayContext); + Cancel(DumpCountersContext); + for (auto& [cluster, sessionInfo] : ClusterSessions) { + if (sessionInfo.Session) { + sessionInfo.Session->Abort(); + } + } + EventsQueue->Close(std::move(closeEvent), deferred); + } +} + +void TReadSession::AbortImpl(EStatus statusCode, NYql::TIssues&& issues, TDeferredActions& deferred) { AbortImpl(TSessionClosedEvent(statusCode, std::move(issues)), deferred); -} - -void TReadSession::AbortImpl(EStatus statusCode, const TString& message, TDeferredActions& deferred) { - NYql::TIssues issues; - issues.AddIssue(message); - AbortImpl(statusCode, std::move(issues), deferred); -} - -void TReadSession::Abort(EStatus statusCode, NYql::TIssues&& issues) { +} + +void TReadSession::AbortImpl(EStatus statusCode, const TString& message, TDeferredActions& deferred) { + NYql::TIssues issues; + issues.AddIssue(message); + AbortImpl(statusCode, std::move(issues), deferred); +} + +void TReadSession::Abort(EStatus statusCode, NYql::TIssues&& issues) { Abort(TSessionClosedEvent(statusCode, std::move(issues))); -} - -void TReadSession::Abort(EStatus statusCode, const TString& message) { - NYql::TIssues issues; - issues.AddIssue(message); - Abort(statusCode, std::move(issues)); -} - -void TReadSession::Abort(TSessionClosedEvent&& closeEvent) { - TDeferredActions deferred; - with_lock (Lock) { - AbortImpl(std::move(closeEvent), deferred); - } -} - -void TReadSession::WaitAllDecompressionTasks() { - for (auto& [cluster, sessionInfo] : ClusterSessions) { - if (sessionInfo.Session) { - sessionInfo.Session->WaitAllDecompressionTasks(); - } - } -} - -void TReadSession::ClearAllEvents() { - EventsQueue->ClearAllEvents(); -} - -NThreading::TFuture<void> TReadSession::WaitEvent() { - return EventsQueue->WaitEvent(); -} - -TVector<TReadSessionEvent::TEvent> TReadSession::GetEvents(bool block, TMaybe<size_t> maxEventsCount, size_t maxByteSize) { - return EventsQueue->GetEvents(block, maxEventsCount, maxByteSize); -} - -TMaybe<TReadSessionEvent::TEvent> TReadSession::GetEvent(bool block, size_t maxByteSize) { - return EventsQueue->GetEvent(block, maxByteSize); -} - -void TReadSession::StopReadingData() { - Log << TLOG_INFO << "Stop reading data"; - with_lock (Lock) { - if (!DataReadingSuspended) { - DataReadingSuspended = true; - - for (auto& [cluster, sessionInfo] : ClusterSessions) { - if (sessionInfo.Session) { - sessionInfo.Session->StopReadingData(); - } - } - } - } -} - -void TReadSession::ResumeReadingData() { - Log << TLOG_INFO << "Resume reading data"; - with_lock (Lock) { - if (DataReadingSuspended) { - DataReadingSuspended = false; - - for (auto& [cluster, sessionInfo] : ClusterSessions) { - if (sessionInfo.Session) { - sessionInfo.Session->ResumeReadingData(); - } - } - } - } -} - -static ELogPriority GetEventLogPriority(const TReadSessionEvent::TEvent& event) { - if (std::holds_alternative<TReadSessionEvent::TCreatePartitionStreamEvent>(event) - || std::holds_alternative<TReadSessionEvent::TDestroyPartitionStreamEvent>(event) - || std::holds_alternative<TReadSessionEvent::TPartitionStreamClosedEvent>(event) - || 
std::holds_alternative<TSessionClosedEvent>(event)) - { // Control event. - return TLOG_INFO; - } else { - return TLOG_DEBUG; - } -} - -void TReadSession::OnUserRetrievedEvent(const TReadSessionEvent::TEvent& event) { - Log << GetEventLogPriority(event) << "Read session event " << DebugString(event); -} - -void TReadSession::MakeCountersIfNeeded() { - if (!Settings.Counters_ || HasNullCounters(*Settings.Counters_)) { - TReaderCounters::TPtr counters = MakeIntrusive<TReaderCounters>(); - if (Settings.Counters_) { - *counters = *Settings.Counters_; // Copy all counters that have been set by user. - } - MakeCountersNotNull(*counters); - Settings.Counters(counters); - } -} - -void TReadSession::DumpCountersToLog(size_t timeNumber) { - const bool logCounters = timeNumber % 60 == 0; // Every 1 minute. - const bool dumpSessionsStatistics = timeNumber % 600 == 0; // Every 10 minutes. - - *Settings.Counters_->CurrentSessionLifetimeMs = (TInstant::Now() - StartSessionTime).MilliSeconds(); - std::vector<TSingleClusterReadSessionImpl::TPtr> sessions; - with_lock (Lock) { - if (Closing || Aborting) { - return; - } - - sessions.reserve(ClusterSessions.size()); - for (auto& [cluster, sessionInfo] : ClusterSessions) { - if (sessionInfo.Session) { - sessions.emplace_back(sessionInfo.Session); - } - } - } - - { - TMaybe<TLogElement> log; - if (dumpSessionsStatistics) { - log.ConstructInPlace(&Log, TLOG_INFO); - (*log) << "Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset):"; - } - for (const auto& session : sessions) { - session->UpdateMemoryUsageStatistics(); - if (dumpSessionsStatistics) { - session->DumpStatisticsToLog(*log); - } - } - } - -#define C(counter) \ - << " " Y_STRINGIZE(counter) ": " \ - << Settings.Counters_->counter->Val() \ - /**/ - - if (logCounters) { - Log << TLOG_INFO - << "Counters: {" - C(Errors) - C(CurrentSessionLifetimeMs) - C(BytesRead) - C(MessagesRead) - C(BytesReadCompressed) - C(BytesInflightUncompressed) - C(BytesInflightCompressed) - C(BytesInflightTotal) - C(MessagesInflight) - << " }"; - } - -#undef C - - ScheduleDumpCountersToLog(timeNumber + 1); -} - -void TReadSession::ScheduleDumpCountersToLog(size_t timeNumber) { +} + +void TReadSession::Abort(EStatus statusCode, const TString& message) { + NYql::TIssues issues; + issues.AddIssue(message); + Abort(statusCode, std::move(issues)); +} + +void TReadSession::Abort(TSessionClosedEvent&& closeEvent) { + TDeferredActions deferred; + with_lock (Lock) { + AbortImpl(std::move(closeEvent), deferred); + } +} + +void TReadSession::WaitAllDecompressionTasks() { + for (auto& [cluster, sessionInfo] : ClusterSessions) { + if (sessionInfo.Session) { + sessionInfo.Session->WaitAllDecompressionTasks(); + } + } +} + +void TReadSession::ClearAllEvents() { + EventsQueue->ClearAllEvents(); +} + +NThreading::TFuture<void> TReadSession::WaitEvent() { + return EventsQueue->WaitEvent(); +} + +TVector<TReadSessionEvent::TEvent> TReadSession::GetEvents(bool block, TMaybe<size_t> maxEventsCount, size_t maxByteSize) { + return EventsQueue->GetEvents(block, maxEventsCount, maxByteSize); +} + +TMaybe<TReadSessionEvent::TEvent> TReadSession::GetEvent(bool block, size_t maxByteSize) { + return EventsQueue->GetEvent(block, maxByteSize); +} + +void TReadSession::StopReadingData() { + Log << TLOG_INFO << "Stop reading data"; + with_lock (Lock) { + if (!DataReadingSuspended) { + DataReadingSuspended = true; + + for (auto& [cluster, sessionInfo] : ClusterSessions) { + if (sessionInfo.Session) { + 
sessionInfo.Session->StopReadingData(); + } + } + } + } +} + +void TReadSession::ResumeReadingData() { + Log << TLOG_INFO << "Resume reading data"; + with_lock (Lock) { + if (DataReadingSuspended) { + DataReadingSuspended = false; + + for (auto& [cluster, sessionInfo] : ClusterSessions) { + if (sessionInfo.Session) { + sessionInfo.Session->ResumeReadingData(); + } + } + } + } +} + +static ELogPriority GetEventLogPriority(const TReadSessionEvent::TEvent& event) { + if (std::holds_alternative<TReadSessionEvent::TCreatePartitionStreamEvent>(event) + || std::holds_alternative<TReadSessionEvent::TDestroyPartitionStreamEvent>(event) + || std::holds_alternative<TReadSessionEvent::TPartitionStreamClosedEvent>(event) + || std::holds_alternative<TSessionClosedEvent>(event)) + { // Control event. + return TLOG_INFO; + } else { + return TLOG_DEBUG; + } +} + +void TReadSession::OnUserRetrievedEvent(const TReadSessionEvent::TEvent& event) { + Log << GetEventLogPriority(event) << "Read session event " << DebugString(event); +} + +void TReadSession::MakeCountersIfNeeded() { + if (!Settings.Counters_ || HasNullCounters(*Settings.Counters_)) { + TReaderCounters::TPtr counters = MakeIntrusive<TReaderCounters>(); + if (Settings.Counters_) { + *counters = *Settings.Counters_; // Copy all counters that have been set by user. + } + MakeCountersNotNull(*counters); + Settings.Counters(counters); + } +} + +void TReadSession::DumpCountersToLog(size_t timeNumber) { + const bool logCounters = timeNumber % 60 == 0; // Every 1 minute. + const bool dumpSessionsStatistics = timeNumber % 600 == 0; // Every 10 minutes. + + *Settings.Counters_->CurrentSessionLifetimeMs = (TInstant::Now() - StartSessionTime).MilliSeconds(); + std::vector<TSingleClusterReadSessionImpl::TPtr> sessions; + with_lock (Lock) { + if (Closing || Aborting) { + return; + } + + sessions.reserve(ClusterSessions.size()); + for (auto& [cluster, sessionInfo] : ClusterSessions) { + if (sessionInfo.Session) { + sessions.emplace_back(sessionInfo.Session); + } + } + } + + { + TMaybe<TLogElement> log; + if (dumpSessionsStatistics) { + log.ConstructInPlace(&Log, TLOG_INFO); + (*log) << "Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset):"; + } + for (const auto& session : sessions) { + session->UpdateMemoryUsageStatistics(); + if (dumpSessionsStatistics) { + session->DumpStatisticsToLog(*log); + } + } + } + +#define C(counter) \ + << " " Y_STRINGIZE(counter) ": " \ + << Settings.Counters_->counter->Val() \ + /**/ + + if (logCounters) { + Log << TLOG_INFO + << "Counters: {" + C(Errors) + C(CurrentSessionLifetimeMs) + C(BytesRead) + C(MessagesRead) + C(BytesReadCompressed) + C(BytesInflightUncompressed) + C(BytesInflightCompressed) + C(BytesInflightTotal) + C(MessagesInflight) + << " }"; + } + +#undef C + + ScheduleDumpCountersToLog(timeNumber + 1); +} + +void TReadSession::ScheduleDumpCountersToLog(size_t timeNumber) { with_lock(Lock) { DumpCountersContext = Connections->CreateContext(); } - if (DumpCountersContext) { - auto callback = [self = weak_from_this(), timeNumber](bool ok) { - if (ok) { - if (auto sharedSelf = self.lock()) { - sharedSelf->DumpCountersToLog(timeNumber); - } - } - }; - Connections->ScheduleCallback(TDuration::Seconds(1), - std::move(callback), - DumpCountersContext); - } -} - -TPartitionStreamImpl::~TPartitionStreamImpl() = default; - -TLog TPartitionStreamImpl::GetLog() const { - if (auto session = Session.lock()) { - return session->GetLog(); - } - return {}; -} - -void 
TPartitionStreamImpl::Commit(ui64 startOffset, ui64 endOffset) { + if (DumpCountersContext) { + auto callback = [self = weak_from_this(), timeNumber](bool ok) { + if (ok) { + if (auto sharedSelf = self.lock()) { + sharedSelf->DumpCountersToLog(timeNumber); + } + } + }; + Connections->ScheduleCallback(TDuration::Seconds(1), + std::move(callback), + DumpCountersContext); + } +} + +TPartitionStreamImpl::~TPartitionStreamImpl() = default; + +TLog TPartitionStreamImpl::GetLog() const { + if (auto session = Session.lock()) { + return session->GetLog(); + } + return {}; +} + +void TPartitionStreamImpl::Commit(ui64 startOffset, ui64 endOffset) { std::vector<std::pair<ui64, ui64>> toCommit; - if (auto sessionShared = Session.lock()) { + if (auto sessionShared = Session.lock()) { Y_VERIFY(endOffset > startOffset); with_lock(sessionShared->Lock) { if (!AddToCommitRanges(startOffset, endOffset, true)) // Add range for real commit always. @@ -647,339 +647,339 @@ void TPartitionStreamImpl::Commit(ui64 startOffset, ui64 endOffset) { for (auto range: toCommit) { sessionShared->Commit(this, range.first, range.second); } - } -} - -void TPartitionStreamImpl::RequestStatus() { - if (auto sessionShared = Session.lock()) { - sessionShared->RequestPartitionStreamStatus(this); - } -} - -void TPartitionStreamImpl::ConfirmCreate(TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset) { - if (auto sessionShared = Session.lock()) { - sessionShared->ConfirmPartitionStreamCreate(this, readOffset, commitOffset); - } -} - -void TPartitionStreamImpl::ConfirmDestroy() { - if (auto sessionShared = Session.lock()) { - sessionShared->ConfirmPartitionStreamDestroy(this); - } -} - -void TPartitionStreamImpl::StopReading() { - Y_FAIL("Not implemented"); // TODO -} - -void TPartitionStreamImpl::ResumeReading() { - Y_FAIL("Not implemented"); // TODO -} - -void TPartitionStreamImpl::SignalReadyEvents(TReadSessionEventsQueue* queue, TDeferredActions& deferred) { - for (auto& event : EventsQueue) { + } +} + +void TPartitionStreamImpl::RequestStatus() { + if (auto sessionShared = Session.lock()) { + sessionShared->RequestPartitionStreamStatus(this); + } +} + +void TPartitionStreamImpl::ConfirmCreate(TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset) { + if (auto sessionShared = Session.lock()) { + sessionShared->ConfirmPartitionStreamCreate(this, readOffset, commitOffset); + } +} + +void TPartitionStreamImpl::ConfirmDestroy() { + if (auto sessionShared = Session.lock()) { + sessionShared->ConfirmPartitionStreamDestroy(this); + } +} + +void TPartitionStreamImpl::StopReading() { + Y_FAIL("Not implemented"); // TODO +} + +void TPartitionStreamImpl::ResumeReading() { + Y_FAIL("Not implemented"); // TODO +} + +void TPartitionStreamImpl::SignalReadyEvents(TReadSessionEventsQueue* queue, TDeferredActions& deferred) { + for (auto& event : EventsQueue) { event.Signal(this, queue, deferred); - if (!event.IsReady()) { - break; - } - } -} - -void TSingleClusterReadSessionImpl::Start() { - Settings.DecompressionExecutor_->Start(); - Settings.EventHandlers_.HandlersExecutor_->Start(); - if (!Reconnect(TPlainStatus())) { - ErrorHandler->AbortSession(EStatus::ABORTED, DRIVER_IS_STOPPING_DESCRIPTION); - } -} - -bool TSingleClusterReadSessionImpl::Reconnect(const TPlainStatus& status) { - TDuration delay = TDuration::Zero(); - NGrpc::IQueueClientContextPtr delayContext = nullptr; - NGrpc::IQueueClientContextPtr connectContext = ClientContext->CreateContext(); - NGrpc::IQueueClientContextPtr connectTimeoutContext = ClientContext->CreateContext(); - if 
(!connectContext || !connectTimeoutContext) { - return false; - } - - // Previous operations contexts. - NGrpc::IQueueClientContextPtr prevConnectContext; - NGrpc::IQueueClientContextPtr prevConnectTimeoutContext; - NGrpc::IQueueClientContextPtr prevConnectDelayContext; - - if (!status.Ok()) { - Log << TLOG_INFO << "Got error. Status: " << status.Status << ". Description: " << IssuesSingleLineString(status.Issues); - } - - TDeferredActions deferred; - with_lock (Lock) { - if (Aborting) { - Cancel(connectContext); - Cancel(connectTimeoutContext); - return false; - } - Processor = nullptr; - WaitingReadResponse = false; - ServerMessage = std::make_shared<Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>(); - ++ConnectionGeneration; - if (RetryState) { - TMaybe<TDuration> nextDelay = RetryState->GetNextRetryDelay(TPlainStatus(status)); - if (nextDelay) { - delay = *nextDelay; - delayContext = ClientContext->CreateContext(); - if (!delayContext) { - return false; - } - Log << TLOG_DEBUG << "Reconnecting session to cluster " << ClusterName << " in "<< delay; - } else { - return false; - } - } else { - RetryState = Settings.RetryPolicy_->CreateRetryState(); - } - ++ConnectionAttemptsDone; - - // Set new context - prevConnectContext = std::exchange(ConnectContext, connectContext); - prevConnectTimeoutContext = std::exchange(ConnectTimeoutContext, connectTimeoutContext); - prevConnectDelayContext = std::exchange(ConnectDelayContext, delayContext); - - Y_ASSERT(ConnectContext); - Y_ASSERT(ConnectTimeoutContext); - Y_ASSERT((delay == TDuration::Zero()) == !ConnectDelayContext); - - // Destroy all partition streams before connecting. - DestroyAllPartitionStreamsImpl(deferred); - } - - // Cancel previous operations. - Cancel(prevConnectContext); - Cancel(prevConnectTimeoutContext); - Cancel(prevConnectDelayContext); - - auto connectCallback = [weakThis = weak_from_this(), connectContext = connectContext](TPlainStatus&& st, typename IProcessor::TPtr&& processor) { - if (auto sharedThis = weakThis.lock()) { - sharedThis->OnConnect(std::move(st), std::move(processor), connectContext); - } - }; - - auto connectTimeoutCallback = [weakThis = weak_from_this(), connectTimeoutContext = connectTimeoutContext](bool ok) { - if (ok) { - if (auto sharedThis = weakThis.lock()) { - sharedThis->OnConnectTimeout(connectTimeoutContext); - } - } - }; - - Y_ASSERT(connectContext); - Y_ASSERT(connectTimeoutContext); - Y_ASSERT((delay == TDuration::Zero()) == !delayContext); - ConnectionFactory->CreateProcessor( - std::move(connectCallback), - TRpcRequestSettings::Make(Settings), - std::move(connectContext), - TDuration::Seconds(30) /* connect timeout */, // TODO: make connect timeout setting. - std::move(connectTimeoutContext), - std::move(connectTimeoutCallback), - delay, - std::move(delayContext)); - return true; -} - -void TSingleClusterReadSessionImpl::BreakConnectionAndReconnectImpl(TPlainStatus&& status, TDeferredActions& deferred) { - Log << TLOG_INFO << "Break connection due to unexpected message from server. Status: " << status.Status << ", Issues: \"" << IssuesSingleLineString(status.Issues) << "\""; - - Processor->Cancel(); - Processor = nullptr; - RetryState = Settings.RetryPolicy_->CreateRetryState(); // Explicitly create retry state to determine whether we should connect to server again. 
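Worth calling out in the Reconnect() body above: each attempt creates fresh connect, connect-timeout and (optionally) delay contexts, swaps them into the members while holding the lock, and only afterwards cancels whatever the previous attempt installed, so callbacks tied to an abandoned attempt are cancelled rather than left pending. Condensed from the code above, using the same member and helper names:

// Under the lock: install the new contexts and remember the previous ones.
prevConnectContext = std::exchange(ConnectContext, connectContext);
prevConnectTimeoutContext = std::exchange(ConnectTimeoutContext, connectTimeoutContext);
prevConnectDelayContext = std::exchange(ConnectDelayContext, delayContext);
// Outside the lock: cancel the previous attempt's contexts.
Cancel(prevConnectContext);
Cancel(prevConnectTimeoutContext);
Cancel(prevConnectDelayContext);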
- - deferred.DeferReconnection(shared_from_this(), ErrorHandler, std::move(status)); -} - -void TSingleClusterReadSessionImpl::OnConnectTimeout(const NGrpc::IQueueClientContextPtr& connectTimeoutContext) { - with_lock (Lock) { - if (ConnectTimeoutContext == connectTimeoutContext) { - Cancel(ConnectContext); - ConnectContext = nullptr; - ConnectTimeoutContext = nullptr; - ConnectDelayContext = nullptr; - - if (Closing || Aborting) { - CallCloseCallbackImpl(); - return; - } - } else { - return; - } - } - - ++*Settings.Counters_->Errors; - TStringBuilder description; - description << "Failed to establish connection to server. Attempts done: " << ConnectionAttemptsDone; - if (!Reconnect(TPlainStatus(EStatus::TIMEOUT, description))) { - ErrorHandler->AbortSession(EStatus::TIMEOUT, description); - } -} - -void TSingleClusterReadSessionImpl::OnConnect(TPlainStatus&& st, typename IProcessor::TPtr&& processor, const NGrpc::IQueueClientContextPtr& connectContext) { - TDeferredActions deferred; - with_lock (Lock) { - if (ConnectContext == connectContext) { - Cancel(ConnectTimeoutContext); - ConnectContext = nullptr; - ConnectTimeoutContext = nullptr; - ConnectDelayContext = nullptr; - - if (Closing || Aborting) { - CallCloseCallbackImpl(); - return; - } - - if (st.Ok()) { - Processor = std::move(processor); - RetryState = nullptr; - ConnectionAttemptsDone = 0; - InitImpl(deferred); - return; - } - } else { - return; - } - } - - if (!st.Ok()) { - ++*Settings.Counters_->Errors; - if (!Reconnect(st)) { - ErrorHandler->AbortSession(st.Status, - MakeIssueWithSubIssues( - TStringBuilder() << "Failed to establish connection to server \"" << st.Endpoint << "\" ( cluster " << ClusterName << "). Attempts done: " - << ConnectionAttemptsDone, - st.Issues)); - } - } -} - -void TSingleClusterReadSessionImpl::InitImpl(TDeferredActions& deferred) { // Assumes that we're under lock. - Log << TLOG_DEBUG << "Successfully connected. Initializing session"; - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - auto& init = *req.mutable_init_request(); + if (!event.IsReady()) { + break; + } + } +} + +void TSingleClusterReadSessionImpl::Start() { + Settings.DecompressionExecutor_->Start(); + Settings.EventHandlers_.HandlersExecutor_->Start(); + if (!Reconnect(TPlainStatus())) { + ErrorHandler->AbortSession(EStatus::ABORTED, DRIVER_IS_STOPPING_DESCRIPTION); + } +} + +bool TSingleClusterReadSessionImpl::Reconnect(const TPlainStatus& status) { + TDuration delay = TDuration::Zero(); + NGrpc::IQueueClientContextPtr delayContext = nullptr; + NGrpc::IQueueClientContextPtr connectContext = ClientContext->CreateContext(); + NGrpc::IQueueClientContextPtr connectTimeoutContext = ClientContext->CreateContext(); + if (!connectContext || !connectTimeoutContext) { + return false; + } + + // Previous operations contexts. + NGrpc::IQueueClientContextPtr prevConnectContext; + NGrpc::IQueueClientContextPtr prevConnectTimeoutContext; + NGrpc::IQueueClientContextPtr prevConnectDelayContext; + + if (!status.Ok()) { + Log << TLOG_INFO << "Got error. Status: " << status.Status << ". 
Description: " << IssuesSingleLineString(status.Issues); + } + + TDeferredActions deferred; + with_lock (Lock) { + if (Aborting) { + Cancel(connectContext); + Cancel(connectTimeoutContext); + return false; + } + Processor = nullptr; + WaitingReadResponse = false; + ServerMessage = std::make_shared<Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>(); + ++ConnectionGeneration; + if (RetryState) { + TMaybe<TDuration> nextDelay = RetryState->GetNextRetryDelay(TPlainStatus(status)); + if (nextDelay) { + delay = *nextDelay; + delayContext = ClientContext->CreateContext(); + if (!delayContext) { + return false; + } + Log << TLOG_DEBUG << "Reconnecting session to cluster " << ClusterName << " in "<< delay; + } else { + return false; + } + } else { + RetryState = Settings.RetryPolicy_->CreateRetryState(); + } + ++ConnectionAttemptsDone; + + // Set new context + prevConnectContext = std::exchange(ConnectContext, connectContext); + prevConnectTimeoutContext = std::exchange(ConnectTimeoutContext, connectTimeoutContext); + prevConnectDelayContext = std::exchange(ConnectDelayContext, delayContext); + + Y_ASSERT(ConnectContext); + Y_ASSERT(ConnectTimeoutContext); + Y_ASSERT((delay == TDuration::Zero()) == !ConnectDelayContext); + + // Destroy all partition streams before connecting. + DestroyAllPartitionStreamsImpl(deferred); + } + + // Cancel previous operations. + Cancel(prevConnectContext); + Cancel(prevConnectTimeoutContext); + Cancel(prevConnectDelayContext); + + auto connectCallback = [weakThis = weak_from_this(), connectContext = connectContext](TPlainStatus&& st, typename IProcessor::TPtr&& processor) { + if (auto sharedThis = weakThis.lock()) { + sharedThis->OnConnect(std::move(st), std::move(processor), connectContext); + } + }; + + auto connectTimeoutCallback = [weakThis = weak_from_this(), connectTimeoutContext = connectTimeoutContext](bool ok) { + if (ok) { + if (auto sharedThis = weakThis.lock()) { + sharedThis->OnConnectTimeout(connectTimeoutContext); + } + } + }; + + Y_ASSERT(connectContext); + Y_ASSERT(connectTimeoutContext); + Y_ASSERT((delay == TDuration::Zero()) == !delayContext); + ConnectionFactory->CreateProcessor( + std::move(connectCallback), + TRpcRequestSettings::Make(Settings), + std::move(connectContext), + TDuration::Seconds(30) /* connect timeout */, // TODO: make connect timeout setting. + std::move(connectTimeoutContext), + std::move(connectTimeoutCallback), + delay, + std::move(delayContext)); + return true; +} + +void TSingleClusterReadSessionImpl::BreakConnectionAndReconnectImpl(TPlainStatus&& status, TDeferredActions& deferred) { + Log << TLOG_INFO << "Break connection due to unexpected message from server. Status: " << status.Status << ", Issues: \"" << IssuesSingleLineString(status.Issues) << "\""; + + Processor->Cancel(); + Processor = nullptr; + RetryState = Settings.RetryPolicy_->CreateRetryState(); // Explicitly create retry state to determine whether we should connect to server again. 
+ + deferred.DeferReconnection(shared_from_this(), ErrorHandler, std::move(status)); +} + +void TSingleClusterReadSessionImpl::OnConnectTimeout(const NGrpc::IQueueClientContextPtr& connectTimeoutContext) { + with_lock (Lock) { + if (ConnectTimeoutContext == connectTimeoutContext) { + Cancel(ConnectContext); + ConnectContext = nullptr; + ConnectTimeoutContext = nullptr; + ConnectDelayContext = nullptr; + + if (Closing || Aborting) { + CallCloseCallbackImpl(); + return; + } + } else { + return; + } + } + + ++*Settings.Counters_->Errors; + TStringBuilder description; + description << "Failed to establish connection to server. Attempts done: " << ConnectionAttemptsDone; + if (!Reconnect(TPlainStatus(EStatus::TIMEOUT, description))) { + ErrorHandler->AbortSession(EStatus::TIMEOUT, description); + } +} + +void TSingleClusterReadSessionImpl::OnConnect(TPlainStatus&& st, typename IProcessor::TPtr&& processor, const NGrpc::IQueueClientContextPtr& connectContext) { + TDeferredActions deferred; + with_lock (Lock) { + if (ConnectContext == connectContext) { + Cancel(ConnectTimeoutContext); + ConnectContext = nullptr; + ConnectTimeoutContext = nullptr; + ConnectDelayContext = nullptr; + + if (Closing || Aborting) { + CallCloseCallbackImpl(); + return; + } + + if (st.Ok()) { + Processor = std::move(processor); + RetryState = nullptr; + ConnectionAttemptsDone = 0; + InitImpl(deferred); + return; + } + } else { + return; + } + } + + if (!st.Ok()) { + ++*Settings.Counters_->Errors; + if (!Reconnect(st)) { + ErrorHandler->AbortSession(st.Status, + MakeIssueWithSubIssues( + TStringBuilder() << "Failed to establish connection to server \"" << st.Endpoint << "\" ( cluster " << ClusterName << "). Attempts done: " + << ConnectionAttemptsDone, + st.Issues)); + } + } +} + +void TSingleClusterReadSessionImpl::InitImpl(TDeferredActions& deferred) { // Assumes that we're under lock. + Log << TLOG_DEBUG << "Successfully connected. Initializing session"; + Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + auto& init = *req.mutable_init_request(); init.set_ranges_mode(RangesMode); - for (const TTopicReadSettings& topic : Settings.Topics_) { - auto* topicSettings = init.add_topics_read_settings(); - topicSettings->set_topic(topic.Path_); - if (topic.StartingMessageTimestamp_) { - topicSettings->set_start_from_written_at_ms(topic.StartingMessageTimestamp_->MilliSeconds()); - } - for (ui64 groupId : topic.PartitionGroupIds_) { - topicSettings->add_partition_group_ids(groupId); - } - } - init.set_consumer(Settings.ConsumerName_); - init.set_read_only_original(Settings.ReadOnlyOriginal_); - init.mutable_read_params()->set_max_read_size(Settings.MaxMemoryUsageBytes_); - if (Settings.MaxTimeLag_) { - init.set_max_lag_duration_ms(Settings.MaxTimeLag_->MilliSeconds()); - } - if (Settings.StartingMessageTimestamp_) { - init.set_start_from_written_at_ms(Settings.StartingMessageTimestamp_->MilliSeconds()); - } - - WriteToProcessorImpl(std::move(req)); - ReadFromProcessorImpl(deferred); -} - -void TSingleClusterReadSessionImpl::ContinueReadingDataImpl() { // Assumes that we're under lock. 
- if (!Closing - && !Aborting - && !WaitingReadResponse - && !DataReadingSuspended - && Processor - && CompressedDataSize < GetCompressedDataSizeLimit() - && static_cast<size_t>(CompressedDataSize + DecompressedDataSize) < Settings.MaxMemoryUsageBytes_) - { - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - req.mutable_read(); - - WriteToProcessorImpl(std::move(req)); - WaitingReadResponse = true; - } -} - -bool TSingleClusterReadSessionImpl::IsActualPartitionStreamImpl(const TPartitionStreamImpl* partitionStream) { // Assumes that we're under lock. + for (const TTopicReadSettings& topic : Settings.Topics_) { + auto* topicSettings = init.add_topics_read_settings(); + topicSettings->set_topic(topic.Path_); + if (topic.StartingMessageTimestamp_) { + topicSettings->set_start_from_written_at_ms(topic.StartingMessageTimestamp_->MilliSeconds()); + } + for (ui64 groupId : topic.PartitionGroupIds_) { + topicSettings->add_partition_group_ids(groupId); + } + } + init.set_consumer(Settings.ConsumerName_); + init.set_read_only_original(Settings.ReadOnlyOriginal_); + init.mutable_read_params()->set_max_read_size(Settings.MaxMemoryUsageBytes_); + if (Settings.MaxTimeLag_) { + init.set_max_lag_duration_ms(Settings.MaxTimeLag_->MilliSeconds()); + } + if (Settings.StartingMessageTimestamp_) { + init.set_start_from_written_at_ms(Settings.StartingMessageTimestamp_->MilliSeconds()); + } + + WriteToProcessorImpl(std::move(req)); + ReadFromProcessorImpl(deferred); +} + +void TSingleClusterReadSessionImpl::ContinueReadingDataImpl() { // Assumes that we're under lock. + if (!Closing + && !Aborting + && !WaitingReadResponse + && !DataReadingSuspended + && Processor + && CompressedDataSize < GetCompressedDataSizeLimit() + && static_cast<size_t>(CompressedDataSize + DecompressedDataSize) < Settings.MaxMemoryUsageBytes_) + { + Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + req.mutable_read(); + + WriteToProcessorImpl(std::move(req)); + WaitingReadResponse = true; + } +} + +bool TSingleClusterReadSessionImpl::IsActualPartitionStreamImpl(const TPartitionStreamImpl* partitionStream) { // Assumes that we're under lock. auto actualPartitionStreamIt = PartitionStreams.find(partitionStream->GetAssignId()); - return actualPartitionStreamIt != PartitionStreams.end() - && actualPartitionStreamIt->second->GetPartitionStreamId() == partitionStream->GetPartitionStreamId(); -} - -void TSingleClusterReadSessionImpl::ConfirmPartitionStreamCreate(const TPartitionStreamImpl* partitionStream, TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset) { - TStringBuilder commitOffsetLogStr; - if (commitOffset) { - commitOffsetLogStr << ". Commit offset: " << *commitOffset; - } - Log << TLOG_INFO << "Confirm partition stream create. Partition stream id: " << partitionStream->GetPartitionStreamId() - << ". Cluster: \"" << partitionStream->GetCluster() << "\". Topic: \"" << partitionStream->GetTopicPath() - << "\". Partition: " << partitionStream->GetPartitionId() - << ". Read offset: " << readOffset << commitOffsetLogStr; - - with_lock (Lock) { - if (Aborting || Closing || !IsActualPartitionStreamImpl(partitionStream)) { // Got previous incarnation. - Log << TLOG_DEBUG << "Skip partition stream create confirm. 
Partition stream id: " << partitionStream->GetPartitionStreamId(); - return; - } - - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - auto& startRead = *req.mutable_start_read(); - startRead.mutable_topic()->set_path(partitionStream->GetTopicPath()); - startRead.set_cluster(partitionStream->GetCluster()); - startRead.set_partition(partitionStream->GetPartitionId()); - startRead.set_assign_id(partitionStream->GetAssignId()); - if (readOffset) { - startRead.set_read_offset(*readOffset); - } - if (commitOffset) { - startRead.set_commit_offset(*commitOffset); - } - - WriteToProcessorImpl(std::move(req)); - } -} - -void TSingleClusterReadSessionImpl::ConfirmPartitionStreamDestroy(TPartitionStreamImpl* partitionStream) { - Log << TLOG_INFO << "Confirm partition stream destroy. Partition stream id: " << partitionStream->GetPartitionStreamId() - << ". Cluster: \"" << partitionStream->GetCluster() << "\". Topic: \"" << partitionStream->GetTopicPath() - << "\". Partition: " << partitionStream->GetPartitionId(); - - TDeferredActions deferred; - with_lock (Lock) { - if (Aborting || Closing || !IsActualPartitionStreamImpl(partitionStream)) { // Got previous incarnation. - Log << TLOG_DEBUG << "Skip partition stream destroy confirm. Partition stream id: " << partitionStream->GetPartitionStreamId(); - return; - } - - CookieMapping.RemoveMapping(partitionStream->GetPartitionStreamId()); + return actualPartitionStreamIt != PartitionStreams.end() + && actualPartitionStreamIt->second->GetPartitionStreamId() == partitionStream->GetPartitionStreamId(); +} + +void TSingleClusterReadSessionImpl::ConfirmPartitionStreamCreate(const TPartitionStreamImpl* partitionStream, TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset) { + TStringBuilder commitOffsetLogStr; + if (commitOffset) { + commitOffsetLogStr << ". Commit offset: " << *commitOffset; + } + Log << TLOG_INFO << "Confirm partition stream create. Partition stream id: " << partitionStream->GetPartitionStreamId() + << ". Cluster: \"" << partitionStream->GetCluster() << "\". Topic: \"" << partitionStream->GetTopicPath() + << "\". Partition: " << partitionStream->GetPartitionId() + << ". Read offset: " << readOffset << commitOffsetLogStr; + + with_lock (Lock) { + if (Aborting || Closing || !IsActualPartitionStreamImpl(partitionStream)) { // Got previous incarnation. + Log << TLOG_DEBUG << "Skip partition stream create confirm. Partition stream id: " << partitionStream->GetPartitionStreamId(); + return; + } + + Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + auto& startRead = *req.mutable_start_read(); + startRead.mutable_topic()->set_path(partitionStream->GetTopicPath()); + startRead.set_cluster(partitionStream->GetCluster()); + startRead.set_partition(partitionStream->GetPartitionId()); + startRead.set_assign_id(partitionStream->GetAssignId()); + if (readOffset) { + startRead.set_read_offset(*readOffset); + } + if (commitOffset) { + startRead.set_commit_offset(*commitOffset); + } + + WriteToProcessorImpl(std::move(req)); + } +} + +void TSingleClusterReadSessionImpl::ConfirmPartitionStreamDestroy(TPartitionStreamImpl* partitionStream) { + Log << TLOG_INFO << "Confirm partition stream destroy. Partition stream id: " << partitionStream->GetPartitionStreamId() + << ". Cluster: \"" << partitionStream->GetCluster() << "\". Topic: \"" << partitionStream->GetTopicPath() + << "\". 
Partition: " << partitionStream->GetPartitionId(); + + TDeferredActions deferred; + with_lock (Lock) { + if (Aborting || Closing || !IsActualPartitionStreamImpl(partitionStream)) { // Got previous incarnation. + Log << TLOG_DEBUG << "Skip partition stream destroy confirm. Partition stream id: " << partitionStream->GetPartitionStreamId(); + return; + } + + CookieMapping.RemoveMapping(partitionStream->GetPartitionStreamId()); PartitionStreams.erase(partitionStream->GetAssignId()); - EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TPartitionStreamClosedEvent(partitionStream, TReadSessionEvent::TPartitionStreamClosedEvent::EReason::DestroyConfirmedByUser)}, deferred); - - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - auto& released = *req.mutable_released(); - released.mutable_topic()->set_path(partitionStream->GetTopicPath()); - released.set_cluster(partitionStream->GetCluster()); - released.set_partition(partitionStream->GetPartitionId()); - released.set_assign_id(partitionStream->GetAssignId()); - - WriteToProcessorImpl(std::move(req)); - } -} - -void TSingleClusterReadSessionImpl::Commit(const TPartitionStreamImpl* partitionStream, ui64 startOffset, ui64 endOffset) { - Log << TLOG_DEBUG << "Commit offsets [" << startOffset << ", " << endOffset << "). Partition stream id: " << partitionStream->GetPartitionStreamId(); - with_lock (Lock) { - if (Aborting || Closing || !IsActualPartitionStreamImpl(partitionStream)) { // Got previous incarnation. - return; - } - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - bool hasSomethingToCommit = false; + EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TPartitionStreamClosedEvent(partitionStream, TReadSessionEvent::TPartitionStreamClosedEvent::EReason::DestroyConfirmedByUser)}, deferred); + + Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + auto& released = *req.mutable_released(); + released.mutable_topic()->set_path(partitionStream->GetTopicPath()); + released.set_cluster(partitionStream->GetCluster()); + released.set_partition(partitionStream->GetPartitionId()); + released.set_assign_id(partitionStream->GetAssignId()); + + WriteToProcessorImpl(std::move(req)); + } +} + +void TSingleClusterReadSessionImpl::Commit(const TPartitionStreamImpl* partitionStream, ui64 startOffset, ui64 endOffset) { + Log << TLOG_DEBUG << "Commit offsets [" << startOffset << ", " << endOffset << "). Partition stream id: " << partitionStream->GetPartitionStreamId(); + with_lock (Lock) { + if (Aborting || Closing || !IsActualPartitionStreamImpl(partitionStream)) { // Got previous incarnation. + return; + } + Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + bool hasSomethingToCommit = false; if (RangesMode) { hasSomethingToCommit = true; auto* range = req.mutable_commit()->add_offset_ranges(); @@ -995,61 +995,61 @@ void TSingleClusterReadSessionImpl::Commit(const TPartitionStreamImpl* partition cookieInfo->set_assign_id(partitionStream->GetAssignId()); cookieInfo->set_partition_cookie(cookie->Cookie); } - } - } - if (hasSomethingToCommit) { - WriteToProcessorImpl(std::move(req)); - } - } -} - -void TSingleClusterReadSessionImpl::RequestPartitionStreamStatus(const TPartitionStreamImpl* partitionStream) { - Log << TLOG_DEBUG << "Requesting status for partition stream id: " << partitionStream->GetPartitionStreamId(); - with_lock (Lock) { - if (Aborting || Closing || !IsActualPartitionStreamImpl(partitionStream)) { // Got previous incarnation. 
- return; - } - - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - auto& status = *req.mutable_status(); - status.mutable_topic()->set_path(partitionStream->GetTopicPath()); - status.set_cluster(partitionStream->GetCluster()); - status.set_partition(partitionStream->GetPartitionId()); - status.set_assign_id(partitionStream->GetAssignId()); - - WriteToProcessorImpl(std::move(req)); - } -} - -void TSingleClusterReadSessionImpl::OnUserRetrievedEvent(const TReadSessionEvent::TEvent& event) { - Log << TLOG_DEBUG << "Read session event " << DebugString(event); + } + } + if (hasSomethingToCommit) { + WriteToProcessorImpl(std::move(req)); + } + } +} + +void TSingleClusterReadSessionImpl::RequestPartitionStreamStatus(const TPartitionStreamImpl* partitionStream) { + Log << TLOG_DEBUG << "Requesting status for partition stream id: " << partitionStream->GetPartitionStreamId(); + with_lock (Lock) { + if (Aborting || Closing || !IsActualPartitionStreamImpl(partitionStream)) { // Got previous incarnation. + return; + } + + Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + auto& status = *req.mutable_status(); + status.mutable_topic()->set_path(partitionStream->GetTopicPath()); + status.set_cluster(partitionStream->GetCluster()); + status.set_partition(partitionStream->GetPartitionId()); + status.set_assign_id(partitionStream->GetAssignId()); + + WriteToProcessorImpl(std::move(req)); + } +} + +void TSingleClusterReadSessionImpl::OnUserRetrievedEvent(const TReadSessionEvent::TEvent& event) { + Log << TLOG_DEBUG << "Read session event " << DebugString(event); const i64 bytesCount = static_cast<i64>(CalcDataSize(event)); - Y_ASSERT(bytesCount >= 0); - + Y_ASSERT(bytesCount >= 0); + if (!std::get_if<TReadSessionEvent::TDataReceivedEvent>(&event)) { // Event is not data event. - return; - } - + return; + } + *Settings.Counters_->MessagesInflight -= std::get<TReadSessionEvent::TDataReceivedEvent>(event).GetMessagesCount(); - *Settings.Counters_->BytesInflightTotal -= bytesCount; - *Settings.Counters_->BytesInflightUncompressed -= bytesCount; - - TDeferredActions deferred; - with_lock (Lock) { - UpdateMemoryUsageStatisticsImpl(); - Y_VERIFY(bytesCount <= DecompressedDataSize); - DecompressedDataSize -= bytesCount; - ContinueReadingDataImpl(); - StartDecompressionTasksImpl(deferred); - } -} - -void TSingleClusterReadSessionImpl::WriteToProcessorImpl(Ydb::PersQueue::V1::MigrationStreamingReadClientMessage&& req) { // Assumes that we're under lock. - if (Processor) { - Processor->Write(std::move(req)); - } -} - + *Settings.Counters_->BytesInflightTotal -= bytesCount; + *Settings.Counters_->BytesInflightUncompressed -= bytesCount; + + TDeferredActions deferred; + with_lock (Lock) { + UpdateMemoryUsageStatisticsImpl(); + Y_VERIFY(bytesCount <= DecompressedDataSize); + DecompressedDataSize -= bytesCount; + ContinueReadingDataImpl(); + StartDecompressionTasksImpl(deferred); + } +} + +void TSingleClusterReadSessionImpl::WriteToProcessorImpl(Ydb::PersQueue::V1::MigrationStreamingReadClientMessage&& req) { // Assumes that we're under lock. 
+ if (Processor) { + Processor->Write(std::move(req)); + } +} + bool TSingleClusterReadSessionImpl::HasCommitsInflightImpl() const { for (const auto& [id, partitionStream] : PartitionStreams) { if (partitionStream->HasCommitsInflight()) @@ -1058,141 +1058,141 @@ bool TSingleClusterReadSessionImpl::HasCommitsInflightImpl() const { return false; } -void TSingleClusterReadSessionImpl::ReadFromProcessorImpl(TDeferredActions& deferred) { // Assumes that we're under lock. +void TSingleClusterReadSessionImpl::ReadFromProcessorImpl(TDeferredActions& deferred) { // Assumes that we're under lock. if (Closing && !HasCommitsInflightImpl()) { - Processor->Cancel(); - CallCloseCallbackImpl(); - return; - } - - if (Processor) { - ServerMessage->Clear(); - - auto callback = [weakThis = weak_from_this(), - connectionGeneration = ConnectionGeneration, - // Capture message & processor not to read in freed memory. - serverMessage = ServerMessage, - processor = Processor](NGrpc::TGrpcStatus&& grpcStatus) { - if (auto sharedThis = weakThis.lock()) { - sharedThis->OnReadDone(std::move(grpcStatus), connectionGeneration); - } - }; - - deferred.DeferReadFromProcessor(Processor, ServerMessage.get(), std::move(callback)); - } -} - -void TSingleClusterReadSessionImpl::OnReadDone(NGrpc::TGrpcStatus&& grpcStatus, size_t connectionGeneration) { - TPlainStatus errorStatus; - if (!grpcStatus.Ok()) { - errorStatus = TPlainStatus(std::move(grpcStatus)); - } - - TDeferredActions deferred; - with_lock (Lock) { - if (Aborting) { - return; - } - - if (connectionGeneration != ConnectionGeneration) { - return; // Message from previous connection. Ignore. - } - if (errorStatus.Ok()) { - if (IsErrorMessage(*ServerMessage)) { - errorStatus = MakeErrorFromProto(*ServerMessage); - } else { - switch (ServerMessage->response_case()) { - case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kInitResponse: - OnReadDoneImpl(std::move(*ServerMessage->mutable_init_response()), deferred); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kDataBatch: - OnReadDoneImpl(std::move(*ServerMessage->mutable_data_batch()), deferred); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kAssigned: - OnReadDoneImpl(std::move(*ServerMessage->mutable_assigned()), deferred); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kRelease: - OnReadDoneImpl(std::move(*ServerMessage->mutable_release()), deferred); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kCommitted: - OnReadDoneImpl(std::move(*ServerMessage->mutable_committed()), deferred); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kPartitionStatus: - OnReadDoneImpl(std::move(*ServerMessage->mutable_partition_status()), deferred); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::RESPONSE_NOT_SET: - errorStatus = TPlainStatus::Internal("Unexpected response from server"); - break; - } - if (errorStatus.Ok()) { - ReadFromProcessorImpl(deferred); // Read next. - } - } - } - } - if (!errorStatus.Ok()) { - ++*Settings.Counters_->Errors; - RetryState = Settings.RetryPolicy_->CreateRetryState(); // Explicitly create retry state to determine whether we should connect to server again. 
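One detail in the ReadFromProcessorImpl()/OnReadDone() pair above: the read callback captures the connection generation that was current when the read was issued, and OnReadDone() drops any reply whose generation no longer matches, so a response racing in from a cancelled connection cannot affect the new one. Condensed from the code above:

auto callback = [weakThis = weak_from_this(),
                 connectionGeneration = ConnectionGeneration,
                 serverMessage = ServerMessage,   // keep the buffer alive for the in-flight read
                 processor = Processor](NGrpc::TGrpcStatus&& grpcStatus) {
    if (auto sharedThis = weakThis.lock()) {
        // OnReadDone() returns early when connectionGeneration != ConnectionGeneration.
        sharedThis->OnReadDone(std::move(grpcStatus), connectionGeneration);
    }
};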
- if (!Reconnect(errorStatus)) { - ErrorHandler->AbortSession(std::move(errorStatus)); - } - } -} - -void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::InitResponse&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. - Y_UNUSED(deferred); - - Log << TLOG_INFO << "Server session id: " << msg.session_id(); - - // Successful init. Do nothing. - ContinueReadingDataImpl(); -} - -void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. - if (Closing || Aborting) { - return; // Don't process new data. - } - UpdateMemoryUsageStatisticsImpl(); - for (Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData& partitionData : *msg.mutable_partition_data()) { + Processor->Cancel(); + CallCloseCallbackImpl(); + return; + } + + if (Processor) { + ServerMessage->Clear(); + + auto callback = [weakThis = weak_from_this(), + connectionGeneration = ConnectionGeneration, + // Capture message & processor not to read in freed memory. + serverMessage = ServerMessage, + processor = Processor](NGrpc::TGrpcStatus&& grpcStatus) { + if (auto sharedThis = weakThis.lock()) { + sharedThis->OnReadDone(std::move(grpcStatus), connectionGeneration); + } + }; + + deferred.DeferReadFromProcessor(Processor, ServerMessage.get(), std::move(callback)); + } +} + +void TSingleClusterReadSessionImpl::OnReadDone(NGrpc::TGrpcStatus&& grpcStatus, size_t connectionGeneration) { + TPlainStatus errorStatus; + if (!grpcStatus.Ok()) { + errorStatus = TPlainStatus(std::move(grpcStatus)); + } + + TDeferredActions deferred; + with_lock (Lock) { + if (Aborting) { + return; + } + + if (connectionGeneration != ConnectionGeneration) { + return; // Message from previous connection. Ignore. + } + if (errorStatus.Ok()) { + if (IsErrorMessage(*ServerMessage)) { + errorStatus = MakeErrorFromProto(*ServerMessage); + } else { + switch (ServerMessage->response_case()) { + case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kInitResponse: + OnReadDoneImpl(std::move(*ServerMessage->mutable_init_response()), deferred); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kDataBatch: + OnReadDoneImpl(std::move(*ServerMessage->mutable_data_batch()), deferred); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kAssigned: + OnReadDoneImpl(std::move(*ServerMessage->mutable_assigned()), deferred); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kRelease: + OnReadDoneImpl(std::move(*ServerMessage->mutable_release()), deferred); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kCommitted: + OnReadDoneImpl(std::move(*ServerMessage->mutable_committed()), deferred); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kPartitionStatus: + OnReadDoneImpl(std::move(*ServerMessage->mutable_partition_status()), deferred); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::RESPONSE_NOT_SET: + errorStatus = TPlainStatus::Internal("Unexpected response from server"); + break; + } + if (errorStatus.Ok()) { + ReadFromProcessorImpl(deferred); // Read next. + } + } + } + } + if (!errorStatus.Ok()) { + ++*Settings.Counters_->Errors; + RetryState = Settings.RetryPolicy_->CreateRetryState(); // Explicitly create retry state to determine whether we should connect to server again. 
+ if (!Reconnect(errorStatus)) { + ErrorHandler->AbortSession(std::move(errorStatus)); + } + } +} + +void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::InitResponse&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. + Y_UNUSED(deferred); + + Log << TLOG_INFO << "Server session id: " << msg.session_id(); + + // Successful init. Do nothing. + ContinueReadingDataImpl(); +} + +void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. + if (Closing || Aborting) { + return; // Don't process new data. + } + UpdateMemoryUsageStatisticsImpl(); + for (Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData& partitionData : *msg.mutable_partition_data()) { auto partitionStreamIt = PartitionStreams.find(partitionData.cookie().assign_id()); - if (partitionStreamIt == PartitionStreams.end()) { - ++*Settings.Counters_->Errors; - BreakConnectionAndReconnectImpl(EStatus::INTERNAL_ERROR, - TStringBuilder() << "Got unexpected partition stream data message. Topic: " - << partitionData.topic() + if (partitionStreamIt == PartitionStreams.end()) { + ++*Settings.Counters_->Errors; + BreakConnectionAndReconnectImpl(EStatus::INTERNAL_ERROR, + TStringBuilder() << "Got unexpected partition stream data message. Topic: " + << partitionData.topic() << ". Partition: " << partitionData.partition() << " AssignId: " << partitionData.cookie().assign_id(), - deferred); - return; - } - const TIntrusivePtr<TPartitionStreamImpl>& partitionStream = partitionStreamIt->second; - - TPartitionCookieMapping::TCookie::TPtr cookie = MakeIntrusive<TPartitionCookieMapping::TCookie>(partitionData.cookie().partition_cookie(), partitionStream); - - ui64 firstOffset = std::numeric_limits<ui64>::max(); - ui64 currentOffset = std::numeric_limits<ui64>::max(); + deferred); + return; + } + const TIntrusivePtr<TPartitionStreamImpl>& partitionStream = partitionStreamIt->second; + + TPartitionCookieMapping::TCookie::TPtr cookie = MakeIntrusive<TPartitionCookieMapping::TCookie>(partitionData.cookie().partition_cookie(), partitionStream); + + ui64 firstOffset = std::numeric_limits<ui64>::max(); + ui64 currentOffset = std::numeric_limits<ui64>::max(); ui64 desiredOffset = partitionStream->GetFirstNotReadOffset(); - for (const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::Batch& batch : partitionData.batches()) { - // Validate messages. - for (const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::MessageData& messageData : batch.message_data()) { - // Check offsets continuity. + for (const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::Batch& batch : partitionData.batches()) { + // Validate messages. + for (const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::MessageData& messageData : batch.message_data()) { + // Check offsets continuity. 
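// desiredOffset is the next offset expected from this partition stream; when the server skips ahead, the gap [desiredOffset, messageData.offset()) is handed to AddToCommitRanges() so the skipped range still participates in commit bookkeeping.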
if (messageData.offset() != desiredOffset) { bool res = partitionStream->AddToCommitRanges(desiredOffset, messageData.offset(), RangesMode); Y_VERIFY(res); } if (firstOffset == std::numeric_limits<ui64>::max()) { - firstOffset = messageData.offset(); - } - currentOffset = messageData.offset(); + firstOffset = messageData.offset(); + } + currentOffset = messageData.offset(); desiredOffset = currentOffset + 1; - partitionStream->UpdateMaxReadOffset(currentOffset); - const i64 messageSize = static_cast<i64>(messageData.data().size()); - CompressedDataSize += messageSize; - *Settings.Counters_->BytesInflightTotal += messageSize; - *Settings.Counters_->BytesInflightCompressed += messageSize; - ++*Settings.Counters_->MessagesInflight; - } - } + partitionStream->UpdateMaxReadOffset(currentOffset); + const i64 messageSize = static_cast<i64>(messageData.data().size()); + CompressedDataSize += messageSize; + *Settings.Counters_->BytesInflightTotal += messageSize; + *Settings.Counters_->BytesInflightCompressed += messageSize; + ++*Settings.Counters_->MessagesInflight; + } + } if (firstOffset == std::numeric_limits<ui64>::max()) { BreakConnectionAndReconnectImpl(EStatus::INTERNAL_ERROR, TStringBuilder() << "Got empty data message. Topic: " @@ -1204,78 +1204,78 @@ void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::Migration } cookie->SetOffsetRange(std::make_pair(firstOffset, desiredOffset)); partitionStream->SetFirstNotReadOffset(desiredOffset); - if (!CookieMapping.AddMapping(cookie)) { - BreakConnectionAndReconnectImpl(EStatus::INTERNAL_ERROR, - TStringBuilder() << "Got unexpected data message. Topic: " - << partitionData.topic() - << ". Partition: " << partitionData.partition() - << ". Cookie mapping already has such cookie", - deferred); - return; - } - TDataDecompressionInfo* decompressionInfo = EventsQueue->PushDataEvent(partitionStream, std::move(partitionData)); - Y_VERIFY(decompressionInfo); - if (decompressionInfo) { - DecompressionQueue.emplace_back(decompressionInfo, partitionStream); - StartDecompressionTasksImpl(deferred); - } - } - - WaitingReadResponse = false; - ContinueReadingDataImpl(); -} - -void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Assigned&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. - auto partitionStream = MakeIntrusive<TPartitionStreamImpl>(NextPartitionStreamId, - msg.topic().path(), - msg.cluster(), + if (!CookieMapping.AddMapping(cookie)) { + BreakConnectionAndReconnectImpl(EStatus::INTERNAL_ERROR, + TStringBuilder() << "Got unexpected data message. Topic: " + << partitionData.topic() + << ". Partition: " << partitionData.partition() + << ". Cookie mapping already has such cookie", + deferred); + return; + } + TDataDecompressionInfo* decompressionInfo = EventsQueue->PushDataEvent(partitionStream, std::move(partitionData)); + Y_VERIFY(decompressionInfo); + if (decompressionInfo) { + DecompressionQueue.emplace_back(decompressionInfo, partitionStream); + StartDecompressionTasksImpl(deferred); + } + } + + WaitingReadResponse = false; + ContinueReadingDataImpl(); +} + +void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Assigned&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. + auto partitionStream = MakeIntrusive<TPartitionStreamImpl>(NextPartitionStreamId, + msg.topic().path(), + msg.cluster(), msg.partition() + 1, // Group. - msg.partition(), // Partition. 
- msg.assign_id(), + msg.partition(), // Partition. + msg.assign_id(), msg.read_offset(), - weak_from_this(), - ErrorHandler); - NextPartitionStreamId += PartitionStreamIdStep; - - // Renew partition stream. + weak_from_this(), + ErrorHandler); + NextPartitionStreamId += PartitionStreamIdStep; + + // Renew partition stream. TIntrusivePtr<TPartitionStreamImpl>& currentPartitionStream = PartitionStreams[partitionStream->GetAssignId()]; - if (currentPartitionStream) { - CookieMapping.RemoveMapping(currentPartitionStream->GetPartitionStreamId()); - EventsQueue->PushEvent({currentPartitionStream, weak_from_this(), TReadSessionEvent::TPartitionStreamClosedEvent(currentPartitionStream, TReadSessionEvent::TPartitionStreamClosedEvent::EReason::Lost)}, deferred); - } - currentPartitionStream = partitionStream; - - // Send event to user. - EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TCreatePartitionStreamEvent(partitionStream, msg.read_offset(), msg.end_offset())}, deferred); -} - -void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Release&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. + if (currentPartitionStream) { + CookieMapping.RemoveMapping(currentPartitionStream->GetPartitionStreamId()); + EventsQueue->PushEvent({currentPartitionStream, weak_from_this(), TReadSessionEvent::TPartitionStreamClosedEvent(currentPartitionStream, TReadSessionEvent::TPartitionStreamClosedEvent::EReason::Lost)}, deferred); + } + currentPartitionStream = partitionStream; + + // Send event to user. + EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TCreatePartitionStreamEvent(partitionStream, msg.read_offset(), msg.end_offset())}, deferred); +} + +void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Release&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. auto partitionStreamIt = PartitionStreams.find(msg.assign_id()); if (partitionStreamIt == PartitionStreams.end()) { - return; - } - TIntrusivePtr<TPartitionStreamImpl> partitionStream = partitionStreamIt->second; - if (msg.forceful_release()) { + return; + } + TIntrusivePtr<TPartitionStreamImpl> partitionStream = partitionStreamIt->second; + if (msg.forceful_release()) { PartitionStreams.erase(msg.assign_id()); - CookieMapping.RemoveMapping(partitionStream->GetPartitionStreamId()); - EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TPartitionStreamClosedEvent(partitionStream, TReadSessionEvent::TPartitionStreamClosedEvent::EReason::Lost)}, deferred); - } else { - EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TDestroyPartitionStreamEvent(std::move(partitionStream), msg.commit_offset())}, deferred); - } -} - -void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Committed&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. 
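// Each committed cookie is resolved back to its partition stream, the stream's max committed offset is advanced, and a single TCommitAcknowledgementEvent per affected stream is pushed to the events queue.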
+ CookieMapping.RemoveMapping(partitionStream->GetPartitionStreamId()); + EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TPartitionStreamClosedEvent(partitionStream, TReadSessionEvent::TPartitionStreamClosedEvent::EReason::Lost)}, deferred); + } else { + EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TDestroyPartitionStreamEvent(std::move(partitionStream), msg.commit_offset())}, deferred); + } +} + +void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Committed&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. Log << TLOG_DEBUG << "Committed response: " << msg; TMap<ui64, TIntrusivePtr<TPartitionStreamImpl>> partitionStreams; - for (const Ydb::PersQueue::V1::CommitCookie& cookieProto : msg.cookies()) { - TPartitionCookieMapping::TCookie::TPtr cookie = CookieMapping.RetrieveCommittedCookie(cookieProto); - if (cookie) { + for (const Ydb::PersQueue::V1::CommitCookie& cookieProto : msg.cookies()) { + TPartitionCookieMapping::TCookie::TPtr cookie = CookieMapping.RetrieveCommittedCookie(cookieProto); + if (cookie) { cookie->PartitionStream->UpdateMaxCommittedOffset(cookie->OffsetRange.second); partitionStreams[cookie->PartitionStream->GetPartitionStreamId()] = cookie->PartitionStream; - } - } + } + } for (auto& [id, partitionStream] : partitionStreams) { EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TCommitAcknowledgementEvent(partitionStream, partitionStream->GetMaxCommittedOffset())}, deferred); } @@ -1289,416 +1289,416 @@ void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::Migration } } -} - -void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::PartitionStatus&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. +} + +void TSingleClusterReadSessionImpl::OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::PartitionStatus&& msg, TDeferredActions& deferred) { // Assumes that we're under lock. auto partitionStreamIt = PartitionStreams.find(msg.assign_id()); if (partitionStreamIt == PartitionStreams.end()) { - return; - } - EventsQueue->PushEvent( - {partitionStreamIt->second, weak_from_this(), TReadSessionEvent::TPartitionStreamStatusEvent(partitionStreamIt->second, - msg.committed_offset(), - 0, // TODO: support read offset in status - msg.end_offset(), - TInstant::MilliSeconds(msg.write_watermark_ms()))}, - deferred); -} - -void TSingleClusterReadSessionImpl::StartDecompressionTasksImpl(TDeferredActions& deferred) { - UpdateMemoryUsageStatisticsImpl(); - const i64 limit = GetDecompressedDataSizeLimit(); - Y_VERIFY(limit > 0); - while (DecompressedDataSize < limit - && (static_cast<size_t>(CompressedDataSize + DecompressedDataSize) < Settings.MaxMemoryUsageBytes_ - || DecompressedDataSize == 0 /* Allow decompression of at least one message even if memory is full. 
*/) - && !DecompressionQueue.empty()) - { - TDecompressionQueueItem& current = DecompressionQueue.front(); - auto sentToDecompress = current.BatchInfo->StartDecompressionTasks(Settings.DecompressionExecutor_, - Max(limit - DecompressedDataSize, static_cast<i64>(1)), - AverageCompressionRatio, - current.PartitionStream, - deferred); - DecompressedDataSize += sentToDecompress; - if (current.BatchInfo->AllDecompressionTasksStarted()) { - DecompressionQueue.pop_front(); - } else { - break; - } - } -} - -void TSingleClusterReadSessionImpl::DestroyAllPartitionStreamsImpl(TDeferredActions& deferred) { - for (auto&& [key, partitionStream] : PartitionStreams) { - EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TPartitionStreamClosedEvent(std::move(partitionStream), TReadSessionEvent::TPartitionStreamClosedEvent::EReason::ConnectionLost)}, deferred); - } - PartitionStreams.clear(); - CookieMapping.ClearMapping(); -} - -void TSingleClusterReadSessionImpl::OnCreateNewDecompressionTask() { - ++DecompressionTasksInflight; -} - -void TSingleClusterReadSessionImpl::OnDataDecompressed(i64 sourceSize, i64 estimatedDecompressedSize, i64 decompressedSize, size_t messagesCount) { - TDeferredActions deferred; - --DecompressionTasksInflight; - - *Settings.Counters_->BytesRead += decompressedSize; - *Settings.Counters_->BytesReadCompressed += sourceSize; - *Settings.Counters_->MessagesRead += messagesCount; - *Settings.Counters_->BytesInflightUncompressed += decompressedSize; - *Settings.Counters_->BytesInflightCompressed -= sourceSize; - *Settings.Counters_->BytesInflightTotal += (decompressedSize - sourceSize); - - with_lock (Lock) { - UpdateMemoryUsageStatisticsImpl(); - CompressedDataSize -= sourceSize; - DecompressedDataSize += decompressedSize - estimatedDecompressedSize; - constexpr double weight = 0.6; - AverageCompressionRatio = weight * static_cast<double>(decompressedSize) / static_cast<double>(sourceSize) + (1 - weight) * AverageCompressionRatio; + return; + } + EventsQueue->PushEvent( + {partitionStreamIt->second, weak_from_this(), TReadSessionEvent::TPartitionStreamStatusEvent(partitionStreamIt->second, + msg.committed_offset(), + 0, // TODO: support read offset in status + msg.end_offset(), + TInstant::MilliSeconds(msg.write_watermark_ms()))}, + deferred); +} + +void TSingleClusterReadSessionImpl::StartDecompressionTasksImpl(TDeferredActions& deferred) { + UpdateMemoryUsageStatisticsImpl(); + const i64 limit = GetDecompressedDataSizeLimit(); + Y_VERIFY(limit > 0); + while (DecompressedDataSize < limit + && (static_cast<size_t>(CompressedDataSize + DecompressedDataSize) < Settings.MaxMemoryUsageBytes_ + || DecompressedDataSize == 0 /* Allow decompression of at least one message even if memory is full. 
*/) + && !DecompressionQueue.empty()) + { + TDecompressionQueueItem& current = DecompressionQueue.front(); + auto sentToDecompress = current.BatchInfo->StartDecompressionTasks(Settings.DecompressionExecutor_, + Max(limit - DecompressedDataSize, static_cast<i64>(1)), + AverageCompressionRatio, + current.PartitionStream, + deferred); + DecompressedDataSize += sentToDecompress; + if (current.BatchInfo->AllDecompressionTasksStarted()) { + DecompressionQueue.pop_front(); + } else { + break; + } + } +} + +void TSingleClusterReadSessionImpl::DestroyAllPartitionStreamsImpl(TDeferredActions& deferred) { + for (auto&& [key, partitionStream] : PartitionStreams) { + EventsQueue->PushEvent({partitionStream, weak_from_this(), TReadSessionEvent::TPartitionStreamClosedEvent(std::move(partitionStream), TReadSessionEvent::TPartitionStreamClosedEvent::EReason::ConnectionLost)}, deferred); + } + PartitionStreams.clear(); + CookieMapping.ClearMapping(); +} + +void TSingleClusterReadSessionImpl::OnCreateNewDecompressionTask() { + ++DecompressionTasksInflight; +} + +void TSingleClusterReadSessionImpl::OnDataDecompressed(i64 sourceSize, i64 estimatedDecompressedSize, i64 decompressedSize, size_t messagesCount) { + TDeferredActions deferred; + --DecompressionTasksInflight; + + *Settings.Counters_->BytesRead += decompressedSize; + *Settings.Counters_->BytesReadCompressed += sourceSize; + *Settings.Counters_->MessagesRead += messagesCount; + *Settings.Counters_->BytesInflightUncompressed += decompressedSize; + *Settings.Counters_->BytesInflightCompressed -= sourceSize; + *Settings.Counters_->BytesInflightTotal += (decompressedSize - sourceSize); + + with_lock (Lock) { + UpdateMemoryUsageStatisticsImpl(); + CompressedDataSize -= sourceSize; + DecompressedDataSize += decompressedSize - estimatedDecompressedSize; + constexpr double weight = 0.6; + AverageCompressionRatio = weight * static_cast<double>(decompressedSize) / static_cast<double>(sourceSize) + (1 - weight) * AverageCompressionRatio; if (Aborting) { return; } - ContinueReadingDataImpl(); - StartDecompressionTasksImpl(deferred); - } -} - -void TSingleClusterReadSessionImpl::Abort() { - Log << TLOG_DEBUG << "Abort session to cluster"; - - with_lock (Lock) { - if (!Aborting) { - Aborting = true; - CloseCallback = {}; - - // Cancel(ClientContext); // Don't cancel, because this is used only as factory for other contexts. - Cancel(ConnectContext); - Cancel(ConnectTimeoutContext); - Cancel(ConnectDelayContext); - - if (Processor) { - Processor->Cancel(); - } - } - } -} - -void TSingleClusterReadSessionImpl::Close(std::function<void()> callback) { - with_lock (Lock) { - if (Aborting) { - callback(); - } - - if (!Closing) { - Closing = true; - - CloseCallback = std::move(callback); - - Cancel(ConnectContext); - Cancel(ConnectTimeoutContext); - Cancel(ConnectDelayContext); - - if (!Processor) { - CallCloseCallbackImpl(); - } else { + ContinueReadingDataImpl(); + StartDecompressionTasksImpl(deferred); + } +} + +void TSingleClusterReadSessionImpl::Abort() { + Log << TLOG_DEBUG << "Abort session to cluster"; + + with_lock (Lock) { + if (!Aborting) { + Aborting = true; + CloseCallback = {}; + + // Cancel(ClientContext); // Don't cancel, because this is used only as factory for other contexts. 
+ Cancel(ConnectContext); + Cancel(ConnectTimeoutContext); + Cancel(ConnectDelayContext); + + if (Processor) { + Processor->Cancel(); + } + } + } +} + +void TSingleClusterReadSessionImpl::Close(std::function<void()> callback) { + with_lock (Lock) { + if (Aborting) { + callback(); + } + + if (!Closing) { + Closing = true; + + CloseCallback = std::move(callback); + + Cancel(ConnectContext); + Cancel(ConnectTimeoutContext); + Cancel(ConnectDelayContext); + + if (!Processor) { + CallCloseCallbackImpl(); + } else { if (!HasCommitsInflightImpl()) { - Processor->Cancel(); - CallCloseCallbackImpl(); - } - } - } - } -} - -void TSingleClusterReadSessionImpl::CallCloseCallbackImpl() { - if (CloseCallback) { - CloseCallback(); - CloseCallback = {}; - } - Aborting = true; // So abort call will have no effect. -} - -void TSingleClusterReadSessionImpl::StopReadingData() { - with_lock (Lock) { - DataReadingSuspended = true; - } -} - -void TSingleClusterReadSessionImpl::ResumeReadingData() { - with_lock (Lock) { - if (DataReadingSuspended) { - DataReadingSuspended = false; - ContinueReadingDataImpl(); - } - } -} - -void TSingleClusterReadSessionImpl::WaitAllDecompressionTasks() { - Y_ASSERT(DecompressionTasksInflight >= 0); - while (DecompressionTasksInflight > 0) { - Sleep(TDuration::MilliSeconds(5)); // Perform active wait because this is aborting process and there are no decompression tasks here in normal situation. - } -} - -void TSingleClusterReadSessionImpl::DumpStatisticsToLog(TLogElement& log) { - with_lock (Lock) { - // cluster:topic:partition:stream-id:read-offset:committed-offset - for (auto&& [key, partitionStream] : PartitionStreams) { - log << " " - << ClusterName - << ':' << partitionStream->GetTopicPath() - << ':' << partitionStream->GetPartitionId() - << ':' << partitionStream->GetPartitionStreamId() - << ':' << partitionStream->GetMaxReadOffset() - << ':' << partitionStream->GetMaxCommittedOffset(); - } - } -} - -void TSingleClusterReadSessionImpl::UpdateMemoryUsageStatisticsImpl() { - const TInstant now = TInstant::Now(); - const ui64 delta = (now - UsageStatisticsLastUpdateTime).MilliSeconds(); - UsageStatisticsLastUpdateTime = now; - const double percent = 100.0 / static_cast<double>(Settings.MaxMemoryUsageBytes_); - - Settings.Counters_->TotalBytesInflightUsageByTime->Collect((DecompressedDataSize + CompressedDataSize) * percent, delta); - Settings.Counters_->UncompressedBytesInflightUsageByTime->Collect(DecompressedDataSize * percent, delta); - Settings.Counters_->CompressedBytesInflightUsageByTime->Collect(CompressedDataSize * percent, delta); -} - -void TSingleClusterReadSessionImpl::UpdateMemoryUsageStatistics() { - with_lock (Lock) { - UpdateMemoryUsageStatisticsImpl(); - } -} - -bool TSingleClusterReadSessionImpl::TPartitionCookieMapping::AddMapping(const TCookie::TPtr& cookie) { - if (!Cookies.emplace(cookie->GetKey(), cookie).second) { - return false; - } - for (ui64 offset = cookie->OffsetRange.first; offset < cookie->OffsetRange.second; ++offset) { - if (!UncommittedOffsetToCookie.emplace(std::make_pair(cookie->PartitionStream->GetPartitionStreamId(), offset), cookie).second) { - return false; - } - } - PartitionStreamIdToCookie.emplace(cookie->PartitionStream->GetPartitionStreamId(), cookie); - return true; -} - -TSingleClusterReadSessionImpl::TPartitionCookieMapping::TCookie::TPtr TSingleClusterReadSessionImpl::TPartitionCookieMapping::CommitOffset(ui64 partitionStreamId, ui64 offset) { - auto cookieIt = UncommittedOffsetToCookie.find(std::make_pair(partitionStreamId, 
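// Cancel the processor right away only when no commits are in flight; otherwise the close is finished later by ReadFromProcessorImpl(), which cancels the processor and invokes the close callback once Closing is set and HasCommitsInflightImpl() becomes false.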
offset)); - if (cookieIt != UncommittedOffsetToCookie.end()) { - TCookie::TPtr cookie; - if (!--cookieIt->second->UncommittedMessagesLeft) { - ++CommitInflight; - cookie = cookieIt->second; - } - UncommittedOffsetToCookie.erase(cookieIt); - return cookie; - } else { + Processor->Cancel(); + CallCloseCallbackImpl(); + } + } + } + } +} + +void TSingleClusterReadSessionImpl::CallCloseCallbackImpl() { + if (CloseCallback) { + CloseCallback(); + CloseCallback = {}; + } + Aborting = true; // So abort call will have no effect. +} + +void TSingleClusterReadSessionImpl::StopReadingData() { + with_lock (Lock) { + DataReadingSuspended = true; + } +} + +void TSingleClusterReadSessionImpl::ResumeReadingData() { + with_lock (Lock) { + if (DataReadingSuspended) { + DataReadingSuspended = false; + ContinueReadingDataImpl(); + } + } +} + +void TSingleClusterReadSessionImpl::WaitAllDecompressionTasks() { + Y_ASSERT(DecompressionTasksInflight >= 0); + while (DecompressionTasksInflight > 0) { + Sleep(TDuration::MilliSeconds(5)); // Perform active wait because this is aborting process and there are no decompression tasks here in normal situation. + } +} + +void TSingleClusterReadSessionImpl::DumpStatisticsToLog(TLogElement& log) { + with_lock (Lock) { + // cluster:topic:partition:stream-id:read-offset:committed-offset + for (auto&& [key, partitionStream] : PartitionStreams) { + log << " " + << ClusterName + << ':' << partitionStream->GetTopicPath() + << ':' << partitionStream->GetPartitionId() + << ':' << partitionStream->GetPartitionStreamId() + << ':' << partitionStream->GetMaxReadOffset() + << ':' << partitionStream->GetMaxCommittedOffset(); + } + } +} + +void TSingleClusterReadSessionImpl::UpdateMemoryUsageStatisticsImpl() { + const TInstant now = TInstant::Now(); + const ui64 delta = (now - UsageStatisticsLastUpdateTime).MilliSeconds(); + UsageStatisticsLastUpdateTime = now; + const double percent = 100.0 / static_cast<double>(Settings.MaxMemoryUsageBytes_); + + Settings.Counters_->TotalBytesInflightUsageByTime->Collect((DecompressedDataSize + CompressedDataSize) * percent, delta); + Settings.Counters_->UncompressedBytesInflightUsageByTime->Collect(DecompressedDataSize * percent, delta); + Settings.Counters_->CompressedBytesInflightUsageByTime->Collect(CompressedDataSize * percent, delta); +} + +void TSingleClusterReadSessionImpl::UpdateMemoryUsageStatistics() { + with_lock (Lock) { + UpdateMemoryUsageStatisticsImpl(); + } +} + +bool TSingleClusterReadSessionImpl::TPartitionCookieMapping::AddMapping(const TCookie::TPtr& cookie) { + if (!Cookies.emplace(cookie->GetKey(), cookie).second) { + return false; + } + for (ui64 offset = cookie->OffsetRange.first; offset < cookie->OffsetRange.second; ++offset) { + if (!UncommittedOffsetToCookie.emplace(std::make_pair(cookie->PartitionStream->GetPartitionStreamId(), offset), cookie).second) { + return false; + } + } + PartitionStreamIdToCookie.emplace(cookie->PartitionStream->GetPartitionStreamId(), cookie); + return true; +} + +TSingleClusterReadSessionImpl::TPartitionCookieMapping::TCookie::TPtr TSingleClusterReadSessionImpl::TPartitionCookieMapping::CommitOffset(ui64 partitionStreamId, ui64 offset) { + auto cookieIt = UncommittedOffsetToCookie.find(std::make_pair(partitionStreamId, offset)); + if (cookieIt != UncommittedOffsetToCookie.end()) { + TCookie::TPtr cookie; + if (!--cookieIt->second->UncommittedMessagesLeft) { + ++CommitInflight; + cookie = cookieIt->second; + } + UncommittedOffsetToCookie.erase(cookieIt); + return cookie; + } else { 
ThrowFatalError(TStringBuilder() << "Invalid offset " << offset << ". Partition stream id: " << partitionStreamId << Endl); - } - // If offset wasn't found, there might be already hard released partition. - // This situation is OK. - return nullptr; -} - -TSingleClusterReadSessionImpl::TPartitionCookieMapping::TCookie::TPtr TSingleClusterReadSessionImpl::TPartitionCookieMapping::RetrieveCommittedCookie(const Ydb::PersQueue::V1::CommitCookie& cookieProto) { - TCookie::TPtr cookieInfo; - auto cookieIt = Cookies.find(TCookie::TKey(cookieProto.assign_id(), cookieProto.partition_cookie())); - if (cookieIt != Cookies.end()) { - --CommitInflight; - cookieInfo = cookieIt->second; - Cookies.erase(cookieIt); - - auto [rangeBegin, rangeEnd] = PartitionStreamIdToCookie.equal_range(cookieInfo->PartitionStream->GetPartitionStreamId()); - for (auto i = rangeBegin; i != rangeEnd; ++i) { - if (i->second == cookieInfo) { - PartitionStreamIdToCookie.erase(i); - break; - } - } - } - return cookieInfo; -} - -void TSingleClusterReadSessionImpl::TPartitionCookieMapping::RemoveMapping(ui64 partitionStreamId) { - auto [rangeBegin, rangeEnd] = PartitionStreamIdToCookie.equal_range(partitionStreamId); - for (auto i = rangeBegin; i != rangeEnd; ++i) { - TCookie::TPtr cookie = i->second; - Cookies.erase(cookie->GetKey()); - for (ui64 offset = cookie->OffsetRange.first; offset < cookie->OffsetRange.second; ++offset) { - UncommittedOffsetToCookie.erase(std::make_pair(partitionStreamId, offset)); - } - } - PartitionStreamIdToCookie.erase(rangeBegin, rangeEnd); -} - -void TSingleClusterReadSessionImpl::TPartitionCookieMapping::ClearMapping() { - Cookies.clear(); - UncommittedOffsetToCookie.clear(); - PartitionStreamIdToCookie.clear(); - CommitInflight = 0; -} - -bool TSingleClusterReadSessionImpl::TPartitionCookieMapping::HasUnacknowledgedCookies() const { - return CommitInflight != 0; -} - + } + // If offset wasn't found, there might be already hard released partition. + // This situation is OK. 
+ return nullptr; +} + +TSingleClusterReadSessionImpl::TPartitionCookieMapping::TCookie::TPtr TSingleClusterReadSessionImpl::TPartitionCookieMapping::RetrieveCommittedCookie(const Ydb::PersQueue::V1::CommitCookie& cookieProto) { + TCookie::TPtr cookieInfo; + auto cookieIt = Cookies.find(TCookie::TKey(cookieProto.assign_id(), cookieProto.partition_cookie())); + if (cookieIt != Cookies.end()) { + --CommitInflight; + cookieInfo = cookieIt->second; + Cookies.erase(cookieIt); + + auto [rangeBegin, rangeEnd] = PartitionStreamIdToCookie.equal_range(cookieInfo->PartitionStream->GetPartitionStreamId()); + for (auto i = rangeBegin; i != rangeEnd; ++i) { + if (i->second == cookieInfo) { + PartitionStreamIdToCookie.erase(i); + break; + } + } + } + return cookieInfo; +} + +void TSingleClusterReadSessionImpl::TPartitionCookieMapping::RemoveMapping(ui64 partitionStreamId) { + auto [rangeBegin, rangeEnd] = PartitionStreamIdToCookie.equal_range(partitionStreamId); + for (auto i = rangeBegin; i != rangeEnd; ++i) { + TCookie::TPtr cookie = i->second; + Cookies.erase(cookie->GetKey()); + for (ui64 offset = cookie->OffsetRange.first; offset < cookie->OffsetRange.second; ++offset) { + UncommittedOffsetToCookie.erase(std::make_pair(partitionStreamId, offset)); + } + } + PartitionStreamIdToCookie.erase(rangeBegin, rangeEnd); +} + +void TSingleClusterReadSessionImpl::TPartitionCookieMapping::ClearMapping() { + Cookies.clear(); + UncommittedOffsetToCookie.clear(); + PartitionStreamIdToCookie.clear(); + CommitInflight = 0; +} + +bool TSingleClusterReadSessionImpl::TPartitionCookieMapping::HasUnacknowledgedCookies() const { + return CommitInflight != 0; +} + TReadSessionEvent::TCreatePartitionStreamEvent::TCreatePartitionStreamEvent(TPartitionStream::TPtr partitionStream, ui64 committedOffset, ui64 endOffset) - : PartitionStream(std::move(partitionStream)) + : PartitionStream(std::move(partitionStream)) , CommittedOffset(committedOffset) - , EndOffset(endOffset) -{ -} - -void TReadSessionEvent::TCreatePartitionStreamEvent::Confirm(TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset) { - if (PartitionStream) { - static_cast<TPartitionStreamImpl*>(PartitionStream.Get())->ConfirmCreate(readOffset, commitOffset); - } -} - -TReadSessionEvent::TDestroyPartitionStreamEvent::TDestroyPartitionStreamEvent(TPartitionStream::TPtr partitionStream, bool committedOffset) - : PartitionStream(std::move(partitionStream)) - , CommittedOffset(committedOffset) -{ -} - -void TReadSessionEvent::TDestroyPartitionStreamEvent::Confirm() { - if (PartitionStream) { - static_cast<TPartitionStreamImpl*>(PartitionStream.Get())->ConfirmDestroy(); - } -} - -TReadSessionEvent::TPartitionStreamClosedEvent::TPartitionStreamClosedEvent(TPartitionStream::TPtr partitionStream, EReason reason) - : PartitionStream(std::move(partitionStream)) - , Reason(reason) -{ -} - -TReadSessionEvent::TDataReceivedEvent::TDataReceivedEvent(TVector<TMessage> messages, + , EndOffset(endOffset) +{ +} + +void TReadSessionEvent::TCreatePartitionStreamEvent::Confirm(TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset) { + if (PartitionStream) { + static_cast<TPartitionStreamImpl*>(PartitionStream.Get())->ConfirmCreate(readOffset, commitOffset); + } +} + +TReadSessionEvent::TDestroyPartitionStreamEvent::TDestroyPartitionStreamEvent(TPartitionStream::TPtr partitionStream, bool committedOffset) + : PartitionStream(std::move(partitionStream)) + , CommittedOffset(committedOffset) +{ +} + +void TReadSessionEvent::TDestroyPartitionStreamEvent::Confirm() { + if (PartitionStream) { + 
static_cast<TPartitionStreamImpl*>(PartitionStream.Get())->ConfirmDestroy(); + } +} + +TReadSessionEvent::TPartitionStreamClosedEvent::TPartitionStreamClosedEvent(TPartitionStream::TPtr partitionStream, EReason reason) + : PartitionStream(std::move(partitionStream)) + , Reason(reason) +{ +} + +TReadSessionEvent::TDataReceivedEvent::TDataReceivedEvent(TVector<TMessage> messages, TVector<TCompressedMessage> compressedMessages, - TPartitionStream::TPtr partitionStream) - : Messages(std::move(messages)) + TPartitionStream::TPtr partitionStream) + : Messages(std::move(messages)) , CompressedMessages(std::move(compressedMessages)) - , PartitionStream(std::move(partitionStream)) -{ + , PartitionStream(std::move(partitionStream)) +{ for (size_t i = 0; i < GetMessagesCount(); ++i) { auto [from, to] = GetMessageOffsetRange(*this, i); if (OffsetRanges.empty() || OffsetRanges.back().second != from) { OffsetRanges.emplace_back(from, to); } else { OffsetRanges.back().second = to; - } - } -} - -void TReadSessionEvent::TDataReceivedEvent::Commit() { - for (auto [from, to] : OffsetRanges) { + } + } +} + +void TReadSessionEvent::TDataReceivedEvent::Commit() { + for (auto [from, to] : OffsetRanges) { static_cast<TPartitionStreamImpl*>(PartitionStream.Get())->Commit(from, to); - } -} - -TReadSessionEvent::TCommitAcknowledgementEvent::TCommitAcknowledgementEvent(TPartitionStream::TPtr partitionStream, ui64 committedOffset) - : PartitionStream(std::move(partitionStream)) - , CommittedOffset(committedOffset) -{ -} - -TString DebugString(const TReadSessionEvent::TEvent& event) { - return std::visit([](const auto& ev) { return ev.DebugString(); }, event); -} - -TString TReadSessionEvent::TDataReceivedEvent::DebugString(bool printData) const { - TStringBuilder ret; - ret << "DataReceived { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() - << " PartitionId: " << GetPartitionStream()->GetPartitionId(); + } +} + +TReadSessionEvent::TCommitAcknowledgementEvent::TCommitAcknowledgementEvent(TPartitionStream::TPtr partitionStream, ui64 committedOffset) + : PartitionStream(std::move(partitionStream)) + , CommittedOffset(committedOffset) +{ +} + +TString DebugString(const TReadSessionEvent::TEvent& event) { + return std::visit([](const auto& ev) { return ev.DebugString(); }, event); +} + +TString TReadSessionEvent::TDataReceivedEvent::DebugString(bool printData) const { + TStringBuilder ret; + ret << "DataReceived { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() + << " PartitionId: " << GetPartitionStream()->GetPartitionId(); for (const auto& message : Messages) { - ret << " "; + ret << " "; message.DebugString(ret, printData); - } + } for (const auto& message : CompressedMessages) { ret << " "; message.DebugString(ret, printData); } - ret << " }"; - return std::move(ret); -} - -TString TReadSessionEvent::TCommitAcknowledgementEvent::DebugString() const { - return TStringBuilder() << "CommitAcknowledgement { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() - << " PartitionId: " << GetPartitionStream()->GetPartitionId() - << " CommittedOffset: " << GetCommittedOffset() - << " }"; -} - -TString TReadSessionEvent::TCreatePartitionStreamEvent::DebugString() const { - return TStringBuilder() << "CreatePartitionStream { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() - << " PartitionId: " << GetPartitionStream()->GetPartitionId() - << " CommittedOffset: " << GetCommittedOffset() - << " EndOffset: " << GetEndOffset() - << " }"; -} - -TString 
TReadSessionEvent::TDestroyPartitionStreamEvent::DebugString() const { - return TStringBuilder() << "DestroyPartitionStream { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() - << " PartitionId: " << GetPartitionStream()->GetPartitionId() + ret << " }"; + return std::move(ret); +} + +TString TReadSessionEvent::TCommitAcknowledgementEvent::DebugString() const { + return TStringBuilder() << "CommitAcknowledgement { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() + << " PartitionId: " << GetPartitionStream()->GetPartitionId() + << " CommittedOffset: " << GetCommittedOffset() + << " }"; +} + +TString TReadSessionEvent::TCreatePartitionStreamEvent::DebugString() const { + return TStringBuilder() << "CreatePartitionStream { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() + << " PartitionId: " << GetPartitionStream()->GetPartitionId() << " CommittedOffset: " << GetCommittedOffset() - << " }"; -} - -TString TReadSessionEvent::TPartitionStreamStatusEvent::DebugString() const { - return TStringBuilder() << "PartitionStreamStatus { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() - << " PartitionId: " << GetPartitionStream()->GetPartitionId() - << " CommittedOffset: " << GetCommittedOffset() - << " ReadOffset: " << GetReadOffset() - << " EndOffset: " << GetEndOffset() - << " WriteWatermark: " << GetWriteWatermark() - << " }"; -} - -TString TReadSessionEvent::TPartitionStreamClosedEvent::DebugString() const { - return TStringBuilder() << "PartitionStreamClosed { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() - << " PartitionId: " << GetPartitionStream()->GetPartitionId() - << " Reason: " << GetReason() - << " }"; -} - -TString TSessionClosedEvent::DebugString() const { - return - TStringBuilder() << "SessionClosed { Status: " << GetStatus() - << " Issues: \"" << IssuesSingleLineString(GetIssues()) - << "\" }"; -} - -TReadSessionEvent::TPartitionStreamStatusEvent::TPartitionStreamStatusEvent(TPartitionStream::TPtr partitionStream, ui64 committedOffset, ui64 readOffset, ui64 endOffset, TInstant writeWatermark) - : PartitionStream(std::move(partitionStream)) - , CommittedOffset(committedOffset) - , ReadOffset(readOffset) - , EndOffset(endOffset) - , WriteWatermark(writeWatermark) -{ -} - -TReadSessionEventInfo::TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, std::weak_ptr<IUserRetrievedEventCallback> session, TEvent event) - : PartitionStream(std::move(partitionStream)) - , Event(std::move(event)) - , Session(std::move(session)) -{} - -TReadSessionEventInfo::TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, std::weak_ptr<IUserRetrievedEventCallback> session) - : PartitionStream(std::move(partitionStream)) - , Session(std::move(session)) -{} - -TReadSessionEventInfo::TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, - std::weak_ptr<IUserRetrievedEventCallback> session, + << " EndOffset: " << GetEndOffset() + << " }"; +} + +TString TReadSessionEvent::TDestroyPartitionStreamEvent::DebugString() const { + return TStringBuilder() << "DestroyPartitionStream { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() + << " PartitionId: " << GetPartitionStream()->GetPartitionId() + << " CommittedOffset: " << GetCommittedOffset() + << " }"; +} + +TString TReadSessionEvent::TPartitionStreamStatusEvent::DebugString() const { + return TStringBuilder() << "PartitionStreamStatus { PartitionStreamId: " << 
GetPartitionStream()->GetPartitionStreamId() + << " PartitionId: " << GetPartitionStream()->GetPartitionId() + << " CommittedOffset: " << GetCommittedOffset() + << " ReadOffset: " << GetReadOffset() + << " EndOffset: " << GetEndOffset() + << " WriteWatermark: " << GetWriteWatermark() + << " }"; +} + +TString TReadSessionEvent::TPartitionStreamClosedEvent::DebugString() const { + return TStringBuilder() << "PartitionStreamClosed { PartitionStreamId: " << GetPartitionStream()->GetPartitionStreamId() + << " PartitionId: " << GetPartitionStream()->GetPartitionId() + << " Reason: " << GetReason() + << " }"; +} + +TString TSessionClosedEvent::DebugString() const { + return + TStringBuilder() << "SessionClosed { Status: " << GetStatus() + << " Issues: \"" << IssuesSingleLineString(GetIssues()) + << "\" }"; +} + +TReadSessionEvent::TPartitionStreamStatusEvent::TPartitionStreamStatusEvent(TPartitionStream::TPtr partitionStream, ui64 committedOffset, ui64 readOffset, ui64 endOffset, TInstant writeWatermark) + : PartitionStream(std::move(partitionStream)) + , CommittedOffset(committedOffset) + , ReadOffset(readOffset) + , EndOffset(endOffset) + , WriteWatermark(writeWatermark) +{ +} + +TReadSessionEventInfo::TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, std::weak_ptr<IUserRetrievedEventCallback> session, TEvent event) + : PartitionStream(std::move(partitionStream)) + , Event(std::move(event)) + , Session(std::move(session)) +{} + +TReadSessionEventInfo::TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, std::weak_ptr<IUserRetrievedEventCallback> session) + : PartitionStream(std::move(partitionStream)) + , Session(std::move(session)) +{} + +TReadSessionEventInfo::TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, + std::weak_ptr<IUserRetrievedEventCallback> session, TVector<TReadSessionEvent::TDataReceivedEvent::TMessage> messages, TVector<TReadSessionEvent::TDataReceivedEvent::TCompressedMessage> compressedMessages) - : PartitionStream(std::move(partitionStream)) + : PartitionStream(std::move(partitionStream)) , Event( NMaybe::TInPlace(), std::in_place_type_t<TReadSessionEvent::TDataReceivedEvent>(), @@ -1706,364 +1706,364 @@ TReadSessionEventInfo::TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> std::move(compressedMessages), PartitionStream ) - , Session(std::move(session)) -{ -} - -void TReadSessionEventInfo::MoveToPartitionStream() { - PartitionStream->InsertEvent(std::move(*Event)); - Event = Nothing(); - Y_ASSERT(PartitionStream->HasEvents()); -} - -void TReadSessionEventInfo::ExtractFromPartitionStream() { - if (!Event && !IsEmpty()) { - Event = std::move(PartitionStream->TopEvent().GetEvent()); - PartitionStream->PopEvent(); - } -} - -bool TReadSessionEventInfo::IsEmpty() const { - return !PartitionStream || !PartitionStream->HasEvents(); -} - -bool TReadSessionEventInfo::IsDataEvent() const { - return !IsEmpty() && PartitionStream->TopEvent().IsDataEvent(); -} - -bool TReadSessionEventInfo::HasMoreData() const { - return PartitionStream->TopEvent().GetData().HasMoreData(); -} - -bool TReadSessionEventInfo::HasReadyUnreadData() const { - return PartitionStream->TopEvent().GetData().HasReadyUnreadData(); -} - -void TReadSessionEventInfo::OnUserRetrievedEvent() { - if (auto session = Session.lock()) { - session->OnUserRetrievedEvent(*Event); - } -} - + , Session(std::move(session)) +{ +} + +void TReadSessionEventInfo::MoveToPartitionStream() { + PartitionStream->InsertEvent(std::move(*Event)); + Event = 
Nothing(); + Y_ASSERT(PartitionStream->HasEvents()); +} + +void TReadSessionEventInfo::ExtractFromPartitionStream() { + if (!Event && !IsEmpty()) { + Event = std::move(PartitionStream->TopEvent().GetEvent()); + PartitionStream->PopEvent(); + } +} + +bool TReadSessionEventInfo::IsEmpty() const { + return !PartitionStream || !PartitionStream->HasEvents(); +} + +bool TReadSessionEventInfo::IsDataEvent() const { + return !IsEmpty() && PartitionStream->TopEvent().IsDataEvent(); +} + +bool TReadSessionEventInfo::HasMoreData() const { + return PartitionStream->TopEvent().GetData().HasMoreData(); +} + +bool TReadSessionEventInfo::HasReadyUnreadData() const { + return PartitionStream->TopEvent().GetData().HasReadyUnreadData(); +} + +void TReadSessionEventInfo::OnUserRetrievedEvent() { + if (auto session = Session.lock()) { + session->OnUserRetrievedEvent(*Event); + } +} + bool TReadSessionEventInfo::TakeData(TVector<TReadSessionEvent::TDataReceivedEvent::TMessage>* messages, TVector<TReadSessionEvent::TDataReceivedEvent::TCompressedMessage>* compressedMessages, size_t* maxByteSize) { return PartitionStream->TopEvent().GetData().TakeData(PartitionStream, messages, compressedMessages, maxByteSize); -} - -TReadSessionEventsQueue::TReadSessionEventsQueue(const TSettings& settings, std::weak_ptr<IUserRetrievedEventCallback> session) - : TParent(settings) - , Session(std::move(session)) -{ - const auto& h = Settings.EventHandlers_; - if (h.CommonHandler_ - || h.DataReceivedHandler_ - || h.CommitAcknowledgementHandler_ - || h.CreatePartitionStreamHandler_ - || h.DestroyPartitionStreamHandler_ - || h.PartitionStreamStatusHandler_ - || h.PartitionStreamClosedHandler_ - || h.SessionClosedHandler_) - { - HasEventCallbacks = true; - } else { - HasEventCallbacks = false; - } -} - -void TReadSessionEventsQueue::PushEvent(TReadSessionEventInfo eventInfo, TDeferredActions& deferred) { - if (Closed) { - return; - } - - with_lock (Mutex) { +} + +TReadSessionEventsQueue::TReadSessionEventsQueue(const TSettings& settings, std::weak_ptr<IUserRetrievedEventCallback> session) + : TParent(settings) + , Session(std::move(session)) +{ + const auto& h = Settings.EventHandlers_; + if (h.CommonHandler_ + || h.DataReceivedHandler_ + || h.CommitAcknowledgementHandler_ + || h.CreatePartitionStreamHandler_ + || h.DestroyPartitionStreamHandler_ + || h.PartitionStreamStatusHandler_ + || h.PartitionStreamClosedHandler_ + || h.SessionClosedHandler_) + { + HasEventCallbacks = true; + } else { + HasEventCallbacks = false; + } +} + +void TReadSessionEventsQueue::PushEvent(TReadSessionEventInfo eventInfo, TDeferredActions& deferred) { + if (Closed) { + return; + } + + with_lock (Mutex) { auto partitionStream = eventInfo.PartitionStream; - eventInfo.MoveToPartitionStream(); + eventInfo.MoveToPartitionStream(); SignalReadyEventsImpl(partitionStream.Get(), deferred); - } -} - -void TReadSessionEventsQueue::SignalEventImpl(TIntrusivePtr<TPartitionStreamImpl> partitionStream, TDeferredActions& deferred) { - if (Closed) { - return; - } + } +} + +void TReadSessionEventsQueue::SignalEventImpl(TIntrusivePtr<TPartitionStreamImpl> partitionStream, TDeferredActions& deferred) { + if (Closed) { + return; + } auto session = partitionStream->GetSession(); Events.emplace(std::move(partitionStream), std::move(session)); - SignalWaiterImpl(deferred); -} - -TDataDecompressionInfo* TReadSessionEventsQueue::PushDataEvent(TIntrusivePtr<TPartitionStreamImpl> partitionStream, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData&& 
msg) { - if (Closed) { - return nullptr; - } - - with_lock (Mutex) { + SignalWaiterImpl(deferred); +} + +TDataDecompressionInfo* TReadSessionEventsQueue::PushDataEvent(TIntrusivePtr<TPartitionStreamImpl> partitionStream, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData&& msg) { + if (Closed) { + return nullptr; + } + + with_lock (Mutex) { return &partitionStream->InsertDataEvent(std::move(msg), Settings.Decompress_); - } -} - -TMaybe<TReadSessionEventsQueue::TEventInfo> TReadSessionEventsQueue::GetDataEventImpl(TEventInfo& srcDataEventInfo, size_t* maxByteSize) { // Assumes that we're under lock. - TVector<TReadSessionEvent::TDataReceivedEvent::TMessage> messages; + } +} + +TMaybe<TReadSessionEventsQueue::TEventInfo> TReadSessionEventsQueue::GetDataEventImpl(TEventInfo& srcDataEventInfo, size_t* maxByteSize) { // Assumes that we're under lock. + TVector<TReadSessionEvent::TDataReceivedEvent::TMessage> messages; TVector<TReadSessionEvent::TDataReceivedEvent::TCompressedMessage> compressedMessages; - TIntrusivePtr<TPartitionStreamImpl> partitionStream = srcDataEventInfo.PartitionStream; + TIntrusivePtr<TPartitionStreamImpl> partitionStream = srcDataEventInfo.PartitionStream; bool messageExtracted = false; - while (srcDataEventInfo.HasReadyUnreadData() && *maxByteSize > 0) { + while (srcDataEventInfo.HasReadyUnreadData() && *maxByteSize > 0) { const bool hasMoreUnpackedData = srcDataEventInfo.TakeData(&messages, &compressedMessages, maxByteSize); - if (!hasMoreUnpackedData) { - const bool messageIsFullyRead = !srcDataEventInfo.HasMoreData(); - if (messageIsFullyRead) { - partitionStream->PopEvent(); + if (!hasMoreUnpackedData) { + const bool messageIsFullyRead = !srcDataEventInfo.HasMoreData(); + if (messageIsFullyRead) { + partitionStream->PopEvent(); messageExtracted = true; - break; - } - } - } + break; + } + } + } if (!messageExtracted) { partitionStream->TopEvent().Signalled = false; } if (messages.empty() && compressedMessages.empty()) { - return Nothing(); - } + return Nothing(); + } return TEventInfo(partitionStream, partitionStream->GetSession(), std::move(messages), std::move(compressedMessages)); -} - -void TReadSessionEventsQueue::SignalReadyEvents(TPartitionStreamImpl* partitionStream) { - Y_ASSERT(partitionStream); - TDeferredActions deferred; - with_lock (Mutex) { - SignalReadyEventsImpl(partitionStream, deferred); - } -} - -void TReadSessionEventsQueue::SignalReadyEventsImpl(TPartitionStreamImpl* partitionStream, TDeferredActions& deferred) { - partitionStream->SignalReadyEvents(this, deferred); - ApplyCallbacksToReadyEventsImpl(deferred); -} - -bool TReadSessionEventsQueue::ApplyCallbacksToReadyEventsImpl(TDeferredActions& deferred) { - if (!HasEventCallbacks) { - return false; - } - bool applied = false; - while (HasCallbackForNextEventImpl()) { - size_t maxSize = std::numeric_limits<size_t>::max(); - TMaybe<TReadSessionEventInfo> eventInfo = GetEventImpl(&maxSize); - if (!eventInfo) { - break; - } - const TIntrusivePtr<TPartitionStreamImpl> partitionStreamForSignalling = eventInfo->IsDataEvent() ? 
eventInfo->PartitionStream : nullptr; - applied = true; +} + +void TReadSessionEventsQueue::SignalReadyEvents(TPartitionStreamImpl* partitionStream) { + Y_ASSERT(partitionStream); + TDeferredActions deferred; + with_lock (Mutex) { + SignalReadyEventsImpl(partitionStream, deferred); + } +} + +void TReadSessionEventsQueue::SignalReadyEventsImpl(TPartitionStreamImpl* partitionStream, TDeferredActions& deferred) { + partitionStream->SignalReadyEvents(this, deferred); + ApplyCallbacksToReadyEventsImpl(deferred); +} + +bool TReadSessionEventsQueue::ApplyCallbacksToReadyEventsImpl(TDeferredActions& deferred) { + if (!HasEventCallbacks) { + return false; + } + bool applied = false; + while (HasCallbackForNextEventImpl()) { + size_t maxSize = std::numeric_limits<size_t>::max(); + TMaybe<TReadSessionEventInfo> eventInfo = GetEventImpl(&maxSize); + if (!eventInfo) { + break; + } + const TIntrusivePtr<TPartitionStreamImpl> partitionStreamForSignalling = eventInfo->IsDataEvent() ? eventInfo->PartitionStream : nullptr; + applied = true; if (!ApplyHandler(*eventInfo, deferred)) { // Close session event. - break; - } - if (partitionStreamForSignalling) { - SignalReadyEventsImpl(partitionStreamForSignalling.Get(), deferred); - } - } - return applied; -} - -struct THasCallbackForEventVisitor { - explicit THasCallbackForEventVisitor(const TReadSessionSettings& settings) - : Settings(settings) - { - } - -#define DECLARE_HANDLER(type, handler) \ - bool operator()(const type&) { \ - return bool(Settings.EventHandlers_.handler); \ - } \ - /**/ - - DECLARE_HANDLER(TReadSessionEvent::TDataReceivedEvent, DataReceivedHandler_); - DECLARE_HANDLER(TReadSessionEvent::TCommitAcknowledgementEvent, CommitAcknowledgementHandler_); - DECLARE_HANDLER(TReadSessionEvent::TCreatePartitionStreamEvent, CreatePartitionStreamHandler_); - DECLARE_HANDLER(TReadSessionEvent::TDestroyPartitionStreamEvent, DestroyPartitionStreamHandler_); - DECLARE_HANDLER(TReadSessionEvent::TPartitionStreamStatusEvent, PartitionStreamStatusHandler_); - DECLARE_HANDLER(TReadSessionEvent::TPartitionStreamClosedEvent, PartitionStreamClosedHandler_); - DECLARE_HANDLER(TSessionClosedEvent, SessionClosedHandler_); - -#undef DECLARE_HANDLER - - const TReadSessionSettings& Settings; -}; - -bool TReadSessionEventsQueue::HasCallbackForNextEventImpl() const { - if (!HasEventsImpl()) { - return false; - } - if (Settings.EventHandlers_.CommonHandler_) { - return true; - } - - if (!Events.empty()) { - const TEventInfo& topEvent = Events.front(); - const TReadSessionEvent::TEvent* event = nullptr; - if (topEvent.Event) { - event = &*topEvent.Event; - } else if (topEvent.PartitionStream && topEvent.PartitionStream->HasEvents()) { - const TRawPartitionStreamEvent& partitionStreamTopEvent = topEvent.PartitionStream->TopEvent(); - if (partitionStreamTopEvent.IsDataEvent()) { - return bool(Settings.EventHandlers_.DataReceivedHandler_); - } else { - event = &partitionStreamTopEvent.GetEvent(); - } - } - - if (!event) { - return false; - } - - THasCallbackForEventVisitor visitor(Settings); - return std::visit(visitor, *event); - } else if (CloseEvent) { - return bool(Settings.EventHandlers_.SessionClosedHandler_); - } - Y_ASSERT(false); - return false; -} - -void TReadSessionEventsQueue::ClearAllEvents() { - TDeferredActions deferred; - with_lock (Mutex) { - while (!Events.empty()) { - auto& event = Events.front(); - if (event.PartitionStream && event.PartitionStream->HasEvents()) { - event.PartitionStream->PopEvent(); - } - Events.pop(); - } - } -} - + break; + } + if 
(partitionStreamForSignalling) { + SignalReadyEventsImpl(partitionStreamForSignalling.Get(), deferred); + } + } + return applied; +} + +struct THasCallbackForEventVisitor { + explicit THasCallbackForEventVisitor(const TReadSessionSettings& settings) + : Settings(settings) + { + } + +#define DECLARE_HANDLER(type, handler) \ + bool operator()(const type&) { \ + return bool(Settings.EventHandlers_.handler); \ + } \ + /**/ + + DECLARE_HANDLER(TReadSessionEvent::TDataReceivedEvent, DataReceivedHandler_); + DECLARE_HANDLER(TReadSessionEvent::TCommitAcknowledgementEvent, CommitAcknowledgementHandler_); + DECLARE_HANDLER(TReadSessionEvent::TCreatePartitionStreamEvent, CreatePartitionStreamHandler_); + DECLARE_HANDLER(TReadSessionEvent::TDestroyPartitionStreamEvent, DestroyPartitionStreamHandler_); + DECLARE_HANDLER(TReadSessionEvent::TPartitionStreamStatusEvent, PartitionStreamStatusHandler_); + DECLARE_HANDLER(TReadSessionEvent::TPartitionStreamClosedEvent, PartitionStreamClosedHandler_); + DECLARE_HANDLER(TSessionClosedEvent, SessionClosedHandler_); + +#undef DECLARE_HANDLER + + const TReadSessionSettings& Settings; +}; + +bool TReadSessionEventsQueue::HasCallbackForNextEventImpl() const { + if (!HasEventsImpl()) { + return false; + } + if (Settings.EventHandlers_.CommonHandler_) { + return true; + } + + if (!Events.empty()) { + const TEventInfo& topEvent = Events.front(); + const TReadSessionEvent::TEvent* event = nullptr; + if (topEvent.Event) { + event = &*topEvent.Event; + } else if (topEvent.PartitionStream && topEvent.PartitionStream->HasEvents()) { + const TRawPartitionStreamEvent& partitionStreamTopEvent = topEvent.PartitionStream->TopEvent(); + if (partitionStreamTopEvent.IsDataEvent()) { + return bool(Settings.EventHandlers_.DataReceivedHandler_); + } else { + event = &partitionStreamTopEvent.GetEvent(); + } + } + + if (!event) { + return false; + } + + THasCallbackForEventVisitor visitor(Settings); + return std::visit(visitor, *event); + } else if (CloseEvent) { + return bool(Settings.EventHandlers_.SessionClosedHandler_); + } + Y_ASSERT(false); + return false; +} + +void TReadSessionEventsQueue::ClearAllEvents() { + TDeferredActions deferred; + with_lock (Mutex) { + while (!Events.empty()) { + auto& event = Events.front(); + if (event.PartitionStream && event.PartitionStream->HasEvents()) { + event.PartitionStream->PopEvent(); + } + Events.pop(); + } + } +} + TDataDecompressionInfo::TDataDecompressionInfo( Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData&& msg, std::weak_ptr<TSingleClusterReadSessionImpl> session, bool doDecompress ) - : ServerMessage(std::move(msg)) - , Session(std::move(session)) + : ServerMessage(std::move(msg)) + , Session(std::move(session)) , DoDecompress(doDecompress) -{ - for (const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::Batch& batch : ServerMessage.batches()) { - for (const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::MessageData& messageData : batch.message_data()) { - CompressedDataSize += messageData.data().size(); - } - } - SourceDataNotProcessed = CompressedDataSize; - - BuildBatchesMeta(); -} - -void TDataDecompressionInfo::BuildBatchesMeta() { - BatchesMeta.reserve(ServerMessage.batches_size()); - for (const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::Batch& batch : ServerMessage.batches()) { - // Extra fields. 
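// The batch's extra_fields key/value pairs are copied into a TWriteSessionMeta; BatchesMeta keeps one such meta object per batch, in batch order.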
- TWriteSessionMeta::TPtr meta = MakeIntrusive<TWriteSessionMeta>(); - meta->Fields.reserve(batch.extra_fields_size()); - for (const Ydb::PersQueue::V1::KeyValue& kv : batch.extra_fields()) { - meta->Fields.emplace(kv.key(), kv.value()); - } - BatchesMeta.emplace_back(std::move(meta)); - } -} - -void TDataDecompressionInfo::PutDecompressionError(std::exception_ptr error, size_t batch, size_t message) { - if (!DecompressionErrorsStructCreated) { - with_lock (DecompressionErrorsStructLock) { - DecompressionErrors.resize(ServerMessage.batches_size()); - for (size_t batch = 0; batch < static_cast<size_t>(ServerMessage.batches_size()); ++batch) { - DecompressionErrors[batch].resize(static_cast<size_t>(ServerMessage.batches(batch).message_data_size())); - } - - // Set barrier. - DecompressionErrorsStructCreated = true; - } - } - Y_ASSERT(batch < DecompressionErrors.size()); - Y_ASSERT(message < DecompressionErrors[batch].size()); - DecompressionErrors[batch][message] = std::move(error); -} - -std::exception_ptr TDataDecompressionInfo::GetDecompressionError(size_t batch, size_t message) { - if (!DecompressionErrorsStructCreated) { - return {}; - } - Y_ASSERT(batch < DecompressionErrors.size()); - Y_ASSERT(message < DecompressionErrors[batch].size()); - return DecompressionErrors[batch][message]; -} - -i64 TDataDecompressionInfo::StartDecompressionTasks(const IExecutor::TPtr& executor, i64 availableMemory, double averageCompressionRatio, const TIntrusivePtr<TPartitionStreamImpl>& partitionStream, TDeferredActions& deferred) { - constexpr size_t TASK_LIMIT = 512_KB; - std::shared_ptr<TSingleClusterReadSessionImpl> session = Session.lock(); - Y_ASSERT(session); - ReadyThresholds.emplace_back(); - TDecompressionTask task(this, partitionStream, &ReadyThresholds.back()); - i64 used = 0; - while (availableMemory > 0 && !AllDecompressionTasksStarted()) { - const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::Batch& batch = ServerMessage.batches(CurrentDecompressingMessage.first); - if (CurrentDecompressingMessage.second < static_cast<size_t>(batch.message_data_size())) { - const auto& messageData = batch.message_data(CurrentDecompressingMessage.second); - const i64 size = static_cast<i64>(messageData.data().size()); - const i64 estimatedDecompressedSize = - messageData.uncompressed_size() ? 
static_cast<i64>(messageData.uncompressed_size()) : static_cast<i64>(size * averageCompressionRatio); - task.Add(CurrentDecompressingMessage.first, CurrentDecompressingMessage.second, size, estimatedDecompressedSize); - used += estimatedDecompressedSize; - availableMemory -= estimatedDecompressedSize; - } - ++CurrentDecompressingMessage.second; - if (CurrentDecompressingMessage.second >= static_cast<size_t>(batch.message_data_size())) { // next batch - ++CurrentDecompressingMessage.first; - CurrentDecompressingMessage.second = 0; - } - if (task.AddedDataSize() >= TASK_LIMIT) { - session->OnCreateNewDecompressionTask(); - deferred.DeferStartExecutorTask(executor, std::move(task)); - ReadyThresholds.emplace_back(); - task = TDecompressionTask(this, partitionStream, &ReadyThresholds.back()); - } - } +{ + for (const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::Batch& batch : ServerMessage.batches()) { + for (const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::MessageData& messageData : batch.message_data()) { + CompressedDataSize += messageData.data().size(); + } + } + SourceDataNotProcessed = CompressedDataSize; + + BuildBatchesMeta(); +} + +void TDataDecompressionInfo::BuildBatchesMeta() { + BatchesMeta.reserve(ServerMessage.batches_size()); + for (const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::Batch& batch : ServerMessage.batches()) { + // Extra fields. + TWriteSessionMeta::TPtr meta = MakeIntrusive<TWriteSessionMeta>(); + meta->Fields.reserve(batch.extra_fields_size()); + for (const Ydb::PersQueue::V1::KeyValue& kv : batch.extra_fields()) { + meta->Fields.emplace(kv.key(), kv.value()); + } + BatchesMeta.emplace_back(std::move(meta)); + } +} + +void TDataDecompressionInfo::PutDecompressionError(std::exception_ptr error, size_t batch, size_t message) { + if (!DecompressionErrorsStructCreated) { + with_lock (DecompressionErrorsStructLock) { + DecompressionErrors.resize(ServerMessage.batches_size()); + for (size_t batch = 0; batch < static_cast<size_t>(ServerMessage.batches_size()); ++batch) { + DecompressionErrors[batch].resize(static_cast<size_t>(ServerMessage.batches(batch).message_data_size())); + } + + // Set barrier. 
+ DecompressionErrorsStructCreated = true; + } + } + Y_ASSERT(batch < DecompressionErrors.size()); + Y_ASSERT(message < DecompressionErrors[batch].size()); + DecompressionErrors[batch][message] = std::move(error); +} + +std::exception_ptr TDataDecompressionInfo::GetDecompressionError(size_t batch, size_t message) { + if (!DecompressionErrorsStructCreated) { + return {}; + } + Y_ASSERT(batch < DecompressionErrors.size()); + Y_ASSERT(message < DecompressionErrors[batch].size()); + return DecompressionErrors[batch][message]; +} + +i64 TDataDecompressionInfo::StartDecompressionTasks(const IExecutor::TPtr& executor, i64 availableMemory, double averageCompressionRatio, const TIntrusivePtr<TPartitionStreamImpl>& partitionStream, TDeferredActions& deferred) { + constexpr size_t TASK_LIMIT = 512_KB; + std::shared_ptr<TSingleClusterReadSessionImpl> session = Session.lock(); + Y_ASSERT(session); + ReadyThresholds.emplace_back(); + TDecompressionTask task(this, partitionStream, &ReadyThresholds.back()); + i64 used = 0; + while (availableMemory > 0 && !AllDecompressionTasksStarted()) { + const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::Batch& batch = ServerMessage.batches(CurrentDecompressingMessage.first); + if (CurrentDecompressingMessage.second < static_cast<size_t>(batch.message_data_size())) { + const auto& messageData = batch.message_data(CurrentDecompressingMessage.second); + const i64 size = static_cast<i64>(messageData.data().size()); + const i64 estimatedDecompressedSize = + messageData.uncompressed_size() ? static_cast<i64>(messageData.uncompressed_size()) : static_cast<i64>(size * averageCompressionRatio); + task.Add(CurrentDecompressingMessage.first, CurrentDecompressingMessage.second, size, estimatedDecompressedSize); + used += estimatedDecompressedSize; + availableMemory -= estimatedDecompressedSize; + } + ++CurrentDecompressingMessage.second; + if (CurrentDecompressingMessage.second >= static_cast<size_t>(batch.message_data_size())) { // next batch + ++CurrentDecompressingMessage.first; + CurrentDecompressingMessage.second = 0; + } + if (task.AddedDataSize() >= TASK_LIMIT) { + session->OnCreateNewDecompressionTask(); + deferred.DeferStartExecutorTask(executor, std::move(task)); + ReadyThresholds.emplace_back(); + task = TDecompressionTask(this, partitionStream, &ReadyThresholds.back()); + } + } if (task.AddedMessagesCount() > 0) { - session->OnCreateNewDecompressionTask(); - deferred.DeferStartExecutorTask(executor, std::move(task)); - } else { - ReadyThresholds.pop_back(); // Revert. - } - return used; -} - + session->OnCreateNewDecompressionTask(); + deferred.DeferStartExecutorTask(executor, std::move(task)); + } else { + ReadyThresholds.pop_back(); // Revert. 
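The StartDecompressionTasks loop in this hunk packs messages into decompression tasks of at most TASK_LIMIT (512 KB) of compressed input, posting each full task to the executor and starting a new one. Below is a minimal standalone sketch of that size-capped batching pattern; BatchBySize, the std::function executor stand-in and the (index, bytes) item representation are illustrative names, not SDK API.

    #include <cstddef>
    #include <functional>
    #include <utility>
    #include <vector>

    // Illustrative sketch: accumulate items into tasks of at most maxTaskBytes,
    // posting each full task and starting a fresh one, the way the loop above
    // does with TDecompressionTask and IExecutor.
    void BatchBySize(const std::vector<std::pair<int, size_t>>& items,  // (index, bytes)
                     size_t maxTaskBytes,
                     const std::function<void(std::vector<int>)>& post) {
        std::vector<int> task;
        size_t taskBytes = 0;
        for (const auto& [index, bytes] : items) {
            task.push_back(index);
            taskBytes += bytes;
            if (taskBytes >= maxTaskBytes) {
                post(std::move(task));  // hand the full task over
                task.clear();           // reset the moved-from vector
                taskBytes = 0;
            }
        }
        if (!task.empty()) {            // flush the trailing partial task
            post(std::move(task));
        }
    }

As in the original loop, a trailing task is only posted if it actually contains messages; otherwise it is dropped (the ReadyThresholds.pop_back() branch above).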
+ } + return used; +} + bool TDataDecompressionInfo::TakeData(const TIntrusivePtr<TPartitionStreamImpl>& partitionStream, TVector<TReadSessionEvent::TDataReceivedEvent::TMessage>* messages, TVector<TReadSessionEvent::TDataReceivedEvent::TCompressedMessage>* compressedMessages, size_t* maxByteSize) { - TMaybe<std::pair<size_t, size_t>> readyThreshold = GetReadyThreshold(); - Y_ASSERT(readyThreshold); - auto& msg = GetServerMessage(); - ui64 minOffset = Max<ui64>(); - ui64 maxOffset = 0; - const auto prevReadingMessage = CurrentReadingMessage; - while (HasMoreData() && *maxByteSize > 0 && CurrentReadingMessage <= *readyThreshold) { - auto& batch = *msg.mutable_batches(CurrentReadingMessage.first); - if (CurrentReadingMessage.second < static_cast<size_t>(batch.message_data_size())) { - const auto& meta = GetBatchMeta(CurrentReadingMessage.first); - const TInstant batchWriteTimestamp = TInstant::MilliSeconds(batch.write_timestamp_ms()); - auto& messageData = *batch.mutable_message_data(CurrentReadingMessage.second); - minOffset = Min(minOffset, messageData.offset()); - maxOffset = Max(maxOffset, messageData.offset()); + TMaybe<std::pair<size_t, size_t>> readyThreshold = GetReadyThreshold(); + Y_ASSERT(readyThreshold); + auto& msg = GetServerMessage(); + ui64 minOffset = Max<ui64>(); + ui64 maxOffset = 0; + const auto prevReadingMessage = CurrentReadingMessage; + while (HasMoreData() && *maxByteSize > 0 && CurrentReadingMessage <= *readyThreshold) { + auto& batch = *msg.mutable_batches(CurrentReadingMessage.first); + if (CurrentReadingMessage.second < static_cast<size_t>(batch.message_data_size())) { + const auto& meta = GetBatchMeta(CurrentReadingMessage.first); + const TInstant batchWriteTimestamp = TInstant::MilliSeconds(batch.write_timestamp_ms()); + auto& messageData = *batch.mutable_message_data(CurrentReadingMessage.second); + minOffset = Min(minOffset, messageData.offset()); + maxOffset = Max(maxOffset, messageData.offset()); TReadSessionEvent::TDataReceivedEvent::TMessageInformation messageInfo( - messageData.offset(), - batch.source_id(), - messageData.seq_no(), - TInstant::MilliSeconds(messageData.create_timestamp_ms()), - batchWriteTimestamp, - batch.ip(), + messageData.offset(), + batch.source_id(), + messageData.seq_no(), + TInstant::MilliSeconds(messageData.create_timestamp_ms()), + batchWriteTimestamp, + batch.ip(), meta, messageData.uncompressed_size() - ); + ); if (DoDecompress) { messages->emplace_back( messageData.data(), @@ -2083,77 +2083,77 @@ bool TDataDecompressionInfo::TakeData(const TIntrusivePtr<TPartitionStreamImpl>& messageData.explicit_hash() ); } - *maxByteSize -= Min(*maxByteSize, messageData.data().size()); - - // Clear data to free internal session's memory. - messageData.clear_data(); - } - - ++CurrentReadingMessage.second; - if (CurrentReadingMessage.second >= static_cast<size_t>(batch.message_data_size())) { - CurrentReadingMessage.second = 0; - do { - ++CurrentReadingMessage.first; - } while (CurrentReadingMessage.first < static_cast<size_t>(msg.batches_size()) && msg.batches(CurrentReadingMessage.first).message_data_size() == 0); - } - } - partitionStream->GetLog() << TLOG_DEBUG << "Take Data. Partition " << partitionStream->GetPartitionId() - << ". 
Read: {" << prevReadingMessage.first << ", " << prevReadingMessage.second << "} -> {" - << CurrentReadingMessage.first << ", " << CurrentReadingMessage.second << "} (" - << minOffset << "-" << maxOffset << ")"; - return CurrentReadingMessage <= *readyThreshold; -} - -bool TDataDecompressionInfo::HasReadyUnreadData() const { - TMaybe<std::pair<size_t, size_t>> threshold = GetReadyThreshold(); - if (!threshold) { - return false; - } - return CurrentReadingMessage <= *threshold; -} - -void TDataDecompressionInfo::TDecompressionTask::Add(size_t batch, size_t message, size_t sourceDataSize, size_t estimatedDecompressedSize) { - if (Messages.empty() || Messages.back().Batch != batch) { - Messages.push_back({ batch, { message, message + 1 } }); - } - Messages.back().MessageRange.second = message + 1; - SourceDataSize += sourceDataSize; - EstimatedDecompressedSize += estimatedDecompressedSize; - Ready->Batch = batch; - Ready->Message = message; -} - -TDataDecompressionInfo::TDecompressionTask::TDecompressionTask(TDataDecompressionInfo* parent, TIntrusivePtr<TPartitionStreamImpl> partitionStream, TReadyMessageThreshold* ready) - : Parent(parent) - , PartitionStream(std::move(partitionStream)) - , Ready(ready) -{ -} - + *maxByteSize -= Min(*maxByteSize, messageData.data().size()); + + // Clear data to free internal session's memory. + messageData.clear_data(); + } + + ++CurrentReadingMessage.second; + if (CurrentReadingMessage.second >= static_cast<size_t>(batch.message_data_size())) { + CurrentReadingMessage.second = 0; + do { + ++CurrentReadingMessage.first; + } while (CurrentReadingMessage.first < static_cast<size_t>(msg.batches_size()) && msg.batches(CurrentReadingMessage.first).message_data_size() == 0); + } + } + partitionStream->GetLog() << TLOG_DEBUG << "Take Data. Partition " << partitionStream->GetPartitionId() + << ". 
Read: {" << prevReadingMessage.first << ", " << prevReadingMessage.second << "} -> {" + << CurrentReadingMessage.first << ", " << CurrentReadingMessage.second << "} (" + << minOffset << "-" << maxOffset << ")"; + return CurrentReadingMessage <= *readyThreshold; +} + +bool TDataDecompressionInfo::HasReadyUnreadData() const { + TMaybe<std::pair<size_t, size_t>> threshold = GetReadyThreshold(); + if (!threshold) { + return false; + } + return CurrentReadingMessage <= *threshold; +} + +void TDataDecompressionInfo::TDecompressionTask::Add(size_t batch, size_t message, size_t sourceDataSize, size_t estimatedDecompressedSize) { + if (Messages.empty() || Messages.back().Batch != batch) { + Messages.push_back({ batch, { message, message + 1 } }); + } + Messages.back().MessageRange.second = message + 1; + SourceDataSize += sourceDataSize; + EstimatedDecompressedSize += estimatedDecompressedSize; + Ready->Batch = batch; + Ready->Message = message; +} + +TDataDecompressionInfo::TDecompressionTask::TDecompressionTask(TDataDecompressionInfo* parent, TIntrusivePtr<TPartitionStreamImpl> partitionStream, TReadyMessageThreshold* ready) + : Parent(parent) + , PartitionStream(std::move(partitionStream)) + , Ready(ready) +{ +} + // Forward delcaration namespace NCompressionDetails { extern TString Decompress(const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::MessageData& data); } -void TDataDecompressionInfo::TDecompressionTask::operator()() { - ui64 minOffset = Max<ui64>(); - ui64 maxOffset = 0; - const ui64 partition = Parent->ServerMessage.partition(); - i64 dataProcessed = 0; - size_t messagesProcessed = 0; - for (const TMessageRange& messages : Messages) { - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::Batch& batch = - *Parent->ServerMessage.mutable_batches(messages.Batch); - for (size_t i = messages.MessageRange.first; i < messages.MessageRange.second; ++i) { - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::MessageData& data = - *batch.mutable_message_data(i); - - ++messagesProcessed; - dataProcessed += static_cast<i64>(data.data().size()); - minOffset = Min(minOffset, data.offset()); - maxOffset = Max(maxOffset, data.offset()); - - try { +void TDataDecompressionInfo::TDecompressionTask::operator()() { + ui64 minOffset = Max<ui64>(); + ui64 maxOffset = 0; + const ui64 partition = Parent->ServerMessage.partition(); + i64 dataProcessed = 0; + size_t messagesProcessed = 0; + for (const TMessageRange& messages : Messages) { + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::Batch& batch = + *Parent->ServerMessage.mutable_batches(messages.Batch); + for (size_t i = messages.MessageRange.first; i < messages.MessageRange.second; ++i) { + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::MessageData& data = + *batch.mutable_message_data(i); + + ++messagesProcessed; + dataProcessed += static_cast<i64>(data.data().size()); + minOffset = Min(minOffset, data.offset()); + maxOffset = Max(maxOffset, data.offset()); + + try { if (Parent->DoDecompress && data.codec() != Ydb::PersQueue::V1::CODEC_RAW && data.codec() != Ydb::PersQueue::V1::CODEC_UNSPECIFIED @@ -2163,467 +2163,467 @@ void TDataDecompressionInfo::TDecompressionTask::operator()() { data.set_codec(Ydb::PersQueue::V1::CODEC_RAW); } DecompressedSize += data.data().size(); - } catch (...) { - Parent->PutDecompressionError(std::current_exception(), messages.Batch, i); - data.clear_data(); // Free memory, because we don't count it. 
- - std::shared_ptr<TSingleClusterReadSessionImpl> session = Parent->Session.lock(); - if (session) { - session->GetLog() << TLOG_INFO << "Error decompressing data: " << CurrentExceptionMessage(); - } - } - } - } - if (auto session = Parent->Session.lock()) { + } catch (...) { + Parent->PutDecompressionError(std::current_exception(), messages.Batch, i); + data.clear_data(); // Free memory, because we don't count it. + + std::shared_ptr<TSingleClusterReadSessionImpl> session = Parent->Session.lock(); + if (session) { + session->GetLog() << TLOG_INFO << "Error decompressing data: " << CurrentExceptionMessage(); + } + } + } + } + if (auto session = Parent->Session.lock()) { session->GetLog() << TLOG_DEBUG << "Decompression task done. Partition: " << partition << " (" << minOffset << "-" << maxOffset << ")"; - } - Y_ASSERT(dataProcessed == SourceDataSize); - std::shared_ptr<TSingleClusterReadSessionImpl> session = Parent->Session.lock(); + } + Y_ASSERT(dataProcessed == SourceDataSize); + std::shared_ptr<TSingleClusterReadSessionImpl> session = Parent->Session.lock(); if (session) { session->OnDataDecompressed(SourceDataSize, EstimatedDecompressedSize, DecompressedSize, messagesProcessed); } Parent->SourceDataNotProcessed -= dataProcessed; - Ready->Ready = true; - - if (session) { - session->GetEventsQueue()->SignalReadyEvents(PartitionStream.Get()); - } -} - -void TRawPartitionStreamEvent::Signal(TPartitionStreamImpl* partitionStream, TReadSessionEventsQueue* queue, TDeferredActions& deferred) { - if (!Signalled) { + Ready->Ready = true; + + if (session) { + session->GetEventsQueue()->SignalReadyEvents(PartitionStream.Get()); + } +} + +void TRawPartitionStreamEvent::Signal(TPartitionStreamImpl* partitionStream, TReadSessionEventsQueue* queue, TDeferredActions& deferred) { + if (!Signalled) { Signalled = true; - queue->SignalEventImpl(partitionStream, deferred); - } -} - -void TDeferredActions::DeferReadFromProcessor(const IProcessor::TPtr& processor, - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage* dst, - IProcessor::TReadCallback callback) -{ - Y_ASSERT(!Processor); - Y_ASSERT(!ReadDst); - Y_ASSERT(!ReadCallback); - Processor = processor; - ReadDst = dst; - ReadCallback = std::move(callback); -} - -void TDeferredActions::DeferStartExecutorTask(const IExecutor::TPtr& executor, IExecutor::TFunction task) { - ExecutorsTasks.emplace_back(executor, std::move(task)); -} - -void TDeferredActions::DeferAbortSession(const IErrorHandler::TPtr& errorHandler, TSessionClosedEvent&& closeEvent) { - ErrorHandler = errorHandler; - SessionClosedEvent.ConstructInPlace(std::move(closeEvent)); -} - -void TDeferredActions::DeferAbortSession(const IErrorHandler::TPtr& errorHandler, EStatus statusCode, NYql::TIssues&& issues) { + queue->SignalEventImpl(partitionStream, deferred); + } +} + +void TDeferredActions::DeferReadFromProcessor(const IProcessor::TPtr& processor, + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage* dst, + IProcessor::TReadCallback callback) +{ + Y_ASSERT(!Processor); + Y_ASSERT(!ReadDst); + Y_ASSERT(!ReadCallback); + Processor = processor; + ReadDst = dst; + ReadCallback = std::move(callback); +} + +void TDeferredActions::DeferStartExecutorTask(const IExecutor::TPtr& executor, IExecutor::TFunction task) { + ExecutorsTasks.emplace_back(executor, std::move(task)); +} + +void TDeferredActions::DeferAbortSession(const IErrorHandler::TPtr& errorHandler, TSessionClosedEvent&& closeEvent) { + ErrorHandler = errorHandler; + SessionClosedEvent.ConstructInPlace(std::move(closeEvent)); 
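The Defer* methods in this hunk only record work; TDeferredActions runs everything from its destructor (declared below in read_session.h as ~TDeferredActions() { DoActions(); }), so reads, executor tasks and aborts happen after the session lock has been released. A stripped-down sketch of that defer-until-unlock idiom using only the standard library; TDeferred and its Defer method are illustrative names:

    #include <functional>
    #include <iostream>
    #include <mutex>
    #include <vector>

    class TDeferred {
    public:
        // Runs the recorded actions when the object goes out of scope.
        ~TDeferred() {
            for (auto& action : Actions) {
                action();
            }
        }

        void Defer(std::function<void()> action) {
            Actions.push_back(std::move(action));
        }

    private:
        std::vector<std::function<void()>> Actions;
    };

    int main() {
        std::mutex lock;
        TDeferred deferred;   // declared before the lock scope, destroyed after it
        {
            std::lock_guard<std::mutex> guard(lock);
            // ... mutate session state under the lock ...
            deferred.Defer([] { std::cout << "runs after the lock is gone\n"; });
        }   // guard released here; `deferred` fires its actions when main returns
    }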
+} + +void TDeferredActions::DeferAbortSession(const IErrorHandler::TPtr& errorHandler, EStatus statusCode, NYql::TIssues&& issues) { DeferAbortSession(errorHandler, TSessionClosedEvent(statusCode, std::move(issues))); -} - -void TDeferredActions::DeferAbortSession(const IErrorHandler::TPtr& errorHandler, EStatus statusCode, const TString& message) { - NYql::TIssues issues; - issues.AddIssue(message); - DeferAbortSession(errorHandler, statusCode, std::move(issues)); -} - -void TDeferredActions::DeferAbortSession(const IErrorHandler::TPtr& errorHandler, TPlainStatus&& status) { +} + +void TDeferredActions::DeferAbortSession(const IErrorHandler::TPtr& errorHandler, EStatus statusCode, const TString& message) { + NYql::TIssues issues; + issues.AddIssue(message); + DeferAbortSession(errorHandler, statusCode, std::move(issues)); +} + +void TDeferredActions::DeferAbortSession(const IErrorHandler::TPtr& errorHandler, TPlainStatus&& status) { DeferAbortSession(errorHandler, TSessionClosedEvent(std::move(status))); -} - -void TDeferredActions::DeferReconnection(std::shared_ptr<TSingleClusterReadSessionImpl> session, const IErrorHandler::TPtr& errorHandler, TPlainStatus&& status) { - Session = std::move(session); - ErrorHandler = errorHandler; - ReconnectionStatus = std::move(status); -} - +} + +void TDeferredActions::DeferReconnection(std::shared_ptr<TSingleClusterReadSessionImpl> session, const IErrorHandler::TPtr& errorHandler, TPlainStatus&& status) { + Session = std::move(session); + ErrorHandler = errorHandler; + ReconnectionStatus = std::move(status); +} + void TDeferredActions::DeferSignalWaiter(TWaiter&& waiter) { Waiters.emplace_back(std::move(waiter)); -} - -void TDeferredActions::DoActions() { - Read(); - StartExecutorTasks(); - AbortSession(); - Reconnect(); +} + +void TDeferredActions::DoActions() { + Read(); + StartExecutorTasks(); + AbortSession(); + Reconnect(); SignalWaiters(); -} - -void TDeferredActions::Read() { - if (ReadDst) { - Y_ASSERT(Processor); - Y_ASSERT(ReadCallback); - Processor->Read(ReadDst, std::move(ReadCallback)); - } -} - -void TDeferredActions::StartExecutorTasks() { - for (auto&& [executor, task] : ExecutorsTasks) { - executor->Post(std::move(task)); - } -} - -void TDeferredActions::AbortSession() { - if (SessionClosedEvent) { - Y_ASSERT(ErrorHandler); - ErrorHandler->AbortSession(std::move(*SessionClosedEvent)); - } -} - -void TDeferredActions::Reconnect() { - if (Session) { - Y_ASSERT(ErrorHandler); - if (!Session->Reconnect(ReconnectionStatus)) { - ErrorHandler->AbortSession(std::move(ReconnectionStatus)); - } - } -} - +} + +void TDeferredActions::Read() { + if (ReadDst) { + Y_ASSERT(Processor); + Y_ASSERT(ReadCallback); + Processor->Read(ReadDst, std::move(ReadCallback)); + } +} + +void TDeferredActions::StartExecutorTasks() { + for (auto&& [executor, task] : ExecutorsTasks) { + executor->Post(std::move(task)); + } +} + +void TDeferredActions::AbortSession() { + if (SessionClosedEvent) { + Y_ASSERT(ErrorHandler); + ErrorHandler->AbortSession(std::move(*SessionClosedEvent)); + } +} + +void TDeferredActions::Reconnect() { + if (Session) { + Y_ASSERT(ErrorHandler); + if (!Session->Reconnect(ReconnectionStatus)) { + ErrorHandler->AbortSession(std::move(ReconnectionStatus)); + } + } +} + void TDeferredActions::SignalWaiters() { for (auto& w : Waiters) { w.Signal(); - } -} - -void TErrorHandler::AbortSession(TSessionClosedEvent&& closeEvent) { - if (auto session = Session.lock()) { - session->Abort(std::move(closeEvent)); - } -} - -class 
TGracefulReleasingSimpleDataHandlers : public TThrRefBase { -public: - explicit TGracefulReleasingSimpleDataHandlers(std::function<void(TReadSessionEvent::TDataReceivedEvent&)> dataHandler, bool commitAfterProcessing) - : DataHandler(std::move(dataHandler)) - , CommitAfterProcessing(commitAfterProcessing) - { - } - - void OnDataReceived(TReadSessionEvent::TDataReceivedEvent& event) { + } +} + +void TErrorHandler::AbortSession(TSessionClosedEvent&& closeEvent) { + if (auto session = Session.lock()) { + session->Abort(std::move(closeEvent)); + } +} + +class TGracefulReleasingSimpleDataHandlers : public TThrRefBase { +public: + explicit TGracefulReleasingSimpleDataHandlers(std::function<void(TReadSessionEvent::TDataReceivedEvent&)> dataHandler, bool commitAfterProcessing) + : DataHandler(std::move(dataHandler)) + , CommitAfterProcessing(commitAfterProcessing) + { + } + + void OnDataReceived(TReadSessionEvent::TDataReceivedEvent& event) { Y_ASSERT(event.GetMessagesCount()); - TDeferredCommit deferredCommit; - with_lock (Lock) { - auto& offsetSet = PartitionStreamToUncommittedOffsets[event.GetPartitionStream()->GetPartitionStreamId()]; - // Messages could contain holes in offset, but later commit ack will tell us right border. - // So we can easily insert the whole interval with holes included. - // It will be removed from set by specifying proper right border. + TDeferredCommit deferredCommit; + with_lock (Lock) { + auto& offsetSet = PartitionStreamToUncommittedOffsets[event.GetPartitionStream()->GetPartitionStreamId()]; + // Messages could contain holes in offset, but later commit ack will tell us right border. + // So we can easily insert the whole interval with holes included. + // It will be removed from set by specifying proper right border. auto firstMessageOffsets = GetMessageOffsetRange(event, 0); auto lastMessageOffsets = GetMessageOffsetRange(event, event.GetMessagesCount() - 1); offsetSet.InsertInterval(firstMessageOffsets.first, lastMessageOffsets.second); - - if (CommitAfterProcessing) { - deferredCommit.Add(event); - } - } - DataHandler(event); - deferredCommit.Commit(); - } - - void OnCommitAcknowledgement(TReadSessionEvent::TCommitAcknowledgementEvent& event) { - with_lock (Lock) { - const ui64 partitionStreamId = event.GetPartitionStream()->GetPartitionStreamId(); - auto& offsetSet = PartitionStreamToUncommittedOffsets[partitionStreamId]; - if (offsetSet.EraseInterval(0, event.GetCommittedOffset() + 1)) { // Remove some offsets. - if (offsetSet.Empty()) { // No offsets left. - auto unconfirmedDestroyIt = UnconfirmedDestroys.find(partitionStreamId); - if (unconfirmedDestroyIt != UnconfirmedDestroys.end()) { - // Confirm and forget about this partition stream. 
- unconfirmedDestroyIt->second.Confirm(); - UnconfirmedDestroys.erase(unconfirmedDestroyIt); - PartitionStreamToUncommittedOffsets.erase(partitionStreamId); - } - } - } - } - } - - void OnCreatePartitionStream(TReadSessionEvent::TCreatePartitionStreamEvent& event) { - with_lock (Lock) { - Y_VERIFY(PartitionStreamToUncommittedOffsets[event.GetPartitionStream()->GetPartitionStreamId()].Empty()); - } - event.Confirm(); - } - - void OnDestroyPartitionStream(TReadSessionEvent::TDestroyPartitionStreamEvent& event) { - with_lock (Lock) { - const ui64 partitionStreamId = event.GetPartitionStream()->GetPartitionStreamId(); - Y_VERIFY(UnconfirmedDestroys.find(partitionStreamId) == UnconfirmedDestroys.end()); - if (PartitionStreamToUncommittedOffsets[partitionStreamId].Empty()) { - PartitionStreamToUncommittedOffsets.erase(partitionStreamId); - event.Confirm(); - } else { - UnconfirmedDestroys.emplace(partitionStreamId, std::move(event)); - } - } - } - - void OnPartitionStreamClosed(TReadSessionEvent::TPartitionStreamClosedEvent& event) { - with_lock (Lock) { - const ui64 partitionStreamId = event.GetPartitionStream()->GetPartitionStreamId(); - PartitionStreamToUncommittedOffsets.erase(partitionStreamId); - UnconfirmedDestroys.erase(partitionStreamId); - } - } - -private: - TAdaptiveLock Lock; // For the case when user gave us multithreaded executor. - const std::function<void(TReadSessionEvent::TDataReceivedEvent&)> DataHandler; - const bool CommitAfterProcessing; - THashMap<ui64, TDisjointIntervalTree<ui64>> PartitionStreamToUncommittedOffsets; // Partition stream id -> set of offsets. - THashMap<ui64, TReadSessionEvent::TDestroyPartitionStreamEvent> UnconfirmedDestroys; // Partition stream id -> destroy events. -}; - -TReadSessionSettings::TEventHandlers& TReadSessionSettings::TEventHandlers::SimpleDataHandlers(std::function<void(TReadSessionEvent::TDataReceivedEvent&)> dataHandler, - bool commitDataAfterProcessing, - bool gracefulReleaseAfterCommit) { - Y_ASSERT(dataHandler); - - PartitionStreamStatusHandler([](TReadSessionEvent::TPartitionStreamStatusEvent&){}); - - if (gracefulReleaseAfterCommit) { - auto handlers = MakeIntrusive<TGracefulReleasingSimpleDataHandlers>(std::move(dataHandler), commitDataAfterProcessing); - DataReceivedHandler([handlers](TReadSessionEvent::TDataReceivedEvent& event) { - handlers->OnDataReceived(event); - }); - CreatePartitionStreamHandler([handlers](TReadSessionEvent::TCreatePartitionStreamEvent& event) { - handlers->OnCreatePartitionStream(event); - }); - DestroyPartitionStreamHandler([handlers](TReadSessionEvent::TDestroyPartitionStreamEvent& event) { - handlers->OnDestroyPartitionStream(event); - }); - CommitAcknowledgementHandler([handlers](TReadSessionEvent::TCommitAcknowledgementEvent& event) { - handlers->OnCommitAcknowledgement(event); - }); - PartitionStreamClosedHandler([handlers](TReadSessionEvent::TPartitionStreamClosedEvent& event) { - handlers->OnPartitionStreamClosed(event); - }); - } else { - if (commitDataAfterProcessing) { - DataReceivedHandler([dataHandler = std::move(dataHandler)](TReadSessionEvent::TDataReceivedEvent& event) { - TDeferredCommit deferredCommit; - deferredCommit.Add(event); - dataHandler(event); - deferredCommit.Commit(); - }); - } else { - DataReceivedHandler(std::move(dataHandler)); - } - CreatePartitionStreamHandler([](TReadSessionEvent::TCreatePartitionStreamEvent& event) { - event.Confirm(); - }); - DestroyPartitionStreamHandler([](TReadSessionEvent::TDestroyPartitionStreamEvent& event) { - event.Confirm(); - }); - 
CommitAcknowledgementHandler([](TReadSessionEvent::TCommitAcknowledgementEvent&){}); - PartitionStreamClosedHandler([](TReadSessionEvent::TPartitionStreamClosedEvent&){}); - } - return *this; -} - -class TDeferredCommit::TImpl { -public: - - void Add(const TPartitionStream::TPtr& partitionStream, ui64 startOffset, ui64 endOffset); - void Add(const TPartitionStream::TPtr& partitionStream, ui64 offset); - - void Add(const TReadSessionEvent::TDataReceivedEvent::TMessage& message); - void Add(const TReadSessionEvent::TDataReceivedEvent& dataReceivedEvent); - - void Commit(); - -private: - static void Add(const TPartitionStream::TPtr& partitionStream, TDisjointIntervalTree<ui64>& offsetSet, ui64 startOffset, ui64 endOffset); - -private: - THashMap<TPartitionStream::TPtr, TDisjointIntervalTree<ui64>> Offsets; // Partition stream -> offsets set. -}; - -TDeferredCommit::TDeferredCommit() { -} - + + if (CommitAfterProcessing) { + deferredCommit.Add(event); + } + } + DataHandler(event); + deferredCommit.Commit(); + } + + void OnCommitAcknowledgement(TReadSessionEvent::TCommitAcknowledgementEvent& event) { + with_lock (Lock) { + const ui64 partitionStreamId = event.GetPartitionStream()->GetPartitionStreamId(); + auto& offsetSet = PartitionStreamToUncommittedOffsets[partitionStreamId]; + if (offsetSet.EraseInterval(0, event.GetCommittedOffset() + 1)) { // Remove some offsets. + if (offsetSet.Empty()) { // No offsets left. + auto unconfirmedDestroyIt = UnconfirmedDestroys.find(partitionStreamId); + if (unconfirmedDestroyIt != UnconfirmedDestroys.end()) { + // Confirm and forget about this partition stream. + unconfirmedDestroyIt->second.Confirm(); + UnconfirmedDestroys.erase(unconfirmedDestroyIt); + PartitionStreamToUncommittedOffsets.erase(partitionStreamId); + } + } + } + } + } + + void OnCreatePartitionStream(TReadSessionEvent::TCreatePartitionStreamEvent& event) { + with_lock (Lock) { + Y_VERIFY(PartitionStreamToUncommittedOffsets[event.GetPartitionStream()->GetPartitionStreamId()].Empty()); + } + event.Confirm(); + } + + void OnDestroyPartitionStream(TReadSessionEvent::TDestroyPartitionStreamEvent& event) { + with_lock (Lock) { + const ui64 partitionStreamId = event.GetPartitionStream()->GetPartitionStreamId(); + Y_VERIFY(UnconfirmedDestroys.find(partitionStreamId) == UnconfirmedDestroys.end()); + if (PartitionStreamToUncommittedOffsets[partitionStreamId].Empty()) { + PartitionStreamToUncommittedOffsets.erase(partitionStreamId); + event.Confirm(); + } else { + UnconfirmedDestroys.emplace(partitionStreamId, std::move(event)); + } + } + } + + void OnPartitionStreamClosed(TReadSessionEvent::TPartitionStreamClosedEvent& event) { + with_lock (Lock) { + const ui64 partitionStreamId = event.GetPartitionStream()->GetPartitionStreamId(); + PartitionStreamToUncommittedOffsets.erase(partitionStreamId); + UnconfirmedDestroys.erase(partitionStreamId); + } + } + +private: + TAdaptiveLock Lock; // For the case when user gave us multithreaded executor. + const std::function<void(TReadSessionEvent::TDataReceivedEvent&)> DataHandler; + const bool CommitAfterProcessing; + THashMap<ui64, TDisjointIntervalTree<ui64>> PartitionStreamToUncommittedOffsets; // Partition stream id -> set of offsets. + THashMap<ui64, TReadSessionEvent::TDestroyPartitionStreamEvent> UnconfirmedDestroys; // Partition stream id -> destroy events. 
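The two maps above are the whole state of the graceful-release logic: every data event inserts its offset half-interval into a TDisjointIntervalTree, every commit acknowledgement erases the committed prefix, and a pending destroy is confirmed only once the tree is empty. A hedged sketch of that bookkeeping for a single partition stream, using only the operations visible in this diff (InsertInterval, EraseInterval, Empty) and treating their exact semantics as an assumption rather than a reference:

    #include <library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.h>

    #include <util/stream/output.h>
    #include <util/system/types.h>

    int main() {
        TDisjointIntervalTree<ui64> uncommitted;

        // A data event covering offsets [100; 110) arrives; holes are included.
        uncommitted.InsertInterval(100, 110);

        // A commit ack reports everything below offset 105 as committed.
        uncommitted.EraseInterval(0, 105);
        Cout << "empty after partial ack: " << (uncommitted.Empty() ? "yes" : "no") << Endl;

        // The next ack covers the rest; a deferred destroy could now be confirmed.
        uncommitted.EraseInterval(0, 110);
        Cout << "empty after full ack: " << (uncommitted.Empty() ? "yes" : "no") << Endl;
    }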
+}; + +TReadSessionSettings::TEventHandlers& TReadSessionSettings::TEventHandlers::SimpleDataHandlers(std::function<void(TReadSessionEvent::TDataReceivedEvent&)> dataHandler, + bool commitDataAfterProcessing, + bool gracefulReleaseAfterCommit) { + Y_ASSERT(dataHandler); + + PartitionStreamStatusHandler([](TReadSessionEvent::TPartitionStreamStatusEvent&){}); + + if (gracefulReleaseAfterCommit) { + auto handlers = MakeIntrusive<TGracefulReleasingSimpleDataHandlers>(std::move(dataHandler), commitDataAfterProcessing); + DataReceivedHandler([handlers](TReadSessionEvent::TDataReceivedEvent& event) { + handlers->OnDataReceived(event); + }); + CreatePartitionStreamHandler([handlers](TReadSessionEvent::TCreatePartitionStreamEvent& event) { + handlers->OnCreatePartitionStream(event); + }); + DestroyPartitionStreamHandler([handlers](TReadSessionEvent::TDestroyPartitionStreamEvent& event) { + handlers->OnDestroyPartitionStream(event); + }); + CommitAcknowledgementHandler([handlers](TReadSessionEvent::TCommitAcknowledgementEvent& event) { + handlers->OnCommitAcknowledgement(event); + }); + PartitionStreamClosedHandler([handlers](TReadSessionEvent::TPartitionStreamClosedEvent& event) { + handlers->OnPartitionStreamClosed(event); + }); + } else { + if (commitDataAfterProcessing) { + DataReceivedHandler([dataHandler = std::move(dataHandler)](TReadSessionEvent::TDataReceivedEvent& event) { + TDeferredCommit deferredCommit; + deferredCommit.Add(event); + dataHandler(event); + deferredCommit.Commit(); + }); + } else { + DataReceivedHandler(std::move(dataHandler)); + } + CreatePartitionStreamHandler([](TReadSessionEvent::TCreatePartitionStreamEvent& event) { + event.Confirm(); + }); + DestroyPartitionStreamHandler([](TReadSessionEvent::TDestroyPartitionStreamEvent& event) { + event.Confirm(); + }); + CommitAcknowledgementHandler([](TReadSessionEvent::TCommitAcknowledgementEvent&){}); + PartitionStreamClosedHandler([](TReadSessionEvent::TPartitionStreamClosedEvent&){}); + } + return *this; +} + +class TDeferredCommit::TImpl { +public: + + void Add(const TPartitionStream::TPtr& partitionStream, ui64 startOffset, ui64 endOffset); + void Add(const TPartitionStream::TPtr& partitionStream, ui64 offset); + + void Add(const TReadSessionEvent::TDataReceivedEvent::TMessage& message); + void Add(const TReadSessionEvent::TDataReceivedEvent& dataReceivedEvent); + + void Commit(); + +private: + static void Add(const TPartitionStream::TPtr& partitionStream, TDisjointIntervalTree<ui64>& offsetSet, ui64 startOffset, ui64 endOffset); + +private: + THashMap<TPartitionStream::TPtr, TDisjointIntervalTree<ui64>> Offsets; // Partition stream -> offsets set. 
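TDeferredCommit, whose implementation class is declared just above, lets the consumer gather offsets from several data events and send them as one batch of commits. A hedged usage sketch from the caller's side, restricted to the Add and Commit calls shown in this diff; the CommitInOneGo function and the assumption that the events come from an already running read session are illustrative:

    #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h>

    using namespace NYdb::NPersQueue;

    // Illustrative sketch: process a batch of data events, then commit
    // all of their offsets at once.
    void CommitInOneGo(TVector<TReadSessionEvent::TDataReceivedEvent>& events) {
        TDeferredCommit deferredCommit;
        for (auto& event : events) {
            // ... hand the event's messages to the application here ...
            deferredCommit.Add(event);   // records the offsets, commits nothing yet
        }
        deferredCommit.Commit();         // one commit covering everything added above
    }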
+}; + +TDeferredCommit::TDeferredCommit() { +} + TDeferredCommit::TDeferredCommit(TDeferredCommit&&) = default; TDeferredCommit& TDeferredCommit::operator=(TDeferredCommit&&) = default; -TDeferredCommit::~TDeferredCommit() { -} - -#define GET_IMPL() \ - if (!Impl) { \ - Impl = MakeHolder<TImpl>(); \ - } \ - Impl - -void TDeferredCommit::Add(const TPartitionStream::TPtr& partitionStream, ui64 startOffset, ui64 endOffset) { - GET_IMPL()->Add(partitionStream, startOffset, endOffset); -} - -void TDeferredCommit::Add(const TPartitionStream::TPtr& partitionStream, ui64 offset) { - GET_IMPL()->Add(partitionStream, offset); -} - +TDeferredCommit::~TDeferredCommit() { +} + +#define GET_IMPL() \ + if (!Impl) { \ + Impl = MakeHolder<TImpl>(); \ + } \ + Impl + +void TDeferredCommit::Add(const TPartitionStream::TPtr& partitionStream, ui64 startOffset, ui64 endOffset) { + GET_IMPL()->Add(partitionStream, startOffset, endOffset); +} + +void TDeferredCommit::Add(const TPartitionStream::TPtr& partitionStream, ui64 offset) { + GET_IMPL()->Add(partitionStream, offset); +} + void TDeferredCommit::Add(const TReadSessionEvent::TDataReceivedEvent::TMessage& message) { GET_IMPL()->Add(message); -} - -void TDeferredCommit::Add(const TReadSessionEvent::TDataReceivedEvent& dataReceivedEvent) { - GET_IMPL()->Add(dataReceivedEvent); -} - -#undef GET_IMPL - -void TDeferredCommit::Commit() { - if (Impl) { - Impl->Commit(); - } -} - +} + +void TDeferredCommit::Add(const TReadSessionEvent::TDataReceivedEvent& dataReceivedEvent) { + GET_IMPL()->Add(dataReceivedEvent); +} + +#undef GET_IMPL + +void TDeferredCommit::Commit() { + if (Impl) { + Impl->Commit(); + } +} + void TDeferredCommit::TImpl::Add(const TReadSessionEvent::TDataReceivedEvent::TMessage& message) { Y_ASSERT(message.GetPartitionStream()); Add(message.GetPartitionStream(), message.GetOffset()); } -void TDeferredCommit::TImpl::Add(const TPartitionStream::TPtr& partitionStream, TDisjointIntervalTree<ui64>& offsetSet, ui64 startOffset, ui64 endOffset) { - if (offsetSet.Intersects(startOffset, endOffset)) { +void TDeferredCommit::TImpl::Add(const TPartitionStream::TPtr& partitionStream, TDisjointIntervalTree<ui64>& offsetSet, ui64 startOffset, ui64 endOffset) { + if (offsetSet.Intersects(startOffset, endOffset)) { ThrowFatalError(TStringBuilder() << "Commit set already has some offsets from half-interval [" << startOffset << "; " << endOffset << ") for partition stream with id " << partitionStream->GetPartitionStreamId()); - } else { - offsetSet.InsertInterval(startOffset, endOffset); - } -} - -void TDeferredCommit::TImpl::Add(const TPartitionStream::TPtr& partitionStream, ui64 startOffset, ui64 endOffset) { - Y_ASSERT(partitionStream); - Add(partitionStream, Offsets[partitionStream], startOffset, endOffset); -} - -void TDeferredCommit::TImpl::Add(const TPartitionStream::TPtr& partitionStream, ui64 offset) { - Y_ASSERT(partitionStream); - auto& offsetSet = Offsets[partitionStream]; - if (offsetSet.Has(offset)) { + } else { + offsetSet.InsertInterval(startOffset, endOffset); + } +} + +void TDeferredCommit::TImpl::Add(const TPartitionStream::TPtr& partitionStream, ui64 startOffset, ui64 endOffset) { + Y_ASSERT(partitionStream); + Add(partitionStream, Offsets[partitionStream], startOffset, endOffset); +} + +void TDeferredCommit::TImpl::Add(const TPartitionStream::TPtr& partitionStream, ui64 offset) { + Y_ASSERT(partitionStream); + auto& offsetSet = Offsets[partitionStream]; + if (offsetSet.Has(offset)) { ThrowFatalError(TStringBuilder() << "Commit set already has 
offset " << offset << " for partition stream with id " << partitionStream->GetPartitionStreamId()); - } else { - offsetSet.Insert(offset); - } -} - -void TDeferredCommit::TImpl::Add(const TReadSessionEvent::TDataReceivedEvent& dataReceivedEvent) { - const TPartitionStream::TPtr& partitionStream = dataReceivedEvent.GetPartitionStream(); - Y_ASSERT(partitionStream); - auto& offsetSet = Offsets[partitionStream]; + } else { + offsetSet.Insert(offset); + } +} + +void TDeferredCommit::TImpl::Add(const TReadSessionEvent::TDataReceivedEvent& dataReceivedEvent) { + const TPartitionStream::TPtr& partitionStream = dataReceivedEvent.GetPartitionStream(); + Y_ASSERT(partitionStream); + auto& offsetSet = Offsets[partitionStream]; auto [startOffset, endOffset] = GetMessageOffsetRange(dataReceivedEvent, 0); for (size_t i = 1; i < dataReceivedEvent.GetMessagesCount(); ++i) { auto msgOffsetRange = GetMessageOffsetRange(dataReceivedEvent, i); if (msgOffsetRange.first == endOffset) { endOffset= msgOffsetRange.second; - } else { - Add(partitionStream, offsetSet, startOffset, endOffset); + } else { + Add(partitionStream, offsetSet, startOffset, endOffset); startOffset = msgOffsetRange.first; endOffset = msgOffsetRange.second; - } - } - Add(partitionStream, offsetSet, startOffset, endOffset); -} - -void TDeferredCommit::TImpl::Commit() { - for (auto&& [partitionStream, offsetRanges] : Offsets) { - for (auto&& [startOffset, endOffset] : offsetRanges) { + } + } + Add(partitionStream, offsetSet, startOffset, endOffset); +} + +void TDeferredCommit::TImpl::Commit() { + for (auto&& [partitionStream, offsetRanges] : Offsets) { + for (auto&& [startOffset, endOffset] : offsetRanges) { static_cast<TPartitionStreamImpl*>(partitionStream.Get())->Commit(startOffset, endOffset); - } - } - Offsets.clear(); -} - -#define HISTOGRAM_SETUP NMonitoring::ExplicitHistogram({0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100}) - -TReaderCounters::TReaderCounters(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters) { - Errors = counters->GetCounter("errors", true); - CurrentSessionLifetimeMs = counters->GetCounter("currentSessionLifetimeMs", false); - BytesRead = counters->GetCounter("bytesRead", true); - MessagesRead = counters->GetCounter("messagesRead", true); - BytesReadCompressed = counters->GetCounter("bytesReadCompressed", true); - BytesInflightUncompressed = counters->GetCounter("bytesInflightUncompressed", false); - BytesInflightCompressed = counters->GetCounter("bytesInflightCompressed", false); - BytesInflightTotal = counters->GetCounter("bytesInflightTotal", false); - MessagesInflight = counters->GetCounter("messagesInflight", false); - - TotalBytesInflightUsageByTime = counters->GetHistogram("totalBytesInflightUsageByTime", HISTOGRAM_SETUP); - UncompressedBytesInflightUsageByTime = counters->GetHistogram("uncompressedBytesInflightUsageByTime", HISTOGRAM_SETUP); - CompressedBytesInflightUsageByTime = counters->GetHistogram("compressedBytesInflightUsageByTime", HISTOGRAM_SETUP); -} - -void MakeCountersNotNull(TReaderCounters& counters) { - if (!counters.Errors) { - counters.Errors = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - } - - if (!counters.CurrentSessionLifetimeMs) { - counters.CurrentSessionLifetimeMs = MakeIntrusive<NMonitoring::TCounterForPtr>(false); - } - - if (!counters.BytesRead) { - counters.BytesRead = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - } - - if (!counters.MessagesRead) { - counters.MessagesRead = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - } - - if 
(!counters.BytesReadCompressed) { - counters.BytesReadCompressed = MakeIntrusive<NMonitoring::TCounterForPtr>(true); - } - - if (!counters.BytesInflightUncompressed) { - counters.BytesInflightUncompressed = MakeIntrusive<NMonitoring::TCounterForPtr>(false); - } - - if (!counters.BytesInflightCompressed) { - counters.BytesInflightCompressed = MakeIntrusive<NMonitoring::TCounterForPtr>(false); - } - - if (!counters.BytesInflightTotal) { - counters.BytesInflightTotal = MakeIntrusive<NMonitoring::TCounterForPtr>(false); - } - - if (!counters.MessagesInflight) { - counters.MessagesInflight = MakeIntrusive<NMonitoring::TCounterForPtr>(false); - } - - - if (!counters.TotalBytesInflightUsageByTime) { - counters.TotalBytesInflightUsageByTime = MakeIntrusive<NMonitoring::THistogramCounter>(HISTOGRAM_SETUP); - } - - if (!counters.UncompressedBytesInflightUsageByTime) { - counters.UncompressedBytesInflightUsageByTime = MakeIntrusive<NMonitoring::THistogramCounter>(HISTOGRAM_SETUP); - } - - if (!counters.CompressedBytesInflightUsageByTime) { - counters.CompressedBytesInflightUsageByTime = MakeIntrusive<NMonitoring::THistogramCounter>(HISTOGRAM_SETUP); - } -} - -#undef HISTOGRAM_SETUP - -bool HasNullCounters(TReaderCounters& counters) { - return !counters.Errors - || !counters.CurrentSessionLifetimeMs - || !counters.BytesRead - || !counters.MessagesRead - || !counters.BytesReadCompressed - || !counters.BytesInflightUncompressed - || !counters.BytesInflightCompressed - || !counters.BytesInflightTotal - || !counters.MessagesInflight - || !counters.TotalBytesInflightUsageByTime - || !counters.UncompressedBytesInflightUsageByTime - || !counters.CompressedBytesInflightUsageByTime; -} - -} // namespace NYdb::NPersQueue + } + } + Offsets.clear(); +} + +#define HISTOGRAM_SETUP NMonitoring::ExplicitHistogram({0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100}) + +TReaderCounters::TReaderCounters(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters) { + Errors = counters->GetCounter("errors", true); + CurrentSessionLifetimeMs = counters->GetCounter("currentSessionLifetimeMs", false); + BytesRead = counters->GetCounter("bytesRead", true); + MessagesRead = counters->GetCounter("messagesRead", true); + BytesReadCompressed = counters->GetCounter("bytesReadCompressed", true); + BytesInflightUncompressed = counters->GetCounter("bytesInflightUncompressed", false); + BytesInflightCompressed = counters->GetCounter("bytesInflightCompressed", false); + BytesInflightTotal = counters->GetCounter("bytesInflightTotal", false); + MessagesInflight = counters->GetCounter("messagesInflight", false); + + TotalBytesInflightUsageByTime = counters->GetHistogram("totalBytesInflightUsageByTime", HISTOGRAM_SETUP); + UncompressedBytesInflightUsageByTime = counters->GetHistogram("uncompressedBytesInflightUsageByTime", HISTOGRAM_SETUP); + CompressedBytesInflightUsageByTime = counters->GetHistogram("compressedBytesInflightUsageByTime", HISTOGRAM_SETUP); +} + +void MakeCountersNotNull(TReaderCounters& counters) { + if (!counters.Errors) { + counters.Errors = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + } + + if (!counters.CurrentSessionLifetimeMs) { + counters.CurrentSessionLifetimeMs = MakeIntrusive<NMonitoring::TCounterForPtr>(false); + } + + if (!counters.BytesRead) { + counters.BytesRead = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + } + + if (!counters.MessagesRead) { + counters.MessagesRead = MakeIntrusive<NMonitoring::TCounterForPtr>(true); + } + + if (!counters.BytesReadCompressed) { + counters.BytesReadCompressed = 
MakeIntrusive<NMonitoring::TCounterForPtr>(true); + } + + if (!counters.BytesInflightUncompressed) { + counters.BytesInflightUncompressed = MakeIntrusive<NMonitoring::TCounterForPtr>(false); + } + + if (!counters.BytesInflightCompressed) { + counters.BytesInflightCompressed = MakeIntrusive<NMonitoring::TCounterForPtr>(false); + } + + if (!counters.BytesInflightTotal) { + counters.BytesInflightTotal = MakeIntrusive<NMonitoring::TCounterForPtr>(false); + } + + if (!counters.MessagesInflight) { + counters.MessagesInflight = MakeIntrusive<NMonitoring::TCounterForPtr>(false); + } + + + if (!counters.TotalBytesInflightUsageByTime) { + counters.TotalBytesInflightUsageByTime = MakeIntrusive<NMonitoring::THistogramCounter>(HISTOGRAM_SETUP); + } + + if (!counters.UncompressedBytesInflightUsageByTime) { + counters.UncompressedBytesInflightUsageByTime = MakeIntrusive<NMonitoring::THistogramCounter>(HISTOGRAM_SETUP); + } + + if (!counters.CompressedBytesInflightUsageByTime) { + counters.CompressedBytesInflightUsageByTime = MakeIntrusive<NMonitoring::THistogramCounter>(HISTOGRAM_SETUP); + } +} + +#undef HISTOGRAM_SETUP + +bool HasNullCounters(TReaderCounters& counters) { + return !counters.Errors + || !counters.CurrentSessionLifetimeMs + || !counters.BytesRead + || !counters.MessagesRead + || !counters.BytesReadCompressed + || !counters.BytesInflightUncompressed + || !counters.BytesInflightCompressed + || !counters.BytesInflightTotal + || !counters.MessagesInflight + || !counters.TotalBytesInflightUsageByTime + || !counters.UncompressedBytesInflightUsageByTime + || !counters.CompressedBytesInflightUsageByTime; +} + +} // namespace NYdb::NPersQueue diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/read_session.h b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/read_session.h index b5b874954f4..302e8c8aae4 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/read_session.h +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/read_session.h @@ -1,388 +1,388 @@ -#pragma once +#pragma once #include "common.h" #include <ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h> - + #include <ydb/public/api/grpc/draft/ydb_persqueue_v1.grpc.pb.h> #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h> - + #include <library/cpp/containers/disjoint_interval_tree/disjoint_interval_tree.h> -#include <util/digest/numeric.h> -#include <util/generic/hash.h> -#include <util/system/condvar.h> - -#include <atomic> -#include <deque> - -namespace NYdb::NPersQueue { - -class TPartitionStreamImpl; -class TSingleClusterReadSessionImpl; -class TDeferredActions; -class TReadSession; -using IReadSessionConnectionProcessorFactory = ISessionConnectionProcessorFactory<Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>; -class TReadSessionEventsQueue; - -struct IErrorHandler : public TThrRefBase { - using TPtr = TIntrusivePtr<IErrorHandler>; - - virtual void AbortSession(TSessionClosedEvent&& closeEvent) = 0; - - void AbortSession(EStatus statusCode, NYql::TIssues&& issues) { +#include <util/digest/numeric.h> +#include <util/generic/hash.h> +#include <util/system/condvar.h> + +#include <atomic> +#include <deque> + +namespace NYdb::NPersQueue { + +class TPartitionStreamImpl; +class TSingleClusterReadSessionImpl; +class TDeferredActions; +class TReadSession; +using IReadSessionConnectionProcessorFactory = ISessionConnectionProcessorFactory<Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, 
Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>; +class TReadSessionEventsQueue; + +struct IErrorHandler : public TThrRefBase { + using TPtr = TIntrusivePtr<IErrorHandler>; + + virtual void AbortSession(TSessionClosedEvent&& closeEvent) = 0; + + void AbortSession(EStatus statusCode, NYql::TIssues&& issues) { AbortSession(TSessionClosedEvent(statusCode, std::move(issues))); - } - - void AbortSession(EStatus statusCode, const TString& message) { - NYql::TIssues issues; - issues.AddIssue(message); - AbortSession(statusCode, std::move(issues)); - } - - void AbortSession(TPlainStatus&& status) { + } + + void AbortSession(EStatus statusCode, const TString& message) { + NYql::TIssues issues; + issues.AddIssue(message); + AbortSession(statusCode, std::move(issues)); + } + + void AbortSession(TPlainStatus&& status) { AbortSession(TSessionClosedEvent(std::move(status))); - } -}; - -// Special class that stores actions to be done after lock will be released. -class TDeferredActions { -public: - using IProcessor = IReadSessionConnectionProcessorFactory::IProcessor; - -public: - ~TDeferredActions() { - DoActions(); - } - - void DeferReadFromProcessor(const IProcessor::TPtr& processor, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage* dst, IProcessor::TReadCallback callback); - void DeferStartExecutorTask(const IExecutor::TPtr& executor, IExecutor::TFunction task); - void DeferAbortSession(const IErrorHandler::TPtr& errorHandler, TSessionClosedEvent&& closeEvent); - void DeferAbortSession(const IErrorHandler::TPtr& errorHandler, EStatus statusCode, NYql::TIssues&& issues); - void DeferAbortSession(const IErrorHandler::TPtr& errorHandler, EStatus statusCode, const TString& message); - void DeferAbortSession(const IErrorHandler::TPtr& errorHandler, TPlainStatus&& status); - void DeferReconnection(std::shared_ptr<TSingleClusterReadSessionImpl> session, const IErrorHandler::TPtr& errorHandler, TPlainStatus&& status); + } +}; + +// Special class that stores actions to be done after lock will be released. +class TDeferredActions { +public: + using IProcessor = IReadSessionConnectionProcessorFactory::IProcessor; + +public: + ~TDeferredActions() { + DoActions(); + } + + void DeferReadFromProcessor(const IProcessor::TPtr& processor, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage* dst, IProcessor::TReadCallback callback); + void DeferStartExecutorTask(const IExecutor::TPtr& executor, IExecutor::TFunction task); + void DeferAbortSession(const IErrorHandler::TPtr& errorHandler, TSessionClosedEvent&& closeEvent); + void DeferAbortSession(const IErrorHandler::TPtr& errorHandler, EStatus statusCode, NYql::TIssues&& issues); + void DeferAbortSession(const IErrorHandler::TPtr& errorHandler, EStatus statusCode, const TString& message); + void DeferAbortSession(const IErrorHandler::TPtr& errorHandler, TPlainStatus&& status); + void DeferReconnection(std::shared_ptr<TSingleClusterReadSessionImpl> session, const IErrorHandler::TPtr& errorHandler, TPlainStatus&& status); void DeferSignalWaiter(TWaiter&& waiter); - -private: - void DoActions(); - - void Read(); - void StartExecutorTasks(); - void AbortSession(); - void Reconnect(); + +private: + void DoActions(); + + void Read(); + void StartExecutorTasks(); + void AbortSession(); + void Reconnect(); void SignalWaiters(); - -private: - // Read. - IProcessor::TPtr Processor; - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage* ReadDst = nullptr; - IProcessor::TReadCallback ReadCallback; - - // Executor tasks. 
- std::vector<std::pair<IExecutor::TPtr, IExecutor::TFunction>> ExecutorsTasks; - - // Abort session. - IErrorHandler::TPtr ErrorHandler; - TMaybe<TSessionClosedEvent> SessionClosedEvent; - + +private: + // Read. + IProcessor::TPtr Processor; + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage* ReadDst = nullptr; + IProcessor::TReadCallback ReadCallback; + + // Executor tasks. + std::vector<std::pair<IExecutor::TPtr, IExecutor::TFunction>> ExecutorsTasks; + + // Abort session. + IErrorHandler::TPtr ErrorHandler; + TMaybe<TSessionClosedEvent> SessionClosedEvent; + // Waiters. std::vector<TWaiter> Waiters; - - // Reconnection. - std::shared_ptr<TSingleClusterReadSessionImpl> Session; - TPlainStatus ReconnectionStatus; -}; - -class TDataDecompressionInfo { -public: - TDataDecompressionInfo(const TDataDecompressionInfo&) = default; - TDataDecompressionInfo(TDataDecompressionInfo&&) = default; + + // Reconnection. + std::shared_ptr<TSingleClusterReadSessionImpl> Session; + TPlainStatus ReconnectionStatus; +}; + +class TDataDecompressionInfo { +public: + TDataDecompressionInfo(const TDataDecompressionInfo&) = default; + TDataDecompressionInfo(TDataDecompressionInfo&&) = default; TDataDecompressionInfo( Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData&& msg, std::weak_ptr<TSingleClusterReadSessionImpl> session, bool doDecompress ); - i64 StartDecompressionTasks(const IExecutor::TPtr& executor, - i64 availableMemory, - double averageCompressionRatio, - const TIntrusivePtr<TPartitionStreamImpl>& partitionStream, - TDeferredActions& deferred); - - bool IsReady() const { - return SourceDataNotProcessed == 0; - } - - bool AllDecompressionTasksStarted() const { - Y_VERIFY(ServerMessage.batches_size() > 0); - return CurrentDecompressingMessage.first >= static_cast<size_t>(ServerMessage.batches_size()); - } - - i64 GetCompressedDataSize() const { - return CompressedDataSize; - } - - const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData& GetServerMessage() const { - return ServerMessage; - } - - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData& GetServerMessage() { - return ServerMessage; - } - - TMaybe<std::pair<size_t, size_t>> GetReadyThreshold() const { - size_t readyCount = 0; - std::pair<size_t, size_t> ret; - for (auto i = ReadyThresholds.begin(), end = ReadyThresholds.end(); i != end; ++i) { - if (i->Ready) { - ret.first = i->Batch; - ret.second = i->Message; - ++readyCount; - } else { - break; - } - } - if (!readyCount) { - return Nothing(); - } - return ret; - } - - TWriteSessionMeta::TPtr GetBatchMeta(size_t batchIndex) const { - Y_ASSERT(batchIndex < BatchesMeta.size()); - return BatchesMeta[batchIndex]; - } - - // Takes data. Returns true if event has more unpacked data. 
+ i64 StartDecompressionTasks(const IExecutor::TPtr& executor, + i64 availableMemory, + double averageCompressionRatio, + const TIntrusivePtr<TPartitionStreamImpl>& partitionStream, + TDeferredActions& deferred); + + bool IsReady() const { + return SourceDataNotProcessed == 0; + } + + bool AllDecompressionTasksStarted() const { + Y_VERIFY(ServerMessage.batches_size() > 0); + return CurrentDecompressingMessage.first >= static_cast<size_t>(ServerMessage.batches_size()); + } + + i64 GetCompressedDataSize() const { + return CompressedDataSize; + } + + const Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData& GetServerMessage() const { + return ServerMessage; + } + + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData& GetServerMessage() { + return ServerMessage; + } + + TMaybe<std::pair<size_t, size_t>> GetReadyThreshold() const { + size_t readyCount = 0; + std::pair<size_t, size_t> ret; + for (auto i = ReadyThresholds.begin(), end = ReadyThresholds.end(); i != end; ++i) { + if (i->Ready) { + ret.first = i->Batch; + ret.second = i->Message; + ++readyCount; + } else { + break; + } + } + if (!readyCount) { + return Nothing(); + } + return ret; + } + + TWriteSessionMeta::TPtr GetBatchMeta(size_t batchIndex) const { + Y_ASSERT(batchIndex < BatchesMeta.size()); + return BatchesMeta[batchIndex]; + } + + // Takes data. Returns true if event has more unpacked data. bool TakeData(const TIntrusivePtr<TPartitionStreamImpl>& partitionStream, TVector<TReadSessionEvent::TDataReceivedEvent::TMessage>* messages, TVector<TReadSessionEvent::TDataReceivedEvent::TCompressedMessage>* compressedMessages, size_t* maxByteSize); - - bool HasMoreData() const { - return CurrentReadingMessage.first < static_cast<size_t>(GetServerMessage().batches_size()); - } - - bool HasReadyUnreadData() const; - - void PutDecompressionError(std::exception_ptr error, size_t batch, size_t message); - std::exception_ptr GetDecompressionError(size_t batch, size_t message); - -private: - // Special struct for marking (batch/message) as ready. - struct TReadyMessageThreshold { - size_t Batch = 0; // Last ready batch with message index. - size_t Message = 0; // Last ready message index. - std::atomic<bool> Ready = false; - }; - - struct TDecompressionTask { - explicit TDecompressionTask(TDataDecompressionInfo* parent, TIntrusivePtr<TPartitionStreamImpl> partitionStream, TReadyMessageThreshold* ready); - - // Decompress and notify about memory consumption changes. - void operator()(); - - void Add(size_t batch, size_t message, size_t sourceDataSize, size_t estimatedDecompressedSize); - - size_t AddedDataSize() const { - return SourceDataSize; - } + + bool HasMoreData() const { + return CurrentReadingMessage.first < static_cast<size_t>(GetServerMessage().batches_size()); + } + + bool HasReadyUnreadData() const; + + void PutDecompressionError(std::exception_ptr error, size_t batch, size_t message); + std::exception_ptr GetDecompressionError(size_t batch, size_t message); + +private: + // Special struct for marking (batch/message) as ready. + struct TReadyMessageThreshold { + size_t Batch = 0; // Last ready batch with message index. + size_t Message = 0; // Last ready message index. + std::atomic<bool> Ready = false; + }; + + struct TDecompressionTask { + explicit TDecompressionTask(TDataDecompressionInfo* parent, TIntrusivePtr<TPartitionStreamImpl> partitionStream, TReadyMessageThreshold* ready); + + // Decompress and notify about memory consumption changes. 
+ void operator()(); + + void Add(size_t batch, size_t message, size_t sourceDataSize, size_t estimatedDecompressedSize); + + size_t AddedDataSize() const { + return SourceDataSize; + } size_t AddedMessagesCount() const { return Messages.size(); } - - private: - TDataDecompressionInfo* Parent; - TIntrusivePtr<TPartitionStreamImpl> PartitionStream; - i64 SourceDataSize = 0; - i64 EstimatedDecompressedSize = 0; - i64 DecompressedSize = 0; - struct TMessageRange { - size_t Batch; - std::pair<size_t, size_t> MessageRange; - }; - std::vector<TMessageRange> Messages; - TReadyMessageThreshold* Ready; - }; - - void BuildBatchesMeta(); - -private: - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData ServerMessage; - std::vector<TWriteSessionMeta::TPtr> BatchesMeta; - std::weak_ptr<TSingleClusterReadSessionImpl> Session; + + private: + TDataDecompressionInfo* Parent; + TIntrusivePtr<TPartitionStreamImpl> PartitionStream; + i64 SourceDataSize = 0; + i64 EstimatedDecompressedSize = 0; + i64 DecompressedSize = 0; + struct TMessageRange { + size_t Batch; + std::pair<size_t, size_t> MessageRange; + }; + std::vector<TMessageRange> Messages; + TReadyMessageThreshold* Ready; + }; + + void BuildBatchesMeta(); + +private: + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData ServerMessage; + std::vector<TWriteSessionMeta::TPtr> BatchesMeta; + std::weak_ptr<TSingleClusterReadSessionImpl> Session; bool DoDecompress; - i64 CompressedDataSize = 0; - std::atomic<i64> SourceDataNotProcessed = 0; - std::pair<size_t, size_t> CurrentDecompressingMessage = {0, 0}; // (Batch, Message) - std::deque<TReadyMessageThreshold> ReadyThresholds; - std::pair<size_t, size_t> CurrentReadingMessage = {0, 0}; // (Batch, Message) - - // Decompression exceptions. - // Optimization for rare using. - std::atomic<bool> DecompressionErrorsStructCreated = false; - TAdaptiveLock DecompressionErrorsStructLock; - std::vector<std::vector<std::exception_ptr>> DecompressionErrors; -}; - -struct IUserRetrievedEventCallback { - virtual ~IUserRetrievedEventCallback() = default; - - virtual void OnUserRetrievedEvent(const TReadSessionEvent::TEvent& event) = 0; -}; - -struct TReadSessionEventInfo { - using TEvent = TReadSessionEvent::TEvent; - - // Event with only partition stream ref. - // Partition stream holds all its events. - TIntrusivePtr<TPartitionStreamImpl> PartitionStream; - TMaybe<TEvent> Event; - std::weak_ptr<IUserRetrievedEventCallback> Session; - - // Close event. - TReadSessionEventInfo(const TSessionClosedEvent& event, std::weak_ptr<IUserRetrievedEventCallback> session = {}) - : Event(TEvent(event)) - , Session(session) - { - } - - // Usual event. - TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, std::weak_ptr<IUserRetrievedEventCallback> session, TEvent event); - - // Data event. - TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, std::weak_ptr<IUserRetrievedEventCallback> session); - - TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, - std::weak_ptr<IUserRetrievedEventCallback> session, + i64 CompressedDataSize = 0; + std::atomic<i64> SourceDataNotProcessed = 0; + std::pair<size_t, size_t> CurrentDecompressingMessage = {0, 0}; // (Batch, Message) + std::deque<TReadyMessageThreshold> ReadyThresholds; + std::pair<size_t, size_t> CurrentReadingMessage = {0, 0}; // (Batch, Message) + + // Decompression exceptions. + // Optimization for rare using. 
+ std::atomic<bool> DecompressionErrorsStructCreated = false; + TAdaptiveLock DecompressionErrorsStructLock; + std::vector<std::vector<std::exception_ptr>> DecompressionErrors; +}; + +struct IUserRetrievedEventCallback { + virtual ~IUserRetrievedEventCallback() = default; + + virtual void OnUserRetrievedEvent(const TReadSessionEvent::TEvent& event) = 0; +}; + +struct TReadSessionEventInfo { + using TEvent = TReadSessionEvent::TEvent; + + // Event with only partition stream ref. + // Partition stream holds all its events. + TIntrusivePtr<TPartitionStreamImpl> PartitionStream; + TMaybe<TEvent> Event; + std::weak_ptr<IUserRetrievedEventCallback> Session; + + // Close event. + TReadSessionEventInfo(const TSessionClosedEvent& event, std::weak_ptr<IUserRetrievedEventCallback> session = {}) + : Event(TEvent(event)) + , Session(session) + { + } + + // Usual event. + TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, std::weak_ptr<IUserRetrievedEventCallback> session, TEvent event); + + // Data event. + TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, std::weak_ptr<IUserRetrievedEventCallback> session); + + TReadSessionEventInfo(TIntrusivePtr<TPartitionStreamImpl> partitionStream, + std::weak_ptr<IUserRetrievedEventCallback> session, TVector<TReadSessionEvent::TDataReceivedEvent::TMessage> messages, TVector<TReadSessionEvent::TDataReceivedEvent::TCompressedMessage> compressedMessages); - - bool IsEmpty() const; - bool IsDataEvent() const; - - // Takes data. Returns true if event has more unpacked data. + + bool IsEmpty() const; + bool IsDataEvent() const; + + // Takes data. Returns true if event has more unpacked data. bool TakeData(TVector<TReadSessionEvent::TDataReceivedEvent::TMessage>* messages, TVector<TReadSessionEvent::TDataReceivedEvent::TCompressedMessage>* comressedMessages, size_t* maxByteSize); - - TEvent& GetEvent() { - Y_ASSERT(Event); - return *Event; - } - - // Move event to partition stream queue. - void MoveToPartitionStream(); - - void ExtractFromPartitionStream(); - - void OnUserRetrievedEvent(); - - bool HasMoreData() const; // Has unread data. - bool HasReadyUnreadData() const; // Has ready unread data. - - bool IsSessionClosedEvent() const { - return Event && std::holds_alternative<TSessionClosedEvent>(*Event); - } -}; - -// Raw data with maybe uncompressed parts or other read session event. -struct TRawPartitionStreamEvent { - std::variant<TDataDecompressionInfo, TReadSessionEvent::TEvent> Event; - bool Signalled = false; - - TRawPartitionStreamEvent(const TRawPartitionStreamEvent&) = default; - TRawPartitionStreamEvent(TRawPartitionStreamEvent&&) = default; - + + TEvent& GetEvent() { + Y_ASSERT(Event); + return *Event; + } + + // Move event to partition stream queue. + void MoveToPartitionStream(); + + void ExtractFromPartitionStream(); + + void OnUserRetrievedEvent(); + + bool HasMoreData() const; // Has unread data. + bool HasReadyUnreadData() const; // Has ready unread data. + + bool IsSessionClosedEvent() const { + return Event && std::holds_alternative<TSessionClosedEvent>(*Event); + } +}; + +// Raw data with maybe uncompressed parts or other read session event. 
+struct TRawPartitionStreamEvent { + std::variant<TDataDecompressionInfo, TReadSessionEvent::TEvent> Event; + bool Signalled = false; + + TRawPartitionStreamEvent(const TRawPartitionStreamEvent&) = default; + TRawPartitionStreamEvent(TRawPartitionStreamEvent&&) = default; + TRawPartitionStreamEvent( Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData&& msg, std::weak_ptr<TSingleClusterReadSessionImpl> session, bool doDecompress ) : Event(std::in_place_type_t<TDataDecompressionInfo>(), std::move(msg), std::move(session), doDecompress) - { - } - - template <class T> - explicit TRawPartitionStreamEvent(T&& event) - : Event(std::in_place_type_t<TReadSessionEvent::TEvent>(), std::forward<T>(event)) - { - } - - bool IsDataEvent() const { - return std::holds_alternative<TDataDecompressionInfo>(Event); - } - - const TDataDecompressionInfo& GetData() const { - Y_ASSERT(IsDataEvent()); - return std::get<TDataDecompressionInfo>(Event); - } - - TDataDecompressionInfo& GetData() { - Y_ASSERT(IsDataEvent()); - return std::get<TDataDecompressionInfo>(Event); - } - - TReadSessionEvent::TEvent& GetEvent() { - Y_ASSERT(!IsDataEvent()); - return std::get<TReadSessionEvent::TEvent>(Event); - } - - const TReadSessionEvent::TEvent& GetEvent() const { - Y_ASSERT(!IsDataEvent()); - return std::get<TReadSessionEvent::TEvent>(Event); - } - - bool IsReady() const { - return !IsDataEvent() || GetData().IsReady(); - } - - void Signal(TPartitionStreamImpl* partitionStream, TReadSessionEventsQueue* queue, TDeferredActions& deferred); -}; - - - -class TPartitionStreamImpl : public TPartitionStream { -public: - struct TKey { // Hash<TKey> is defined later in this file. - TString Topic; - TString Cluster; - ui64 Partition; - - bool operator==(const TKey& other) const { - // Compare the most variable fields first. - return Partition == other.Partition - && Cluster == other.Cluster - && Topic == other.Topic; - } - }; - - TPartitionStreamImpl(ui64 partitionStreamId, - TString topicPath, - TString cluster, - ui64 partitionGroupId, - ui64 partitionId, - ui64 assignId, + { + } + + template <class T> + explicit TRawPartitionStreamEvent(T&& event) + : Event(std::in_place_type_t<TReadSessionEvent::TEvent>(), std::forward<T>(event)) + { + } + + bool IsDataEvent() const { + return std::holds_alternative<TDataDecompressionInfo>(Event); + } + + const TDataDecompressionInfo& GetData() const { + Y_ASSERT(IsDataEvent()); + return std::get<TDataDecompressionInfo>(Event); + } + + TDataDecompressionInfo& GetData() { + Y_ASSERT(IsDataEvent()); + return std::get<TDataDecompressionInfo>(Event); + } + + TReadSessionEvent::TEvent& GetEvent() { + Y_ASSERT(!IsDataEvent()); + return std::get<TReadSessionEvent::TEvent>(Event); + } + + const TReadSessionEvent::TEvent& GetEvent() const { + Y_ASSERT(!IsDataEvent()); + return std::get<TReadSessionEvent::TEvent>(Event); + } + + bool IsReady() const { + return !IsDataEvent() || GetData().IsReady(); + } + + void Signal(TPartitionStreamImpl* partitionStream, TReadSessionEventsQueue* queue, TDeferredActions& deferred); +}; + + + +class TPartitionStreamImpl : public TPartitionStream { +public: + struct TKey { // Hash<TKey> is defined later in this file. + TString Topic; + TString Cluster; + ui64 Partition; + + bool operator==(const TKey& other) const { + // Compare the most variable fields first. 
+ return Partition == other.Partition + && Cluster == other.Cluster + && Topic == other.Topic; + } + }; + + TPartitionStreamImpl(ui64 partitionStreamId, + TString topicPath, + TString cluster, + ui64 partitionGroupId, + ui64 partitionId, + ui64 assignId, ui64 readOffset, - std::weak_ptr<TSingleClusterReadSessionImpl> parentSession, - IErrorHandler::TPtr errorHandler) - : Key{topicPath, cluster, partitionId} - , AssignId(assignId) + std::weak_ptr<TSingleClusterReadSessionImpl> parentSession, + IErrorHandler::TPtr errorHandler) + : Key{topicPath, cluster, partitionId} + , AssignId(assignId) , FirstNotReadOffset(readOffset) - , Session(std::move(parentSession)) - , ErrorHandler(std::move(errorHandler)) - { - PartitionStreamId = partitionStreamId; - TopicPath = std::move(topicPath); - Cluster = std::move(cluster); - PartitionGroupId = partitionGroupId; - PartitionId = partitionId; + , Session(std::move(parentSession)) + , ErrorHandler(std::move(errorHandler)) + { + PartitionStreamId = partitionStreamId; + TopicPath = std::move(topicPath); + Cluster = std::move(cluster); + PartitionGroupId = partitionGroupId; + PartitionId = partitionId; MaxCommittedOffset = readOffset; - } - - ~TPartitionStreamImpl(); - + } + + ~TPartitionStreamImpl(); + ui64 GetFirstNotReadOffset() const { return FirstNotReadOffset; } @@ -392,91 +392,91 @@ public: } void Commit(ui64 startOffset, ui64 endOffset) /*override*/; - void RequestStatus() override; - - void ConfirmCreate(TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset); - void ConfirmDestroy(); - - void StopReading() /*override*/; - void ResumeReading() /*override*/; - - ui64 GetAssignId() const { - return AssignId; - } - - const TKey& GetKey() const { - return Key; - } - - template <class T> - void InsertEvent(T&& event) { - EventsQueue.emplace_back(std::forward<T>(event)); - } - + void RequestStatus() override; + + void ConfirmCreate(TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset); + void ConfirmDestroy(); + + void StopReading() /*override*/; + void ResumeReading() /*override*/; + + ui64 GetAssignId() const { + return AssignId; + } + + const TKey& GetKey() const { + return Key; + } + + template <class T> + void InsertEvent(T&& event) { + EventsQueue.emplace_back(std::forward<T>(event)); + } + TDataDecompressionInfo& InsertDataEvent( Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch::PartitionData&& msg, bool doDecompress ) { - ++DataDecompressionEventsCount; + ++DataDecompressionEventsCount; return EventsQueue.emplace_back(std::move(msg), Session, doDecompress).GetData(); - } - - bool IsWaitingForDataDecompression() const { - return DataDecompressionEventsCount > 0; - } - - bool HasEvents() const { - return !EventsQueue.empty(); - } - - TRawPartitionStreamEvent& TopEvent() { - return EventsQueue.front(); - } - - const TRawPartitionStreamEvent& TopEvent() const { - return EventsQueue.front(); - } - - void PopEvent() { - if (EventsQueue.front().IsDataEvent()) { - --DataDecompressionEventsCount; - } - EventsQueue.pop_front(); - } - - std::weak_ptr<TSingleClusterReadSessionImpl> GetSession() const { - return Session; - } - - TLog GetLog() const; - - void SignalReadyEvents(TReadSessionEventsQueue* queue, TDeferredActions& deferred); - - const IErrorHandler::TPtr& GetErrorHandler() const { - return ErrorHandler; - } - - ui64 GetMaxReadOffset() const { - return MaxReadOffset; - } - - ui64 GetMaxCommittedOffset() const { - return MaxCommittedOffset; - } - - void UpdateMaxReadOffset(ui64 offset) { - if (offset > MaxReadOffset) { - MaxReadOffset = 
offset; - } - } - - void UpdateMaxCommittedOffset(ui64 offset) { - if (offset > MaxCommittedOffset) { + } + + bool IsWaitingForDataDecompression() const { + return DataDecompressionEventsCount > 0; + } + + bool HasEvents() const { + return !EventsQueue.empty(); + } + + TRawPartitionStreamEvent& TopEvent() { + return EventsQueue.front(); + } + + const TRawPartitionStreamEvent& TopEvent() const { + return EventsQueue.front(); + } + + void PopEvent() { + if (EventsQueue.front().IsDataEvent()) { + --DataDecompressionEventsCount; + } + EventsQueue.pop_front(); + } + + std::weak_ptr<TSingleClusterReadSessionImpl> GetSession() const { + return Session; + } + + TLog GetLog() const; + + void SignalReadyEvents(TReadSessionEventsQueue* queue, TDeferredActions& deferred); + + const IErrorHandler::TPtr& GetErrorHandler() const { + return ErrorHandler; + } + + ui64 GetMaxReadOffset() const { + return MaxReadOffset; + } + + ui64 GetMaxCommittedOffset() const { + return MaxCommittedOffset; + } + + void UpdateMaxReadOffset(ui64 offset) { + if (offset > MaxReadOffset) { + MaxReadOffset = offset; + } + } + + void UpdateMaxCommittedOffset(ui64 offset) { + if (offset > MaxCommittedOffset) { ClientCommits.EraseInterval(MaxCommittedOffset, offset); - MaxCommittedOffset = offset; - } - } - + MaxCommittedOffset = offset; + } + } + bool HasCommitsInflight() const { if (ClientCommits.Empty()) return false; @@ -505,21 +505,21 @@ public: } -private: - const TKey Key; - ui64 AssignId; +private: + const TKey Key; + ui64 AssignId; ui64 FirstNotReadOffset; - std::weak_ptr<TSingleClusterReadSessionImpl> Session; - IErrorHandler::TPtr ErrorHandler; - std::deque<TRawPartitionStreamEvent> EventsQueue; - size_t DataDecompressionEventsCount = 0; - ui64 MaxReadOffset = 0; - ui64 MaxCommittedOffset = 0; + std::weak_ptr<TSingleClusterReadSessionImpl> Session; + IErrorHandler::TPtr ErrorHandler; + std::deque<TRawPartitionStreamEvent> EventsQueue; + size_t DataDecompressionEventsCount = 0; + ui64 MaxReadOffset = 0; + ui64 MaxCommittedOffset = 0; TDisjointIntervalTree<ui64> Commits; TDisjointIntervalTree<ui64> ClientCommits; -}; - +}; + class TReadSessionEventsQueue : public TBaseSessionEventsQueue<TReadSessionSettings, TReadSessionEvent::TEvent, TReadSessionEventInfo> { using TParent = TBaseSessionEventsQueue<TReadSessionSettings, TReadSessionEvent::TEvent, TReadSessionEventInfo>; @@ -730,387 +730,387 @@ private: -} // namespace NYdb::NPersQueue - -template <> -struct THash<NYdb::NPersQueue::TPartitionStreamImpl::TKey> { - size_t operator()(const NYdb::NPersQueue::TPartitionStreamImpl::TKey& key) const { - THash<TString> strHash; - const size_t h1 = strHash(key.Topic); - const size_t h2 = strHash(key.Cluster); - const size_t h3 = NumericHash(key.Partition); - return CombineHashes(h1, CombineHashes(h2, h3)); - } -}; - -namespace NYdb::NPersQueue { - -// Read session for single cluster. -// This class holds only read session logic. -// It is parametrized with output queue for client events -// and connection factory interface to separate logic from transport. 
-class TSingleClusterReadSessionImpl : public std::enable_shared_from_this<TSingleClusterReadSessionImpl>, - public IUserRetrievedEventCallback { -public: - using TPtr = std::shared_ptr<TSingleClusterReadSessionImpl>; - using IProcessor = IReadSessionConnectionProcessorFactory::IProcessor; - +} // namespace NYdb::NPersQueue + +template <> +struct THash<NYdb::NPersQueue::TPartitionStreamImpl::TKey> { + size_t operator()(const NYdb::NPersQueue::TPartitionStreamImpl::TKey& key) const { + THash<TString> strHash; + const size_t h1 = strHash(key.Topic); + const size_t h2 = strHash(key.Cluster); + const size_t h3 = NumericHash(key.Partition); + return CombineHashes(h1, CombineHashes(h2, h3)); + } +}; + +namespace NYdb::NPersQueue { + +// Read session for single cluster. +// This class holds only read session logic. +// It is parametrized with output queue for client events +// and connection factory interface to separate logic from transport. +class TSingleClusterReadSessionImpl : public std::enable_shared_from_this<TSingleClusterReadSessionImpl>, + public IUserRetrievedEventCallback { +public: + using TPtr = std::shared_ptr<TSingleClusterReadSessionImpl>; + using IProcessor = IReadSessionConnectionProcessorFactory::IProcessor; + friend class TPartitionStreamImpl; - TSingleClusterReadSessionImpl( - const TReadSessionSettings& settings, - const TString& clusterName, - const TLog& log, - std::shared_ptr<IReadSessionConnectionProcessorFactory> connectionFactory, - std::shared_ptr<TReadSessionEventsQueue> eventsQueue, - IErrorHandler::TPtr errorHandler, - NGrpc::IQueueClientContextPtr clientContext, - ui64 partitionStreamIdStart, ui64 partitionStreamIdStep - ) - : Settings(settings) - , ClusterName(clusterName) - , Log(log) - , NextPartitionStreamId(partitionStreamIdStart) - , PartitionStreamIdStep(partitionStreamIdStep) - , ConnectionFactory(std::move(connectionFactory)) - , EventsQueue(std::move(eventsQueue)) - , ErrorHandler(std::move(errorHandler)) - , ClientContext(std::move(clientContext)) - , CookieMapping(ErrorHandler) - { - } - - void Start(); - void ConfirmPartitionStreamCreate(const TPartitionStreamImpl* partitionStream, TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset); - void ConfirmPartitionStreamDestroy(TPartitionStreamImpl* partitionStream); - void RequestPartitionStreamStatus(const TPartitionStreamImpl* partitionStream); - void Commit(const TPartitionStreamImpl* partitionStream, ui64 startOffset, ui64 endOffset); - - void OnCreateNewDecompressionTask(); - void OnDataDecompressed(i64 sourceSize, i64 estimatedDecompressedSize, i64 decompressedSize, size_t messagesCount); - - TReadSessionEventsQueue* GetEventsQueue() { - return EventsQueue.get(); - } - - void OnUserRetrievedEvent(const TReadSessionEvent::TEvent& event) override; - - void Abort(); - void Close(std::function<void()> callback); - - bool Reconnect(const TPlainStatus& status); - - void StopReadingData(); - void ResumeReadingData(); - - void WaitAllDecompressionTasks(); - - void DumpStatisticsToLog(TLogElement& log); - void UpdateMemoryUsageStatistics(); - - const TLog& GetLog() const { - return Log; - } - -private: - void BreakConnectionAndReconnectImpl(TPlainStatus&& status, TDeferredActions& deferred); - - void BreakConnectionAndReconnectImpl(EStatus statusCode, NYql::TIssues&& issues, TDeferredActions& deferred) { - BreakConnectionAndReconnectImpl(TPlainStatus(statusCode, std::move(issues)), deferred); - } - - void BreakConnectionAndReconnectImpl(EStatus statusCode, const TString& message, TDeferredActions& deferred) { 
- BreakConnectionAndReconnectImpl(TPlainStatus(statusCode, message), deferred); - } - + TSingleClusterReadSessionImpl( + const TReadSessionSettings& settings, + const TString& clusterName, + const TLog& log, + std::shared_ptr<IReadSessionConnectionProcessorFactory> connectionFactory, + std::shared_ptr<TReadSessionEventsQueue> eventsQueue, + IErrorHandler::TPtr errorHandler, + NGrpc::IQueueClientContextPtr clientContext, + ui64 partitionStreamIdStart, ui64 partitionStreamIdStep + ) + : Settings(settings) + , ClusterName(clusterName) + , Log(log) + , NextPartitionStreamId(partitionStreamIdStart) + , PartitionStreamIdStep(partitionStreamIdStep) + , ConnectionFactory(std::move(connectionFactory)) + , EventsQueue(std::move(eventsQueue)) + , ErrorHandler(std::move(errorHandler)) + , ClientContext(std::move(clientContext)) + , CookieMapping(ErrorHandler) + { + } + + void Start(); + void ConfirmPartitionStreamCreate(const TPartitionStreamImpl* partitionStream, TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset); + void ConfirmPartitionStreamDestroy(TPartitionStreamImpl* partitionStream); + void RequestPartitionStreamStatus(const TPartitionStreamImpl* partitionStream); + void Commit(const TPartitionStreamImpl* partitionStream, ui64 startOffset, ui64 endOffset); + + void OnCreateNewDecompressionTask(); + void OnDataDecompressed(i64 sourceSize, i64 estimatedDecompressedSize, i64 decompressedSize, size_t messagesCount); + + TReadSessionEventsQueue* GetEventsQueue() { + return EventsQueue.get(); + } + + void OnUserRetrievedEvent(const TReadSessionEvent::TEvent& event) override; + + void Abort(); + void Close(std::function<void()> callback); + + bool Reconnect(const TPlainStatus& status); + + void StopReadingData(); + void ResumeReadingData(); + + void WaitAllDecompressionTasks(); + + void DumpStatisticsToLog(TLogElement& log); + void UpdateMemoryUsageStatistics(); + + const TLog& GetLog() const { + return Log; + } + +private: + void BreakConnectionAndReconnectImpl(TPlainStatus&& status, TDeferredActions& deferred); + + void BreakConnectionAndReconnectImpl(EStatus statusCode, NYql::TIssues&& issues, TDeferredActions& deferred) { + BreakConnectionAndReconnectImpl(TPlainStatus(statusCode, std::move(issues)), deferred); + } + + void BreakConnectionAndReconnectImpl(EStatus statusCode, const TString& message, TDeferredActions& deferred) { + BreakConnectionAndReconnectImpl(TPlainStatus(statusCode, message), deferred); + } + bool HasCommitsInflightImpl() const; - void OnConnectTimeout(const NGrpc::IQueueClientContextPtr& connectTimeoutContext); - void OnConnect(TPlainStatus&&, typename IProcessor::TPtr&&, const NGrpc::IQueueClientContextPtr& connectContext); - void DestroyAllPartitionStreamsImpl(TDeferredActions& deferred); // Destroy all streams before setting new connection // Assumes that we're under lock. - - // Initing. - void InitImpl(TDeferredActions& deferred); // Assumes that we're under lock. - - // Working logic. - void ContinueReadingDataImpl(); // Assumes that we're under lock. - bool IsActualPartitionStreamImpl(const TPartitionStreamImpl* partitionStream); // Assumes that we're under lock. - - // Read/Write. - void ReadFromProcessorImpl(TDeferredActions& deferred); // Assumes that we're under lock. - void WriteToProcessorImpl(Ydb::PersQueue::V1::MigrationStreamingReadClientMessage&& req); // Assumes that we're under lock. 
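// In outline, the read loop declared here works as follows: ReadFromProcessorImpl() posts an
// asynchronous read into ServerMessage, OnReadDone() receives the gRPC status together with the
// connection generation it was issued for, and the OnReadDoneImpl() overloads below handle the
// individual server message cases (InitResponse, DataBatch, Assigned, Release, Committed,
// PartitionStatus); as their comments note, they all assume the session lock is held.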
- void OnReadDone(NGrpc::TGrpcStatus&& grpcStatus, size_t connectionGeneration); - void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::InitResponse&& msg, TDeferredActions& deferred); // Assumes that we're under lock. - void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch&& msg, TDeferredActions& deferred); // Assumes that we're under lock. - void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Assigned&& msg, TDeferredActions& deferred); // Assumes that we're under lock. - void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Release&& msg, TDeferredActions& deferred); // Assumes that we're under lock. - void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Committed&& msg, TDeferredActions& deferred); // Assumes that we're under lock. - void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::PartitionStatus&& msg, TDeferredActions& deferred); // Assumes that we're under lock. - - void StartDecompressionTasksImpl(TDeferredActions& deferred); // Assumes that we're under lock. - - i64 GetCompressedDataSizeLimit() const { - const double overallLimit = static_cast<double>(Settings.MaxMemoryUsageBytes_); - // CompressedDataSize + CompressedDataSize * AverageCompressionRatio <= Settings.MaxMemoryUsageBytes_ + void OnConnectTimeout(const NGrpc::IQueueClientContextPtr& connectTimeoutContext); + void OnConnect(TPlainStatus&&, typename IProcessor::TPtr&&, const NGrpc::IQueueClientContextPtr& connectContext); + void DestroyAllPartitionStreamsImpl(TDeferredActions& deferred); // Destroy all streams before setting new connection // Assumes that we're under lock. + + // Initing. + void InitImpl(TDeferredActions& deferred); // Assumes that we're under lock. + + // Working logic. + void ContinueReadingDataImpl(); // Assumes that we're under lock. + bool IsActualPartitionStreamImpl(const TPartitionStreamImpl* partitionStream); // Assumes that we're under lock. + + // Read/Write. + void ReadFromProcessorImpl(TDeferredActions& deferred); // Assumes that we're under lock. + void WriteToProcessorImpl(Ydb::PersQueue::V1::MigrationStreamingReadClientMessage&& req); // Assumes that we're under lock. + void OnReadDone(NGrpc::TGrpcStatus&& grpcStatus, size_t connectionGeneration); + void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::InitResponse&& msg, TDeferredActions& deferred); // Assumes that we're under lock. + void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::DataBatch&& msg, TDeferredActions& deferred); // Assumes that we're under lock. + void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Assigned&& msg, TDeferredActions& deferred); // Assumes that we're under lock. + void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Release&& msg, TDeferredActions& deferred); // Assumes that we're under lock. + void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::Committed&& msg, TDeferredActions& deferred); // Assumes that we're under lock. + void OnReadDoneImpl(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::PartitionStatus&& msg, TDeferredActions& deferred); // Assumes that we're under lock. + + void StartDecompressionTasksImpl(TDeferredActions& deferred); // Assumes that we're under lock. 
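// Memory budgeting for the two limits that follow, spelled out: with R = AverageCompressionRatio
// the invariant from the comment below,
//     CompressedDataSize + CompressedDataSize * R <= Settings.MaxMemoryUsageBytes_,
// gives a compressed budget of MaxMemoryUsageBytes_ / (1 + R); the decompressed budget is the
// remainder, and both are clamped to at least 1. As an illustrative example only: with a 64 MiB
// limit and an observed ratio of 3.0, the compressed limit is about 16 MiB and the decompressed
// limit about 48 MiB.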
+ + i64 GetCompressedDataSizeLimit() const { + const double overallLimit = static_cast<double>(Settings.MaxMemoryUsageBytes_); + // CompressedDataSize + CompressedDataSize * AverageCompressionRatio <= Settings.MaxMemoryUsageBytes_ return Max<i64>(1l, static_cast<i64>(overallLimit / (1.0 + AverageCompressionRatio))); - } - - i64 GetDecompressedDataSizeLimit() const { + } + + i64 GetDecompressedDataSizeLimit() const { return Max<i64>(1l, static_cast<i64>(Settings.MaxMemoryUsageBytes_) - GetCompressedDataSizeLimit()); - } - - void CallCloseCallbackImpl(); - - void UpdateMemoryUsageStatisticsImpl(); - -private: - struct TPartitionCookieMapping { - struct TCookie : public TThrRefBase { - struct TKey { - const ui64 AssignId; - const ui64 CookieId; - - TKey(ui64 assignId, ui64 cookieId) - : AssignId(assignId) - , CookieId(cookieId) - { - } - - bool operator==(const TKey& k) const { - return AssignId == k.AssignId && CookieId == k.CookieId; - } - - struct THash { - size_t operator()(const TKey& k) const { - ::THash<std::pair<ui64, ui64>> h; - return h(std::make_pair(k.AssignId, k.CookieId)); - } - }; - - }; - - using TPtr = TIntrusivePtr<TCookie>; - - explicit TCookie(ui64 cookie, TIntrusivePtr<TPartitionStreamImpl> partitionStream) - : Cookie(cookie) - , PartitionStream(std::move(partitionStream)) - { - } - - // Sets reverse mapping for max offset in this cookie. - void SetOffsetRange(const std::pair<ui64, ui64>& offsetRange) { - OffsetRange = offsetRange; - UncommittedMessagesLeft = offsetRange.second - offsetRange.first; - } - - TKey GetKey() const { - return TKey(PartitionStream->GetAssignId(), Cookie); - } - - ui64 Cookie = 0; - TIntrusivePtr<TPartitionStreamImpl> PartitionStream; - std::pair<ui64, ui64> OffsetRange; - size_t UncommittedMessagesLeft = 0; - }; - - explicit TPartitionCookieMapping(IErrorHandler::TPtr errorHandler) - : ErrorHandler(std::move(errorHandler)) - { - } - - bool AddMapping(const TCookie::TPtr& cookie); - - // Removes (partition stream, offset) from mapping. - // Returns cookie ptr if this was the last message, otherwise nullptr. - TCookie::TPtr CommitOffset(ui64 partitionStreamId, ui64 offset); - - // Gets and then removes committed cookie from mapping. - TCookie::TPtr RetrieveCommittedCookie(const Ydb::PersQueue::V1::CommitCookie& cookieProto); - - // Removes mapping on partition stream. - void RemoveMapping(ui64 partitionStreamId); - - // Clear all mapping before reconnect. - void ClearMapping(); - - bool HasUnacknowledgedCookies() const; - - private: - THashMap<TCookie::TKey, TCookie::TPtr, TCookie::TKey::THash> Cookies; - THashMap<std::pair<ui64, ui64>, TCookie::TPtr> UncommittedOffsetToCookie; // (Partition stream id, Offset) -> Cookie. - THashMultiMap<ui64, TCookie::TPtr> PartitionStreamIdToCookie; - IErrorHandler::TPtr ErrorHandler; - size_t CommitInflight = 0; // Commit inflight to server. 
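// Roughly, the cookie mapping above works like this: AddMapping() registers a commit cookie for a
// partition stream together with its offset range, CommitOffset() is called as the client commits
// offsets and returns the cookie once its whole range is covered so the commit can be sent to the
// server (tracked by CommitInflight), RetrieveCommittedCookie() resolves the server's Committed
// reply back to the cookie, and RemoveMapping()/ClearMapping() drop per-stream state or reset
// everything before a reconnect.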
- }; - - struct TDecompressionQueueItem { - TDecompressionQueueItem(TDataDecompressionInfo* batchInfo, TIntrusivePtr<TPartitionStreamImpl> partitionStream) - : BatchInfo(batchInfo) - , PartitionStream(std::move(partitionStream)) - { - } - - TDataDecompressionInfo* BatchInfo; - TIntrusivePtr<TPartitionStreamImpl> PartitionStream; - }; - -private: - const TReadSessionSettings Settings; - const TString ClusterName; - TLog Log; - ui64 NextPartitionStreamId; - ui64 PartitionStreamIdStep; - std::shared_ptr<IReadSessionConnectionProcessorFactory> ConnectionFactory; - std::shared_ptr<TReadSessionEventsQueue> EventsQueue; - IErrorHandler::TPtr ErrorHandler; - NGrpc::IQueueClientContextPtr ClientContext; // Common client context. - NGrpc::IQueueClientContextPtr ConnectContext; - NGrpc::IQueueClientContextPtr ConnectTimeoutContext; - NGrpc::IQueueClientContextPtr ConnectDelayContext; - size_t ConnectionGeneration = 0; - TAdaptiveLock Lock; - IProcessor::TPtr Processor; - IRetryState::TPtr RetryState; // Current retry state (if now we are (re)connecting). - size_t ConnectionAttemptsDone = 0; - - // Memory usage. - i64 CompressedDataSize = 0; - i64 DecompressedDataSize = 0; - double AverageCompressionRatio = 1.0; // Weighted average for compression memory usage estimate. - TInstant UsageStatisticsLastUpdateTime = TInstant::Now(); - - bool WaitingReadResponse = false; - std::shared_ptr<Ydb::PersQueue::V1::MigrationStreamingReadServerMessage> ServerMessage; // Server message to write server response to. + } + + void CallCloseCallbackImpl(); + + void UpdateMemoryUsageStatisticsImpl(); + +private: + struct TPartitionCookieMapping { + struct TCookie : public TThrRefBase { + struct TKey { + const ui64 AssignId; + const ui64 CookieId; + + TKey(ui64 assignId, ui64 cookieId) + : AssignId(assignId) + , CookieId(cookieId) + { + } + + bool operator==(const TKey& k) const { + return AssignId == k.AssignId && CookieId == k.CookieId; + } + + struct THash { + size_t operator()(const TKey& k) const { + ::THash<std::pair<ui64, ui64>> h; + return h(std::make_pair(k.AssignId, k.CookieId)); + } + }; + + }; + + using TPtr = TIntrusivePtr<TCookie>; + + explicit TCookie(ui64 cookie, TIntrusivePtr<TPartitionStreamImpl> partitionStream) + : Cookie(cookie) + , PartitionStream(std::move(partitionStream)) + { + } + + // Sets reverse mapping for max offset in this cookie. + void SetOffsetRange(const std::pair<ui64, ui64>& offsetRange) { + OffsetRange = offsetRange; + UncommittedMessagesLeft = offsetRange.second - offsetRange.first; + } + + TKey GetKey() const { + return TKey(PartitionStream->GetAssignId(), Cookie); + } + + ui64 Cookie = 0; + TIntrusivePtr<TPartitionStreamImpl> PartitionStream; + std::pair<ui64, ui64> OffsetRange; + size_t UncommittedMessagesLeft = 0; + }; + + explicit TPartitionCookieMapping(IErrorHandler::TPtr errorHandler) + : ErrorHandler(std::move(errorHandler)) + { + } + + bool AddMapping(const TCookie::TPtr& cookie); + + // Removes (partition stream, offset) from mapping. + // Returns cookie ptr if this was the last message, otherwise nullptr. + TCookie::TPtr CommitOffset(ui64 partitionStreamId, ui64 offset); + + // Gets and then removes committed cookie from mapping. + TCookie::TPtr RetrieveCommittedCookie(const Ydb::PersQueue::V1::CommitCookie& cookieProto); + + // Removes mapping on partition stream. + void RemoveMapping(ui64 partitionStreamId); + + // Clear all mapping before reconnect. 
+ void ClearMapping(); + + bool HasUnacknowledgedCookies() const; + + private: + THashMap<TCookie::TKey, TCookie::TPtr, TCookie::TKey::THash> Cookies; + THashMap<std::pair<ui64, ui64>, TCookie::TPtr> UncommittedOffsetToCookie; // (Partition stream id, Offset) -> Cookie. + THashMultiMap<ui64, TCookie::TPtr> PartitionStreamIdToCookie; + IErrorHandler::TPtr ErrorHandler; + size_t CommitInflight = 0; // Commit inflight to server. + }; + + struct TDecompressionQueueItem { + TDecompressionQueueItem(TDataDecompressionInfo* batchInfo, TIntrusivePtr<TPartitionStreamImpl> partitionStream) + : BatchInfo(batchInfo) + , PartitionStream(std::move(partitionStream)) + { + } + + TDataDecompressionInfo* BatchInfo; + TIntrusivePtr<TPartitionStreamImpl> PartitionStream; + }; + +private: + const TReadSessionSettings Settings; + const TString ClusterName; + TLog Log; + ui64 NextPartitionStreamId; + ui64 PartitionStreamIdStep; + std::shared_ptr<IReadSessionConnectionProcessorFactory> ConnectionFactory; + std::shared_ptr<TReadSessionEventsQueue> EventsQueue; + IErrorHandler::TPtr ErrorHandler; + NGrpc::IQueueClientContextPtr ClientContext; // Common client context. + NGrpc::IQueueClientContextPtr ConnectContext; + NGrpc::IQueueClientContextPtr ConnectTimeoutContext; + NGrpc::IQueueClientContextPtr ConnectDelayContext; + size_t ConnectionGeneration = 0; + TAdaptiveLock Lock; + IProcessor::TPtr Processor; + IRetryState::TPtr RetryState; // Current retry state (if now we are (re)connecting). + size_t ConnectionAttemptsDone = 0; + + // Memory usage. + i64 CompressedDataSize = 0; + i64 DecompressedDataSize = 0; + double AverageCompressionRatio = 1.0; // Weighted average for compression memory usage estimate. + TInstant UsageStatisticsLastUpdateTime = TInstant::Now(); + + bool WaitingReadResponse = false; + std::shared_ptr<Ydb::PersQueue::V1::MigrationStreamingReadServerMessage> ServerMessage; // Server message to write server response to. THashMap<ui64, TIntrusivePtr<TPartitionStreamImpl>> PartitionStreams; // assignId -> Partition stream. - TPartitionCookieMapping CookieMapping; - std::deque<TDecompressionQueueItem> DecompressionQueue; - bool DataReadingSuspended = false; - - // Exiting. - bool Aborting = false; - bool Closing = false; - std::function<void()> CloseCallback; - std::atomic<int> DecompressionTasksInflight = 0; -}; - -// High level class that manages several read session impls. -// Each one of them works with single cluster. -// This class communicates with cluster discovery service and then creates -// sessions to each cluster. -class TReadSession : public IReadSession, - public IUserRetrievedEventCallback, - public std::enable_shared_from_this<TReadSession> { - struct TClusterSessionInfo { - TClusterSessionInfo(const TString& cluster) - : ClusterName(cluster) - { - } - - TString ClusterName; // In lower case - TSingleClusterReadSessionImpl::TPtr Session; - TVector<TTopicReadSettings> Topics; - TString ClusterEndpoint; - }; - -public: - TReadSession(const TReadSessionSettings& settings, - std::shared_ptr<TPersQueueClient::TImpl> client, - std::shared_ptr<TGRpcConnectionsImpl> connections, - TDbDriverStatePtr dbDriverState); - - ~TReadSession(); - + TPartitionCookieMapping CookieMapping; + std::deque<TDecompressionQueueItem> DecompressionQueue; + bool DataReadingSuspended = false; + + // Exiting. + bool Aborting = false; + bool Closing = false; + std::function<void()> CloseCallback; + std::atomic<int> DecompressionTasksInflight = 0; +}; + +// High level class that manages several read session impls. 
+// Each one of them works with single cluster. +// This class communicates with cluster discovery service and then creates +// sessions to each cluster. +class TReadSession : public IReadSession, + public IUserRetrievedEventCallback, + public std::enable_shared_from_this<TReadSession> { + struct TClusterSessionInfo { + TClusterSessionInfo(const TString& cluster) + : ClusterName(cluster) + { + } + + TString ClusterName; // In lower case + TSingleClusterReadSessionImpl::TPtr Session; + TVector<TTopicReadSettings> Topics; + TString ClusterEndpoint; + }; + +public: + TReadSession(const TReadSessionSettings& settings, + std::shared_ptr<TPersQueueClient::TImpl> client, + std::shared_ptr<TGRpcConnectionsImpl> connections, + TDbDriverStatePtr dbDriverState); + + ~TReadSession(); + void Start(); - - NThreading::TFuture<void> WaitEvent() override; - TVector<TReadSessionEvent::TEvent> GetEvents(bool block, TMaybe<size_t> maxEventsCount, size_t maxByteSize) override; - TMaybe<TReadSessionEvent::TEvent> GetEvent(bool block, size_t maxByteSize) override; - - bool Close(TDuration timeout) override; - - TString GetSessionId() const override { - return SessionId; - } - - TReaderCounters::TPtr GetCounters() const override { - return Settings.Counters_; // Always not nullptr. - } - - void AddTopic(const TTopicReadSettings& topicReadSettings) /*override*/ { - Y_UNUSED(topicReadSettings); - // TODO: implement. + + NThreading::TFuture<void> WaitEvent() override; + TVector<TReadSessionEvent::TEvent> GetEvents(bool block, TMaybe<size_t> maxEventsCount, size_t maxByteSize) override; + TMaybe<TReadSessionEvent::TEvent> GetEvent(bool block, size_t maxByteSize) override; + + bool Close(TDuration timeout) override; + + TString GetSessionId() const override { + return SessionId; + } + + TReaderCounters::TPtr GetCounters() const override { + return Settings.Counters_; // Always not nullptr. + } + + void AddTopic(const TTopicReadSettings& topicReadSettings) /*override*/ { + Y_UNUSED(topicReadSettings); + // TODO: implement. ThrowFatalError("Method \"AddTopic\" is not implemented"); - } - - void RemoveTopic(const TString& path) /*override*/ { - Y_UNUSED(path); - // TODO: implement. + } + + void RemoveTopic(const TString& path) /*override*/ { + Y_UNUSED(path); + // TODO: implement. ThrowFatalError("Method \"RemoveTopic\" is not implemented"); - } - - void RemoveTopic(const TString& path, const TVector<ui64>& partitionGruops) /*override*/ { - Y_UNUSED(path); - Y_UNUSED(partitionGruops); - // TODO: implement. + } + + void RemoveTopic(const TString& path, const TVector<ui64>& partitionGruops) /*override*/ { + Y_UNUSED(path); + Y_UNUSED(partitionGruops); + // TODO: implement. 
ThrowFatalError("Method \"RemoveTopic\" is not implemented"); - } - - void StopReadingData() override; - void ResumeReadingData() override; - - void Abort(TSessionClosedEvent&& closeEvent); - - void WaitAllDecompressionTasks(); - void ClearAllEvents(); - -private: - // Start - bool ValidateSettings(); - - // Cluster discovery - Ydb::PersQueue::ClusterDiscovery::DiscoverClustersRequest MakeClusterDiscoveryRequest() const; - void StartClusterDiscovery(); - void OnClusterDiscovery(const TStatus& status, const Ydb::PersQueue::ClusterDiscovery::DiscoverClustersResult& result); + } + + void StopReadingData() override; + void ResumeReadingData() override; + + void Abort(TSessionClosedEvent&& closeEvent); + + void WaitAllDecompressionTasks(); + void ClearAllEvents(); + +private: + // Start + bool ValidateSettings(); + + // Cluster discovery + Ydb::PersQueue::ClusterDiscovery::DiscoverClustersRequest MakeClusterDiscoveryRequest() const; + void StartClusterDiscovery(); + void OnClusterDiscovery(const TStatus& status, const Ydb::PersQueue::ClusterDiscovery::DiscoverClustersResult& result); void ProceedWithoutClusterDiscovery(); - void RestartClusterDiscoveryImpl(TDuration delay, TDeferredActions& deferred); + void RestartClusterDiscoveryImpl(TDuration delay, TDeferredActions& deferred); void CreateClusterSessionsImpl(); - - - // Shutdown. - void Abort(EStatus statusCode, NYql::TIssues&& issues); - void Abort(EStatus statusCode, const TString& message); - - void AbortImpl(TSessionClosedEvent&& closeEvent, TDeferredActions& deferred); - void AbortImpl(EStatus statusCode, NYql::TIssues&& issues, TDeferredActions& deferred); - void AbortImpl(EStatus statusCode, const TString& message, TDeferredActions& deferred); - - void OnUserRetrievedEvent(const TReadSessionEvent::TEvent& event) override; - - void MakeCountersIfNeeded(); - void DumpCountersToLog(size_t timeNumber = 0); - void ScheduleDumpCountersToLog(size_t timeNumber = 0); - -private: - TReadSessionSettings Settings; - const TString SessionId; - const TInstant StartSessionTime = TInstant::Now(); - TLog Log; - std::shared_ptr<TPersQueueClient::TImpl> Client; - std::shared_ptr<TGRpcConnectionsImpl> Connections; - IErrorHandler::TPtr ErrorHandler; - TDbDriverStatePtr DbDriverState; - TAdaptiveLock Lock; - std::shared_ptr<TReadSessionEventsQueue> EventsQueue; - THashMap<TString, TClusterSessionInfo> ClusterSessions; // Cluster name (in lower case) -> TClusterSessionInfo - NGrpc::IQueueClientContextPtr ClusterDiscoveryDelayContext; - IRetryState::TPtr ClusterDiscoveryRetryState; - bool DataReadingSuspended = false; - - NGrpc::IQueueClientContextPtr DumpCountersContext; - - // Exiting. - bool Aborting = false; - bool Closing = false; -}; - -} // namespace NYdb::NPersQueue + + + // Shutdown. 
+ void Abort(EStatus statusCode, NYql::TIssues&& issues); + void Abort(EStatus statusCode, const TString& message); + + void AbortImpl(TSessionClosedEvent&& closeEvent, TDeferredActions& deferred); + void AbortImpl(EStatus statusCode, NYql::TIssues&& issues, TDeferredActions& deferred); + void AbortImpl(EStatus statusCode, const TString& message, TDeferredActions& deferred); + + void OnUserRetrievedEvent(const TReadSessionEvent::TEvent& event) override; + + void MakeCountersIfNeeded(); + void DumpCountersToLog(size_t timeNumber = 0); + void ScheduleDumpCountersToLog(size_t timeNumber = 0); + +private: + TReadSessionSettings Settings; + const TString SessionId; + const TInstant StartSessionTime = TInstant::Now(); + TLog Log; + std::shared_ptr<TPersQueueClient::TImpl> Client; + std::shared_ptr<TGRpcConnectionsImpl> Connections; + IErrorHandler::TPtr ErrorHandler; + TDbDriverStatePtr DbDriverState; + TAdaptiveLock Lock; + std::shared_ptr<TReadSessionEventsQueue> EventsQueue; + THashMap<TString, TClusterSessionInfo> ClusterSessions; // Cluster name (in lower case) -> TClusterSessionInfo + NGrpc::IQueueClientContextPtr ClusterDiscoveryDelayContext; + IRetryState::TPtr ClusterDiscoveryRetryState; + bool DataReadingSuspended = false; + + NGrpc::IQueueClientContextPtr DumpCountersContext; + + // Exiting. + bool Aborting = false; + bool Closing = false; +}; + +} // namespace NYdb::NPersQueue diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/write_session.cpp b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/write_session.cpp index af4e08b9798..44da34d54f8 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/write_session.cpp +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/write_session.cpp @@ -19,7 +19,7 @@ namespace NCompressionDetails { } #define HISTOGRAM_SETUP NMonitoring::ExplicitHistogram({0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100}) -TWriterCounters::TWriterCounters(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters) { +TWriterCounters::TWriterCounters(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters) { Errors = counters->GetCounter("errors", true); CurrentSessionLifetimeMs = counters->GetCounter("currentSessionLifetimeMs", false); BytesWritten = counters->GetCounter("bytesWritten", true); @@ -38,7 +38,7 @@ TWriterCounters::TWriterCounters(const TIntrusivePtr<NMonitoring::TDynamicCounte TWriteSession::TWriteSession( const TWriteSessionSettings& settings, - std::shared_ptr<TPersQueueClient::TImpl> client, + std::shared_ptr<TPersQueueClient::TImpl> client, std::shared_ptr<TGRpcConnectionsImpl> connections, TDbDriverStatePtr dbDriverState) : Settings(settings) @@ -46,7 +46,7 @@ TWriteSession::TWriteSession( , Connections(std::move(connections)) , DbDriverState(std::move(dbDriverState)) , PrevToken(DbDriverState->CredentialsProvider ? DbDriverState->CredentialsProvider->GetAuthInfo() : "") - , EventsQueue(std::make_shared<TWriteSessionEventsQueue>(Settings)) + , EventsQueue(std::make_shared<TWriteSessionEventsQueue>(Settings)) , InitSeqNoPromise(NThreading::NewPromise<ui64>()) , WakeupInterval( Settings.BatchFlushInterval_.GetOrElse(TDuration::Zero()) ? 
@@ -58,8 +58,8 @@ TWriteSession::TWriteSession( if (!Settings.RetryPolicy_) { Settings.RetryPolicy_ = IRetryPolicy::GetDefaultPolicy(); } - if (Settings.PreferredCluster_ && !Settings.AllowFallbackToOtherClusters_) { - TargetCluster = *Settings.PreferredCluster_; + if (Settings.PreferredCluster_ && !Settings.AllowFallbackToOtherClusters_) { + TargetCluster = *Settings.PreferredCluster_; TargetCluster.to_lower(); } if (Settings.Counters_.Defined()) { @@ -94,7 +94,7 @@ TWriteSession::THandleResult TWriteSession::RestartImpl(const TPlainStatus& stat if (!RetryState) { RetryState = Settings.RetryPolicy_->CreateRetryState(); } - nextDelay = RetryState->GetNextRetryDelay(TPlainStatus(status)); + nextDelay = RetryState->GetNextRetryDelay(TPlainStatus(status)); if (nextDelay) { result.StartDelay = *nextDelay; @@ -145,8 +145,8 @@ void TWriteSession::DoCdsRequest(TDuration delay) { params->set_source_id(Settings.MessageGroupId_); if (Settings.PartitionGroupId_.Defined()) params->set_partition_group(*Settings.PartitionGroupId_); - if (Settings.PreferredCluster_.Defined()) - params->set_preferred_cluster_name(*Settings.PreferredCluster_); + if (Settings.PreferredCluster_.Defined()) + params->set_preferred_cluster_name(*Settings.PreferredCluster_); auto weakConnections = std::weak_ptr<TGRpcConnectionsImpl>(Connections); DbDriverState->Log << TLOG_INFO << LogPrefix() << "Do schedule cds request after " << delay.MilliSeconds() << " ms\n"; @@ -214,7 +214,7 @@ void TWriteSession::OnCdsResponse( << normalizedName); } else { name = clusterInfo.name(); - endpoint = ApplyClusterEndpoint(DbDriverState->DiscoveryEndpoint, clusterInfo.endpoint()); + endpoint = ApplyClusterEndpoint(DbDriverState->DiscoveryEndpoint, clusterInfo.endpoint()); break; } } @@ -286,7 +286,7 @@ TMaybe<TWriteSessionEvent::TEvent> TWriteSession::GetEvent(bool block) { // Client method TVector<TWriteSessionEvent::TEvent> TWriteSession::GetEvents(bool block, TMaybe<size_t> maxEventsCount) { - return EventsQueue->GetEvents(block, maxEventsCount); + return EventsQueue->GetEvents(block, maxEventsCount); } // Only called under lock @@ -299,8 +299,8 @@ ui64 TWriteSession::GetNextSeqNoImpl(const TMaybe<ui64>& seqNo) { OnSeqNoShift = false; SeqNoShift = 0; } - } - if (seqNo.Defined()) { + } + if (seqNo.Defined()) { if (*AutoSeqNoMode) { DbDriverState->Log << TLOG_ERR << LogPrefix() << "Cannot call write() with defined SeqNo on WriteSession running in auto-seqNo mode"; ThrowFatalError( @@ -468,13 +468,13 @@ void TWriteSession::OnConnectTimeout(const NGrpc::IQueueClientContextPtr& connec } else { return; } - TStringBuilder description; - description << "Failed to establish connection to server. Attempts done: " << ConnectionAttemptsDone; + TStringBuilder description; + description << "Failed to establish connection to server. 
Attempts done: " << ConnectionAttemptsDone; handleResult = RestartImpl(TPlainStatus(EStatus::TIMEOUT, description)); if (handleResult.DoStop) { CloseImpl( EStatus::TIMEOUT, - description + description ); } } @@ -790,9 +790,9 @@ bool TWriteSession::CleanupOnAcknowledged(ui64 sequenceNumber) { // Only called under Lock TMemoryUsageChange TWriteSession::OnMemoryUsageChangedImpl(i64 diff) { bool wasOk = MemoryUsage <= Settings.MaxMemoryUsage_; - //if (diff < 0) { - // Y_VERIFY(MemoryUsage >= static_cast<size_t>(std::abs(diff))); - //} + //if (diff < 0) { + // Y_VERIFY(MemoryUsage >= static_cast<size_t>(std::abs(diff))); + //} MemoryUsage += diff; bool nowOk = MemoryUsage <= Settings.MaxMemoryUsage_; if (wasOk != nowOk) { @@ -1217,7 +1217,7 @@ TWriteSession::~TWriteSession() { TSimpleBlockingWriteSession::TSimpleBlockingWriteSession( const TWriteSessionSettings& settings, - std::shared_ptr<TPersQueueClient::TImpl> client, + std::shared_ptr<TPersQueueClient::TImpl> client, std::shared_ptr<TGRpcConnectionsImpl> connections, TDbDriverStatePtr dbDriverState ) { diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/write_session.h b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/write_session.h index 324550bb02b..8f8c75203e8 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/write_session.h +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/write_session.h @@ -21,86 +21,86 @@ inline const TString& GetCodecId(const ECodec codec) { return idByCodec[codec]; } -class TWriteSessionEventsQueue : public TBaseSessionEventsQueue<TWriteSessionSettings, TWriteSessionEvent::TEvent> { - using TParent = TBaseSessionEventsQueue<TWriteSessionSettings, TWriteSessionEvent::TEvent>; +class TWriteSessionEventsQueue : public TBaseSessionEventsQueue<TWriteSessionSettings, TWriteSessionEvent::TEvent> { + using TParent = TBaseSessionEventsQueue<TWriteSessionSettings, TWriteSessionEvent::TEvent>; public: TWriteSessionEventsQueue(const TWriteSessionSettings& settings) : TParent(settings) {} - void PushEvent(TEventInfo eventInfo) { + void PushEvent(TEventInfo eventInfo) { if (Closed || ApplyHandler(eventInfo)) { - return; - } - - TWaiter waiter; - with_lock (Mutex) { - Events.emplace(std::move(eventInfo)); - waiter = PopWaiterImpl(); - } - waiter.Signal(); // Does nothing if waiter is empty. - } - - TMaybe<TEvent> GetEvent(bool block = false) { - TMaybe<TEventInfo> eventInfo; - with_lock (Mutex) { - if (block) { - WaitEventsImpl(); - } - if (HasEventsImpl()) { - eventInfo = GetEventImpl(); - } else { - return Nothing(); - } - } - eventInfo->OnUserRetrievedEvent(); - return std::move(eventInfo->Event); - } - - TVector<TEvent> GetEvents(bool block = false, TMaybe<size_t> maxEventsCount = Nothing()) { - TVector<TEventInfo> eventInfos; - with_lock (Mutex) { - if (block) { - WaitEventsImpl(); - } - eventInfos.reserve(Min(Events.size() + CloseEvent.Defined(), maxEventsCount ? 
*maxEventsCount : std::numeric_limits<size_t>::max())); - while (!Events.empty()) { - eventInfos.emplace_back(GetEventImpl()); - if (maxEventsCount && eventInfos.size() >= *maxEventsCount) { - break; - } - } - if (CloseEvent && Events.empty() && (!maxEventsCount || eventInfos.size() < *maxEventsCount)) { - eventInfos.push_back({*CloseEvent}); - } - } - - TVector<TEvent> result; - result.reserve(eventInfos.size()); - for (TEventInfo& eventInfo : eventInfos) { - eventInfo.OnUserRetrievedEvent(); - result.emplace_back(std::move(eventInfo.Event)); - } - return result; - } - - void Close(const TSessionClosedEvent& event) { + return; + } + + TWaiter waiter; + with_lock (Mutex) { + Events.emplace(std::move(eventInfo)); + waiter = PopWaiterImpl(); + } + waiter.Signal(); // Does nothing if waiter is empty. + } + + TMaybe<TEvent> GetEvent(bool block = false) { + TMaybe<TEventInfo> eventInfo; + with_lock (Mutex) { + if (block) { + WaitEventsImpl(); + } + if (HasEventsImpl()) { + eventInfo = GetEventImpl(); + } else { + return Nothing(); + } + } + eventInfo->OnUserRetrievedEvent(); + return std::move(eventInfo->Event); + } + + TVector<TEvent> GetEvents(bool block = false, TMaybe<size_t> maxEventsCount = Nothing()) { + TVector<TEventInfo> eventInfos; + with_lock (Mutex) { + if (block) { + WaitEventsImpl(); + } + eventInfos.reserve(Min(Events.size() + CloseEvent.Defined(), maxEventsCount ? *maxEventsCount : std::numeric_limits<size_t>::max())); + while (!Events.empty()) { + eventInfos.emplace_back(GetEventImpl()); + if (maxEventsCount && eventInfos.size() >= *maxEventsCount) { + break; + } + } + if (CloseEvent && Events.empty() && (!maxEventsCount || eventInfos.size() < *maxEventsCount)) { + eventInfos.push_back({*CloseEvent}); + } + } + + TVector<TEvent> result; + result.reserve(eventInfos.size()); + for (TEventInfo& eventInfo : eventInfos) { + eventInfo.OnUserRetrievedEvent(); + result.emplace_back(std::move(eventInfo.Event)); + } + return result; + } + + void Close(const TSessionClosedEvent& event) { TWaiter waiter; - with_lock (Mutex) { - CloseEvent = event; - Closed = true; + with_lock (Mutex) { + CloseEvent = event; + Closed = true; waiter = TWaiter(Waiter.ExtractPromise(), this); - } - + } + TEventInfo info(event); ApplyHandler(info); - + waiter.Signal(); - } - + } + private: - struct THandlersVisitor : public TParent::TBaseHandlersVisitor { - using TParent::TBaseHandlersVisitor::TBaseHandlersVisitor; + struct THandlersVisitor : public TParent::TBaseHandlersVisitor { + using TParent::TBaseHandlersVisitor::TBaseHandlersVisitor; #define DECLARE_HANDLER(type, handler, answer) \ bool operator()(type& event) { \ if (Settings.EventHandlers_.handler) { \ @@ -115,7 +115,7 @@ private: DECLARE_HANDLER(TSessionClosedEvent, SessionClosedHandler_, false); // Not applied #undef DECLARE_HANDLER - bool Visit() { + bool Visit() { return std::visit(*this, EventInfo.Event); } @@ -125,18 +125,18 @@ private: THandlersVisitor visitor(Settings, eventInfo); return visitor.Visit(); } - - TEventInfo GetEventImpl() { // Assumes that we're under lock and that the event queue has events. - Y_ASSERT(HasEventsImpl()); - if (!Events.empty()) { - TEventInfo event = std::move(Events.front()); - Events.pop(); + + TEventInfo GetEventImpl() { // Assumes that we're under lock and that the event queue has events. 
+ Y_ASSERT(HasEventsImpl()); + if (!Events.empty()) { + TEventInfo event = std::move(Events.front()); + Events.pop(); RenewWaiterImpl(); - return event; - } - Y_ASSERT(CloseEvent); - return {*CloseEvent}; - } + return event; + } + Y_ASSERT(CloseEvent); + return {*CloseEvent}; + } }; struct TMemoryUsageChange { @@ -149,16 +149,16 @@ namespace NTests { } class TWriteSession : public IWriteSession, - public std::enable_shared_from_this<TWriteSession> { + public std::enable_shared_from_this<TWriteSession> { private: friend class TSimpleBlockingWriteSession; - friend class TPersQueueClient; + friend class TPersQueueClient; friend class NTests::TSimpleWriteSessionTestAdapter; using TClientMessage = Ydb::PersQueue::V1::StreamingWriteClientMessage; using TServerMessage = Ydb::PersQueue::V1::StreamingWriteServerMessage; using IWriteSessionConnectionProcessorFactory = - TPersQueueClient::TImpl::IWriteSessionConnectionProcessorFactory; + TPersQueueClient::TImpl::IWriteSessionConnectionProcessorFactory; using IProcessor = IWriteSessionConnectionProcessorFactory::IProcessor; struct TMessage { @@ -287,7 +287,7 @@ private: public: TWriteSession(const TWriteSessionSettings& settings, - std::shared_ptr<TPersQueueClient::TImpl> client, + std::shared_ptr<TPersQueueClient::TImpl> client, std::shared_ptr<TGRpcConnectionsImpl> connections, TDbDriverStatePtr dbDriverState); @@ -366,7 +366,7 @@ private: private: TWriteSessionSettings Settings; - std::shared_ptr<TPersQueueClient::TImpl> Client; + std::shared_ptr<TPersQueueClient::TImpl> Client; std::shared_ptr<TGRpcConnectionsImpl> Connections; TString TargetCluster; TString InitialCluster; @@ -391,8 +391,8 @@ private: std::shared_ptr<TServerMessage> ServerMessage; // Server message to write server response to. TString SessionId; - IExecutor::TPtr Executor; - IExecutor::TPtr CompressionExecutor; + IExecutor::TPtr Executor; + IExecutor::TPtr CompressionExecutor; size_t MemoryUsage = 0; //!< Estimated amount of memory used TMessageBatch CurrentBatch; @@ -414,7 +414,7 @@ private: ui64 MinUnsentSeqNo = 0; ui64 SeqNoShift = 0; TMaybe<bool> AutoSeqNoMode; - bool ValidateSeqNoMode = false; + bool ValidateSeqNoMode = false; NThreading::TPromise<ui64> InitSeqNoPromise; bool InitSeqNoSetDone = false; @@ -440,7 +440,7 @@ private: public: TSimpleBlockingWriteSession( const TWriteSessionSettings& settings, - std::shared_ptr<TPersQueueClient::TImpl> client, + std::shared_ptr<TPersQueueClient::TImpl> client, std::shared_ptr<TGRpcConnectionsImpl> connections, TDbDriverStatePtr dbDriverState); diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h b/ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h index ee9aa75a0ae..1f2be5aa200 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h @@ -1,175 +1,175 @@ -#pragma once +#pragma once #include <ydb/public/api/grpc/draft/ydb_persqueue_v1.grpc.pb.h> #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h> - + #include <library/cpp/monlib/dynamic_counters/counters.h> #include <library/cpp/logger/log.h> -#include <util/datetime/base.h> -#include <util/generic/hash.h> -#include <util/generic/maybe.h> -#include <util/generic/ptr.h> +#include <util/datetime/base.h> +#include <util/generic/hash.h> +#include <util/generic/maybe.h> +#include <util/generic/ptr.h> #include <util/string/builder.h> -#include <util/thread/pool.h> - -#include <exception> -#include <variant> - +#include <util/thread/pool.h> + +#include <exception> +#include <variant> + 
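// The declarations that follow make up the public PersQueue surface of this header: the EFormat
// and ECodec enums, TCredentials, and TDescribeTopicResult with its nested TTopicSettings,
// TReadRule and TRemoteMirrorRule accessors; the GETTER macro used there simply expands to a
// const accessor returning the matching NAME_ member.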
namespace NYdb { class TProtoAccessor; } -namespace NYdb::NPersQueue { - -enum class EFormat { - BASE = 1, -}; - -enum class ECodec { - RAW = 1, - GZIP = 2, - LZOP = 3, - ZSTD = 4, -}; - -struct TCredentials { - enum class EMode { - NOT_SET = 1, - OAUTH_TOKEN = 2, - JWT_PARAMS = 3, - IAM = 4, - }; - - TCredentials() = default; - TCredentials(const Ydb::PersQueue::V1::Credentials& credentials); - EMode GetMode() const; - TString GetOauthToken() const; - TString GetJwtParams() const; - - TString GetIamServiceAccountKey() const; - TString GetIamEndpoint() const; - -private: - EMode Mode_; - Ydb::PersQueue::V1::Credentials Credentials_; -}; - - -// Result for describe resource request. -struct TDescribeTopicResult : public TStatus { +namespace NYdb::NPersQueue { + +enum class EFormat { + BASE = 1, +}; + +enum class ECodec { + RAW = 1, + GZIP = 2, + LZOP = 3, + ZSTD = 4, +}; + +struct TCredentials { + enum class EMode { + NOT_SET = 1, + OAUTH_TOKEN = 2, + JWT_PARAMS = 3, + IAM = 4, + }; + + TCredentials() = default; + TCredentials(const Ydb::PersQueue::V1::Credentials& credentials); + EMode GetMode() const; + TString GetOauthToken() const; + TString GetJwtParams() const; + + TString GetIamServiceAccountKey() const; + TString GetIamEndpoint() const; + +private: + EMode Mode_; + Ydb::PersQueue::V1::Credentials Credentials_; +}; + + +// Result for describe resource request. +struct TDescribeTopicResult : public TStatus { friend class NYdb::TProtoAccessor; - - struct TTopicSettings { - TTopicSettings(const Ydb::PersQueue::V1::TopicSettings&); - -#define GETTER(TYPE, NAME) TYPE NAME() const { \ - return NAME ##_; \ - } - - struct TReadRule { - TReadRule(const Ydb::PersQueue::V1::TopicSettings::ReadRule&); - - GETTER(TString, ConsumerName); - GETTER(bool, Important); - GETTER(TInstant, StartingMessageTimestamp); - GETTER(EFormat, SupportedFormat); - const TVector<ECodec>& SupportedCodecs() const { - return SupportedCodecs_; - } - GETTER(ui32, Version); + + struct TTopicSettings { + TTopicSettings(const Ydb::PersQueue::V1::TopicSettings&); + +#define GETTER(TYPE, NAME) TYPE NAME() const { \ + return NAME ##_; \ + } + + struct TReadRule { + TReadRule(const Ydb::PersQueue::V1::TopicSettings::ReadRule&); + + GETTER(TString, ConsumerName); + GETTER(bool, Important); + GETTER(TInstant, StartingMessageTimestamp); + GETTER(EFormat, SupportedFormat); + const TVector<ECodec>& SupportedCodecs() const { + return SupportedCodecs_; + } + GETTER(ui32, Version); GETTER(TString, ServiceType); - - private: - TString ConsumerName_; - bool Important_; - TInstant StartingMessageTimestamp_; - EFormat SupportedFormat_; - TVector<ECodec> SupportedCodecs_; - ui32 Version_; + + private: + TString ConsumerName_; + bool Important_; + TInstant StartingMessageTimestamp_; + EFormat SupportedFormat_; + TVector<ECodec> SupportedCodecs_; + ui32 Version_; TString ServiceType_; - }; - - struct TRemoteMirrorRule { - TRemoteMirrorRule(const Ydb::PersQueue::V1::TopicSettings::RemoteMirrorRule&); - GETTER(TString, Endpoint); - GETTER(TString, TopicPath); - GETTER(TString, ConsumerName); - GETTER(TInstant, StartingMessageTimestamp); - GETTER(TCredentials, Credentials); - GETTER(TString, Database); - - private: - TString Endpoint_; - TString TopicPath_; - TString ConsumerName_; - TInstant StartingMessageTimestamp_; - TCredentials Credentials_; - TString Database_; - }; - - GETTER(ui32, PartitionsCount); - GETTER(TDuration, RetentionPeriod); - GETTER(EFormat, SupportedFormat); - const TVector<ECodec>& SupportedCodecs() const { - return 
SupportedCodecs_; - } - GETTER(ui64, MaxPartitionStorageSize); - GETTER(ui64, MaxPartitionWriteSpeed); - GETTER(ui64, MaxPartitionWriteBurst); - GETTER(bool, ClientWriteDisabled); - - GETTER(bool, AllowUnauthenticatedWrite); - GETTER(bool, AllowUnauthenticatedRead); - GETTER(ui32, PartitionsPerTablet); + }; + + struct TRemoteMirrorRule { + TRemoteMirrorRule(const Ydb::PersQueue::V1::TopicSettings::RemoteMirrorRule&); + GETTER(TString, Endpoint); + GETTER(TString, TopicPath); + GETTER(TString, ConsumerName); + GETTER(TInstant, StartingMessageTimestamp); + GETTER(TCredentials, Credentials); + GETTER(TString, Database); + + private: + TString Endpoint_; + TString TopicPath_; + TString ConsumerName_; + TInstant StartingMessageTimestamp_; + TCredentials Credentials_; + TString Database_; + }; + + GETTER(ui32, PartitionsCount); + GETTER(TDuration, RetentionPeriod); + GETTER(EFormat, SupportedFormat); + const TVector<ECodec>& SupportedCodecs() const { + return SupportedCodecs_; + } + GETTER(ui64, MaxPartitionStorageSize); + GETTER(ui64, MaxPartitionWriteSpeed); + GETTER(ui64, MaxPartitionWriteBurst); + GETTER(bool, ClientWriteDisabled); + + GETTER(bool, AllowUnauthenticatedWrite); + GETTER(bool, AllowUnauthenticatedRead); + GETTER(ui32, PartitionsPerTablet); GETTER(ui32, AbcId); GETTER(TString, AbcSlug); - const TVector<TReadRule>& ReadRules() const { - return ReadRules_; - } - GETTER(TMaybe<TRemoteMirrorRule>, RemoteMirrorRule); - - -#undef GETTER - - private: - ui32 PartitionsCount_; - TDuration RetentionPeriod_; - EFormat SupportedFormat_; - TVector<ECodec> SupportedCodecs_; - ui64 MaxPartitionStorageSize_; - ui64 MaxPartitionWriteSpeed_; - ui64 MaxPartitionWriteBurst_; - bool ClientWriteDisabled_; - bool AllowUnauthenticatedRead_; - bool AllowUnauthenticatedWrite_; - ui32 PartitionsPerTablet_; + const TVector<TReadRule>& ReadRules() const { + return ReadRules_; + } + GETTER(TMaybe<TRemoteMirrorRule>, RemoteMirrorRule); + + +#undef GETTER + + private: + ui32 PartitionsCount_; + TDuration RetentionPeriod_; + EFormat SupportedFormat_; + TVector<ECodec> SupportedCodecs_; + ui64 MaxPartitionStorageSize_; + ui64 MaxPartitionWriteSpeed_; + ui64 MaxPartitionWriteBurst_; + bool ClientWriteDisabled_; + bool AllowUnauthenticatedRead_; + bool AllowUnauthenticatedWrite_; + ui32 PartitionsPerTablet_; ui32 AbcId_; TString AbcSlug_; - TVector<TReadRule> ReadRules_; - TMaybe<TRemoteMirrorRule> RemoteMirrorRule_; - }; - - TDescribeTopicResult(TStatus status, const Ydb::PersQueue::V1::DescribeTopicResult& result); - - const TTopicSettings& TopicSettings() const { - return TopicSettings_; - } - -private: - TTopicSettings TopicSettings_; + TVector<TReadRule> ReadRules_; + TMaybe<TRemoteMirrorRule> RemoteMirrorRule_; + }; + + TDescribeTopicResult(TStatus status, const Ydb::PersQueue::V1::DescribeTopicResult& result); + + const TTopicSettings& TopicSettings() const { + return TopicSettings_; + } + +private: + TTopicSettings TopicSettings_; [[nodiscard]] const Ydb::PersQueue::V1::DescribeTopicResult& GetProto() const { return Proto_; } const Ydb::PersQueue::V1::DescribeTopicResult Proto_; -}; - -using TAsyncDescribeTopicResult = NThreading::TFuture<TDescribeTopicResult>; - - - -const TVector<ECodec>& GetDefaultCodecs(); - +}; + +using TAsyncDescribeTopicResult = NThreading::TFuture<TDescribeTopicResult>; + + + +const TVector<ECodec>& GetDefaultCodecs(); + struct TReadRuleSettings { TReadRuleSettings() {} using TSelf = TReadRuleSettings; @@ -178,7 +178,7 @@ struct TReadRuleSettings { FLUENT_SETTING_DEFAULT(TInstant, 
StartingMessageTimestamp, TInstant::Zero()); FLUENT_SETTING_DEFAULT(EFormat, SupportedFormat, EFormat::BASE) FLUENT_SETTING_DEFAULT(TVector<ECodec>, SupportedCodecs, GetDefaultCodecs()); - + FLUENT_SETTING_DEFAULT(ui32, Version, 0); FLUENT_SETTING(TString, ServiceType); @@ -198,128 +198,128 @@ struct TReadRuleSettings { }; -// Settings for topic. -template <class TDerived> -struct TTopicSettings : public TOperationRequestSettings<TDerived> { - - struct TRemoteMirrorRuleSettings { - TRemoteMirrorRuleSettings() {} - using TSelf = TRemoteMirrorRuleSettings; - FLUENT_SETTING(TString, Endpoint); - FLUENT_SETTING(TString, TopicPath); - FLUENT_SETTING(TString, ConsumerName); - FLUENT_SETTING_DEFAULT(TInstant, StartingMessageTimestamp, TInstant::Zero()); - FLUENT_SETTING(TCredentials, Credentials); - FLUENT_SETTING(TString, Database); - - TRemoteMirrorRuleSettings& SetSettings(const TDescribeTopicResult::TTopicSettings::TRemoteMirrorRule& settings) { - Endpoint_ = settings.Endpoint(); - TopicPath_ = settings.TopicPath(); - ConsumerName_ = settings.ConsumerName(); - StartingMessageTimestamp_ = settings.StartingMessageTimestamp(); - Credentials_ = settings.Credentials(); - Database_ = settings.Database(); - return *this; - } - - }; - - using TSelf = TDerived; - - FLUENT_SETTING_DEFAULT(ui32, PartitionsCount, 1); - FLUENT_SETTING_DEFAULT(TDuration, RetentionPeriod, TDuration::Hours(18)); - FLUENT_SETTING_DEFAULT(EFormat, SupportedFormat, EFormat::BASE) - FLUENT_SETTING_DEFAULT(TVector<ECodec>, SupportedCodecs, GetDefaultCodecs()); - - FLUENT_SETTING_DEFAULT(ui64, MaxPartitionStorageSize, 0); - FLUENT_SETTING_DEFAULT(ui64, MaxPartitionWriteSpeed, 2 * 1024 * 1024); - FLUENT_SETTING_DEFAULT(ui64, MaxPartitionWriteBurst, 2 * 1024 * 1024); - - FLUENT_SETTING_DEFAULT(bool, ClientWriteDisabled, false); - FLUENT_SETTING_DEFAULT(bool, AllowUnauthenticatedWrite, false); - FLUENT_SETTING_DEFAULT(bool, AllowUnauthenticatedRead, false); - - FLUENT_SETTING_DEFAULT(ui32, PartitionsPerTablet, 2); - +// Settings for topic. 
+template <class TDerived> +struct TTopicSettings : public TOperationRequestSettings<TDerived> { + + struct TRemoteMirrorRuleSettings { + TRemoteMirrorRuleSettings() {} + using TSelf = TRemoteMirrorRuleSettings; + FLUENT_SETTING(TString, Endpoint); + FLUENT_SETTING(TString, TopicPath); + FLUENT_SETTING(TString, ConsumerName); + FLUENT_SETTING_DEFAULT(TInstant, StartingMessageTimestamp, TInstant::Zero()); + FLUENT_SETTING(TCredentials, Credentials); + FLUENT_SETTING(TString, Database); + + TRemoteMirrorRuleSettings& SetSettings(const TDescribeTopicResult::TTopicSettings::TRemoteMirrorRule& settings) { + Endpoint_ = settings.Endpoint(); + TopicPath_ = settings.TopicPath(); + ConsumerName_ = settings.ConsumerName(); + StartingMessageTimestamp_ = settings.StartingMessageTimestamp(); + Credentials_ = settings.Credentials(); + Database_ = settings.Database(); + return *this; + } + + }; + + using TSelf = TDerived; + + FLUENT_SETTING_DEFAULT(ui32, PartitionsCount, 1); + FLUENT_SETTING_DEFAULT(TDuration, RetentionPeriod, TDuration::Hours(18)); + FLUENT_SETTING_DEFAULT(EFormat, SupportedFormat, EFormat::BASE) + FLUENT_SETTING_DEFAULT(TVector<ECodec>, SupportedCodecs, GetDefaultCodecs()); + + FLUENT_SETTING_DEFAULT(ui64, MaxPartitionStorageSize, 0); + FLUENT_SETTING_DEFAULT(ui64, MaxPartitionWriteSpeed, 2 * 1024 * 1024); + FLUENT_SETTING_DEFAULT(ui64, MaxPartitionWriteBurst, 2 * 1024 * 1024); + + FLUENT_SETTING_DEFAULT(bool, ClientWriteDisabled, false); + FLUENT_SETTING_DEFAULT(bool, AllowUnauthenticatedWrite, false); + FLUENT_SETTING_DEFAULT(bool, AllowUnauthenticatedRead, false); + + FLUENT_SETTING_DEFAULT(ui32, PartitionsPerTablet, 2); + FLUENT_SETTING_OPTIONAL(ui32, AbcId); FLUENT_SETTING_OPTIONAL(TString, AbcSlug); - FLUENT_SETTING_DEFAULT(TVector<TReadRuleSettings>, ReadRules, {}); - FLUENT_SETTING_OPTIONAL(TRemoteMirrorRuleSettings, RemoteMirrorRule); - - TSelf& SetSettings(const TDescribeTopicResult::TTopicSettings& settings) { - - PartitionsCount_ = settings.PartitionsCount(); - RetentionPeriod_ = settings.RetentionPeriod(); - SupportedFormat_ = settings.SupportedFormat(); - SupportedCodecs_.clear(); - for (const auto& codec : settings.SupportedCodecs()) { - SupportedCodecs_.push_back(codec); - } - MaxPartitionStorageSize_ = settings.MaxPartitionStorageSize(); - MaxPartitionWriteSpeed_ = settings.MaxPartitionWriteSpeed(); - MaxPartitionWriteBurst_ = settings.MaxPartitionWriteBurst(); - ClientWriteDisabled_ = settings.ClientWriteDisabled(); - PartitionsPerTablet_ = settings.PartitionsPerTablet(); + FLUENT_SETTING_DEFAULT(TVector<TReadRuleSettings>, ReadRules, {}); + FLUENT_SETTING_OPTIONAL(TRemoteMirrorRuleSettings, RemoteMirrorRule); + + TSelf& SetSettings(const TDescribeTopicResult::TTopicSettings& settings) { + + PartitionsCount_ = settings.PartitionsCount(); + RetentionPeriod_ = settings.RetentionPeriod(); + SupportedFormat_ = settings.SupportedFormat(); + SupportedCodecs_.clear(); + for (const auto& codec : settings.SupportedCodecs()) { + SupportedCodecs_.push_back(codec); + } + MaxPartitionStorageSize_ = settings.MaxPartitionStorageSize(); + MaxPartitionWriteSpeed_ = settings.MaxPartitionWriteSpeed(); + MaxPartitionWriteBurst_ = settings.MaxPartitionWriteBurst(); + ClientWriteDisabled_ = settings.ClientWriteDisabled(); + PartitionsPerTablet_ = settings.PartitionsPerTablet(); if (settings.AbcId()) AbcId_ = settings.AbcId(); if (!settings.AbcSlug().empty()) AbcSlug_ = settings.AbcSlug(); - AllowUnauthenticatedRead_ = settings.AllowUnauthenticatedRead(); - AllowUnauthenticatedWrite_ = 
settings.AllowUnauthenticatedWrite(); - - ReadRules_.clear(); - for (const auto& readRule : settings.ReadRules()) { - ReadRules_.push_back({}); - ReadRules_.back().SetSettings(readRule); - } - if (settings.RemoteMirrorRule()) { - RemoteMirrorRule_ = TRemoteMirrorRuleSettings().SetSettings(settings.RemoteMirrorRule().GetRef()); - } - return static_cast<TDerived&>(*this); - } - -}; - - -// Settings for create resource request. -struct TCreateTopicSettings : public TTopicSettings<TCreateTopicSettings> { -}; - -// Settings for alter resource request. -struct TAlterTopicSettings : public TTopicSettings<TAlterTopicSettings> { -}; - -// Settings for drop resource request. -struct TDropTopicSettings : public TOperationRequestSettings<TDropTopicSettings> {}; - -// Settings for describe resource request. -struct TDescribeTopicSettings : public TOperationRequestSettings<TDescribeTopicSettings> {}; - + AllowUnauthenticatedRead_ = settings.AllowUnauthenticatedRead(); + AllowUnauthenticatedWrite_ = settings.AllowUnauthenticatedWrite(); + + ReadRules_.clear(); + for (const auto& readRule : settings.ReadRules()) { + ReadRules_.push_back({}); + ReadRules_.back().SetSettings(readRule); + } + if (settings.RemoteMirrorRule()) { + RemoteMirrorRule_ = TRemoteMirrorRuleSettings().SetSettings(settings.RemoteMirrorRule().GetRef()); + } + return static_cast<TDerived&>(*this); + } + +}; + + +// Settings for create resource request. +struct TCreateTopicSettings : public TTopicSettings<TCreateTopicSettings> { +}; + +// Settings for alter resource request. +struct TAlterTopicSettings : public TTopicSettings<TAlterTopicSettings> { +}; + +// Settings for drop resource request. +struct TDropTopicSettings : public TOperationRequestSettings<TDropTopicSettings> {}; + +// Settings for describe resource request. +struct TDescribeTopicSettings : public TOperationRequestSettings<TDescribeTopicSettings> {}; + // Settings for add read rule request struct TAddReadRuleSettings : public TTopicSettings<TAddReadRuleSettings> { FLUENT_SETTING(TReadRuleSettings, ReadRule); }; - + // Settings for remove read rule request struct TRemoveReadRuleSettings : public TOperationRequestSettings<TRemoveReadRuleSettings> { FLUENT_SETTING(TString, ConsumerName); }; -//! Session metainformation. -struct TWriteSessionMeta : public TThrRefBase { - using TPtr = TIntrusivePtr<TWriteSessionMeta>; - - //! User defined fields. - THashMap<TString, TString> Fields; -}; - -//! Event that is sent to client during session destruction. -struct TSessionClosedEvent : public TStatus { - using TStatus::TStatus; - - TString DebugString() const; -}; - +//! Session metainformation. +struct TWriteSessionMeta : public TThrRefBase { + using TPtr = TIntrusivePtr<TWriteSessionMeta>; + + //! User defined fields. + THashMap<TString, TString> Fields; +}; + +//! Event that is sent to client during session destruction. 
+struct TSessionClosedEvent : public TStatus { + using TStatus::TStatus; + + TString DebugString() const; +}; + struct TWriteStat : public TThrRefBase { TDuration WriteTime; TDuration TotalTimeInPartitionQueue; @@ -342,17 +342,17 @@ enum class EClusterDiscoveryMode { Off }; -class TContinuationToken : public TMoveOnly { +class TContinuationToken : public TMoveOnly { friend class TWriteSession; private: - TContinuationToken() = default; + TContinuationToken() = default; }; struct TWriterCounters : public TThrRefBase { using TSelf = TWriterCounters; using TPtr = TIntrusivePtr<TSelf>; - explicit TWriterCounters(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters); + explicit TWriterCounters(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters); NMonitoring::TDynamicCounters::TCounterPtr Errors; NMonitoring::TDynamicCounters::TCounterPtr CurrentSessionLifetimeMs; @@ -380,102 +380,102 @@ struct TWriterCounters : public TThrRefBase { NMonitoring::THistogramPtr CompressedBytesInflightUsageByTime; }; -struct TReaderCounters : public TThrRefBase { - using TSelf = TReaderCounters; - using TPtr = TIntrusivePtr<TSelf>; - - TReaderCounters() = default; - explicit TReaderCounters(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters); - - NMonitoring::TDynamicCounters::TCounterPtr Errors; - NMonitoring::TDynamicCounters::TCounterPtr CurrentSessionLifetimeMs; - - NMonitoring::TDynamicCounters::TCounterPtr BytesRead; - NMonitoring::TDynamicCounters::TCounterPtr MessagesRead; - NMonitoring::TDynamicCounters::TCounterPtr BytesReadCompressed; - - NMonitoring::TDynamicCounters::TCounterPtr BytesInflightUncompressed; - NMonitoring::TDynamicCounters::TCounterPtr BytesInflightCompressed; - NMonitoring::TDynamicCounters::TCounterPtr BytesInflightTotal; - NMonitoring::TDynamicCounters::TCounterPtr MessagesInflight; - - //! Histograms reporting % usage of memory limit in time. - //! Provides a histogram looking like: 10% : 100ms, 20%: 300ms, ... 50%: 200ms, ... 100%: 50ms - //! Which means that < 10% memory usage was observed for 100ms during the period and 50% usage was observed for 200ms - //! Used to monitor if the read session successfully deals with data flow provided. Larger values in higher buckets - //! mean that read session is close to overflow (or being overflown) for major periods of time. - //! - //! Total memory usage. - NMonitoring::THistogramPtr TotalBytesInflightUsageByTime; - //! Memory usage by messages waiting that are ready to be received by user. - NMonitoring::THistogramPtr UncompressedBytesInflightUsageByTime; - //! Memory usage by compressed messages pending for decompression. - NMonitoring::THistogramPtr CompressedBytesInflightUsageByTime; -}; - -//! Partition stream. 
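TWriterCounters and TReaderCounters above are built on top of an NMonitoring::TDynamicCounters subtree, so session metrics can be linked to an application's existing registry through the Counters() setting of the write/read session settings declared further down in this header. A small sketch of that wiring, assuming the usual util/monlib helpers (MakeIntrusive, TDynamicCounters::GetSubgroup) and treating the subgroup labels as illustrative values:

// Assumes this header (persqueue.h) and library/cpp/monlib/dynamic_counters/counters.h are included.
void WireReaderCounters(NYdb::NPersQueue::TReadSessionSettings& settings,
                        const TIntrusivePtr<NMonitoring::TDynamicCounters>& root) {
    // Hang SDK reader metrics off an existing counters subtree
    // ("component"/"pq_reader" is an example label, not a required name).
    auto counters = MakeIntrusive<NYdb::NPersQueue::TReaderCounters>(
        root->GetSubgroup("component", "pq_reader"));
    settings.Counters(counters);
}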
-struct TPartitionStream : public TThrRefBase { - using TPtr = TIntrusivePtr<TPartitionStream>; - - +struct TReaderCounters : public TThrRefBase { + using TSelf = TReaderCounters; + using TPtr = TIntrusivePtr<TSelf>; + + TReaderCounters() = default; + explicit TReaderCounters(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters); + + NMonitoring::TDynamicCounters::TCounterPtr Errors; + NMonitoring::TDynamicCounters::TCounterPtr CurrentSessionLifetimeMs; + + NMonitoring::TDynamicCounters::TCounterPtr BytesRead; + NMonitoring::TDynamicCounters::TCounterPtr MessagesRead; + NMonitoring::TDynamicCounters::TCounterPtr BytesReadCompressed; + + NMonitoring::TDynamicCounters::TCounterPtr BytesInflightUncompressed; + NMonitoring::TDynamicCounters::TCounterPtr BytesInflightCompressed; + NMonitoring::TDynamicCounters::TCounterPtr BytesInflightTotal; + NMonitoring::TDynamicCounters::TCounterPtr MessagesInflight; + + //! Histograms reporting % usage of memory limit in time. + //! Provides a histogram looking like: 10% : 100ms, 20%: 300ms, ... 50%: 200ms, ... 100%: 50ms + //! Which means that < 10% memory usage was observed for 100ms during the period and 50% usage was observed for 200ms + //! Used to monitor if the read session successfully deals with data flow provided. Larger values in higher buckets + //! mean that read session is close to overflow (or being overflown) for major periods of time. + //! + //! Total memory usage. + NMonitoring::THistogramPtr TotalBytesInflightUsageByTime; + //! Memory usage by messages waiting that are ready to be received by user. + NMonitoring::THistogramPtr UncompressedBytesInflightUsageByTime; + //! Memory usage by compressed messages pending for decompression. + NMonitoring::THistogramPtr CompressedBytesInflightUsageByTime; +}; + +//! Partition stream. +struct TPartitionStream : public TThrRefBase { + using TPtr = TIntrusivePtr<TPartitionStream>; + + public: - - //! Temporary stop receiving data from this partition stream. - // virtual void StopReading() = 0; // Not implemented yet. - - //! Resume receiving data from this partition stream after StopReading() call. - // virtual void ResumeReading() = 0; // Not implemented yet. - - //! Request partition stream status. - //! Result will come to TPartitionStreamStatusEvent. - virtual void RequestStatus() = 0; - - //! - //! Properties. - //! - - //! Unique identifier of partition stream inside session. - //! It is unique inside one read session. - ui64 GetPartitionStreamId() const { - return PartitionStreamId; - } - - //! Topic path. - const TString& GetTopicPath() const { - return TopicPath; - } - - //! Cluster name. - const TString& GetCluster() const { - return Cluster; - } - - //! Partition group id. - ui64 GetPartitionGroupId() const { - return PartitionGroupId; - } - - //! Partition id. - ui64 GetPartitionId() const { - return PartitionId; - } - -protected: - ui64 PartitionStreamId; - TString TopicPath; - TString Cluster; - ui64 PartitionGroupId; - ui64 PartitionId; -}; - - -//! Events for read session. -struct TReadSessionEvent { - - //! Event with new data. - //! Contains batch of messages from single partition stream. - struct TDataReceivedEvent { - + + //! Temporary stop receiving data from this partition stream. + // virtual void StopReading() = 0; // Not implemented yet. + + //! Resume receiving data from this partition stream after StopReading() call. + // virtual void ResumeReading() = 0; // Not implemented yet. + + //! Request partition stream status. + //! 
Result will come to TPartitionStreamStatusEvent. + virtual void RequestStatus() = 0; + + //! + //! Properties. + //! + + //! Unique identifier of partition stream inside session. + //! It is unique inside one read session. + ui64 GetPartitionStreamId() const { + return PartitionStreamId; + } + + //! Topic path. + const TString& GetTopicPath() const { + return TopicPath; + } + + //! Cluster name. + const TString& GetCluster() const { + return Cluster; + } + + //! Partition group id. + ui64 GetPartitionGroupId() const { + return PartitionGroupId; + } + + //! Partition id. + ui64 GetPartitionId() const { + return PartitionId; + } + +protected: + ui64 PartitionStreamId; + TString TopicPath; + TString Cluster; + ui64 PartitionGroupId; + ui64 PartitionId; +}; + + +//! Events for read session. +struct TReadSessionEvent { + + //! Event with new data. + //! Contains batch of messages from single partition stream. + struct TDataReceivedEvent { + struct TMessageInformation { TMessageInformation(ui64 offset, TString messageGroupId, @@ -525,53 +525,53 @@ struct TReadSessionEvent { TString ExplicitHash; }; - //! Single message. + //! Single message. struct TMessage : public IMessage { - //! User data. - //! Throws decompressor exception if decompression failed. + //! User data. + //! Throws decompressor exception if decompression failed. const TString& GetData() const override; - + bool HasException() const; - - //! Message offset. + + //! Message offset. ui64 GetOffset() const; - - //! Message group id. + + //! Message group id. const TString& GetMessageGroupId() const; - - //! Sequence number. + + //! Sequence number. ui64 GetSeqNo() const; - - //! Message creation timestamp. + + //! Message creation timestamp. TInstant GetCreateTime() const; - - //! Message write timestamp. + + //! Message write timestamp. TInstant GetWriteTime() const; - - //! Ip address of message source host. + + //! Ip address of message source host. const TString& GetIp() const; - - //! Metainfo. + + //! Metainfo. const TWriteSessionMeta::TPtr& GetMeta() const; - - TMessage(const TString& data, - std::exception_ptr decompressionException, + + TMessage(const TString& data, + std::exception_ptr decompressionException, const TMessageInformation& information, TPartitionStream::TPtr partitionStream, const TString& partitionKey, const TString& explicitHash); - + //! Commits single message. void Commit() override; using IMessage::DebugString; void DebugString(TStringBuilder& ret, bool printData = false) const override; - private: - std::exception_ptr DecompressionException; + private: + std::exception_ptr DecompressionException; TMessageInformation Information; - }; - + }; + struct TCompressedMessage : public IMessage { //! Messages count in compressed data ui64 GetBlocksCount() const; @@ -622,11 +622,11 @@ struct TReadSessionEvent { TVector<TMessageInformation> Information; }; - //! Partition stream. - const TPartitionStream::TPtr& GetPartitionStream() const { - return PartitionStream; - } - + //! Partition stream. + const TPartitionStream::TPtr& GetPartitionStream() const { + return PartitionStream; + } + bool IsCompressedMessages() const { return !CompressedMessages.empty(); } @@ -635,17 +635,17 @@ struct TReadSessionEvent { return Messages.size() + CompressedMessages.size(); } - //! Get messages. - TVector<TMessage>& GetMessages() { + //! Get messages. 
+ TVector<TMessage>& GetMessages() { CheckMessagesFilled(false); - return Messages; - } - - const TVector<TMessage>& GetMessages() const { + return Messages; + } + + const TVector<TMessage>& GetMessages() const { CheckMessagesFilled(false); - return Messages; - } - + return Messages; + } + //! Get compressed messages. TVector<TCompressedMessage>& GetCompressedMessages() { CheckMessagesFilled(true); @@ -657,16 +657,16 @@ struct TReadSessionEvent { return CompressedMessages; } - //! Commits all messages in batch. - void Commit(); - - TString DebugString(bool printData = false) const; - - TDataReceivedEvent(TVector<TMessage> messages, + //! Commits all messages in batch. + void Commit(); + + TString DebugString(bool printData = false) const; + + TDataReceivedEvent(TVector<TMessage> messages, TVector<TCompressedMessage> compressedMessages, - TPartitionStream::TPtr partitionStream); - - private: + TPartitionStream::TPtr partitionStream); + + private: void CheckMessagesFilled(bool compressed) const { Y_VERIFY(!Messages.empty() || !CompressedMessages.empty()); if (compressed && CompressedMessages.empty()) { @@ -678,305 +678,305 @@ struct TReadSessionEvent { } private: - TVector<TMessage> Messages; + TVector<TMessage> Messages; TVector<TCompressedMessage> CompressedMessages; - TPartitionStream::TPtr PartitionStream; - std::vector<std::pair<ui64, ui64>> OffsetRanges; - }; - - //! Acknowledgement for commit request. - struct TCommitAcknowledgementEvent { - //! Partition stream. - const TPartitionStream::TPtr& GetPartitionStream() const { - return PartitionStream; - } - - //! Committed offset. - //! This means that from now the first available - //! message offset in current partition + TPartitionStream::TPtr PartitionStream; + std::vector<std::pair<ui64, ui64>> OffsetRanges; + }; + + //! Acknowledgement for commit request. + struct TCommitAcknowledgementEvent { + //! Partition stream. + const TPartitionStream::TPtr& GetPartitionStream() const { + return PartitionStream; + } + + //! Committed offset. + //! This means that from now the first available + //! message offset in current partition //! for current consumer is this offset. //! All messages before are committed and futher never be available. - ui64 GetCommittedOffset() const { - return CommittedOffset; - } - - TString DebugString() const; - - TCommitAcknowledgementEvent(TPartitionStream::TPtr partitionStream, ui64 committedOffset); - - private: - TPartitionStream::TPtr PartitionStream; - ui64 CommittedOffset; - }; - - //! Server request for creating partition stream. - struct TCreatePartitionStreamEvent { + ui64 GetCommittedOffset() const { + return CommittedOffset; + } + + TString DebugString() const; + + TCommitAcknowledgementEvent(TPartitionStream::TPtr partitionStream, ui64 committedOffset); + + private: + TPartitionStream::TPtr PartitionStream; + ui64 CommittedOffset; + }; + + //! Server request for creating partition stream. + struct TCreatePartitionStreamEvent { explicit TCreatePartitionStreamEvent(TPartitionStream::TPtr, ui64 committedOffset, ui64 endOffset); - - const TPartitionStream::TPtr& GetPartitionStream() const { - return PartitionStream; - } - + + const TPartitionStream::TPtr& GetPartitionStream() const { + return PartitionStream; + } + //! Current committed offset in partition stream. ui64 GetCommittedOffset() const { return CommittedOffset; - } - - //! Offset of first not existing message in partition stream. - ui64 GetEndOffset() const { - return EndOffset; - } - - //! Confirm partition stream creation. - //! 
This signals that user is ready to receive data from this partition stream. - //! If maybe is empty then no rewinding - void Confirm(TMaybe<ui64> readOffset = Nothing(), TMaybe<ui64> commitOffset = Nothing()); - - TString DebugString() const; - - private: - TPartitionStream::TPtr PartitionStream; - ui64 CommittedOffset; - ui64 EndOffset; - }; - - //! Server request for destroying partition stream. - //! Server can destroy partition stream gracefully - //! for rebalancing among all topic clients. - struct TDestroyPartitionStreamEvent { - const TPartitionStream::TPtr& GetPartitionStream() const { - return PartitionStream; - } - - //! Last offset of the partition stream that was committed. - ui64 GetCommittedOffset() const { - return CommittedOffset; - } - - //! Confirm partition stream destruction. - //! Confirm has no effect if TPartitionStreamClosedEvent for same partition stream with is received. - void Confirm(); - - TString DebugString() const; - - TDestroyPartitionStreamEvent(TPartitionStream::TPtr partitionStream, bool committedOffset); - - private: - TPartitionStream::TPtr PartitionStream; + } + + //! Offset of first not existing message in partition stream. + ui64 GetEndOffset() const { + return EndOffset; + } + + //! Confirm partition stream creation. + //! This signals that user is ready to receive data from this partition stream. + //! If maybe is empty then no rewinding + void Confirm(TMaybe<ui64> readOffset = Nothing(), TMaybe<ui64> commitOffset = Nothing()); + + TString DebugString() const; + + private: + TPartitionStream::TPtr PartitionStream; ui64 CommittedOffset; - }; - - //! Status for partition stream requested via TPartitionStream::RequestStatus() - struct TPartitionStreamStatusEvent { - const TPartitionStream::TPtr& GetPartitionStream() const { - return PartitionStream; - } - - //! Committed offset. - ui64 GetCommittedOffset() const { - return CommittedOffset; - } - - //! Offset of next message (that is not yet read by session). - ui64 GetReadOffset() const { - return ReadOffset; - } - - //! Offset of first not existing message in partition. - ui64 GetEndOffset() const { - return EndOffset; - } - - //! Write watermark. - //! The last written timestamp of message in this partition stream. - TInstant GetWriteWatermark() const { - return WriteWatermark; - } - - TString DebugString() const; - - TPartitionStreamStatusEvent(TPartitionStream::TPtr partitionStream, ui64 committedOffset, ui64 readOffset, ui64 endOffset, TInstant writeWatermark); - - private: - TPartitionStream::TPtr PartitionStream; - ui64 CommittedOffset = 0; - ui64 ReadOffset = 0; - ui64 EndOffset = 0; - TInstant WriteWatermark; - }; - - //! Event that signals user about - //! partition stream death. - //! This could be after graceful destruction of - //! partition stream or when connection with partition was lost. - struct TPartitionStreamClosedEvent { - enum class EReason { - DestroyConfirmedByUser, - Lost, - ConnectionLost, - }; - - const TPartitionStream::TPtr& GetPartitionStream() const { - return PartitionStream; - } - - EReason GetReason() const { - return Reason; - } - - TString DebugString() const; - - TPartitionStreamClosedEvent(TPartitionStream::TPtr partitionStream, EReason reason); - - private: - TPartitionStream::TPtr PartitionStream; - EReason Reason; - }; - - using TEvent = std::variant<TDataReceivedEvent, - TCommitAcknowledgementEvent, - TCreatePartitionStreamEvent, - TDestroyPartitionStreamEvent, - TPartitionStreamStatusEvent, - TPartitionStreamClosedEvent, - TSessionClosedEvent>; -}; - -//! 
Set of offsets to commit. -//! Class that could store offsets in order to commit them later. -//! This class is not thread safe. -class TDeferredCommit { -public: - //! Add message to set. - void Add(const TReadSessionEvent::TDataReceivedEvent::TMessage& message); - - //! Add all messages from dataReceivedEvent to set. - void Add(const TReadSessionEvent::TDataReceivedEvent& dataReceivedEvent); - + ui64 EndOffset; + }; + + //! Server request for destroying partition stream. + //! Server can destroy partition stream gracefully + //! for rebalancing among all topic clients. + struct TDestroyPartitionStreamEvent { + const TPartitionStream::TPtr& GetPartitionStream() const { + return PartitionStream; + } + + //! Last offset of the partition stream that was committed. + ui64 GetCommittedOffset() const { + return CommittedOffset; + } + + //! Confirm partition stream destruction. + //! Confirm has no effect if TPartitionStreamClosedEvent for same partition stream with is received. + void Confirm(); + + TString DebugString() const; + + TDestroyPartitionStreamEvent(TPartitionStream::TPtr partitionStream, bool committedOffset); + + private: + TPartitionStream::TPtr PartitionStream; + ui64 CommittedOffset; + }; + + //! Status for partition stream requested via TPartitionStream::RequestStatus() + struct TPartitionStreamStatusEvent { + const TPartitionStream::TPtr& GetPartitionStream() const { + return PartitionStream; + } + + //! Committed offset. + ui64 GetCommittedOffset() const { + return CommittedOffset; + } + + //! Offset of next message (that is not yet read by session). + ui64 GetReadOffset() const { + return ReadOffset; + } + + //! Offset of first not existing message in partition. + ui64 GetEndOffset() const { + return EndOffset; + } + + //! Write watermark. + //! The last written timestamp of message in this partition stream. + TInstant GetWriteWatermark() const { + return WriteWatermark; + } + + TString DebugString() const; + + TPartitionStreamStatusEvent(TPartitionStream::TPtr partitionStream, ui64 committedOffset, ui64 readOffset, ui64 endOffset, TInstant writeWatermark); + + private: + TPartitionStream::TPtr PartitionStream; + ui64 CommittedOffset = 0; + ui64 ReadOffset = 0; + ui64 EndOffset = 0; + TInstant WriteWatermark; + }; + + //! Event that signals user about + //! partition stream death. + //! This could be after graceful destruction of + //! partition stream or when connection with partition was lost. + struct TPartitionStreamClosedEvent { + enum class EReason { + DestroyConfirmedByUser, + Lost, + ConnectionLost, + }; + + const TPartitionStream::TPtr& GetPartitionStream() const { + return PartitionStream; + } + + EReason GetReason() const { + return Reason; + } + + TString DebugString() const; + + TPartitionStreamClosedEvent(TPartitionStream::TPtr partitionStream, EReason reason); + + private: + TPartitionStream::TPtr PartitionStream; + EReason Reason; + }; + + using TEvent = std::variant<TDataReceivedEvent, + TCommitAcknowledgementEvent, + TCreatePartitionStreamEvent, + TDestroyPartitionStreamEvent, + TPartitionStreamStatusEvent, + TPartitionStreamClosedEvent, + TSessionClosedEvent>; +}; + +//! Set of offsets to commit. +//! Class that could store offsets in order to commit them later. +//! This class is not thread safe. +class TDeferredCommit { +public: + //! Add message to set. + void Add(const TReadSessionEvent::TDataReceivedEvent::TMessage& message); + + //! Add all messages from dataReceivedEvent to set. 
+ void Add(const TReadSessionEvent::TDataReceivedEvent& dataReceivedEvent); + //! Add offsets range to set. void Add(const TPartitionStream::TPtr& partitionStream, ui64 startOffset, ui64 endOffset); //! Add offset to set. void Add(const TPartitionStream::TPtr& partitionStream, ui64 offset); - //! Commit all added offsets. - void Commit(); - - TDeferredCommit(); - TDeferredCommit(const TDeferredCommit&) = delete; + //! Commit all added offsets. + void Commit(); + + TDeferredCommit(); + TDeferredCommit(const TDeferredCommit&) = delete; TDeferredCommit(TDeferredCommit&&); TDeferredCommit& operator=(const TDeferredCommit&) = delete; TDeferredCommit& operator=(TDeferredCommit&&); - - ~TDeferredCommit(); - -private: - class TImpl; - THolder<TImpl> Impl; -}; - -//! Event debug string. -TString DebugString(const TReadSessionEvent::TEvent& event); - - -//! Retry policy. -//! Calculates delay before next retry. -//! Has several default implementations: -//! - exponential backoff policy; -//! - retries with fixed interval; -//! - no retries. -//! TODO: move to common header (not persqueue). - -//! Retry state of single request. -struct IRetryState { - using TPtr = std::unique_ptr<IRetryState>; - - virtual ~IRetryState() = default; - - //! Calculate delay before next retry if next retry is allowed. - //! Returns empty maybe if retry is not allowed anymore. - virtual TMaybe<TDuration> GetNextRetryDelay(const TStatus& status) = 0; -}; - -struct IRetryPolicy { - using TPtr = std::shared_ptr<IRetryPolicy>; - - virtual ~IRetryPolicy() = default; - - //! Function that is called after first error - //! to find out a futher retry behaviour. - //! Retry state is expected to be created for the whole single retry session. - virtual IRetryState::TPtr CreateRetryState() const = 0; - - //! - //! Default implementations. - //! - - static TPtr GetDefaultPolicy(); // Exponential backoff with infinite retry attempts. - static TPtr GetNoRetryPolicy(); // Denies all kind of retries. - - enum class ERetryErrorClass { - // This error shouldn't be retried. - NoRetry, - - // This error could be retried in short period of time. - ShortRetry, - - // This error requires waiting before it could be retried. - LongRetry, - }; - - //! Randomized exponential backoff policy. - static TPtr GetExponentialBackoffPolicy(TDuration minDelay = TDuration::MilliSeconds(10), - // Delay for statuses that require waiting before retry (such as OVERLOADED). - TDuration minLongRetryDelay = TDuration::MilliSeconds(200), - TDuration maxDelay = TDuration::Seconds(30), - size_t maxRetries = std::numeric_limits<size_t>::max(), - TDuration maxTime = TDuration::Max(), - double scaleFactor = 2.0, - std::function<ERetryErrorClass(EStatus)> customRetryClassFunction = {}); - - //! Randomized fixed interval policy. - static TPtr GetFixedIntervalPolicy(TDuration delay = TDuration::MilliSeconds(100), - // Delay for statuses that require waiting before retry (such as OVERLOADED). - TDuration longRetryDelay = TDuration::MilliSeconds(300), - size_t maxRetries = std::numeric_limits<size_t>::max(), - TDuration maxTime = TDuration::Max(), - std::function<ERetryErrorClass(EStatus)> customRetryClassFunction = {}); -}; - -class IExecutor : public TThrRefBase { + + ~TDeferredCommit(); + +private: + class TImpl; + THolder<TImpl> Impl; +}; + +//! Event debug string. +TString DebugString(const TReadSessionEvent::TEvent& event); + + +//! Retry policy. +//! Calculates delay before next retry. +//! Has several default implementations: +//! - exponential backoff policy; +//! 
- retries with fixed interval; +//! - no retries. +//! TODO: move to common header (not persqueue). + +//! Retry state of single request. +struct IRetryState { + using TPtr = std::unique_ptr<IRetryState>; + + virtual ~IRetryState() = default; + + //! Calculate delay before next retry if next retry is allowed. + //! Returns empty maybe if retry is not allowed anymore. + virtual TMaybe<TDuration> GetNextRetryDelay(const TStatus& status) = 0; +}; + +struct IRetryPolicy { + using TPtr = std::shared_ptr<IRetryPolicy>; + + virtual ~IRetryPolicy() = default; + + //! Function that is called after first error + //! to find out a futher retry behaviour. + //! Retry state is expected to be created for the whole single retry session. + virtual IRetryState::TPtr CreateRetryState() const = 0; + + //! + //! Default implementations. + //! + + static TPtr GetDefaultPolicy(); // Exponential backoff with infinite retry attempts. + static TPtr GetNoRetryPolicy(); // Denies all kind of retries. + + enum class ERetryErrorClass { + // This error shouldn't be retried. + NoRetry, + + // This error could be retried in short period of time. + ShortRetry, + + // This error requires waiting before it could be retried. + LongRetry, + }; + + //! Randomized exponential backoff policy. + static TPtr GetExponentialBackoffPolicy(TDuration minDelay = TDuration::MilliSeconds(10), + // Delay for statuses that require waiting before retry (such as OVERLOADED). + TDuration minLongRetryDelay = TDuration::MilliSeconds(200), + TDuration maxDelay = TDuration::Seconds(30), + size_t maxRetries = std::numeric_limits<size_t>::max(), + TDuration maxTime = TDuration::Max(), + double scaleFactor = 2.0, + std::function<ERetryErrorClass(EStatus)> customRetryClassFunction = {}); + + //! Randomized fixed interval policy. + static TPtr GetFixedIntervalPolicy(TDuration delay = TDuration::MilliSeconds(100), + // Delay for statuses that require waiting before retry (such as OVERLOADED). + TDuration longRetryDelay = TDuration::MilliSeconds(300), + size_t maxRetries = std::numeric_limits<size_t>::max(), + TDuration maxTime = TDuration::Max(), + std::function<ERetryErrorClass(EStatus)> customRetryClassFunction = {}); +}; + +class IExecutor : public TThrRefBase { public: - using TPtr = TIntrusivePtr<IExecutor>; - using TFunction = std::function<void()>; + using TPtr = TIntrusivePtr<IExecutor>; + using TFunction = std::function<void()>; - // Is executor asynchronous. - virtual bool IsAsync() const = 0; + // Is executor asynchronous. + virtual bool IsAsync() const = 0; - // Post function to execute. + // Post function to execute. virtual void Post(TFunction&& f) = 0; - - // Start method. - // This method is idempotent. - // It can be called many times. Only the first one has effect. - void Start() { - with_lock (StartLock) { - if (!Started) { - DoStart(); - Started = true; - } - } - } - -private: - virtual void DoStart() = 0; - -private: - bool Started = false; - TAdaptiveLock StartLock; + + // Start method. + // This method is idempotent. + // It can be called many times. Only the first one has effect. + void Start() { + with_lock (StartLock) { + if (!Started) { + DoStart(); + Started = true; + } + } + } + +private: + virtual void DoStart() = 0; + +private: + bool Started = false; + TAdaptiveLock StartLock; }; -IExecutor::TPtr CreateThreadPoolExecutorAdapter(std::shared_ptr<IThreadPool> threadPool); // Thread pool is expected to have been started. 
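IRetryPolicy and IExecutor above are plugged into the session settings declared further down in this header (RetryPolicy, CompressionExecutor, DecompressionExecutor, HandlersExecutor). A brief sketch of that wiring for a read session; the consumer name, delays and thread count are illustrative values only:

// Assumes ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h is included.
using namespace NYdb::NPersQueue;

TReadSessionSettings MakeReadSettings() {
    TReadSessionSettings settings;
    settings.ConsumerName("my-consumer"); // hypothetical consumer name

    // Retry transient failures with randomized exponential backoff,
    // waiting longer before retrying statuses such as OVERLOADED.
    settings.RetryPolicy(IRetryPolicy::GetExponentialBackoffPolicy(
        TDuration::MilliSeconds(10),   // minDelay
        TDuration::MilliSeconds(200),  // minLongRetryDelay
        TDuration::Seconds(30)));      // maxDelay

    // Run decompression on a dedicated thread pool instead of the default executor.
    settings.DecompressionExecutor(CreateThreadPoolExecutor(4));
    return settings;
}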
-IExecutor::TPtr CreateThreadPoolExecutor(size_t threads); +IExecutor::TPtr CreateThreadPoolExecutorAdapter(std::shared_ptr<IThreadPool> threadPool); // Thread pool is expected to have been started. +IExecutor::TPtr CreateThreadPoolExecutor(size_t threads); //! Events for write session. struct TWriteSessionEvent { @@ -1015,15 +1015,15 @@ struct TWriteSessionEvent { }; //! Indicates that a writer is ready to accept new message(s). - //! Continuation token should be kept and then used in write methods. + //! Continuation token should be kept and then used in write methods. struct TReadyToAcceptEvent { - TContinuationToken ContinuationToken; + TContinuationToken ContinuationToken; TString DebugString() const; }; - using TEvent = std::variant<TAcksEvent, TReadyToAcceptEvent, TSessionClosedEvent>; + using TEvent = std::variant<TAcksEvent, TReadyToAcceptEvent, TSessionClosedEvent>; }; //! Event debug string. @@ -1056,14 +1056,14 @@ struct TWriteSessionSettings : public TRequestSettings<TWriteSessionSettings> { //! Using this option is not recommended unless you know for sure why you need it. FLUENT_SETTING_OPTIONAL(ui32, PartitionGroupId); - //! Preferred LB cluster. Used for multi-cluster installation. - //! If specified cluster is unavailable, session will write to other cluster. - FLUENT_SETTING_OPTIONAL(TString, PreferredCluster); - - //! Write to other clusters if there are problems with connection - //! to the first one. - FLUENT_SETTING_DEFAULT(bool, AllowFallbackToOtherClusters, true); + //! Preferred LB cluster. Used for multi-cluster installation. + //! If specified cluster is unavailable, session will write to other cluster. + FLUENT_SETTING_OPTIONAL(TString, PreferredCluster); + //! Write to other clusters if there are problems with connection + //! to the first one. + FLUENT_SETTING_DEFAULT(bool, AllowFallbackToOtherClusters, true); + //! codec and level to use for data compression prior to write. FLUENT_SETTING_DEFAULT(ECodec, Codec, ECodec::GZIP); FLUENT_SETTING_DEFAULT(i32, CompressionLevel, 4); @@ -1077,7 +1077,7 @@ struct TWriteSessionSettings : public TRequestSettings<TWriteSessionSettings> { FLUENT_SETTING_DEFAULT(ui32, MaxInflightCount, 100000); //! Retry policy enables automatic retries for non-fatal errors. - //! IRetryPolicy::GetDefaultPolicy() if null (not set). + //! IRetryPolicy::GetDefaultPolicy() if null (not set). FLUENT_SETTING(IRetryPolicy::TPtr, RetryPolicy); //! User metadata that may be attached to write session. @@ -1101,15 +1101,15 @@ struct TWriteSessionSettings : public TRequestSettings<TWriteSessionSettings> { FLUENT_SETTING_OPTIONAL(TWriterCounters::TPtr, Counters); - //! Executor for compression tasks. - //! If not set, default executor will be used. - FLUENT_SETTING(IExecutor::TPtr, CompressionExecutor); - + //! Executor for compression tasks. + //! If not set, default executor will be used. + FLUENT_SETTING(IExecutor::TPtr, CompressionExecutor); + struct TEventHandlers { using TSelf = TEventHandlers; using TWriteAckHandler = std::function<void(TWriteSessionEvent::TAcksEvent&)>; using TReadyToAcceptHandler = std::function<void(TWriteSessionEvent::TReadyToAcceptEvent&)>; - + //! Function to handle Acks events. //! If this handler is set, write ack events will be handled by handler, //! otherwise sent to TWriteSession::GetEvent(). @@ -1124,16 +1124,16 @@ struct TWriteSessionSettings : public TRequestSettings<TWriteSessionSettings> { //! If this handler is set, close session events will be handled by handler //! and then sent to TWriteSession::GetEvent(). 
FLUENT_SETTING(TSessionClosedHandler, SessionClosedHandler); - - //! Function to handle all event types. - //! If event with current type has no handler for this type of event, - //! this handler (if specified) will be used. - //! If this handler is not specified, event can be received with TReadSession::GetEvent() method. - FLUENT_SETTING(std::function<void(TReadSessionEvent::TEvent&)>, CommonHandler); - - //! Executor for handlers. - //! If not set, default single threaded executor will be used. - FLUENT_SETTING(IExecutor::TPtr, HandlersExecutor); + + //! Function to handle all event types. + //! If event with current type has no handler for this type of event, + //! this handler (if specified) will be used. + //! If this handler is not specified, event can be received with TReadSession::GetEvent() method. + FLUENT_SETTING(std::function<void(TReadSessionEvent::TEvent&)>, CommonHandler); + + //! Executor for handlers. + //! If not set, default single threaded executor will be used. + FLUENT_SETTING(IExecutor::TPtr, HandlersExecutor); }; //! Event handlers. @@ -1147,188 +1147,188 @@ struct TWriteSessionSettings : public TRequestSettings<TWriteSessionSettings> { }; -//! Read settings for single topic. -struct TTopicReadSettings { - using TSelf = TTopicReadSettings; - - TTopicReadSettings() = default; - TTopicReadSettings(const TTopicReadSettings&) = default; - TTopicReadSettings(TTopicReadSettings&&) = default; - TTopicReadSettings(const TString& path) { - Path(path); - } - - TTopicReadSettings& operator=(const TTopicReadSettings&) = default; - TTopicReadSettings& operator=(TTopicReadSettings&&) = default; - - //! Path of topic to read. - FLUENT_SETTING(TString, Path); - - //! Start reading from this timestamp. - FLUENT_SETTING_OPTIONAL(TInstant, StartingMessageTimestamp); - - //! Partition groups to read. - //! 1-based. - FLUENT_SETTING_VECTOR(ui64, PartitionGroupIds); -}; - -//! Settings for read session. -struct TReadSessionSettings : public TRequestSettings<TReadSessionSettings> { - using TSelf = TReadSessionSettings; - - struct TEventHandlers { - using TSelf = TEventHandlers; - - //! Set simple handler with data processing and also - //! set other handlers with default behaviour. - //! They automatically commit data after processing - //! and confirm partition stream events. - //! - //! Sets the following handlers: - //! DataReceivedHandler: sets DataReceivedHandler to handler that calls dataHandler and (if commitDataAfterProcessing is set) then calls Commit(). - //! CommitAcknowledgementHandler to handler that does nothing. - //! CreatePartitionStreamHandler to handler that confirms event. - //! DestroyPartitionStreamHandler to handler that confirms event. - //! PartitionStreamStatusHandler to handler that does nothing. - //! PartitionStreamClosedHandler to handler that does nothing. - //! - //! dataHandler: handler of data event. - //! commitDataAfterProcessing: automatically commit data after calling of dataHandler. - //! gracefulReleaseAfterCommit: wait for commit acknowledgements for all inflight data before confirming partition stream destroy. - TSelf& SimpleDataHandlers(std::function<void(TReadSessionEvent::TDataReceivedEvent&)> dataHandler, bool commitDataAfterProcessing = false, bool gracefulReleaseAfterCommit = true); - - //! Function to handle data events. - //! If this handler is set, data events will be handled by handler, - //! otherwise sent to TReadSession::GetEvent(). - //! Default value is empty function (not set). 
- FLUENT_SETTING(std::function<void(TReadSessionEvent::TDataReceivedEvent&)>, DataReceivedHandler); - - //! Function to handle commit ack events. - //! If this handler is set, commit ack events will be handled by handler, - //! otherwise sent to TReadSession::GetEvent(). - //! Default value is empty function (not set). - FLUENT_SETTING(std::function<void(TReadSessionEvent::TCommitAcknowledgementEvent&)>, CommitAcknowledgementHandler); - - //! Function to handle create partition stream events. - //! If this handler is set, create partition stream events will be handled by handler, - //! otherwise sent to TReadSession::GetEvent(). - //! Default value is empty function (not set). - FLUENT_SETTING(std::function<void(TReadSessionEvent::TCreatePartitionStreamEvent&)>, CreatePartitionStreamHandler); - - //! Function to handle destroy partition stream events. - //! If this handler is set, destroy partition stream events will be handled by handler, - //! otherwise sent to TReadSession::GetEvent(). - //! Default value is empty function (not set). - FLUENT_SETTING(std::function<void(TReadSessionEvent::TDestroyPartitionStreamEvent&)>, DestroyPartitionStreamHandler); - - //! Function to handle partition stream status events. - //! If this handler is set, partition stream status events will be handled by handler, - //! otherwise sent to TReadSession::GetEvent(). - //! Default value is empty function (not set). - FLUENT_SETTING(std::function<void(TReadSessionEvent::TPartitionStreamStatusEvent&)>, PartitionStreamStatusHandler); - - //! Function to handle partition stream closed events. - //! If this handler is set, partition stream closed events will be handled by handler, - //! otherwise sent to TReadSession::GetEvent(). - //! Default value is empty function (not set). - FLUENT_SETTING(std::function<void(TReadSessionEvent::TPartitionStreamClosedEvent&)>, PartitionStreamClosedHandler); - - //! Function to handle session closed events. - //! If this handler is set, close session events will be handled by handler - //! and then sent to TReadSession::GetEvent(). - //! Default value is empty function (not set). +//! Read settings for single topic. +struct TTopicReadSettings { + using TSelf = TTopicReadSettings; + + TTopicReadSettings() = default; + TTopicReadSettings(const TTopicReadSettings&) = default; + TTopicReadSettings(TTopicReadSettings&&) = default; + TTopicReadSettings(const TString& path) { + Path(path); + } + + TTopicReadSettings& operator=(const TTopicReadSettings&) = default; + TTopicReadSettings& operator=(TTopicReadSettings&&) = default; + + //! Path of topic to read. + FLUENT_SETTING(TString, Path); + + //! Start reading from this timestamp. + FLUENT_SETTING_OPTIONAL(TInstant, StartingMessageTimestamp); + + //! Partition groups to read. + //! 1-based. + FLUENT_SETTING_VECTOR(ui64, PartitionGroupIds); +}; + +//! Settings for read session. +struct TReadSessionSettings : public TRequestSettings<TReadSessionSettings> { + using TSelf = TReadSessionSettings; + + struct TEventHandlers { + using TSelf = TEventHandlers; + + //! Set simple handler with data processing and also + //! set other handlers with default behaviour. + //! They automatically commit data after processing + //! and confirm partition stream events. + //! + //! Sets the following handlers: + //! DataReceivedHandler: sets DataReceivedHandler to handler that calls dataHandler and (if commitDataAfterProcessing is set) then calls Commit(). + //! CommitAcknowledgementHandler to handler that does nothing. + //! 
CreatePartitionStreamHandler to handler that confirms event. + //! DestroyPartitionStreamHandler to handler that confirms event. + //! PartitionStreamStatusHandler to handler that does nothing. + //! PartitionStreamClosedHandler to handler that does nothing. + //! + //! dataHandler: handler of data event. + //! commitDataAfterProcessing: automatically commit data after calling of dataHandler. + //! gracefulReleaseAfterCommit: wait for commit acknowledgements for all inflight data before confirming partition stream destroy. + TSelf& SimpleDataHandlers(std::function<void(TReadSessionEvent::TDataReceivedEvent&)> dataHandler, bool commitDataAfterProcessing = false, bool gracefulReleaseAfterCommit = true); + + //! Function to handle data events. + //! If this handler is set, data events will be handled by handler, + //! otherwise sent to TReadSession::GetEvent(). + //! Default value is empty function (not set). + FLUENT_SETTING(std::function<void(TReadSessionEvent::TDataReceivedEvent&)>, DataReceivedHandler); + + //! Function to handle commit ack events. + //! If this handler is set, commit ack events will be handled by handler, + //! otherwise sent to TReadSession::GetEvent(). + //! Default value is empty function (not set). + FLUENT_SETTING(std::function<void(TReadSessionEvent::TCommitAcknowledgementEvent&)>, CommitAcknowledgementHandler); + + //! Function to handle create partition stream events. + //! If this handler is set, create partition stream events will be handled by handler, + //! otherwise sent to TReadSession::GetEvent(). + //! Default value is empty function (not set). + FLUENT_SETTING(std::function<void(TReadSessionEvent::TCreatePartitionStreamEvent&)>, CreatePartitionStreamHandler); + + //! Function to handle destroy partition stream events. + //! If this handler is set, destroy partition stream events will be handled by handler, + //! otherwise sent to TReadSession::GetEvent(). + //! Default value is empty function (not set). + FLUENT_SETTING(std::function<void(TReadSessionEvent::TDestroyPartitionStreamEvent&)>, DestroyPartitionStreamHandler); + + //! Function to handle partition stream status events. + //! If this handler is set, partition stream status events will be handled by handler, + //! otherwise sent to TReadSession::GetEvent(). + //! Default value is empty function (not set). + FLUENT_SETTING(std::function<void(TReadSessionEvent::TPartitionStreamStatusEvent&)>, PartitionStreamStatusHandler); + + //! Function to handle partition stream closed events. + //! If this handler is set, partition stream closed events will be handled by handler, + //! otherwise sent to TReadSession::GetEvent(). + //! Default value is empty function (not set). + FLUENT_SETTING(std::function<void(TReadSessionEvent::TPartitionStreamClosedEvent&)>, PartitionStreamClosedHandler); + + //! Function to handle session closed events. + //! If this handler is set, close session events will be handled by handler + //! and then sent to TReadSession::GetEvent(). + //! Default value is empty function (not set). FLUENT_SETTING(TSessionClosedHandler, SessionClosedHandler); - - //! Function to handle all event types. - //! If event with current type has no handler for this type of event, - //! this handler (if specified) will be used. - //! If this handler is not specified, event can be received with TReadSession::GetEvent() method. - FLUENT_SETTING(std::function<void(TReadSessionEvent::TEvent&)>, CommonHandler); - - //! Executor for handlers. - //! If not set, default single threaded executor will be used. 
- FLUENT_SETTING(IExecutor::TPtr, HandlersExecutor); - }; - - //! Consumer. - FLUENT_SETTING(TString, ConsumerName); - - //! Topics. - FLUENT_SETTING_VECTOR(TTopicReadSettings, Topics); - - //! Default variant. - //! Read topic instance specified in "Topics" from all clusters. - TSelf& ReadAll() { - Clusters_.clear(); - return ReadOnlyOriginal(true); - } - - //! Read original topic instances specified in "Topics" from several clusters. - TSelf& ReadOriginal(TVector<TString> clusters) { - Clusters_ = std::move(clusters); - return ReadOnlyOriginal(true); - } - - //! Read mirrored topics specified in "Topics" from one cluster. - TSelf& ReadMirrored(const TString& cluster) { - Clusters_ = { cluster }; - return ReadOnlyOriginal(false); - } - - //! Disable Clusters discovery. ReadMirrored/ReadOriginal/ReadAll will not have any effect + + //! Function to handle all event types. + //! If event with current type has no handler for this type of event, + //! this handler (if specified) will be used. + //! If this handler is not specified, event can be received with TReadSession::GetEvent() method. + FLUENT_SETTING(std::function<void(TReadSessionEvent::TEvent&)>, CommonHandler); + + //! Executor for handlers. + //! If not set, default single threaded executor will be used. + FLUENT_SETTING(IExecutor::TPtr, HandlersExecutor); + }; + + //! Consumer. + FLUENT_SETTING(TString, ConsumerName); + + //! Topics. + FLUENT_SETTING_VECTOR(TTopicReadSettings, Topics); + + //! Default variant. + //! Read topic instance specified in "Topics" from all clusters. + TSelf& ReadAll() { + Clusters_.clear(); + return ReadOnlyOriginal(true); + } + + //! Read original topic instances specified in "Topics" from several clusters. + TSelf& ReadOriginal(TVector<TString> clusters) { + Clusters_ = std::move(clusters); + return ReadOnlyOriginal(true); + } + + //! Read mirrored topics specified in "Topics" from one cluster. + TSelf& ReadMirrored(const TString& cluster) { + Clusters_ = { cluster }; + return ReadOnlyOriginal(false); + } + + //! Disable Clusters discovery. ReadMirrored/ReadOriginal/ReadAll will not have any effect //! if this option is true. FLUENT_SETTING_DEFAULT(bool, DisableClusterDiscovery, false); - //! Maximum memory usage for read session. - FLUENT_SETTING_DEFAULT(size_t, MaxMemoryUsageBytes, 100 * 1024 * 1024); - - //! Max message time lag. All messages older that now - MaxTimeLag will be ignored. - FLUENT_SETTING_OPTIONAL(TDuration, MaxTimeLag); - - //! Start reading from this timestamp. - FLUENT_SETTING_OPTIONAL(TInstant, StartingMessageTimestamp); - - //! Policy for reconnections. - //! IRetryPolicy::GetDefaultPolicy() if null (not set). - FLUENT_SETTING(IRetryPolicy::TPtr, RetryPolicy); - - //! Event handlers. - //! See description in TEventHandlers class. - FLUENT_SETTING(TEventHandlers, EventHandlers); - + //! Maximum memory usage for read session. + FLUENT_SETTING_DEFAULT(size_t, MaxMemoryUsageBytes, 100 * 1024 * 1024); + + //! Max message time lag. All messages older that now - MaxTimeLag will be ignored. + FLUENT_SETTING_OPTIONAL(TDuration, MaxTimeLag); + + //! Start reading from this timestamp. + FLUENT_SETTING_OPTIONAL(TInstant, StartingMessageTimestamp); + + //! Policy for reconnections. + //! IRetryPolicy::GetDefaultPolicy() if null (not set). + FLUENT_SETTING(IRetryPolicy::TPtr, RetryPolicy); + + //! Event handlers. + //! See description in TEventHandlers class. + FLUENT_SETTING(TEventHandlers, EventHandlers); + //! Decompress messages FLUENT_SETTING_DEFAULT(bool, Decompress, true); - //! 
Executor for decompression tasks. - //! If not set, default executor will be used. - FLUENT_SETTING(IExecutor::TPtr, DecompressionExecutor); - - //! Counters. - //! If counters are not provided explicitly, - //! they will be created inside session (without link with parent counters). - FLUENT_SETTING(TReaderCounters::TPtr, Counters); - - //! Read only original topic instance, don't read mirrored. - //! - //! It's better to control this setting via ReadAll()/ReadMirrored()/ReadOriginal() helpers. - FLUENT_SETTING_DEFAULT(bool, ReadOnlyOriginal, true); - - //! Read topics from specified clusters. - //! - //! It's better to control this setting via ReadAll()/ReadMirrored()/ReadOriginal() helpers. - //! - //! 1. If ReadOnlyOriginal is true and Clusters are empty read will be done from all topic instances from all clusters. - //! Use ReadAll() function for this variant. - //! 2. If ReadOnlyOriginal is true and Clusters are not empty read will be done from specified clusters. - //! Use ReadOriginal() function for this variant. - //! 3. If ReadOnlyOriginal is false and one cluster is specified read will be done from all topic instances (mirrored and original) in one cluster. - //! Use ReadMirrored() function for this variant. - FLUENT_SETTING_VECTOR(TString, Clusters); + //! Executor for decompression tasks. + //! If not set, default executor will be used. + FLUENT_SETTING(IExecutor::TPtr, DecompressionExecutor); + + //! Counters. + //! If counters are not provided explicitly, + //! they will be created inside session (without link with parent counters). + FLUENT_SETTING(TReaderCounters::TPtr, Counters); + + //! Read only original topic instance, don't read mirrored. + //! + //! It's better to control this setting via ReadAll()/ReadMirrored()/ReadOriginal() helpers. + FLUENT_SETTING_DEFAULT(bool, ReadOnlyOriginal, true); + + //! Read topics from specified clusters. + //! + //! It's better to control this setting via ReadAll()/ReadMirrored()/ReadOriginal() helpers. + //! + //! 1. If ReadOnlyOriginal is true and Clusters are empty read will be done from all topic instances from all clusters. + //! Use ReadAll() function for this variant. + //! 2. If ReadOnlyOriginal is true and Clusters are not empty read will be done from specified clusters. + //! Use ReadOriginal() function for this variant. + //! 3. If ReadOnlyOriginal is false and one cluster is specified read will be done from all topic instances (mirrored and original) in one cluster. + //! Use ReadMirrored() function for this variant. + FLUENT_SETTING_VECTOR(TString, Clusters); FLUENT_SETTING_DEFAULT(TDuration, ConnectTimeout, TDuration::Seconds(30)); -}; - -//! Simple write session. Does not need event handlers. Does not provide Events, ContinuationTokens, write Acks. +}; + +//! Simple write session. Does not need event handlers. Does not provide Events, ContinuationTokens, write Acks. class ISimpleBlockingWriteSession : public TThrRefBase { public: //! Write single message. Blocks for up to blockTimeout if inflight is full or memoryUsage is exceeded; @@ -1361,20 +1361,20 @@ public: //! Future that is set when next event is available. virtual NThreading::TFuture<void> WaitEvent() = 0; - //! Wait and return next event. Use WaitEvent() for non-blocking wait. - virtual TMaybe<TWriteSessionEvent::TEvent> GetEvent(bool block = false) = 0; + //! Wait and return next event. Use WaitEvent() for non-blocking wait. + virtual TMaybe<TWriteSessionEvent::TEvent> GetEvent(bool block = false) = 0; //! Get several events in one call. //! 
If blocking = false, instantly returns up to maxEventsCount available events. //! If blocking = true, blocks till maxEventsCount events are available. //! If maxEventsCount is unset, write session decides the count to return itself. - virtual TVector<TWriteSessionEvent::TEvent> GetEvents(bool block = false, TMaybe<size_t> maxEventsCount = Nothing()) = 0; + virtual TVector<TWriteSessionEvent::TEvent> GetEvents(bool block = false, TMaybe<size_t> maxEventsCount = Nothing()) = 0; //! Future that is set when initial SeqNo is available. virtual NThreading::TFuture<ui64> GetInitSeqNo() = 0; //! Write single message. - //! continuationToken - a token earlier provided to client with ReadyToAccept event. + //! continuationToken - a token earlier provided to client with ReadyToAccept event. virtual void Write(TContinuationToken&& continuationToken, TStringBuf data, TMaybe<ui64> seqNo = Nothing(), TMaybe<TInstant> createTimestamp = Nothing()) = 0; //! Write single message that is already coded by codec. Codec from settings does not apply to this message. @@ -1394,110 +1394,110 @@ public: virtual ~IWriteSession() = default; }; -class IReadSession { -public: - //! Main reader loop. - //! Wait for next reader event. - virtual NThreading::TFuture<void> WaitEvent() = 0; - - //! Main reader loop. - //! Get read session events. - //! Blocks until event occurs if "block" is set. - //! - //! maxEventsCount: maximum events count in batch. - //! maxByteSize: total size limit of data messages in batch. - //! block: block until event occurs. - //! - //! If maxEventsCount is not specified, - //! read session chooses event batch size automatically. - virtual TVector<TReadSessionEvent::TEvent> GetEvents(bool block = false, TMaybe<size_t> maxEventsCount = Nothing(), size_t maxByteSize = std::numeric_limits<size_t>::max()) = 0; - - //! Get single event. - virtual TMaybe<TReadSessionEvent::TEvent> GetEvent(bool block = false, size_t maxByteSize = std::numeric_limits<size_t>::max()) = 0; - - //! Add topic to session, in other words, start reading new topic. - // virtual void AddTopic(const TTopicReadSettings& topicReadSettings) = 0; // Not implemented yet. - - //! Remove topic from session. - // virtual void RemoveTopic(const TString& path) = 0; // Not implemented yet. - - //! Remove partition groups of topic from session. - // virtual void RemoveTopic(const TString& path, const TVector<ui64>& partitionGruops) = 0; // Not implemented yet. - - //! Stop reading data and process only control events. - //! You might need this function if a receiving side - //! is not ready to process data. - //! Not implemented yet. - virtual void StopReadingData() = 0; - - //! Resume reading data. - //! Not implemented yet. - virtual void ResumeReadingData() = 0; - - //! Close read session. - //! Waits for all commit acknowledgments to arrive. - //! Force close after timeout. - //! This method is blocking. - //! When session is closed, - //! TSessionClosedEvent arrives. - virtual bool Close(TDuration timeout = TDuration::Max()) = 0; - - //! Reader counters with different stats (see TReaderConuters). - virtual TReaderCounters::TPtr GetCounters() const = 0; - - //! Get unique identifier of read session. - virtual TString GetSessionId() const = 0; - - virtual ~IReadSession() = default; -}; - +class IReadSession { +public: + //! Main reader loop. + //! Wait for next reader event. + virtual NThreading::TFuture<void> WaitEvent() = 0; + + //! Main reader loop. + //! Get read session events. + //! Blocks until event occurs if "block" is set. + //! 
+ //! maxEventsCount: maximum events count in batch. + //! maxByteSize: total size limit of data messages in batch. + //! block: block until event occurs. + //! + //! If maxEventsCount is not specified, + //! read session chooses event batch size automatically. + virtual TVector<TReadSessionEvent::TEvent> GetEvents(bool block = false, TMaybe<size_t> maxEventsCount = Nothing(), size_t maxByteSize = std::numeric_limits<size_t>::max()) = 0; + + //! Get single event. + virtual TMaybe<TReadSessionEvent::TEvent> GetEvent(bool block = false, size_t maxByteSize = std::numeric_limits<size_t>::max()) = 0; + + //! Add topic to session, in other words, start reading new topic. + // virtual void AddTopic(const TTopicReadSettings& topicReadSettings) = 0; // Not implemented yet. + + //! Remove topic from session. + // virtual void RemoveTopic(const TString& path) = 0; // Not implemented yet. + + //! Remove partition groups of topic from session. + // virtual void RemoveTopic(const TString& path, const TVector<ui64>& partitionGruops) = 0; // Not implemented yet. + + //! Stop reading data and process only control events. + //! You might need this function if a receiving side + //! is not ready to process data. + //! Not implemented yet. + virtual void StopReadingData() = 0; + + //! Resume reading data. + //! Not implemented yet. + virtual void ResumeReadingData() = 0; + + //! Close read session. + //! Waits for all commit acknowledgments to arrive. + //! Force close after timeout. + //! This method is blocking. + //! When session is closed, + //! TSessionClosedEvent arrives. + virtual bool Close(TDuration timeout = TDuration::Max()) = 0; + + //! Reader counters with different stats (see TReaderConuters). + virtual TReaderCounters::TPtr GetCounters() const = 0; + + //! Get unique identifier of read session. + virtual TString GetSessionId() const = 0; + + virtual ~IReadSession() = default; +}; + struct TPersQueueClientSettings : public TCommonClientSettingsBase<TPersQueueClientSettings> { using TSelf = TPersQueueClientSettings; - //! Default executor for compression tasks. - FLUENT_SETTING_DEFAULT(IExecutor::TPtr, DefaultCompressionExecutor, CreateThreadPoolExecutor(2)); - - //! Default executor for callbacks. - FLUENT_SETTING_DEFAULT(IExecutor::TPtr, DefaultHandlersExecutor, CreateThreadPoolExecutor(1)); + //! Default executor for compression tasks. + FLUENT_SETTING_DEFAULT(IExecutor::TPtr, DefaultCompressionExecutor, CreateThreadPoolExecutor(2)); + + //! Default executor for callbacks. + FLUENT_SETTING_DEFAULT(IExecutor::TPtr, DefaultHandlersExecutor, CreateThreadPoolExecutor(1)); //! Manages cluster discovery mode. FLUENT_SETTING_DEFAULT(EClusterDiscoveryMode, ClusterDiscoveryMode, EClusterDiscoveryMode::On); -}; - -// PersQueue client. -class TPersQueueClient { -public: - class TImpl; - - TPersQueueClient(const TDriver& driver, const TPersQueueClientSettings& settings = TPersQueueClientSettings()); - - // Create a new topic. - TAsyncStatus CreateTopic(const TString& path, const TCreateTopicSettings& = {}); - - // Update a topic. - TAsyncStatus AlterTopic(const TString& path, const TAlterTopicSettings& = {}); - - // Delete a topic. - TAsyncStatus DropTopic(const TString& path, const TDropTopicSettings& = {}); - - // Add topic read rule +}; + +// PersQueue client. +class TPersQueueClient { +public: + class TImpl; + + TPersQueueClient(const TDriver& driver, const TPersQueueClientSettings& settings = TPersQueueClientSettings()); + + // Create a new topic. 
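// Illustrative sketch of the client and read-session API declared in this header:
// create a client from a TDriver, open a read session and poll its events.
// 'driver' and all literal values are placeholders; error handling is omitted.
NYdb::NPersQueue::TPersQueueClient client(driver);
auto readSettings = NYdb::NPersQueue::TReadSessionSettings()
    .AppendTopics({"my-topic"})
    .ConsumerName("my-consumer");
std::shared_ptr<NYdb::NPersQueue::IReadSession> session = client.CreateReadSession(readSettings);
while (TMaybe<NYdb::NPersQueue::TReadSessionEvent::TEvent> event = session->GetEvent(/* block = */ true)) {
    if (auto* data = std::get_if<NYdb::NPersQueue::TReadSessionEvent::TDataReceivedEvent>(&*event)) {
        data->Commit();    // commit after processing, as the SimpleDataHandlers wrapper would
    } else if (auto* create = std::get_if<NYdb::NPersQueue::TReadSessionEvent::TCreatePartitionStreamEvent>(&*event)) {
        create->Confirm(); // allow the server to start sending data for this partition stream
    } else if (std::get_if<NYdb::NPersQueue::TSessionClosedEvent>(&*event)) {
        break;             // session closed, stop polling
    }
}
session->Close(TDuration::Seconds(10)); // wait for commit acks, force close after the timeout
// Write sessions are created analogously via CreateWriteSession() or
// CreateSimpleBlockingWriteSession() below.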
+ TAsyncStatus CreateTopic(const TString& path, const TCreateTopicSettings& = {}); + + // Update a topic. + TAsyncStatus AlterTopic(const TString& path, const TAlterTopicSettings& = {}); + + // Delete a topic. + TAsyncStatus DropTopic(const TString& path, const TDropTopicSettings& = {}); + + // Add topic read rule TAsyncStatus AddReadRule(const TString& path, const TAddReadRuleSettings& = {}); // Remove topic read rule TAsyncStatus RemoveReadRule(const TString& path, const TRemoveReadRuleSettings& = {}); - // Describe settings of topic. - TAsyncDescribeTopicResult DescribeTopic(const TString& path, const TDescribeTopicSettings& = {}); - - //! Create read session. - std::shared_ptr<IReadSession> CreateReadSession(const TReadSessionSettings& settings); - - //! Create write session. + // Describe settings of topic. + TAsyncDescribeTopicResult DescribeTopic(const TString& path, const TDescribeTopicSettings& = {}); + + //! Create read session. + std::shared_ptr<IReadSession> CreateReadSession(const TReadSessionSettings& settings); + + //! Create write session. std::shared_ptr<ISimpleBlockingWriteSession> CreateSimpleBlockingWriteSession(const TWriteSessionSettings& settings); std::shared_ptr<IWriteSession> CreateWriteSession(const TWriteSessionSettings& settings); - -private: - std::shared_ptr<TImpl> Impl_; -}; - -} // namespace NYdb::NPersQueue + +private: + std::shared_ptr<TImpl> Impl_; +}; + +} // namespace NYdb::NPersQueue diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/common_ut.cpp b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/common_ut.cpp index f21f49a8592..497f273e3fe 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/common_ut.cpp +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/common_ut.cpp @@ -1,23 +1,23 @@ #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/common.h> - -#include <library/cpp/testing/unittest/registar.h> - -using NYdb::NPersQueue::ApplyClusterEndpoint; - -Y_UNIT_TEST_SUITE(ApplyClusterEndpointTest) { - Y_UNIT_TEST(NoPorts) { - UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver", "cluster"), "cluster"); - UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver", "clus:ter"), "clus:ter"); - UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("dri:ver", "cluster"), "cluster"); - } - - Y_UNIT_TEST(PortFromCds) { - UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver", "cluster:80"), "cluster:80"); - UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver:75", "cluster:80"), "cluster:80"); - } - - Y_UNIT_TEST(PortFromDriver) { - UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver:45", "cluster"), "cluster:45"); - UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver:75", "cluster:8A0"), "[cluster:8A0]:75"); - } -} + +#include <library/cpp/testing/unittest/registar.h> + +using NYdb::NPersQueue::ApplyClusterEndpoint; + +Y_UNIT_TEST_SUITE(ApplyClusterEndpointTest) { + Y_UNIT_TEST(NoPorts) { + UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver", "cluster"), "cluster"); + UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver", "clus:ter"), "clus:ter"); + UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("dri:ver", "cluster"), "cluster"); + } + + Y_UNIT_TEST(PortFromCds) { + UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver", "cluster:80"), "cluster:80"); + UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver:75", "cluster:80"), "cluster:80"); + } + + Y_UNIT_TEST(PortFromDriver) { + UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver:45", "cluster"), "cluster:45"); + 
UNIT_ASSERT_STRINGS_EQUAL(ApplyClusterEndpoint("driver:75", "cluster:8A0"), "[cluster:8A0]:75"); + } +} diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/read_session_ut.cpp b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/read_session_ut.cpp index 9b2367e3f32..c923e969332 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/read_session_ut.cpp +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/read_session_ut.cpp @@ -1,5 +1,5 @@ #include "ut_utils.h" - + #define INCLUDE_YDB_INTERNAL_H #include <ydb/public/sdk/cpp/client/impl/ydb_internal/logger/log.h> #undef INCLUDE_YDB_INTERNAL_H @@ -7,699 +7,699 @@ #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h> #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/impl/read_session.h> -#include <library/cpp/streams/zstd/zstd.h> -#include <library/cpp/testing/gmock_in_unittest/gmock.h> -#include <library/cpp/testing/unittest/registar.h> - -#include <util/stream/zlib.h> - -#include <future> - -using namespace NYdb; -using namespace NYdb::NPersQueue; -using IExecutor = NYdb::NPersQueue::IExecutor; -using namespace ::testing; // Google mock. - -#define UNIT_ASSERT_EVENT_TYPE(event, type) \ - UNIT_ASSERT_C( \ - std::holds_alternative<type>(event), \ - "Real event got: " << DebugString(event)) \ - /**/ - -#define UNIT_ASSERT_NOT_EVENT_TYPE(event, type) \ - UNIT_ASSERT_C( \ - !std::holds_alternative<type>(event), \ - "Real event got: " << DebugString(event)) \ - /**/ - -TString Compress(const TString& sourceData, Ydb::PersQueue::V1::Codec codec = Ydb::PersQueue::V1::CODEC_GZIP) { - if (codec == Ydb::PersQueue::V1::CODEC_RAW || codec == Ydb::PersQueue::V1::CODEC_UNSPECIFIED) { - return sourceData; - } - - TString compressed; - TStringOutput out(compressed); - THolder<IOutputStream> coder; - switch (codec) { - case Ydb::PersQueue::V1::CODEC_GZIP: - coder = MakeHolder<TZLibCompress>(&out, ZLib::GZip); - break; - case Ydb::PersQueue::V1::CODEC_LZOP: - throw yexception() << "LZO codec is disabled"; - break; - case Ydb::PersQueue::V1::CODEC_ZSTD: - coder = MakeHolder<TZstdCompress>(&out); - break; - default: - UNIT_ASSERT(false); - } - coder->Write(sourceData); - coder->Finish(); - return compressed; -} - -template <class TRequest, class TResponse> -struct TMockProcessorFactory : public ISessionConnectionProcessorFactory<TRequest, TResponse> { - using IFactory = ISessionConnectionProcessorFactory<TRequest, TResponse>; - - virtual ~TMockProcessorFactory() { - Wait(); - } - - void CreateProcessor( // ISessionConnectionProcessorFactory method. 
- typename IFactory::TConnectedCallback callback, - const TRpcRequestSettings& requestSettings, - NGrpc::IQueueClientContextPtr connectContext, - TDuration connectTimeout, - NGrpc::IQueueClientContextPtr connectTimeoutContext, - typename IFactory::TConnectTimeoutCallback connectTimeoutCallback, - TDuration connectDelay, - NGrpc::IQueueClientContextPtr connectDelayOperationContext) override - { - UNIT_ASSERT_C(!ConnectedCallback, "Only one connect at a time is expected"); - UNIT_ASSERT_C(!ConnectTimeoutCallback, "Only one connect at a time is expected"); - ConnectedCallback = callback; - ConnectTimeoutCallback = connectTimeoutCallback; - - Y_UNUSED(requestSettings); - UNIT_ASSERT(connectContext); - UNIT_ASSERT(connectTimeout); - UNIT_ASSERT(connectTimeoutContext); - UNIT_ASSERT(connectTimeoutCallback); - UNIT_ASSERT(!connectDelay || connectDelayOperationContext); - - OnCreateProcessor(++CreateCallsCount); - } - +#include <library/cpp/streams/zstd/zstd.h> +#include <library/cpp/testing/gmock_in_unittest/gmock.h> +#include <library/cpp/testing/unittest/registar.h> + +#include <util/stream/zlib.h> + +#include <future> + +using namespace NYdb; +using namespace NYdb::NPersQueue; +using IExecutor = NYdb::NPersQueue::IExecutor; +using namespace ::testing; // Google mock. + +#define UNIT_ASSERT_EVENT_TYPE(event, type) \ + UNIT_ASSERT_C( \ + std::holds_alternative<type>(event), \ + "Real event got: " << DebugString(event)) \ + /**/ + +#define UNIT_ASSERT_NOT_EVENT_TYPE(event, type) \ + UNIT_ASSERT_C( \ + !std::holds_alternative<type>(event), \ + "Real event got: " << DebugString(event)) \ + /**/ + +TString Compress(const TString& sourceData, Ydb::PersQueue::V1::Codec codec = Ydb::PersQueue::V1::CODEC_GZIP) { + if (codec == Ydb::PersQueue::V1::CODEC_RAW || codec == Ydb::PersQueue::V1::CODEC_UNSPECIFIED) { + return sourceData; + } + + TString compressed; + TStringOutput out(compressed); + THolder<IOutputStream> coder; + switch (codec) { + case Ydb::PersQueue::V1::CODEC_GZIP: + coder = MakeHolder<TZLibCompress>(&out, ZLib::GZip); + break; + case Ydb::PersQueue::V1::CODEC_LZOP: + throw yexception() << "LZO codec is disabled"; + break; + case Ydb::PersQueue::V1::CODEC_ZSTD: + coder = MakeHolder<TZstdCompress>(&out); + break; + default: + UNIT_ASSERT(false); + } + coder->Write(sourceData); + coder->Finish(); + return compressed; +} + +template <class TRequest, class TResponse> +struct TMockProcessorFactory : public ISessionConnectionProcessorFactory<TRequest, TResponse> { + using IFactory = ISessionConnectionProcessorFactory<TRequest, TResponse>; + + virtual ~TMockProcessorFactory() { + Wait(); + } + + void CreateProcessor( // ISessionConnectionProcessorFactory method. 
+ typename IFactory::TConnectedCallback callback, + const TRpcRequestSettings& requestSettings, + NGrpc::IQueueClientContextPtr connectContext, + TDuration connectTimeout, + NGrpc::IQueueClientContextPtr connectTimeoutContext, + typename IFactory::TConnectTimeoutCallback connectTimeoutCallback, + TDuration connectDelay, + NGrpc::IQueueClientContextPtr connectDelayOperationContext) override + { + UNIT_ASSERT_C(!ConnectedCallback, "Only one connect at a time is expected"); + UNIT_ASSERT_C(!ConnectTimeoutCallback, "Only one connect at a time is expected"); + ConnectedCallback = callback; + ConnectTimeoutCallback = connectTimeoutCallback; + + Y_UNUSED(requestSettings); + UNIT_ASSERT(connectContext); + UNIT_ASSERT(connectTimeout); + UNIT_ASSERT(connectTimeoutContext); + UNIT_ASSERT(connectTimeoutCallback); + UNIT_ASSERT(!connectDelay || connectDelayOperationContext); + + OnCreateProcessor(++CreateCallsCount); + } + MOCK_METHOD(void, ValidateConnectTimeout, (TDuration), ()); - - // Handler is called in CreateProcessor() method after parameter validation. + + // Handler is called in CreateProcessor() method after parameter validation. MOCK_METHOD(void, OnCreateProcessor, (size_t callNumber)); // 1-based - - // Actions to use in OnCreateProcessor handler: - void CreateProcessor(typename IFactory::IProcessor::TPtr processor) { // Success. - UNIT_ASSERT(ConnectedCallback); - auto cb = std::move(ConnectedCallback); - ConnectedCallback = nullptr; - ConnectTimeoutCallback = nullptr; - with_lock (Lock) { - CallbackFutures.push(std::async(std::launch::async, std::move(cb), TPlainStatus(), processor)); - } - } - - void FailCreation(EStatus status = EStatus::INTERNAL_ERROR, const TString& message = {}) { // Fail. - UNIT_ASSERT(ConnectedCallback); - auto cb = std::move(ConnectedCallback); - ConnectedCallback = nullptr; - ConnectTimeoutCallback = nullptr; - with_lock (Lock) { - CallbackFutures.push(std::async(std::launch::async, std::move(cb), TPlainStatus(status, message), nullptr)); - } - } - - void Timeout() { // Timeout. 
- UNIT_ASSERT(ConnectTimeoutCallback); - auto cb = std::move(ConnectTimeoutCallback); - ConnectedCallback = nullptr; - ConnectTimeoutCallback = nullptr; - with_lock (Lock) { - CallbackFutures.push(std::async(std::launch::async, std::move(cb), true)); - } - } - - void CreateAndThenTimeout(typename IFactory::IProcessor::TPtr processor) { - UNIT_ASSERT(ConnectedCallback); - UNIT_ASSERT(ConnectTimeoutCallback); - auto cb2 = [cbt = std::move(ConnectTimeoutCallback), cb = std::move(ConnectedCallback), processor]() mutable { - cb(TPlainStatus(), std::move(processor)); - cbt(true); - }; - ConnectedCallback = nullptr; - ConnectTimeoutCallback = nullptr; - with_lock (Lock) { - CallbackFutures.push(std::async(std::launch::async, std::move(cb2))); - } - } - - void FailAndThenTimeout(EStatus status = EStatus::INTERNAL_ERROR, const TString& message = {}) { - UNIT_ASSERT(ConnectedCallback); - UNIT_ASSERT(ConnectTimeoutCallback); - auto cb2 = [cbt = std::move(ConnectTimeoutCallback), cb = std::move(ConnectedCallback), status, message]() mutable { - cb(TPlainStatus(status, message), nullptr); - cbt(true); - }; - ConnectedCallback = nullptr; - ConnectTimeoutCallback = nullptr; - with_lock (Lock) { - CallbackFutures.push(std::async(std::launch::async, std::move(cb2))); - } - } - - void TimeoutAndThenCreate(typename IFactory::IProcessor::TPtr processor) { - UNIT_ASSERT(ConnectedCallback); - UNIT_ASSERT(ConnectTimeoutCallback); - auto cb2 = [cbt = std::move(ConnectTimeoutCallback), cb = std::move(ConnectedCallback), processor]() mutable { - cbt(true); - cb(TPlainStatus(), std::move(processor)); - }; - ConnectedCallback = nullptr; - ConnectTimeoutCallback = nullptr; - with_lock (Lock) { - CallbackFutures.push(std::async(std::launch::async, std::move(cb2))); - } - } - - void Wait() { - std::queue<std::future<void>> futuresQueue; - with_lock (Lock) { - CallbackFutures.swap(futuresQueue); - } - while (!futuresQueue.empty()) { - futuresQueue.front().wait(); - futuresQueue.pop(); - } - } - - void Validate() { - } - - std::atomic<size_t> CreateCallsCount = 0; - -private: - TAdaptiveLock Lock; - typename IFactory::TConnectedCallback ConnectedCallback; - typename IFactory::TConnectTimeoutCallback ConnectTimeoutCallback; - std::queue<std::future<void>> CallbackFutures; -}; - -struct TMockReadSessionProcessor : public TMockProcessorFactory<Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>::IProcessor { - // Request to read. - struct TClientReadInfo { - TReadCallback Callback; - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage* Dst; - - operator bool() const { - return Dst != nullptr; - } - }; - - // Response from server. 
- struct TServerReadInfo { - NGrpc::TGrpcStatus Status; - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage Response; - - TServerReadInfo& Failure(grpc::StatusCode status = grpc::StatusCode::UNAVAILABLE, const TString& message = {}, bool internal = false) { - Status.GRpcStatusCode = status; - Status.InternalError = internal; - Status.Msg = message; - return *this; - } - - TServerReadInfo& InitResponse(const TString& sessionId) { - Response.mutable_init_response()->set_session_id(sessionId); - return *this; - } - - TServerReadInfo& CreatePartitionStream(const TString& topic = "TestTopic", const TString& cluster = "TestCluster", const ui64 partition = 1, const ui64 assignId = 1, ui64 readOffset = 0, ui64 endOffset = 0) { - auto* req = Response.mutable_assigned(); - req->mutable_topic()->set_path(topic); - req->set_cluster(cluster); - req->set_partition(partition); - req->set_assign_id(assignId); - req->set_read_offset(readOffset); - req->set_end_offset(endOffset); - return *this; - } - - TServerReadInfo& ReleasePartitionStream(const TString& topic = "TestTopic", const TString& cluster = "TestCluster", const ui64 partition = 1, const ui64 assignId = 1, ui64 commitOffset = 0, bool forceful = false) { - auto* req = Response.mutable_release(); - req->mutable_topic()->set_path(topic); - req->set_cluster(cluster); - req->set_partition(partition); - req->set_assign_id(assignId); - req->set_commit_offset(commitOffset); - req->set_forceful_release(forceful); - return *this; - } - - TServerReadInfo& ForcefulReleasePartitionStream(const TString& topic = "TestTopic", const TString& cluster = "TestCluster", const ui64 partition = 1, const ui64 assignId = 1, ui64 commitOffset = 0) { - return ReleasePartitionStream(topic, cluster, partition, assignId, commitOffset, true); - } - - // Data helpers. 
- TServerReadInfo& PartitionData(const ui64 cookie, const TString& topic = "TestTopic", const TString& cluster = "TestCluster", const ui64 partition = 1, const ui64 assignId = 1) { - auto* req = Response.mutable_data_batch()->add_partition_data(); - req->mutable_topic()->set_path(topic); - req->set_cluster(cluster); - req->set_partition(partition); - auto* cookieMsg = req->mutable_cookie(); - cookieMsg->set_assign_id(assignId); - cookieMsg->set_partition_cookie(cookie); - return *this; - } - - TServerReadInfo& Batch(const TString& sourceId, TInstant writeTimestamp = TInstant::MilliSeconds(42), const TString& ip = "::1", const std::vector<std::pair<TString, TString>>& extraFields = {}) { - const int lastPartitionData = Response.data_batch().partition_data_size(); - UNIT_ASSERT(lastPartitionData > 0); - auto* partitionData = Response.mutable_data_batch()->mutable_partition_data(lastPartitionData - 1); - auto* req = partitionData->add_batches(); - req->set_source_id(sourceId); - req->set_write_timestamp_ms(writeTimestamp.MilliSeconds()); - req->set_ip(ip); - for (auto&& [k, v] : extraFields) { - auto* kv = req->add_extra_fields(); - kv->set_key(k); - kv->set_value(v); - } - return *this; - } - - TServerReadInfo& Message(ui64 offset, const TString& data, Ydb::PersQueue::V1::Codec codec, ui64 seqNo = 1, TInstant createTimestamp = TInstant::MilliSeconds(42)) { - const int lastPartitionData = Response.data_batch().partition_data_size(); - UNIT_ASSERT(lastPartitionData > 0); - auto* partitionData = Response.mutable_data_batch()->mutable_partition_data(lastPartitionData - 1); - const int lastBatch = partitionData->batches_size(); - UNIT_ASSERT(lastBatch > 0); - auto* batch = partitionData->mutable_batches(lastBatch - 1); - auto* req = batch->add_message_data(); - req->set_offset(offset); - req->set_seq_no(seqNo); - req->set_create_timestamp_ms(createTimestamp.MilliSeconds()); - req->set_data(data); - req->set_codec(codec); - return *this; - } - - TServerReadInfo& CompressMessage(ui64 offset, const TString& sourceData, Ydb::PersQueue::V1::Codec codec = Ydb::PersQueue::V1::CODEC_GZIP, ui64 seqNo = 1, TInstant createTimestamp = TInstant::MilliSeconds(42)) { - return Message(offset, Compress(sourceData, codec), codec, seqNo, createTimestamp); - } - + + // Actions to use in OnCreateProcessor handler: + void CreateProcessor(typename IFactory::IProcessor::TPtr processor) { // Success. + UNIT_ASSERT(ConnectedCallback); + auto cb = std::move(ConnectedCallback); + ConnectedCallback = nullptr; + ConnectTimeoutCallback = nullptr; + with_lock (Lock) { + CallbackFutures.push(std::async(std::launch::async, std::move(cb), TPlainStatus(), processor)); + } + } + + void FailCreation(EStatus status = EStatus::INTERNAL_ERROR, const TString& message = {}) { // Fail. + UNIT_ASSERT(ConnectedCallback); + auto cb = std::move(ConnectedCallback); + ConnectedCallback = nullptr; + ConnectTimeoutCallback = nullptr; + with_lock (Lock) { + CallbackFutures.push(std::async(std::launch::async, std::move(cb), TPlainStatus(status, message), nullptr)); + } + } + + void Timeout() { // Timeout. 
+ UNIT_ASSERT(ConnectTimeoutCallback); + auto cb = std::move(ConnectTimeoutCallback); + ConnectedCallback = nullptr; + ConnectTimeoutCallback = nullptr; + with_lock (Lock) { + CallbackFutures.push(std::async(std::launch::async, std::move(cb), true)); + } + } + + void CreateAndThenTimeout(typename IFactory::IProcessor::TPtr processor) { + UNIT_ASSERT(ConnectedCallback); + UNIT_ASSERT(ConnectTimeoutCallback); + auto cb2 = [cbt = std::move(ConnectTimeoutCallback), cb = std::move(ConnectedCallback), processor]() mutable { + cb(TPlainStatus(), std::move(processor)); + cbt(true); + }; + ConnectedCallback = nullptr; + ConnectTimeoutCallback = nullptr; + with_lock (Lock) { + CallbackFutures.push(std::async(std::launch::async, std::move(cb2))); + } + } + + void FailAndThenTimeout(EStatus status = EStatus::INTERNAL_ERROR, const TString& message = {}) { + UNIT_ASSERT(ConnectedCallback); + UNIT_ASSERT(ConnectTimeoutCallback); + auto cb2 = [cbt = std::move(ConnectTimeoutCallback), cb = std::move(ConnectedCallback), status, message]() mutable { + cb(TPlainStatus(status, message), nullptr); + cbt(true); + }; + ConnectedCallback = nullptr; + ConnectTimeoutCallback = nullptr; + with_lock (Lock) { + CallbackFutures.push(std::async(std::launch::async, std::move(cb2))); + } + } + + void TimeoutAndThenCreate(typename IFactory::IProcessor::TPtr processor) { + UNIT_ASSERT(ConnectedCallback); + UNIT_ASSERT(ConnectTimeoutCallback); + auto cb2 = [cbt = std::move(ConnectTimeoutCallback), cb = std::move(ConnectedCallback), processor]() mutable { + cbt(true); + cb(TPlainStatus(), std::move(processor)); + }; + ConnectedCallback = nullptr; + ConnectTimeoutCallback = nullptr; + with_lock (Lock) { + CallbackFutures.push(std::async(std::launch::async, std::move(cb2))); + } + } + + void Wait() { + std::queue<std::future<void>> futuresQueue; + with_lock (Lock) { + CallbackFutures.swap(futuresQueue); + } + while (!futuresQueue.empty()) { + futuresQueue.front().wait(); + futuresQueue.pop(); + } + } + + void Validate() { + } + + std::atomic<size_t> CreateCallsCount = 0; + +private: + TAdaptiveLock Lock; + typename IFactory::TConnectedCallback ConnectedCallback; + typename IFactory::TConnectTimeoutCallback ConnectTimeoutCallback; + std::queue<std::future<void>> CallbackFutures; +}; + +struct TMockReadSessionProcessor : public TMockProcessorFactory<Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>::IProcessor { + // Request to read. + struct TClientReadInfo { + TReadCallback Callback; + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage* Dst; + + operator bool() const { + return Dst != nullptr; + } + }; + + // Response from server. 
+ struct TServerReadInfo { + NGrpc::TGrpcStatus Status; + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage Response; + + TServerReadInfo& Failure(grpc::StatusCode status = grpc::StatusCode::UNAVAILABLE, const TString& message = {}, bool internal = false) { + Status.GRpcStatusCode = status; + Status.InternalError = internal; + Status.Msg = message; + return *this; + } + + TServerReadInfo& InitResponse(const TString& sessionId) { + Response.mutable_init_response()->set_session_id(sessionId); + return *this; + } + + TServerReadInfo& CreatePartitionStream(const TString& topic = "TestTopic", const TString& cluster = "TestCluster", const ui64 partition = 1, const ui64 assignId = 1, ui64 readOffset = 0, ui64 endOffset = 0) { + auto* req = Response.mutable_assigned(); + req->mutable_topic()->set_path(topic); + req->set_cluster(cluster); + req->set_partition(partition); + req->set_assign_id(assignId); + req->set_read_offset(readOffset); + req->set_end_offset(endOffset); + return *this; + } + + TServerReadInfo& ReleasePartitionStream(const TString& topic = "TestTopic", const TString& cluster = "TestCluster", const ui64 partition = 1, const ui64 assignId = 1, ui64 commitOffset = 0, bool forceful = false) { + auto* req = Response.mutable_release(); + req->mutable_topic()->set_path(topic); + req->set_cluster(cluster); + req->set_partition(partition); + req->set_assign_id(assignId); + req->set_commit_offset(commitOffset); + req->set_forceful_release(forceful); + return *this; + } + + TServerReadInfo& ForcefulReleasePartitionStream(const TString& topic = "TestTopic", const TString& cluster = "TestCluster", const ui64 partition = 1, const ui64 assignId = 1, ui64 commitOffset = 0) { + return ReleasePartitionStream(topic, cluster, partition, assignId, commitOffset, true); + } + + // Data helpers. 
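// Illustrative sketch: in the tests below a TServerReadInfo is built with this fluent
// chain and pushed into the mock processor (here MockProcessor stands for the
// TMockReadSessionProcessor instance owned by the test setup; all values are placeholders).
MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo()
    .InitResponse("test-session-id"));
MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo()
    .PartitionData(/* cookie = */ 1)
    .Batch("test-source-id")
    .CompressMessage(/* offset = */ 0, "message-payload"));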
+ TServerReadInfo& PartitionData(const ui64 cookie, const TString& topic = "TestTopic", const TString& cluster = "TestCluster", const ui64 partition = 1, const ui64 assignId = 1) { + auto* req = Response.mutable_data_batch()->add_partition_data(); + req->mutable_topic()->set_path(topic); + req->set_cluster(cluster); + req->set_partition(partition); + auto* cookieMsg = req->mutable_cookie(); + cookieMsg->set_assign_id(assignId); + cookieMsg->set_partition_cookie(cookie); + return *this; + } + + TServerReadInfo& Batch(const TString& sourceId, TInstant writeTimestamp = TInstant::MilliSeconds(42), const TString& ip = "::1", const std::vector<std::pair<TString, TString>>& extraFields = {}) { + const int lastPartitionData = Response.data_batch().partition_data_size(); + UNIT_ASSERT(lastPartitionData > 0); + auto* partitionData = Response.mutable_data_batch()->mutable_partition_data(lastPartitionData - 1); + auto* req = partitionData->add_batches(); + req->set_source_id(sourceId); + req->set_write_timestamp_ms(writeTimestamp.MilliSeconds()); + req->set_ip(ip); + for (auto&& [k, v] : extraFields) { + auto* kv = req->add_extra_fields(); + kv->set_key(k); + kv->set_value(v); + } + return *this; + } + + TServerReadInfo& Message(ui64 offset, const TString& data, Ydb::PersQueue::V1::Codec codec, ui64 seqNo = 1, TInstant createTimestamp = TInstant::MilliSeconds(42)) { + const int lastPartitionData = Response.data_batch().partition_data_size(); + UNIT_ASSERT(lastPartitionData > 0); + auto* partitionData = Response.mutable_data_batch()->mutable_partition_data(lastPartitionData - 1); + const int lastBatch = partitionData->batches_size(); + UNIT_ASSERT(lastBatch > 0); + auto* batch = partitionData->mutable_batches(lastBatch - 1); + auto* req = batch->add_message_data(); + req->set_offset(offset); + req->set_seq_no(seqNo); + req->set_create_timestamp_ms(createTimestamp.MilliSeconds()); + req->set_data(data); + req->set_codec(codec); + return *this; + } + + TServerReadInfo& CompressMessage(ui64 offset, const TString& sourceData, Ydb::PersQueue::V1::Codec codec = Ydb::PersQueue::V1::CODEC_GZIP, ui64 seqNo = 1, TInstant createTimestamp = TInstant::MilliSeconds(42)) { + return Message(offset, Compress(sourceData, codec), codec, seqNo, createTimestamp); + } + TServerReadInfo& BrokenCompressMessage(ui64 offset, const TString& sourceData, Ydb::PersQueue::V1::Codec codec = Ydb::PersQueue::V1::CODEC_GZIP, ui64 seqNo = 1, TInstant createTimestamp = TInstant::MilliSeconds(42)) { return Message(offset, "broken_header_" + Compress(sourceData, codec), codec, seqNo, createTimestamp); } - TServerReadInfo& PartitionStreamStatus(ui64 committedOffset, ui64 endOffset, TInstant writeWatermark, const TString& topic = "TestTopic", const TString& cluster = "TestCluster", const ui64 partition = 1, const ui64 assignId = 1) { - auto* req = Response.mutable_partition_status(); - req->mutable_topic()->set_path(topic); - req->set_cluster(cluster); - req->set_partition(partition); - req->set_assign_id(assignId); - req->set_committed_offset(committedOffset); - req->set_end_offset(endOffset); - req->set_write_watermark_ms(writeWatermark.MilliSeconds()); - return *this; - } - - TServerReadInfo& CommitAcknowledgement(ui64 cookie, ui64 assignId = 1) { - auto* req = Response.mutable_committed(); - auto* cookieInfo = req->add_cookies(); - cookieInfo->set_partition_cookie(cookie); - cookieInfo->set_assign_id(assignId); - return *this; - } - }; - - ~TMockReadSessionProcessor() { - Wait(); - } - - void Cancel() override { - } - + TServerReadInfo& 
PartitionStreamStatus(ui64 committedOffset, ui64 endOffset, TInstant writeWatermark, const TString& topic = "TestTopic", const TString& cluster = "TestCluster", const ui64 partition = 1, const ui64 assignId = 1) { + auto* req = Response.mutable_partition_status(); + req->mutable_topic()->set_path(topic); + req->set_cluster(cluster); + req->set_partition(partition); + req->set_assign_id(assignId); + req->set_committed_offset(committedOffset); + req->set_end_offset(endOffset); + req->set_write_watermark_ms(writeWatermark.MilliSeconds()); + return *this; + } + + TServerReadInfo& CommitAcknowledgement(ui64 cookie, ui64 assignId = 1) { + auto* req = Response.mutable_committed(); + auto* cookieInfo = req->add_cookies(); + cookieInfo->set_partition_cookie(cookie); + cookieInfo->set_assign_id(assignId); + return *this; + } + }; + + ~TMockReadSessionProcessor() { + Wait(); + } + + void Cancel() override { + } + void ReadInitialMetadata(std::unordered_multimap<TString, TString>* metadata, TReadCallback callback) override { - Y_UNUSED(metadata); - Y_UNUSED(callback); - UNIT_ASSERT_C(false, "This method is not expected to be called"); - } - - void Finish(TReadCallback callback) override { - Y_UNUSED(callback); - UNIT_ASSERT_C(false, "This method is not expected to be called"); - } - - void AddFinishedCallback(TReadCallback callback) override { - Y_UNUSED(callback); - UNIT_ASSERT_C(false, "This method is not expected to be called"); - } - - void Read(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage* response, TReadCallback callback) override { - with_lock (Lock) { - UNIT_ASSERT(!ActiveRead); - ActiveRead.Callback = std::move(callback); - ActiveRead.Dst = response; - if (!ReadResponses.empty()) { - StartProcessReadImpl(); - } - } - } - - void Write(Ydb::PersQueue::V1::MigrationStreamingReadClientMessage&& request, TWriteCallback callback) override { - UNIT_ASSERT(!callback); // Read session doesn't set callbacks. 
- switch (request.request_case()) { - case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kInitRequest: - OnInitRequest(request.init_request()); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kRead: - OnReadRequest(request.read()); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kStartRead: - OnStartReadRequest(request.start_read()); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kCommit: - OnCommitRequest(request.commit()); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kReleased: - OnReleasedRequest(request.released()); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kStatus: - OnStatusRequest(request.status()); - break; - case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::REQUEST_NOT_SET: - UNIT_ASSERT_C(false, "Invalid request"); - break; - } - } - + Y_UNUSED(metadata); + Y_UNUSED(callback); + UNIT_ASSERT_C(false, "This method is not expected to be called"); + } + + void Finish(TReadCallback callback) override { + Y_UNUSED(callback); + UNIT_ASSERT_C(false, "This method is not expected to be called"); + } + + void AddFinishedCallback(TReadCallback callback) override { + Y_UNUSED(callback); + UNIT_ASSERT_C(false, "This method is not expected to be called"); + } + + void Read(Ydb::PersQueue::V1::MigrationStreamingReadServerMessage* response, TReadCallback callback) override { + with_lock (Lock) { + UNIT_ASSERT(!ActiveRead); + ActiveRead.Callback = std::move(callback); + ActiveRead.Dst = response; + if (!ReadResponses.empty()) { + StartProcessReadImpl(); + } + } + } + + void Write(Ydb::PersQueue::V1::MigrationStreamingReadClientMessage&& request, TWriteCallback callback) override { + UNIT_ASSERT(!callback); // Read session doesn't set callbacks. 
+ switch (request.request_case()) { + case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kInitRequest: + OnInitRequest(request.init_request()); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kRead: + OnReadRequest(request.read()); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kStartRead: + OnStartReadRequest(request.start_read()); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kCommit: + OnCommitRequest(request.commit()); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kReleased: + OnReleasedRequest(request.released()); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::kStatus: + OnStatusRequest(request.status()); + break; + case Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::REQUEST_NOT_SET: + UNIT_ASSERT_C(false, "Invalid request"); + break; + } + } + MOCK_METHOD(void, OnInitRequest, (const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::InitRequest&), ()); MOCK_METHOD(void, OnReadRequest, (const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Read&), ()); MOCK_METHOD(void, OnStartReadRequest, (const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::StartRead&), ()); MOCK_METHOD(void, OnCommitRequest, (const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Commit&), ()); MOCK_METHOD(void, OnReleasedRequest, (const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Released&), ()); MOCK_METHOD(void, OnStatusRequest, (const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Status&), ()); - - void Wait() { - std::queue<std::future<void>> callbackFutures; - with_lock (Lock) { - CallbackFutures.swap(callbackFutures); - } - - while (!callbackFutures.empty()) { - callbackFutures.front().wait(); - callbackFutures.pop(); - } - } - - void Validate() { - with_lock (Lock) { - UNIT_ASSERT(ReadResponses.empty()); - } - } - - void ProcessRead() { - NGrpc::TGrpcStatus status; - TReadCallback callback; - with_lock (Lock) { - *ActiveRead.Dst = ReadResponses.front().Response; - ActiveRead.Dst = nullptr; - status = std::move(ReadResponses.front().Status); - ReadResponses.pop(); - callback = std::move(ActiveRead.Callback); - } - callback(std::move(status)); - } - - void StartProcessReadImpl() { - CallbackFutures.push(std::async(std::launch::async, &TMockReadSessionProcessor::ProcessRead, this)); - } - - void AddServerResponse(TServerReadInfo result) { - bool hasActiveRead = false; - with_lock (Lock) { - ReadResponses.emplace(std::move(result)); - if (ActiveRead) { - hasActiveRead = true; - } - } - if (hasActiveRead) { - ProcessRead(); - } - } - - TAdaptiveLock Lock; - TClientReadInfo ActiveRead; - std::queue<TServerReadInfo> ReadResponses; - std::queue<std::future<void>> CallbackFutures; -}; - -// Class for testing read session impl -// with mocks. 
-class TReadSessionImplTestSetup { -public: - // Types - using IReadSessionConnectionProcessorFactory = ISessionConnectionProcessorFactory<Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>; - using TMockProcessorFactory = ::TMockProcessorFactory<Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>; - - struct TMockErrorHandler : public IErrorHandler { + + void Wait() { + std::queue<std::future<void>> callbackFutures; + with_lock (Lock) { + CallbackFutures.swap(callbackFutures); + } + + while (!callbackFutures.empty()) { + callbackFutures.front().wait(); + callbackFutures.pop(); + } + } + + void Validate() { + with_lock (Lock) { + UNIT_ASSERT(ReadResponses.empty()); + } + } + + void ProcessRead() { + NGrpc::TGrpcStatus status; + TReadCallback callback; + with_lock (Lock) { + *ActiveRead.Dst = ReadResponses.front().Response; + ActiveRead.Dst = nullptr; + status = std::move(ReadResponses.front().Status); + ReadResponses.pop(); + callback = std::move(ActiveRead.Callback); + } + callback(std::move(status)); + } + + void StartProcessReadImpl() { + CallbackFutures.push(std::async(std::launch::async, &TMockReadSessionProcessor::ProcessRead, this)); + } + + void AddServerResponse(TServerReadInfo result) { + bool hasActiveRead = false; + with_lock (Lock) { + ReadResponses.emplace(std::move(result)); + if (ActiveRead) { + hasActiveRead = true; + } + } + if (hasActiveRead) { + ProcessRead(); + } + } + + TAdaptiveLock Lock; + TClientReadInfo ActiveRead; + std::queue<TServerReadInfo> ReadResponses; + std::queue<std::future<void>> CallbackFutures; +}; + +// Class for testing read session impl +// with mocks. +class TReadSessionImplTestSetup { +public: + // Types + using IReadSessionConnectionProcessorFactory = ISessionConnectionProcessorFactory<Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>; + using TMockProcessorFactory = ::TMockProcessorFactory<Ydb::PersQueue::V1::MigrationStreamingReadClientMessage, Ydb::PersQueue::V1::MigrationStreamingReadServerMessage>; + + struct TMockErrorHandler : public IErrorHandler { MOCK_METHOD(void, AbortSession, (TSessionClosedEvent&& closeEvent), (override)); - }; - - struct TFakeContext : public NGrpc::IQueueClientContext { - IQueueClientContextPtr CreateContext() override { - return std::make_shared<TFakeContext>(); - } - - grpc::CompletionQueue* CompletionQueue() override { - UNIT_ASSERT_C(false, "This method is not expected to be called"); - return nullptr; - } - - bool IsCancelled() const override { - UNIT_ASSERT_C(false, "This method is not expected to be called"); - return false; - } - - bool Cancel() override { - return false; - } - - void SubscribeCancel(std::function<void()>) override { - UNIT_ASSERT_C(false, "This method is not expected to be called"); - } - }; - - // Methods - TReadSessionImplTestSetup(); - ~TReadSessionImplTestSetup() noexcept(false); // Performs extra validation and UNIT_ASSERTs - - TSingleClusterReadSessionImpl* GetSession(); - - std::shared_ptr<TReadSessionEventsQueue> GetEventsQueue(); - ::IExecutor::TPtr GetDefaultExecutor(); - + }; + + struct TFakeContext : public NGrpc::IQueueClientContext { + IQueueClientContextPtr CreateContext() override { + return std::make_shared<TFakeContext>(); + } + + grpc::CompletionQueue* CompletionQueue() override { + UNIT_ASSERT_C(false, "This method is not expected to be called"); + return nullptr; + } + + bool 
IsCancelled() const override { + UNIT_ASSERT_C(false, "This method is not expected to be called"); + return false; + } + + bool Cancel() override { + return false; + } + + void SubscribeCancel(std::function<void()>) override { + UNIT_ASSERT_C(false, "This method is not expected to be called"); + } + }; + + // Methods + TReadSessionImplTestSetup(); + ~TReadSessionImplTestSetup() noexcept(false); // Performs extra validation and UNIT_ASSERTs + + TSingleClusterReadSessionImpl* GetSession(); + + std::shared_ptr<TReadSessionEventsQueue> GetEventsQueue(); + ::IExecutor::TPtr GetDefaultExecutor(); + void SuccessfulInit(bool flag = true); - TPartitionStream::TPtr CreatePartitionStream(const TString& topic = "TestTopic", const TString& cluster = "TestCluster", ui64 partition = 1, ui64 assignId = 1); - - // Assertions. - void AssertNoEvents(); - -public: - // Members - TReadSessionSettings Settings; - TString ClusterName = "cluster"; - TLog Log = CreateLogBackend("cerr"); - std::shared_ptr<TReadSessionEventsQueue> EventsQueue; - TIntrusivePtr<testing::StrictMock<TMockErrorHandler>> MockErrorHandler = MakeIntrusive<testing::StrictMock<TMockErrorHandler>>(); - std::shared_ptr<TFakeContext> FakeContext = std::make_shared<TFakeContext>(); - std::shared_ptr<TMockProcessorFactory> MockProcessorFactory = std::make_shared<TMockProcessorFactory>(); - TIntrusivePtr<TMockReadSessionProcessor> MockProcessor = MakeIntrusive<TMockReadSessionProcessor>(); - ui64 PartitionIdStart = 1; - ui64 PartitionIdStep = 1; - TSingleClusterReadSessionImpl::TPtr Session; - std::shared_ptr<TThreadPool> ThreadPool; - ::IExecutor::TPtr DefaultExecutor; -}; - -class TReorderingExecutor : public ::IExecutor { -public: - TReorderingExecutor(size_t cycleCount = 2, ::IExecutor::TPtr executor = CreateThreadPoolExecutor(1)) - : CycleCount(cycleCount) - , Executor(std::move(executor)) - { - } - - bool IsAsync() const override { - return Executor->IsAsync(); - } - - void Post(TFunction&& f) override { - with_lock (Lock) { - Cerr << "Post function" << Endl; - ++TasksAdded; - if (Functions.empty()) { - Functions.reserve(CycleCount); - } - Functions.emplace_back(std::move(f)); - if (Functions.size() == CycleCount) { - Executor->Post([functions = std::move(Functions)]() { - for (auto i = functions.rbegin(), end = functions.rend(); i != end; ++i) { - (*i)(); - } - }); - Functions.clear(); - } - } - } - - void DoStart() override { - Executor->Start(); - } - - size_t GetTasksAdded() { - with_lock (Lock) { - return TasksAdded; - } - } - -private: - TAdaptiveLock Lock; - size_t CycleCount; - size_t TasksAdded = 0; - ::IExecutor::TPtr Executor; - std::vector<TFunction> Functions; -}; - -class TSynchronousExecutor : public ::IExecutor { - bool IsAsync() const override { - return false; - } - - void Post(TFunction&& f) override { - f(); - } - - void DoStart() override { - } -}; - -extern TLogFormatter NYdb::GetPrefixLogFormatter(const TString& prefix); // Defined in ydb.cpp. - -TReadSessionImplTestSetup::TReadSessionImplTestSetup() { - Settings - .AppendTopics({"TestTopic"}) - .ConsumerName("TestConsumer") - .RetryPolicy(IRetryPolicy::GetFixedIntervalPolicy(TDuration::MilliSeconds(10))) - .Counters(MakeIntrusive<NYdb::NPersQueue::TReaderCounters>(MakeIntrusive<NMonitoring::TDynamicCounters>())); - - Log.SetFormatter(GetPrefixLogFormatter("")); + TPartitionStream::TPtr CreatePartitionStream(const TString& topic = "TestTopic", const TString& cluster = "TestCluster", ui64 partition = 1, ui64 assignId = 1); + + // Assertions. 
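// Illustrative sketch of how a test typically drives this setup class, using only the
// helpers declared here (the concrete mock expectations depend on the scenario).
TReadSessionImplTestSetup setup;
setup.SuccessfulInit();                                        // connect + init handshake via the mocks
TPartitionStream::TPtr stream = setup.CreatePartitionStream(); // server assigns a partition stream
// ... push further responses with setup.MockProcessor->AddServerResponse(...) and
// consume the resulting events through setup.GetEventsQueue() ...
setup.AssertNoEvents();                                        // nothing unexpected is left in the queue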
+ void AssertNoEvents(); + +public: + // Members + TReadSessionSettings Settings; + TString ClusterName = "cluster"; + TLog Log = CreateLogBackend("cerr"); + std::shared_ptr<TReadSessionEventsQueue> EventsQueue; + TIntrusivePtr<testing::StrictMock<TMockErrorHandler>> MockErrorHandler = MakeIntrusive<testing::StrictMock<TMockErrorHandler>>(); + std::shared_ptr<TFakeContext> FakeContext = std::make_shared<TFakeContext>(); + std::shared_ptr<TMockProcessorFactory> MockProcessorFactory = std::make_shared<TMockProcessorFactory>(); + TIntrusivePtr<TMockReadSessionProcessor> MockProcessor = MakeIntrusive<TMockReadSessionProcessor>(); + ui64 PartitionIdStart = 1; + ui64 PartitionIdStep = 1; + TSingleClusterReadSessionImpl::TPtr Session; + std::shared_ptr<TThreadPool> ThreadPool; + ::IExecutor::TPtr DefaultExecutor; +}; + +class TReorderingExecutor : public ::IExecutor { +public: + TReorderingExecutor(size_t cycleCount = 2, ::IExecutor::TPtr executor = CreateThreadPoolExecutor(1)) + : CycleCount(cycleCount) + , Executor(std::move(executor)) + { + } + + bool IsAsync() const override { + return Executor->IsAsync(); + } + + void Post(TFunction&& f) override { + with_lock (Lock) { + Cerr << "Post function" << Endl; + ++TasksAdded; + if (Functions.empty()) { + Functions.reserve(CycleCount); + } + Functions.emplace_back(std::move(f)); + if (Functions.size() == CycleCount) { + Executor->Post([functions = std::move(Functions)]() { + for (auto i = functions.rbegin(), end = functions.rend(); i != end; ++i) { + (*i)(); + } + }); + Functions.clear(); + } + } + } + + void DoStart() override { + Executor->Start(); + } + + size_t GetTasksAdded() { + with_lock (Lock) { + return TasksAdded; + } + } + +private: + TAdaptiveLock Lock; + size_t CycleCount; + size_t TasksAdded = 0; + ::IExecutor::TPtr Executor; + std::vector<TFunction> Functions; +}; + +class TSynchronousExecutor : public ::IExecutor { + bool IsAsync() const override { + return false; + } + + void Post(TFunction&& f) override { + f(); + } + + void DoStart() override { + } +}; + +extern TLogFormatter NYdb::GetPrefixLogFormatter(const TString& prefix); // Defined in ydb.cpp. + +TReadSessionImplTestSetup::TReadSessionImplTestSetup() { + Settings + .AppendTopics({"TestTopic"}) + .ConsumerName("TestConsumer") + .RetryPolicy(IRetryPolicy::GetFixedIntervalPolicy(TDuration::MilliSeconds(10))) + .Counters(MakeIntrusive<NYdb::NPersQueue::TReaderCounters>(MakeIntrusive<NMonitoring::TDynamicCounters>())); + + Log.SetFormatter(GetPrefixLogFormatter("")); Mock::AllowLeak(MockProcessor.Get()); Mock::AllowLeak(MockProcessorFactory.get()); Mock::AllowLeak(MockErrorHandler.Get()); -} - -TReadSessionImplTestSetup::~TReadSessionImplTestSetup() noexcept(false) { - if (!std::uncaught_exception()) { // Exiting from test successfully. Check additional expectations. 
- MockProcessorFactory->Wait(); - MockProcessor->Wait(); - - MockProcessorFactory->Validate(); - MockProcessor->Validate(); - } - Session = nullptr; - ThreadPool->Stop(); -} - -::IExecutor::TPtr TReadSessionImplTestSetup::GetDefaultExecutor() { - if (!DefaultExecutor) { - ThreadPool = std::make_shared<TThreadPool>(); - ThreadPool->Start(1); - DefaultExecutor = CreateThreadPoolExecutorAdapter(ThreadPool); - } - return DefaultExecutor; -} - -TSingleClusterReadSessionImpl* TReadSessionImplTestSetup::GetSession() { - if (!Session) { - if (!Settings.DecompressionExecutor_) { - Settings.DecompressionExecutor(GetDefaultExecutor()); - } - if (!Settings.EventHandlers_.HandlersExecutor_) { - Settings.EventHandlers_.HandlersExecutor(GetDefaultExecutor()); - } - Session = std::make_shared<TSingleClusterReadSessionImpl>( - Settings, - ClusterName, - Log, - MockProcessorFactory, - GetEventsQueue(), - MockErrorHandler, - FakeContext, - PartitionIdStart, PartitionIdStep); - } - return Session.get(); -} - -std::shared_ptr<TReadSessionEventsQueue> TReadSessionImplTestSetup::GetEventsQueue() { - if (!EventsQueue) { - EventsQueue = std::make_shared<TReadSessionEventsQueue>(Settings, std::weak_ptr<IUserRetrievedEventCallback>()); - } - return EventsQueue; -} - +} + +TReadSessionImplTestSetup::~TReadSessionImplTestSetup() noexcept(false) { + if (!std::uncaught_exception()) { // Exiting from test successfully. Check additional expectations. + MockProcessorFactory->Wait(); + MockProcessor->Wait(); + + MockProcessorFactory->Validate(); + MockProcessor->Validate(); + } + Session = nullptr; + ThreadPool->Stop(); +} + +::IExecutor::TPtr TReadSessionImplTestSetup::GetDefaultExecutor() { + if (!DefaultExecutor) { + ThreadPool = std::make_shared<TThreadPool>(); + ThreadPool->Start(1); + DefaultExecutor = CreateThreadPoolExecutorAdapter(ThreadPool); + } + return DefaultExecutor; +} + +TSingleClusterReadSessionImpl* TReadSessionImplTestSetup::GetSession() { + if (!Session) { + if (!Settings.DecompressionExecutor_) { + Settings.DecompressionExecutor(GetDefaultExecutor()); + } + if (!Settings.EventHandlers_.HandlersExecutor_) { + Settings.EventHandlers_.HandlersExecutor(GetDefaultExecutor()); + } + Session = std::make_shared<TSingleClusterReadSessionImpl>( + Settings, + ClusterName, + Log, + MockProcessorFactory, + GetEventsQueue(), + MockErrorHandler, + FakeContext, + PartitionIdStart, PartitionIdStep); + } + return Session.get(); +} + +std::shared_ptr<TReadSessionEventsQueue> TReadSessionImplTestSetup::GetEventsQueue() { + if (!EventsQueue) { + EventsQueue = std::make_shared<TReadSessionEventsQueue>(Settings, std::weak_ptr<IUserRetrievedEventCallback>()); + } + return EventsQueue; +} + void TReadSessionImplTestSetup::SuccessfulInit(bool hasInitRequest) { - EXPECT_CALL(*MockProcessorFactory, OnCreateProcessor(1)) - .WillOnce([&](){ MockProcessorFactory->CreateProcessor(MockProcessor); }); + EXPECT_CALL(*MockProcessorFactory, OnCreateProcessor(1)) + .WillOnce([&](){ MockProcessorFactory->CreateProcessor(MockProcessor); }); if (hasInitRequest) EXPECT_CALL(*MockProcessor, OnInitRequest(_)); - MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().InitResponse("123-session-id-321")); - GetSession()->Start(); - MockProcessorFactory->Wait(); - MockProcessor->Wait(); -} - -TPartitionStream::TPtr TReadSessionImplTestSetup::CreatePartitionStream(const TString& topic, const TString& cluster, ui64 partition, ui64 assignId) { - 
MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().CreatePartitionStream(topic, cluster, partition, assignId)); // Callback will be called. - TMaybe<TReadSessionEvent::TEvent> event = EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TCreatePartitionStreamEvent); - auto& createEvent = std::get<TReadSessionEvent::TCreatePartitionStreamEvent>(*event); - auto stream = createEvent.GetPartitionStream(); - UNIT_ASSERT(stream); - createEvent.Confirm(); - return stream; -} - -void TReadSessionImplTestSetup::AssertNoEvents() { - TMaybe<TReadSessionEvent::TEvent> event = GetEventsQueue()->GetEvent(false); - UNIT_ASSERT(!event); -} - + MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().InitResponse("123-session-id-321")); + GetSession()->Start(); + MockProcessorFactory->Wait(); + MockProcessor->Wait(); +} + +TPartitionStream::TPtr TReadSessionImplTestSetup::CreatePartitionStream(const TString& topic, const TString& cluster, ui64 partition, ui64 assignId) { + MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().CreatePartitionStream(topic, cluster, partition, assignId)); // Callback will be called. + TMaybe<TReadSessionEvent::TEvent> event = EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TCreatePartitionStreamEvent); + auto& createEvent = std::get<TReadSessionEvent::TCreatePartitionStreamEvent>(*event); + auto stream = createEvent.GetPartitionStream(); + UNIT_ASSERT(stream); + createEvent.Confirm(); + return stream; +} + +void TReadSessionImplTestSetup::AssertNoEvents() { + TMaybe<TReadSessionEvent::TEvent> event = GetEventsQueue()->GetEvent(false); + UNIT_ASSERT(!event); +} + using NYdb::NPersQueue::NTests::TPersQueueYdbSdkTestSetup; -Y_UNIT_TEST_SUITE(PersQueueSdkReadSessionTest) { - void ReadSessionImpl(bool close, bool commit, bool explicitlySpecifiedPartitions = false) { +Y_UNIT_TEST_SUITE(PersQueueSdkReadSessionTest) { + void ReadSessionImpl(bool close, bool commit, bool explicitlySpecifiedPartitions = false) { NYdb::NPersQueue::NTests::TPersQueueYdbSdkTestSetup setup("ReadSession"); - setup.WriteToTopic({"message1", "message2"}); - auto settings = setup.GetReadSessionSettings(); - if (explicitlySpecifiedPartitions) { - settings.Topics_[0].AppendPartitionGroupIds(1); - } - std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(settings); - + setup.WriteToTopic({"message1", "message2"}); + auto settings = setup.GetReadSessionSettings(); + if (explicitlySpecifiedPartitions) { + settings.Topics_[0].AppendPartitionGroupIds(1); + } + std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(settings); + TDeferredCommit dc; - // Event 1: create partition stream. - { - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TCreatePartitionStreamEvent); - std::get<TReadSessionEvent::TCreatePartitionStreamEvent>(*event).Confirm(); + // Event 1: create partition stream. + { + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TCreatePartitionStreamEvent); + std::get<TReadSessionEvent::TCreatePartitionStreamEvent>(*event).Confirm(); Cerr << "create event " << DebugString(*event) << Endl; - } - // Event 2: data. 
- { - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); - TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 2); - for (auto& msg : dataEvent.GetMessages()) { - UNIT_ASSERT(msg.GetData() == "message1" || msg.GetData() == "message2"); - } + } + // Event 2: data. + { + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); + TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 2); + for (auto& msg : dataEvent.GetMessages()) { + UNIT_ASSERT(msg.GetData() == "message1" || msg.GetData() == "message2"); + } Cerr << "data event " << DebugString(*event) << Endl; - if (commit) { + if (commit) { dc.Add(dataEvent); - } - } + } + } setup.WriteToTopic({"message3"}); // Event 3: data. { @@ -712,484 +712,484 @@ Y_UNIT_TEST_SUITE(PersQueueSdkReadSessionTest) { UNIT_ASSERT(msg.GetData() == "message3"); } Cerr << "data event " << DebugString(*event) << Endl; - + dataEvent.Commit(); // Commit right now! } dc.Commit(); - if (close) { - session->Close(TDuration::Seconds(30)); - } - + if (close) { + session->Close(TDuration::Seconds(30)); + } + // Event 4: commit ack. - if (commit) { - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(!close); // Event is expected to be already in queue if closed. - UNIT_ASSERT(event); + if (commit) { + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(!close); // Event is expected to be already in queue if closed. + UNIT_ASSERT(event); Cerr << "commit ack event " << DebugString(*event) << Endl; UNIT_ASSERT(std::holds_alternative<TReadSessionEvent::TCommitAcknowledgementEvent>(*event)); - } - - if (close) { - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(false); - UNIT_ASSERT(event); + } + + if (close) { + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(false); + UNIT_ASSERT(event); Cerr << "close event " << DebugString(*event) << Endl; UNIT_ASSERT(std::holds_alternative<TSessionClosedEvent>(*event)); - UNIT_ASSERT_STRING_CONTAINS(DebugString(*event), "Session was gracefully closed"); - } - } - - Y_UNIT_TEST(ReadSessionWithAbort) { - ReadSessionImpl(false, true); - } - - Y_UNIT_TEST(ReadSessionWithClose) { - ReadSessionImpl(true, true); - } - - Y_UNIT_TEST(ReadSessionWithCloseNotCommitted) { - ReadSessionImpl(true, false); - } - - Y_UNIT_TEST(ReadSessionWithExplicitlySpecifiedPartitions) { - ReadSessionImpl(true, true, true); - } - - Y_UNIT_TEST(SettingsValidation) { - TPersQueueYdbSdkTestSetup setup("SettingsValidation"); - const auto goodSettings = setup.GetReadSessionSettings(); - -#define ASSERT_BAD_VALIDATION \ - std::shared_ptr<IReadSession> session = \ - setup.GetPersQueueClient().CreateReadSession(settings); \ - session->WaitEvent().Wait(); \ - TMaybe<TReadSessionEvent::TEvent> event = \ - session->GetEvent(true); \ - UNIT_ASSERT(event); \ - Cerr << DebugString(*event) << Endl; \ - UNIT_ASSERT_EVENT_TYPE(*event, TSessionClosedEvent); \ - /**/ - - // No topics to read. - { - auto settings = goodSettings; - settings.Topics_.clear(); - ASSERT_BAD_VALIDATION; - } - - // No consumer name. 
- { - auto settings = goodSettings; - settings.ConsumerName(""); - ASSERT_BAD_VALIDATION; - } - - // Small max memory usage. - { - auto settings = goodSettings; - settings.MaxMemoryUsageBytes(100); - ASSERT_BAD_VALIDATION; - } - } - - Y_UNIT_TEST(ClosesAfterFailedConnectionToCds) { - TPersQueueYdbSdkTestSetup setup("ClosesAfterFailedConnectionToCds"); - setup.ShutdownGRpc(); - - TReadSessionSettings settings = setup.GetReadSessionSettings(); - // Set policy with max retries == 3. - settings.RetryPolicy(IRetryPolicy::GetExponentialBackoffPolicy(TDuration::MilliSeconds(10), TDuration::MilliSeconds(10), TDuration::MilliSeconds(100), 3)); - std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(settings); - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); - UNIT_ASSERT(event); - Cerr << DebugString(*event) << Endl; - UNIT_ASSERT_EVENT_TYPE(*event, TSessionClosedEvent); - } - - Y_UNIT_TEST(SpecifyClustersExplicitly) { - TPersQueueYdbSdkTestSetup setup("SpecifyClustersExplicitly"); - - auto settings = setup.GetReadSessionSettings(); - settings.ReadOriginal({setup.GetLocalCluster()}); - - // Success. - { - std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(settings); - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); - UNIT_ASSERT(event); - Cerr << DebugString(*event) << Endl; - UNIT_ASSERT_NOT_EVENT_TYPE(*event, TSessionClosedEvent); - } - - // Failure: one cluster endpoint is invalid. - { - settings.AppendClusters("unknown_cluster"); - std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(settings); - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); - UNIT_ASSERT(event); - Cerr << DebugString(*event) << Endl; - UNIT_ASSERT_EVENT_TYPE(*event, TSessionClosedEvent); - } - - // Failure: no valid cluster endpoints. - { - settings.ReadOriginal({"unknown_cluster"}); - std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(settings); - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); - UNIT_ASSERT(event); - Cerr << DebugString(*event) << Endl; - UNIT_ASSERT_EVENT_TYPE(*event, TSessionClosedEvent); - } - } - - Y_UNIT_TEST(StopResumeReadingData) { - NYdb::NPersQueue::NTests::TPersQueueYdbSdkTestSetup setup("ReadSession"); - setup.WriteToTopic({"message1"}); - std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(setup.GetReadSessionSettings()); - - // Event 1: create partition stream. - { - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TCreatePartitionStreamEvent); - std::get<TReadSessionEvent::TCreatePartitionStreamEvent>(*event).Confirm(); - Cerr << DebugString(*event) << Endl; - } - - // Event 2: receive data. - auto GetDataEvent = [&](const TString& content) -> TMaybe<TReadSessionEvent::TEvent> { - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); - TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 1); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages()[0].GetData(), content); - Cerr << DebugString(*event) << Endl; - return event; - }; - - TMaybe<TReadSessionEvent::TEvent> dataEvents[2]; - - dataEvents[0] = GetDataEvent("message1"); - - // Stop reading data. 
- session->StopReadingData(); - - // Write data. - setup.WriteToTopic({"message2"}); - - // Already requested read. - dataEvents[1] = GetDataEvent("message2"); - - // Write data. - setup.WriteToTopic({"message3"}); - - // Commit and check that other events will come. - for (int i = 0; i < 2; ++i) { - std::get<TReadSessionEvent::TDataReceivedEvent>(*dataEvents[i]).Commit(); - TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); - UNIT_ASSERT(event); - Y_ASSERT(std::holds_alternative<TReadSessionEvent::TCommitAcknowledgementEvent>(*event)); - Cerr << DebugString(*event) << Endl; - } - - auto eventFuture = session->WaitEvent(); - UNIT_ASSERT_C(!eventFuture.Wait(TDuration::Seconds(1)), DebugString(*session->GetEvent(false))); - - // Resume reading data. - session->ResumeReadingData(); - - // And now we can read. - auto dataEvent3 = GetDataEvent("message3"); - - session->Close(TDuration::Seconds(3)); - } -} - -Y_UNIT_TEST_SUITE(ReadSessionImplTest) { - void SuccessfulInitImpl(bool thenTimeout) { - TReadSessionImplTestSetup setup; - setup.Settings - .MaxTimeLag(TDuration::Seconds(32)) - .StartingMessageTimestamp(TInstant::Seconds(42)); - - setup.Settings.Topics_[0] - .StartingMessageTimestamp(TInstant::Seconds(146)) - .AppendPartitionGroupIds(100) - .AppendPartitionGroupIds(101); - - EXPECT_CALL(*setup.MockProcessorFactory, OnCreateProcessor(_)) - .WillOnce([&](){ - if (thenTimeout) { - setup.MockProcessorFactory->CreateAndThenTimeout(setup.MockProcessor); - } else { - setup.MockProcessorFactory->CreateProcessor(setup.MockProcessor); - } - }); - EXPECT_CALL(*setup.MockProcessor, OnInitRequest(_)) - .WillOnce(Invoke([](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::InitRequest& req) { - UNIT_ASSERT_STRINGS_EQUAL(req.consumer(), "TestConsumer"); - UNIT_ASSERT_VALUES_EQUAL(req.max_lag_duration_ms(), 32000); - UNIT_ASSERT_VALUES_EQUAL(req.start_from_written_at_ms(), 42000); - UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings_size(), 1); - UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings(0).topic(), "TestTopic"); - UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings(0).start_from_written_at_ms(), 146000); - UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings(0).partition_group_ids_size(), 2); - UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings(0).partition_group_ids(0), 100); - UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings(0).partition_group_ids(1), 101); - })); - setup.GetSession()->Start(); - setup.MockProcessorFactory->Wait(); - - EXPECT_CALL(*setup.MockProcessor, OnReadRequest(_)); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().InitResponse("session id")); - - setup.AssertNoEvents(); - } - - Y_UNIT_TEST(SuccessfulInit) { - SuccessfulInitImpl(false); - } - - Y_UNIT_TEST(SuccessfulInitAndThenTimeoutCallback) { - SuccessfulInitImpl(true); - } - - void ReconnectOnTmpErrorImpl(bool timeout, bool thenSecondCallback) { - TReadSessionImplTestSetup setup; - EXPECT_CALL(*setup.MockProcessorFactory, OnCreateProcessor(_)) - .WillOnce([&](){ - if (timeout) { - if (thenSecondCallback) { - setup.MockProcessorFactory->TimeoutAndThenCreate(nullptr); - } else { - setup.MockProcessorFactory->Timeout(); - } - } else { - if (thenSecondCallback) { - setup.MockProcessorFactory->FailAndThenTimeout(); - } else { - setup.MockProcessorFactory->FailCreation(); - } - } - }) - .WillOnce([&](){ setup.MockProcessorFactory->CreateProcessor(setup.MockProcessor); }); - EXPECT_CALL(*setup.MockProcessor, OnInitRequest(_)); - setup.GetSession()->Start(); - 
setup.MockProcessorFactory->Wait(); - - EXPECT_CALL(*setup.MockProcessor, OnReadRequest(_)); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().InitResponse("session id")); - - setup.AssertNoEvents(); - } - - Y_UNIT_TEST(ReconnectOnTmpError) { - ReconnectOnTmpErrorImpl(false, false); - } - - Y_UNIT_TEST(ReconnectOnTmpErrorAndThenTimeout) { - ReconnectOnTmpErrorImpl(false, true); - } - - Y_UNIT_TEST(ReconnectOnTimeout) { - ReconnectOnTmpErrorImpl(true, false); - } - - Y_UNIT_TEST(ReconnectOnTimeoutAndThenCreate) { - ReconnectOnTmpErrorImpl(true, true); - } - - void StopsRetryAfterFailedAttemptImpl(bool timeout) { - TReadSessionImplTestSetup setup; - setup.Settings.RetryPolicy(IRetryPolicy::GetNoRetryPolicy()); - EXPECT_CALL(*setup.MockProcessorFactory, OnCreateProcessor(_)) - .WillOnce([&]() { - if (timeout) - setup.MockProcessorFactory->Timeout(); - else - setup.MockProcessorFactory->FailCreation(); - }); - - EXPECT_CALL(*setup.MockErrorHandler, AbortSession(_)); - - setup.GetSession()->Start(); - setup.MockProcessorFactory->Wait(); - } - - Y_UNIT_TEST(StopsRetryAfterFailedAttempt) { - StopsRetryAfterFailedAttemptImpl(false); - } - - Y_UNIT_TEST(StopsRetryAfterTimeout) { - StopsRetryAfterFailedAttemptImpl(true); - } - - Y_UNIT_TEST(UsesOnRetryStateDuringRetries) { - class TTestRetryState : public IRetryState { - TDuration Delay; - - TMaybe<TDuration> GetNextRetryDelay(const NYdb::TStatus&) override { - Delay += TDuration::Seconds(1); - return Delay; - } - }; - - class TTestRetryPolicy : public IRetryPolicy { - IRetryState::TPtr CreateRetryState() const override { - return IRetryState::TPtr(new TTestRetryState()); - } - }; - - std::shared_ptr<IRetryState> state(new TTestRetryState()); - TReadSessionImplTestSetup setup; - ON_CALL(*setup.MockProcessorFactory, ValidateConnectTimeout(_)) - .WillByDefault([state](TDuration timeout) mutable { - UNIT_ASSERT_VALUES_EQUAL(timeout, *state->GetNextRetryDelay(NYdb::TStatus(TPlainStatus()))); - }); - - auto failCreation = [&]() { - setup.MockProcessorFactory->FailCreation(); - }; - - EXPECT_CALL(*setup.MockProcessorFactory, OnCreateProcessor(_)) - .WillOnce(failCreation) - .WillOnce(failCreation) - .WillOnce(failCreation) - .WillOnce([](){}); // No action. The end of test. - - setup.GetSession()->Start(); - while (setup.MockProcessorFactory->CreateCallsCount < 4) { - Sleep(TDuration::MilliSeconds(10)); - } - setup.MockProcessorFactory->Wait(); - - setup.AssertNoEvents(); - } - - Y_UNIT_TEST(ReconnectsAfterFailure) { - TReadSessionImplTestSetup setup; - setup.SuccessfulInit(); - - auto secondProcessor = MakeIntrusive<TMockReadSessionProcessor>(); - EXPECT_CALL(*setup.MockProcessorFactory, OnCreateProcessor(2)) - .WillOnce([&](){ setup.MockProcessorFactory->CreateProcessor(secondProcessor); }); - - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().Failure()); // Callback will be called. - } - - Y_UNIT_TEST(CreatePartitionStream) { - TReadSessionImplTestSetup setup; - setup.SuccessfulInit(); - - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().CreatePartitionStream()); // Callback will be called. 
- { - TVector<TReadSessionEvent::TEvent> events = setup.EventsQueue->GetEvents(true); - UNIT_ASSERT_VALUES_EQUAL(events.size(), 1); - UNIT_ASSERT_EVENT_TYPE(events[0], TReadSessionEvent::TCreatePartitionStreamEvent); - auto& event = std::get<TReadSessionEvent::TCreatePartitionStreamEvent>(events[0]); - auto stream = event.GetPartitionStream(); - UNIT_ASSERT(stream); - - EXPECT_CALL(*setup.MockProcessor, OnStartReadRequest(_)) - .WillOnce(Invoke([](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::StartRead& req) { - UNIT_ASSERT_STRINGS_EQUAL(req.topic().path(), "TestTopic"); - UNIT_ASSERT_STRINGS_EQUAL(req.cluster(), "TestCluster"); - UNIT_ASSERT_VALUES_EQUAL(req.partition(), 1); - UNIT_ASSERT_VALUES_EQUAL(req.assign_id(), 1); - UNIT_ASSERT_VALUES_EQUAL(req.read_offset(), 13); - UNIT_ASSERT_VALUES_EQUAL(req.commit_offset(), 31); - })); - - event.Confirm(13, 31); - } - } - - void DestroyPartitionStreamImpl(bool forceful) { - TReadSessionImplTestSetup setup; - setup.SuccessfulInit(); - - Y_UNUSED(forceful); - TPartitionStream::TPtr stream = setup.CreatePartitionStream(); - - // Send release by server. - { - TMockReadSessionProcessor::TServerReadInfo resp; - if (forceful) { - resp.ForcefulReleasePartitionStream(); - } else { - resp.ReleasePartitionStream(); - } - setup.MockProcessor->AddServerResponse(resp); // Callback will be called. - } - - // Check destroy event. - if (!forceful) { - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDestroyPartitionStreamEvent); - auto& destroyEvent = std::get<TReadSessionEvent::TDestroyPartitionStreamEvent>(*event); - UNIT_ASSERT_EQUAL(destroyEvent.GetPartitionStream(), stream); - - EXPECT_CALL(*setup.MockProcessor, OnReleasedRequest(_)) - .WillOnce(Invoke([](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Released& req) { - UNIT_ASSERT_STRINGS_EQUAL(req.topic().path(), "TestTopic"); - UNIT_ASSERT_STRINGS_EQUAL(req.cluster(), "TestCluster"); - UNIT_ASSERT_VALUES_EQUAL(req.partition(), 1); - UNIT_ASSERT_VALUES_EQUAL(req.assign_id(), 1); - })); - - destroyEvent.Confirm(); - } - - // Check closed event. - { - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TPartitionStreamClosedEvent); - auto& closedEvent = std::get<TReadSessionEvent::TPartitionStreamClosedEvent>(*event); - UNIT_ASSERT_EQUAL(closedEvent.GetPartitionStream(), stream); - - if (forceful) { - UNIT_ASSERT_EQUAL_C(closedEvent.GetReason(), TReadSessionEvent::TPartitionStreamClosedEvent::EReason::Lost, DebugString(*event)); - } else { - UNIT_ASSERT_EQUAL_C(closedEvent.GetReason(), TReadSessionEvent::TPartitionStreamClosedEvent::EReason::DestroyConfirmedByUser, DebugString(*event)); - } - } - } - - Y_UNIT_TEST(ForcefulDestroyPartitionStream) { - DestroyPartitionStreamImpl(true); - } - - Y_UNIT_TEST(DestroyPartitionStreamRequest) { - DestroyPartitionStreamImpl(false); - } - - Y_UNIT_TEST(ProperlyOrdersDecompressedData) { - TReadSessionImplTestSetup setup; - setup.Settings.DecompressionExecutor(new TReorderingExecutor()); - setup.SuccessfulInit(); - TPartitionStream::TPtr stream = setup.CreatePartitionStream(); - for (ui64 i = 1; i <= 2; ++i) { - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionData(i) - .Batch("src_id") - .CompressMessage(i, TStringBuilder() << "message" << i)); // Callback will be called. 
- } - - for (ui64 i = 1; i <= 2; ++i) { - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); - auto& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 1); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages()[0].GetData(), TStringBuilder() << "message" << i); - } - - setup.AssertNoEvents(); - } - + UNIT_ASSERT_STRING_CONTAINS(DebugString(*event), "Session was gracefully closed"); + } + } + + Y_UNIT_TEST(ReadSessionWithAbort) { + ReadSessionImpl(false, true); + } + + Y_UNIT_TEST(ReadSessionWithClose) { + ReadSessionImpl(true, true); + } + + Y_UNIT_TEST(ReadSessionWithCloseNotCommitted) { + ReadSessionImpl(true, false); + } + + Y_UNIT_TEST(ReadSessionWithExplicitlySpecifiedPartitions) { + ReadSessionImpl(true, true, true); + } + + Y_UNIT_TEST(SettingsValidation) { + TPersQueueYdbSdkTestSetup setup("SettingsValidation"); + const auto goodSettings = setup.GetReadSessionSettings(); + +#define ASSERT_BAD_VALIDATION \ + std::shared_ptr<IReadSession> session = \ + setup.GetPersQueueClient().CreateReadSession(settings); \ + session->WaitEvent().Wait(); \ + TMaybe<TReadSessionEvent::TEvent> event = \ + session->GetEvent(true); \ + UNIT_ASSERT(event); \ + Cerr << DebugString(*event) << Endl; \ + UNIT_ASSERT_EVENT_TYPE(*event, TSessionClosedEvent); \ + /**/ + + // No topics to read. + { + auto settings = goodSettings; + settings.Topics_.clear(); + ASSERT_BAD_VALIDATION; + } + + // No consumer name. + { + auto settings = goodSettings; + settings.ConsumerName(""); + ASSERT_BAD_VALIDATION; + } + + // Small max memory usage. + { + auto settings = goodSettings; + settings.MaxMemoryUsageBytes(100); + ASSERT_BAD_VALIDATION; + } + } + + Y_UNIT_TEST(ClosesAfterFailedConnectionToCds) { + TPersQueueYdbSdkTestSetup setup("ClosesAfterFailedConnectionToCds"); + setup.ShutdownGRpc(); + + TReadSessionSettings settings = setup.GetReadSessionSettings(); + // Set policy with max retries == 3. + settings.RetryPolicy(IRetryPolicy::GetExponentialBackoffPolicy(TDuration::MilliSeconds(10), TDuration::MilliSeconds(10), TDuration::MilliSeconds(100), 3)); + std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(settings); + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); + UNIT_ASSERT(event); + Cerr << DebugString(*event) << Endl; + UNIT_ASSERT_EVENT_TYPE(*event, TSessionClosedEvent); + } + + Y_UNIT_TEST(SpecifyClustersExplicitly) { + TPersQueueYdbSdkTestSetup setup("SpecifyClustersExplicitly"); + + auto settings = setup.GetReadSessionSettings(); + settings.ReadOriginal({setup.GetLocalCluster()}); + + // Success. + { + std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(settings); + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); + UNIT_ASSERT(event); + Cerr << DebugString(*event) << Endl; + UNIT_ASSERT_NOT_EVENT_TYPE(*event, TSessionClosedEvent); + } + + // Failure: one cluster endpoint is invalid. + { + settings.AppendClusters("unknown_cluster"); + std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(settings); + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); + UNIT_ASSERT(event); + Cerr << DebugString(*event) << Endl; + UNIT_ASSERT_EVENT_TYPE(*event, TSessionClosedEvent); + } + + // Failure: no valid cluster endpoints. 
+ { + settings.ReadOriginal({"unknown_cluster"}); + std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(settings); + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); + UNIT_ASSERT(event); + Cerr << DebugString(*event) << Endl; + UNIT_ASSERT_EVENT_TYPE(*event, TSessionClosedEvent); + } + } + + Y_UNIT_TEST(StopResumeReadingData) { + NYdb::NPersQueue::NTests::TPersQueueYdbSdkTestSetup setup("ReadSession"); + setup.WriteToTopic({"message1"}); + std::shared_ptr<IReadSession> session = setup.GetPersQueueClient().CreateReadSession(setup.GetReadSessionSettings()); + + // Event 1: create partition stream. + { + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TCreatePartitionStreamEvent); + std::get<TReadSessionEvent::TCreatePartitionStreamEvent>(*event).Confirm(); + Cerr << DebugString(*event) << Endl; + } + + // Event 2: receive data. + auto GetDataEvent = [&](const TString& content) -> TMaybe<TReadSessionEvent::TEvent> { + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); + TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 1); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages()[0].GetData(), content); + Cerr << DebugString(*event) << Endl; + return event; + }; + + TMaybe<TReadSessionEvent::TEvent> dataEvents[2]; + + dataEvents[0] = GetDataEvent("message1"); + + // Stop reading data. + session->StopReadingData(); + + // Write data. + setup.WriteToTopic({"message2"}); + + // Already requested read. + dataEvents[1] = GetDataEvent("message2"); + + // Write data. + setup.WriteToTopic({"message3"}); + + // Commit and check that other events will come. + for (int i = 0; i < 2; ++i) { + std::get<TReadSessionEvent::TDataReceivedEvent>(*dataEvents[i]).Commit(); + TMaybe<TReadSessionEvent::TEvent> event = session->GetEvent(true); + UNIT_ASSERT(event); + Y_ASSERT(std::holds_alternative<TReadSessionEvent::TCommitAcknowledgementEvent>(*event)); + Cerr << DebugString(*event) << Endl; + } + + auto eventFuture = session->WaitEvent(); + UNIT_ASSERT_C(!eventFuture.Wait(TDuration::Seconds(1)), DebugString(*session->GetEvent(false))); + + // Resume reading data. + session->ResumeReadingData(); + + // And now we can read. 
+ auto dataEvent3 = GetDataEvent("message3"); + + session->Close(TDuration::Seconds(3)); + } +} + +Y_UNIT_TEST_SUITE(ReadSessionImplTest) { + void SuccessfulInitImpl(bool thenTimeout) { + TReadSessionImplTestSetup setup; + setup.Settings + .MaxTimeLag(TDuration::Seconds(32)) + .StartingMessageTimestamp(TInstant::Seconds(42)); + + setup.Settings.Topics_[0] + .StartingMessageTimestamp(TInstant::Seconds(146)) + .AppendPartitionGroupIds(100) + .AppendPartitionGroupIds(101); + + EXPECT_CALL(*setup.MockProcessorFactory, OnCreateProcessor(_)) + .WillOnce([&](){ + if (thenTimeout) { + setup.MockProcessorFactory->CreateAndThenTimeout(setup.MockProcessor); + } else { + setup.MockProcessorFactory->CreateProcessor(setup.MockProcessor); + } + }); + EXPECT_CALL(*setup.MockProcessor, OnInitRequest(_)) + .WillOnce(Invoke([](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::InitRequest& req) { + UNIT_ASSERT_STRINGS_EQUAL(req.consumer(), "TestConsumer"); + UNIT_ASSERT_VALUES_EQUAL(req.max_lag_duration_ms(), 32000); + UNIT_ASSERT_VALUES_EQUAL(req.start_from_written_at_ms(), 42000); + UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings_size(), 1); + UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings(0).topic(), "TestTopic"); + UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings(0).start_from_written_at_ms(), 146000); + UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings(0).partition_group_ids_size(), 2); + UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings(0).partition_group_ids(0), 100); + UNIT_ASSERT_VALUES_EQUAL(req.topics_read_settings(0).partition_group_ids(1), 101); + })); + setup.GetSession()->Start(); + setup.MockProcessorFactory->Wait(); + + EXPECT_CALL(*setup.MockProcessor, OnReadRequest(_)); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().InitResponse("session id")); + + setup.AssertNoEvents(); + } + + Y_UNIT_TEST(SuccessfulInit) { + SuccessfulInitImpl(false); + } + + Y_UNIT_TEST(SuccessfulInitAndThenTimeoutCallback) { + SuccessfulInitImpl(true); + } + + void ReconnectOnTmpErrorImpl(bool timeout, bool thenSecondCallback) { + TReadSessionImplTestSetup setup; + EXPECT_CALL(*setup.MockProcessorFactory, OnCreateProcessor(_)) + .WillOnce([&](){ + if (timeout) { + if (thenSecondCallback) { + setup.MockProcessorFactory->TimeoutAndThenCreate(nullptr); + } else { + setup.MockProcessorFactory->Timeout(); + } + } else { + if (thenSecondCallback) { + setup.MockProcessorFactory->FailAndThenTimeout(); + } else { + setup.MockProcessorFactory->FailCreation(); + } + } + }) + .WillOnce([&](){ setup.MockProcessorFactory->CreateProcessor(setup.MockProcessor); }); + EXPECT_CALL(*setup.MockProcessor, OnInitRequest(_)); + setup.GetSession()->Start(); + setup.MockProcessorFactory->Wait(); + + EXPECT_CALL(*setup.MockProcessor, OnReadRequest(_)); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().InitResponse("session id")); + + setup.AssertNoEvents(); + } + + Y_UNIT_TEST(ReconnectOnTmpError) { + ReconnectOnTmpErrorImpl(false, false); + } + + Y_UNIT_TEST(ReconnectOnTmpErrorAndThenTimeout) { + ReconnectOnTmpErrorImpl(false, true); + } + + Y_UNIT_TEST(ReconnectOnTimeout) { + ReconnectOnTmpErrorImpl(true, false); + } + + Y_UNIT_TEST(ReconnectOnTimeoutAndThenCreate) { + ReconnectOnTmpErrorImpl(true, true); + } + + void StopsRetryAfterFailedAttemptImpl(bool timeout) { + TReadSessionImplTestSetup setup; + setup.Settings.RetryPolicy(IRetryPolicy::GetNoRetryPolicy()); + EXPECT_CALL(*setup.MockProcessorFactory, OnCreateProcessor(_)) + .WillOnce([&]() { + 
if (timeout) + setup.MockProcessorFactory->Timeout(); + else + setup.MockProcessorFactory->FailCreation(); + }); + + EXPECT_CALL(*setup.MockErrorHandler, AbortSession(_)); + + setup.GetSession()->Start(); + setup.MockProcessorFactory->Wait(); + } + + Y_UNIT_TEST(StopsRetryAfterFailedAttempt) { + StopsRetryAfterFailedAttemptImpl(false); + } + + Y_UNIT_TEST(StopsRetryAfterTimeout) { + StopsRetryAfterFailedAttemptImpl(true); + } + + Y_UNIT_TEST(UsesOnRetryStateDuringRetries) { + class TTestRetryState : public IRetryState { + TDuration Delay; + + TMaybe<TDuration> GetNextRetryDelay(const NYdb::TStatus&) override { + Delay += TDuration::Seconds(1); + return Delay; + } + }; + + class TTestRetryPolicy : public IRetryPolicy { + IRetryState::TPtr CreateRetryState() const override { + return IRetryState::TPtr(new TTestRetryState()); + } + }; + + std::shared_ptr<IRetryState> state(new TTestRetryState()); + TReadSessionImplTestSetup setup; + ON_CALL(*setup.MockProcessorFactory, ValidateConnectTimeout(_)) + .WillByDefault([state](TDuration timeout) mutable { + UNIT_ASSERT_VALUES_EQUAL(timeout, *state->GetNextRetryDelay(NYdb::TStatus(TPlainStatus()))); + }); + + auto failCreation = [&]() { + setup.MockProcessorFactory->FailCreation(); + }; + + EXPECT_CALL(*setup.MockProcessorFactory, OnCreateProcessor(_)) + .WillOnce(failCreation) + .WillOnce(failCreation) + .WillOnce(failCreation) + .WillOnce([](){}); // No action. The end of test. + + setup.GetSession()->Start(); + while (setup.MockProcessorFactory->CreateCallsCount < 4) { + Sleep(TDuration::MilliSeconds(10)); + } + setup.MockProcessorFactory->Wait(); + + setup.AssertNoEvents(); + } + + Y_UNIT_TEST(ReconnectsAfterFailure) { + TReadSessionImplTestSetup setup; + setup.SuccessfulInit(); + + auto secondProcessor = MakeIntrusive<TMockReadSessionProcessor>(); + EXPECT_CALL(*setup.MockProcessorFactory, OnCreateProcessor(2)) + .WillOnce([&](){ setup.MockProcessorFactory->CreateProcessor(secondProcessor); }); + + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().Failure()); // Callback will be called. + } + + Y_UNIT_TEST(CreatePartitionStream) { + TReadSessionImplTestSetup setup; + setup.SuccessfulInit(); + + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().CreatePartitionStream()); // Callback will be called. + { + TVector<TReadSessionEvent::TEvent> events = setup.EventsQueue->GetEvents(true); + UNIT_ASSERT_VALUES_EQUAL(events.size(), 1); + UNIT_ASSERT_EVENT_TYPE(events[0], TReadSessionEvent::TCreatePartitionStreamEvent); + auto& event = std::get<TReadSessionEvent::TCreatePartitionStreamEvent>(events[0]); + auto stream = event.GetPartitionStream(); + UNIT_ASSERT(stream); + + EXPECT_CALL(*setup.MockProcessor, OnStartReadRequest(_)) + .WillOnce(Invoke([](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::StartRead& req) { + UNIT_ASSERT_STRINGS_EQUAL(req.topic().path(), "TestTopic"); + UNIT_ASSERT_STRINGS_EQUAL(req.cluster(), "TestCluster"); + UNIT_ASSERT_VALUES_EQUAL(req.partition(), 1); + UNIT_ASSERT_VALUES_EQUAL(req.assign_id(), 1); + UNIT_ASSERT_VALUES_EQUAL(req.read_offset(), 13); + UNIT_ASSERT_VALUES_EQUAL(req.commit_offset(), 31); + })); + + event.Confirm(13, 31); + } + } + + void DestroyPartitionStreamImpl(bool forceful) { + TReadSessionImplTestSetup setup; + setup.SuccessfulInit(); + + Y_UNUSED(forceful); + TPartitionStream::TPtr stream = setup.CreatePartitionStream(); + + // Send release by server. 
+ { + TMockReadSessionProcessor::TServerReadInfo resp; + if (forceful) { + resp.ForcefulReleasePartitionStream(); + } else { + resp.ReleasePartitionStream(); + } + setup.MockProcessor->AddServerResponse(resp); // Callback will be called. + } + + // Check destroy event. + if (!forceful) { + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDestroyPartitionStreamEvent); + auto& destroyEvent = std::get<TReadSessionEvent::TDestroyPartitionStreamEvent>(*event); + UNIT_ASSERT_EQUAL(destroyEvent.GetPartitionStream(), stream); + + EXPECT_CALL(*setup.MockProcessor, OnReleasedRequest(_)) + .WillOnce(Invoke([](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Released& req) { + UNIT_ASSERT_STRINGS_EQUAL(req.topic().path(), "TestTopic"); + UNIT_ASSERT_STRINGS_EQUAL(req.cluster(), "TestCluster"); + UNIT_ASSERT_VALUES_EQUAL(req.partition(), 1); + UNIT_ASSERT_VALUES_EQUAL(req.assign_id(), 1); + })); + + destroyEvent.Confirm(); + } + + // Check closed event. + { + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TPartitionStreamClosedEvent); + auto& closedEvent = std::get<TReadSessionEvent::TPartitionStreamClosedEvent>(*event); + UNIT_ASSERT_EQUAL(closedEvent.GetPartitionStream(), stream); + + if (forceful) { + UNIT_ASSERT_EQUAL_C(closedEvent.GetReason(), TReadSessionEvent::TPartitionStreamClosedEvent::EReason::Lost, DebugString(*event)); + } else { + UNIT_ASSERT_EQUAL_C(closedEvent.GetReason(), TReadSessionEvent::TPartitionStreamClosedEvent::EReason::DestroyConfirmedByUser, DebugString(*event)); + } + } + } + + Y_UNIT_TEST(ForcefulDestroyPartitionStream) { + DestroyPartitionStreamImpl(true); + } + + Y_UNIT_TEST(DestroyPartitionStreamRequest) { + DestroyPartitionStreamImpl(false); + } + + Y_UNIT_TEST(ProperlyOrdersDecompressedData) { + TReadSessionImplTestSetup setup; + setup.Settings.DecompressionExecutor(new TReorderingExecutor()); + setup.SuccessfulInit(); + TPartitionStream::TPtr stream = setup.CreatePartitionStream(); + for (ui64 i = 1; i <= 2; ++i) { + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionData(i) + .Batch("src_id") + .CompressMessage(i, TStringBuilder() << "message" << i)); // Callback will be called. + } + + for (ui64 i = 1; i <= 2; ++i) { + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); + auto& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 1); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages()[0].GetData(), TStringBuilder() << "message" << i); + } + + setup.AssertNoEvents(); + } + Y_UNIT_TEST(BrokenCompressedData) { TReadSessionImplTestSetup setup; setup.Settings.DecompressionExecutor(new TReorderingExecutor(1)); @@ -1198,176 +1198,176 @@ Y_UNIT_TEST_SUITE(ReadSessionImplTest) { setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() .PartitionData(1) .Batch("src_id") - .BrokenCompressMessage(1, "message") - .CompressMessage(2, "message2") - .CompressMessage(3, "message3")); - - // Exception was passed during decompression. 
- { - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); - auto& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); - Cerr << dataEvent.DebugString() << "\n"; - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 3); - UNIT_ASSERT_EXCEPTION(dataEvent.GetMessages()[0].GetData(), TZLibDecompressorError); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages()[1].GetData(), "message2"); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages()[2].GetData(), "message3"); - } + .BrokenCompressMessage(1, "message") + .CompressMessage(2, "message2") + .CompressMessage(3, "message3")); + + // Exception was passed during decompression. + { + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); + auto& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); + Cerr << dataEvent.DebugString() << "\n"; + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 3); + UNIT_ASSERT_EXCEPTION(dataEvent.GetMessages()[0].GetData(), TZLibDecompressorError); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages()[1].GetData(), "message2"); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages()[2].GetData(), "message3"); + } setup.AssertNoEvents(); } - void DecompressImpl(Ydb::PersQueue::V1::Codec codec, const TString& data = "msg", ::IExecutor::TPtr executor = nullptr) { - TReadSessionImplTestSetup setup; - if (executor) { - setup.Settings.DecompressionExecutor(executor); - } + void DecompressImpl(Ydb::PersQueue::V1::Codec codec, const TString& data = "msg", ::IExecutor::TPtr executor = nullptr) { + TReadSessionImplTestSetup setup; + if (executor) { + setup.Settings.DecompressionExecutor(executor); + } setup.SuccessfulInit(false); - TPartitionStream::TPtr stream = setup.CreatePartitionStream(); + TPartitionStream::TPtr stream = setup.CreatePartitionStream(); EXPECT_CALL(*setup.MockProcessor, OnReadRequest(_)); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionData(1) - .Batch("src_id") - .CompressMessage(1, data, codec)); - - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); - auto& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 1); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages()[0].GetData(), data); - - setup.AssertNoEvents(); - } - - Y_UNIT_TEST(DecompressRaw) { - DecompressImpl(Ydb::PersQueue::V1::CODEC_RAW); - DecompressImpl(Ydb::PersQueue::V1::CODEC_UNSPECIFIED); // The same. - } - - Y_UNIT_TEST(DecompressGzip) { - DecompressImpl(Ydb::PersQueue::V1::CODEC_GZIP); - } - - Y_UNIT_TEST(DecompressZstd) { - DecompressImpl(Ydb::PersQueue::V1::CODEC_ZSTD); - } - - Y_UNIT_TEST(DecompressRawEmptyMessage) { - DecompressImpl(Ydb::PersQueue::V1::CODEC_RAW, ""); - DecompressImpl(Ydb::PersQueue::V1::CODEC_UNSPECIFIED, ""); // The same. 
- } - - Y_UNIT_TEST(DecompressGzipEmptyMessage) { - DecompressImpl(Ydb::PersQueue::V1::CODEC_GZIP, ""); - } - - Y_UNIT_TEST(DecompressZstdEmptyMessage) { - DecompressImpl(Ydb::PersQueue::V1::CODEC_ZSTD, ""); - } - - Y_UNIT_TEST(DecompressWithSynchronousExecutor) { - DecompressImpl(Ydb::PersQueue::V1::CODEC_ZSTD, "msg", new TSynchronousExecutor()); - } - - TString GenerateMessageData(size_t size) { - TString result; - result.reserve(size); - unsigned char ch = static_cast<unsigned char>(size); - while (size--) { - result.append(static_cast<char>(ch)); - do { - ch = ch * 7 + 23; - } while (ch == 0); - } - return result; - } - - void PacksBatchesImpl(size_t serverBatchesCount, size_t messagesInServerBatchCount, size_t messageSize, size_t batchLimit, size_t batches, size_t messagesInBatch, size_t expectedTasks = 0, size_t reorderedCycleSize = 0, size_t memoryLimit = 0) { - if (!expectedTasks) { - expectedTasks = serverBatchesCount; - } - if (!reorderedCycleSize) { - reorderedCycleSize = expectedTasks; - } - TReadSessionImplTestSetup setup; - if (memoryLimit) { - setup.Settings.MaxMemoryUsageBytes(memoryLimit); - } - auto executor = MakeIntrusive<TReorderingExecutor>(reorderedCycleSize); - setup.Settings.DecompressionExecutor(executor); - setup.SuccessfulInit(); - TPartitionStream::TPtr stream = setup.CreatePartitionStream(); - - const TString messageData = GenerateMessageData(messageSize); - const TString compressedMessageData = Compress(messageData); - Cerr << "Message data size: " << messageData.size() << Endl; - Cerr << "Compressed message data size: " << compressedMessageData.size() << Endl; - ui64 offset = 1; - ui64 seqNo = 42; - THashSet<ui64> committedCookies; + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionData(1) + .Batch("src_id") + .CompressMessage(1, data, codec)); + + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); + auto& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 1); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages()[0].GetData(), data); + + setup.AssertNoEvents(); + } + + Y_UNIT_TEST(DecompressRaw) { + DecompressImpl(Ydb::PersQueue::V1::CODEC_RAW); + DecompressImpl(Ydb::PersQueue::V1::CODEC_UNSPECIFIED); // The same. + } + + Y_UNIT_TEST(DecompressGzip) { + DecompressImpl(Ydb::PersQueue::V1::CODEC_GZIP); + } + + Y_UNIT_TEST(DecompressZstd) { + DecompressImpl(Ydb::PersQueue::V1::CODEC_ZSTD); + } + + Y_UNIT_TEST(DecompressRawEmptyMessage) { + DecompressImpl(Ydb::PersQueue::V1::CODEC_RAW, ""); + DecompressImpl(Ydb::PersQueue::V1::CODEC_UNSPECIFIED, ""); // The same. 
+ } + + Y_UNIT_TEST(DecompressGzipEmptyMessage) { + DecompressImpl(Ydb::PersQueue::V1::CODEC_GZIP, ""); + } + + Y_UNIT_TEST(DecompressZstdEmptyMessage) { + DecompressImpl(Ydb::PersQueue::V1::CODEC_ZSTD, ""); + } + + Y_UNIT_TEST(DecompressWithSynchronousExecutor) { + DecompressImpl(Ydb::PersQueue::V1::CODEC_ZSTD, "msg", new TSynchronousExecutor()); + } + + TString GenerateMessageData(size_t size) { + TString result; + result.reserve(size); + unsigned char ch = static_cast<unsigned char>(size); + while (size--) { + result.append(static_cast<char>(ch)); + do { + ch = ch * 7 + 23; + } while (ch == 0); + } + return result; + } + + void PacksBatchesImpl(size_t serverBatchesCount, size_t messagesInServerBatchCount, size_t messageSize, size_t batchLimit, size_t batches, size_t messagesInBatch, size_t expectedTasks = 0, size_t reorderedCycleSize = 0, size_t memoryLimit = 0) { + if (!expectedTasks) { + expectedTasks = serverBatchesCount; + } + if (!reorderedCycleSize) { + reorderedCycleSize = expectedTasks; + } + TReadSessionImplTestSetup setup; + if (memoryLimit) { + setup.Settings.MaxMemoryUsageBytes(memoryLimit); + } + auto executor = MakeIntrusive<TReorderingExecutor>(reorderedCycleSize); + setup.Settings.DecompressionExecutor(executor); + setup.SuccessfulInit(); + TPartitionStream::TPtr stream = setup.CreatePartitionStream(); + + const TString messageData = GenerateMessageData(messageSize); + const TString compressedMessageData = Compress(messageData); + Cerr << "Message data size: " << messageData.size() << Endl; + Cerr << "Compressed message data size: " << compressedMessageData.size() << Endl; + ui64 offset = 1; + ui64 seqNo = 42; + THashSet<ui64> committedCookies; THashSet<ui64> committedOffsets; - EXPECT_CALL(*setup.MockProcessor, OnCommitRequest(_)) + EXPECT_CALL(*setup.MockProcessor, OnCommitRequest(_)) .WillRepeatedly(Invoke([&committedCookies, &committedOffsets](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Commit& req) { - for (const auto& commit : req.cookies()) { - committedCookies.insert(commit.partition_cookie()); - } + for (const auto& commit : req.cookies()) { + committedCookies.insert(commit.partition_cookie()); + } for (const auto& range : req.offset_ranges()) { Cerr << "GOT RANGE " << range.start_offset() << " " << range.end_offset() << "\n"; for (ui64 i = range.start_offset(); i < range.end_offset(); ++i) { committedOffsets.insert(i); } } - })); - - for (ui64 i = 1; i <= serverBatchesCount; ++i) { - TMockReadSessionProcessor::TServerReadInfo resp; - resp.PartitionData(i).Batch("src_id", TInstant::Seconds(123), "127.0.0.1", { { "k", "v" }, { "k1", "v1" }}); - for (size_t j = 0; j < messagesInServerBatchCount; ++j) { - resp.Message(offset++, compressedMessageData, Ydb::PersQueue::V1::CODEC_GZIP, seqNo++); - if (j == messagesInServerBatchCount / 2) { // This may lead to empty batch. Client is expected to behave well with empty batch in protobuf. 
- resp.Batch("src_id_2", TInstant::Seconds(321), "1.0.0.127", { { "v", "k" }, { "v1", "k1" }}); - } - } - setup.MockProcessor->AddServerResponse(resp); - } - - ui64 prevOffset = 0; - ui64 prevSeqNo = 41; - for (size_t i = 0; i < batches; ++i) { - Cerr << "Getting new event" << Endl; - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true, batchLimit); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); - Cerr << DebugString(*event) << Endl; - auto& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), messagesInBatch); - for (const auto& message : dataEvent.GetMessages()) { - auto meta = message.GetMeta(); - UNIT_ASSERT_VALUES_EQUAL(message.GetData(), messageData); - UNIT_ASSERT_VALUES_EQUAL(message.GetOffset(), prevOffset + 1); - UNIT_ASSERT_VALUES_EQUAL(message.GetSeqNo(), prevSeqNo + 1); - ++prevOffset; - ++prevSeqNo; - UNIT_ASSERT_VALUES_EQUAL(message.GetCreateTime(), TInstant::MilliSeconds(42)); - if (message.GetMessageGroupId() == "src_id") { - UNIT_ASSERT_VALUES_EQUAL(message.GetWriteTime(), TInstant::Seconds(123)); - UNIT_ASSERT_VALUES_EQUAL(message.GetIp(), "127.0.0.1"); - UNIT_ASSERT_VALUES_EQUAL(meta->Fields.at("k"), "v"); - UNIT_ASSERT_VALUES_EQUAL(meta->Fields.at("k1"), "v1"); - } else if (message.GetMessageGroupId() == "src_id_2") { - UNIT_ASSERT_VALUES_EQUAL(message.GetWriteTime(), TInstant::Seconds(321)); - UNIT_ASSERT_VALUES_EQUAL(message.GetIp(), "1.0.0.127"); - UNIT_ASSERT_VALUES_EQUAL(meta->Fields.at("v"), "k"); - UNIT_ASSERT_VALUES_EQUAL(meta->Fields.at("v1"), "k1"); - } else { - UNIT_ASSERT_C(false, message.GetMessageGroupId()); - } - } - dataEvent.Commit(); - } + })); + + for (ui64 i = 1; i <= serverBatchesCount; ++i) { + TMockReadSessionProcessor::TServerReadInfo resp; + resp.PartitionData(i).Batch("src_id", TInstant::Seconds(123), "127.0.0.1", { { "k", "v" }, { "k1", "v1" }}); + for (size_t j = 0; j < messagesInServerBatchCount; ++j) { + resp.Message(offset++, compressedMessageData, Ydb::PersQueue::V1::CODEC_GZIP, seqNo++); + if (j == messagesInServerBatchCount / 2) { // This may lead to empty batch. Client is expected to behave well with empty batch in protobuf. 
+ resp.Batch("src_id_2", TInstant::Seconds(321), "1.0.0.127", { { "v", "k" }, { "v1", "k1" }}); + } + } + setup.MockProcessor->AddServerResponse(resp); + } + + ui64 prevOffset = 0; + ui64 prevSeqNo = 41; + for (size_t i = 0; i < batches; ++i) { + Cerr << "Getting new event" << Endl; + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true, batchLimit); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); + Cerr << DebugString(*event) << Endl; + auto& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), messagesInBatch); + for (const auto& message : dataEvent.GetMessages()) { + auto meta = message.GetMeta(); + UNIT_ASSERT_VALUES_EQUAL(message.GetData(), messageData); + UNIT_ASSERT_VALUES_EQUAL(message.GetOffset(), prevOffset + 1); + UNIT_ASSERT_VALUES_EQUAL(message.GetSeqNo(), prevSeqNo + 1); + ++prevOffset; + ++prevSeqNo; + UNIT_ASSERT_VALUES_EQUAL(message.GetCreateTime(), TInstant::MilliSeconds(42)); + if (message.GetMessageGroupId() == "src_id") { + UNIT_ASSERT_VALUES_EQUAL(message.GetWriteTime(), TInstant::Seconds(123)); + UNIT_ASSERT_VALUES_EQUAL(message.GetIp(), "127.0.0.1"); + UNIT_ASSERT_VALUES_EQUAL(meta->Fields.at("k"), "v"); + UNIT_ASSERT_VALUES_EQUAL(meta->Fields.at("k1"), "v1"); + } else if (message.GetMessageGroupId() == "src_id_2") { + UNIT_ASSERT_VALUES_EQUAL(message.GetWriteTime(), TInstant::Seconds(321)); + UNIT_ASSERT_VALUES_EQUAL(message.GetIp(), "1.0.0.127"); + UNIT_ASSERT_VALUES_EQUAL(meta->Fields.at("v"), "k"); + UNIT_ASSERT_VALUES_EQUAL(meta->Fields.at("v1"), "k1"); + } else { + UNIT_ASSERT_C(false, message.GetMessageGroupId()); + } + } + dataEvent.Commit(); + } if (committedOffsets.empty()) { UNIT_ASSERT_VALUES_EQUAL(committedCookies.size(), serverBatchesCount); for (ui64 i = 1; i <= serverBatchesCount; ++i) { @@ -1378,249 +1378,249 @@ Y_UNIT_TEST_SUITE(ReadSessionImplTest) { for (ui64 i = 0; i <= batches * messagesInBatch; ++i) { UNIT_ASSERT(committedOffsets.contains(i)); } - - } - UNIT_ASSERT_VALUES_EQUAL(executor->GetTasksAdded(), expectedTasks); - - setup.AssertNoEvents(); - } - - Y_UNIT_TEST(PacksBatches_BatchABitBiggerThanLimit) { - const size_t serverBatchesCount = 2; - const size_t messagesInServerBatchCount = 4; - const size_t messageSize = 11; - const size_t batchLimit = 20; - const size_t batches = 4; - const size_t messagesInBatch = 2; - PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch); - } - - Y_UNIT_TEST(PacksBatches_ExactlyTwoMessagesInBatch) { - const size_t serverBatchesCount = 2; - const size_t messagesInServerBatchCount = 4; - const size_t messageSize = 10; // Exactly two messages in batch. - const size_t batchLimit = 20; - const size_t batches = 4; - const size_t messagesInBatch = 2; - PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch); - } - - Y_UNIT_TEST(PacksBatches_BatchesEqualToServerBatches) { - const size_t serverBatchesCount = 2; - const size_t messagesInServerBatchCount = 4; - const size_t messageSize = 10; - const size_t batchLimit = 40; // As in server messages. 
- const size_t batches = 2; - const size_t messagesInBatch = 4; - PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch); - } - - Y_UNIT_TEST(PacksBatches_OneMessageInEveryBatch) { - const size_t serverBatchesCount = 2; - const size_t messagesInServerBatchCount = 4; - const size_t messageSize = 100; - const size_t batchLimit = 90; // One message in every batch. - const size_t batches = 8; - const size_t messagesInBatch = 1; - PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch); - } - - Y_UNIT_TEST(PacksBatches_BigBatchDecompressWithTwoBatchTasks) { - const size_t serverBatchesCount = 1; - const size_t messagesInServerBatchCount = 200; // Many messages in order to fit in two 512 KB-tasks packs. - const size_t messageSize = 1000000; - const size_t batchLimit = std::numeric_limits<size_t>::max(); - const size_t batches = 1; - const size_t messagesInBatch = messagesInServerBatchCount; - const size_t expectedDecompressionTasksCount = 2; - PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch, expectedDecompressionTasksCount); - } - - Y_UNIT_TEST(PacksBatches_DecompressesOneMessagePerTime) { - const size_t serverBatchesCount = 1; - const size_t messagesInServerBatchCount = 10; // Many messages in order to fit in two 512 KB-tasks packs. - const size_t messageSize = 1000000; - const size_t batchLimit = std::numeric_limits<size_t>::max(); - const size_t batches = messagesInServerBatchCount; - const size_t messagesInBatch = 1; - const size_t expectedDecompressionTasksCount = messagesInServerBatchCount; - const size_t reorderedCycleSize = 1; - const size_t memoryLimit = 10; - PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch, expectedDecompressionTasksCount, reorderedCycleSize, memoryLimit); - } - - Y_UNIT_TEST(UnpackBigBatchWithTwoPartitions) { - TReadSessionImplTestSetup setup; - - setup.Settings.MaxMemoryUsageBytes(5000); - setup.SuccessfulInit(); - TPartitionStream::TPtr stream1 = setup.CreatePartitionStream("TestTopic", "TestCluster", 1, 1); - TPartitionStream::TPtr stream2 = setup.CreatePartitionStream("TestTopic", "TestCluster", 2, 2); - - - const TString messageData = GenerateMessageData(100); - const TString compressedMessageData = Compress(messageData); - ui64 offset = 1; - ui64 seqNo = 42; - - for (ui64 partition = 1; partition <= 2; ++partition) { - TMockReadSessionProcessor::TServerReadInfo resp; - resp.PartitionData(partition, "TestTopic", "TestCluster", partition, partition) - .Batch("src_id", TInstant::Seconds(123), "127.0.0.1", { { "k", "v" }, { "k1", "v1" }}); - - for (size_t i = 0; i < 50; ++i) { - if (i == 22) { - resp.Batch("src_id2", TInstant::Seconds(123), "127.0.0.1", { { "k", "v" }, { "k1", "v1" }}); - } - resp.Message(offset++, compressedMessageData, Ydb::PersQueue::V1::CODEC_GZIP, seqNo++); - } - setup.MockProcessor->AddServerResponse(resp); - } - - { - TVector<TReadSessionEvent::TEvent> events = setup.EventsQueue->GetEvents(true); - UNIT_ASSERT_VALUES_EQUAL(events.size(), 1); - UNIT_ASSERT_EVENT_TYPE(events[0], TReadSessionEvent::TDataReceivedEvent); - TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(events[0]); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 28); - UNIT_ASSERT_EQUAL(dataEvent.GetPartitionStream(), stream1); - } - - { - 
TVector<TReadSessionEvent::TEvent> events = setup.EventsQueue->GetEvents(true); - UNIT_ASSERT_VALUES_EQUAL(events.size(), 1); - UNIT_ASSERT_EVENT_TYPE(events[0], TReadSessionEvent::TDataReceivedEvent); - TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(events[0]); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 22); - UNIT_ASSERT_EQUAL(dataEvent.GetPartitionStream(), stream1); - } - - { - TVector<TReadSessionEvent::TEvent> events = setup.EventsQueue->GetEvents(true); - UNIT_ASSERT_VALUES_EQUAL(events.size(), 1); - UNIT_ASSERT_EVENT_TYPE(events[0], TReadSessionEvent::TDataReceivedEvent); - TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(events[0]); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 27); - UNIT_ASSERT_EQUAL(dataEvent.GetPartitionStream(), stream2); - } - - { - TVector<TReadSessionEvent::TEvent> events = setup.EventsQueue->GetEvents(true); - UNIT_ASSERT_VALUES_EQUAL(events.size(), 1); - UNIT_ASSERT_EVENT_TYPE(events[0], TReadSessionEvent::TDataReceivedEvent); - TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(events[0]); - UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 23); - UNIT_ASSERT_EQUAL(dataEvent.GetPartitionStream(), stream2); - } - - setup.AssertNoEvents(); - } - - Y_UNIT_TEST(PartitionStreamStatus) { - TReadSessionImplTestSetup setup; - setup.SuccessfulInit(); - TPartitionStream::TPtr stream = setup.CreatePartitionStream(); - EXPECT_CALL(*setup.MockProcessor, OnStatusRequest(_)) - .WillOnce(Invoke([](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Status& req) { - UNIT_ASSERT_VALUES_EQUAL(req.topic().path(), "TestTopic"); - UNIT_ASSERT_VALUES_EQUAL(req.cluster(), "TestCluster"); - UNIT_ASSERT_VALUES_EQUAL(req.partition(), 1); - UNIT_ASSERT_VALUES_EQUAL(req.assign_id(), 1); - })); - // Another assign id. - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionStreamStatus(11, 34, TInstant::Seconds(4), "TestTopic", "TestCluster", 1 /*partition*/, 13/*assign id to ignore*/)); - - // Proper assign id. 
- setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().PartitionStreamStatus(30, 42, TInstant::Seconds(20))); - - stream->RequestStatus(); - - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TPartitionStreamStatusEvent); - auto& statusEvent = std::get<TReadSessionEvent::TPartitionStreamStatusEvent>(*event); - UNIT_ASSERT_VALUES_EQUAL(statusEvent.GetCommittedOffset(), 30); - UNIT_ASSERT_VALUES_EQUAL(statusEvent.GetEndOffset(), 42); - UNIT_ASSERT_VALUES_EQUAL(statusEvent.GetWriteWatermark(), TInstant::MilliSeconds(20000)); - - setup.AssertNoEvents(); - } - - Y_UNIT_TEST(HoleBetweenOffsets) { - TReadSessionImplTestSetup setup; - setup.Settings.DecompressionExecutor(MakeIntrusive<TReorderingExecutor>(2ull)); - setup.SuccessfulInit(); - TPartitionStream::TPtr stream = setup.CreatePartitionStream(); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionData(1) - .Batch("src_id") - .CompressMessage(1, "message1") - .CompressMessage(2, "message2")); - - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionData(2) - .Batch("src_id") - .CompressMessage(10, "message3") - .CompressMessage(11, "message4")); - - bool has1 = false; - bool has2 = false; - EXPECT_CALL(*setup.MockProcessor, OnCommitRequest(_)) - .WillRepeatedly(Invoke([&](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Commit& req) { + + } + UNIT_ASSERT_VALUES_EQUAL(executor->GetTasksAdded(), expectedTasks); + + setup.AssertNoEvents(); + } + + Y_UNIT_TEST(PacksBatches_BatchABitBiggerThanLimit) { + const size_t serverBatchesCount = 2; + const size_t messagesInServerBatchCount = 4; + const size_t messageSize = 11; + const size_t batchLimit = 20; + const size_t batches = 4; + const size_t messagesInBatch = 2; + PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch); + } + + Y_UNIT_TEST(PacksBatches_ExactlyTwoMessagesInBatch) { + const size_t serverBatchesCount = 2; + const size_t messagesInServerBatchCount = 4; + const size_t messageSize = 10; // Exactly two messages in batch. + const size_t batchLimit = 20; + const size_t batches = 4; + const size_t messagesInBatch = 2; + PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch); + } + + Y_UNIT_TEST(PacksBatches_BatchesEqualToServerBatches) { + const size_t serverBatchesCount = 2; + const size_t messagesInServerBatchCount = 4; + const size_t messageSize = 10; + const size_t batchLimit = 40; // As in server messages. + const size_t batches = 2; + const size_t messagesInBatch = 4; + PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch); + } + + Y_UNIT_TEST(PacksBatches_OneMessageInEveryBatch) { + const size_t serverBatchesCount = 2; + const size_t messagesInServerBatchCount = 4; + const size_t messageSize = 100; + const size_t batchLimit = 90; // One message in every batch. + const size_t batches = 8; + const size_t messagesInBatch = 1; + PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch); + } + + Y_UNIT_TEST(PacksBatches_BigBatchDecompressWithTwoBatchTasks) { + const size_t serverBatchesCount = 1; + const size_t messagesInServerBatchCount = 200; // Many messages in order to fit in two 512 KB-tasks packs. 
+ const size_t messageSize = 1000000; + const size_t batchLimit = std::numeric_limits<size_t>::max(); + const size_t batches = 1; + const size_t messagesInBatch = messagesInServerBatchCount; + const size_t expectedDecompressionTasksCount = 2; + PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch, expectedDecompressionTasksCount); + } + + Y_UNIT_TEST(PacksBatches_DecompressesOneMessagePerTime) { + const size_t serverBatchesCount = 1; + const size_t messagesInServerBatchCount = 10; // Many messages in order to fit in two 512 KB-tasks packs. + const size_t messageSize = 1000000; + const size_t batchLimit = std::numeric_limits<size_t>::max(); + const size_t batches = messagesInServerBatchCount; + const size_t messagesInBatch = 1; + const size_t expectedDecompressionTasksCount = messagesInServerBatchCount; + const size_t reorderedCycleSize = 1; + const size_t memoryLimit = 10; + PacksBatchesImpl(serverBatchesCount, messagesInServerBatchCount, messageSize, batchLimit, batches, messagesInBatch, expectedDecompressionTasksCount, reorderedCycleSize, memoryLimit); + } + + Y_UNIT_TEST(UnpackBigBatchWithTwoPartitions) { + TReadSessionImplTestSetup setup; + + setup.Settings.MaxMemoryUsageBytes(5000); + setup.SuccessfulInit(); + TPartitionStream::TPtr stream1 = setup.CreatePartitionStream("TestTopic", "TestCluster", 1, 1); + TPartitionStream::TPtr stream2 = setup.CreatePartitionStream("TestTopic", "TestCluster", 2, 2); + + + const TString messageData = GenerateMessageData(100); + const TString compressedMessageData = Compress(messageData); + ui64 offset = 1; + ui64 seqNo = 42; + + for (ui64 partition = 1; partition <= 2; ++partition) { + TMockReadSessionProcessor::TServerReadInfo resp; + resp.PartitionData(partition, "TestTopic", "TestCluster", partition, partition) + .Batch("src_id", TInstant::Seconds(123), "127.0.0.1", { { "k", "v" }, { "k1", "v1" }}); + + for (size_t i = 0; i < 50; ++i) { + if (i == 22) { + resp.Batch("src_id2", TInstant::Seconds(123), "127.0.0.1", { { "k", "v" }, { "k1", "v1" }}); + } + resp.Message(offset++, compressedMessageData, Ydb::PersQueue::V1::CODEC_GZIP, seqNo++); + } + setup.MockProcessor->AddServerResponse(resp); + } + + { + TVector<TReadSessionEvent::TEvent> events = setup.EventsQueue->GetEvents(true); + UNIT_ASSERT_VALUES_EQUAL(events.size(), 1); + UNIT_ASSERT_EVENT_TYPE(events[0], TReadSessionEvent::TDataReceivedEvent); + TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(events[0]); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 28); + UNIT_ASSERT_EQUAL(dataEvent.GetPartitionStream(), stream1); + } + + { + TVector<TReadSessionEvent::TEvent> events = setup.EventsQueue->GetEvents(true); + UNIT_ASSERT_VALUES_EQUAL(events.size(), 1); + UNIT_ASSERT_EVENT_TYPE(events[0], TReadSessionEvent::TDataReceivedEvent); + TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(events[0]); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 22); + UNIT_ASSERT_EQUAL(dataEvent.GetPartitionStream(), stream1); + } + + { + TVector<TReadSessionEvent::TEvent> events = setup.EventsQueue->GetEvents(true); + UNIT_ASSERT_VALUES_EQUAL(events.size(), 1); + UNIT_ASSERT_EVENT_TYPE(events[0], TReadSessionEvent::TDataReceivedEvent); + TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(events[0]); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 27); + 
UNIT_ASSERT_EQUAL(dataEvent.GetPartitionStream(), stream2); + } + + { + TVector<TReadSessionEvent::TEvent> events = setup.EventsQueue->GetEvents(true); + UNIT_ASSERT_VALUES_EQUAL(events.size(), 1); + UNIT_ASSERT_EVENT_TYPE(events[0], TReadSessionEvent::TDataReceivedEvent); + TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(events[0]); + UNIT_ASSERT_VALUES_EQUAL(dataEvent.GetMessages().size(), 23); + UNIT_ASSERT_EQUAL(dataEvent.GetPartitionStream(), stream2); + } + + setup.AssertNoEvents(); + } + + Y_UNIT_TEST(PartitionStreamStatus) { + TReadSessionImplTestSetup setup; + setup.SuccessfulInit(); + TPartitionStream::TPtr stream = setup.CreatePartitionStream(); + EXPECT_CALL(*setup.MockProcessor, OnStatusRequest(_)) + .WillOnce(Invoke([](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Status& req) { + UNIT_ASSERT_VALUES_EQUAL(req.topic().path(), "TestTopic"); + UNIT_ASSERT_VALUES_EQUAL(req.cluster(), "TestCluster"); + UNIT_ASSERT_VALUES_EQUAL(req.partition(), 1); + UNIT_ASSERT_VALUES_EQUAL(req.assign_id(), 1); + })); + // Another assign id. + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionStreamStatus(11, 34, TInstant::Seconds(4), "TestTopic", "TestCluster", 1 /*partition*/, 13/*assign id to ignore*/)); + + // Proper assign id. + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo().PartitionStreamStatus(30, 42, TInstant::Seconds(20))); + + stream->RequestStatus(); + + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TPartitionStreamStatusEvent); + auto& statusEvent = std::get<TReadSessionEvent::TPartitionStreamStatusEvent>(*event); + UNIT_ASSERT_VALUES_EQUAL(statusEvent.GetCommittedOffset(), 30); + UNIT_ASSERT_VALUES_EQUAL(statusEvent.GetEndOffset(), 42); + UNIT_ASSERT_VALUES_EQUAL(statusEvent.GetWriteWatermark(), TInstant::MilliSeconds(20000)); + + setup.AssertNoEvents(); + } + + Y_UNIT_TEST(HoleBetweenOffsets) { + TReadSessionImplTestSetup setup; + setup.Settings.DecompressionExecutor(MakeIntrusive<TReorderingExecutor>(2ull)); + setup.SuccessfulInit(); + TPartitionStream::TPtr stream = setup.CreatePartitionStream(); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionData(1) + .Batch("src_id") + .CompressMessage(1, "message1") + .CompressMessage(2, "message2")); + + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionData(2) + .Batch("src_id") + .CompressMessage(10, "message3") + .CompressMessage(11, "message4")); + + bool has1 = false; + bool has2 = false; + EXPECT_CALL(*setup.MockProcessor, OnCommitRequest(_)) + .WillRepeatedly(Invoke([&](const Ydb::PersQueue::V1::MigrationStreamingReadClientMessage::Commit& req) { Cerr << "Got commit req " << req << "\n"; - for (const auto& commit : req.cookies()) { - if (commit.partition_cookie() == 1) { - has1 = true; - } else if (commit.partition_cookie() == 2) { - has2 = true; - } else { - UNIT_ASSERT(false); - } - } + for (const auto& commit : req.cookies()) { + if (commit.partition_cookie() == 1) { + has1 = true; + } else if (commit.partition_cookie() == 2) { + has2 = true; + } else { + UNIT_ASSERT(false); + } + } for (const auto& range : req.offset_ranges()) { Cerr << "RANGE " << range.start_offset() << " " << range.end_offset() << "\n"; if (range.start_offset() == 10 && range.end_offset() == 12) has1 = true; else if 
(range.start_offset() == 0 && range.end_offset() == 10) has2 = true; else UNIT_ASSERT(false); } - })); - - for (int i = 0; i < 2; ++i) { - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); - TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); - Cerr << "got data event: " << dataEvent.DebugString() << "\n"; - dataEvent.Commit(); - } - - UNIT_ASSERT(has1); - UNIT_ASSERT(has2); - - setup.AssertNoEvents(); - } - - Y_UNIT_TEST(CommitOffsetTwiceIsError) { - TReadSessionImplTestSetup setup; - setup.SuccessfulInit(); - TPartitionStream::TPtr stream = setup.CreatePartitionStream(); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionData(1) - .Batch("src_id") - .CompressMessage(1, "message1")); - - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); - TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); - - // First time. + })); + + for (int i = 0; i < 2; ++i) { + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); + TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); + Cerr << "got data event: " << dataEvent.DebugString() << "\n"; + dataEvent.Commit(); + } + + UNIT_ASSERT(has1); + UNIT_ASSERT(has2); + + setup.AssertNoEvents(); + } + + Y_UNIT_TEST(CommitOffsetTwiceIsError) { + TReadSessionImplTestSetup setup; + setup.SuccessfulInit(); + TPartitionStream::TPtr stream = setup.CreatePartitionStream(); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionData(1) + .Batch("src_id") + .CompressMessage(1, "message1")); + + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); + TReadSessionEvent::TDataReceivedEvent& dataEvent = std::get<TReadSessionEvent::TDataReceivedEvent>(*event); + + // First time. 
dataEvent.GetMessages()[0].Commit(); - + UNIT_ASSERT_EXCEPTION(dataEvent.GetMessages()[0].Commit(), NYdb::TContractViolation); - } - + } + Y_UNIT_TEST(DataReceivedCallbackReal) { NYdb::NPersQueue::NTests::TPersQueueYdbSdkTestSetup setup("ReadSession"); auto settings = setup.GetReadSessionSettings(); @@ -1653,193 +1653,193 @@ Y_UNIT_TEST_SUITE(ReadSessionImplTest) { Sleep(TDuration::Seconds(10)); } - Y_UNIT_TEST(DataReceivedCallback) { - TReadSessionImplTestSetup setup; - setup.Settings.DecompressionExecutor(MakeIntrusive<TReorderingExecutor>(2ull)); - auto calledPromise = NThreading::NewPromise<void>(); - int time = 0; - setup.Settings.EventHandlers_.DataReceivedHandler([&](TReadSessionEvent::TDataReceivedEvent& event) { - ++time; - UNIT_ASSERT_VALUES_EQUAL(event.GetMessages().size(), 1); - UNIT_ASSERT_VALUES_EQUAL(event.GetMessages()[0].GetData(), TStringBuilder() << "message" << time); - if (time == 2) { - calledPromise.SetValue(); - } - }); - setup.SuccessfulInit(); - TPartitionStream::TPtr stream = setup.CreatePartitionStream(); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionData(1) - .Batch("src_id") - .CompressMessage(1, "message1")); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionData(2) - .Batch("src_id") - .CompressMessage(2, "message2")); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .ForcefulReleasePartitionStream()); - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TPartitionStreamClosedEvent); - calledPromise.GetFuture().Wait(); - } - - Y_UNIT_TEST(PartitionStreamCallbacks) { - TReadSessionImplTestSetup setup; - bool createCalled = false; - setup.Settings.EventHandlers_.CreatePartitionStreamHandler([&](TReadSessionEvent::TCreatePartitionStreamEvent&) { - createCalled = true; - }); - auto destroyCalledPromise = NThreading::NewPromise<TReadSessionEvent::TDestroyPartitionStreamEvent>(); - auto destroyCalled = destroyCalledPromise.GetFuture(); - setup.Settings.EventHandlers_.DestroyPartitionStreamHandler([&](TReadSessionEvent::TDestroyPartitionStreamEvent& event) { - destroyCalledPromise.SetValue(std::move(event)); - }); - auto closedCalledPromise = NThreading::NewPromise<void>(); - auto closedCalled = closedCalledPromise.GetFuture(); - setup.Settings.EventHandlers_.PartitionStreamClosedHandler([&](TReadSessionEvent::TPartitionStreamClosedEvent&) { - closedCalledPromise.SetValue(); - }); - - setup.SuccessfulInit(); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .CreatePartitionStream()); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionData(1) - .Batch("src_id") - .CompressMessage(1, "message1")); - - TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); - UNIT_ASSERT(event); - UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); - - UNIT_ASSERT(createCalled); - UNIT_ASSERT(!destroyCalled.HasValue()); - UNIT_ASSERT(!closedCalled.HasValue()); - - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .ReleasePartitionStream()); - destroyCalled.Wait(); - UNIT_ASSERT(!closedCalled.HasValue()); - - destroyCalled.ExtractValue().Confirm(); - closedCalled.Wait(); - } - - Y_UNIT_TEST(CommonHandler) { - bool createCalled = false; - bool dataCalled = false; - auto callPromise = 
NThreading::NewPromise<void>(); - TReadSessionImplTestSetup setup; - setup.Settings.EventHandlers_.CommonHandler([&](TReadSessionEvent::TEvent& event) { - if (std::holds_alternative<TReadSessionEvent::TCreatePartitionStreamEvent>(event)) { - UNIT_ASSERT(!createCalled); - createCalled = true; - } else if (std::holds_alternative<TReadSessionEvent::TDataReceivedEvent>(event)) { - UNIT_ASSERT(!dataCalled); - dataCalled = true; - } else { - UNIT_ASSERT(false); - } - if (createCalled && dataCalled) { - UNIT_ASSERT_C(!callPromise.HasValue(), "Event: " << DebugString(event)); - callPromise.SetValue(); - } - }); - setup.SuccessfulInit(); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .CreatePartitionStream()); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionData(1) - .Batch("src_id") - .CompressMessage(1, "message1")); - - callPromise.GetFuture().Wait(); - } - - void SimpleDataHandlersImpl(bool withCommit, bool withGracefulRelease) { - TReadSessionImplTestSetup setup; - auto dataReceived = std::make_shared<NThreading::TPromise<void>>(NThreading::NewPromise<void>()); - auto dataReceivedFuture = dataReceived->GetFuture(); - std::shared_ptr<TMaybe<TReadSessionEvent::TDataReceivedEvent>> dataReceivedEvent = std::make_shared<TMaybe<TReadSessionEvent::TDataReceivedEvent>>(); - setup.Settings.EventHandlers_.SimpleDataHandlers([=](TReadSessionEvent::TDataReceivedEvent& event) mutable { - *dataReceivedEvent = std::move(event); - dataReceived->SetValue(); - }, withCommit, withGracefulRelease); - setup.SuccessfulInit(); - - auto commitCalled = std::make_shared<NThreading::TPromise<void>>(NThreading::NewPromise<void>()); - auto commitCalledFuture = commitCalled->GetFuture(); + Y_UNIT_TEST(DataReceivedCallback) { + TReadSessionImplTestSetup setup; + setup.Settings.DecompressionExecutor(MakeIntrusive<TReorderingExecutor>(2ull)); + auto calledPromise = NThreading::NewPromise<void>(); + int time = 0; + setup.Settings.EventHandlers_.DataReceivedHandler([&](TReadSessionEvent::TDataReceivedEvent& event) { + ++time; + UNIT_ASSERT_VALUES_EQUAL(event.GetMessages().size(), 1); + UNIT_ASSERT_VALUES_EQUAL(event.GetMessages()[0].GetData(), TStringBuilder() << "message" << time); + if (time == 2) { + calledPromise.SetValue(); + } + }); + setup.SuccessfulInit(); + TPartitionStream::TPtr stream = setup.CreatePartitionStream(); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionData(1) + .Batch("src_id") + .CompressMessage(1, "message1")); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionData(2) + .Batch("src_id") + .CompressMessage(2, "message2")); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .ForcefulReleasePartitionStream()); + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TPartitionStreamClosedEvent); + calledPromise.GetFuture().Wait(); + } + + Y_UNIT_TEST(PartitionStreamCallbacks) { + TReadSessionImplTestSetup setup; + bool createCalled = false; + setup.Settings.EventHandlers_.CreatePartitionStreamHandler([&](TReadSessionEvent::TCreatePartitionStreamEvent&) { + createCalled = true; + }); + auto destroyCalledPromise = NThreading::NewPromise<TReadSessionEvent::TDestroyPartitionStreamEvent>(); + auto destroyCalled = destroyCalledPromise.GetFuture(); + 
setup.Settings.EventHandlers_.DestroyPartitionStreamHandler([&](TReadSessionEvent::TDestroyPartitionStreamEvent& event) { + destroyCalledPromise.SetValue(std::move(event)); + }); + auto closedCalledPromise = NThreading::NewPromise<void>(); + auto closedCalled = closedCalledPromise.GetFuture(); + setup.Settings.EventHandlers_.PartitionStreamClosedHandler([&](TReadSessionEvent::TPartitionStreamClosedEvent&) { + closedCalledPromise.SetValue(); + }); + + setup.SuccessfulInit(); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .CreatePartitionStream()); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionData(1) + .Batch("src_id") + .CompressMessage(1, "message1")); + + TMaybe<TReadSessionEvent::TEvent> event = setup.EventsQueue->GetEvent(true); + UNIT_ASSERT(event); + UNIT_ASSERT_EVENT_TYPE(*event, TReadSessionEvent::TDataReceivedEvent); + + UNIT_ASSERT(createCalled); + UNIT_ASSERT(!destroyCalled.HasValue()); + UNIT_ASSERT(!closedCalled.HasValue()); + + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .ReleasePartitionStream()); + destroyCalled.Wait(); + UNIT_ASSERT(!closedCalled.HasValue()); + + destroyCalled.ExtractValue().Confirm(); + closedCalled.Wait(); + } + + Y_UNIT_TEST(CommonHandler) { + bool createCalled = false; + bool dataCalled = false; + auto callPromise = NThreading::NewPromise<void>(); + TReadSessionImplTestSetup setup; + setup.Settings.EventHandlers_.CommonHandler([&](TReadSessionEvent::TEvent& event) { + if (std::holds_alternative<TReadSessionEvent::TCreatePartitionStreamEvent>(event)) { + UNIT_ASSERT(!createCalled); + createCalled = true; + } else if (std::holds_alternative<TReadSessionEvent::TDataReceivedEvent>(event)) { + UNIT_ASSERT(!dataCalled); + dataCalled = true; + } else { + UNIT_ASSERT(false); + } + if (createCalled && dataCalled) { + UNIT_ASSERT_C(!callPromise.HasValue(), "Event: " << DebugString(event)); + callPromise.SetValue(); + } + }); + setup.SuccessfulInit(); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .CreatePartitionStream()); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionData(1) + .Batch("src_id") + .CompressMessage(1, "message1")); + + callPromise.GetFuture().Wait(); + } + + void SimpleDataHandlersImpl(bool withCommit, bool withGracefulRelease) { + TReadSessionImplTestSetup setup; + auto dataReceived = std::make_shared<NThreading::TPromise<void>>(NThreading::NewPromise<void>()); + auto dataReceivedFuture = dataReceived->GetFuture(); + std::shared_ptr<TMaybe<TReadSessionEvent::TDataReceivedEvent>> dataReceivedEvent = std::make_shared<TMaybe<TReadSessionEvent::TDataReceivedEvent>>(); + setup.Settings.EventHandlers_.SimpleDataHandlers([=](TReadSessionEvent::TDataReceivedEvent& event) mutable { + *dataReceivedEvent = std::move(event); + dataReceived->SetValue(); + }, withCommit, withGracefulRelease); + setup.SuccessfulInit(); + + auto commitCalled = std::make_shared<NThreading::TPromise<void>>(NThreading::NewPromise<void>()); + auto commitCalledFuture = commitCalled->GetFuture(); Mock::AllowLeak(setup.MockProcessor.Get()); - EXPECT_CALL(*setup.MockProcessor, OnCommitRequest(_)) - .WillOnce([=](){ commitCalled->SetValue(); }); - - auto destroyCalled = std::make_shared<NThreading::TPromise<void>>(NThreading::NewPromise<void>()); - auto destroyCalledFuture = destroyCalled->GetFuture(); - EXPECT_CALL(*setup.MockProcessor, OnReleasedRequest(_)) - 
.WillOnce([=](){ destroyCalled->SetValue(); }); - - EXPECT_CALL(*setup.MockProcessor, OnStartReadRequest(_)); - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .CreatePartitionStream()); - - UNIT_ASSERT(!dataReceivedFuture.Wait(TDuration::MilliSeconds(100))); - - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .PartitionData(1) - .Batch("src_id") - .CompressMessage(1, "msg") - .Batch("src_id") - .CompressMessage(2, "msg")); - - dataReceivedFuture.Wait(); - UNIT_ASSERT(*dataReceivedEvent); - UNIT_ASSERT_VALUES_EQUAL((*dataReceivedEvent)->GetMessages().size(), 2); - - if (withCommit) { - commitCalledFuture.Wait(); - } else { - UNIT_ASSERT(!commitCalledFuture.Wait(TDuration::MilliSeconds(100))); - } - - setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() - .ReleasePartitionStream()); - - if (!withCommit && withGracefulRelease) { - UNIT_ASSERT(!destroyCalledFuture.Wait(TDuration::MilliSeconds(100))); - + EXPECT_CALL(*setup.MockProcessor, OnCommitRequest(_)) + .WillOnce([=](){ commitCalled->SetValue(); }); + + auto destroyCalled = std::make_shared<NThreading::TPromise<void>>(NThreading::NewPromise<void>()); + auto destroyCalledFuture = destroyCalled->GetFuture(); + EXPECT_CALL(*setup.MockProcessor, OnReleasedRequest(_)) + .WillOnce([=](){ destroyCalled->SetValue(); }); + + EXPECT_CALL(*setup.MockProcessor, OnStartReadRequest(_)); + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .CreatePartitionStream()); + + UNIT_ASSERT(!dataReceivedFuture.Wait(TDuration::MilliSeconds(100))); + + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .PartitionData(1) + .Batch("src_id") + .CompressMessage(1, "msg") + .Batch("src_id") + .CompressMessage(2, "msg")); + + dataReceivedFuture.Wait(); + UNIT_ASSERT(*dataReceivedEvent); + UNIT_ASSERT_VALUES_EQUAL((*dataReceivedEvent)->GetMessages().size(), 2); + + if (withCommit) { + commitCalledFuture.Wait(); + } else { + UNIT_ASSERT(!commitCalledFuture.Wait(TDuration::MilliSeconds(100))); + } + + setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() + .ReleasePartitionStream()); + + if (!withCommit && withGracefulRelease) { + UNIT_ASSERT(!destroyCalledFuture.Wait(TDuration::MilliSeconds(100))); + (*dataReceivedEvent)->Commit(); - UNIT_ASSERT(!destroyCalledFuture.Wait(TDuration::MilliSeconds(100))); - } - - if (withCommit) { - commitCalledFuture.Wait(); + UNIT_ASSERT(!destroyCalledFuture.Wait(TDuration::MilliSeconds(100))); + } + + if (withCommit) { + commitCalledFuture.Wait(); } if (withCommit || withGracefulRelease) { setup.MockProcessor->AddServerResponse(TMockReadSessionProcessor::TServerReadInfo() .CommitAcknowledgement(1)); - } - - destroyCalledFuture.Wait(); - - // Guarantee that all callbacks are finished. - setup.Session->Abort(); - } - - Y_UNIT_TEST(SimpleDataHandlers) { - SimpleDataHandlersImpl(false, false); - } - - Y_UNIT_TEST(SimpleDataHandlersWithCommit) { - SimpleDataHandlersImpl(true, false); - } - - Y_UNIT_TEST(SimpleDataHandlersWithGracefulRelease) { - SimpleDataHandlersImpl(false, true); - } - - Y_UNIT_TEST(SimpleDataHandlersWithGracefulReleaseWithCommit) { - SimpleDataHandlersImpl(true, true); - } -} + } + + destroyCalledFuture.Wait(); + + // Guarantee that all callbacks are finished. 
+ setup.Session->Abort(); + } + + Y_UNIT_TEST(SimpleDataHandlers) { + SimpleDataHandlersImpl(false, false); + } + + Y_UNIT_TEST(SimpleDataHandlersWithCommit) { + SimpleDataHandlersImpl(true, false); + } + + Y_UNIT_TEST(SimpleDataHandlersWithGracefulRelease) { + SimpleDataHandlersImpl(false, true); + } + + Y_UNIT_TEST(SimpleDataHandlersWithGracefulReleaseWithCommit) { + SimpleDataHandlersImpl(true, true); + } +} diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/retry_policy_ut.cpp b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/retry_policy_ut.cpp index 50d050e973e..d215325217e 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/retry_policy_ut.cpp +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/retry_policy_ut.cpp @@ -66,15 +66,15 @@ Y_UNIT_TEST_SUITE(RetryPolicy) { }; - Y_UNIT_TEST(TWriteSession_RetryOnTargetCluster) { + Y_UNIT_TEST(TWriteSession_RetryOnTargetCluster) { auto setup1 = std::make_shared<TPersQueueYdbSdkTestSetup>(TEST_CASE_NAME, false); - SDKTestSetup setup2("RetryOnTargetCluster_Dc2"); + SDKTestSetup setup2("RetryOnTargetCluster_Dc2"); setup1->AddDataCenter("dc2", setup2, false); setup1->Start(); auto retryPolicy = std::make_shared<TYdbPqTestRetryPolicy>(); auto settings = setup1->GetWriteSessionSettings(); - settings.PreferredCluster("dc2"); - settings.AllowFallbackToOtherClusters(false); + settings.PreferredCluster("dc2"); + settings.AllowFallbackToOtherClusters(false); settings.RetryPolicy(retryPolicy); retryPolicy->Initialized(); @@ -229,8 +229,8 @@ Y_UNIT_TEST_SUITE(RetryPolicy) { Cerr << "===Enable dc1\n"; setup1->EnableDataCenter("dc1"); auto CheckSeqNo = [&] (const TString& dcName, ui64 expectedSeqNo) { - settings.PreferredCluster(dcName); - settings.AllowFallbackToOtherClusters(false); + settings.PreferredCluster(dcName); + settings.AllowFallbackToOtherClusters(false); settings.RetryPolicy(nullptr); //switch to default policy; auto writer = client.CreateWriteSession(settings); auto seqNo = writer->GetInitSeqNo().GetValueSync(); diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils.h b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils.h index 7fb219757b3..203370d9b66 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils.h +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils.h @@ -12,7 +12,7 @@ namespace NYdb::NPersQueue::NTests { class TPersQueueYdbSdkTestSetup : public ::NPersQueue::SDKTestSetup { THolder<NYdb::TDriver> Driver; - THolder<NYdb::NPersQueue::TPersQueueClient> PersQueueClient; + THolder<NYdb::NPersQueue::TPersQueueClient> PersQueueClient; TAdaptiveLock Lock; public: @@ -20,17 +20,17 @@ public: : SDKTestSetup(testCaseName, start) {} - ~TPersQueueYdbSdkTestSetup() { - if (PersQueueClient) { - PersQueueClient = nullptr; - } - - if (Driver) { - Driver->Stop(true); - Driver = nullptr; - } - } - + ~TPersQueueYdbSdkTestSetup() { + if (PersQueueClient) { + PersQueueClient = nullptr; + } + + if (Driver) { + Driver->Stop(true); + Driver = nullptr; + } + } + NYdb::TDriver& GetDriver() { if (!Driver) { NYdb::TDriverConfig cfg; @@ -42,10 +42,10 @@ public: return *Driver; } - NYdb::NPersQueue::TPersQueueClient& GetPersQueueClient() { + NYdb::NPersQueue::TPersQueueClient& GetPersQueueClient() { with_lock(Lock) { if (!PersQueueClient) { - PersQueueClient = MakeHolder<NYdb::NPersQueue::TPersQueueClient>(GetDriver()); + PersQueueClient = MakeHolder<NYdb::NPersQueue::TPersQueueClient>(GetDriver()); } return *PersQueueClient; } @@ -76,15 +76,15 @@ public: TYDBClientEventLoop( 
std::shared_ptr<TPersQueueYdbSdkTestSetup> setup, IRetryPolicy::TPtr retryPolicy = nullptr, - IExecutor::TPtr compressExecutor = nullptr, - const TString& preferredCluster = TString(), + IExecutor::TPtr compressExecutor = nullptr, + const TString& preferredCluster = TString(), const TString& sourceId = TString() ) : IClientEventLoop() , Setup(setup) { Log = Setup->GetLog(); - Thread = std::make_unique<TThread>([setup, retryPolicy, compressExecutor, preferredCluster, sourceId, this]() { + Thread = std::make_unique<TThread>([setup, retryPolicy, compressExecutor, preferredCluster, sourceId, this]() { auto writerConfig = Setup->GetWriteSessionSettings(); writerConfig.MaxMemoryUsage(100_MB); if (!sourceId.empty()) { @@ -94,8 +94,8 @@ public: writerConfig.RetryPolicy(retryPolicy); if (compressExecutor != nullptr) writerConfig.CompressionExecutor(compressExecutor); - if (preferredCluster) - writerConfig.PreferredCluster(preferredCluster); + if (preferredCluster) + writerConfig.PreferredCluster(preferredCluster); auto writer = setup->GetPersQueueClient().CreateWriteSession(writerConfig); TMaybe<TContinuationToken> continueToken; @@ -172,7 +172,7 @@ struct TYdbPqTestRetryState : IRetryState { , Delay(delay) {} - TMaybe<TDuration> GetNextRetryDelay(const TStatus&) override { + TMaybe<TDuration> GetNextRetryDelay(const TStatus&) override { Cerr << "Test retry state: get retry delay\n"; RetryDone(); return Delay; @@ -187,7 +187,7 @@ struct TYdbPqTestRetryState : IRetryState { }; struct TYdbPqNoRetryState : IRetryState { TAtomic DelayCalled = 0; - TMaybe<TDuration> GetNextRetryDelay(const TStatus&) override { + TMaybe<TDuration> GetNextRetryDelay(const TStatus&) override { auto res = AtomicSwap(&DelayCalled, 0); UNIT_ASSERT(!res); return Nothing(); @@ -338,10 +338,10 @@ public: TasksQueue.Enqueue(std::move(f)); } - void DoStart() override { - Thread.Start(); - } - + void DoStart() override { + Thread.Start(); + } + private: std::atomic_bool Stop; TLockFreeQueue<TFunction> TasksQueue; @@ -367,7 +367,7 @@ public: TYdbPqWriterTestHelper( const TString& name, std::shared_ptr<TLockFreeQueue<ui64>> executorQueue = nullptr, - const TString& preferredCluster = TString(), + const TString& preferredCluster = TString(), std::shared_ptr<TPersQueueYdbSdkTestSetup> setup = nullptr, const TString& sourceId = TString() ) @@ -376,7 +376,7 @@ public: { if (executorQueue) CompressExecutor = MakeIntrusive<TYdbPqTestExecutor>(executorQueue); - EventLoop = std::make_unique<TYDBClientEventLoop>(Setup, Policy, CompressExecutor, preferredCluster, sourceId); + EventLoop = std::make_unique<TYDBClientEventLoop>(Setup, Policy, CompressExecutor, preferredCluster, sourceId); } NThreading::TFuture<::NPersQueue::TWriteResult> Write(bool doWait = false, const TString& message = TString()) { diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/sdk_test_setup.h b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/sdk_test_setup.h index 677eb8c03df..4daec9b93f7 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/sdk_test_setup.h +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/sdk_test_setup.h @@ -9,28 +9,28 @@ namespace NPersQueue { class SDKTestSetup { -protected: - TString TestCaseName; - - THolder<TTempFileHandle> NetDataFile; +protected: + TString TestCaseName; + + THolder<TTempFileHandle> NetDataFile; THashMap<TString, NKikimr::NPersQueueTests::TPQTestClusterInfo> DataCenters; TString LocalDC = "dc1"; - TTestServer Server = TTestServer(false /* don't start */); - - TLog Log = 
TLog("cerr"); - + TTestServer Server = TTestServer(false /* don't start */); + + TLog Log = TLog("cerr"); + public: - SDKTestSetup(const TString& testCaseName, bool start = true) - : TestCaseName(testCaseName) + SDKTestSetup(const TString& testCaseName, bool start = true) + : TestCaseName(testCaseName) { - InitOptions(); - if (start) { - Start(); - } - } - - void InitOptions() { - Log.SetFormatter([testCaseName = TestCaseName](ELogPriority priority, TStringBuf message) { + InitOptions(); + if (start) { + Start(); + } + } + + void InitOptions() { + Log.SetFormatter([testCaseName = TestCaseName](ELogPriority priority, TStringBuf message) { return TStringBuilder() << TInstant::Now() << " :" << testCaseName << " " << priority << ": " << message << Endl; }); Server.GrpcServerOptions.SetGRpcShutdownDeadline(TDuration::Max()); @@ -43,27 +43,27 @@ public: Server.ServerSettings.PQConfig.SetCloseClientSessionWithEnabledRemotePreferredClusterDelaySec(1); Server.ServerSettings.PQClusterDiscoveryConfig.SetEnabled(true); SetNetDataViaFile("::1/128\t" + GetLocalCluster()); - - auto seed = TInstant::Now().MicroSeconds(); - // This makes failing randomized tests (for example with NUnitTest::RandomString(size, std::rand()) calls) reproducable - Log << TLOG_INFO << "Random seed for debugging is " << seed; - std::srand(seed); - } - + + auto seed = TInstant::Now().MicroSeconds(); + // This makes failing randomized tests (for example with NUnitTest::RandomString(size, std::rand()) calls) reproducable + Log << TLOG_INFO << "Random seed for debugging is " << seed; + std::srand(seed); + } + void Start(bool waitInit = true, bool addBrokenDatacenter = false) { Server.StartServer(false); //Server.EnableLogs({NKikimrServices::PQ_WRITE_PROXY, NKikimrServices::PQ_READ_PROXY}); Server.AnnoyingClient->InitRoot(); - if (DataCenters.empty()) { - THashMap<TString, NKikimr::NPersQueueTests::TPQTestClusterInfo> dataCenters; - dataCenters.emplace("dc1", NKikimr::NPersQueueTests::TPQTestClusterInfo{TStringBuilder() << "localhost:" << Server.GrpcPort, true}); - if (addBrokenDatacenter) { - dataCenters.emplace("dc2", NKikimr::NPersQueueTests::TPQTestClusterInfo{"dc2.logbroker.yandex.net", false}); - } - Server.AnnoyingClient->InitDCs(dataCenters); - } else { + if (DataCenters.empty()) { + THashMap<TString, NKikimr::NPersQueueTests::TPQTestClusterInfo> dataCenters; + dataCenters.emplace("dc1", NKikimr::NPersQueueTests::TPQTestClusterInfo{TStringBuilder() << "localhost:" << Server.GrpcPort, true}); + if (addBrokenDatacenter) { + dataCenters.emplace("dc2", NKikimr::NPersQueueTests::TPQTestClusterInfo{"dc2.logbroker.yandex.net", false}); + } + Server.AnnoyingClient->InitDCs(dataCenters); + } else { Server.AnnoyingClient->InitDCs(DataCenters, LocalDC); - } + } Server.AnnoyingClient->InitSourceIds(); CreateTopic(GetTestTopic(), GetLocalCluster()); if (waitInit) { @@ -71,19 +71,19 @@ public: } } - TString GetTestTopic() const { + TString GetTestTopic() const { return "topic1"; } - TString GetTestClient() const { + TString GetTestClient() const { return "test-reader"; } - TString GetTestMessageGroupId() const { + TString GetTestMessageGroupId() const { return "test-message-group-id"; } - TString GetLocalCluster() const { + TString GetLocalCluster() const { return LocalDC; } @@ -92,9 +92,9 @@ public: } NGrpc::TServerOptions& GetGrpcServerOptions() { - return Server.GrpcServerOptions; - } - + return Server.GrpcServerOptions; + } + void SetNetDataViaFile(const TString& netDataTsv) { NetDataFile = MakeHolder<TTempFileHandle>("netData.tsv"); 
NetDataFile->Write(netDataTsv.Data(), netDataTsv.Size()); @@ -107,30 +107,30 @@ public: return Log; } - template <class TConsumerOrProducer> - void Start(const THolder<TConsumerOrProducer>& obj) { - auto startFuture = obj->Start(); - const auto& initResponse = startFuture.GetValueSync(); - UNIT_ASSERT_C(!initResponse.Response.HasError(), "Failed to start: " << initResponse.Response); - } - + template <class TConsumerOrProducer> + void Start(const THolder<TConsumerOrProducer>& obj) { + auto startFuture = obj->Start(); + const auto& initResponse = startFuture.GetValueSync(); + UNIT_ASSERT_C(!initResponse.Response.HasError(), "Failed to start: " << initResponse.Response); + } + void WriteToTopic(const TVector<TString>& data, bool compress = true) { - + auto client = NYdb::NPersQueue::TPersQueueClient(*(Server.AnnoyingClient->GetDriver())); NYdb::NPersQueue::TWriteSessionSettings settings; settings.Path(GetTestTopic()).MessageGroupId(GetTestMessageGroupId()); if (!compress) settings.Codec(NYdb::NPersQueue::ECodec::RAW); auto writer = client.CreateSimpleBlockingWriteSession(settings); - - for (const TString& d : data) { - Log << TLOG_INFO << "WriteToTopic: " << d; + + for (const TString& d : data) { + Log << TLOG_INFO << "WriteToTopic: " << d; auto res = writer->Write(d); UNIT_ASSERT(res); - } + } writer->Close(); - } - - void SetSingleDataCenter(const TString& name = "dc1") { + } + + void SetSingleDataCenter(const TString& name = "dc1") { UNIT_ASSERT( DataCenters.insert(std::make_pair( name, @@ -138,23 +138,23 @@ public: )).second ); LocalDC = name; - } - - void AddDataCenter(const TString& name, const TString& address, bool enabled = true, bool setSelfAsDc = true) { - if (DataCenters.empty() && setSelfAsDc) { - SetSingleDataCenter(); - } + } + + void AddDataCenter(const TString& name, const TString& address, bool enabled = true, bool setSelfAsDc = true) { + if (DataCenters.empty() && setSelfAsDc) { + SetSingleDataCenter(); + } NKikimr::NPersQueueTests::TPQTestClusterInfo info{ - address, + address, enabled }; UNIT_ASSERT(DataCenters.insert(std::make_pair(name, info)).second); - } - - void AddDataCenter(const TString& name, const SDKTestSetup& cluster, bool enabled = true, bool setSelfAsDc = true) { - AddDataCenter(name, TStringBuilder() << "localhost:" << cluster.Server.GrpcPort, enabled, setSelfAsDc); - } - + } + + void AddDataCenter(const TString& name, const SDKTestSetup& cluster, bool enabled = true, bool setSelfAsDc = true) { + AddDataCenter(name, TStringBuilder() << "localhost:" << cluster.Server.GrpcPort, enabled, setSelfAsDc); + } + void EnableDataCenter(const TString& name) { auto iter = DataCenters.find(name); UNIT_ASSERT(iter != DataCenters.end()); @@ -165,15 +165,15 @@ public: UNIT_ASSERT(iter != DataCenters.end()); Server.AnnoyingClient->UpdateDcEnabled(name, false); } - - void ShutdownGRpc() { - Server.ShutdownGRpc(); - } - - void EnableGRpc() { - Server.EnableGRpc(); - Server.WaitInit(GetTestTopic()); - } + + void ShutdownGRpc() { + Server.ShutdownGRpc(); + } + + void EnableGRpc() { + Server.EnableGRpc(); + Server.WaitInit(GetTestTopic()); + } void KickTablets() { for (ui32 i = 0; i < Server.CleverServer->StaticNodes() + Server.CleverServer->DynamicNodes(); i++) { @@ -189,14 +189,14 @@ public: } } - void CreateTopic(const TString& topic, const TString& cluster, size_t partitionsCount = 1) { - Server.AnnoyingClient->CreateTopic(BuildFullTopicName(topic, cluster), partitionsCount); - } - - void KillPqrb(const TString& topic, const TString& cluster) { - auto describeResult = 
Server.AnnoyingClient->Ls(TStringBuilder() << "/Root/PQ/" << BuildFullTopicName(topic, cluster)); - UNIT_ASSERT_C(describeResult->Record.GetPathDescription().HasPersQueueGroup(), describeResult->Record); - Server.AnnoyingClient->KillTablet(*Server.CleverServer, describeResult->Record.GetPathDescription().GetPersQueueGroup().GetBalancerTabletID()); - } + void CreateTopic(const TString& topic, const TString& cluster, size_t partitionsCount = 1) { + Server.AnnoyingClient->CreateTopic(BuildFullTopicName(topic, cluster), partitionsCount); + } + + void KillPqrb(const TString& topic, const TString& cluster) { + auto describeResult = Server.AnnoyingClient->Ls(TStringBuilder() << "/Root/PQ/" << BuildFullTopicName(topic, cluster)); + UNIT_ASSERT_C(describeResult->Record.GetPathDescription().HasPersQueueGroup(), describeResult->Record); + Server.AnnoyingClient->KillTablet(*Server.CleverServer, describeResult->Record.GetPathDescription().GetPersQueueGroup().GetBalancerTabletID()); + } }; } diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_server.cpp b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_server.cpp index 728b9911cf6..677bc5b49d1 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_server.cpp +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_server.cpp @@ -1,11 +1,11 @@ -#include "test_server.h" - -namespace NPersQueue { - -const TVector<NKikimrServices::EServiceKikimr> TTestServer::LOGGED_SERVICES = { - NKikimrServices::PQ_READ_PROXY, - NKikimrServices::PQ_WRITE_PROXY, - NKikimrServices::PQ_MIRRORER, -}; - -} // namespace NPersQueue +#include "test_server.h" + +namespace NPersQueue { + +const TVector<NKikimrServices::EServiceKikimr> TTestServer::LOGGED_SERVICES = { + NKikimrServices::PQ_READ_PROXY, + NKikimrServices::PQ_WRITE_PROXY, + NKikimrServices::PQ_MIRRORER, +}; + +} // namespace NPersQueue diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_server.h b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_server.h index 250ce03620c..515a17ab826 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_server.h +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_server.h @@ -1,31 +1,31 @@ -#pragma once +#pragma once #include <ydb/core/testlib/test_pq_client.h> #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/persqueue.h> - + #include <library/cpp/grpc/server/grpc_server.h> - + #include <library/cpp/testing/unittest/registar.h> #include <library/cpp/testing/unittest/tests_data.h> - -#include <util/system/tempfile.h> - -namespace NPersQueue { - + +#include <util/system/tempfile.h> + +namespace NPersQueue { + static constexpr int DEBUG_LOG_LEVEL = 7; -class TTestServer { -public: +class TTestServer { +public: TTestServer(bool start = true, TMaybe<TSimpleSharedPtr<TPortManager>> portManager = Nothing()) : PortManager(portManager.GetOrElse(MakeSimpleShared<TPortManager>())) , Port(PortManager->GetPort(2134)) , GrpcPort(PortManager->GetPort(2135)) , ServerSettings(NKikimr::NPersQueueTests::PQSettings(Port).SetGrpcPort(GrpcPort)) , GrpcServerOptions(NGrpc::TServerOptions().SetHost("[::1]").SetPort(GrpcPort)) - { - if (start) { - StartServer(); - } - } + { + if (start) { + StartServer(); + } + } TTestServer(const NKikimr::Tests::TServerSettings& settings, bool start = true) : PortManager(MakeSimpleShared<TPortManager>()) , Port(PortManager->GetPort(2134)) @@ -38,75 +38,75 @@ public: if (start) StartServer(); } - + void StartServer(bool 
doClientInit = true) { PrepareNetDataFile(); - CleverServer = MakeHolder<NKikimr::Tests::TServer>(ServerSettings); - CleverServer->EnableGRpc(GrpcServerOptions); - AnnoyingClient = MakeHolder<NKikimr::NPersQueueTests::TFlatMsgBusPQClient>(ServerSettings, GrpcPort); - EnableLogs(LOGGED_SERVICES); + CleverServer = MakeHolder<NKikimr::Tests::TServer>(ServerSettings); + CleverServer->EnableGRpc(GrpcServerOptions); + AnnoyingClient = MakeHolder<NKikimr::NPersQueueTests::TFlatMsgBusPQClient>(ServerSettings, GrpcPort); + EnableLogs(LOGGED_SERVICES); if (doClientInit) { AnnoyingClient->FullInit(); } - } - - void ShutdownGRpc() { - CleverServer->ShutdownGRpc(); - } - - void EnableGRpc() { - CleverServer->EnableGRpc(GrpcServerOptions); - } - - void ShutdownServer() { - CleverServer = nullptr; - } - - void RestartServer() { - ShutdownServer(); - StartServer(); - } - - void EnableLogs(const TVector<NKikimrServices::EServiceKikimr> services, + } + + void ShutdownGRpc() { + CleverServer->ShutdownGRpc(); + } + + void EnableGRpc() { + CleverServer->EnableGRpc(GrpcServerOptions); + } + + void ShutdownServer() { + CleverServer = nullptr; + } + + void RestartServer() { + ShutdownServer(); + StartServer(); + } + + void EnableLogs(const TVector<NKikimrServices::EServiceKikimr> services, NActors::NLog::EPriority prio = NActors::NLog::PRI_DEBUG) { Y_VERIFY(CleverServer != nullptr, "Start server before enabling logs"); - for (auto s : services) { - CleverServer->GetRuntime()->SetLogPriority(s, prio); - } - } - - void WaitInit(const TString& topic) { + for (auto s : services) { + CleverServer->GetRuntime()->SetLogPriority(s, prio); + } + } + + void WaitInit(const TString& topic) { AnnoyingClient->WaitTopicInit(topic); - } - + } + bool PrepareNetDataFile(const TString& content = "::1/128\tdc1") { if (NetDataFile) return false; - NetDataFile = MakeHolder<TTempFileHandle>("netData.tsv"); - NetDataFile->Write(content.Data(), content.Size()); - NetDataFile->FlushData(); - ServerSettings.NetClassifierConfig.SetNetDataFilePath(NetDataFile->Name()); + NetDataFile = MakeHolder<TTempFileHandle>("netData.tsv"); + NetDataFile->Write(content.Data(), content.Size()); + NetDataFile->FlushData(); + ServerSettings.NetClassifierConfig.SetNetDataFilePath(NetDataFile->Name()); return true; - } - + } + void UpdateDC(const TString& name, bool local, bool enabled) { AnnoyingClient->UpdateDC(name, local, enabled); } -public: +public: TSimpleSharedPtr<TPortManager> PortManager; - ui16 Port; - ui16 GrpcPort; - - THolder<NKikimr::Tests::TServer> CleverServer; - NKikimr::Tests::TServerSettings ServerSettings; + ui16 Port; + ui16 GrpcPort; + + THolder<NKikimr::Tests::TServer> CleverServer; + NKikimr::Tests::TServerSettings ServerSettings; NGrpc::TServerOptions GrpcServerOptions; - THolder<TTempFileHandle> NetDataFile; - - THolder<NKikimr::NPersQueueTests::TFlatMsgBusPQClient> AnnoyingClient; - - - static const TVector<NKikimrServices::EServiceKikimr> LOGGED_SERVICES; -}; - -} // namespace NPersQueue + THolder<TTempFileHandle> NetDataFile; + + THolder<NKikimr::NPersQueueTests::TFlatMsgBusPQClient> AnnoyingClient; + + + static const TVector<NKikimrServices::EServiceKikimr> LOGGED_SERVICES; +}; + +} // namespace NPersQueue diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_utils.h b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_utils.h index f5841d62773..c6ec9a0d684 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_utils.h +++ 
b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_utils.h @@ -1,20 +1,20 @@ -#pragma once -#include <util/generic/ptr.h> +#pragma once +#include <util/generic/ptr.h> #include <util/generic/size_literals.h> #include <library/cpp/threading/chunk_queue/queue.h> #include <util/generic/overloaded.h> #include <library/cpp/testing/unittest/registar.h> - + #include "sdk_test_setup.h" -namespace NPersQueue { - +namespace NPersQueue { + using namespace NThreading; using namespace NYdb::NPersQueue; using namespace NKikimr; using namespace NKikimr::NPersQueueTests; - + struct TWriteResult { bool Ok = false; // No acknowledgement is expected from a writer under test @@ -71,4 +71,4 @@ public: }; -} // namespace NPersQueue +} // namespace NPersQueue diff --git a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ya.make b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ya.make index a95f5de06fe..b4d6531b204 100644 --- a/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ya.make +++ b/ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ya.make @@ -1,23 +1,23 @@ UNITTEST_FOR(ydb/public/sdk/cpp/client/ydb_persqueue_core) - -OWNER( + +OWNER( g:kikimr g:logbroker -) - -IF (SANITIZER_TYPE) - TIMEOUT(1200) - SIZE(LARGE) - TAG(ya:fat) -ELSE() - TIMEOUT(600) - SIZE(MEDIUM) -ENDIF() - -FORK_SUBTESTS() - -PEERDIR( - library/cpp/testing/gmock_in_unittest +) + +IF (SANITIZER_TYPE) + TIMEOUT(1200) + SIZE(LARGE) + TAG(ya:fat) +ELSE() + TIMEOUT(600) + SIZE(MEDIUM) +ENDIF() + +FORK_SUBTESTS() + +PEERDIR( + library/cpp/testing/gmock_in_unittest ydb/core/testlib ydb/public/lib/json_value ydb/public/lib/yson_value @@ -26,21 +26,21 @@ PEERDIR( ydb/public/sdk/cpp/client/ydb_persqueue_core/impl ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils ydb/public/sdk/cpp/client/ydb_persqueue_public/codecs -) - +) + YQL_LAST_ABI_VERSION() -SRCS( - common_ut.cpp - read_session_ut.cpp - basic_usage_ut.cpp +SRCS( + common_ut.cpp + read_session_ut.cpp + basic_usage_ut.cpp compress_executor_ut.cpp compression_ut.cpp retry_policy_ut.cpp ut_utils.cpp -) - -END() +) + +END() RECURSE_FOR_TESTS( with_offset_ranges_mode_ut diff --git a/ydb/public/sdk/cpp/client/ydb_rate_limiter/rate_limiter.cpp b/ydb/public/sdk/cpp/client/ydb_rate_limiter/rate_limiter.cpp index bea557812c6..5ab6e84497d 100644 --- a/ydb/public/sdk/cpp/client/ydb_rate_limiter/rate_limiter.cpp +++ b/ydb/public/sdk/cpp/client/ydb_rate_limiter/rate_limiter.cpp @@ -1,35 +1,35 @@ #include "rate_limiter.h" - + #define INCLUDE_YDB_INTERNAL_H #include <ydb/public/sdk/cpp/client/impl/ydb_internal/make_request/make.h> #undef INCLUDE_YDB_INTERNAL_H #include <ydb/public/api/grpc/ydb_rate_limiter_v1.grpc.pb.h> #include <ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h> - -namespace NYdb::NRateLimiter { - -TListResourcesResult::TListResourcesResult(TStatus status, TVector<TString> paths) - : TStatus(std::move(status)) - , ResourcePaths_(std::move(paths)) -{ -} - -TDescribeResourceResult::TDescribeResourceResult(TStatus status, const Ydb::RateLimiter::DescribeResourceResult& result) - : TStatus(std::move(status)) - , ResourcePath_(result.resource().resource_path()) - , HierarchicalDrrProps_(result.resource().hierarchical_drr()) -{ -} - -TDescribeResourceResult::THierarchicalDrrProps::THierarchicalDrrProps(const Ydb::RateLimiter::HierarchicalDrrSettings& settings) { - if (settings.max_units_per_second()) { - MaxUnitsPerSecond_ = settings.max_units_per_second(); - } - - if (settings.max_burst_size_coefficient()) { - MaxBurstSizeCoefficient_ = 
settings.max_burst_size_coefficient(); - } + +namespace NYdb::NRateLimiter { + +TListResourcesResult::TListResourcesResult(TStatus status, TVector<TString> paths) + : TStatus(std::move(status)) + , ResourcePaths_(std::move(paths)) +{ +} + +TDescribeResourceResult::TDescribeResourceResult(TStatus status, const Ydb::RateLimiter::DescribeResourceResult& result) + : TStatus(std::move(status)) + , ResourcePath_(result.resource().resource_path()) + , HierarchicalDrrProps_(result.resource().hierarchical_drr()) +{ +} + +TDescribeResourceResult::THierarchicalDrrProps::THierarchicalDrrProps(const Ydb::RateLimiter::HierarchicalDrrSettings& settings) { + if (settings.max_units_per_second()) { + MaxUnitsPerSecond_ = settings.max_units_per_second(); + } + + if (settings.max_burst_size_coefficient()) { + MaxBurstSizeCoefficient_ = settings.max_burst_size_coefficient(); + } if (settings.prefetch_coefficient()) { PrefetchCoefficient_ = settings.prefetch_coefficient(); @@ -38,137 +38,137 @@ TDescribeResourceResult::THierarchicalDrrProps::THierarchicalDrrProps(const Ydb: if (settings.prefetch_watermark()) { PrefetchWatermark_ = settings.prefetch_watermark(); } -} - -class TRateLimiterClient::TImpl : public TClientImplCommon<TRateLimiterClient::TImpl> { -public: - TImpl(std::shared_ptr<TGRpcConnectionsImpl> connections, const TCommonClientSettings& settings) +} + +class TRateLimiterClient::TImpl : public TClientImplCommon<TRateLimiterClient::TImpl> { +public: + TImpl(std::shared_ptr<TGRpcConnectionsImpl> connections, const TCommonClientSettings& settings) : TClientImplCommon(std::move(connections), settings) - { - } - - template <class TRequest, class TSettings> - static TRequest MakePropsCreateOrAlterRequest(const TString& coordinationNodePath, const TString& resourcePath, const TSettings& settings) { - TRequest request = MakeOperationRequest<TRequest>(settings); - request.set_coordination_node_path(coordinationNodePath); - - Ydb::RateLimiter::Resource& resource = *request.mutable_resource(); - resource.set_resource_path(resourcePath); - - Ydb::RateLimiter::HierarchicalDrrSettings& hdrr = *resource.mutable_hierarchical_drr(); - if (settings.MaxUnitsPerSecond_) { - hdrr.set_max_units_per_second(*settings.MaxUnitsPerSecond_); - } - if (settings.MaxBurstSizeCoefficient_) { - hdrr.set_max_burst_size_coefficient(*settings.MaxBurstSizeCoefficient_); - } + { + } + + template <class TRequest, class TSettings> + static TRequest MakePropsCreateOrAlterRequest(const TString& coordinationNodePath, const TString& resourcePath, const TSettings& settings) { + TRequest request = MakeOperationRequest<TRequest>(settings); + request.set_coordination_node_path(coordinationNodePath); + + Ydb::RateLimiter::Resource& resource = *request.mutable_resource(); + resource.set_resource_path(resourcePath); + + Ydb::RateLimiter::HierarchicalDrrSettings& hdrr = *resource.mutable_hierarchical_drr(); + if (settings.MaxUnitsPerSecond_) { + hdrr.set_max_units_per_second(*settings.MaxUnitsPerSecond_); + } + if (settings.MaxBurstSizeCoefficient_) { + hdrr.set_max_burst_size_coefficient(*settings.MaxBurstSizeCoefficient_); + } if (settings.PrefetchCoefficient_) { hdrr.set_prefetch_coefficient(*settings.PrefetchCoefficient_); } if (settings.PrefetchWatermark_) { hdrr.set_prefetch_watermark(*settings.PrefetchWatermark_); } - - return request; - } - - TAsyncStatus CreateResource(const TString& coordinationNodePath, const TString& resourcePath, const TCreateResourceSettings& settings) { - auto request = 
MakePropsCreateOrAlterRequest<Ydb::RateLimiter::CreateResourceRequest>(coordinationNodePath, resourcePath, settings); - - return RunSimple<Ydb::RateLimiter::V1::RateLimiterService, Ydb::RateLimiter::CreateResourceRequest, Ydb::RateLimiter::CreateResourceResponse>( - std::move(request), - &Ydb::RateLimiter::V1::RateLimiterService::Stub::AsyncCreateResource, + + return request; + } + + TAsyncStatus CreateResource(const TString& coordinationNodePath, const TString& resourcePath, const TCreateResourceSettings& settings) { + auto request = MakePropsCreateOrAlterRequest<Ydb::RateLimiter::CreateResourceRequest>(coordinationNodePath, resourcePath, settings); + + return RunSimple<Ydb::RateLimiter::V1::RateLimiterService, Ydb::RateLimiter::CreateResourceRequest, Ydb::RateLimiter::CreateResourceResponse>( + std::move(request), + &Ydb::RateLimiter::V1::RateLimiterService::Stub::AsyncCreateResource, TRpcRequestSettings::Make(settings), - settings.ClientTimeout_); - } - - TAsyncStatus AlterResource(const TString& coordinationNodePath, const TString& resourcePath, const TAlterResourceSettings& settings) { - auto request = MakePropsCreateOrAlterRequest<Ydb::RateLimiter::AlterResourceRequest>(coordinationNodePath, resourcePath, settings); - - return RunSimple<Ydb::RateLimiter::V1::RateLimiterService, Ydb::RateLimiter::AlterResourceRequest, Ydb::RateLimiter::AlterResourceResponse>( - std::move(request), - &Ydb::RateLimiter::V1::RateLimiterService::Stub::AsyncAlterResource, + settings.ClientTimeout_); + } + + TAsyncStatus AlterResource(const TString& coordinationNodePath, const TString& resourcePath, const TAlterResourceSettings& settings) { + auto request = MakePropsCreateOrAlterRequest<Ydb::RateLimiter::AlterResourceRequest>(coordinationNodePath, resourcePath, settings); + + return RunSimple<Ydb::RateLimiter::V1::RateLimiterService, Ydb::RateLimiter::AlterResourceRequest, Ydb::RateLimiter::AlterResourceResponse>( + std::move(request), + &Ydb::RateLimiter::V1::RateLimiterService::Stub::AsyncAlterResource, TRpcRequestSettings::Make(settings), - settings.ClientTimeout_); - } - - TAsyncStatus DropResource(const TString& coordinationNodePath, const TString& resourcePath, const TDropResourceSettings& settings) { - auto request = MakeOperationRequest<Ydb::RateLimiter::DropResourceRequest>(settings); - request.set_coordination_node_path(coordinationNodePath); - request.set_resource_path(resourcePath); - - return RunSimple<Ydb::RateLimiter::V1::RateLimiterService, Ydb::RateLimiter::DropResourceRequest, Ydb::RateLimiter::DropResourceResponse>( - std::move(request), - &Ydb::RateLimiter::V1::RateLimiterService::Stub::AsyncDropResource, + settings.ClientTimeout_); + } + + TAsyncStatus DropResource(const TString& coordinationNodePath, const TString& resourcePath, const TDropResourceSettings& settings) { + auto request = MakeOperationRequest<Ydb::RateLimiter::DropResourceRequest>(settings); + request.set_coordination_node_path(coordinationNodePath); + request.set_resource_path(resourcePath); + + return RunSimple<Ydb::RateLimiter::V1::RateLimiterService, Ydb::RateLimiter::DropResourceRequest, Ydb::RateLimiter::DropResourceResponse>( + std::move(request), + &Ydb::RateLimiter::V1::RateLimiterService::Stub::AsyncDropResource, TRpcRequestSettings::Make(settings), - settings.ClientTimeout_); - } - - TAsyncListResourcesResult ListResources(const TString& coordinationNodePath, const TString& resourcePath, const TListResourcesSettings& settings) { - auto request = 
MakeOperationRequest<Ydb::RateLimiter::ListResourcesRequest>(settings); - request.set_coordination_node_path(coordinationNodePath); - request.set_resource_path(resourcePath); - request.set_recursive(settings.Recursive_); - - auto promise = NThreading::NewPromise<TListResourcesResult>(); - + settings.ClientTimeout_); + } + + TAsyncListResourcesResult ListResources(const TString& coordinationNodePath, const TString& resourcePath, const TListResourcesSettings& settings) { + auto request = MakeOperationRequest<Ydb::RateLimiter::ListResourcesRequest>(settings); + request.set_coordination_node_path(coordinationNodePath); + request.set_resource_path(resourcePath); + request.set_recursive(settings.Recursive_); + + auto promise = NThreading::NewPromise<TListResourcesResult>(); + auto extractor = [promise] - (google::protobuf::Any* any, TPlainStatus status) mutable { - TVector<TString> list; - if (any) { - Ydb::RateLimiter::ListResourcesResult result; - any->UnpackTo(&result); - list.reserve(result.resource_paths_size()); - for (const TString& path : result.resource_paths()) { - list.push_back(path); - } - } - + (google::protobuf::Any* any, TPlainStatus status) mutable { + TVector<TString> list; + if (any) { + Ydb::RateLimiter::ListResourcesResult result; + any->UnpackTo(&result); + list.reserve(result.resource_paths_size()); + for (const TString& path : result.resource_paths()) { + list.push_back(path); + } + } + TListResourcesResult val(TStatus(std::move(status)), std::move(list)); - promise.SetValue(std::move(val)); - }; - - Connections_->RunDeferred<Ydb::RateLimiter::V1::RateLimiterService, Ydb::RateLimiter::ListResourcesRequest, Ydb::RateLimiter::ListResourcesResponse>( - std::move(request), - extractor, - &Ydb::RateLimiter::V1::RateLimiterService::Stub::AsyncListResources, - DbDriverState_, - INITIAL_DEFERRED_CALL_DELAY, + promise.SetValue(std::move(val)); + }; + + Connections_->RunDeferred<Ydb::RateLimiter::V1::RateLimiterService, Ydb::RateLimiter::ListResourcesRequest, Ydb::RateLimiter::ListResourcesResponse>( + std::move(request), + extractor, + &Ydb::RateLimiter::V1::RateLimiterService::Stub::AsyncListResources, + DbDriverState_, + INITIAL_DEFERRED_CALL_DELAY, TRpcRequestSettings::Make(settings), - settings.ClientTimeout_); - - return promise.GetFuture(); - } - - TAsyncDescribeResourceResult DescribeResource(const TString& coordinationNodePath, const TString& resourcePath, const TDescribeResourceSettings& settings) { - auto request = MakeOperationRequest<Ydb::RateLimiter::DescribeResourceRequest>(settings); - request.set_coordination_node_path(coordinationNodePath); - request.set_resource_path(resourcePath); - - auto promise = NThreading::NewPromise<TDescribeResourceResult>(); - + settings.ClientTimeout_); + + return promise.GetFuture(); + } + + TAsyncDescribeResourceResult DescribeResource(const TString& coordinationNodePath, const TString& resourcePath, const TDescribeResourceSettings& settings) { + auto request = MakeOperationRequest<Ydb::RateLimiter::DescribeResourceRequest>(settings); + request.set_coordination_node_path(coordinationNodePath); + request.set_resource_path(resourcePath); + + auto promise = NThreading::NewPromise<TDescribeResourceResult>(); + auto extractor = [promise] - (google::protobuf::Any* any, TPlainStatus status) mutable { - Ydb::RateLimiter::DescribeResourceResult result; - if (any) { - any->UnpackTo(&result); - } - + (google::protobuf::Any* any, TPlainStatus status) mutable { + Ydb::RateLimiter::DescribeResourceResult result; + if (any) { + 
any->UnpackTo(&result); + } + TDescribeResourceResult val(TStatus(std::move(status)), result); - promise.SetValue(std::move(val)); - }; - - Connections_->RunDeferred<Ydb::RateLimiter::V1::RateLimiterService, Ydb::RateLimiter::DescribeResourceRequest, Ydb::RateLimiter::DescribeResourceResponse>( - std::move(request), - extractor, - &Ydb::RateLimiter::V1::RateLimiterService::Stub::AsyncDescribeResource, - DbDriverState_, - INITIAL_DEFERRED_CALL_DELAY, + promise.SetValue(std::move(val)); + }; + + Connections_->RunDeferred<Ydb::RateLimiter::V1::RateLimiterService, Ydb::RateLimiter::DescribeResourceRequest, Ydb::RateLimiter::DescribeResourceResponse>( + std::move(request), + extractor, + &Ydb::RateLimiter::V1::RateLimiterService::Stub::AsyncDescribeResource, + DbDriverState_, + INITIAL_DEFERRED_CALL_DELAY, TRpcRequestSettings::Make(settings), - settings.ClientTimeout_); - - return promise.GetFuture(); - } + settings.ClientTimeout_); + + return promise.GetFuture(); + } TAsyncStatus AcquireResource(const TString& coordinationNodePath, const TString& resourcePath, const TAcquireResourceSettings& settings) { auto request = MakeOperationRequest<Ydb::RateLimiter::AcquireResourceRequest>(settings); @@ -187,35 +187,35 @@ public: TRpcRequestSettings::Make(settings), settings.ClientTimeout_); } -}; - -TRateLimiterClient::TRateLimiterClient(const TDriver& driver, const TCommonClientSettings& settings) - : Impl_(std::make_shared<TImpl>(CreateInternalInterface(driver), settings)) -{ -} - -TAsyncStatus TRateLimiterClient::CreateResource(const TString& coordinationNodePath, const TString& resourcePath, const TCreateResourceSettings& settings) { - return Impl_->CreateResource(coordinationNodePath, resourcePath, settings); -} - -TAsyncStatus TRateLimiterClient::AlterResource(const TString& coordinationNodePath, const TString& resourcePath, const TAlterResourceSettings& settings) { - return Impl_->AlterResource(coordinationNodePath, resourcePath, settings); -} - -TAsyncStatus TRateLimiterClient::DropResource(const TString& coordinationNodePath, const TString& resourcePath, const TDropResourceSettings& settings) { - return Impl_->DropResource(coordinationNodePath, resourcePath, settings); -} - -TAsyncListResourcesResult TRateLimiterClient::ListResources(const TString& coordinationNodePath, const TString& resourcePath, const TListResourcesSettings& settings) { - return Impl_->ListResources(coordinationNodePath, resourcePath, settings); -} - -TAsyncDescribeResourceResult TRateLimiterClient::DescribeResource(const TString& coordinationNodePath, const TString& resourcePath, const TDescribeResourceSettings& settings) { - return Impl_->DescribeResource(coordinationNodePath, resourcePath, settings); -} - +}; + +TRateLimiterClient::TRateLimiterClient(const TDriver& driver, const TCommonClientSettings& settings) + : Impl_(std::make_shared<TImpl>(CreateInternalInterface(driver), settings)) +{ +} + +TAsyncStatus TRateLimiterClient::CreateResource(const TString& coordinationNodePath, const TString& resourcePath, const TCreateResourceSettings& settings) { + return Impl_->CreateResource(coordinationNodePath, resourcePath, settings); +} + +TAsyncStatus TRateLimiterClient::AlterResource(const TString& coordinationNodePath, const TString& resourcePath, const TAlterResourceSettings& settings) { + return Impl_->AlterResource(coordinationNodePath, resourcePath, settings); +} + +TAsyncStatus TRateLimiterClient::DropResource(const TString& coordinationNodePath, const TString& resourcePath, const TDropResourceSettings& settings) { + 
return Impl_->DropResource(coordinationNodePath, resourcePath, settings); +} + +TAsyncListResourcesResult TRateLimiterClient::ListResources(const TString& coordinationNodePath, const TString& resourcePath, const TListResourcesSettings& settings) { + return Impl_->ListResources(coordinationNodePath, resourcePath, settings); +} + +TAsyncDescribeResourceResult TRateLimiterClient::DescribeResource(const TString& coordinationNodePath, const TString& resourcePath, const TDescribeResourceSettings& settings) { + return Impl_->DescribeResource(coordinationNodePath, resourcePath, settings); +} + TAsyncStatus TRateLimiterClient::AcquireResource(const TString& coordinationNodePath, const TString& resourcePath, const TAcquireResourceSettings& settings) { return Impl_->AcquireResource(coordinationNodePath, resourcePath, settings); } -} // namespace NYdb::NRateLimiter +} // namespace NYdb::NRateLimiter diff --git a/ydb/public/sdk/cpp/client/ydb_rate_limiter/rate_limiter.h b/ydb/public/sdk/cpp/client/ydb_rate_limiter/rate_limiter.h index dca90fbe639..aeb63d5f0a1 100644 --- a/ydb/public/sdk/cpp/client/ydb_rate_limiter/rate_limiter.h +++ b/ydb/public/sdk/cpp/client/ydb_rate_limiter/rate_limiter.h @@ -1,32 +1,32 @@ -#pragma once - +#pragma once + #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h> - -#include <util/generic/maybe.h> - -namespace Ydb::RateLimiter { -class DescribeResourceResult; -class HierarchicalDrrSettings; -} // namespace Ydb::RateLimiter - -namespace NYdb::NRateLimiter { - -// Settings for hierarchical deficit round robin (HDRR) algorithm. -template <class TDerived> -struct THierarchicalDrrSettings : public TOperationRequestSettings<TDerived> { - using TSelf = TDerived; - - // Resource consumption speed limit. - // Value is required for root resource. - // Must be nonnegative. - FLUENT_SETTING_OPTIONAL(double, MaxUnitsPerSecond); - - // Maximum burst size of resource consumption across the whole cluster - // divided by max_units_per_second. - // Default value is 1. - // This means that maximum burst size might be equal to max_units_per_second. - // Must be nonnegative. - FLUENT_SETTING_OPTIONAL(double, MaxBurstSizeCoefficient); + +#include <util/generic/maybe.h> + +namespace Ydb::RateLimiter { +class DescribeResourceResult; +class HierarchicalDrrSettings; +} // namespace Ydb::RateLimiter + +namespace NYdb::NRateLimiter { + +// Settings for hierarchical deficit round robin (HDRR) algorithm. +template <class TDerived> +struct THierarchicalDrrSettings : public TOperationRequestSettings<TDerived> { + using TSelf = TDerived; + + // Resource consumption speed limit. + // Value is required for root resource. + // Must be nonnegative. + FLUENT_SETTING_OPTIONAL(double, MaxUnitsPerSecond); + + // Maximum burst size of resource consumption across the whole cluster + // divided by max_units_per_second. + // Default value is 1. + // This means that maximum burst size might be equal to max_units_per_second. + // Must be nonnegative. + FLUENT_SETTING_OPTIONAL(double, MaxBurstSizeCoefficient); // Prefetch in local bucket up to PrefetchCoefficient*MaxUnitsPerSecond units (full size). // Default value is inherited from parent or 0.2 for root. @@ -42,43 +42,43 @@ struct THierarchicalDrrSettings : public TOperationRequestSettings<TDerived> { // Default value is inherited from parent or 0.75 for root. // Must be nonnegative and less than or equal to 1. FLUENT_SETTING_OPTIONAL(double, PrefetchWatermark); -}; - -// Settings for create resource request. 
-struct TCreateResourceSettings : public THierarchicalDrrSettings<TCreateResourceSettings> { -}; - -// Settings for alter resource request. -struct TAlterResourceSettings : public THierarchicalDrrSettings<TAlterResourceSettings> { -}; - -// Settings for drop resource request. -struct TDropResourceSettings : public TOperationRequestSettings<TDropResourceSettings> {}; - -// Settings for list resources request. -struct TListResourcesSettings : public TOperationRequestSettings<TListResourcesSettings> { - using TSelf = TListResourcesSettings; - - // List resources recursively, including children. - FLUENT_SETTING_FLAG(Recursive); -}; - -// Settings for describe resource request. -struct TDescribeResourceSettings : public TOperationRequestSettings<TDescribeResourceSettings> {}; - -// Result for list resources request. -struct TListResourcesResult : public TStatus { - TListResourcesResult(TStatus status, TVector<TString> paths); - - // Paths of listed resources inside a specified coordination node. - const TVector<TString>& GetResourcePaths() const { - return ResourcePaths_; - } - -private: - TVector<TString> ResourcePaths_; -}; - +}; + +// Settings for create resource request. +struct TCreateResourceSettings : public THierarchicalDrrSettings<TCreateResourceSettings> { +}; + +// Settings for alter resource request. +struct TAlterResourceSettings : public THierarchicalDrrSettings<TAlterResourceSettings> { +}; + +// Settings for drop resource request. +struct TDropResourceSettings : public TOperationRequestSettings<TDropResourceSettings> {}; + +// Settings for list resources request. +struct TListResourcesSettings : public TOperationRequestSettings<TListResourcesSettings> { + using TSelf = TListResourcesSettings; + + // List resources recursively, including children. + FLUENT_SETTING_FLAG(Recursive); +}; + +// Settings for describe resource request. +struct TDescribeResourceSettings : public TOperationRequestSettings<TDescribeResourceSettings> {}; + +// Result for list resources request. +struct TListResourcesResult : public TStatus { + TListResourcesResult(TStatus status, TVector<TString> paths); + + // Paths of listed resources inside a specified coordination node. + const TVector<TString>& GetResourcePaths() const { + return ResourcePaths_; + } + +private: + TVector<TString> ResourcePaths_; +}; + // Settings for acquire resource request. struct TAcquireResourceSettings : public TOperationRequestSettings<TAcquireResourceSettings> { using TSelf = TAcquireResourceSettings; @@ -87,24 +87,24 @@ struct TAcquireResourceSettings : public TOperationRequestSettings<TAcquireResou FLUENT_SETTING_FLAG(IsUsedAmount); }; -using TAsyncListResourcesResult = NThreading::TFuture<TListResourcesResult>; - -// Result for describe resource request. -struct TDescribeResourceResult : public TStatus { - struct THierarchicalDrrProps { - THierarchicalDrrProps(const Ydb::RateLimiter::HierarchicalDrrSettings&); - - // Resource consumption speed limit. - TMaybe<double> GetMaxUnitsPerSecond() const { - return MaxUnitsPerSecond_; - } - - // Maximum burst size of resource consumption across the whole cluster - // divided by max_units_per_second. - TMaybe<double> GetMaxBurstSizeCoefficient() const { - return MaxBurstSizeCoefficient_; - } - +using TAsyncListResourcesResult = NThreading::TFuture<TListResourcesResult>; + +// Result for describe resource request. 
+struct TDescribeResourceResult : public TStatus { + struct THierarchicalDrrProps { + THierarchicalDrrProps(const Ydb::RateLimiter::HierarchicalDrrSettings&); + + // Resource consumption speed limit. + TMaybe<double> GetMaxUnitsPerSecond() const { + return MaxUnitsPerSecond_; + } + + // Maximum burst size of resource consumption across the whole cluster + // divided by max_units_per_second. + TMaybe<double> GetMaxBurstSizeCoefficient() const { + return MaxBurstSizeCoefficient_; + } + // Prefetch in local bucket up to PrefetchCoefficient*MaxUnitsPerSecond units (full size). TMaybe<double> GetPrefetchCoefficient() const { return PrefetchCoefficient_; @@ -115,57 +115,57 @@ struct TDescribeResourceResult : public TStatus { return PrefetchWatermark_; } - private: - TMaybe<double> MaxUnitsPerSecond_; - TMaybe<double> MaxBurstSizeCoefficient_; + private: + TMaybe<double> MaxUnitsPerSecond_; + TMaybe<double> MaxBurstSizeCoefficient_; TMaybe<double> PrefetchCoefficient_; TMaybe<double> PrefetchWatermark_; - }; - - TDescribeResourceResult(TStatus status, const Ydb::RateLimiter::DescribeResourceResult& result); - - // Path of resource inside a coordination node. - const TString& GetResourcePath() const { - return ResourcePath_; - } - - const THierarchicalDrrProps& GetHierarchicalDrrProps() const { - return HierarchicalDrrProps_; - } - -private: - TString ResourcePath_; - THierarchicalDrrProps HierarchicalDrrProps_; -}; - -using TAsyncDescribeResourceResult = NThreading::TFuture<TDescribeResourceResult>; - -// Rate limiter client. -class TRateLimiterClient { -public: - TRateLimiterClient(const TDriver& driver, const TCommonClientSettings& settings = TCommonClientSettings()); - - // Create a new resource in existing coordination node. - TAsyncStatus CreateResource(const TString& coordinationNodePath, const TString& resourcePath, const TCreateResourceSettings& = {}); - - // Update a resource in coordination node. - TAsyncStatus AlterResource(const TString& coordinationNodePath, const TString& resourcePath, const TAlterResourceSettings& = {}); - - // Delete a resource from coordination node. - TAsyncStatus DropResource(const TString& coordinationNodePath, const TString& resourcePath, const TDropResourceSettings& = {}); - - // List resources in given coordination node. - TAsyncListResourcesResult ListResources(const TString& coordinationNodePath, const TString& resourcePath, const TListResourcesSettings& = {}); - - // Describe properties of resource in coordination node. - TAsyncDescribeResourceResult DescribeResource(const TString& coordinationNodePath, const TString& resourcePath, const TDescribeResourceSettings& = {}); - + }; + + TDescribeResourceResult(TStatus status, const Ydb::RateLimiter::DescribeResourceResult& result); + + // Path of resource inside a coordination node. + const TString& GetResourcePath() const { + return ResourcePath_; + } + + const THierarchicalDrrProps& GetHierarchicalDrrProps() const { + return HierarchicalDrrProps_; + } + +private: + TString ResourcePath_; + THierarchicalDrrProps HierarchicalDrrProps_; +}; + +using TAsyncDescribeResourceResult = NThreading::TFuture<TDescribeResourceResult>; + +// Rate limiter client. +class TRateLimiterClient { +public: + TRateLimiterClient(const TDriver& driver, const TCommonClientSettings& settings = TCommonClientSettings()); + + // Create a new resource in existing coordination node. 
+ TAsyncStatus CreateResource(const TString& coordinationNodePath, const TString& resourcePath, const TCreateResourceSettings& = {}); + + // Update a resource in coordination node. + TAsyncStatus AlterResource(const TString& coordinationNodePath, const TString& resourcePath, const TAlterResourceSettings& = {}); + + // Delete a resource from coordination node. + TAsyncStatus DropResource(const TString& coordinationNodePath, const TString& resourcePath, const TDropResourceSettings& = {}); + + // List resources in given coordination node. + TAsyncListResourcesResult ListResources(const TString& coordinationNodePath, const TString& resourcePath, const TListResourcesSettings& = {}); + + // Describe properties of resource in coordination node. + TAsyncDescribeResourceResult DescribeResource(const TString& coordinationNodePath, const TString& resourcePath, const TDescribeResourceSettings& = {}); + // Acquire resources's units inside a coordination node. TAsyncStatus AcquireResource(const TString& coordinationNodePath, const TString& resourcePath, const TAcquireResourceSettings& = {}); -private: - class TImpl; - std::shared_ptr<TImpl> Impl_; -}; - -} // namespace NYdb::NRateLimiter +private: + class TImpl; + std::shared_ptr<TImpl> Impl_; +}; + +} // namespace NYdb::NRateLimiter diff --git a/ydb/public/tools/lib/cmds/__init__.py b/ydb/public/tools/lib/cmds/__init__.py index 977f0a4aae6..aabf1a53e18 100644 --- a/ydb/public/tools/lib/cmds/__init__.py +++ b/ydb/public/tools/lib/cmds/__init__.py @@ -233,18 +233,18 @@ def grpc_tls_data_path(arguments): return os.getenv('YDB_GRPC_TLS_DATA_PATH', default_store) -def enable_datastreams(arguments): - return getattr(arguments, 'enable_datastreams', False) or os.getenv('YDB_ENABLE_DATASTREAMS') == 'true' - - -def enable_pq(arguments): - return getattr(arguments, 'enable_pq', False) or enable_datastreams(arguments) - - -def enable_pqcd(arguments): - return enable_pq(arguments) and (getattr(arguments, 'enable_pqcd', False) or os.getenv('YDB_ENABLE_PQCD') == 'true') - - +def enable_datastreams(arguments): + return getattr(arguments, 'enable_datastreams', False) or os.getenv('YDB_ENABLE_DATASTREAMS') == 'true' + + +def enable_pq(arguments): + return getattr(arguments, 'enable_pq', False) or enable_datastreams(arguments) + + +def enable_pqcd(arguments): + return enable_pq(arguments) and (getattr(arguments, 'enable_pqcd', False) or os.getenv('YDB_ENABLE_PQCD') == 'true') + + def deploy(arguments): initialize_working_dir(arguments) recipe = Recipe(arguments) @@ -280,9 +280,9 @@ def deploy(arguments): output_path=recipe.generate_data_path(), pdisk_store_path=pdisk_store_path, domain_name='local', - enable_pq=enable_pq(arguments), - enable_datastreams=enable_datastreams(arguments), - enable_pqcd=enable_pqcd(arguments), + enable_pq=enable_pq(arguments), + enable_datastreams=enable_datastreams(arguments), + enable_pqcd=enable_pqcd(arguments), load_udfs=True, suppress_version_check=arguments.suppress_version_check, udfs_path=arguments.ydb_udfs_dir, diff --git a/ydb/services/auth/grpc_service.h b/ydb/services/auth/grpc_service.h index 706621df6df..5e0008804a2 100644 --- a/ydb/services/auth/grpc_service.h +++ b/ydb/services/auth/grpc_service.h @@ -25,11 +25,11 @@ namespace NGRpcService { void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - 
NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/cms/grpc_service.h b/ydb/services/cms/grpc_service.h index 80f3bbe9283..fffa63d9dc4 100644 --- a/ydb/services/cms/grpc_service.h +++ b/ydb/services/cms/grpc_service.h @@ -26,11 +26,11 @@ public: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/datastreams/datastreams_proxy.cpp b/ydb/services/datastreams/datastreams_proxy.cpp index 03a6d06213b..d6012dfd39d 100644 --- a/ydb/services/datastreams/datastreams_proxy.cpp +++ b/ydb/services/datastreams/datastreams_proxy.cpp @@ -1334,8 +1334,8 @@ namespace NKikimr::NDataStreams::V1 { ui32 MaxResults = DEFAULT_MAX_RESULTS; std::map<ui64, std::pair<ui64, ui64>> StartEndOffsetsPerPartition; std::vector<NKikimrSchemeOp::TPersQueueGroupDescription::TPartition> Shards; - ui32 LeftToRead = 0; - ui32 AllShardsCount = 0; + ui32 LeftToRead = 0; + ui32 AllShardsCount = 0; std::atomic<ui32> GotOffsetResponds; std::vector<TActorId> Pipes; }; @@ -1368,7 +1368,7 @@ namespace NKikimr::NDataStreams::V1 { } MaxResults = MaxResults == 0 ? DEFAULT_MAX_RESULTS : MaxResults; - if (MaxResults > MAX_MAX_RESULTS) { + if (MaxResults > MAX_MAX_RESULTS) { return ReplyWithError(Ydb::StatusIds::BAD_REQUEST, Ydb::PersQueue::ErrorCode::BAD_REQUEST, TStringBuilder() << "Max results '" << MaxResults << "' is out of bound [" << MIN_MAX_RESULTS << "; " << diff --git a/ydb/services/datastreams/grpc_service.h b/ydb/services/datastreams/grpc_service.h index d8ed51df522..cdef9c0dd8e 100644 --- a/ydb/services/datastreams/grpc_service.h +++ b/ydb/services/datastreams/grpc_service.h @@ -24,12 +24,12 @@ namespace NKikimr::NGRpcService { void InitNewSchemeCache(); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; NActors::TActorId NewSchemeCache; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } diff --git a/ydb/services/discovery/grpc_service.h b/ydb/services/discovery/grpc_service.h index c6cb25f0fd9..c95a145df4b 100644 --- a/ydb/services/discovery/grpc_service.h +++ b/ydb/services/discovery/grpc_service.h @@ -27,11 +27,11 @@ namespace NGRpcService { void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/monitoring/grpc_service.h b/ydb/services/monitoring/grpc_service.h index 3c324424fdd..8e3b9970b4e 100644 --- a/ydb/services/monitoring/grpc_service.h +++ b/ydb/services/monitoring/grpc_service.h @@ -26,11 +26,11 @@ namespace NGRpcService { void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + 
grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/persqueue_cluster_discovery/cluster_ordering/weighed_ordering.h b/ydb/services/persqueue_cluster_discovery/cluster_ordering/weighed_ordering.h index a9695507ec9..c2577b2f03d 100644 --- a/ydb/services/persqueue_cluster_discovery/cluster_ordering/weighed_ordering.h +++ b/ydb/services/persqueue_cluster_discovery/cluster_ordering/weighed_ordering.h @@ -5,7 +5,7 @@ #include <vector> #include <util/system/types.h> -#include <util/system/yassert.h> +#include <util/system/yassert.h> namespace NKikimr::NPQ::NClusterDiscovery::NClusterOrdering { @@ -26,7 +26,7 @@ Iterator SelectByHashAndWeight(Iterator begin, Iterator end, const ui64 hashValu borders.emplace_back(total, it); } - Y_VERIFY(total); + Y_VERIFY(total); const ui64 borderToFind = hashValue % total + 1; const auto bordersIt = lower_bound(borders.begin(), borders.end(), borderToFind, diff --git a/ydb/services/persqueue_cluster_discovery/grpc_service.h b/ydb/services/persqueue_cluster_discovery/grpc_service.h index 7bc17ba2f5e..9f07a08aa5d 100644 --- a/ydb/services/persqueue_cluster_discovery/grpc_service.h +++ b/ydb/services/persqueue_cluster_discovery/grpc_service.h @@ -23,7 +23,7 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; diff --git a/ydb/services/persqueue_v1/grpc_pq_actor.h b/ydb/services/persqueue_v1/grpc_pq_actor.h index 65c8aca1db2..b41a233589c 100644 --- a/ydb/services/persqueue_v1/grpc_pq_actor.h +++ b/ydb/services/persqueue_v1/grpc_pq_actor.h @@ -33,9 +33,9 @@ #include <ydb/services/lib/actors/pq_schema_actor.h> #include <ydb/services/lib/actors/type_definitions.h> -#include <util/generic/guid.h> -#include <util/system/compiler.h> - +#include <util/generic/guid.h> +#include <util/system/compiler.h> + namespace NKikimr::NGRpcProxy::V1 { using namespace Ydb; @@ -219,12 +219,12 @@ struct TEvPQProxy { }; struct TEvReadInit : public NActors::TEventLocal<TEvReadInit, EvReadInit> { - TEvReadInit(const PersQueue::V1::MigrationStreamingReadClientMessage& req, const TString& peerName) + TEvReadInit(const PersQueue::V1::MigrationStreamingReadClientMessage& req, const TString& peerName) : Request(req) , PeerName(peerName) { } - PersQueue::V1::MigrationStreamingReadClientMessage Request; + PersQueue::V1::MigrationStreamingReadClientMessage Request; TString PeerName; }; @@ -237,7 +237,7 @@ struct TEvPQProxy { , ReadTimestampMs(readTimestampMs) { } - const TString Guid; + const TString Guid; ui64 MaxCount; ui64 MaxSize; ui64 MaxTimeLagMs; @@ -282,13 +282,13 @@ struct TEvPQProxy { struct TEvReadResponse : public NActors::TEventLocal<TEvReadResponse, EvReadResponse> { explicit TEvReadResponse(PersQueue::V1::MigrationStreamingReadServerMessage&& resp, ui64 nextReadOffset, bool fromDisk, TDuration waitQuotaTime) : Response(std::move(resp)) - , NextReadOffset(nextReadOffset) + , NextReadOffset(nextReadOffset) , FromDisk(fromDisk) , WaitQuotaTime(waitQuotaTime) { } - PersQueue::V1::MigrationStreamingReadServerMessage Response; - ui64 NextReadOffset; + PersQueue::V1::MigrationStreamingReadServerMessage Response; + ui64 
NextReadOffset; bool FromDisk; TDuration WaitQuotaTime; }; @@ -338,7 +338,7 @@ struct TEvPQProxy { , ReadOffset(readOffset) , CommitOffset(commitOffset) , VerifyReadOffset(verifyReadOffset) - , Generation(0) + , Generation(0) { } const TPartitionId Partition; @@ -584,7 +584,7 @@ private: // 'SourceId' is called 'MessageGroupId' since gRPC data plane API v1 TString SourceId; // TODO: Replace with 'MessageGroupId' everywhere TString EscapedSourceId; - ui32 Hash = 0; + ui32 Hash = 0; TString OwnerCookie; TString UserAgent; @@ -593,28 +593,28 @@ private: THolder<TAclWrapper> ACL; - struct TWriteRequestBatchInfo: public TSimpleRefCount<TWriteRequestBatchInfo> { - using TPtr = TIntrusivePtr<TWriteRequestBatchInfo>; - - // Source requests from user (grpc session object) - std::deque<THolder<TEvPQProxy::TEvWrite>> UserWriteRequests; - + struct TWriteRequestBatchInfo: public TSimpleRefCount<TWriteRequestBatchInfo> { + using TPtr = TIntrusivePtr<TWriteRequestBatchInfo>; + + // Source requests from user (grpc session object) + std::deque<THolder<TEvPQProxy::TEvWrite>> UserWriteRequests; + // Formed write request's size - ui64 ByteSize = 0; - + ui64 ByteSize = 0; + // Formed write request's cookie - ui64 Cookie = 0; - }; - - // Nonprocessed source client requests + ui64 Cookie = 0; + }; + + // Nonprocessed source client requests std::deque<THolder<TEvPQProxy::TEvWrite>> Writes; - // Formed, but not sent, batch requests to partition actor - std::deque<TWriteRequestBatchInfo::TPtr> FormedWrites; - - // Requests that is already sent to partition actor - std::deque<TWriteRequestBatchInfo::TPtr> SentMessages; + // Formed, but not sent, batch requests to partition actor + std::deque<TWriteRequestBatchInfo::TPtr> FormedWrites; + // Requests that is already sent to partition actor + std::deque<TWriteRequestBatchInfo::TPtr> SentMessages; + bool WritesDone; @@ -740,7 +740,7 @@ private: class TReadSessionActor : public TActorBootstrapped<TReadSessionActor> { -using IContext = NGRpcServer::IGRpcStreamingContext<PersQueue::V1::MigrationStreamingReadClientMessage, PersQueue::V1::MigrationStreamingReadServerMessage>; +using IContext = NGRpcServer::IGRpcStreamingContext<PersQueue::V1::MigrationStreamingReadClientMessage, PersQueue::V1::MigrationStreamingReadServerMessage>; public: TReadSessionActor(NKikimr::NGRpcService::TEvStreamPQReadRequest* request, const ui64 cookie, const NActors::TActorId& schemeCache, const NActors::TActorId& newSchemeCache, @@ -783,12 +783,12 @@ private: HFunc(TEvPQProxy::TEvAuthResultOk, Handle); // form auth actor HFunc(TEvPQProxy::TEvDieCommand, HandlePoison) - HFunc(TEvPQProxy::TEvReadInit, Handle) //from gRPC + HFunc(TEvPQProxy::TEvReadInit, Handle) //from gRPC HFunc(TEvPQProxy::TEvReadSessionStatus, Handle) // from read sessions info builder proxy - HFunc(TEvPQProxy::TEvRead, Handle) //from gRPC - HFunc(TEvPQProxy::TEvDone, Handle) //from gRPC - HFunc(TEvPQProxy::TEvCloseSession, Handle) //from partitionActor - HFunc(TEvPQProxy::TEvPartitionReady, Handle) //from partitionActor + HFunc(TEvPQProxy::TEvRead, Handle) //from gRPC + HFunc(TEvPQProxy::TEvDone, Handle) //from gRPC + HFunc(TEvPQProxy::TEvCloseSession, Handle) //from partitionActor + HFunc(TEvPQProxy::TEvPartitionReady, Handle) //from partitionActor HFunc(TEvPQProxy::TEvPartitionReleased, Handle) //from partitionActor HFunc(TEvPQProxy::TEvReadResponse, Handle) //from partitionActor @@ -863,7 +863,7 @@ private: const TString& folderId); void ProcessReads(const NActors::TActorContext& ctx); // returns false if actor died - struct 
TFormedReadResponse; + struct TFormedReadResponse; void ProcessAnswer(const NActors::TActorContext& ctx, TIntrusivePtr<TFormedReadResponse> formedResponse); // returns false if actor died void RegisterSessions(const NActors::TActorContext& ctx); @@ -880,9 +880,9 @@ private: void InformBalancerAboutRelease(const THashMap<ui64, TPartitionActorInfo>::iterator& it, const TActorContext& ctx); - static ui32 NormalizeMaxReadMessagesCount(ui32 sourceValue); - static ui32 NormalizeMaxReadSize(ui32 sourceValue); - + static ui32 NormalizeMaxReadMessagesCount(ui32 sourceValue); + static ui32 NormalizeMaxReadSize(ui32 sourceValue); + private: std::unique_ptr<NKikimr::NGRpcService::TEvStreamPQReadRequest> Request; @@ -907,10 +907,10 @@ private: bool InitDone; bool RangesMode = false; - ui32 MaxReadMessagesCount; - ui32 MaxReadSize; - ui32 MaxTimeLagMs; - ui64 ReadTimestampMs; + ui32 MaxReadMessagesCount; + ui32 MaxReadSize; + ui32 MaxTimeLagMs; + ui64 ReadTimestampMs; TString Auth; @@ -922,10 +922,10 @@ private: TActorId Actor; const TPartitionId Partition; std::deque<ui64> Commits; - bool Reading; + bool Reading; bool Releasing; bool Released; - bool LockSent; + bool LockSent; bool ReleaseSent; ui64 ReadIdToResponse; @@ -940,10 +940,10 @@ private: TPartitionActorInfo(const TActorId& actor, const TPartitionId& partition, const TActorContext& ctx) : Actor(actor) , Partition(partition) - , Reading(false) + , Reading(false) , Releasing(false) , Released(false) - , LockSent(false) + , LockSent(false) , ReleaseSent(false) , ReadIdToResponse(1) , ReadIdCommitted(0) @@ -980,43 +980,43 @@ private: }; TSet<TPartitionInfo> AvailablePartitions; - - struct TFormedReadResponse: public TSimpleRefCount<TFormedReadResponse> { - using TPtr = TIntrusivePtr<TFormedReadResponse>; - + + struct TFormedReadResponse: public TSimpleRefCount<TFormedReadResponse> { + using TPtr = TIntrusivePtr<TFormedReadResponse>; + TFormedReadResponse(const TString& guid, const TInstant start) - : Guid(guid) + : Guid(guid) , Start(start) , FromDisk(false) - { - } - - PersQueue::V1::MigrationStreamingReadServerMessage Response; + { + } + + PersQueue::V1::MigrationStreamingReadServerMessage Response; ui32 RequestsInfly = 0; i64 ByteSize = 0; ui64 RequestedBytes = 0; - //returns byteSize diff - i64 ApplyResponse(PersQueue::V1::MigrationStreamingReadServerMessage&& resp); + //returns byteSize diff + i64 ApplyResponse(PersQueue::V1::MigrationStreamingReadServerMessage&& resp); THashSet<TActorId> PartitionsTookPartInRead; TSet<TPartitionId> PartitionsTookPartInControlMessages; + + TSet<TPartitionInfo> PartitionsBecameAvailable; // Partitions that became available during this read request execution. - TSet<TPartitionInfo> PartitionsBecameAvailable; // Partitions that became available during this read request execution. - - // These partitions are bringed back to AvailablePartitions after reply to this read request. - - const TString Guid; + // These partitions are bringed back to AvailablePartitions after reply to this read request. + + const TString Guid; TInstant Start; bool FromDisk; TDuration WaitQuotaTime; }; THashMap<TActorId, TFormedReadResponse::TPtr> PartitionToReadResponse; // Partition actor -> TFormedReadResponse answer that has this partition. - // PartitionsTookPartInRead in formed read response contain this actor id. + // PartitionsTookPartInRead in formed read response contain this actor id. 
struct TControlMessages { - TVector<PersQueue::V1::MigrationStreamingReadServerMessage> ControlMessages; + TVector<PersQueue::V1::MigrationStreamingReadServerMessage> ControlMessages; ui32 Infly = 0; }; diff --git a/ydb/services/persqueue_v1/grpc_pq_read.cpp b/ydb/services/persqueue_v1/grpc_pq_read.cpp index 6b7e72fdd0c..b86d1081504 100644 --- a/ydb/services/persqueue_v1/grpc_pq_read.cpp +++ b/ydb/services/persqueue_v1/grpc_pq_read.cpp @@ -98,8 +98,8 @@ void TPQReadService::Handle(TEvPQProxy::TEvSessionDead::TPtr& ev, const TActorCo } -MigrationStreamingReadServerMessage FillReadResponse(const TString& errorReason, const PersQueue::ErrorCode::ErrorCode code) { - MigrationStreamingReadServerMessage res; +MigrationStreamingReadServerMessage FillReadResponse(const TString& errorReason, const PersQueue::ErrorCode::ErrorCode code) { + MigrationStreamingReadServerMessage res; FillIssue(res.add_issues(), code, errorReason); res.set_status(ConvertPersQueueInternalCodeToStatus(code)); return res; diff --git a/ydb/services/persqueue_v1/grpc_pq_read.h b/ydb/services/persqueue_v1/grpc_pq_read.h index 558063ed131..cd438d54bb7 100644 --- a/ydb/services/persqueue_v1/grpc_pq_read.h +++ b/ydb/services/persqueue_v1/grpc_pq_read.h @@ -78,7 +78,7 @@ private: std::unique_ptr<NPersQueue::TTopicsListController> TopicsHandler; bool HaveClusters; }; - + } } diff --git a/ydb/services/persqueue_v1/grpc_pq_read_actor.cpp b/ydb/services/persqueue_v1/grpc_pq_read_actor.cpp index 654edfcfcd4..3040baf2e60 100644 --- a/ydb/services/persqueue_v1/grpc_pq_read_actor.cpp +++ b/ydb/services/persqueue_v1/grpc_pq_read_actor.cpp @@ -20,8 +20,8 @@ #include <util/string/strip.h> #include <util/charset/utf8.h> -#include <algorithm> - +#include <algorithm> + using namespace NActors; using namespace NKikimrClient; @@ -64,7 +64,7 @@ static const ui64 MAX_READ_SIZE = 100 << 20; //100mb; static const ui32 MAX_COMMITS_INFLY = 3; -static const double LAG_GROW_MULTIPLIER = 1.2; //assume that 20% more data arrived to partitions +static const double LAG_GROW_MULTIPLIER = 1.2; //assume that 20% more data arrived to partitions //TODO: add here tracking of bytes in/out @@ -91,11 +91,11 @@ struct TOffsetInfo { }; -bool RemoveEmptyMessages(MigrationStreamingReadServerMessage::DataBatch& data) { - auto batchRemover = [&](MigrationStreamingReadServerMessage::DataBatch::Batch& batch) -> bool { +bool RemoveEmptyMessages(MigrationStreamingReadServerMessage::DataBatch& data) { + auto batchRemover = [&](MigrationStreamingReadServerMessage::DataBatch::Batch& batch) -> bool { return batch.message_data_size() == 0; }; - auto partitionDataRemover = [&](MigrationStreamingReadServerMessage::DataBatch::PartitionData& partition) -> bool { + auto partitionDataRemover = [&](MigrationStreamingReadServerMessage::DataBatch::PartitionData& partition) -> bool { NProtoBuf::RemoveRepeatedFieldItemIf(partition.mutable_batches(), batchRemover); return partition.batches_size() == 0; }; @@ -211,8 +211,8 @@ private: ui64 EndOffset; ui64 SizeLag; - TString ReadGuid; // empty if not reading - + TString ReadGuid; // empty if not reading + bool NeedRelease; bool Released; @@ -253,10 +253,10 @@ TReadSessionActor::TReadSessionActor( , CommitsDisabled(false) , BalancersInitStarted(false) , InitDone(false) - , MaxReadMessagesCount(0) - , MaxReadSize(0) - , MaxTimeLagMs(0) - , ReadTimestampMs(0) + , MaxReadMessagesCount(0) + , MaxReadSize(0) + , MaxTimeLagMs(0) + , ReadTimestampMs(0) , ForceACLCheck(false) , RequestNotChecked(true) , LastACLCheckTimestamp(TInstant::Zero()) @@ -333,11 
+333,11 @@ void TReadSessionActor::Handle(IContext::TEvReadFinished::TPtr& ev, const TActor return TPartitionId{converter, partition, assignId}; }; switch (request.request_case()) { - case MigrationStreamingReadClientMessage::kInitRequest: { + case MigrationStreamingReadClientMessage::kInitRequest: { ctx.Send(ctx.SelfID, new TEvPQProxy::TEvReadInit(request, Request->GetStreamCtx()->GetPeerName())); break; } - case MigrationStreamingReadClientMessage::kStatus: { + case MigrationStreamingReadClientMessage::kStatus: { //const auto& req = request.status(); ctx.Send(ctx.SelfID, new TEvPQProxy::TEvGetStatus(MakePartitionId(request.status()))); if (!Request->GetStreamCtx()->Read()) { @@ -348,11 +348,11 @@ void TReadSessionActor::Handle(IContext::TEvReadFinished::TPtr& ev, const TActor break; } - case MigrationStreamingReadClientMessage::kRead: { + case MigrationStreamingReadClientMessage::kRead: { ctx.Send(ctx.SelfID, new TEvPQProxy::TEvRead()); // Proto read message have no parameters break; } - case MigrationStreamingReadClientMessage::kReleased: { + case MigrationStreamingReadClientMessage::kReleased: { //const auto& req = request.released(); ctx.Send(ctx.SelfID, new TEvPQProxy::TEvReleased(MakePartitionId(request.released()))); if (!Request->GetStreamCtx()->Read()) { @@ -363,7 +363,7 @@ void TReadSessionActor::Handle(IContext::TEvReadFinished::TPtr& ev, const TActor break; } - case MigrationStreamingReadClientMessage::kStartRead: { + case MigrationStreamingReadClientMessage::kStartRead: { const auto& req = request.start_read(); const ui64 readOffset = req.read_offset(); @@ -378,7 +378,7 @@ void TReadSessionActor::Handle(IContext::TEvReadFinished::TPtr& ev, const TActor } break; } - case MigrationStreamingReadClientMessage::kCommit: { + case MigrationStreamingReadClientMessage::kCommit: { const auto& req = request.commit(); if (!req.cookies_size() && !RangesMode) { @@ -630,7 +630,7 @@ void TReadSessionActor::Handle(TEvPQProxy::TEvCommitDone::TPtr& ev, const TActor if (ev->Get()->StartCookie == Max<ui64>()) //means commit at start return; - MigrationStreamingReadServerMessage result; + MigrationStreamingReadServerMessage result; result.set_status(Ydb::StatusIds::SUCCESS); if (!RangesMode) { for (ui64 i = ev->Get()->StartCookie; i <= ev->Get()->LastCookie; ++i) { @@ -691,45 +691,45 @@ void TReadSessionActor::Handle(TEvPQProxy::TEvReadInit::TPtr& ev, const TActorCo return; } - const auto& init = event->Request.init_request(); + const auto& init = event->Request.init_request(); - if (!init.topics_read_settings_size()) { + if (!init.topics_read_settings_size()) { CloseSession("no topics in init request", PersQueue::ErrorCode::BAD_REQUEST, ctx); return; } - if (init.consumer().empty()) { + if (init.consumer().empty()) { CloseSession("no consumer in init request", PersQueue::ErrorCode::BAD_REQUEST, ctx); return; } ClientId = NPersQueue::ConvertNewConsumerName(init.consumer(), ctx); - ClientPath = init.consumer(); + ClientPath = init.consumer(); TStringBuilder session; session << ClientPath << "_" << ctx.SelfID.NodeId() << "_" << Cookie << "_" << TAppData::RandomProvider->GenRand64() << "_v1"; Session = session; - CommitsDisabled = false; + CommitsDisabled = false; RangesMode = init.ranges_mode(); MaxReadMessagesCount = NormalizeMaxReadMessagesCount(init.read_params().max_read_messages_count()); MaxReadSize = NormalizeMaxReadSize(init.read_params().max_read_size()); - if (init.max_lag_duration_ms() < 0) { - CloseSession("max_lag_duration_ms must be nonnegative number", 
PersQueue::ErrorCode::BAD_REQUEST, ctx); - return; - } - MaxTimeLagMs = init.max_lag_duration_ms(); - if (init.start_from_written_at_ms() < 0) { - CloseSession("start_from_written_at_ms must be nonnegative number", PersQueue::ErrorCode::BAD_REQUEST, ctx); - return; - } - ReadTimestampMs = static_cast<ui64>(init.start_from_written_at_ms()); - + if (init.max_lag_duration_ms() < 0) { + CloseSession("max_lag_duration_ms must be nonnegative number", PersQueue::ErrorCode::BAD_REQUEST, ctx); + return; + } + MaxTimeLagMs = init.max_lag_duration_ms(); + if (init.start_from_written_at_ms() < 0) { + CloseSession("start_from_written_at_ms must be nonnegative number", PersQueue::ErrorCode::BAD_REQUEST, ctx); + return; + } + ReadTimestampMs = static_cast<ui64>(init.start_from_written_at_ms()); + PeerName = event->PeerName; ReadOnlyLocal = init.read_only_original(); - for (const auto& topic : init.topics_read_settings()) { + for (const auto& topic : init.topics_read_settings()) { auto converter = TopicsHandler.GetConverterFactory()->MakeTopicNameConverter( topic.topic(), TString(), Request->GetDatabaseName().GetOrElse(TString()) ); @@ -738,20 +738,20 @@ void TReadSessionActor::Handle(TEvPQProxy::TEvReadInit::TPtr& ev, const TActorCo CloseSession("empty topic in init request", PersQueue::ErrorCode::BAD_REQUEST, ctx); return; } - if (topic.start_from_written_at_ms() < 0) { - CloseSession("start_from_written_at_ms must be nonnegative number", PersQueue::ErrorCode::BAD_REQUEST, ctx); - return; - } + if (topic.start_from_written_at_ms() < 0) { + CloseSession("start_from_written_at_ms must be nonnegative number", PersQueue::ErrorCode::BAD_REQUEST, ctx); + return; + } TopicsToResolve.insert(topicName); - for (i64 pg : topic.partition_group_ids()) { - if (pg < 0) { - CloseSession("partition group id must be nonnegative number", PersQueue::ErrorCode::BAD_REQUEST, ctx); - return; - } - if (pg > Max<ui32>()) { - CloseSession(TStringBuilder() << "partition group id " << pg << " is too big for partition group id", PersQueue::ErrorCode::BAD_REQUEST, ctx); - return; - } + for (i64 pg : topic.partition_group_ids()) { + if (pg < 0) { + CloseSession("partition group id must be nonnegative number", PersQueue::ErrorCode::BAD_REQUEST, ctx); + return; + } + if (pg > Max<ui32>()) { + CloseSession(TStringBuilder() << "partition group id " << pg << " is too big for partition group id", PersQueue::ErrorCode::BAD_REQUEST, ctx); + return; + } TopicGroups[topicName].push_back(static_cast<ui32>(pg)); } ReadFromTimestamp[topicName] = topic.start_from_written_at_ms(); @@ -915,10 +915,10 @@ void TReadSessionActor::Handle(TEvPQProxy::TEvAuthResultOk::TPtr& ev, const TAct SLIBigLatency.Inc(); } - MigrationStreamingReadServerMessage result; + MigrationStreamingReadServerMessage result; result.set_status(Ydb::StatusIds::SUCCESS); - result.mutable_init_response()->set_session_id(Session); + result.mutable_init_response()->set_session_id(Session); if (!WriteResponse(std::move(result))) { LOG_INFO_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " grpc write failed"); Die(ctx); @@ -1034,7 +1034,7 @@ void TReadSessionActor::Handle(TEvPersQueue::TEvLockPartition::TPtr& ev, const T LOG_INFO_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " Assign: " << record); - ctx.Send(actorId, new TEvPQProxy::TEvLockPartition(0, 0, false, false)); + ctx.Send(actorId, new TEvPQProxy::TEvLockPartition(0, 0, false, false)); } void TReadSessionActor::Handle(TEvPQProxy::TEvPartitionStatus::TPtr& ev, const TActorContext& ctx) { @@ -1051,7 +1051,7 @@ 
void TReadSessionActor::Handle(TEvPQProxy::TEvPartitionStatus::TPtr& ev, const T it->second.LockSent = true; it->second.Offset = ev->Get()->Offset; - MigrationStreamingReadServerMessage result; + MigrationStreamingReadServerMessage result; result.set_status(Ydb::StatusIds::SUCCESS); result.mutable_assigned()->mutable_topic()->set_path(ev->Get()->Partition.TopicConverter->GetModernName()); @@ -1080,7 +1080,7 @@ void TReadSessionActor::Handle(TEvPQProxy::TEvPartitionStatus::TPtr& ev, const T } else { Y_VERIFY(it->second.LockSent); - MigrationStreamingReadServerMessage result; + MigrationStreamingReadServerMessage result; result.set_status(Ydb::StatusIds::SUCCESS); result.mutable_partition_status()->mutable_topic()->set_path(ev->Get()->Partition.TopicConverter->GetModernName()); @@ -1115,7 +1115,7 @@ void TReadSessionActor::Handle(TEvPersQueue::TEvError::TPtr& ev, const TActorCon void TReadSessionActor::SendReleaseSignalToClient(const THashMap<ui64, TPartitionActorInfo>::iterator& it, bool kill, const TActorContext& ctx) { - MigrationStreamingReadServerMessage result; + MigrationStreamingReadServerMessage result; result.set_status(Ydb::StatusIds::SUCCESS); result.mutable_release()->mutable_topic()->set_path(it->second.Partition.TopicConverter->GetModernName()); @@ -1190,10 +1190,10 @@ void TReadSessionActor::Handle(TEvPersQueue::TEvReleasePartition::TPtr& ev, cons LOG_INFO_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " releasing " << jt->second.Partition); jt->second.Releasing = true; - if (!jt->second.LockSent) { //no lock yet - can release silently - ReleasePartition(jt, true, ctx); + if (!jt->second.LockSent) { //no lock yet - can release silently + ReleasePartition(jt, true, ctx); } else { - SendReleaseSignalToClient(jt, false, ctx); + SendReleaseSignalToClient(jt, false, ctx); } } } @@ -1246,7 +1246,7 @@ void TReadSessionActor::CloseSession(const TString& errorReason, const PersQueue ++(*GetServiceCounters(Counters, "pqproxy|readSession")->GetCounter("Errors", true)); } - MigrationStreamingReadServerMessage result; + MigrationStreamingReadServerMessage result; result.set_status(ConvertPersQueueInternalCodeToStatus(errorCode)); FillIssue(result.add_issues(), errorCode, errorReason); @@ -1283,11 +1283,11 @@ void TReadSessionActor::Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev, cons CloseSession(TStringBuilder() << "unable to connect to one of topics, tablet " << msg->TabletId, PersQueue::ErrorCode::ERROR, ctx); return; -#if 0 - const bool isAlive = ProcessBalancerDead(msg->TabletId, ctx); // returns false if actor died - Y_UNUSED(isAlive); +#if 0 + const bool isAlive = ProcessBalancerDead(msg->TabletId, ctx); // returns false if actor died + Y_UNUSED(isAlive); return; -#endif +#endif } } @@ -1309,15 +1309,15 @@ void TReadSessionActor::ReleasePartition(const THashMap<ui64, TPartitionActorInf } } - Y_VERIFY(couldBeReads || !it->second.Reading); + Y_VERIFY(couldBeReads || !it->second.Reading); //process reads - TFormedReadResponse::TPtr formedResponseToAnswer; - if (it->second.Reading) { - const auto readIt = PartitionToReadResponse.find(it->second.Actor); - Y_VERIFY(readIt != PartitionToReadResponse.end()); - if (--readIt->second->RequestsInfly == 0) { - formedResponseToAnswer = readIt->second; - } + TFormedReadResponse::TPtr formedResponseToAnswer; + if (it->second.Reading) { + const auto readIt = PartitionToReadResponse.find(it->second.Actor); + Y_VERIFY(readIt != PartitionToReadResponse.end()); + if (--readIt->second->RequestsInfly == 0) { + formedResponseToAnswer = 
readIt->second; + } } InformBalancerAboutRelease(it, ctx); @@ -1325,13 +1325,13 @@ void TReadSessionActor::ReleasePartition(const THashMap<ui64, TPartitionActorInf it->second.Released = true; //to force drop DropPartition(it, ctx); //partition will be dropped - if (formedResponseToAnswer) { + if (formedResponseToAnswer) { ProcessAnswer(ctx, formedResponseToAnswer); // returns false if actor died - } + } } -bool TReadSessionActor::ProcessBalancerDead(const ui64 tablet, const TActorContext& ctx) { +bool TReadSessionActor::ProcessBalancerDead(const ui64 tablet, const TActorContext& ctx) { for (auto& t : Topics) { if (t.second.TabletID == tablet) { LOG_INFO_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " balancer for topic " << t.first << " is dead, restarting all from this topic"); @@ -1342,9 +1342,9 @@ bool TReadSessionActor::ProcessBalancerDead(const ui64 tablet, const TActorConte // kill actor auto jt = it; ++it; - if (jt->second.LockSent) { + if (jt->second.LockSent) { SendReleaseSignalToClient(jt, true, ctx); - } + } ReleasePartition(jt, true, ctx); } else { ++it; @@ -1364,13 +1364,13 @@ bool TReadSessionActor::ProcessBalancerDead(const ui64 tablet, const TActorConte } } } - return true; + return true; } void TReadSessionActor::Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev, const TActorContext& ctx) { - const bool isAlive = ProcessBalancerDead(ev->Get()->TabletId, ctx); // returns false if actor died - Y_UNUSED(isAlive); + const bool isAlive = ProcessBalancerDead(ev->Get()->TabletId, ctx); // returns false if actor died + Y_UNUSED(isAlive); } void TReadSessionActor::Handle(NGRpcService::TGRpcRequestProxy::TEvRefreshTokenResponse::TPtr &ev , const TActorContext& ctx) { @@ -1410,7 +1410,7 @@ void TReadSessionActor::Handle(TEvPQProxy::TEvRead::TPtr& ev, const TActorContex } -i64 TReadSessionActor::TFormedReadResponse::ApplyResponse(MigrationStreamingReadServerMessage&& resp) { +i64 TReadSessionActor::TFormedReadResponse::ApplyResponse(MigrationStreamingReadServerMessage&& resp) { Y_VERIFY(resp.data_batch().partition_data_size() == 1); Response.set_status(Ydb::StatusIds::SUCCESS); @@ -1422,7 +1422,7 @@ i64 TReadSessionActor::TFormedReadResponse::ApplyResponse(MigrationStreamingRead void TReadSessionActor::Handle(TEvPQProxy::TEvReadResponse::TPtr& ev, const TActorContext& ctx) { TActorId sender = ev->Sender; - if (!ActualPartitionActor(sender)) + if (!ActualPartitionActor(sender)) return; THolder<TEvPQProxy::TEvReadResponse> event(ev->Release()); @@ -1432,34 +1432,34 @@ void TReadSessionActor::Handle(TEvPQProxy::TEvReadResponse::TPtr& ev, const TAct Y_VERIFY(partitionCookie != 0); // cookie is assigned const ui64 assignId = event->Response.data_batch().partition_data(0).cookie().assign_id(); const auto partitionIt = Partitions.find(assignId); - Y_VERIFY(partitionIt != Partitions.end()); - Y_VERIFY(partitionIt->second.Reading); - partitionIt->second.Reading = false; + Y_VERIFY(partitionIt != Partitions.end()); + Y_VERIFY(partitionIt->second.Reading); + partitionIt->second.Reading = false; partitionIt->second.ReadIdToResponse = partitionCookie + 1; - auto it = PartitionToReadResponse.find(sender); - Y_VERIFY(it != PartitionToReadResponse.end()); - - TFormedReadResponse::TPtr formedResponse = it->second; - - LOG_DEBUG_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " read done guid " << formedResponse->Guid + auto it = PartitionToReadResponse.find(sender); + Y_VERIFY(it != PartitionToReadResponse.end()); + + TFormedReadResponse::TPtr formedResponse = it->second; + + 
LOG_DEBUG_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " read done guid " << formedResponse->Guid << partitionIt->second.Partition << " size " << event->Response.ByteSize()); - const i64 diff = formedResponse->ApplyResponse(std::move(event->Response)); + const i64 diff = formedResponse->ApplyResponse(std::move(event->Response)); if (event->FromDisk) { formedResponse->FromDisk = true; } formedResponse->WaitQuotaTime = Max(formedResponse->WaitQuotaTime, event->WaitQuotaTime); - --formedResponse->RequestsInfly; + --formedResponse->RequestsInfly; BytesInflight_ += diff; (*BytesInflight) += diff; - if (formedResponse->RequestsInfly == 0) { + if (formedResponse->RequestsInfly == 0) { ProcessAnswer(ctx, formedResponse); - } + } } bool TReadSessionActor::WriteResponse(PersQueue::V1::MigrationStreamingReadServerMessage&& response, bool finish) { @@ -1482,10 +1482,10 @@ void TReadSessionActor::ProcessAnswer(const TActorContext& ctx, TFormedReadRespo SLIBigReadLatency.Inc(); } - Y_VERIFY(formedResponse->RequestsInfly == 0); - const ui64 diff = formedResponse->Response.ByteSize(); + Y_VERIFY(formedResponse->RequestsInfly == 0); + const ui64 diff = formedResponse->Response.ByteSize(); const bool hasMessages = RemoveEmptyMessages(*formedResponse->Response.mutable_data_batch()); - if (hasMessages) { + if (hasMessages) { LOG_DEBUG_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " response to read " << formedResponse->Guid); if (!WriteResponse(std::move(formedResponse->Response))) { @@ -1493,12 +1493,12 @@ void TReadSessionActor::ProcessAnswer(const TActorContext& ctx, TFormedReadRespo Die(ctx); return; } - } else { - LOG_DEBUG_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " empty read result " << formedResponse->Guid << ", start new reading"); - } + } else { + LOG_DEBUG_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " empty read result " << formedResponse->Guid << ", start new reading"); + } - BytesInflight_ -= diff; - (*BytesInflight) -= diff; + BytesInflight_ -= diff; + (*BytesInflight) -= diff; for (auto& pp : formedResponse->PartitionsTookPartInControlMessages) { auto it = PartitionToControlMessages.find(pp); @@ -1513,26 +1513,26 @@ void TReadSessionActor::ProcessAnswer(const TActorContext& ctx, TFormedReadRespo } PartitionToControlMessages.erase(it); } - } + } for (const TActorId& p : formedResponse->PartitionsTookPartInRead) { - PartitionToReadResponse.erase(p); - } - + PartitionToReadResponse.erase(p); + } + RequestedBytes -= formedResponse->RequestedBytes; ReadsInfly--; - // Bring back available partitions. - // If some partition was removed from partitions container, it is not bad because it will be checked during read processing. - AvailablePartitions.insert(formedResponse->PartitionsBecameAvailable.begin(), formedResponse->PartitionsBecameAvailable.end()); - + // Bring back available partitions. + // If some partition was removed from partitions container, it is not bad because it will be checked during read processing. 
+ AvailablePartitions.insert(formedResponse->PartitionsBecameAvailable.begin(), formedResponse->PartitionsBecameAvailable.end()); + if (!hasMessages) { - // process new read - MigrationStreamingReadClientMessage req; + // process new read + MigrationStreamingReadClientMessage req; req.mutable_read(); Reads.emplace_back(new TEvPQProxy::TEvRead(formedResponse->Guid)); // Start new reading request with the same guid - } - + } + ProcessReads(ctx); // returns false if actor died } @@ -1540,26 +1540,26 @@ void TReadSessionActor::Handle(TEvPQProxy::TEvCloseSession::TPtr& ev, const TAct CloseSession(ev->Get()->Reason, ev->Get()->ErrorCode, ctx); } -ui32 TReadSessionActor::NormalizeMaxReadMessagesCount(ui32 sourceValue) { - ui32 count = Min<ui32>(sourceValue, Max<i32>()); - if (count == 0) { +ui32 TReadSessionActor::NormalizeMaxReadMessagesCount(ui32 sourceValue) { + ui32 count = Min<ui32>(sourceValue, Max<i32>()); + if (count == 0) { count = Max<i32>(); - } - return count; -} + } + return count; +} -ui32 TReadSessionActor::NormalizeMaxReadSize(ui32 sourceValue) { +ui32 TReadSessionActor::NormalizeMaxReadSize(ui32 sourceValue) { ui32 size = Min<ui32>(sourceValue, MAX_READ_SIZE); - if (size == 0) { - size = MAX_READ_SIZE; - } - return size; -} - + if (size == 0) { + size = MAX_READ_SIZE; + } + return size; +} + void TReadSessionActor::ProcessReads(const TActorContext& ctx) { while (!Reads.empty() && BytesInflight_ + RequestedBytes < MAX_INFLY_BYTES && ReadsInfly < MAX_INFLY_READS) { - ui32 count = MaxReadMessagesCount; - ui64 size = MaxReadSize; + ui32 count = MaxReadMessagesCount; + ui64 size = MaxReadSize; ui32 partitionsAsked = 0; TFormedReadResponse::TPtr formedResponse = new TFormedReadResponse(Reads.front()->Guid, ctx.Now()); @@ -1574,7 +1574,7 @@ void TReadSessionActor::ProcessReads(const TActorContext& ctx) { //add this partition to reading ++partitionsAsked; - const ui32 ccount = Min<ui32>(part.MsgLag * LAG_GROW_MULTIPLIER, count); + const ui32 ccount = Min<ui32>(part.MsgLag * LAG_GROW_MULTIPLIER, count); count -= ccount; const ui64 csize = (ui64)Min<double>(part.SizeLag * LAG_GROW_MULTIPLIER, size); size -= csize; @@ -1589,12 +1589,12 @@ void TReadSessionActor::ProcessReads(const TActorContext& ctx) { LOG_DEBUG_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " performing read request with guid " << read->Guid << " from " << it->second.Partition << " count " << ccount << " size " << csize - << " partitionsAsked " << partitionsAsked << " maxTimeLag " << MaxTimeLagMs << "ms"); + << " partitionsAsked " << partitionsAsked << " maxTimeLag " << MaxTimeLagMs << "ms"); - Y_VERIFY(!it->second.Reading); - it->second.Reading = true; - formedResponse->PartitionsTookPartInRead.insert(it->second.Actor); + Y_VERIFY(!it->second.Reading); + it->second.Reading = true; + formedResponse->PartitionsTookPartInRead.insert(it->second.Actor); auto pp = it->second.Partition; pp.AssignId = 0; PartitionToControlMessages[pp].Infly++; @@ -1605,22 +1605,22 @@ void TReadSessionActor::ProcessReads(const TActorContext& ctx) { formedResponse->RequestedBytes += csize; ctx.Send(it->second.Actor, read.Release()); - const auto insertResult = PartitionToReadResponse.insert(std::make_pair(it->second.Actor, formedResponse)); - Y_VERIFY(insertResult.second); + const auto insertResult = PartitionToReadResponse.insert(std::make_pair(it->second.Actor, formedResponse)); + Y_VERIFY(insertResult.second); - if (count == 0 || size == 0) + if (count == 0 || size == 0) break; } if (partitionsAsked == 0) break; ReadsTotal.Inc(); - 
formedResponse->RequestsInfly = partitionsAsked; + formedResponse->RequestsInfly = partitionsAsked; ReadsInfly++; - i64 diff = formedResponse->Response.ByteSize(); + i64 diff = formedResponse->Response.ByteSize(); BytesInflight_ += diff; - formedResponse->ByteSize = diff; + formedResponse->ByteSize = diff; (*BytesInflight) += diff; Reads.pop_front(); } @@ -1637,8 +1637,8 @@ void TReadSessionActor::Handle(TEvPQProxy::TEvPartitionReady::TPtr& ev, const TA << ev->Get()->ReadOffset << " endOffset " << ev->Get()->EndOffset << " WTime " << ev->Get()->WTime << " sizeLag " << ev->Get()->SizeLag); - const auto it = PartitionToReadResponse.find(ev->Sender); // check whether this partition is taking part in read response - auto& container = it != PartitionToReadResponse.end() ? it->second->PartitionsBecameAvailable : AvailablePartitions; + const auto it = PartitionToReadResponse.find(ev->Sender); // check whether this partition is taking part in read response + auto& container = it != PartitionToReadResponse.end() ? it->second->PartitionsBecameAvailable : AvailablePartitions; auto res = container.insert(TPartitionInfo{ev->Get()->Partition.AssignId, ev->Get()->WTime, ev->Get()->SizeLag, ev->Get()->EndOffset - ev->Get()->ReadOffset}); Y_VERIFY(res.second); @@ -1900,7 +1900,7 @@ void TPartitionActor::Handle(const TEvPQProxy::TEvRestartPipe::TPtr&, const TAct } } -bool FillBatchedData(MigrationStreamingReadServerMessage::DataBatch * data, const NKikimrClient::TCmdReadResult& res, const TPartitionId& Partition, ui64 ReadIdToResponse, ui64& ReadOffset, ui64& WTime, ui64 EndOffset, const TActorContext& ctx) { +bool FillBatchedData(MigrationStreamingReadServerMessage::DataBatch * data, const NKikimrClient::TCmdReadResult& res, const TPartitionId& Partition, ui64 ReadIdToResponse, ui64& ReadOffset, ui64& WTime, ui64 EndOffset, const TActorContext& ctx) { auto* partitionData = data->add_partition_data(); partitionData->mutable_topic()->set_path(Partition.TopicConverter->GetModernName()); @@ -1913,7 +1913,7 @@ bool FillBatchedData(MigrationStreamingReadServerMessage::DataBatch * data, cons bool hasOffset = false; bool hasData = false; - MigrationStreamingReadServerMessage::DataBatch::Batch* currentBatch = nullptr; + MigrationStreamingReadServerMessage::DataBatch::Batch* currentBatch = nullptr; for (ui32 i = 0; i < res.ResultSize(); ++i) { const auto& r = res.GetResult(i); WTime = r.GetWriteTimestampMS(); @@ -2007,8 +2007,8 @@ bool FillBatchedData(MigrationStreamingReadServerMessage::DataBatch * data, cons void TPartitionActor::Handle(TEvPersQueue::TEvResponse::TPtr& ev, const TActorContext& ctx) { if (ev->Get()->Record.HasErrorCode() && ev->Get()->Record.GetErrorCode() != NPersQueue::NErrorCode::OK) { - const auto errorCode = ev->Get()->Record.GetErrorCode(); - if (errorCode == NPersQueue::NErrorCode::WRONG_COOKIE || errorCode == NPersQueue::NErrorCode::BAD_REQUEST) { + const auto errorCode = ev->Get()->Record.GetErrorCode(); + if (errorCode == NPersQueue::NErrorCode::WRONG_COOKIE || errorCode == NPersQueue::NErrorCode::BAD_REQUEST) { Counters.Errors.Inc(); ctx.Send(ParentId, new TEvPQProxy::TEvCloseSession("status is not ok: " + ev->Get()->Record.GetErrorReason(), ConvertOldCode(ev->Get()->Record.GetErrorCode()))); } else { @@ -2058,7 +2058,7 @@ void TPartitionActor::Handle(TEvPersQueue::TEvResponse::TPtr& ev, const TActorCo SizeLag = resp.GetSizeLag(); WriteTimestampEstimateMs = resp.GetWriteTimestampEstimateMS(); - ClientCommitOffset = ReadOffset = CommittedOffset = resp.HasOffset() ? 
resp.GetOffset() : 0; + ClientCommitOffset = ReadOffset = CommittedOffset = resp.HasOffset() ? resp.GetOffset() : 0; Y_VERIFY(EndOffset >= CommittedOffset); if (resp.HasWriteTimestampMS()) @@ -2121,7 +2121,7 @@ void TPartitionActor::Handle(TEvPersQueue::TEvResponse::TPtr& ev, const TActorCo if (result.GetCookie() != (ui64)ReadOffset) { LOG_DEBUG_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " " << Partition - << " unwaited read-response with cookie " << result.GetCookie() << "; waiting for " << ReadOffset << "; current read guid is " << ReadGuid); + << " unwaited read-response with cookie " << result.GetCookie() << "; waiting for " << ReadOffset << "; current read guid is " << ReadGuid); return; } @@ -2129,7 +2129,7 @@ void TPartitionActor::Handle(TEvPersQueue::TEvResponse::TPtr& ev, const TActorCo EndOffset = res.GetMaxOffset(); SizeLag = res.GetSizeLag(); - MigrationStreamingReadServerMessage response; + MigrationStreamingReadServerMessage response; response.set_status(Ydb::StatusIds::SUCCESS); auto* data = response.mutable_data_batch(); @@ -2165,7 +2165,7 @@ void TPartitionActor::Handle(TEvPersQueue::TEvResponse::TPtr& ev, const TActorCo LOG_DEBUG_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " after read state " << Partition << " EndOffset " << EndOffset << " ReadOffset " << ReadOffset << " ReadGuid " << ReadGuid << " has messages " << hasData); - ReadGuid = TString(); + ReadGuid = TString(); auto readResponse = MakeHolder<TEvPQProxy::TEvReadResponse>( std::move(response), ReadOffset, @@ -2429,10 +2429,10 @@ void TPartitionActor::Handle(TEvPQProxy::TEvRead::TPtr& ev, const TActorContext& Y_VERIFY(!NeedRelease); Y_VERIFY(!Released); - Y_VERIFY(ReadGuid.empty()); + Y_VERIFY(ReadGuid.empty()); Y_VERIFY(!RequestInfly); - ReadGuid = ev->Get()->Guid; + ReadGuid = ev->Get()->Guid; const auto req = ev->Get(); diff --git a/ydb/services/persqueue_v1/grpc_pq_write.cpp b/ydb/services/persqueue_v1/grpc_pq_write.cpp index 5f17a2f3aba..b50d3691c5d 100644 --- a/ydb/services/persqueue_v1/grpc_pq_write.cpp +++ b/ydb/services/persqueue_v1/grpc_pq_write.cpp @@ -181,12 +181,12 @@ void TPQWriteService::Handle(NKikimr::NGRpcService::TEvStreamPQWriteRequest::TPt if (HaveClusters && localCluster.empty()) { ev->Get()->GetStreamCtx()->Attach(ctx.SelfID); if (LocalCluster) { - LOG_INFO_S(ctx, NKikimrServices::PQ_WRITE_PROXY, "new grpc connection failed - cluster disabled"); - ev->Get()->GetStreamCtx()->WriteAndFinish(FillWriteResponse("cluster disabled", PersQueue::ErrorCode::CLUSTER_DISABLED), grpc::Status::OK); //CANCELLED - } else { - LOG_INFO_S(ctx, NKikimrServices::PQ_WRITE_PROXY, "new grpc connection failed - initializing"); - ev->Get()->GetStreamCtx()->WriteAndFinish(FillWriteResponse("initializing", PersQueue::ErrorCode::INITIALIZING), grpc::Status::OK); //CANCELLED - } + LOG_INFO_S(ctx, NKikimrServices::PQ_WRITE_PROXY, "new grpc connection failed - cluster disabled"); + ev->Get()->GetStreamCtx()->WriteAndFinish(FillWriteResponse("cluster disabled", PersQueue::ErrorCode::CLUSTER_DISABLED), grpc::Status::OK); //CANCELLED + } else { + LOG_INFO_S(ctx, NKikimrServices::PQ_WRITE_PROXY, "new grpc connection failed - initializing"); + ev->Get()->GetStreamCtx()->WriteAndFinish(FillWriteResponse("initializing", PersQueue::ErrorCode::INITIALIZING), grpc::Status::OK); //CANCELLED + } return; } else { TopicsHandler = std::make_unique<NPersQueue::TTopicsListController>( diff --git a/ydb/services/persqueue_v1/grpc_pq_write_actor.cpp b/ydb/services/persqueue_v1/grpc_pq_write_actor.cpp index 
3038cc82a61..21651a1cc4d 100644 --- a/ydb/services/persqueue_v1/grpc_pq_write_actor.cpp +++ b/ydb/services/persqueue_v1/grpc_pq_write_actor.cpp @@ -23,18 +23,18 @@ using namespace NKikimrClient; namespace NKikimr { using namespace NSchemeCache; - -Ydb::PersQueue::V1::Codec CodecByName(const TString& codec) { - static const THashMap<TString, Ydb::PersQueue::V1::Codec> codecsByName = { - { "raw", Ydb::PersQueue::V1::CODEC_RAW }, - { "gzip", Ydb::PersQueue::V1::CODEC_GZIP }, - { "lzop", Ydb::PersQueue::V1::CODEC_LZOP }, - { "zstd", Ydb::PersQueue::V1::CODEC_ZSTD }, - }; - auto codecIt = codecsByName.find(codec); - return codecIt != codecsByName.end() ? codecIt->second : Ydb::PersQueue::V1::CODEC_UNSPECIFIED; -} - + +Ydb::PersQueue::V1::Codec CodecByName(const TString& codec) { + static const THashMap<TString, Ydb::PersQueue::V1::Codec> codecsByName = { + { "raw", Ydb::PersQueue::V1::CODEC_RAW }, + { "gzip", Ydb::PersQueue::V1::CODEC_GZIP }, + { "lzop", Ydb::PersQueue::V1::CODEC_LZOP }, + { "zstd", Ydb::PersQueue::V1::CODEC_ZSTD }, + }; + auto codecIt = codecsByName.find(codec); + return codecIt != codecsByName.end() ? codecIt->second : Ydb::PersQueue::V1::CODEC_UNSPECIFIED; +} + template <> void FillExtraFieldsForDataChunk( const Ydb::PersQueue::V1::StreamingWriteClientMessage::InitRequest& init, @@ -248,7 +248,7 @@ void TWriteSessionActor::CheckFinish(const TActorContext& ctx) { CloseSession("out of order Writes done before initialization", PersQueue::ErrorCode::BAD_REQUEST, ctx); return; } - if (Writes.empty() && FormedWrites.empty() && SentMessages.empty()) { + if (Writes.empty() && FormedWrites.empty() && SentMessages.empty()) { CloseSession("", PersQueue::ErrorCode::OK, ctx); return; } @@ -870,7 +870,7 @@ void TWriteSessionActor::Handle(NPQ::TEvPartitionWriter::TEvWriteResponse::TPtr& batchWriteResponse->add_sequence_numbers(res.GetSeqNo()); batchWriteResponse->add_offsets(res.GetOffset()); batchWriteResponse->add_already_written(res.GetAlreadyWritten()); - + stat->set_queued_in_partition_duration_ms( Max((i64)res.GetTotalTimeInPartitionQueueMs(), stat->queued_in_partition_duration_ms())); stat->set_throttled_on_partition_duration_ms( @@ -939,14 +939,14 @@ void TWriteSessionActor::Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev, con } void TWriteSessionActor::GenerateNextWriteRequest(const TActorContext& ctx) { - TWriteRequestBatchInfo::TPtr writeRequest = new TWriteRequestBatchInfo(); + TWriteRequestBatchInfo::TPtr writeRequest = new TWriteRequestBatchInfo(); auto ev = MakeHolder<NPQ::TEvPartitionWriter::TEvWriteRequest>(++NextRequestCookie); NKikimrClient::TPersQueueRequest& request = ev->Record; - writeRequest->UserWriteRequests = std::move(Writes); - Writes.clear(); - + writeRequest->UserWriteRequests = std::move(Writes); + Writes.clear(); + i64 diff = 0; auto addData = [&](const StreamingWriteClientMessage::WriteRequest& writeRequest, const i32 messageIndex) { auto w = request.MutablePartitionRequest()->AddCmdWrite(); @@ -956,14 +956,14 @@ void TWriteSessionActor::GenerateNextWriteRequest(const TActorContext& ctx) { w->SetCreateTimeMS(writeRequest.created_at_ms(messageIndex)); w->SetUncompressedSize(writeRequest.blocks_uncompressed_sizes(messageIndex)); w->SetClientDC(ClientDC); - }; + }; - for (const auto& write : writeRequest->UserWriteRequests) { - diff -= write->Request.ByteSize(); + for (const auto& write : writeRequest->UserWriteRequests) { + diff -= write->Request.ByteSize(); const auto& writeRequest = write->Request.write_request(); for (i32 messageIndex = 0; messageIndex 
!= writeRequest.sequence_numbers_size(); ++messageIndex) { addData(writeRequest, messageIndex); - } + } } writeRequest->Cookie = request.GetPartitionRequest().GetCookie(); @@ -975,8 +975,8 @@ void TWriteSessionActor::GenerateNextWriteRequest(const TActorContext& ctx) { BytesInflight.Inc(diff); BytesInflightTotal.Inc(diff); - writeRequest->ByteSize = request.ByteSize(); - FormedWrites.push_back(writeRequest); + writeRequest->ByteSize = request.ByteSize(); + FormedWrites.push_back(writeRequest); ctx.Send(Writer, std::move(ev)); ++NumReserveBytesRequests; @@ -1080,14 +1080,14 @@ void TWriteSessionActor::Handle(TEvPQProxy::TEvWrite::TPtr& ev, const TActorCont auto dataCheck = [&](const StreamingWriteClientMessage::WriteRequest& data, const i32 messageIndex) -> bool { if (data.sequence_numbers(messageIndex) <= 0) { CloseSession(TStringBuilder() << "bad write request - 'sequence_numbers' items must be greater than 0. Value at position " << messageIndex << " is " << data.sequence_numbers(messageIndex), PersQueue::ErrorCode::BAD_REQUEST, ctx); - return false; - } + return false; + } if (messageIndex > 0 && data.sequence_numbers(messageIndex) <= data.sequence_numbers(messageIndex - 1)) { CloseSession(TStringBuilder() << "bad write request - 'sequence_numbers' are unsorted. Value " << data.sequence_numbers(messageIndex) << " at position " << messageIndex << " is less than or equal to value " << data.sequence_numbers(messageIndex - 1) << " at position " << (messageIndex - 1), PersQueue::ErrorCode::BAD_REQUEST, ctx); - return false; - } + return false; + } if (data.blocks_headers(messageIndex).size() != CODEC_ID_SIZE) { CloseSession(TStringBuilder() << "bad write request - 'blocks_headers' at position " << messageIndex << " has incorrect size " << data.blocks_headers(messageIndex).size() << " [B]. 
Only headers of size " << CODEC_ID_SIZE << " [B] (with codec identifier) are supported in block format version 0", PersQueue::ErrorCode::BAD_REQUEST, ctx); @@ -1106,12 +1106,12 @@ void TWriteSessionActor::Handle(TEvPQProxy::TEvWrite::TPtr& ev, const TActorCont << ", only single message per block is supported by block format version 0", PersQueue::ErrorCode::BAD_REQUEST, ctx); return false; } - return true; - }; + return true; + }; for (i32 messageIndex = 0; messageIndex != messageCount; ++messageIndex) { if (!dataCheck(writeRequest, messageIndex)) { - return; - } + return; + } } THolder<TEvPQProxy::TEvWrite> event(ev->Release()); diff --git a/ydb/services/persqueue_v1/persqueue.cpp b/ydb/services/persqueue_v1/persqueue.cpp index ccacc4d43af..60a46aa88ac 100644 --- a/ydb/services/persqueue_v1/persqueue.cpp +++ b/ydb/services/persqueue_v1/persqueue.cpp @@ -91,9 +91,9 @@ void TGRpcPersQueueService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) { } { - using TBiRequest = Ydb::PersQueue::V1::MigrationStreamingReadClientMessage; + using TBiRequest = Ydb::PersQueue::V1::MigrationStreamingReadClientMessage; - using TBiResponse = Ydb::PersQueue::V1::MigrationStreamingReadServerMessage; + using TBiResponse = Ydb::PersQueue::V1::MigrationStreamingReadServerMessage; using TStreamGRpcRequest = NGRpcServer::TGRpcStreamingRequest< TBiRequest, @@ -102,7 +102,7 @@ void TGRpcPersQueueService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) { NKikimrServices::GRPC_SERVER>; - TStreamGRpcRequest::Start(this, this->GetService(), CQ, &Ydb::PersQueue::V1::PersQueueService::AsyncService::RequestMigrationStreamingRead, + TStreamGRpcRequest::Start(this, this->GetService(), CQ, &Ydb::PersQueue::V1::PersQueueService::AsyncService::RequestMigrationStreamingRead, [this](TIntrusivePtr<TStreamGRpcRequest::IContext> context) { ActorSystem->Send(GRpcRequestProxy, new NKikimr::NGRpcService::TEvStreamPQReadRequest(context)); }, @@ -150,10 +150,10 @@ void TGRpcPersQueueService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) { } -void TGRpcPersQueueService::StopService() noexcept { - TGrpcServiceBase::StopService(); -} - +void TGRpcPersQueueService::StopService() noexcept { + TGrpcServiceBase::StopService(); +} + } // V1 } // namespace NGRpcService } // namespace NKikimr diff --git a/ydb/services/persqueue_v1/persqueue.h b/ydb/services/persqueue_v1/persqueue.h index eefe1ee4c2b..ccbc43fe4da 100644 --- a/ydb/services/persqueue_v1/persqueue.h +++ b/ydb/services/persqueue_v1/persqueue.h @@ -20,7 +20,7 @@ public: void InitService(grpc::ServerCompletionQueue* cq, NGrpc::TLoggerPtr logger) override; void SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) override; - void StopService() noexcept override; + void StopService() noexcept override; using NGrpc::TGrpcServiceBase<Ydb::PersQueue::V1::PersQueueService>::GetService; @@ -33,10 +33,10 @@ private: void InitNewSchemeCacheActor(); NActors::TActorSystem* ActorSystem; - grpc::ServerCompletionQueue* CQ = nullptr; + grpc::ServerCompletionQueue* CQ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters; - NGrpc::TGlobalLimiter* Limiter = nullptr; + NGrpc::TGlobalLimiter* Limiter = nullptr; NActors::TActorId SchemeCache; NActors::TActorId NewSchemeCache; NActors::TActorId GRpcRequestProxy; diff --git a/ydb/services/persqueue_v1/persqueue_ut.cpp b/ydb/services/persqueue_v1/persqueue_ut.cpp index 3bc9b217285..9d919d02914 100644 --- a/ydb/services/persqueue_v1/persqueue_ut.cpp +++ b/ydb/services/persqueue_v1/persqueue_ut.cpp @@ -44,7 +44,7 @@ using namespace Tests; using 
namespace NKikimrClient; using namespace Ydb::PersQueue; using namespace Ydb::PersQueue::V1; -using namespace NThreading; +using namespace NThreading; using namespace NNetClassifier; TAutoPtr<IEventHandle> GetClassifierUpdate(TServer& server, const TActorId sender) { @@ -167,17 +167,17 @@ namespace { TPersQueueV1TestServer server; SET_LOCALS; MAKE_INSECURE_STUB; - auto readStream = StubP_->MigrationStreamingRead(&rcontext); + auto readStream = StubP_->MigrationStreamingRead(&rcontext); UNIT_ASSERT(readStream); // init read session { - MigrationStreamingReadClientMessage req; - MigrationStreamingReadServerMessage resp; + MigrationStreamingReadClientMessage req; + MigrationStreamingReadServerMessage resp; - req.mutable_init_request()->add_topics_read_settings()->set_topic("acc/topic1"); + req.mutable_init_request()->add_topics_read_settings()->set_topic("acc/topic1"); - req.mutable_init_request()->set_consumer("user"); + req.mutable_init_request()->set_consumer("user"); req.mutable_init_request()->set_read_only_original(true); req.mutable_init_request()->mutable_read_params()->set_max_read_messages_count(1); @@ -186,9 +186,9 @@ namespace { } UNIT_ASSERT(readStream->Read(&resp)); Cerr << "===Got response: " << resp.ShortDebugString() << Endl; - UNIT_ASSERT(resp.response_case() == MigrationStreamingReadServerMessage::kInitResponse); + UNIT_ASSERT(resp.response_case() == MigrationStreamingReadServerMessage::kInitResponse); //send some reads - req.Clear(); + req.Clear(); req.mutable_read(); for (ui32 i = 0; i < 10; ++i) { if (!readStream->Write(req)) { @@ -215,12 +215,12 @@ namespace { } ui64 assignId = 0; { - MigrationStreamingReadClientMessage req; - MigrationStreamingReadServerMessage resp; + MigrationStreamingReadClientMessage req; + MigrationStreamingReadServerMessage resp; //lock partition UNIT_ASSERT(readStream->Read(&resp)); - UNIT_ASSERT(resp.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(resp.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(resp.assigned().topic().path() == "acc/topic1"); UNIT_ASSERT(resp.assigned().cluster() == "dc1"); UNIT_ASSERT(resp.assigned().partition() == 0); @@ -252,11 +252,11 @@ namespace { } //check read results - MigrationStreamingReadServerMessage resp; + MigrationStreamingReadServerMessage resp; for (ui32 i = 10; i < 16; ++i) { UNIT_ASSERT(readStream->Read(&resp)); Cerr << "Got read response " << resp << "\n"; - UNIT_ASSERT_C(resp.response_case() == MigrationStreamingReadServerMessage::kDataBatch, resp); + UNIT_ASSERT_C(resp.response_case() == MigrationStreamingReadServerMessage::kDataBatch, resp); UNIT_ASSERT(resp.data_batch().partition_data_size() == 1); UNIT_ASSERT(resp.data_batch().partition_data(0).batches_size() == 1); UNIT_ASSERT(resp.data_batch().partition_data(0).batches(0).message_data_size() == 1); @@ -264,8 +264,8 @@ namespace { } //TODO: restart here readSession and read from position 10 { - MigrationStreamingReadClientMessage req; - MigrationStreamingReadServerMessage resp; + MigrationStreamingReadClientMessage req; + MigrationStreamingReadServerMessage resp; auto cookie = req.mutable_commit()->add_cookies(); cookie->set_assign_id(assignId); @@ -275,7 +275,7 @@ namespace { ythrow yexception() << "write fail"; } UNIT_ASSERT(readStream->Read(&resp)); - UNIT_ASSERT_C(resp.response_case() == MigrationStreamingReadServerMessage::kCommitted, resp); + UNIT_ASSERT_C(resp.response_case() == MigrationStreamingReadServerMessage::kCommitted, resp); } @@ -338,7 +338,7 @@ namespace { 
UNIT_ASSERT_VALUES_EQUAL(p, pp); writer.Write(SHORT_TOPIC_NAME, {"1", "2", "3", "4", "5"}); - + writer.Write("topic2", {"valuevaluevalue1"}, true); p = writer.InitSession("sid1", 2, true); @@ -540,20 +540,20 @@ namespace { Cerr << "===Writer - writes done\n"; grpc::ClientContext rcontext; - auto readStream = StubP_->MigrationStreamingRead(&rcontext); + auto readStream = StubP_->MigrationStreamingRead(&rcontext); UNIT_ASSERT(readStream); // init read session { - MigrationStreamingReadClientMessage req; - MigrationStreamingReadServerMessage resp; + MigrationStreamingReadClientMessage req; + MigrationStreamingReadServerMessage resp; req.mutable_init_request()->add_topics_read_settings()->set_topic(SHORT_TOPIC_NAME); - req.mutable_init_request()->set_consumer("user"); + req.mutable_init_request()->set_consumer("user"); req.mutable_init_request()->set_read_only_original(true); - req.mutable_init_request()->mutable_read_params()->set_max_read_messages_count(1000); + req.mutable_init_request()->mutable_read_params()->set_max_read_messages_count(1000); if (!readStream->Write(req)) { ythrow yexception() << "write fail"; @@ -562,7 +562,7 @@ namespace { UNIT_ASSERT(readStream->Read(&resp)); Cerr << "Read server response: " << resp.ShortDebugString() << Endl; - UNIT_ASSERT(resp.response_case() == MigrationStreamingReadServerMessage::kInitResponse); + UNIT_ASSERT(resp.response_case() == MigrationStreamingReadServerMessage::kInitResponse); //send some reads Sleep(TDuration::Seconds(5)); @@ -577,22 +577,22 @@ namespace { } //check read results - MigrationStreamingReadServerMessage resp; - for (ui32 i = 0; i < 2;) { - MigrationStreamingReadServerMessage resp; + MigrationStreamingReadServerMessage resp; + for (ui32 i = 0; i < 2;) { + MigrationStreamingReadServerMessage resp; UNIT_ASSERT(readStream->Read(&resp)); - if (resp.response_case() == MigrationStreamingReadServerMessage::kAssigned) { - auto assignId = resp.assigned().assign_id(); - MigrationStreamingReadClientMessage req; + if (resp.response_case() == MigrationStreamingReadServerMessage::kAssigned) { + auto assignId = resp.assigned().assign_id(); + MigrationStreamingReadClientMessage req; req.mutable_start_read()->mutable_topic()->set_path(SHORT_TOPIC_NAME); - req.mutable_start_read()->set_cluster("dc1"); - req.mutable_start_read()->set_assign_id(assignId); - UNIT_ASSERT(readStream->Write(req)); - continue; - } - - UNIT_ASSERT_C(resp.response_case() == MigrationStreamingReadServerMessage::kDataBatch, resp); - i += resp.data_batch().partition_data_size(); + req.mutable_start_read()->set_cluster("dc1"); + req.mutable_start_read()->set_assign_id(assignId); + UNIT_ASSERT(readStream->Write(req)); + continue; + } + + UNIT_ASSERT_C(resp.response_case() == MigrationStreamingReadServerMessage::kDataBatch, resp); + i += resp.data_batch().partition_data_size(); } } @@ -620,9 +620,9 @@ namespace { writer.Read(SHORT_TOPIC_NAME, "user1", "", false, false); } - Y_UNIT_TEST(SetupReadSession) { + Y_UNIT_TEST(SetupReadSession) { SetupReadSessionTest(); - } + } Y_UNIT_TEST(WriteExisting) { @@ -961,7 +961,7 @@ namespace { server.AnnoyingClient->DescribeTopic({thirdTopic}, true); } - void WaitResolveSuccess(TFlatMsgBusPQClient& annoyingClient, TString topic, ui32 numParts) { + void WaitResolveSuccess(TFlatMsgBusPQClient& annoyingClient, TString topic, ui32 numParts) { const TInstant start = TInstant::Now(); while (true) { TAutoPtr<NMsgBusProxy::TBusPersQueue> request(new NMsgBusProxy::TBusPersQueue); @@ -1457,7 +1457,7 @@ namespace { auto msg = 
consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.assigned().cluster() == "dc1"); UNIT_ASSERT(msg.GetValue().Response.assigned().partition() == 0); @@ -1474,7 +1474,7 @@ namespace { msg = consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.assigned().cluster() == "dc1"); UNIT_ASSERT(msg.GetValue().Response.assigned().partition() == 0); @@ -1491,7 +1491,7 @@ namespace { msg = consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.assigned().cluster() == "dc1"); UNIT_ASSERT(msg.GetValue().Response.assigned().partition() == 0); @@ -1508,7 +1508,7 @@ namespace { msg = consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.assigned().cluster() == "dc1"); UNIT_ASSERT(msg.GetValue().Response.assigned().partition() == 0); @@ -1522,7 +1522,7 @@ namespace { msg = consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - if (msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kCommitted) { + if (msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kCommitted) { UNIT_ASSERT(msg.GetValue().Response.committed().cookies_size() == 1); UNIT_ASSERT(msg.GetValue().Response.committed().cookies(0).assign_id() == assignId); UNIT_ASSERT(msg.GetValue().Response.committed().cookies(0).partition_cookie() == 0); @@ -1534,7 +1534,7 @@ namespace { msg = consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.assigned().cluster() == "dc1"); UNIT_ASSERT(msg.GetValue().Response.assigned().partition() == 0); @@ -1549,7 +1549,7 @@ namespace { msg = consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + 
UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.assigned().cluster() == "dc1"); UNIT_ASSERT(msg.GetValue().Response.assigned().partition() == 0); @@ -1566,7 +1566,7 @@ namespace { msg = consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.assigned().cluster() == "dc1"); UNIT_ASSERT(msg.GetValue().Response.assigned().partition() == 0); @@ -1614,7 +1614,7 @@ namespace { } /* - void TestRereadsWhenDataIsEmptyImpl(bool withWait) { + void TestRereadsWhenDataIsEmptyImpl(bool withWait) { NPersQueue::TTestServer server; server.AnnoyingClient->CreateTopic(DEFAULT_TOPIC_NAME, 1); @@ -1622,27 +1622,27 @@ namespace { TPQDataWriter writer("source", server); auto pqLib = TPQLib::WithCerrLogger(); - // Write nonempty data + // Write nonempty data NKikimr::NPersQueueTests::TRequestWritePQ writeReq(DEFAULT_TOPIC_NAME, 0, "src", 4); - - auto write = [&](const TString& data, bool empty = false) { - NKikimrPQClient::TDataChunk dataChunk; - dataChunk.SetCreateTime(42); - dataChunk.SetSeqNo(++writeReq.SeqNo); - dataChunk.SetData(data); - if (empty) { - dataChunk.SetChunkType(NKikimrPQClient::TDataChunk::GROW); // this guarantees that data will be threated as empty - } - TString serialized; - UNIT_ASSERT(dataChunk.SerializeToString(&serialized)); + + auto write = [&](const TString& data, bool empty = false) { + NKikimrPQClient::TDataChunk dataChunk; + dataChunk.SetCreateTime(42); + dataChunk.SetSeqNo(++writeReq.SeqNo); + dataChunk.SetData(data); + if (empty) { + dataChunk.SetChunkType(NKikimrPQClient::TDataChunk::GROW); // this guarantees that data will be threated as empty + } + TString serialized; + UNIT_ASSERT(dataChunk.SerializeToString(&serialized)); server.AnnoyingClient->WriteToPQ(writeReq, serialized); - }; - write("data1"); - write("data2", true); - if (!withWait) { - write("data3"); - } - + }; + write("data1"); + write("data2", true); + if (!withWait) { + write("data3"); + } + ui32 maxCount = 1; bool unpack = false; ui32 maxInflyRequests = 1; @@ -1651,47 +1651,47 @@ namespace { pqLib, server.GrpcPort, "user", {SHORT_TOPIC_NAME, {}}, maxCount, unpack, {}, maxInflyRequests, maxMemoryUsage ); - UNIT_ASSERT_C(ccResult.Response.response_case() == MigrationStreamingReadServerMessage::kInitResponse, ccResult.Response); - - auto msg1 = GetNextMessageSkipAssignment(consumer).GetValueSync().Response; - - auto assertHasData = [](const MigrationStreamingReadServerMessage& msg, const TString& data) { + UNIT_ASSERT_C(ccResult.Response.response_case() == MigrationStreamingReadServerMessage::kInitResponse, ccResult.Response); + + auto msg1 = GetNextMessageSkipAssignment(consumer).GetValueSync().Response; + + auto assertHasData = [](const MigrationStreamingReadServerMessage& msg, const TString& data) { const auto& d = msg.data_batch(); UNIT_ASSERT_VALUES_EQUAL_C(d.partition_data_size(), 1, msg); UNIT_ASSERT_VALUES_EQUAL_C(d.partition_data(0).batches_size(), 1, msg); UNIT_ASSERT_VALUES_EQUAL_C(d.partition_data(0).batches(0).message_data_size(), 1, msg); 
UNIT_ASSERT_VALUES_EQUAL_C(d.partition_data(0).batches(0).message_data(0).data(), data, msg); - }; + }; UNIT_ASSERT_VALUES_EQUAL_C(msg1.data_batch().partition_data(0).cookie().partition_cookie(), 1, msg1); - assertHasData(msg1, "data1"); - - auto resp2Future = consumer->GetNextMessage(); - if (withWait) { - // no data - UNIT_ASSERT(!resp2Future.HasValue()); - UNIT_ASSERT(!resp2Future.HasException()); - - // waits and data doesn't arrive - Sleep(TDuration::MilliSeconds(100)); - UNIT_ASSERT(!resp2Future.HasValue()); - UNIT_ASSERT(!resp2Future.HasException()); - - // write data - write("data3"); - } - const auto& msg2 = resp2Future.GetValueSync().Response; + assertHasData(msg1, "data1"); + + auto resp2Future = consumer->GetNextMessage(); + if (withWait) { + // no data + UNIT_ASSERT(!resp2Future.HasValue()); + UNIT_ASSERT(!resp2Future.HasException()); + + // waits and data doesn't arrive + Sleep(TDuration::MilliSeconds(100)); + UNIT_ASSERT(!resp2Future.HasValue()); + UNIT_ASSERT(!resp2Future.HasException()); + + // write data + write("data3"); + } + const auto& msg2 = resp2Future.GetValueSync().Response; UNIT_ASSERT_VALUES_EQUAL_C(msg2.data_batch().partition_data(0).cookie().partition_cookie(), 2, msg2); - assertHasData(msg2, "data3"); - } - - Y_UNIT_TEST(TestRereadsWhenDataIsEmpty) { - TestRereadsWhenDataIsEmptyImpl(false); - } - - Y_UNIT_TEST(TestRereadsWhenDataIsEmptyWithWait) { - TestRereadsWhenDataIsEmptyImpl(true); - } + assertHasData(msg2, "data3"); + } + + Y_UNIT_TEST(TestRereadsWhenDataIsEmpty) { + TestRereadsWhenDataIsEmptyImpl(false); + } + + Y_UNIT_TEST(TestRereadsWhenDataIsEmptyWithWait) { + TestRereadsWhenDataIsEmptyImpl(true); + } Y_UNIT_TEST(TestLockAfterDrop) { @@ -1714,7 +1714,7 @@ namespace { auto msg = consumer->GetNextMessage(); msg.Wait(); - UNIT_ASSERT_C(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned, msg.GetValue().Response); + UNIT_ASSERT_C(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned, msg.GetValue().Response); UNIT_ASSERT(msg.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.assigned().cluster() == "dc1"); UNIT_ASSERT(msg.GetValue().Response.assigned().partition() == 0); @@ -1728,7 +1728,7 @@ namespace { UNIT_ASSERT(msg.Wait(TDuration::Seconds(10))); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kDataBatch); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kDataBatch); } @@ -1754,12 +1754,12 @@ namespace { ui32 maxCount = 1; bool unpack = false; auto [consumer, ccResult] = CreateConsumer(pqLib, server.GrpcPort, "user", {"aaa/bbb/ccc/topic", {}}, maxCount, unpack); - UNIT_ASSERT_C(ccResult.Response.response_case() == MigrationStreamingReadServerMessage::kInitResponse, ccResult.Response); + UNIT_ASSERT_C(ccResult.Response.response_case() == MigrationStreamingReadServerMessage::kInitResponse, ccResult.Response); auto msg = consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); } @@ -1797,7 +1797,7 @@ namespace { auto msg = consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == 
MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.assigned().cluster() == "dc1"); } @@ -1810,7 +1810,7 @@ namespace { msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kRelease); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kRelease); UNIT_ASSERT(msg.GetValue().Response.release().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.release().cluster() == "dc1"); @@ -1821,12 +1821,12 @@ namespace { msg2.Wait(); Cerr << msg2.GetValue().Response << "\n"; - UNIT_ASSERT(msg2.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg2.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg2.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg2.GetValue().Response.assigned().cluster() == "dc1"); } - Y_UNIT_TEST(TestSilentRelease) { + Y_UNIT_TEST(TestSilentRelease) { NPersQueue::TTestServer server; server.AnnoyingClient->CreateTopic(DEFAULT_TOPIC_NAME, 3); @@ -1860,9 +1860,9 @@ namespace { Cerr << ccResult.Response << "\n"; for (ui32 i = 1; i <= 3; ++i) { - auto msg = GetNextMessageSkipAssignment(consumer); - Cerr << msg.GetValueSync().Response << "\n"; - UNIT_ASSERT(msg.GetValueSync().Response.response_case() == MigrationStreamingReadServerMessage::kDataBatch); + auto msg = GetNextMessageSkipAssignment(consumer); + Cerr << msg.GetValueSync().Response << "\n"; + UNIT_ASSERT(msg.GetValueSync().Response.response_case() == MigrationStreamingReadServerMessage::kDataBatch); for (auto& p : msg.GetValue().Response.data_batch().partition_data()) { cookies.emplace_back(p.cookie().assign_id(), p.cookie().partition_cookie()); } @@ -1873,20 +1873,20 @@ namespace { auto msg = consumer->GetNextMessage(); auto msg2 = consumer2->GetNextMessage(); - UNIT_ASSERT(!msg2.Wait(TDuration::Seconds(1))); - consumer->Commit(cookies); + UNIT_ASSERT(!msg2.Wait(TDuration::Seconds(1))); + consumer->Commit(cookies); - if (msg.GetValueSync().Release.Initialized()) { - msg.GetValueSync().Release.SetValue(); + if (msg.GetValueSync().Release.Initialized()) { + msg.GetValueSync().Release.SetValue(); } msg2.Wait(); Cerr << msg2.GetValue().Response << "\n"; - UNIT_ASSERT(msg2.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg2.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg2.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg2.GetValue().Response.assigned().cluster() == "dc1"); - UNIT_ASSERT(msg2.GetValue().Response.assigned().read_offset() == 1); + UNIT_ASSERT(msg2.GetValue().Response.assigned().read_offset() == 1); } */ @@ -1915,7 +1915,7 @@ namespace { auto msg = consumer->GetNextMessage(); msg.Wait(); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kAssigned); UNIT_ASSERT(msg.GetValue().Response.assigned().topic().path() == SHORT_TOPIC_NAME); 
UNIT_ASSERT(msg.GetValue().Response.assigned().cluster() == "dc1"); @@ -1929,7 +1929,7 @@ namespace { } while(!msg.Wait(TDuration::Seconds(1))); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kRelease); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kRelease); UNIT_ASSERT(msg.GetValue().Response.release().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.release().cluster() == "dc1"); UNIT_ASSERT(msg.GetValue().Response.release().forceful_release() == false); @@ -1941,7 +1941,7 @@ namespace { UNIT_ASSERT(msg.Wait(TDuration::Seconds(1))); Cerr << msg.GetValue().Response << "\n"; - UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kRelease); + UNIT_ASSERT(msg.GetValue().Response.response_case() == MigrationStreamingReadServerMessage::kRelease); UNIT_ASSERT(msg.GetValue().Response.release().topic().path() == SHORT_TOPIC_NAME); UNIT_ASSERT(msg.GetValue().Response.release().cluster() == "dc1"); UNIT_ASSERT(msg.GetValue().Response.release().forceful_release() == true); @@ -2138,10 +2138,10 @@ namespace { auto serverMessage = setup.InitSession(session); - auto defaultSupportedCodecs = TVector<Ydb::PersQueue::V1::Codec>{ Ydb::PersQueue::V1::CODEC_RAW, Ydb::PersQueue::V1::CODEC_GZIP, Ydb::PersQueue::V1::CODEC_LZOP }; + auto defaultSupportedCodecs = TVector<Ydb::PersQueue::V1::Codec>{ Ydb::PersQueue::V1::CODEC_RAW, Ydb::PersQueue::V1::CODEC_GZIP, Ydb::PersQueue::V1::CODEC_LZOP }; auto topicSupportedCodecs = serverMessage.init_response().supported_codecs(); - UNIT_ASSERT_VALUES_EQUAL_C(defaultSupportedCodecs.size(), topicSupportedCodecs.size(), serverMessage.init_response()); - UNIT_ASSERT_C(Equal(defaultSupportedCodecs.begin(), defaultSupportedCodecs.end(), topicSupportedCodecs.begin()), serverMessage.init_response()); + UNIT_ASSERT_VALUES_EQUAL_C(defaultSupportedCodecs.size(), topicSupportedCodecs.size(), serverMessage.init_response()); + UNIT_ASSERT_C(Equal(defaultSupportedCodecs.begin(), defaultSupportedCodecs.end(), topicSupportedCodecs.begin()), serverMessage.init_response()); } Y_UNIT_TEST(Codecs_WriteMessageWithDefaultCodecs_MessagesAreAcknowledged) { @@ -2458,8 +2458,8 @@ namespace { auto rr = props->add_read_rules(); alter(request, Ydb::StatusIds::BAD_REQUEST, false); server.AnnoyingClient->AlterTopic(); - props->add_supported_codecs(Ydb::PersQueue::V1::CODEC_RAW); - props->add_supported_codecs(Ydb::PersQueue::V1::CODEC_ZSTD); + props->add_supported_codecs(Ydb::PersQueue::V1::CODEC_RAW); + props->add_supported_codecs(Ydb::PersQueue::V1::CODEC_ZSTD); props->set_max_partition_write_speed(123); props->set_max_partition_write_burst(1234); @@ -2469,8 +2469,8 @@ namespace { rr->set_consumer_name("consumer"); rr->set_supported_format(Ydb::PersQueue::V1::TopicSettings::Format(1)); - rr->add_supported_codecs(Ydb::PersQueue::V1::CODEC_LZOP); - rr->add_supported_codecs(Ydb::PersQueue::V1::CODEC_GZIP); + rr->add_supported_codecs(Ydb::PersQueue::V1::CODEC_LZOP); + rr->add_supported_codecs(Ydb::PersQueue::V1::CODEC_GZIP); rr->set_important(true); rr->set_starting_message_timestamp_ms(111); diff --git a/ydb/services/persqueue_v1/ut/pq_data_writer.h b/ydb/services/persqueue_v1/ut/pq_data_writer.h index caef8940601..5508aa9487a 100644 --- a/ydb/services/persqueue_v1/ut/pq_data_writer.h +++ b/ydb/services/persqueue_v1/ut/pq_data_writer.h @@ -1,197 +1,197 @@ -#pragma once +#pragma once #include "test_utils.h" 
#include <ydb/core/testlib/test_pq_client.h> #include <ydb/public/api/grpc/draft/ydb_persqueue_v1.grpc.pb.h> #include <ydb/public/api/protos/persqueue_error_codes_v1.pb.h> #include <ydb/public/sdk/cpp/client/ydb_persqueue_core/ut/ut_utils/test_server.h> - + #include <library/cpp/testing/unittest/registar.h> - -#include <util/generic/string.h> - -namespace NKikimr::NPersQueueTests { - -class TPQDataWriter { -public: + +#include <util/generic/string.h> + +namespace NKikimr::NPersQueueTests { + +class TPQDataWriter { +public: TPQDataWriter(const TString& sourceId, NPersQueue::TTestServer& server, const TString& testTopicPath = "topic1") - : SourceId_(sourceId) + : SourceId_(sourceId) , Port_(server.GrpcPort) , Client(*server.AnnoyingClient) , Runtime(server.CleverServer->GetRuntime()) - { - InitializeChannel(); + { + InitializeChannel(); WaitWritePQServiceInitialization(testTopicPath); - } - - void Read(const TString& topic, const TString& clientId, const TString& ticket = "", bool error = false, bool checkACL = false, bool onlyCreate = false) { - Y_UNUSED(Client); - Y_UNUSED(Runtime); //TODO: use them to restart PERSQUEUE tablets - - grpc::ClientContext context; - - if (!ticket.empty()) - context.AddMetadata("x-ydb-auth-ticket", ticket); - - - auto stream = StubP_->MigrationStreamingRead(&context); - UNIT_ASSERT(stream); - - // Send initial request. - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage resp; - - req.mutable_init_request()->add_topics_read_settings()->set_topic(topic); - req.mutable_init_request()->mutable_read_params()->set_max_read_messages_count(1); + } + + void Read(const TString& topic, const TString& clientId, const TString& ticket = "", bool error = false, bool checkACL = false, bool onlyCreate = false) { + Y_UNUSED(Client); + Y_UNUSED(Runtime); //TODO: use them to restart PERSQUEUE tablets + + grpc::ClientContext context; + + if (!ticket.empty()) + context.AddMetadata("x-ydb-auth-ticket", ticket); + + + auto stream = StubP_->MigrationStreamingRead(&context); + UNIT_ASSERT(stream); + + // Send initial request. 
+ Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage resp; + + req.mutable_init_request()->add_topics_read_settings()->set_topic(topic); + req.mutable_init_request()->mutable_read_params()->set_max_read_messages_count(1); req.mutable_init_request()->set_read_only_original(true); - - req.mutable_init_request()->set_consumer(clientId); - - if (!stream->Write(req)) { - ythrow yexception() << "write fail"; - } - - Client.GetClientInfo({"rt3.dc1--topic1"}, clientId, true); - - if (!stream->Read(&resp)) { - auto status = stream->Finish(); - Cerr << (int)status.error_code() << " " << status.error_message() << "\n"; - Y_FAIL(""); - UNIT_ASSERT(error); - } + + req.mutable_init_request()->set_consumer(clientId); + + if (!stream->Write(req)) { + ythrow yexception() << "write fail"; + } + + Client.GetClientInfo({"rt3.dc1--topic1"}, clientId, true); + + if (!stream->Read(&resp)) { + auto status = stream->Finish(); + Cerr << (int)status.error_code() << " " << status.error_message() << "\n"; + Y_FAIL(""); + UNIT_ASSERT(error); + } Cerr << "=== Got response: " << resp.ShortDebugString() << Endl; - if (error) { - UNIT_ASSERT(resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::RESPONSE_NOT_SET); - return; - } - UNIT_ASSERT_C(resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kInitResponse, resp); - - - if (onlyCreate) - return; - - for (ui32 i = 0; i < 11; ++i) { - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - req.mutable_read(); - - if (!stream->Write(req)) { - ythrow yexception() << "write fail"; - } - Client.AlterTopic("rt3.dc1--topic1", i < 10 ? 2 : 3); - } - - if (checkACL) { - NACLib::TDiffACL acl; - acl.RemoveAccess(NACLib::EAccessType::Allow, NACLib::SelectRow, clientId + "@" BUILTIN_ACL_DOMAIN); - Client.ModifyACL("/Root/PQ", "rt3.dc1--topic1", acl.SerializeAsString()); - - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - req.mutable_read(); - if (!stream->Write(req)) { - ythrow yexception() << "write fail"; - } - - UNIT_ASSERT(stream->Read(&resp)); - UNIT_ASSERT(resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::RESPONSE_NOT_SET && resp.issues(0).issue_code() == (ui32)Ydb::PersQueue::ErrorCode::ErrorCode::ACCESS_DENIED); - return; - } - Client.GetClientInfo({"rt3.dc1--topic1"}, clientId, true); - ui64 assignId = 0; - for (ui32 i = 0; i < 11;) { - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage resp; - - UNIT_ASSERT(stream->Read(&resp)); - - if (resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kAssigned) { - auto assignId = resp.assigned().assign_id(); - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - req.mutable_start_read()->mutable_topic()->set_path("topic1"); - req.mutable_start_read()->set_cluster("dc1"); - req.mutable_start_read()->set_assign_id(assignId); - UNIT_ASSERT(stream->Write(req)); - continue; - } - - UNIT_ASSERT(resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kDataBatch); - Cerr << resp << "\n"; - - UNIT_ASSERT_VALUES_EQUAL(resp.data_batch().partition_data_size(), 1); - UNIT_ASSERT_VALUES_EQUAL(resp.data_batch().partition_data(0).batches_size(), 1); - UNIT_ASSERT_VALUES_EQUAL(resp.data_batch().partition_data(0).batches(0).message_data_size(), 1); - UNIT_ASSERT_VALUES_EQUAL(resp.data_batch().partition_data(0).batches(0).message_data(0).offset(), i); - - assignId = 
resp.data_batch().partition_data(0).cookie().assign_id(); - ++i; - } - //TODO: check here that read will never done UNIT_ASSERT(!stream->Read(&resp)); - { - for (ui32 i = 1; i < 11; ++i) { - Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; - - auto cookie = req.mutable_commit()->add_cookies(); - cookie->set_assign_id(assignId); - cookie->set_partition_cookie(i); - - if (!stream->Write(req)) { - ythrow yexception() << "write fail"; - } - } - ui32 i = 1; - while (i <= 10) { - Ydb::PersQueue::V1::MigrationStreamingReadServerMessage resp; - - UNIT_ASSERT(stream->Read(&resp)); - Cerr << resp << "\n"; - UNIT_ASSERT(resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kCommitted); - UNIT_ASSERT(resp.committed().cookies_size() > 0); - for (const auto& c : resp.committed().cookies()) { - UNIT_ASSERT(c.partition_cookie() == i); - ++i; - UNIT_ASSERT(i <= 11); - } - } - Client.GetClientInfo({"rt3.dc1--topic1"}, clientId, true); - } - } - + if (error) { + UNIT_ASSERT(resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::RESPONSE_NOT_SET); + return; + } + UNIT_ASSERT_C(resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kInitResponse, resp); + + + if (onlyCreate) + return; + + for (ui32 i = 0; i < 11; ++i) { + Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + req.mutable_read(); + + if (!stream->Write(req)) { + ythrow yexception() << "write fail"; + } + Client.AlterTopic("rt3.dc1--topic1", i < 10 ? 2 : 3); + } + + if (checkACL) { + NACLib::TDiffACL acl; + acl.RemoveAccess(NACLib::EAccessType::Allow, NACLib::SelectRow, clientId + "@" BUILTIN_ACL_DOMAIN); + Client.ModifyACL("/Root/PQ", "rt3.dc1--topic1", acl.SerializeAsString()); + + Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + req.mutable_read(); + if (!stream->Write(req)) { + ythrow yexception() << "write fail"; + } + + UNIT_ASSERT(stream->Read(&resp)); + UNIT_ASSERT(resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::RESPONSE_NOT_SET && resp.issues(0).issue_code() == (ui32)Ydb::PersQueue::ErrorCode::ErrorCode::ACCESS_DENIED); + return; + } + Client.GetClientInfo({"rt3.dc1--topic1"}, clientId, true); + ui64 assignId = 0; + for (ui32 i = 0; i < 11;) { + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage resp; + + UNIT_ASSERT(stream->Read(&resp)); + + if (resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kAssigned) { + auto assignId = resp.assigned().assign_id(); + Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + req.mutable_start_read()->mutable_topic()->set_path("topic1"); + req.mutable_start_read()->set_cluster("dc1"); + req.mutable_start_read()->set_assign_id(assignId); + UNIT_ASSERT(stream->Write(req)); + continue; + } + + UNIT_ASSERT(resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kDataBatch); + Cerr << resp << "\n"; + + UNIT_ASSERT_VALUES_EQUAL(resp.data_batch().partition_data_size(), 1); + UNIT_ASSERT_VALUES_EQUAL(resp.data_batch().partition_data(0).batches_size(), 1); + UNIT_ASSERT_VALUES_EQUAL(resp.data_batch().partition_data(0).batches(0).message_data_size(), 1); + UNIT_ASSERT_VALUES_EQUAL(resp.data_batch().partition_data(0).batches(0).message_data(0).offset(), i); + + assignId = resp.data_batch().partition_data(0).cookie().assign_id(); + ++i; + } + //TODO: check here that read will never done UNIT_ASSERT(!stream->Read(&resp)); + { + for (ui32 i = 1; i < 11; ++i) { + 
Ydb::PersQueue::V1::MigrationStreamingReadClientMessage req; + + auto cookie = req.mutable_commit()->add_cookies(); + cookie->set_assign_id(assignId); + cookie->set_partition_cookie(i); + + if (!stream->Write(req)) { + ythrow yexception() << "write fail"; + } + } + ui32 i = 1; + while (i <= 10) { + Ydb::PersQueue::V1::MigrationStreamingReadServerMessage resp; + + UNIT_ASSERT(stream->Read(&resp)); + Cerr << resp << "\n"; + UNIT_ASSERT(resp.response_case() == Ydb::PersQueue::V1::MigrationStreamingReadServerMessage::kCommitted); + UNIT_ASSERT(resp.committed().cookies_size() > 0); + for (const auto& c : resp.committed().cookies()) { + UNIT_ASSERT(c.partition_cookie() == i); + ++i; + UNIT_ASSERT(i <= 11); + } + } + Client.GetClientInfo({"rt3.dc1--topic1"}, clientId, true); + } + } + void WaitWritePQServiceInitialization(const TString& testTopicPath = "topic1") { - bool tried = false; - while (true) { - if (tried) { - Sleep(TDuration::MilliSeconds(100)); - } else { - tried = true; - } - - Ydb::PersQueue::V1::StreamingWriteClientMessage req; - Ydb::PersQueue::V1::StreamingWriteServerMessage resp; - grpc::ClientContext context; - - auto stream = StubP_->StreamingWrite(&context); - UNIT_ASSERT(stream); + bool tried = false; + while (true) { + if (tried) { + Sleep(TDuration::MilliSeconds(100)); + } else { + tried = true; + } + + Ydb::PersQueue::V1::StreamingWriteClientMessage req; + Ydb::PersQueue::V1::StreamingWriteServerMessage resp; + grpc::ClientContext context; + + auto stream = StubP_->StreamingWrite(&context); + UNIT_ASSERT(stream); req.mutable_init_request()->set_topic(testTopicPath); - req.mutable_init_request()->set_message_group_id("12345678"); - - if (!stream->Write(req)) { - UNIT_ASSERT_C(stream->Read(&resp), "Context error: " << context.debug_error_string()); - UNIT_ASSERT_C(resp.status() == Ydb::StatusIds::UNAVAILABLE, "Response: " << resp << ", Context error: " << context.debug_error_string()); - continue; - } - + req.mutable_init_request()->set_message_group_id("12345678"); + + if (!stream->Write(req)) { + UNIT_ASSERT_C(stream->Read(&resp), "Context error: " << context.debug_error_string()); + UNIT_ASSERT_C(resp.status() == Ydb::StatusIds::UNAVAILABLE, "Response: " << resp << ", Context error: " << context.debug_error_string()); + continue; + } + AssertSuccessfullStreamingOperation(stream->Read(&resp), stream); - if (resp.status() == Ydb::StatusIds::UNAVAILABLE) { - continue; - } - - if (stream->WritesDone()) { - auto status = stream->Finish(); - Cerr << "Finish: " << (int)status.error_code() << " " << status.error_message() << "\n"; - } - - break; - } - } - + if (resp.status() == Ydb::StatusIds::UNAVAILABLE) { + continue; + } + + if (stream->WritesDone()) { + auto status = stream->Finish(); + Cerr << "Finish: " << (int)status.error_code() << " " << status.error_message() << "\n"; + } + + break; + } + } + ui32 InitSession(const TString& sourceId, ui32 pg, bool success, ui32 step = 0) { Ydb::PersQueue::V1::StreamingWriteClientMessage req; Ydb::PersQueue::V1::StreamingWriteServerMessage resp; @@ -224,86 +224,86 @@ public: } ui32 Write(const TString& topic, const TVector<TString>& data, bool error = false, const TMaybe<TString>& ticket = {}) { - return WriteImpl(topic, {data}, error, ticket); - } - -private: + return WriteImpl(topic, {data}, error, ticket); + } + +private: ui32 WriteImpl(const TString& topic, const TVector<TString>& data, bool error, const TMaybe<TString>& ticket) { - grpc::ClientContext context; - + grpc::ClientContext context; + if (ticket) 
context.AddMetadata("x-ydb-auth-ticket", *ticket); - - - auto stream = StubP_->StreamingWrite(&context); - UNIT_ASSERT(stream); - - // Send initial request. - Ydb::PersQueue::V1::StreamingWriteClientMessage req; - Ydb::PersQueue::V1::StreamingWriteServerMessage resp; - - req.mutable_init_request()->set_topic(topic); - req.mutable_init_request()->set_message_group_id(SourceId_); - req.mutable_init_request()->set_max_supported_block_format_version(0); - - (*req.mutable_init_request()->mutable_session_meta())["key"] = "value"; - - if (!stream->Write(req)) { - ythrow yexception() << "write fail"; - } - - ui32 part = 0; - - if (!stream->Read(&resp)) { - auto status = stream->Finish(); - if (error) { - Cerr << (int)status.error_code() << " " << status.error_message() << "\n"; - UNIT_ASSERT(status.error_code() == grpc::StatusCode::UNAUTHENTICATED); - - } else { - UNIT_ASSERT(false); - } - } - - if (!error) { - UNIT_ASSERT_C(resp.server_message_case() == Ydb::PersQueue::V1::StreamingWriteServerMessage::kInitResponse, resp); - UNIT_ASSERT_C(!resp.init_response().session_id().empty(), resp); - if (resp.init_response().block_format_version() > 0) { - UNIT_ASSERT_C(resp.init_response().max_block_size() > 0, resp); - UNIT_ASSERT_C(resp.init_response().max_flush_window_size() > 0, resp); - } - part = resp.init_response().partition_id(); - } else { - Cerr << resp << "\n"; - UNIT_ASSERT(resp.server_message_case() == Ydb::PersQueue::V1::StreamingWriteServerMessage::SERVER_MESSAGE_NOT_SET); - return 0; - } - - // Send data requests. - Flush(data, stream, ticket); - - Flush(data, stream, ticket); - - Flush(data, stream, ticket); - - Flush(data, stream, ticket); - - //will cause only 4 answers in stream->Read - third call will fail, not blocks - stream->WritesDone(); - - UNIT_ASSERT(!stream->Read(&resp)); - - auto status = stream->Finish(); - Cerr << status.ok() << " " << (int)status.error_code() << " " << status.error_message() << "\n"; - UNIT_ASSERT(status.ok()); - return part; - } - - template <typename S> + + + auto stream = StubP_->StreamingWrite(&context); + UNIT_ASSERT(stream); + + // Send initial request. + Ydb::PersQueue::V1::StreamingWriteClientMessage req; + Ydb::PersQueue::V1::StreamingWriteServerMessage resp; + + req.mutable_init_request()->set_topic(topic); + req.mutable_init_request()->set_message_group_id(SourceId_); + req.mutable_init_request()->set_max_supported_block_format_version(0); + + (*req.mutable_init_request()->mutable_session_meta())["key"] = "value"; + + if (!stream->Write(req)) { + ythrow yexception() << "write fail"; + } + + ui32 part = 0; + + if (!stream->Read(&resp)) { + auto status = stream->Finish(); + if (error) { + Cerr << (int)status.error_code() << " " << status.error_message() << "\n"; + UNIT_ASSERT(status.error_code() == grpc::StatusCode::UNAUTHENTICATED); + + } else { + UNIT_ASSERT(false); + } + } + + if (!error) { + UNIT_ASSERT_C(resp.server_message_case() == Ydb::PersQueue::V1::StreamingWriteServerMessage::kInitResponse, resp); + UNIT_ASSERT_C(!resp.init_response().session_id().empty(), resp); + if (resp.init_response().block_format_version() > 0) { + UNIT_ASSERT_C(resp.init_response().max_block_size() > 0, resp); + UNIT_ASSERT_C(resp.init_response().max_flush_window_size() > 0, resp); + } + part = resp.init_response().partition_id(); + } else { + Cerr << resp << "\n"; + UNIT_ASSERT(resp.server_message_case() == Ydb::PersQueue::V1::StreamingWriteServerMessage::SERVER_MESSAGE_NOT_SET); + return 0; + } + + // Send data requests. 
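            // Each Flush() below sends one write_request and waits for the matching
            // kBatchWriteResponse (plus an update-token round trip when a ticket is supplied),
            // so the four calls yield four server replies before WritesDone() half-closes the
            // stream and the final Read() is expected to fail.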
+ Flush(data, stream, ticket); + + Flush(data, stream, ticket); + + Flush(data, stream, ticket); + + Flush(data, stream, ticket); + + //will cause only 4 answers in stream->Read - third call will fail, not blocks + stream->WritesDone(); + + UNIT_ASSERT(!stream->Read(&resp)); + + auto status = stream->Finish(); + Cerr << status.ok() << " " << (int)status.error_code() << " " << status.error_message() << "\n"; + UNIT_ASSERT(status.ok()); + return part; + } + + template <typename S> void Flush(const TVector<TString>& data, S& stream, const TMaybe<TString>& ticket) { - Ydb::PersQueue::V1::StreamingWriteClientMessage request; - Ydb::PersQueue::V1::StreamingWriteServerMessage response; - + Ydb::PersQueue::V1::StreamingWriteClientMessage request; + Ydb::PersQueue::V1::StreamingWriteServerMessage response; + if (ticket) { request.mutable_update_token_request()->set_token(*ticket); Cerr << "update user token request: " << request << Endl; @@ -314,67 +314,67 @@ private: UNIT_ASSERT_C(response.server_message_case() == Ydb::PersQueue::V1::StreamingWriteServerMessage::kUpdateTokenResponse, response); } - TVector<ui64> allSeqNo; - auto* mutableData = request.mutable_write_request(); - ui32 offset = 0; - for (const TString& d : data) { - ui64 seqNo = AtomicIncrement(SeqNo_); - allSeqNo.push_back(seqNo); - mutableData->add_sequence_numbers(seqNo); - mutableData->add_message_sizes(d.size()); - mutableData->add_created_at_ms(TInstant::Now().MilliSeconds()); - mutableData->add_sent_at_ms(TInstant::Now().MilliSeconds()); - - mutableData->add_blocks_offsets(offset++); - mutableData->add_blocks_part_numbers(0); - mutableData->add_blocks_message_counts(1); - mutableData->add_blocks_uncompressed_sizes(d.size()); + TVector<ui64> allSeqNo; + auto* mutableData = request.mutable_write_request(); + ui32 offset = 0; + for (const TString& d : data) { + ui64 seqNo = AtomicIncrement(SeqNo_); + allSeqNo.push_back(seqNo); + mutableData->add_sequence_numbers(seqNo); + mutableData->add_message_sizes(d.size()); + mutableData->add_created_at_ms(TInstant::Now().MilliSeconds()); + mutableData->add_sent_at_ms(TInstant::Now().MilliSeconds()); + + mutableData->add_blocks_offsets(offset++); + mutableData->add_blocks_part_numbers(0); + mutableData->add_blocks_message_counts(1); + mutableData->add_blocks_uncompressed_sizes(d.size()); mutableData->add_blocks_headers(TString(1, '\0') /* RAW codec ID */); mutableData->add_blocks_data(d); - } - - Cerr << "request: " << request << Endl; - if (!stream->Write(request)) { - ythrow yexception() << "write fail"; - } - - UNIT_ASSERT(stream->Read(&response)); - UNIT_ASSERT_C(response.server_message_case() == Ydb::PersQueue::V1::StreamingWriteServerMessage::kBatchWriteResponse, response); - UNIT_ASSERT_VALUES_EQUAL(data.size(), response.batch_write_response().sequence_numbers_size()); - for (size_t i = 0; i < data.size(); ++i) { - UNIT_ASSERT(!response.batch_write_response().already_written(i)); - UNIT_ASSERT_VALUES_EQUAL(response.batch_write_response().sequence_numbers(i), allSeqNo[i]); - } - UNIT_ASSERT(response.batch_write_response().has_write_statistics()); - } - - ui64 ReadCookieFromMetadata(const std::multimap<grpc::string_ref, grpc::string_ref>& meta) const { - auto ci = meta.find("cookie"); - if (ci == meta.end()) { - ythrow yexception() << "server didn't provide the cookie"; - } else { - return FromString<ui64>(TStringBuf(ci->second.data(), ci->second.size())); - } - } - - void InitializeChannel() { - Channel_ = grpc::CreateChannel("localhost:" + ToString(Port_), 
grpc::InsecureChannelCredentials()); - StubP_ = Ydb::PersQueue::V1::PersQueueService::NewStub(Channel_); - } - -private: - const TString SourceId_; - const ui16 Port_; - - TFlatMsgBusPQClient& Client; - TTestActorRuntime *Runtime; - - TAtomic SeqNo_ = 1; - - //! Сетевой канал взаимодействия с proxy-сервером. - std::shared_ptr<grpc::Channel> Channel_; - std::unique_ptr<Ydb::PersQueue::V1::PersQueueService::Stub> StubP_; - -}; - -} // namespace NKikimr::NPersQueueTests + } + + Cerr << "request: " << request << Endl; + if (!stream->Write(request)) { + ythrow yexception() << "write fail"; + } + + UNIT_ASSERT(stream->Read(&response)); + UNIT_ASSERT_C(response.server_message_case() == Ydb::PersQueue::V1::StreamingWriteServerMessage::kBatchWriteResponse, response); + UNIT_ASSERT_VALUES_EQUAL(data.size(), response.batch_write_response().sequence_numbers_size()); + for (size_t i = 0; i < data.size(); ++i) { + UNIT_ASSERT(!response.batch_write_response().already_written(i)); + UNIT_ASSERT_VALUES_EQUAL(response.batch_write_response().sequence_numbers(i), allSeqNo[i]); + } + UNIT_ASSERT(response.batch_write_response().has_write_statistics()); + } + + ui64 ReadCookieFromMetadata(const std::multimap<grpc::string_ref, grpc::string_ref>& meta) const { + auto ci = meta.find("cookie"); + if (ci == meta.end()) { + ythrow yexception() << "server didn't provide the cookie"; + } else { + return FromString<ui64>(TStringBuf(ci->second.data(), ci->second.size())); + } + } + + void InitializeChannel() { + Channel_ = grpc::CreateChannel("localhost:" + ToString(Port_), grpc::InsecureChannelCredentials()); + StubP_ = Ydb::PersQueue::V1::PersQueueService::NewStub(Channel_); + } + +private: + const TString SourceId_; + const ui16 Port_; + + TFlatMsgBusPQClient& Client; + TTestActorRuntime *Runtime; + + TAtomic SeqNo_ = 1; + + //! Сетевой канал взаимодействия с proxy-сервером. + std::shared_ptr<grpc::Channel> Channel_; + std::unique_ptr<Ydb::PersQueue::V1::PersQueueService::Stub> StubP_; + +}; + +} // namespace NKikimr::NPersQueueTests diff --git a/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp b/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp index 779d60e7785..a99ce09837b 100644 --- a/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp +++ b/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp @@ -16,7 +16,7 @@ TRateLimiterTestSetup::TRateLimiterTestSetup( bool enableReadQuoting ) : Server(new NPersQueue::TTestServer(false)) - , LimitedEntity(limitedEntity) + , LimitedEntity(limitedEntity) , WriteAccountQuota(writeAccountQuota) , ReadAccountQuota(readAccountQuota) { @@ -98,7 +98,7 @@ THolder<Ydb::PersQueue::IProducer> TRateLimiterTestSetup::StartProducer(const TS Ydb::PersQueue::TProducerSettings producerSettings; producerSettings.Server = Ydb::PersQueue::TServerSetting("localhost", Server->GrpcPort); producerSettings.Topic = topicPath; - producerSettings.SourceId = "TRateLimiterTestSetupSourceId"; + producerSettings.SourceId = "TRateLimiterTestSetupSourceId"; producerSettings.Codec = compress ? 
"gzip" : "raw"; THolder<Ydb::PersQueue::IProducer> producer = PQLib->CreateProducer(producerSettings); auto startResult = producer->Start(); @@ -110,7 +110,7 @@ THolder<Ydb::PersQueue::IProducer> TRateLimiterTestSetup::StartProducer(const TS void TRateLimiterTestSetup::Start(bool enableReadQuoting) { InitServer(enableReadQuoting); InitQuoting(); - WaitWritePQServiceInitialization(); + WaitWritePQServiceInitialization(); } void TRateLimiterTestSetup::InitServer(bool enableReadQuoting) { @@ -131,8 +131,8 @@ void TRateLimiterTestSetup::InitServer(bool enableReadQuoting) { NKikimrServices::QUOTER_PROXY, NKikimrServices::KESUS_TABLET, NKikimrServices::PQ_READ_SPEED_LIMITER - }, - NActors::NLog::PRI_TRACE + }, + NActors::NLog::PRI_TRACE ); } @@ -142,8 +142,8 @@ void TRateLimiterTestSetup::InitQuoting() { Server->AnnoyingClient->MkDir("/Root/PersQueue/System", "Quoters"); } -void TRateLimiterTestSetup::WaitWritePQServiceInitialization() { +void TRateLimiterTestSetup::WaitWritePQServiceInitialization() { PQDataWriter = MakeHolder<TPQDataWriter>("writer_source_id", *Server); } - -} + +} diff --git a/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.h b/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.h index 01529818e1a..3df7ca403f9 100644 --- a/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.h +++ b/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.h @@ -1,6 +1,6 @@ #pragma once -#include "pq_data_writer.h" - +#include "pq_data_writer.h" + #include <ydb/core/testlib/test_client.h> #include <ydb/core/testlib/test_pq_client.h> @@ -46,12 +46,12 @@ private: void InitServer(bool enableReadQuoting); void InitQuoting(); - void WaitWritePQServiceInitialization(); + void WaitWritePQServiceInitialization(); private: THolder<NPersQueue::TTestServer> Server; - THolder<TPQDataWriter> PQDataWriter; // For waiting for grpc writer service initialization. - const NKikimrPQ::TPQConfig::TQuotingConfig::ELimitedEntity LimitedEntity; + THolder<TPQDataWriter> PQDataWriter; // For waiting for grpc writer service initialization. 
+ const NKikimrPQ::TPQConfig::TQuotingConfig::ELimitedEntity LimitedEntity; double WriteAccountQuota; double ReadAccountQuota; const TString QuotersRootPath = "/Root/PersQueue/System/Quoters"; diff --git a/ydb/services/rate_limiter/grpc_service.cpp b/ydb/services/rate_limiter/grpc_service.cpp index 480045d29ac..94925fc9f64 100644 --- a/ydb/services/rate_limiter/grpc_service.cpp +++ b/ydb/services/rate_limiter/grpc_service.cpp @@ -1,71 +1,71 @@ -#include "grpc_service.h" - +#include "grpc_service.h" + #include <ydb/core/grpc_services/grpc_helper.h> #include <ydb/core/grpc_services/grpc_request_proxy.h> #include <ydb/core/grpc_services/rpc_calls.h> - -namespace NKikimr::NQuoter { - + +namespace NKikimr::NQuoter { + TRateLimiterGRpcService::TRateLimiterGRpcService(NActors::TActorSystem* actorSystem, TIntrusivePtr<NMonitoring::TDynamicCounters> counters, NActors::TActorId grpcRequestProxyId) - : ActorSystem(actorSystem) - , Counters(std::move(counters)) - , GRpcRequestProxyId(grpcRequestProxyId) -{ -} - -TRateLimiterGRpcService::~TRateLimiterGRpcService() = default; - + : ActorSystem(actorSystem) + , Counters(std::move(counters)) + , GRpcRequestProxyId(grpcRequestProxyId) +{ +} + +TRateLimiterGRpcService::~TRateLimiterGRpcService() = default; + void TRateLimiterGRpcService::InitService(grpc::ServerCompletionQueue* cq, NGrpc::TLoggerPtr logger) { - CQ = cq; + CQ = cq; SetupIncomingRequests(std::move(logger)); -} - +} + void TRateLimiterGRpcService::SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) { - Limiter = limiter; -} - -bool TRateLimiterGRpcService::IncRequest() { - return Limiter->Inc(); -} - -void TRateLimiterGRpcService::DecRequest() { - Limiter->Dec(); -} - + Limiter = limiter; +} + +bool TRateLimiterGRpcService::IncRequest() { + return Limiter->Inc(); +} + +void TRateLimiterGRpcService::DecRequest() { + Limiter->Dec(); +} + void TRateLimiterGRpcService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) { - auto getCounterBlock = NGRpcService::CreateCounterCb(Counters, ActorSystem); - -#ifdef SETUP_METHOD -#error SETUP_METHOD macro collision -#endif - -#define SETUP_METHOD(methodName, event) \ - MakeIntrusive<NGRpcService::TGRpcRequest< \ - Ydb::RateLimiter::Y_CAT(methodName, Request), \ - Ydb::RateLimiter::Y_CAT(methodName, Response), \ - TRateLimiterGRpcService>> \ - ( \ - this, \ - &Service_, \ - CQ, \ + auto getCounterBlock = NGRpcService::CreateCounterCb(Counters, ActorSystem); + +#ifdef SETUP_METHOD +#error SETUP_METHOD macro collision +#endif + +#define SETUP_METHOD(methodName, event) \ + MakeIntrusive<NGRpcService::TGRpcRequest< \ + Ydb::RateLimiter::Y_CAT(methodName, Request), \ + Ydb::RateLimiter::Y_CAT(methodName, Response), \ + TRateLimiterGRpcService>> \ + ( \ + this, \ + &Service_, \ + CQ, \ [this](NGrpc::IRequestContextBase* reqCtx) { \ - NGRpcService::ReportGrpcReqToMon(*ActorSystem, reqCtx->GetPeer()); \ - ActorSystem->Send(GRpcRequestProxyId, new NGRpcService::event(reqCtx)); \ - }, \ - &Ydb::RateLimiter::V1::RateLimiterService::AsyncService::Y_CAT(Request, methodName), \ - "RateLimiter/" Y_STRINGIZE(methodName), \ + NGRpcService::ReportGrpcReqToMon(*ActorSystem, reqCtx->GetPeer()); \ + ActorSystem->Send(GRpcRequestProxyId, new NGRpcService::event(reqCtx)); \ + }, \ + &Ydb::RateLimiter::V1::RateLimiterService::AsyncService::Y_CAT(Request, methodName), \ + "RateLimiter/" Y_STRINGIZE(methodName), \ logger, \ - getCounterBlock("rate_limiter", Y_STRINGIZE(methodName)) \ - )->Run() - - SETUP_METHOD(CreateResource, TEvCreateRateLimiterResource); - 
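    // Each SETUP_METHOD(Name, Event) invocation expands to a TGRpcRequest registration for
    // Ydb::RateLimiter::<Name>Request / <Name>Response: it reports the call to monitoring,
    // forwards the wrapped request to the gRPC request proxy actor as an Event, and starts
    // accepting calls via Run().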
SETUP_METHOD(AlterResource, TEvAlterRateLimiterResource); - SETUP_METHOD(DropResource, TEvDropRateLimiterResource); - SETUP_METHOD(ListResources, TEvListRateLimiterResources); - SETUP_METHOD(DescribeResource, TEvDescribeRateLimiterResource); + getCounterBlock("rate_limiter", Y_STRINGIZE(methodName)) \ + )->Run() + + SETUP_METHOD(CreateResource, TEvCreateRateLimiterResource); + SETUP_METHOD(AlterResource, TEvAlterRateLimiterResource); + SETUP_METHOD(DropResource, TEvDropRateLimiterResource); + SETUP_METHOD(ListResources, TEvListRateLimiterResources); + SETUP_METHOD(DescribeResource, TEvDescribeRateLimiterResource); SETUP_METHOD(AcquireResource, TEvAcquireRateLimiterResource); - -#undef SETUP_METHOD -} - -} // namespace NKikimr::NQuoter + +#undef SETUP_METHOD +} + +} // namespace NKikimr::NQuoter diff --git a/ydb/services/rate_limiter/grpc_service.h b/ydb/services/rate_limiter/grpc_service.h index 82ab6f91f53..a9ea49eb6d0 100644 --- a/ydb/services/rate_limiter/grpc_service.h +++ b/ydb/services/rate_limiter/grpc_service.h @@ -1,34 +1,34 @@ -#pragma once - +#pragma once + #include <library/cpp/actors/core/actorsystem.h> #include <library/cpp/grpc/server/grpc_server.h> #include <ydb/public/api/grpc/ydb_rate_limiter_v1.grpc.pb.h> - -namespace NKikimr::NQuoter { - -class TRateLimiterGRpcService + +namespace NKikimr::NQuoter { + +class TRateLimiterGRpcService : public NGrpc::TGrpcServiceBase<Ydb::RateLimiter::V1::RateLimiterService> -{ -public: +{ +public: TRateLimiterGRpcService(NActors::TActorSystem* actorSystem, TIntrusivePtr<NMonitoring::TDynamicCounters> counters, NActors::TActorId grpcRequestProxyId); - ~TRateLimiterGRpcService(); - + ~TRateLimiterGRpcService(); + void InitService(grpc::ServerCompletionQueue* cq, NGrpc::TLoggerPtr logger) override; void SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) override; - - bool IncRequest(); - void DecRequest(); - -private: + + bool IncRequest(); + void DecRequest(); + +private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); - -private: - NActors::TActorSystem* ActorSystem = nullptr; - TIntrusivePtr<NMonitoring::TDynamicCounters> Counters; + +private: + NActors::TActorSystem* ActorSystem = nullptr; + TIntrusivePtr<NMonitoring::TDynamicCounters> Counters; NActors::TActorId GRpcRequestProxyId; - - grpc::ServerCompletionQueue* CQ = nullptr; + + grpc::ServerCompletionQueue* CQ = nullptr; NGrpc::TGlobalLimiter* Limiter = nullptr; -}; - -} // namespace NKikimr::NQuoter +}; + +} // namespace NKikimr::NQuoter diff --git a/ydb/services/rate_limiter/rate_limiter_ut.cpp b/ydb/services/rate_limiter/rate_limiter_ut.cpp index 4ac05a571c3..4d05092c07b 100644 --- a/ydb/services/rate_limiter/rate_limiter_ut.cpp +++ b/ydb/services/rate_limiter/rate_limiter_ut.cpp @@ -1,8 +1,8 @@ #include <ydb/services/ydb/ydb_common_ut.h> - + #include <ydb/public/sdk/cpp/client/ydb_coordination/coordination.h> #include <ydb/public/sdk/cpp/client/ydb_rate_limiter/rate_limiter.h> - + #include <ydb/public/sdk/cpp/client/ydb_types/status/status.h> #include <ydb/core/grpc_services/local_rate_limiter.h> @@ -11,81 +11,81 @@ #include <library/cpp/testing/unittest/tests_data.h> #include <library/cpp/testing/unittest/registar.h> - -#include <util/string/builder.h> - + +#include <util/string/builder.h> + #include <ydb/public/api/protos/ydb_rate_limiter.pb.h> -namespace NKikimr { - -using NYdb::NRateLimiter::TCreateResourceSettings; - -namespace { - +namespace NKikimr { + +using NYdb::NRateLimiter::TCreateResourceSettings; + +namespace { + void SetDuration(const TDuration& duration, 
google::protobuf::Duration& protoValue) { protoValue.set_seconds(duration.Seconds()); protoValue.set_nanos(duration.NanoSecondsOfSecond()); } -TString PrintStatus(const NYdb::TStatus& status) { - TStringBuilder builder; - builder << status.GetStatus(); - const auto& issues = status.GetIssues(); - if (issues) { - builder << ":\n"; - issues.PrintTo(builder.Out); - } - return builder; -} - -#define ASSERT_STATUS_C(expr, status, comment) { \ - const auto Y_CAT(resultFuture, __LINE__) = (expr); \ - const auto Y_CAT(result, __LINE__) = Y_CAT(resultFuture, __LINE__).GetValueSync(); \ - UNIT_ASSERT_VALUES_EQUAL_C(Y_CAT(result, __LINE__).GetStatus(), status, \ - Y_STRINGIZE(expr) " has status different from expected one. Error description: " << PrintStatus(Y_CAT(result, __LINE__)) \ - << " " << comment); \ - } - -#define ASSERT_STATUS(expr, status) ASSERT_STATUS_C(expr, status, "") - -#define ASSERT_STATUS_SUCCESS_C(expr, comment) ASSERT_STATUS_C(expr, NYdb::EStatus::SUCCESS, comment) - -#define ASSERT_STATUS_SUCCESS(expr) ASSERT_STATUS_SUCCESS_C(expr, "") - +TString PrintStatus(const NYdb::TStatus& status) { + TStringBuilder builder; + builder << status.GetStatus(); + const auto& issues = status.GetIssues(); + if (issues) { + builder << ":\n"; + issues.PrintTo(builder.Out); + } + return builder; +} + +#define ASSERT_STATUS_C(expr, status, comment) { \ + const auto Y_CAT(resultFuture, __LINE__) = (expr); \ + const auto Y_CAT(result, __LINE__) = Y_CAT(resultFuture, __LINE__).GetValueSync(); \ + UNIT_ASSERT_VALUES_EQUAL_C(Y_CAT(result, __LINE__).GetStatus(), status, \ + Y_STRINGIZE(expr) " has status different from expected one. Error description: " << PrintStatus(Y_CAT(result, __LINE__)) \ + << " " << comment); \ + } + +#define ASSERT_STATUS(expr, status) ASSERT_STATUS_C(expr, status, "") + +#define ASSERT_STATUS_SUCCESS_C(expr, comment) ASSERT_STATUS_C(expr, NYdb::EStatus::SUCCESS, comment) + +#define ASSERT_STATUS_SUCCESS(expr) ASSERT_STATUS_SUCCESS_C(expr, "") + class TTestSetup { public: - TTestSetup() - : Driver(MakeDriverConfig()) - , CoordinationClient(Driver) - , RateLimiterClient(Driver) - { - CreateCoordinationNode(); - } - + TTestSetup() + : Driver(MakeDriverConfig()) + , CoordinationClient(Driver) + , RateLimiterClient(Driver) + { + CreateCoordinationNode(); + } + virtual ~TTestSetup() = default; - NYdb::TDriverConfig MakeDriverConfig() { - return NYdb::TDriverConfig() - .SetEndpoint(TStringBuilder() << "localhost:" << Server.GetPort()); - } - - void CreateCoordinationNode(const TString& path = CoordinationNodePath) { - ASSERT_STATUS_SUCCESS_C(CoordinationClient.CreateNode(path), "\nPath: " << path); - } - + NYdb::TDriverConfig MakeDriverConfig() { + return NYdb::TDriverConfig() + .SetEndpoint(TStringBuilder() << "localhost:" << Server.GetPort()); + } + + void CreateCoordinationNode(const TString& path = CoordinationNodePath) { + ASSERT_STATUS_SUCCESS_C(CoordinationClient.CreateNode(path), "\nPath: " << path); + } + void virtual CheckAcquireResource(const TString& coordinationNodePath, const TString& resourcePath, const NYdb::NRateLimiter::TAcquireResourceSettings& settings, NYdb::EStatus expected) { const auto acquireResultFuture = RateLimiterClient.AcquireResource(coordinationNodePath, resourcePath, settings); ASSERT_STATUS(acquireResultFuture, expected); } - static TString CoordinationNodePath; - - NYdb::TKikimrWithGrpcAndRootSchema Server; - NYdb::TDriver Driver; - NYdb::NCoordination::TClient CoordinationClient; - NYdb::NRateLimiter::TRateLimiterClient RateLimiterClient; -}; - + 
static TString CoordinationNodePath; + + NYdb::TKikimrWithGrpcAndRootSchema Server; + NYdb::TDriver Driver; + NYdb::NCoordination::TClient CoordinationClient; + NYdb::NRateLimiter::TRateLimiterClient RateLimiterClient; +}; + class TTestSetupAcquireActor : public TTestSetup { private: class TAcquireActor : public TActorBootstrapped<TAcquireActor> { @@ -147,168 +147,168 @@ public: } }; -TString TTestSetup::CoordinationNodePath = "/Root/CoordinationNode"; - -} // namespace - -Y_UNIT_TEST_SUITE(TGRpcRateLimiterTest) { - Y_UNIT_TEST(CreateResource) { - TTestSetup setup; - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "res", - TCreateResourceSettings().MaxUnitsPerSecond(1).MaxBurstSizeCoefficient(42))); - - { - const auto describeResultFuture = setup.RateLimiterClient.DescribeResource(TTestSetup::CoordinationNodePath, "res"); - ASSERT_STATUS_SUCCESS(describeResultFuture); - const auto describeResult = describeResultFuture.GetValueSync(); - UNIT_ASSERT_VALUES_EQUAL(describeResult.GetResourcePath(), "res"); - UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxUnitsPerSecond(), 1); - UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxBurstSizeCoefficient(), 42); - } - - // Fail - not canonized path - ASSERT_STATUS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "/res/res2"), NYdb::EStatus::BAD_REQUEST); - } - - Y_UNIT_TEST(UpdateResource) { - using NYdb::NRateLimiter::TAlterResourceSettings; - - TTestSetup setup; - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "res", - TCreateResourceSettings().MaxUnitsPerSecond(100500).MaxBurstSizeCoefficient(2))); - - { - const auto describeResultFuture = setup.RateLimiterClient.DescribeResource(TTestSetup::CoordinationNodePath, "res"); - ASSERT_STATUS_SUCCESS(describeResultFuture); - const auto describeResult = describeResultFuture.GetValueSync(); - UNIT_ASSERT_VALUES_EQUAL(describeResult.GetResourcePath(), "res"); - UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxUnitsPerSecond(), 100500); // previous value - UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxBurstSizeCoefficient(), 2); // previous value - } - - // Fail - negative max units per second - ASSERT_STATUS(setup.RateLimiterClient.AlterResource(TTestSetup::CoordinationNodePath, "res", - TAlterResourceSettings().MaxUnitsPerSecond(-1)), NYdb::EStatus::BAD_REQUEST); - - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.AlterResource(TTestSetup::CoordinationNodePath, "res", - TAlterResourceSettings().MaxUnitsPerSecond(100))); - - { - const auto describeResultFuture = setup.RateLimiterClient.DescribeResource(TTestSetup::CoordinationNodePath, "res"); - ASSERT_STATUS_SUCCESS(describeResultFuture); - const auto describeResult = describeResultFuture.GetValueSync(); - UNIT_ASSERT_VALUES_EQUAL(describeResult.GetResourcePath(), "res"); - UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxUnitsPerSecond(), 100); // applied - UNIT_ASSERT(!describeResult.GetHierarchicalDrrProps().GetMaxBurstSizeCoefficient()); - } - } - - Y_UNIT_TEST(DropResource) { - TTestSetup setup; - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "res", - TCreateResourceSettings().MaxUnitsPerSecond(100500))); - - // Fail - not found - ASSERT_STATUS(setup.RateLimiterClient.DropResource(TTestSetup::CoordinationNodePath, "non_existent_resource"), NYdb::EStatus::NOT_FOUND); 
- - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.DropResource(TTestSetup::CoordinationNodePath, "res")); - - { - const auto listResultFuture = setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, ""); - ASSERT_STATUS_SUCCESS(listResultFuture); - const auto listResult = listResultFuture.GetValueSync(); - UNIT_ASSERT(listResult.GetResourcePaths().empty()); - } - } - - Y_UNIT_TEST(DescribeResource) { - TTestSetup setup; - // Fail - not found - ASSERT_STATUS(setup.RateLimiterClient.DropResource(TTestSetup::CoordinationNodePath, "no_resource"), NYdb::EStatus::NOT_FOUND); - - ASSERT_STATUS(setup.RateLimiterClient.DropResource(TTestSetup::CoordinationNodePath, "not//canonized/resource/path"), NYdb::EStatus::BAD_REQUEST); - - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "resource", - TCreateResourceSettings().MaxUnitsPerSecond(100500))); - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "resource/child")); - - { - const auto describeResultFuture = setup.RateLimiterClient.DescribeResource(TTestSetup::CoordinationNodePath, "resource"); - ASSERT_STATUS_SUCCESS(describeResultFuture); - const auto describeResult = describeResultFuture.GetValueSync(); - UNIT_ASSERT_VALUES_EQUAL(describeResult.GetResourcePath(), "resource"); - UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxUnitsPerSecond(), 100500); - UNIT_ASSERT(!describeResult.GetHierarchicalDrrProps().GetMaxBurstSizeCoefficient()); - } - - { - const auto describeResultFuture = setup.RateLimiterClient.DescribeResource(TTestSetup::CoordinationNodePath, "resource/child"); - ASSERT_STATUS_SUCCESS(describeResultFuture); - const auto describeResult = describeResultFuture.GetValueSync(); - UNIT_ASSERT_VALUES_EQUAL(describeResult.GetResourcePath(), "resource/child"); - UNIT_ASSERT(!describeResult.GetHierarchicalDrrProps().GetMaxUnitsPerSecond()); - UNIT_ASSERT(!describeResult.GetHierarchicalDrrProps().GetMaxBurstSizeCoefficient()); - } - } - - Y_UNIT_TEST(ListResources) { - using NYdb::NRateLimiter::TListResourcesSettings; - - TTestSetup setup; - // Fail - not found - ASSERT_STATUS(setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, "no_resource"), NYdb::EStatus::NOT_FOUND); - ASSERT_STATUS(setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, "no_resource", TListResourcesSettings().Recursive()), NYdb::EStatus::NOT_FOUND); - - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "parent1", - TCreateResourceSettings().MaxUnitsPerSecond(100500))); - - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "parent2", - TCreateResourceSettings().MaxUnitsPerSecond(100500))); - - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "parent1/child1")); - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "parent1/child2")); - ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "parent2/child")); - - // All - { - const auto listResultFuture = setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, "", TListResourcesSettings().Recursive(true)); - ASSERT_STATUS_SUCCESS(listResultFuture); - const auto listResult = listResultFuture.GetValueSync(); - TVector<TString> paths = listResult.GetResourcePaths(); - std::sort(paths.begin(), paths.end()); - 
UNIT_ASSERT_VALUES_EQUAL(paths.size(), 5); - UNIT_ASSERT_VALUES_EQUAL(paths[0], "parent1"); - UNIT_ASSERT_VALUES_EQUAL(paths[1], "parent1/child1"); - UNIT_ASSERT_VALUES_EQUAL(paths[2], "parent1/child2"); - UNIT_ASSERT_VALUES_EQUAL(paths[3], "parent2"); - UNIT_ASSERT_VALUES_EQUAL(paths[4], "parent2/child"); - } - - // All roots - { - const auto listResultFuture = setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, "", TListResourcesSettings().Recursive(false)); - ASSERT_STATUS_SUCCESS(listResultFuture); - const auto listResult = listResultFuture.GetValueSync(); - TVector<TString> paths = listResult.GetResourcePaths(); - std::sort(paths.begin(), paths.end()); - UNIT_ASSERT_VALUES_EQUAL(paths.size(), 2); - UNIT_ASSERT_VALUES_EQUAL(paths[0], "parent1"); - UNIT_ASSERT_VALUES_EQUAL(paths[1], "parent2"); - } - - // All children of parent1 - { - const auto listResultFuture = setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, "parent1", TListResourcesSettings().Recursive()); - ASSERT_STATUS_SUCCESS(listResultFuture); - const auto listResult = listResultFuture.GetValueSync(); - TVector<TString> paths = listResult.GetResourcePaths(); - std::sort(paths.begin(), paths.end()); - UNIT_ASSERT_VALUES_EQUAL(paths.size(), 3); - UNIT_ASSERT_VALUES_EQUAL(paths[0], "parent1"); - UNIT_ASSERT_VALUES_EQUAL(paths[1], "parent1/child1"); - UNIT_ASSERT_VALUES_EQUAL(paths[2], "parent1/child2"); - } - } +TString TTestSetup::CoordinationNodePath = "/Root/CoordinationNode"; + +} // namespace + +Y_UNIT_TEST_SUITE(TGRpcRateLimiterTest) { + Y_UNIT_TEST(CreateResource) { + TTestSetup setup; + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "res", + TCreateResourceSettings().MaxUnitsPerSecond(1).MaxBurstSizeCoefficient(42))); + + { + const auto describeResultFuture = setup.RateLimiterClient.DescribeResource(TTestSetup::CoordinationNodePath, "res"); + ASSERT_STATUS_SUCCESS(describeResultFuture); + const auto describeResult = describeResultFuture.GetValueSync(); + UNIT_ASSERT_VALUES_EQUAL(describeResult.GetResourcePath(), "res"); + UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxUnitsPerSecond(), 1); + UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxBurstSizeCoefficient(), 42); + } + + // Fail - not canonized path + ASSERT_STATUS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "/res/res2"), NYdb::EStatus::BAD_REQUEST); + } + + Y_UNIT_TEST(UpdateResource) { + using NYdb::NRateLimiter::TAlterResourceSettings; + + TTestSetup setup; + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "res", + TCreateResourceSettings().MaxUnitsPerSecond(100500).MaxBurstSizeCoefficient(2))); + + { + const auto describeResultFuture = setup.RateLimiterClient.DescribeResource(TTestSetup::CoordinationNodePath, "res"); + ASSERT_STATUS_SUCCESS(describeResultFuture); + const auto describeResult = describeResultFuture.GetValueSync(); + UNIT_ASSERT_VALUES_EQUAL(describeResult.GetResourcePath(), "res"); + UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxUnitsPerSecond(), 100500); // previous value + UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxBurstSizeCoefficient(), 2); // previous value + } + + // Fail - negative max units per second + ASSERT_STATUS(setup.RateLimiterClient.AlterResource(TTestSetup::CoordinationNodePath, "res", + TAlterResourceSettings().MaxUnitsPerSecond(-1)), 
NYdb::EStatus::BAD_REQUEST); + + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.AlterResource(TTestSetup::CoordinationNodePath, "res", + TAlterResourceSettings().MaxUnitsPerSecond(100))); + + { + const auto describeResultFuture = setup.RateLimiterClient.DescribeResource(TTestSetup::CoordinationNodePath, "res"); + ASSERT_STATUS_SUCCESS(describeResultFuture); + const auto describeResult = describeResultFuture.GetValueSync(); + UNIT_ASSERT_VALUES_EQUAL(describeResult.GetResourcePath(), "res"); + UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxUnitsPerSecond(), 100); // applied + UNIT_ASSERT(!describeResult.GetHierarchicalDrrProps().GetMaxBurstSizeCoefficient()); + } + } + + Y_UNIT_TEST(DropResource) { + TTestSetup setup; + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "res", + TCreateResourceSettings().MaxUnitsPerSecond(100500))); + + // Fail - not found + ASSERT_STATUS(setup.RateLimiterClient.DropResource(TTestSetup::CoordinationNodePath, "non_existent_resource"), NYdb::EStatus::NOT_FOUND); + + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.DropResource(TTestSetup::CoordinationNodePath, "res")); + + { + const auto listResultFuture = setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, ""); + ASSERT_STATUS_SUCCESS(listResultFuture); + const auto listResult = listResultFuture.GetValueSync(); + UNIT_ASSERT(listResult.GetResourcePaths().empty()); + } + } + + Y_UNIT_TEST(DescribeResource) { + TTestSetup setup; + // Fail - not found + ASSERT_STATUS(setup.RateLimiterClient.DropResource(TTestSetup::CoordinationNodePath, "no_resource"), NYdb::EStatus::NOT_FOUND); + + ASSERT_STATUS(setup.RateLimiterClient.DropResource(TTestSetup::CoordinationNodePath, "not//canonized/resource/path"), NYdb::EStatus::BAD_REQUEST); + + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "resource", + TCreateResourceSettings().MaxUnitsPerSecond(100500))); + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "resource/child")); + + { + const auto describeResultFuture = setup.RateLimiterClient.DescribeResource(TTestSetup::CoordinationNodePath, "resource"); + ASSERT_STATUS_SUCCESS(describeResultFuture); + const auto describeResult = describeResultFuture.GetValueSync(); + UNIT_ASSERT_VALUES_EQUAL(describeResult.GetResourcePath(), "resource"); + UNIT_ASSERT_VALUES_EQUAL(*describeResult.GetHierarchicalDrrProps().GetMaxUnitsPerSecond(), 100500); + UNIT_ASSERT(!describeResult.GetHierarchicalDrrProps().GetMaxBurstSizeCoefficient()); + } + + { + const auto describeResultFuture = setup.RateLimiterClient.DescribeResource(TTestSetup::CoordinationNodePath, "resource/child"); + ASSERT_STATUS_SUCCESS(describeResultFuture); + const auto describeResult = describeResultFuture.GetValueSync(); + UNIT_ASSERT_VALUES_EQUAL(describeResult.GetResourcePath(), "resource/child"); + UNIT_ASSERT(!describeResult.GetHierarchicalDrrProps().GetMaxUnitsPerSecond()); + UNIT_ASSERT(!describeResult.GetHierarchicalDrrProps().GetMaxBurstSizeCoefficient()); + } + } + + Y_UNIT_TEST(ListResources) { + using NYdb::NRateLimiter::TListResourcesSettings; + + TTestSetup setup; + // Fail - not found + ASSERT_STATUS(setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, "no_resource"), NYdb::EStatus::NOT_FOUND); + ASSERT_STATUS(setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, "no_resource", TListResourcesSettings().Recursive()), NYdb::EStatus::NOT_FOUND); + 
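        // The calls below build a small resource tree (parent1, parent2, parent1/child1,
        // parent1/child2, parent2/child) and then verify recursive listing, root-only listing,
        // and listing restricted to the parent1 subtree.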
+ ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "parent1", + TCreateResourceSettings().MaxUnitsPerSecond(100500))); + + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "parent2", + TCreateResourceSettings().MaxUnitsPerSecond(100500))); + + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "parent1/child1")); + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "parent1/child2")); + ASSERT_STATUS_SUCCESS(setup.RateLimiterClient.CreateResource(TTestSetup::CoordinationNodePath, "parent2/child")); + + // All + { + const auto listResultFuture = setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, "", TListResourcesSettings().Recursive(true)); + ASSERT_STATUS_SUCCESS(listResultFuture); + const auto listResult = listResultFuture.GetValueSync(); + TVector<TString> paths = listResult.GetResourcePaths(); + std::sort(paths.begin(), paths.end()); + UNIT_ASSERT_VALUES_EQUAL(paths.size(), 5); + UNIT_ASSERT_VALUES_EQUAL(paths[0], "parent1"); + UNIT_ASSERT_VALUES_EQUAL(paths[1], "parent1/child1"); + UNIT_ASSERT_VALUES_EQUAL(paths[2], "parent1/child2"); + UNIT_ASSERT_VALUES_EQUAL(paths[3], "parent2"); + UNIT_ASSERT_VALUES_EQUAL(paths[4], "parent2/child"); + } + + // All roots + { + const auto listResultFuture = setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, "", TListResourcesSettings().Recursive(false)); + ASSERT_STATUS_SUCCESS(listResultFuture); + const auto listResult = listResultFuture.GetValueSync(); + TVector<TString> paths = listResult.GetResourcePaths(); + std::sort(paths.begin(), paths.end()); + UNIT_ASSERT_VALUES_EQUAL(paths.size(), 2); + UNIT_ASSERT_VALUES_EQUAL(paths[0], "parent1"); + UNIT_ASSERT_VALUES_EQUAL(paths[1], "parent2"); + } + + // All children of parent1 + { + const auto listResultFuture = setup.RateLimiterClient.ListResources(TTestSetup::CoordinationNodePath, "parent1", TListResourcesSettings().Recursive()); + ASSERT_STATUS_SUCCESS(listResultFuture); + const auto listResult = listResultFuture.GetValueSync(); + TVector<TString> paths = listResult.GetResourcePaths(); + std::sort(paths.begin(), paths.end()); + UNIT_ASSERT_VALUES_EQUAL(paths.size(), 3); + UNIT_ASSERT_VALUES_EQUAL(paths[0], "parent1"); + UNIT_ASSERT_VALUES_EQUAL(paths[1], "parent1/child1"); + UNIT_ASSERT_VALUES_EQUAL(paths[2], "parent1/child2"); + } + } std::unique_ptr<TTestSetup> MakeTestSetup(bool useActorApi) { if (useActorApi) { @@ -362,6 +362,6 @@ Y_UNIT_TEST_SUITE(TGRpcRateLimiterTest) { Y_UNIT_TEST(AcquireResourceManyUsedActorApi) { AcquireResourceManyUsed(true); } -} - -} // namespace NKikimr +} + +} // namespace NKikimr diff --git a/ydb/services/rate_limiter/ut/ya.make b/ydb/services/rate_limiter/ut/ya.make index c79c7a08cd1..c3ee65d7d58 100644 --- a/ydb/services/rate_limiter/ut/ya.make +++ b/ydb/services/rate_limiter/ut/ya.make @@ -1,22 +1,22 @@ UNITTEST_FOR(ydb/services/rate_limiter) - + OWNER( galaxycrab g:kikimr ) - -SIZE(MEDIUM) - -SRCS( - rate_limiter_ut.cpp -) - -PEERDIR( + +SIZE(MEDIUM) + +SRCS( + rate_limiter_ut.cpp +) + +PEERDIR( ydb/core/testlib ydb/public/sdk/cpp/client/ydb_coordination ydb/public/sdk/cpp/client/ydb_rate_limiter -) - +) + YQL_LAST_ABI_VERSION() -END() +END() diff --git a/ydb/services/rate_limiter/ya.make b/ydb/services/rate_limiter/ya.make index 134624e8844..26d78201b67 100644 --- a/ydb/services/rate_limiter/ya.make +++ b/ydb/services/rate_limiter/ya.make @@ 
-1,23 +1,23 @@ -LIBRARY() - +LIBRARY() + OWNER( galaxycrab g:kikimr ) - -SRCS( - grpc_service.cpp -) - -PEERDIR( + +SRCS( + grpc_service.cpp +) + +PEERDIR( library/cpp/grpc/server ydb/core/grpc_services ydb/core/kesus/tablet ydb/public/api/grpc ydb/services/ydb -) - -END() +) + +END() RECURSE_FOR_TESTS( ut diff --git a/ydb/services/ydb/ydb_clickhouse_internal.h b/ydb/services/ydb/ydb_clickhouse_internal.h index a0ff8863d10..95769048005 100644 --- a/ydb/services/ydb/ydb_clickhouse_internal.h +++ b/ydb/services/ydb/ydb_clickhouse_internal.h @@ -29,12 +29,12 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; TIntrusivePtr<NGRpcService::TInFlightLimiterRegistry> LimiterRegistry_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/ydb/ydb_common_ut.h b/ydb/services/ydb/ydb_common_ut.h index c067928c785..19e964422bd 100644 --- a/ydb/services/ydb/ydb_common_ut.h +++ b/ydb/services/ydb/ydb_common_ut.h @@ -100,10 +100,10 @@ public: //Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY, NActors::NLog::PRI_DEBUG); //Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_OLAPSHARD, NActors::NLog::PRI_DEBUG); //Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD, NActors::NLog::PRI_DEBUG); - if (enableYq) { - Server_->GetRuntime()->SetLogPriority(NKikimrServices::YQL_PROXY, NActors::NLog::PRI_DEBUG); - Server_->GetRuntime()->SetLogPriority(NKikimrServices::KQP_COMPUTE, NActors::NLog::PRI_DEBUG); - } + if (enableYq) { + Server_->GetRuntime()->SetLogPriority(NKikimrServices::YQL_PROXY, NActors::NLog::PRI_DEBUG); + Server_->GetRuntime()->SetLogPriority(NKikimrServices::KQP_COMPUTE, NActors::NLog::PRI_DEBUG); + } NGrpc::TServerOptions grpcOption; if (TestSettings::AUTH) { diff --git a/ydb/services/ydb/ydb_experimental.h b/ydb/services/ydb/ydb_experimental.h index c312a18cacb..19e3b747cd4 100644 --- a/ydb/services/ydb/ydb_experimental.h +++ b/ydb/services/ydb/ydb_experimental.h @@ -25,11 +25,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/ydb/ydb_export.h b/ydb/services/ydb/ydb_export.h index 6049462ca22..31d127bd3cb 100644 --- a/ydb/services/ydb/ydb_export.h +++ b/ydb/services/ydb/ydb_export.h @@ -22,11 +22,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/ydb/ydb_import.h b/ydb/services/ydb/ydb_import.h index a7db52853a5..e1d9a54931e 100644 --- a/ydb/services/ydb/ydb_import.h +++ b/ydb/services/ydb/ydb_import.h @@ -22,11 +22,11 @@ private: void 
SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/ydb/ydb_logstore.h b/ydb/services/ydb/ydb_logstore.h index 27a176eb0bb..7ba50ef3426 100644 --- a/ydb/services/ydb/ydb_logstore.h +++ b/ydb/services/ydb/ydb_logstore.h @@ -24,11 +24,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/ydb/ydb_long_tx.h b/ydb/services/ydb/ydb_long_tx.h index 1788da91c55..ba9c6110f89 100644 --- a/ydb/services/ydb/ydb_long_tx.h +++ b/ydb/services/ydb/ydb_long_tx.h @@ -23,11 +23,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/ydb/ydb_operation.h b/ydb/services/ydb/ydb_operation.h index 9f7e8646b1d..c16ca898687 100644 --- a/ydb/services/ydb/ydb_operation.h +++ b/ydb/services/ydb/ydb_operation.h @@ -22,11 +22,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/ydb/ydb_s3_internal.h b/ydb/services/ydb/ydb_s3_internal.h index e947a9b131f..ec8c43c6695 100644 --- a/ydb/services/ydb/ydb_s3_internal.h +++ b/ydb/services/ydb/ydb_s3_internal.h @@ -23,11 +23,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/ydb/ydb_scheme.h b/ydb/services/ydb/ydb_scheme.h index a510e5f5920..1743846905d 100644 --- a/ydb/services/ydb/ydb_scheme.h +++ b/ydb/services/ydb/ydb_scheme.h @@ -22,11 +22,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/ydb/ydb_scripting.h 
b/ydb/services/ydb/ydb_scripting.h index 9566cadeeea..89cca0a6e84 100644 --- a/ydb/services/ydb/ydb_scripting.h +++ b/ydb/services/ydb/ydb_scripting.h @@ -23,11 +23,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/ydb/ydb_table.h b/ydb/services/ydb/ydb_table.h index 77756bf023d..5c7273b5fb2 100644 --- a/ydb/services/ydb/ydb_table.h +++ b/ydb/services/ydb/ydb_table.h @@ -24,11 +24,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/yq/grpc_service.h b/ydb/services/yq/grpc_service.h index 6cb8d70c640..5a4504f0b93 100644 --- a/ydb/services/yq/grpc_service.h +++ b/ydb/services/yq/grpc_service.h @@ -25,11 +25,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/yq/private_grpc.h b/ydb/services/yq/private_grpc.h index f5107a7c103..63c04d9c068 100644 --- a/ydb/services/yq/private_grpc.h +++ b/ydb/services/yq/private_grpc.h @@ -23,11 +23,11 @@ private: void SetupIncomingRequests(NGrpc::TLoggerPtr logger); NActors::TActorSystem* ActorSystem_; - grpc::ServerCompletionQueue* CQ_ = nullptr; + grpc::ServerCompletionQueue* CQ_ = nullptr; TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_; NActors::TActorId GRpcRequestProxyId_; - NGrpc::TGlobalLimiter* Limiter_ = nullptr; + NGrpc::TGlobalLimiter* Limiter_ = nullptr; }; } // namespace NGRpcService diff --git a/ydb/services/yq/ut_integration/ya.make b/ydb/services/yq/ut_integration/ya.make index bc62df45634..9a86b374fbd 100644 --- a/ydb/services/yq/ut_integration/ya.make +++ b/ydb/services/yq/ut_integration/ya.make @@ -8,7 +8,7 @@ OWNER( FORK_SUBTESTS() -SIZE(MEDIUM) +SIZE(MEDIUM) SRCS( ut_utils.cpp diff --git a/ydb/services/yq/ut_integration/yq_ut.cpp b/ydb/services/yq/ut_integration/yq_ut.cpp index d4a35f195e8..ff461b9b463 100644 --- a/ydb/services/yq/ut_integration/yq_ut.cpp +++ b/ydb/services/yq/ut_integration/yq_ut.cpp @@ -380,10 +380,10 @@ Y_UNIT_TEST_SUITE(Yq_1) { TString location = TStringBuilder() << "localhost:" << grpc; auto driver = TDriver(TDriverConfig().SetEndpoint(location).SetAuthToken("root@builtin")); NYdb::NYq::TClient client(driver); - const size_t conns = 3; + const size_t conns = 3; const auto folderId = TString(__func__) + "folder_id"; {//CreateConnections - for (size_t i = 0; i < conns - 1; ++i) { + for (size_t i = 0; i < conns - 1; ++i) { const auto request = ::NYq::TCreateConnectionBuilder() .SetName("testdb" + ToString(i)) .CreateYdb("FakeDatabaseId", "") @@ -393,16 +393,16 @@ 
Y_UNIT_TEST_SUITE(Yq_1) { .ExtractValueSync(); UNIT_ASSERT_C(result.GetStatus() == EStatus::SUCCESS, result.GetIssues().ToString()); } - - // yds - const auto request = ::NYq::TCreateConnectionBuilder() - .SetName("testdb2") - .CreateDataStreams("FakeDatabaseId", "") // We can use the same db in yds and ydb - .Build(); - const auto result = client + + // yds + const auto request = ::NYq::TCreateConnectionBuilder() + .SetName("testdb2") + .CreateDataStreams("FakeDatabaseId", "") // We can use the same db in yds and ydb + .Build(); + const auto result = client .CreateConnection(request, CreateYqSettings<TCreateConnectionSettings>(folderId)) - .ExtractValueSync(); - UNIT_ASSERT_C(result.GetStatus() == EStatus::SUCCESS, result.GetIssues().ToString()); + .ExtractValueSync(); + UNIT_ASSERT_C(result.GetStatus() == EStatus::SUCCESS, result.GetIssues().ToString()); } { @@ -424,11 +424,11 @@ Y_UNIT_TEST_SUITE(Yq_1) { UNIT_ASSERT_VALUES_EQUAL(content.name(), "testdb" + ToString(i)); UNIT_ASSERT_VALUES_EQUAL(meta.created_by(), "root@builtin"); UNIT_ASSERT_VALUES_EQUAL(meta.modified_by(), "root@builtin"); - if (i < 2) { - UNIT_ASSERT_C(content.setting().has_ydb_database(), content); - } else { - UNIT_ASSERT_C(content.setting().has_data_streams(), content); - } + if (i < 2) { + UNIT_ASSERT_C(content.setting().has_ydb_database(), content); + } else { + UNIT_ASSERT_C(content.setting().has_data_streams(), content); + } i++; } } @@ -995,4 +995,4 @@ Y_UNIT_TEST_SUITE(PrivateApi) { UNIT_ASSERT_C(result, "the execution of the query did not end within the time limit"); } } -} +} diff --git a/ydb/tests/functional/sqs/sqs_matchers.py b/ydb/tests/functional/sqs/sqs_matchers.py index a8bb8508fc9..783a8ba43f5 100644 --- a/ydb/tests/functional/sqs/sqs_matchers.py +++ b/ydb/tests/functional/sqs/sqs_matchers.py @@ -52,7 +52,7 @@ class ReadResponseMatcher(BaseMatcher): self.__message_count_matcher = less_than_or_equal_to(messages_count) return self - def with_n_or_more_messages(self, messages_count): + def with_n_or_more_messages(self, messages_count): self.__message_count_matcher = greater_than_or_equal_to(messages_count) return self diff --git a/ydb/tests/functional/sqs/sqs_requests_client.py b/ydb/tests/functional/sqs/sqs_requests_client.py index 6b7545503f2..fc097566a10 100644 --- a/ydb/tests/functional/sqs/sqs_requests_client.py +++ b/ydb/tests/functional/sqs/sqs_requests_client.py @@ -15,7 +15,7 @@ _author__ = 'komels@yandex-team.ru' logger = logging.getLogger(__name__) DEFAULT_DATE = '20180101' -REQUEST_TIMEOUT = 180 # teamcity is very slow +REQUEST_TIMEOUT = 180 # teamcity is very slow def to_bytes(v): @@ -56,28 +56,28 @@ class SqsMessageAttribute(object): self.type = attr_type -class SqsSendMessageParams(object): - def __init__( - self, message_body, - delay_seconds=None, attributes=None, deduplication_id=None, group_id=None - ): - self.message_body = message_body - self.delay_seconds = delay_seconds - self.attributes = attributes - self.deduplication_id = deduplication_id - self.group_id = group_id - - -class SqsChangeMessageVisibilityParams(object): - def __init__( - self, - receipt_handle, - visibility_timeout - ): - self.receipt_handle = receipt_handle - self.visibility_timeout = visibility_timeout - - +class SqsSendMessageParams(object): + def __init__( + self, message_body, + delay_seconds=None, attributes=None, deduplication_id=None, group_id=None + ): + self.message_body = message_body + self.delay_seconds = delay_seconds + self.attributes = attributes + self.deduplication_id = deduplication_id + 
self.group_id = group_id + + +class SqsChangeMessageVisibilityParams(object): + def __init__( + self, + receipt_handle, + visibility_timeout + ): + self.receipt_handle = receipt_handle + self.visibility_timeout = visibility_timeout + + class SqsHttpApi(object): def __init__(self, server, port, user, raise_on_error=False, timeout=REQUEST_TIMEOUT, security_token=None, force_private=False, iam_token=None, folder_id=None): self.__auth_headers = auth_headers(user, security_token, iam_token) @@ -88,21 +88,21 @@ class SqsHttpApi(object): "{}:{}".format(server, port), headers=auth_headers(user, security_token, iam_token) ) - self.__private_request = requests.Request( - 'POST', - "{}:{}/private".format(server, port), + self.__private_request = requests.Request( + 'POST', + "{}:{}/private".format(server, port), headers=auth_headers(user, security_token, iam_token) - ) + ) self.__session = requests.Session() self.__raise_on_error = raise_on_error - self.__user = user - self.__timeout = timeout + self.__user = user + self.__timeout = timeout self.__security_token = security_token assert(isinstance(force_private, bool)) self.__force_private = force_private self.__folder_id = folder_id - def execute_request(self, action, extract_result_method, default=None, private=False, **params): + def execute_request(self, action, extract_result_method, default=None, private=False, **params): if params is None: params = {} params['Action'] = action @@ -111,22 +111,22 @@ class SqsHttpApi(object): params['folderId'] = self.__folder_id if self.__force_private or private: - request = self.__private_request - else: - request = self.__request + request = self.__private_request + else: + request = self.__request request.data = urllib.parse.urlencode(params) - logger.debug("Execute request {} {} from user {} with params: {}".format( - request.method, request.url, self.__user, request.data) + logger.debug("Execute request {} {} from user {} with params: {}".format( + request.method, request.url, self.__user, request.data) ) - prep = request.prepare() + prep = request.prepare() try: - response = self.__session.send(prep, timeout=self.__timeout) - except (requests.ConnectionError, requests.exceptions.Timeout) as ex: - logging.debug("Request failed with connection exception {}: {}".format(type(ex), ex)) - if self.__raise_on_error: - raise - else: - response = None + response = self.__session.send(prep, timeout=self.__timeout) + except (requests.ConnectionError, requests.exceptions.Timeout) as ex: + logging.debug("Request failed with connection exception {}: {}".format(type(ex), ex)) + if self.__raise_on_error: + raise + else: + response = None return self._process_response( response, extract_result_method, default @@ -134,13 +134,13 @@ class SqsHttpApi(object): def _process_response(self, response, extract_method, default=None): if response is None: - logging.debug('Returning {} by default'.format(default)) + logging.debug('Returning {} by default'.format(default)) return default if response.status_code != 200: - logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( - response.status_code, response.reason, response.text - )) - # Assert that no internal info will be given to user + logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( + response.status_code, response.reason, response.text + )) + # Assert that no internal info will be given to user assert response.text.find('.cpp:') == -1, 'No internal info should be given to user' if self.__raise_on_error: raise 
RuntimeError( @@ -150,16 +150,16 @@ class SqsHttpApi(object): ) return default - logging.debug('Parsing response: {}'.format(response.text)) + logging.debug('Parsing response: {}'.format(response.text)) result = xmltodict.parse(response.text) try: return extract_method(result) - except (KeyError, TypeError) as ex: - logger.error("Could not process response from SQS: {}. {}: {}".format(result, type(ex), ex)) + except (KeyError, TypeError) as ex: + logger.error("Could not process response from SQS: {}. {}: {}".format(result, type(ex), ex)) return default def create_user(self, username): - return self.execute_request( + return self.execute_request( action='CreateUser', extract_result_method=lambda x: x['CreateUserResponse']['ResponseMetadata'], UserName=username @@ -173,22 +173,22 @@ class SqsHttpApi(object): ) def list_users(self): - return self.execute_request( + return self.execute_request( action='ListUsers', - extract_result_method=lambda x: wrap_in_list(x['ListUsersResponse']['ListUsersResult']['UserName']) + extract_result_method=lambda x: wrap_in_list(x['ListUsersResponse']['ListUsersResult']['UserName']) ) def list_queues(self, name_prefix=None): if name_prefix is not None: - return self.execute_request( + return self.execute_request( action='ListQueues', - extract_result_method=lambda x: wrap_in_list(x['ListQueuesResponse']['ListQueuesResult']['QueueUrl']), default=(), + extract_result_method=lambda x: wrap_in_list(x['ListQueuesResponse']['ListQueuesResult']['QueueUrl']), default=(), QueueNamePrefix=name_prefix ) else: - return self.execute_request( + return self.execute_request( action='ListQueues', - extract_result_method=lambda x: wrap_in_list(x['ListQueuesResponse']['ListQueuesResult']['QueueUrl']), default=(), + extract_result_method=lambda x: wrap_in_list(x['ListQueuesResponse']['ListQueuesResult']['QueueUrl']), default=(), ) def private_count_queues(self): @@ -202,16 +202,16 @@ class SqsHttpApi(object): # if is_fifo and not queue_name.endswith('.fifo'): # return None if attributes is None: - attributes = dict() + attributes = dict() if is_fifo: - attributes = dict(attributes) # copy + attributes = dict(attributes) # copy attributes['FifoQueue'] = 'true' params = {} for i, (k, v) in enumerate(attributes.items()): params['Attribute.{id}.Name'.format(id=i+1)] = k params['Attribute.{id}.Value'.format(id=i + 1)] = v - return self.execute_request( + return self.execute_request( action='CreateQueue', extract_result_method=lambda x: x['CreateQueueResponse']['CreateQueueResult']['QueueUrl'], QueueName=queue_name, @@ -219,47 +219,47 @@ class SqsHttpApi(object): ) def delete_queue(self, queue_url): - return self.execute_request( + return self.execute_request( action='DeleteQueue', extract_result_method=lambda x: x['DeleteQueueResponse']['ResponseMetadata']['RequestId'], QueueUrl=queue_url ) - def private_delete_queue_batch(self, queue_urls): - args = {} - for i, url in enumerate(queue_urls): - args["DeleteQueueBatchRequestEntry.{}.Id".format(i+1)] = i - args["DeleteQueueBatchRequestEntry.{}.QueueUrl".format(i+1)] = url - - return self.execute_request( - action='DeleteQueueBatch', - private=True, - extract_result_method=lambda x: x['DeleteQueueBatchResponse']['DeleteQueueBatchResult'], - **args - ) - - def purge_queue(self, queue_url): - return self.execute_request( - action='PurgeQueue', - extract_result_method=lambda x: x['PurgeQueueResponse']['ResponseMetadata']['RequestId'], - QueueUrl=queue_url - ) - - def private_purge_queue_batch(self, queue_urls): - args = {} - for i, url in 
enumerate(queue_urls): - args["PurgeQueueBatchRequestEntry.{}.Id".format(i+1)] = i - args["PurgeQueueBatchRequestEntry.{}.QueueUrl".format(i+1)] = url - - return self.execute_request( - action='PurgeQueueBatch', - private=True, - extract_result_method=lambda x: x['PurgeQueueBatchResponse']['PurgeQueueBatchResult'], - **args - ) - + def private_delete_queue_batch(self, queue_urls): + args = {} + for i, url in enumerate(queue_urls): + args["DeleteQueueBatchRequestEntry.{}.Id".format(i+1)] = i + args["DeleteQueueBatchRequestEntry.{}.QueueUrl".format(i+1)] = url + + return self.execute_request( + action='DeleteQueueBatch', + private=True, + extract_result_method=lambda x: x['DeleteQueueBatchResponse']['DeleteQueueBatchResult'], + **args + ) + + def purge_queue(self, queue_url): + return self.execute_request( + action='PurgeQueue', + extract_result_method=lambda x: x['PurgeQueueResponse']['ResponseMetadata']['RequestId'], + QueueUrl=queue_url + ) + + def private_purge_queue_batch(self, queue_urls): + args = {} + for i, url in enumerate(queue_urls): + args["PurgeQueueBatchRequestEntry.{}.Id".format(i+1)] = i + args["PurgeQueueBatchRequestEntry.{}.QueueUrl".format(i+1)] = url + + return self.execute_request( + action='PurgeQueueBatch', + private=True, + extract_result_method=lambda x: x['PurgeQueueBatchResponse']['PurgeQueueBatchResult'], + **args + ) + def get_queue_url(self, queue_name): - return self.execute_request( + return self.execute_request( action='GetQueueUrl', extract_result_method=lambda x: x['GetQueueUrlResponse']['GetQueueUrlResult']['QueueUrl'], QueueName=queue_name @@ -272,47 +272,47 @@ class SqsHttpApi(object): QueueUrl=queue_url ) - def get_queue_attributes(self, queue_url, attributes=['All']): - params = {} + def get_queue_attributes(self, queue_url, attributes=['All']): + params = {} for i in six.moves.range(len(attributes)): - params['AttributeName.{}'.format(i + 1)] = attributes[i] - attrib_list = self.execute_request( - action='GetQueueAttributes', - extract_result_method=lambda x: x['GetQueueAttributesResponse']['GetQueueAttributesResult']['Attribute'], - QueueUrl=queue_url, - **params - ) - result = {} + params['AttributeName.{}'.format(i + 1)] = attributes[i] + attrib_list = self.execute_request( + action='GetQueueAttributes', + extract_result_method=lambda x: x['GetQueueAttributesResponse']['GetQueueAttributesResult']['Attribute'], + QueueUrl=queue_url, + **params + ) + result = {} if attrib_list is None: return result - for attr in wrap_in_list(attrib_list): - result[attr['Name']] = attr['Value'] - return result - - def private_get_queue_attributes_batch(self, queue_urls): - args = { - 'AttributeName.1': 'All', - } - for i, url in enumerate(queue_urls): - args["GetQueueAttributesBatchRequestEntry.{}.Id".format(i+1)] = i - args["GetQueueAttributesBatchRequestEntry.{}.QueueUrl".format(i+1)] = url - result_list = self.execute_request( - action='GetQueueAttributesBatch', - private=True, - extract_result_method=lambda x: x['GetQueueAttributesBatchResponse']['GetQueueAttributesBatchResult'], - **args - ) - if 'GetQueueAttributesBatchResultEntry' in result_list: - entries = result_list['GetQueueAttributesBatchResultEntry'] - for entry in wrap_in_list(entries): - result = {} - for attr in wrap_in_list(entry['Attribute']): - result[attr['Name']] = attr['Value'] - entry['__AttributesDict'] = result - - return result_list - + for attr in wrap_in_list(attrib_list): + result[attr['Name']] = attr['Value'] + return result + + def private_get_queue_attributes_batch(self, queue_urls): 
+ args = { + 'AttributeName.1': 'All', + } + for i, url in enumerate(queue_urls): + args["GetQueueAttributesBatchRequestEntry.{}.Id".format(i+1)] = i + args["GetQueueAttributesBatchRequestEntry.{}.QueueUrl".format(i+1)] = url + result_list = self.execute_request( + action='GetQueueAttributesBatch', + private=True, + extract_result_method=lambda x: x['GetQueueAttributesBatchResponse']['GetQueueAttributesBatchResult'], + **args + ) + if 'GetQueueAttributesBatchResultEntry' in result_list: + entries = result_list['GetQueueAttributesBatchResultEntry'] + for entry in wrap_in_list(entries): + result = {} + for attr in wrap_in_list(entry['Attribute']): + result[attr['Name']] = attr['Value'] + entry['__AttributesDict'] = result + + return result_list + def modify_permissions(self, action, subject, path, permissions): args = { 'Path': path, @@ -337,20 +337,20 @@ class SqsHttpApi(object): **args ) - def set_queue_attributes(self, queue_url, attributes): - params = {} - i = 1 - for attr in attributes: - params['Attribute.{}.Name'.format(i)] = attr - params['Attribute.{}.Value'.format(i)] = attributes[attr] - i += 1 - return self.execute_request( - action='SetQueueAttributes', - extract_result_method=lambda x: x['SetQueueAttributesResponse']['ResponseMetadata']['RequestId'], - QueueUrl=queue_url, - **params - ) - + def set_queue_attributes(self, queue_url, attributes): + params = {} + i = 1 + for attr in attributes: + params['Attribute.{}.Name'.format(i)] = attr + params['Attribute.{}.Value'.format(i)] = attributes[attr] + i += 1 + return self.execute_request( + action='SetQueueAttributes', + extract_result_method=lambda x: x['SetQueueAttributesResponse']['ResponseMetadata']['RequestId'], + QueueUrl=queue_url, + **params + ) + def send_message( self, queue_url, message_body, delay_seconds=None, attributes=None, deduplication_id=None, group_id=None @@ -363,11 +363,11 @@ class SqsHttpApi(object): 'MessageBody': message_body, } if delay_seconds is not None: - params['DelaySeconds'] = str(delay_seconds) + params['DelaySeconds'] = str(delay_seconds) if deduplication_id is not None: - params['MessageDeduplicationId'] = str(deduplication_id) + params['MessageDeduplicationId'] = str(deduplication_id) if group_id is not None: - params['MessageGroupId'] = str(group_id) + params['MessageGroupId'] = str(group_id) if attributes is not None: attr_id_counter = itertools.count() @@ -376,75 +376,75 @@ class SqsHttpApi(object): raise ValueError("Unknown attribute type: {}".format(attr.type)) _id = next(attr_id_counter) - params['MessageAttribute.{id}.Name'.format(id=_id)] = attr.name - params['MessageAttribute.{id}.Value.DataType'.format(id=_id)] = attr.type - params['MessageAttribute.{id}.Value.{value_type}'.format( + params['MessageAttribute.{id}.Name'.format(id=_id)] = attr.name + params['MessageAttribute.{id}.Value.DataType'.format(id=_id)] = attr.type + params['MessageAttribute.{id}.Value.{value_type}'.format( id=_id, value_type=SQS_ATTRIBUTE_TYPES[attr.type] )] = attr.value - return self.execute_request( + return self.execute_request( action='SendMessage', extract_result_method=lambda x: x['SendMessageResponse']['SendMessageResult']['MessageId'], **params ) - def send_message_batch(self, queue_url, send_message_params_list): - params = { - 'QueueUrl': queue_url, - } - for i, entry in enumerate(send_message_params_list): - params['SendMessageBatchRequestEntry.{i}.Id'.format(i=i+1)] = str(i) - params['SendMessageBatchRequestEntry.{i}.MessageBody'.format(i=i+1)] = entry.message_body - if entry.delay_seconds is not 
None: - params['SendMessageBatchRequestEntry.{i}.DelaySeconds'.format(i=i+1)] = str(entry.delay_seconds) - if entry.deduplication_id is not None: - params['SendMessageBatchRequestEntry.{i}.MessageDeduplicationId'.format(i=i+1)] = str(entry.deduplication_id) - if entry.group_id is not None: - params['SendMessageBatchRequestEntry.{i}.MessageGroupId'.format(i=i+1)] = str(entry.group_id) - - if entry.attributes is not None: - attr_id_counter = itertools.count() - for attr in entry.attributes: - if attr.type not in SQS_ATTRIBUTE_TYPES: - raise ValueError("Unknown attribute type: {}".format(attr.type)) - + def send_message_batch(self, queue_url, send_message_params_list): + params = { + 'QueueUrl': queue_url, + } + for i, entry in enumerate(send_message_params_list): + params['SendMessageBatchRequestEntry.{i}.Id'.format(i=i+1)] = str(i) + params['SendMessageBatchRequestEntry.{i}.MessageBody'.format(i=i+1)] = entry.message_body + if entry.delay_seconds is not None: + params['SendMessageBatchRequestEntry.{i}.DelaySeconds'.format(i=i+1)] = str(entry.delay_seconds) + if entry.deduplication_id is not None: + params['SendMessageBatchRequestEntry.{i}.MessageDeduplicationId'.format(i=i+1)] = str(entry.deduplication_id) + if entry.group_id is not None: + params['SendMessageBatchRequestEntry.{i}.MessageGroupId'.format(i=i+1)] = str(entry.group_id) + + if entry.attributes is not None: + attr_id_counter = itertools.count() + for attr in entry.attributes: + if attr.type not in SQS_ATTRIBUTE_TYPES: + raise ValueError("Unknown attribute type: {}".format(attr.type)) + _id = next(attr_id_counter) - params['SendMessageBatchRequestEntry.{i}.MessageAttribute.{id}.Name'.format(i=i+1, id=_id)] = attr.name - params['SendMessageBatchRequestEntry.{i}.MessageAttribute.{id}.Value.DataType'.format(i=i+1, id=_id)] = attr.type - params['SendMessageBatchRequestEntry.{i}.MessageAttribute.{id}.Value.{value_type}'.format( - i=i+1, id=_id, value_type=SQS_ATTRIBUTE_TYPES[attr.type] - )] = attr.value - - resp = self.execute_request( - action='SendMessageBatch', - extract_result_method=lambda x: x['SendMessageBatchResponse'], - **params - ) + params['SendMessageBatchRequestEntry.{i}.MessageAttribute.{id}.Name'.format(i=i+1, id=_id)] = attr.name + params['SendMessageBatchRequestEntry.{i}.MessageAttribute.{id}.Value.DataType'.format(i=i+1, id=_id)] = attr.type + params['SendMessageBatchRequestEntry.{i}.MessageAttribute.{id}.Value.{value_type}'.format( + i=i+1, id=_id, value_type=SQS_ATTRIBUTE_TYPES[attr.type] + )] = attr.value + + resp = self.execute_request( + action='SendMessageBatch', + extract_result_method=lambda x: x['SendMessageBatchResponse'], + **params + ) result = [dict() for i in six.moves.range(len(send_message_params_list))] - results = resp.get('SendMessageBatchResult') - logging.debug('results: {}'.format(results)) - if results: - entries = results.get('SendMessageBatchResultEntry') - logging.debug('entries: {}'.format(entries)) - if entries: - for res in wrap_in_list(entries): - i = int(res['Id']) - assert i < len(result) and i >= 0 - result[i]['SendMessageBatchResultEntry'] = res - - errors = results.get('BatchResultErrorEntry') - logging.debug('errors: {}'.format(errors)) - if errors: - for err in wrap_in_list(errors): - i = int(err['Id']) - assert i < len(result) and i >= 0 - result[i]['BatchResultErrorEntry'] = err - return result - + results = resp.get('SendMessageBatchResult') + logging.debug('results: {}'.format(results)) + if results: + entries = results.get('SendMessageBatchResultEntry') + 
logging.debug('entries: {}'.format(entries)) + if entries: + for res in wrap_in_list(entries): + i = int(res['Id']) + assert i < len(result) and i >= 0 + result[i]['SendMessageBatchResultEntry'] = res + + errors = results.get('BatchResultErrorEntry') + logging.debug('errors: {}'.format(errors)) + if errors: + for err in wrap_in_list(errors): + i = int(err['Id']) + assert i < len(result) and i >= 0 + result[i]['BatchResultErrorEntry'] = err + return result + def receive_message( self, queue_url, max_number_of_messages=None, wait_timeout=None, - visibility_timeout=None, meta_attributes=None, message_attributes=None, - receive_request_attempt_id=None + visibility_timeout=None, meta_attributes=None, message_attributes=None, + receive_request_attempt_id=None ): params = {'QueueUrl': queue_url} if max_number_of_messages is not None: @@ -466,10 +466,10 @@ class SqsHttpApi(object): _id = next(attr_id_counter) params['MessageAttributeName.{}'.format(_id)] = attr - if receive_request_attempt_id is not None: - params['ReceiveRequestAttemptId'] = receive_request_attempt_id - - return self.execute_request( + if receive_request_attempt_id is not None: + params['ReceiveRequestAttemptId'] = receive_request_attempt_id + + return self.execute_request( action='ReceiveMessage', extract_result_method=lambda x: wrap_in_list( x['ReceiveMessageResponse']['ReceiveMessageResult']['Message'] @@ -478,7 +478,7 @@ class SqsHttpApi(object): ) def delete_message(self, queue_url, handle): - return self.execute_request( + return self.execute_request( action='DeleteMessage', extract_result_method=lambda x: x['DeleteMessageResponse']['ResponseMetadata']['RequestId'], QueueUrl=queue_url, ReceiptHandle=handle @@ -487,76 +487,76 @@ class SqsHttpApi(object): def delete_message_batch(self, queue_url, message_handles): args = {} for i, handle in enumerate(message_handles): - args["DeleteMessageBatchRequestEntry.{}.Id".format(i+1)] = str(i) + args["DeleteMessageBatchRequestEntry.{}.Id".format(i+1)] = str(i) args["DeleteMessageBatchRequestEntry.{}.ReceiptHandle".format(i+1)] = handle - resp = self.execute_request( + resp = self.execute_request( action='DeleteMessageBatch', - extract_result_method=lambda x: x['DeleteMessageBatchResponse'], + extract_result_method=lambda x: x['DeleteMessageBatchResponse'], QueueUrl=queue_url, **args ) result = [dict() for i in six.moves.range(len(message_handles))] - results = resp.get('DeleteMessageBatchResult') - logging.debug('results: {}'.format(results)) - if results: - entries = results.get('DeleteMessageBatchResultEntry') - logging.debug('entries: {}'.format(entries)) - if entries: - for res in wrap_in_list(entries): - i = int(res['Id']) - assert i < len(result) and i >= 0 - result[i]['DeleteMessageBatchResultEntry'] = res - - errors = results.get('BatchResultErrorEntry') - logging.debug('errors: {}'.format(errors)) - if errors: - for err in wrap_in_list(errors): - i = int(err['Id']) - assert i < len(result) and i >= 0 - result[i]['BatchResultErrorEntry'] = err - return result - - def change_message_visibility(self, queue_url, handle, visibility_timeout): - return self.execute_request( - action='ChangeMessageVisibility', - extract_result_method=lambda x: x['ChangeMessageVisibilityResponse']['ResponseMetadata']['RequestId'], - QueueUrl=queue_url, ReceiptHandle=handle, VisibilityTimeout=visibility_timeout - ) - - def change_message_visibility_batch(self, queue_url, change_message_visibility_params_list): - args = {} - for i, params in enumerate(change_message_visibility_params_list): - 
args["ChangeMessageVisibilityBatchRequestEntry.{}.Id".format(i+1)] = str(i) - args["ChangeMessageVisibilityBatchRequestEntry.{}.ReceiptHandle".format(i+1)] = params.receipt_handle - args["ChangeMessageVisibilityBatchRequestEntry.{}.VisibilityTimeout".format(i+1)] = params.visibility_timeout - - resp = self.execute_request( - action='ChangeMessageVisibilityBatch', - extract_result_method=lambda x: x['ChangeMessageVisibilityBatchResponse'], - QueueUrl=queue_url, **args - ) + results = resp.get('DeleteMessageBatchResult') + logging.debug('results: {}'.format(results)) + if results: + entries = results.get('DeleteMessageBatchResultEntry') + logging.debug('entries: {}'.format(entries)) + if entries: + for res in wrap_in_list(entries): + i = int(res['Id']) + assert i < len(result) and i >= 0 + result[i]['DeleteMessageBatchResultEntry'] = res + + errors = results.get('BatchResultErrorEntry') + logging.debug('errors: {}'.format(errors)) + if errors: + for err in wrap_in_list(errors): + i = int(err['Id']) + assert i < len(result) and i >= 0 + result[i]['BatchResultErrorEntry'] = err + return result + + def change_message_visibility(self, queue_url, handle, visibility_timeout): + return self.execute_request( + action='ChangeMessageVisibility', + extract_result_method=lambda x: x['ChangeMessageVisibilityResponse']['ResponseMetadata']['RequestId'], + QueueUrl=queue_url, ReceiptHandle=handle, VisibilityTimeout=visibility_timeout + ) + + def change_message_visibility_batch(self, queue_url, change_message_visibility_params_list): + args = {} + for i, params in enumerate(change_message_visibility_params_list): + args["ChangeMessageVisibilityBatchRequestEntry.{}.Id".format(i+1)] = str(i) + args["ChangeMessageVisibilityBatchRequestEntry.{}.ReceiptHandle".format(i+1)] = params.receipt_handle + args["ChangeMessageVisibilityBatchRequestEntry.{}.VisibilityTimeout".format(i+1)] = params.visibility_timeout + + resp = self.execute_request( + action='ChangeMessageVisibilityBatch', + extract_result_method=lambda x: x['ChangeMessageVisibilityBatchResponse'], + QueueUrl=queue_url, **args + ) result = [dict() for i in six.moves.range(len(change_message_visibility_params_list))] - results = resp.get('ChangeMessageVisibilityBatchResult') - logging.debug('results: {}'.format(results)) - if results: - entries = results.get('ChangeMessageVisibilityBatchResultEntry') - logging.debug('entries: {}'.format(entries)) - if entries: - for res in wrap_in_list(entries): - i = int(res['Id']) - assert i < len(result) and i >= 0 - result[i]['ChangeMessageVisibilityBatchResultEntry'] = res - - errors = results.get('BatchResultErrorEntry') - logging.debug('errors: {}'.format(errors)) - if errors: - for err in wrap_in_list(errors): - i = int(err['Id']) - assert i < len(result) and i >= 0 - result[i]['BatchResultErrorEntry'] = err - return result - - + results = resp.get('ChangeMessageVisibilityBatchResult') + logging.debug('results: {}'.format(results)) + if results: + entries = results.get('ChangeMessageVisibilityBatchResultEntry') + logging.debug('entries: {}'.format(entries)) + if entries: + for res in wrap_in_list(entries): + i = int(res['Id']) + assert i < len(result) and i >= 0 + result[i]['ChangeMessageVisibilityBatchResultEntry'] = res + + errors = results.get('BatchResultErrorEntry') + logging.debug('errors: {}'.format(errors)) + if errors: + for err in wrap_in_list(errors): + i = int(err['Id']) + assert i < len(result) and i >= 0 + result[i]['BatchResultErrorEntry'] = err + return result + + class SqsHttpMinigunApi(SqsHttpApi): 
def _process_response(self, response, extract_method, default=None): if response is None: diff --git a/ydb/tests/functional/sqs/sqs_test_base.py b/ydb/tests/functional/sqs/sqs_test_base.py index 05d5303f644..68854cbcf60 100644 --- a/ydb/tests/functional/sqs/sqs_test_base.py +++ b/ydb/tests/functional/sqs/sqs_test_base.py @@ -3,12 +3,12 @@ import itertools import logging import time -import requests -import json -import uuid -import re +import requests +import json +import uuid +import re import socket -from hamcrest import assert_that, equal_to, not_none, none, greater_than, less_than_or_equal_to, any_of, not_ +from hamcrest import assert_that, equal_to, not_none, none, greater_than, less_than_or_equal_to, any_of, not_ import ydb.tests.library.common.yatest_common as yatest_common @@ -16,24 +16,24 @@ from ydb.tests.library.harness.kikimr_cluster import kikimr_cluster_factory from ydb.tests.library.harness.kikimr_config import KikimrConfigGenerator from ydb.tests.library.harness.util import LogLevels from ydb.tests.library.sqs.tables import create_all_tables as create_all_sqs_tables - + from sqs_requests_client import SqsHttpApi import ydb from concurrent import futures - -from sqs_matchers import ReadResponseMatcher + +from sqs_matchers import ReadResponseMatcher DEFAULT_VISIBILITY_TIMEOUT = 30 logger = logging.getLogger(__name__) -IS_FIFO_PARAMS = { - 'argnames': 'is_fifo', - 'argvalues': [True, False], - 'ids': ['fifo', 'std'], -} +IS_FIFO_PARAMS = { + 'argnames': 'is_fifo', + 'argvalues': [True, False], + 'ids': ['fifo', 'std'], +} POLLING_PARAMS = { 'argnames': 'polling_wait_timeout', @@ -41,27 +41,27 @@ POLLING_PARAMS = { 'ids': ['short_polling', 'long_polling'], } -STOP_NODE_PARAMS = { - 'argnames': 'stop_node', - 'argvalues': [True, False], - 'ids': ['stop_node', 'kick_tablets'], -} - -VISIBILITY_CHANGE_METHOD_PARAMS = { - 'argnames': 'delete_message', - 'argvalues': [True, False], - 'ids': ['with_delete_message', 'with_change_visibility'], -} - - +STOP_NODE_PARAMS = { + 'argnames': 'stop_node', + 'argvalues': [True, False], + 'ids': ['stop_node', 'kick_tablets'], +} + +VISIBILITY_CHANGE_METHOD_PARAMS = { + 'argnames': 'delete_message', + 'argvalues': [True, False], + 'ids': ['with_delete_message', 'with_change_visibility'], +} + + def get_sqs_client_path(): return yatest_common.binary_path("ydb/core/ymq/client/bin/sqs") -def get_kikimr_driver_path(): +def get_kikimr_driver_path(): return yatest_common.binary_path("ydb/apps/ydbd/ydbd") - - + + def to_bytes(v): if v is None: return v @@ -95,100 +95,100 @@ def wait_can_list_users(api): return False -def get_fqdn(): - # the same implementation as - # https://a.yandex-team.ru/arc/trunk/arcadia/util/system/hostname.cpp?rev=3541264#L36 +def get_fqdn(): + # the same implementation as + # https://a.yandex-team.ru/arc/trunk/arcadia/util/system/hostname.cpp?rev=3541264#L36 # that is used in ydb. 
- hostname = socket.gethostname() - addrinfo = socket.getaddrinfo(hostname, None, socket.AF_UNSPEC, 0, 0, socket.AI_CANONNAME) - for ai in addrinfo: - canonname_candidate = ai[3] - if canonname_candidate: - return canonname_candidate - - assert False, 'Failed to get FQDN' - - -def get_test_with_sqs_tenant_installation(base_test_class): - class TestWithTenant(base_test_class): - slot_count = 1 + hostname = socket.gethostname() + addrinfo = socket.getaddrinfo(hostname, None, socket.AF_UNSPEC, 0, 0, socket.AI_CANONNAME) + for ai in addrinfo: + canonname_candidate = ai[3] + if canonname_candidate: + return canonname_candidate + + assert False, 'Failed to get FQDN' + + +def get_test_with_sqs_tenant_installation(base_test_class): + class TestWithTenant(base_test_class): + slot_count = 1 database = '/Root/TenantSQS' sqs_root = '/Root/TenantSQS' - - @classmethod - def _init_cluster(cls, cluster, config_generator): - cluster.create_database( + + @classmethod + def _init_cluster(cls, cluster, config_generator): + cluster.create_database( cls.database, - storage_pool_units_count={ - 'hdd': 1 - } - ) + storage_pool_units_count={ + 'hdd': 1 + } + ) cluster.register_and_start_slots(cls.database, count=1) cluster.wait_tenant_up(cls.database) - super(TestWithTenant, cls)._init_cluster(cluster, config_generator) - - return TestWithTenant - - -def get_test_with_sqs_installation_by_path(base_test_class): - class TestWithPath(base_test_class): + super(TestWithTenant, cls)._init_cluster(cluster, config_generator) + + return TestWithTenant + + +def get_test_with_sqs_installation_by_path(base_test_class): + class TestWithPath(base_test_class): database = '/Root' sqs_root = '/Root/PathSQS' - return TestWithPath - - + return TestWithPath + + class KikimrSqsTestBase(object): - erasure = None - slot_count = 0 + erasure = None + slot_count = 0 database = '/Root' sqs_root = '/Root/SQS' use_in_memory_pdisks = True @classmethod def setup_class(cls): - cls.cluster, cls.config_generator = cls._setup_cluster() - cls.sqs_ports = [] - if cls.slot_count: + cls.cluster, cls.config_generator = cls._setup_cluster() + cls.sqs_ports = [] + if cls.slot_count: cls.cluster_nodes = list(cls.cluster.slots.values()) - else: + else: cls.cluster_nodes = list(cls.cluster.nodes.values()) - cls.cluster_nodes_count = len(cls.cluster_nodes) + cls.cluster_nodes_count = len(cls.cluster_nodes) for node in cls.cluster_nodes: cls.sqs_ports.append(node.sqs_port) - cls.sqs_port = cls.sqs_ports[0] + cls.sqs_port = cls.sqs_ports[0] cls.server_fqdn = get_fqdn() def setup_method(self, method=None): - logging.debug('Test started: {}'.format(str(method.__name__))) - logging.debug("Kikimr logs dir: {}".format(self.cluster.slots[1].cwd if self.slot_count else self.cluster.nodes[1].cwd)) - - # Start all nodes in case of previous test with killed nodes + logging.debug('Test started: {}'.format(str(method.__name__))) + logging.debug("Kikimr logs dir: {}".format(self.cluster.slots[1].cwd if self.slot_count else self.cluster.nodes[1].cwd)) + + # Start all nodes in case of previous test with killed nodes for node_index in range(len(self.cluster.nodes)): - self.cluster.nodes[node_index + 1].start() # start if not alive - + self.cluster.nodes[node_index + 1].start() # start if not alive + for slot_index in range(len(self.cluster.slots)): - self.cluster.slots[slot_index + 1].start() # start if not alive - + self.cluster.slots[slot_index + 1].start() # start if not alive + for slot_index in range(len(self.cluster.slots)): - self._enable_tablets_on_node(slot_index) - - 
grpc_port = self.cluster.slots[1].grpc_port if self.slot_count else self.cluster.nodes[1].grpc_port - self._sqs_server_opts = ['-s', 'localhost', '-p', str(grpc_port)] - test_name = str(method.__name__)[5:] - self._username = 'U_' + test_name - self.queue_name = 'Q_{}_{}'.format(test_name, str(uuid.uuid1())) - max_queue_name_length = 80 - len('.fifo') - if len(self.queue_name) > max_queue_name_length: - self.queue_name = self.queue_name[:max_queue_name_length] + self._enable_tablets_on_node(slot_index) + + grpc_port = self.cluster.slots[1].grpc_port if self.slot_count else self.cluster.nodes[1].grpc_port + self._sqs_server_opts = ['-s', 'localhost', '-p', str(grpc_port)] + test_name = str(method.__name__)[5:] + self._username = 'U_' + test_name + self.queue_name = 'Q_{}_{}'.format(test_name, str(uuid.uuid1())) + max_queue_name_length = 80 - len('.fifo') + if len(self.queue_name) > max_queue_name_length: + self.queue_name = self.queue_name[:max_queue_name_length] self._msg_body_template = self._username + '-{}' self._setup_user(self._username) - self._sqs_apis = [] + self._sqs_apis = [] for node in self.cluster_nodes: self._sqs_apis.append( SqsHttpApi( - 'localhost', + 'localhost', node.sqs_port, self._username, raise_on_error=True, @@ -208,10 +208,10 @@ class KikimrSqsTestBase(object): for f in fs: f.result() - self._sqs_api = self._sqs_apis[0] - - self._driver = self._make_kikimr_driver() + self._sqs_api = self._sqs_apis[0] + self._driver = self._make_kikimr_driver() + self.counter = itertools.count() self.message_ids = [] self.read_result = [] @@ -219,9 +219,9 @@ class KikimrSqsTestBase(object): self.seq_no = 0 def teardown_method(self, method=None): - self.check_no_queues_table(self._username) - self._driver.stop() - + self.check_no_queues_table(self._username) + self._driver.stop() + logging.debug( 'Test finished: {}'.format( str( @@ -230,31 +230,31 @@ class KikimrSqsTestBase(object): ) ) - def check_all_users_queues_tables_consistency(self): + def check_all_users_queues_tables_consistency(self): users = [entry.name for entry in self._driver.scheme_client.list_directory(self.sqs_root).children] - for user in users: - if user == '.AtomicCounter' or user == '.Settings' or user == '.Queues': - continue - self.check_no_queues_table(user) - - def check_no_queues_table(self, username): - raised = False - try: + for user in users: + if user == '.AtomicCounter' or user == '.Settings' or user == '.Queues': + continue + self.check_no_queues_table(user) + + def check_no_queues_table(self, username): + raised = False + try: self._driver.scheme_client.describe_path('{}/{}/Queues'.format(self.sqs_root, username)) - except: - raised = True # Expect SchemeError or at least ConnectionLost in tests with node killings - - assert_that(raised) - + except: + raised = True # Expect SchemeError or at least ConnectionLost in tests with node killings + + assert_that(raised) + @classmethod def teardown_class(cls): if hasattr(cls, 'cluster'): cls.cluster.stop() - @classmethod - def _setup_config_generator(cls): + @classmethod + def _setup_config_generator(cls): config_generator = KikimrConfigGenerator( - erasure=cls.erasure, + erasure=cls.erasure, use_in_memory_pdisks=cls.use_in_memory_pdisks, additional_log_configs={'SQS': LogLevels.INFO}, enable_sqs=True, @@ -269,10 +269,10 @@ class KikimrSqsTestBase(object): config_generator.yaml_config['sqs_config']['create_legacy_duration_counters'] = False config_generator.yaml_config['sqs_config']['validate_message_body'] = True - return config_generator - - @classmethod - 
def _init_cluster(cls, cluster, config_generator): + return config_generator + + @classmethod + def _init_cluster(cls, cluster, config_generator): driver_config = ydb.DriverConfig( "%s:%s" % (cluster.nodes[1].host, cluster.nodes[1].port), cls.database @@ -283,52 +283,52 @@ class KikimrSqsTestBase(object): with ydb.SessionPool(driver, size=1) as pool: with pool.checkout() as session: create_all_sqs_tables(cls.sqs_root, driver, session) - cls.create_metauser(cluster, config_generator) - - @classmethod - def create_metauser(cls, cluster, config_generator): - grpc_port = cluster.slots[1].grpc_port if cls.slot_count else cluster.nodes[1].grpc_port - cmd = [ - get_sqs_client_path(), - 'user', - '-u', 'metauser', - '-n', 'metauser', - '-s', 'localhost', - '-p', str(grpc_port) - ] - yatest_common.execute(cmd) - - @classmethod - def _setup_cluster(cls): - config_generator = cls._setup_config_generator() - cluster = kikimr_cluster_factory(config_generator) - cluster.start() - cls._init_cluster(cluster, config_generator) - return cluster, config_generator + cls.create_metauser(cluster, config_generator) + + @classmethod + def create_metauser(cls, cluster, config_generator): + grpc_port = cluster.slots[1].grpc_port if cls.slot_count else cluster.nodes[1].grpc_port + cmd = [ + get_sqs_client_path(), + 'user', + '-u', 'metauser', + '-n', 'metauser', + '-s', 'localhost', + '-p', str(grpc_port) + ] + yatest_common.execute(cmd) + + @classmethod + def _setup_cluster(cls): + config_generator = cls._setup_config_generator() + cluster = kikimr_cluster_factory(config_generator) + cluster.start() + cls._init_cluster(cluster, config_generator) + return cluster, config_generator def _setup_user(self, _username, retries_count=20): cmd = [ get_sqs_client_path(), 'user', - '-u', 'metauser', - '-n', _username, + '-u', 'metauser', + '-n', _username, ] + self._sqs_server_opts while retries_count: - logging.debug("Running {}".format(' '.join(cmd))) + logging.debug("Running {}".format(' '.join(cmd))) try: - yatest_common.execute(cmd) - except yatest_common.ExecutionError as ex: - logging.debug("Create user failed: {}. Retrying".format(ex)) + yatest_common.execute(cmd) + except yatest_common.ExecutionError as ex: + logging.debug("Create user failed: {}. 
Retrying".format(ex)) retries_count -= 1 time.sleep(3) else: - return + return raise RuntimeError("Failed to create SQS user") def _create_api_for_user(self, user_name, raise_on_error=True, security_token=None, force_private=False, iam_token=None, folder_id=None): api = SqsHttpApi(self.cluster.nodes[1].host, - self.cluster_nodes[0].sqs_port, - user_name, + self.cluster_nodes[0].sqs_port, + user_name, raise_on_error=raise_on_error, timeout=None, security_token=security_token, @@ -336,38 +336,38 @@ class KikimrSqsTestBase(object): iam_token=iam_token, folder_id=folder_id ) - return api - - def _make_kikimr_driver(self, node_index=0): - if self.slot_count: - port = self.cluster.slots[node_index + 1].port - else: - port = self.cluster.nodes[node_index + 1].port - connection_params = ydb.ConnectionParams("localhost:{}".format(port)) + return api + + def _make_kikimr_driver(self, node_index=0): + if self.slot_count: + port = self.cluster.slots[node_index + 1].port + else: + port = self.cluster.nodes[node_index + 1].port + connection_params = ydb.ConnectionParams("localhost:{}".format(port)) connection_params.set_database(self.database) - driver = ydb.Driver(connection_params) - driver.wait() - return driver - - def _execute_yql_query(self, query_text): - logging.debug('Executing database query: "{}"'.format(query_text)) - retries = 5 - retries_left = retries - while retries_left: - retries_left -= 1 - try: - session = self._driver.table_client.session().create() - data_result_sets = session.transaction().execute(query_text, commit_tx=True) - return data_result_sets + driver = ydb.Driver(connection_params) + driver.wait() + return driver + + def _execute_yql_query(self, query_text): + logging.debug('Executing database query: "{}"'.format(query_text)) + retries = 5 + retries_left = retries + while retries_left: + retries_left -= 1 + try: + session = self._driver.table_client.session().create() + data_result_sets = session.transaction().execute(query_text, commit_tx=True) + return data_result_sets except (ydb.ConnectionError, ydb.Timeout, ydb.BadSession, ydb.Unavailable, ydb.InternalError) as ex: - logging.warning('Kikimr driver exception: {}'.format(ex)) - # https://st.yandex-team.ru/SQS-307 - if retries_left: - logging.info('Retrying query') - time.sleep(1) - else: - raise - + logging.warning('Kikimr driver exception: {}'.format(ex)) + # https://st.yandex-team.ru/SQS-307 + if retries_left: + logging.info('Retrying query') + time.sleep(1) + else: + raise + def _queue_url_matcher(self, queue_name): urls_matchers = [ equal_to( @@ -379,39 +379,39 @@ class KikimrSqsTestBase(object): ) for port in self.sqs_ports ] - return any_of(*urls_matchers) + return any_of(*urls_matchers) - def _create_queue_and_assert(self, queue_name, is_fifo=False, use_http=False, attributes=None, shards=None, retries=3): - self.queue_url = None - if attributes is None: - attributes = dict() - logging.debug('Create queue. Attributes: {}. Use http: {}. Is fifo: {}'.format(attributes, use_http, is_fifo)) + def _create_queue_and_assert(self, queue_name, is_fifo=False, use_http=False, attributes=None, shards=None, retries=3): + self.queue_url = None + if attributes is None: + attributes = dict() + logging.debug('Create queue. Attributes: {}. Use http: {}. 
Is fifo: {}'.format(attributes, use_http, is_fifo)) assert (len(attributes.keys()) == 0 or use_http), 'Attributes are supported only for http queue creation' assert (shards is None or not use_http), 'Custom shards number is only supported in non-http mode' while retries: - retries -= 1 + retries -= 1 try: - if use_http: - self.queue_url = self._sqs_api.create_queue(queue_name, is_fifo=is_fifo, attributes=attributes) - else: - cmd = [ - get_sqs_client_path(), - 'create', # create queue command + if use_http: + self.queue_url = self._sqs_api.create_queue(queue_name, is_fifo=is_fifo, attributes=attributes) + else: + cmd = [ + get_sqs_client_path(), + 'create', # create queue command '-u', self._username, '--shards', str(shards) if shards is not None else '2', - '--partitions', '1', - '--queue-name', queue_name, + '--partitions', '1', + '--queue-name', queue_name, ] + self._sqs_server_opts - execute = yatest_common.execute(cmd) - self.queue_url = execute.std_out - self.queue_url = self.queue_url.strip() - except (RuntimeError, yatest_common.ExecutionError) as ex: - logging.debug("Got error: {}. Retrying creation request".format(ex)) - if retries: - time.sleep(1) # Sleep before next retry - else: - raise - if self.queue_url is not None: # queue_url will be None in case of connection error + execute = yatest_common.execute(cmd) + self.queue_url = execute.std_out + self.queue_url = self.queue_url.strip() + except (RuntimeError, yatest_common.ExecutionError) as ex: + logging.debug("Got error: {}. Retrying creation request".format(ex)) + if retries: + time.sleep(1) # Sleep before next retry + else: + raise + if self.queue_url is not None: # queue_url will be None in case of connection error break assert_that( to_bytes(self.queue_url), @@ -422,7 +422,7 @@ class KikimrSqsTestBase(object): def _send_message_and_assert(self, queue_url, msg_body, seq_no=None, group_id=None, attributes=None, delay_seconds=None): attributes = {} if attributes is None else attributes send_msg_result = self._sqs_api.send_message( - queue_url, msg_body, deduplication_id=seq_no, group_id=group_id, attributes=attributes, delay_seconds=delay_seconds + queue_url, msg_body, deduplication_id=seq_no, group_id=group_id, attributes=attributes, delay_seconds=delay_seconds ) assert_that( send_msg_result, not_none() @@ -444,45 +444,45 @@ class KikimrSqsTestBase(object): ret.append(result) return ret - def _read_while_not_empty(self, queue_url, messages_count, visibility_timeout=None, wait_timeout=1, max_empty_reads=1): + def _read_while_not_empty(self, queue_url, messages_count, visibility_timeout=None, wait_timeout=1, max_empty_reads=1): ret = [] messages_by_time = {} actual_vis_timeout = visibility_timeout if visibility_timeout is not None else DEFAULT_VISIBILITY_TIMEOUT - empty_reads_count = 0 + empty_reads_count = 0 max_batch_to_read = self.config_generator.yaml_config['sqs_config']['max_number_of_receive_messages'] while len(ret) < messages_count: # noinspection PyTypeChecker request_start = time.time() read_result = self._sqs_api.receive_message( - queue_url, max_number_of_messages=min(messages_count - len(ret), max_batch_to_read), - visibility_timeout=visibility_timeout, wait_timeout=wait_timeout + queue_url, max_number_of_messages=min(messages_count - len(ret), max_batch_to_read), + visibility_timeout=visibility_timeout, wait_timeout=wait_timeout ) if not read_result: - empty_reads_count += 1 - if empty_reads_count == max_empty_reads: - break - else: - empty_reads_count = 0 - - if read_result: - request_end = time.time() - for 
msg in read_result: - msg_id = msg['MessageId'] - if msg_id not in messages_by_time: - messages_by_time[msg_id] = request_start - ret.append(msg) - elif request_end > messages_by_time[msg_id] + actual_vis_timeout: - messages_by_time[msg_id] = max( - request_start, messages_by_time[msg_id] + actual_vis_timeout - ) - else: - raise AssertionError("Message {} appeared twice before visibility timeout expired".format(msg_id)) + empty_reads_count += 1 + if empty_reads_count == max_empty_reads: + break + else: + empty_reads_count = 0 + + if read_result: + request_end = time.time() + for msg in read_result: + msg_id = msg['MessageId'] + if msg_id not in messages_by_time: + messages_by_time[msg_id] = request_start + ret.append(msg) + elif request_end > messages_by_time[msg_id] + actual_vis_timeout: + messages_by_time[msg_id] = max( + request_start, messages_by_time[msg_id] + actual_vis_timeout + ) + else: + raise AssertionError("Message {} appeared twice before visibility timeout expired".format(msg_id)) return ret def _read_messages_and_assert( - self, queue_url, messages_count, matcher=None, visibility_timeout=None, wait_timeout=1 + self, queue_url, messages_count, matcher=None, visibility_timeout=None, wait_timeout=1 ): - read_result = self._read_while_not_empty( + read_result = self._read_while_not_empty( queue_url, messages_count=messages_count, visibility_timeout=visibility_timeout, wait_timeout=wait_timeout ) @@ -494,10 +494,10 @@ class KikimrSqsTestBase(object): def _create_queue_send_x_messages_read_y_messages( self, queue_name, send_count, read_count, msg_body_template, - is_fifo=False, visibility_timeout=None, wait_timeout=1, group_id="1" + is_fifo=False, visibility_timeout=None, wait_timeout=1, group_id="1" ): - self._create_queue_and_assert(queue_name, is_fifo) - if is_fifo: + self._create_queue_and_assert(queue_name, is_fifo) + if is_fifo: self.message_ids = self._send_messages( self.queue_url, send_count, msg_body_template, is_fifo=True, group_id=group_id ) @@ -511,44 +511,44 @@ class KikimrSqsTestBase(object): visibility_timeout, wait_timeout ) - def _other_node(self, node_index): - if node_index == 0: - return 1 - else: - return node_index - 1 - - def _get_live_node_index(self): - for i in range(self.cluster_nodes_count): - if self.slot_count: - if self.cluster.slots[i + 1].is_alive(): - return i - else: - if self.cluster.nodes[i + 1].is_alive(): - return i - assert(False) - - def _get_mon_port(self, node_index): - if self.slot_count: - return self.config_generator.port_allocator.get_slot_port_allocator(node_index + 1).mon_port - else: - return self.config_generator.port_allocator.get_node_port_allocator(node_index + 1).mon_port - - def _get_sqs_counters(self, node_index=0, counters_format='json'): + def _other_node(self, node_index): + if node_index == 0: + return 1 + else: + return node_index - 1 + + def _get_live_node_index(self): + for i in range(self.cluster_nodes_count): + if self.slot_count: + if self.cluster.slots[i + 1].is_alive(): + return i + else: + if self.cluster.nodes[i + 1].is_alive(): + return i + assert(False) + + def _get_mon_port(self, node_index): + if self.slot_count: + return self.config_generator.port_allocator.get_slot_port_allocator(node_index + 1).mon_port + else: + return self.config_generator.port_allocator.get_node_port_allocator(node_index + 1).mon_port + + def _get_sqs_counters(self, node_index=0, counters_format='json'): return self._get_counters(node_index, "sqs", counters_format) def _get_ymq_counters(self, cloud, folder, node_index=0, 
counters_format='json'): return self._get_counters(node_index, "ymq_public", counters_format, cloud=cloud, folder=folder) def _get_counters(self, node_index, component, counters_format, cloud=None, folder=None): - mon_port = self._get_mon_port(node_index) - - if counters_format == 'json': - format_suffix = '/json' - elif counters_format == 'text': - format_suffix = '' - else: - raise Exception('Unknown counters format: \"{}\"'.format(counters_format)) - + mon_port = self._get_mon_port(node_index) + + if counters_format == 'json': + format_suffix = '/json' + elif counters_format == 'text': + format_suffix = '' + else: + raise Exception('Unknown counters format: \"{}\"'.format(counters_format)) + if folder is not None: labels="/cloud%3D{cloud}/folder%3D{folder}".format(cloud=cloud, folder=folder) else: @@ -556,76 +556,76 @@ class KikimrSqsTestBase(object): counters_url = 'http://localhost:{port}/counters/counters%3D{component}{labels}{suffix}'.format( port=mon_port, component=component, suffix=format_suffix, labels=labels ) - reply = requests.get(counters_url) - assert_that(reply.status_code, equal_to(200)) - - if counters_format == 'json': - ret = reply.json() - else: - ret = reply.text - - assert_that(ret, not_none()) - logging.debug('Got counters from node {}:\n{}'.format(node_index, json.dumps(ret, sort_keys=True, indent=2))) - return ret - - def _get_counter(self, counters, labels): - logging.debug('Searching for counter with labels:\n{}'.format(json.dumps(labels, sort_keys=True, indent=2))) - for sensor in counters['sensors']: - sensor_labels = sensor['labels'] - found = True - for label in labels: - if labels[label] != sensor_labels.get(label): - found = False - break - if found: - logging.debug('Return counter:\n{}'.format(json.dumps(sensor, sort_keys=True, indent=2))) - return sensor - logging.debug('No counter with labels found:\n{}'.format(json.dumps(labels, sort_keys=True, indent=2))) - - def _get_counter_value(self, counters, labels, default_value=None): - sensor = self._get_counter(counters, labels) - return sensor['value'] if sensor is not None else default_value - - def _kick_tablets_from_node(self, node_index): - mon_port = self._get_mon_port(0) - - toggle_down_url = 'http://localhost:{}/tablets/app?TabletID=72057594037968897&node={}&page=SetDown&down=1'.format(mon_port, node_index + 1) - toggle_down_reply = requests.get(toggle_down_url) - assert_that(toggle_down_reply.status_code, equal_to(200)) - logging.debug('Toggle down reply: {}'.format(toggle_down_reply.text)) - - kick_url = 'http://localhost:{}/tablets/app?TabletID=72057594037968897&node={}&page=KickNode'.format(mon_port, node_index + 1) - kick_reply = requests.get(kick_url) - assert_that(kick_reply.status_code, equal_to(200)) - logging.debug('Kick reply: {}'.format(kick_reply.text)) - - time.sleep(3) - - def _enable_tablets_on_node(self, node_index): - # make several attempts because this function is needed to be called right after node's start - attempts = 30 - while attempts: - attempts -= 1 - - try: - mon_port = self._get_mon_port(0) - toggle_up_url = 'http://localhost:{}/tablets/app?TabletID=72057594037968897&node={}&page=SetDown&down=0'.format(mon_port, node_index + 1) - toggle_up_reply = requests.get(toggle_up_url) - - logging.debug('Toggle up reply: {}'.format(toggle_up_reply.text)) - if toggle_up_reply.status_code != 200 and attempts: - logging.debug('Non 200 error code: {}. 
Retrying'.format(toggle_up_reply.status_code)) - time.sleep(1) - continue - - assert_that(toggle_up_reply.status_code, equal_to(200)) - break - except requests.ConnectionError: - if attempts == 0: - raise - logging.debug('Connection error: trying to enable tablets on node {} in 1 second'.format(node_index)) - time.sleep(1) # wait node to start - + reply = requests.get(counters_url) + assert_that(reply.status_code, equal_to(200)) + + if counters_format == 'json': + ret = reply.json() + else: + ret = reply.text + + assert_that(ret, not_none()) + logging.debug('Got counters from node {}:\n{}'.format(node_index, json.dumps(ret, sort_keys=True, indent=2))) + return ret + + def _get_counter(self, counters, labels): + logging.debug('Searching for counter with labels:\n{}'.format(json.dumps(labels, sort_keys=True, indent=2))) + for sensor in counters['sensors']: + sensor_labels = sensor['labels'] + found = True + for label in labels: + if labels[label] != sensor_labels.get(label): + found = False + break + if found: + logging.debug('Return counter:\n{}'.format(json.dumps(sensor, sort_keys=True, indent=2))) + return sensor + logging.debug('No counter with labels found:\n{}'.format(json.dumps(labels, sort_keys=True, indent=2))) + + def _get_counter_value(self, counters, labels, default_value=None): + sensor = self._get_counter(counters, labels) + return sensor['value'] if sensor is not None else default_value + + def _kick_tablets_from_node(self, node_index): + mon_port = self._get_mon_port(0) + + toggle_down_url = 'http://localhost:{}/tablets/app?TabletID=72057594037968897&node={}&page=SetDown&down=1'.format(mon_port, node_index + 1) + toggle_down_reply = requests.get(toggle_down_url) + assert_that(toggle_down_reply.status_code, equal_to(200)) + logging.debug('Toggle down reply: {}'.format(toggle_down_reply.text)) + + kick_url = 'http://localhost:{}/tablets/app?TabletID=72057594037968897&node={}&page=KickNode'.format(mon_port, node_index + 1) + kick_reply = requests.get(kick_url) + assert_that(kick_reply.status_code, equal_to(200)) + logging.debug('Kick reply: {}'.format(kick_reply.text)) + + time.sleep(3) + + def _enable_tablets_on_node(self, node_index): + # make several attempts because this function is needed to be called right after node's start + attempts = 30 + while attempts: + attempts -= 1 + + try: + mon_port = self._get_mon_port(0) + toggle_up_url = 'http://localhost:{}/tablets/app?TabletID=72057594037968897&node={}&page=SetDown&down=0'.format(mon_port, node_index + 1) + toggle_up_reply = requests.get(toggle_up_url) + + logging.debug('Toggle up reply: {}'.format(toggle_up_reply.text)) + if toggle_up_reply.status_code != 200 and attempts: + logging.debug('Non 200 error code: {}. 
Retrying'.format(toggle_up_reply.status_code)) + time.sleep(1) + continue + + assert_that(toggle_up_reply.status_code, equal_to(200)) + break + except requests.ConnectionError: + if attempts == 0: + raise + logging.debug('Connection error: trying to enable tablets on node {} in 1 second'.format(node_index)) + time.sleep(1) # wait node to start + def _smart_make_table_path(self, user_name, queue_name, queue_version, shard, table_name): table_path = '{}/{}'.format(self.sqs_root, user_name) if queue_name is not None: @@ -649,105 +649,105 @@ class KikimrSqsTestBase(object): else: return int(version) - def _get_queue_master_tablet_id(self, user_name_param=None, queue_name_param=None): - user_name = user_name_param if user_name_param else self._username - queue_name = queue_name_param if queue_name_param else self.queue_name + def _get_queue_master_tablet_id(self, user_name_param=None, queue_name_param=None): + user_name = user_name_param if user_name_param else self._username + queue_name = queue_name_param if queue_name_param else self.queue_name table_path = '{}/.Queues'.format(self.sqs_root) data_result_sets = self._execute_yql_query('SELECT MasterTabletId FROM `{}` WHERE Account=\'{}\' AND QueueName=\'{}\''.format(table_path, user_name, queue_name)) - assert_that(len(data_result_sets), equal_to(1)) - assert_that(len(data_result_sets[0].rows), equal_to(1)) - - tablet_id = data_result_sets[0].rows[0]['MasterTabletId'] - assert_that(tablet_id, not_(none())) - return int(tablet_id) - - def _get_queue_master_node_index(self, user_name=None, queue_name=None): - tablet_id = self._get_queue_master_tablet_id(user_name, queue_name) - mon_port = self._get_mon_port(self._get_live_node_index()) - - tablet_info_url = 'http://localhost:{}/tablets?TabletID={}'.format(mon_port, tablet_id) - - retries = 5 - retries_left = retries - while retries_left: - retries_left -= 1 - tablet_info_reply = requests.get(tablet_info_url) - if tablet_info_reply.status_code != 200: - time.sleep(1) - continue - logging.debug('Tablet info reply: {}'.format(tablet_info_reply.text)) - - node_id_match = re.search('NodeID:\\s(\\d+)', tablet_info_reply.text) - if node_id_match is None: - time.sleep(1) - continue - node_index = int(node_id_match.group(1)) - assert_that(node_index, greater_than(0)) - assert_that(node_index, less_than_or_equal_to(self.cluster_nodes_count)) - return node_index - 1 - assert False, "Couldn't get tablet info from viewer in {} tries".format(retries) - - def _get_queue_shards_count(self, username, queuename, queue_version): - state_table_path = self._smart_make_table_path(username, queuename, queue_version, None, 'State') - return self._get_table_lines_count(state_table_path) - - def _check_queue_tables_are_empty(self, queue_name=None): - if queue_name is None: - queue_name = self.queue_name - is_fifo = queue_name.endswith('.fifo') - + assert_that(len(data_result_sets), equal_to(1)) + assert_that(len(data_result_sets[0].rows), equal_to(1)) + + tablet_id = data_result_sets[0].rows[0]['MasterTabletId'] + assert_that(tablet_id, not_(none())) + return int(tablet_id) + + def _get_queue_master_node_index(self, user_name=None, queue_name=None): + tablet_id = self._get_queue_master_tablet_id(user_name, queue_name) + mon_port = self._get_mon_port(self._get_live_node_index()) + + tablet_info_url = 'http://localhost:{}/tablets?TabletID={}'.format(mon_port, tablet_id) + + retries = 5 + retries_left = retries + while retries_left: + retries_left -= 1 + tablet_info_reply = requests.get(tablet_info_url) + if 
tablet_info_reply.status_code != 200: + time.sleep(1) + continue + logging.debug('Tablet info reply: {}'.format(tablet_info_reply.text)) + + node_id_match = re.search('NodeID:\\s(\\d+)', tablet_info_reply.text) + if node_id_match is None: + time.sleep(1) + continue + node_index = int(node_id_match.group(1)) + assert_that(node_index, greater_than(0)) + assert_that(node_index, less_than_or_equal_to(self.cluster_nodes_count)) + return node_index - 1 + assert False, "Couldn't get tablet info from viewer in {} tries".format(retries) + + def _get_queue_shards_count(self, username, queuename, queue_version): + state_table_path = self._smart_make_table_path(username, queuename, queue_version, None, 'State') + return self._get_table_lines_count(state_table_path) + + def _check_queue_tables_are_empty(self, queue_name=None): + if queue_name is None: + queue_name = self.queue_name + is_fifo = queue_name.endswith('.fifo') + queue_version = self._get_queue_version_number(self._username, queue_name) - if is_fifo: + if is_fifo: self._check_fifo_queue_is_empty(queue_name, queue_version) - else: + else: self._check_std_queue_is_empty(queue_name, queue_version) - - def _get_table_lines_count(self, table_path): + + def _get_table_lines_count(self, table_path): data_result_sets = self._execute_yql_query('SELECT COUNT(*) AS count FROM `{}`;'.format(table_path)) - assert_that(len(data_result_sets), equal_to(1)) - assert_that(len(data_result_sets[0].rows), equal_to(1)) - logging.debug('Received count result for table {}: {}'.format(table_path, data_result_sets[0].rows[0])) - return data_result_sets[0].rows[0]['count'] - + assert_that(len(data_result_sets), equal_to(1)) + assert_that(len(data_result_sets[0].rows), equal_to(1)) + logging.debug('Received count result for table {}: {}'.format(table_path, data_result_sets[0].rows[0])) + return data_result_sets[0].rows[0]['count'] + def _check_std_queue_is_empty(self, queue_name, queue_version): - shards = self._get_queue_shards_count(self._username, queue_name, queue_version) - assert_that(shards, not_(equal_to(0))) + shards = self._get_queue_shards_count(self._username, queue_name, queue_version) + assert_that(shards, not_(equal_to(0))) for shard in range(shards): self._check_std_queue_shard_is_empty(queue_name, queue_version, shard) - + def _check_std_queue_shard_is_empty(self, queue_name, queue_version, shard): - def get_table_path(table_name): + def get_table_path(table_name): return self._smart_make_table_path(self._username, queue_name, queue_version, shard, table_name) - - def get_lines_count(table_name): - return self._get_table_lines_count(get_table_path(table_name)) - - assert_that(get_lines_count('Infly'), equal_to(0)) - assert_that(get_lines_count('MessageData'), equal_to(0)) - assert_that(get_lines_count('Messages'), equal_to(0)) - assert_that(get_lines_count('SentTimestampIdx'), equal_to(0)) - + + def get_lines_count(table_name): + return self._get_table_lines_count(get_table_path(table_name)) + + assert_that(get_lines_count('Infly'), equal_to(0)) + assert_that(get_lines_count('MessageData'), equal_to(0)) + assert_that(get_lines_count('Messages'), equal_to(0)) + assert_that(get_lines_count('SentTimestampIdx'), equal_to(0)) + def _check_fifo_queue_is_empty(self, queue_name, queue_version): - def get_table_path(table_name): + def get_table_path(table_name): return self._smart_make_table_path(self._username, queue_name, queue_version, None, table_name) - - def get_lines_count(table_name): - return self._get_table_lines_count(get_table_path(table_name)) - - 
assert_that(get_lines_count('Data'), equal_to(0)) - assert_that(get_lines_count('Groups'), equal_to(0)) - assert_that(get_lines_count('Messages'), equal_to(0)) - assert_that(get_lines_count('SentTimestampIdx'), equal_to(0)) - - def _break_queue(self, username, queuename, is_fifo): - version = self._get_queue_version_number(username, queuename) - session = self._driver.table_client.session().create() - if is_fifo: - session.drop_table(self._smart_make_table_path(username, queuename, version, None, 'Messages')) - session.drop_table(self._smart_make_table_path(username, queuename, version, None, 'Data')) - else: - shards = self._get_queue_shards_count(username, queuename, version) + + def get_lines_count(table_name): + return self._get_table_lines_count(get_table_path(table_name)) + + assert_that(get_lines_count('Data'), equal_to(0)) + assert_that(get_lines_count('Groups'), equal_to(0)) + assert_that(get_lines_count('Messages'), equal_to(0)) + assert_that(get_lines_count('SentTimestampIdx'), equal_to(0)) + + def _break_queue(self, username, queuename, is_fifo): + version = self._get_queue_version_number(username, queuename) + session = self._driver.table_client.session().create() + if is_fifo: + session.drop_table(self._smart_make_table_path(username, queuename, version, None, 'Messages')) + session.drop_table(self._smart_make_table_path(username, queuename, version, None, 'Data')) + else: + shards = self._get_queue_shards_count(username, queuename, version) for shard in range(shards): - session.drop_table(self._smart_make_table_path(username, queuename, version, shard, 'Messages')) - session.drop_table(self._smart_make_table_path(username, queuename, version, shard, 'MessageData')) + session.drop_table(self._smart_make_table_path(username, queuename, version, shard, 'Messages')) + session.drop_table(self._smart_make_table_path(username, queuename, version, shard, 'MessageData')) diff --git a/ydb/tests/functional/sqs/test_account_actions.py b/ydb/tests/functional/sqs/test_account_actions.py index 60ab1e4d102..340e657e25f 100644 --- a/ydb/tests/functional/sqs/test_account_actions.py +++ b/ydb/tests/functional/sqs/test_account_actions.py @@ -1,11 +1,11 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -from hamcrest import assert_that, not_none, has_item, is_not +from hamcrest import assert_that, not_none, has_item, is_not -from sqs_test_base import KikimrSqsTestBase, get_test_with_sqs_installation_by_path, get_test_with_sqs_tenant_installation +from sqs_test_base import KikimrSqsTestBase, get_test_with_sqs_installation_by_path, get_test_with_sqs_tenant_installation -class AccountActionsTest(KikimrSqsTestBase): +class AccountActionsTest(KikimrSqsTestBase): def test_manage_account(self): user_name = 'pupkin' create_user_result = self._sqs_api.create_user(user_name) @@ -27,11 +27,11 @@ class AccountActionsTest(KikimrSqsTestBase): assert_that( user_list, is_not(has_item(user_name)) ) - - -class TestAccountActionsWithTenant(get_test_with_sqs_tenant_installation(AccountActionsTest)): - pass - - -class TestAccountActionsWithPath(get_test_with_sqs_installation_by_path(AccountActionsTest)): - pass + + +class TestAccountActionsWithTenant(get_test_with_sqs_tenant_installation(AccountActionsTest)): + pass + + +class TestAccountActionsWithPath(get_test_with_sqs_installation_by_path(AccountActionsTest)): + pass diff --git a/ydb/tests/functional/sqs/test_acl.py b/ydb/tests/functional/sqs/test_acl.py index e5221f0f43b..2da24e6f8cc 100644 --- a/ydb/tests/functional/sqs/test_acl.py +++ 
b/ydb/tests/functional/sqs/test_acl.py @@ -3,15 +3,15 @@ import logging import time -import pytest -from hamcrest import assert_that, equal_to, none, is_not, is_, raises +import pytest +from hamcrest import assert_that, equal_to, none, is_not, is_, raises import ydb.tests.library.common.yatest_common as yatest_common from sqs_test_base import KikimrSqsTestBase, get_sqs_client_path, get_test_with_sqs_installation_by_path, get_test_with_sqs_tenant_installation, to_bytes - - -class SqsACLTest(KikimrSqsTestBase): + + +class SqsACLTest(KikimrSqsTestBase): def _modify_permissions(self, resource, action, permissions_string, clear_acl_flag=False): cmd = [ get_sqs_client_path(), @@ -27,7 +27,7 @@ class SqsACLTest(KikimrSqsTestBase): retries_count = 1 while retries_count: - logging.debug("Running {}".format(' '.join(cmd))) + logging.debug("Running {}".format(' '.join(cmd))) try: yatest_common.execute(cmd) except yatest_common.ExecutionError as ex: @@ -46,7 +46,7 @@ class SqsACLTest(KikimrSqsTestBase): ] + self._sqs_server_opts while retries_count: - logging.debug("Running {}".format(' '.join(cmd))) + logging.debug("Running {}".format(' '.join(cmd))) try: execute = yatest_common.execute(cmd) except yatest_common.ExecutionError as ex: @@ -168,62 +168,62 @@ class SqsACLTest(KikimrSqsTestBase): assert('Account' in str(result)) assert(berkanavt_sid in str(result)) assert('Permissions' in str(result)) - - -class SqsWithForceAuthorizationTest(KikimrSqsTestBase): - @classmethod - def _setup_config_generator(cls): - config_generator = super(SqsWithForceAuthorizationTest, cls)._setup_config_generator() + + +class SqsWithForceAuthorizationTest(KikimrSqsTestBase): + @classmethod + def _setup_config_generator(cls): + config_generator = super(SqsWithForceAuthorizationTest, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['force_access_control'] = True - return config_generator - - def _setup_user(self, _username, retries_count=3): - pass - - @classmethod - def create_metauser(cls, cluster, config_generator): - pass - - @pytest.mark.parametrize(argnames='token,pattern', - argvalues=[('invalid_token', 'AccessDeniedException'), ('', 'No security token was provided.'), (None, 'InvalidClientTokenId')], - ids=['invalid', 'empty', 'no']) - def test_invalid_token(self, token, pattern): - sqs_api = self._create_api_for_user(self._username, raise_on_error=True, security_token=token) - - def call_list(): - sqs_api.list_queues() - - def call_get_queue_url(): - sqs_api.get_queue_url('queue_name') - - assert_that( - call_list, - raises( - RuntimeError, - pattern=pattern - ) - ) - - assert_that( - call_get_queue_url, - raises( - RuntimeError, - pattern=pattern - ) - ) - - -class TestSqsACLWithTenant(get_test_with_sqs_tenant_installation(SqsACLTest)): - pass - - -class TestSqsACLWithPath(get_test_with_sqs_installation_by_path(SqsACLTest)): - pass - - -class TestSqsWithForceAuthorizationWithTenant(get_test_with_sqs_tenant_installation(SqsWithForceAuthorizationTest)): - pass - - -class TestSqsWithForceAuthorizationWithPath(get_test_with_sqs_installation_by_path(SqsWithForceAuthorizationTest)): - pass + return config_generator + + def _setup_user(self, _username, retries_count=3): + pass + + @classmethod + def create_metauser(cls, cluster, config_generator): + pass + + @pytest.mark.parametrize(argnames='token,pattern', + argvalues=[('invalid_token', 'AccessDeniedException'), ('', 'No security token was provided.'), (None, 'InvalidClientTokenId')], + ids=['invalid', 'empty', 'no']) + def 
test_invalid_token(self, token, pattern): + sqs_api = self._create_api_for_user(self._username, raise_on_error=True, security_token=token) + + def call_list(): + sqs_api.list_queues() + + def call_get_queue_url(): + sqs_api.get_queue_url('queue_name') + + assert_that( + call_list, + raises( + RuntimeError, + pattern=pattern + ) + ) + + assert_that( + call_get_queue_url, + raises( + RuntimeError, + pattern=pattern + ) + ) + + +class TestSqsACLWithTenant(get_test_with_sqs_tenant_installation(SqsACLTest)): + pass + + +class TestSqsACLWithPath(get_test_with_sqs_installation_by_path(SqsACLTest)): + pass + + +class TestSqsWithForceAuthorizationWithTenant(get_test_with_sqs_tenant_installation(SqsWithForceAuthorizationTest)): + pass + + +class TestSqsWithForceAuthorizationWithPath(get_test_with_sqs_installation_by_path(SqsWithForceAuthorizationTest)): + pass diff --git a/ydb/tests/functional/sqs/test_counters.py b/ydb/tests/functional/sqs/test_counters.py index dc23814a6b1..a9dbf2769f1 100644 --- a/ydb/tests/functional/sqs/test_counters.py +++ b/ydb/tests/functional/sqs/test_counters.py @@ -1,274 +1,274 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -import logging -import time - -import pytest -from hamcrest import assert_that, equal_to, none, not_, greater_than, raises - -from sqs_test_base import KikimrSqsTestBase - - -class TestSqsCountersFeatures(KikimrSqsTestBase): - @classmethod - def _setup_config_generator(cls): - config_generator = super(TestSqsCountersFeatures, cls)._setup_config_generator() +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import logging +import time + +import pytest +from hamcrest import assert_that, equal_to, none, not_, greater_than, raises + +from sqs_test_base import KikimrSqsTestBase + + +class TestSqsCountersFeatures(KikimrSqsTestBase): + @classmethod + def _setup_config_generator(cls): + config_generator = super(TestSqsCountersFeatures, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['create_lazy_counters'] = False config_generator.yaml_config['sqs_config']['queue_attributes_cache_time_ms'] = 1000 config_generator.yaml_config['sqs_config']['user_settings_update_time_ms'] = 1000 # Checking settings table. - return config_generator - - def enable_detailed_queue_counters(self): - version = self._get_queue_version_number(self._username, self.queue_name) - attributes_path = self._smart_make_table_path(self._username, self.queue_name, version, None, 'Attributes') - - deadline = int((time.time() + 600) * 1000) + return config_generator + + def enable_detailed_queue_counters(self): + version = self._get_queue_version_number(self._username, self.queue_name) + attributes_path = self._smart_make_table_path(self._username, self.queue_name, version, None, 'Attributes') + + deadline = int((time.time() + 600) * 1000) self._execute_yql_query('UPSERT INTO `{}` (State, ShowDetailedCountersDeadline) VALUES (0, {})' - .format(attributes_path, deadline)) - - def test_creates_counter(self): - queue_url = self._create_queue_and_assert(self.queue_name) - self._sqs_api.get_queue_attributes(queue_url) # Ensure that counters structure is initialized. 
- - counters = self._get_sqs_counters() - send_counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'SendMessage_Count', - } - counter = self._get_counter_value(counters, send_counter_labels, None) - assert_that(counter, equal_to(0)) - - @pytest.mark.parametrize('switch_user', argvalues=[True, False], ids=['user', 'queue']) - def test_detailed_counters(self, switch_user): - queue_url = self._create_queue_and_assert(self.queue_name) - self._sqs_api.get_queue_attributes(queue_url) - - counters = self._get_sqs_counters() - counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'TransactionsCount', - } - counter = self._get_counter_value(counters, counter_labels, None) - assert_that(counter, none()) - - if switch_user: - deadline = int((time.time() + 600) * 1000) + .format(attributes_path, deadline)) + + def test_creates_counter(self): + queue_url = self._create_queue_and_assert(self.queue_name) + self._sqs_api.get_queue_attributes(queue_url) # Ensure that counters structure is initialized. + + counters = self._get_sqs_counters() + send_counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'SendMessage_Count', + } + counter = self._get_counter_value(counters, send_counter_labels, None) + assert_that(counter, equal_to(0)) + + @pytest.mark.parametrize('switch_user', argvalues=[True, False], ids=['user', 'queue']) + def test_detailed_counters(self, switch_user): + queue_url = self._create_queue_and_assert(self.queue_name) + self._sqs_api.get_queue_attributes(queue_url) + + counters = self._get_sqs_counters() + counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'TransactionsCount', + } + counter = self._get_counter_value(counters, counter_labels, None) + assert_that(counter, none()) + + if switch_user: + deadline = int((time.time() + 600) * 1000) self._execute_yql_query('UPSERT INTO `{}/.Settings` (Account, Name, Value) VALUES (\'{}\', \'ShowDetailedCountersDeadlineMs\', \'{}\')' .format(self.sqs_root, self._username, deadline)) - else: - self.enable_detailed_queue_counters() - - time.sleep(2) # Wait attributes/settings cache time - self._sqs_api.send_message(queue_url, 'data') - - counters = self._get_sqs_counters() - counter = self._get_counter_value(counters, counter_labels, None) - assert_that(counter, not_(none())) - assert_that(counter, greater_than(0)) - - def test_disables_user_counters(self): - self._sqs_api.list_queues() # init user's structure in server - counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'sensor': 'RequestTimeouts', - } - counters = self._get_sqs_counters() - counter = self._get_counter_value(counters, counter_labels, None) - assert_that(counter, equal_to(0)) - - def disable_user_counters(disable): + else: + self.enable_detailed_queue_counters() + + time.sleep(2) # Wait attributes/settings cache time + self._sqs_api.send_message(queue_url, 'data') + + counters = self._get_sqs_counters() + counter = self._get_counter_value(counters, counter_labels, None) + assert_that(counter, not_(none())) + assert_that(counter, greater_than(0)) + + def test_disables_user_counters(self): + self._sqs_api.list_queues() # init user's structure in server + counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'sensor': 'RequestTimeouts', + } + counters = self._get_sqs_counters() + counter = self._get_counter_value(counters, counter_labels, 
None) + assert_that(counter, equal_to(0)) + + def disable_user_counters(disable): self._execute_yql_query('UPSERT INTO `{}/.Settings` (Account, Name, Value) VALUES (\'{}\', \'DisableCounters\', \'{}\')' .format(self.sqs_root, self._username, '1' if disable else '0')) - - disable_user_counters(True) - - attempts = 50 - while attempts: - attempts -= 1 - - counters = self._get_sqs_counters() - counter = self._get_counter_value(counters, counter_labels, None) - - if counter is not None and attempts: - time.sleep(0.5) - continue - - assert_that(counter, none()) - break - - disable_user_counters(False) - - attempts = 50 - while attempts: - attempts -= 1 - - counters = self._get_sqs_counters() - counter = self._get_counter_value(counters, counter_labels, None) - - if counter is None and attempts: - time.sleep(0.5) - continue - - assert_that(counter, equal_to(0)) - break - - def test_removes_user_counters_after_user_deletion(self): - self._sqs_api.list_queues() # init user's structure in server - counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'sensor': 'RequestTimeouts', - } - counters = self._get_sqs_counters() - counter = self._get_counter_value(counters, counter_labels, None) - assert_that(counter, equal_to(0)) - - self._sqs_api.delete_user(self._username) - - attempts = 50 - while attempts: - attempts -= 1 - - counters = self._get_sqs_counters() - counter = self._get_counter_value(counters, counter_labels, None) - - if counter is not None and attempts: - time.sleep(0.2) - continue - - assert_that(counter, none()) - break - - @pytest.mark.parametrize('switch_user', argvalues=[True, False], ids=['user', 'queue']) - def test_aggregates_transaction_counters(self, switch_user): - queue_url = self._create_queue_and_assert(self.queue_name) - self._sqs_api.send_message(queue_url, 'data') - - counters = self._get_sqs_counters() - total_counter_labels = { - 'subsystem': 'core', - 'user': 'total', - 'queue': 'total', - 'sensor': 'TransactionsByType', - 'query_type': 'WRITE_MESSAGE_ID', - } - user_counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': 'total', - 'sensor': 'TransactionsByType', - 'query_type': 'WRITE_MESSAGE_ID', - } - queue_counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'TransactionsByType', - 'query_type': 'WRITE_MESSAGE_ID', - } - total_counter = self._get_counter_value(counters, total_counter_labels) - assert_that(total_counter, not_(none())) - - if switch_user: + + disable_user_counters(True) + + attempts = 50 + while attempts: + attempts -= 1 + + counters = self._get_sqs_counters() + counter = self._get_counter_value(counters, counter_labels, None) + + if counter is not None and attempts: + time.sleep(0.5) + continue + + assert_that(counter, none()) + break + + disable_user_counters(False) + + attempts = 50 + while attempts: + attempts -= 1 + + counters = self._get_sqs_counters() + counter = self._get_counter_value(counters, counter_labels, None) + + if counter is None and attempts: + time.sleep(0.5) + continue + + assert_that(counter, equal_to(0)) + break + + def test_removes_user_counters_after_user_deletion(self): + self._sqs_api.list_queues() # init user's structure in server + counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'sensor': 'RequestTimeouts', + } + counters = self._get_sqs_counters() + counter = self._get_counter_value(counters, counter_labels, None) + assert_that(counter, equal_to(0)) + + self._sqs_api.delete_user(self._username) + + 
attempts = 50 + while attempts: + attempts -= 1 + + counters = self._get_sqs_counters() + counter = self._get_counter_value(counters, counter_labels, None) + + if counter is not None and attempts: + time.sleep(0.2) + continue + + assert_that(counter, none()) + break + + @pytest.mark.parametrize('switch_user', argvalues=[True, False], ids=['user', 'queue']) + def test_aggregates_transaction_counters(self, switch_user): + queue_url = self._create_queue_and_assert(self.queue_name) + self._sqs_api.send_message(queue_url, 'data') + + counters = self._get_sqs_counters() + total_counter_labels = { + 'subsystem': 'core', + 'user': 'total', + 'queue': 'total', + 'sensor': 'TransactionsByType', + 'query_type': 'WRITE_MESSAGE_ID', + } + user_counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': 'total', + 'sensor': 'TransactionsByType', + 'query_type': 'WRITE_MESSAGE_ID', + } + queue_counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'TransactionsByType', + 'query_type': 'WRITE_MESSAGE_ID', + } + total_counter = self._get_counter_value(counters, total_counter_labels) + assert_that(total_counter, not_(none())) + + if switch_user: self._execute_yql_query('UPSERT INTO `{}/.Settings` (Account, Name, Value) VALUES (\'{}\', \'ExportTransactionCounters\', \'1\')' .format(self.sqs_root, self._username)) - else: - self.enable_detailed_queue_counters() - - attempts = 50 - while attempts: - attempts -= 1 - - self._sqs_api.send_message(queue_url, 'data') - - counters = self._get_sqs_counters() - new_total_counter = self._get_counter_value(counters, total_counter_labels) - assert_that(new_total_counter, equal_to(total_counter + 1)) - total_counter = new_total_counter - - user_counter = self._get_counter_value(counters, user_counter_labels) - queue_counter = self._get_counter_value(counters, queue_counter_labels) - if (user_counter is None or queue_counter is None and not switch_user) and attempts: - time.sleep(0.2) - continue - - assert_that(user_counter, not_(none())) - if not switch_user: - assert_that(queue_counter, not_(none())) - break - - def test_updates_status_code_counters_when_parsing_errors_occur(self): - self._sqs_api.list_queues() # init user's structure in server - counter_labels = { - 'subsystem': 'core', - 'user': 'total', - 'sensor': 'StatusesByType', - 'status_code': 'InvalidParameterValue', - } - counters = self._get_sqs_counters() - prev_counter = self._get_counter_value(counters, counter_labels, 0) - - def call_receive(): - self._sqs_api.receive_message('http://sqs.yandex.net/user/queue_url', max_number_of_messages=-42) - - assert_that( - call_receive, - raises( - RuntimeError, - pattern='.*MaxNumberOfMessages.*' - ) - ) - - counters = self._get_sqs_counters() - counter = self._get_counter_value(counters, counter_labels, 0) - - assert_that(counter, equal_to(prev_counter + 1)) - - -class TestSqsCountersExportDelay(KikimrSqsTestBase): - @classmethod - def _setup_config_generator(cls): - config_generator = super(TestSqsCountersExportDelay, cls)._setup_config_generator() + else: + self.enable_detailed_queue_counters() + + attempts = 50 + while attempts: + attempts -= 1 + + self._sqs_api.send_message(queue_url, 'data') + + counters = self._get_sqs_counters() + new_total_counter = self._get_counter_value(counters, total_counter_labels) + assert_that(new_total_counter, equal_to(total_counter + 1)) + total_counter = new_total_counter + + user_counter = self._get_counter_value(counters, user_counter_labels) + 
queue_counter = self._get_counter_value(counters, queue_counter_labels) + if (user_counter is None or queue_counter is None and not switch_user) and attempts: + time.sleep(0.2) + continue + + assert_that(user_counter, not_(none())) + if not switch_user: + assert_that(queue_counter, not_(none())) + break + + def test_updates_status_code_counters_when_parsing_errors_occur(self): + self._sqs_api.list_queues() # init user's structure in server + counter_labels = { + 'subsystem': 'core', + 'user': 'total', + 'sensor': 'StatusesByType', + 'status_code': 'InvalidParameterValue', + } + counters = self._get_sqs_counters() + prev_counter = self._get_counter_value(counters, counter_labels, 0) + + def call_receive(): + self._sqs_api.receive_message('http://sqs.yandex.net/user/queue_url', max_number_of_messages=-42) + + assert_that( + call_receive, + raises( + RuntimeError, + pattern='.*MaxNumberOfMessages.*' + ) + ) + + counters = self._get_sqs_counters() + counter = self._get_counter_value(counters, counter_labels, 0) + + assert_that(counter, equal_to(prev_counter + 1)) + + +class TestSqsCountersExportDelay(KikimrSqsTestBase): + @classmethod + def _setup_config_generator(cls): + config_generator = super(TestSqsCountersExportDelay, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['create_lazy_counters'] = False config_generator.yaml_config['sqs_config']['queue_counters_export_delay_ms'] = 2000 - return config_generator - - def test_export_delay(self): - counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'RequestTimeouts', - } - - queue_url = self._create_queue_and_assert(self.queue_name) - attributes = self._sqs_api.get_queue_attributes(queue_url, attributes=['CreatedTimestamp']) - - counters = self._get_sqs_counters() - time_counters_got = time.time() - - time_passed = time_counters_got - int(attributes['CreatedTimestamp']) - if time_passed < 2: - logging.debug('Time passed: {}'.format(time_passed)) - counter = self._get_counter_value(counters, counter_labels, None) - assert_that(counter, none()) - else: - logging.debug('Too much time passed: {}'.format(time_passed)) - - attempts = 50 - while attempts: - attempts -= 1 - - counters = self._get_sqs_counters() - counter = self._get_counter_value(counters, counter_labels, None) - if counter is None and attempts: - time.sleep(0.2) - continue - - assert_that(counter, equal_to(0)) - break + return config_generator + + def test_export_delay(self): + counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'RequestTimeouts', + } + + queue_url = self._create_queue_and_assert(self.queue_name) + attributes = self._sqs_api.get_queue_attributes(queue_url, attributes=['CreatedTimestamp']) + + counters = self._get_sqs_counters() + time_counters_got = time.time() + + time_passed = time_counters_got - int(attributes['CreatedTimestamp']) + if time_passed < 2: + logging.debug('Time passed: {}'.format(time_passed)) + counter = self._get_counter_value(counters, counter_labels, None) + assert_that(counter, none()) + else: + logging.debug('Too much time passed: {}'.format(time_passed)) + + attempts = 50 + while attempts: + attempts -= 1 + + counters = self._get_sqs_counters() + counter = self._get_counter_value(counters, counter_labels, None) + if counter is None and attempts: + time.sleep(0.2) + continue + + assert_that(counter, equal_to(0)) + break diff --git a/ydb/tests/functional/sqs/test_fifo_messaging.py 
b/ydb/tests/functional/sqs/test_fifo_messaging.py index 6410b219663..0e11202a3fa 100644 --- a/ydb/tests/functional/sqs/test_fifo_messaging.py +++ b/ydb/tests/functional/sqs/test_fifo_messaging.py @@ -4,11 +4,11 @@ import logging import time import pytest -from hamcrest import assert_that, equal_to, not_none, greater_than, less_than_or_equal_to, has_items, raises +from hamcrest import assert_that, equal_to, not_none, greater_than, less_than_or_equal_to, has_items, raises from sqs_matchers import ReadResponseMatcher, extract_message_ids -from sqs_test_base import KikimrSqsTestBase, get_test_with_sqs_installation_by_path, get_test_with_sqs_tenant_installation, VISIBILITY_CHANGE_METHOD_PARAMS +from sqs_test_base import KikimrSqsTestBase, get_test_with_sqs_installation_by_path, get_test_with_sqs_tenant_installation, VISIBILITY_CHANGE_METHOD_PARAMS class SqsFifoMicroBatchTest(KikimrSqsTestBase): @@ -40,51 +40,51 @@ class TestSqsFifoMicroBatchesWithPath(get_test_with_sqs_installation_by_path(Sqs pass -class SqsFifoMessagingTest(KikimrSqsTestBase): +class SqsFifoMessagingTest(KikimrSqsTestBase): def setup_method(self, method=None): - super(SqsFifoMessagingTest, self).setup_method(method) + super(SqsFifoMessagingTest, self).setup_method(method) self.queue_name = self.queue_name + ".fifo" - @classmethod - def _setup_config_generator(cls): - config_generator = super(SqsFifoMessagingTest, cls)._setup_config_generator() + @classmethod + def _setup_config_generator(cls): + config_generator = super(SqsFifoMessagingTest, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['group_selection_batch_size'] = 100 - return config_generator - + return config_generator + def test_only_single_read_infly_from_fifo(self): self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=10, read_count=1, visibility_timeout=1000, - msg_body_template=self._msg_body_template, is_fifo=True + self.queue_name, send_count=10, read_count=1, visibility_timeout=1000, + msg_body_template=self._msg_body_template, is_fifo=True ) self._read_messages_and_assert( - self.queue_url, messages_count=10, matcher=ReadResponseMatcher().with_n_messages(0) + self.queue_url, messages_count=10, matcher=ReadResponseMatcher().with_n_messages(0) ) def test_fifo_read_delete_single_message(self): created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) message_ids = self._send_messages( - created_queue_url, message_count=10, msg_body_template=self._msg_body_template, is_fifo=True, group_id='group' + created_queue_url, message_count=10, msg_body_template=self._msg_body_template, is_fifo=True, group_id='group' ) read_result = self._read_messages_and_assert( - self.queue_url, messages_count=1, matcher=ReadResponseMatcher().with_message_ids(message_ids[:1]) + self.queue_url, messages_count=1, matcher=ReadResponseMatcher().with_message_ids(message_ids[:1]) ) handle = read_result[0]['ReceiptHandle'] assert_that( self._sqs_api.delete_message(self.queue_url, handle), not_none() ) self._read_messages_and_assert( - self.queue_url, messages_count=1, matcher=ReadResponseMatcher().with_message_ids(message_ids[1:2]) + self.queue_url, messages_count=1, matcher=ReadResponseMatcher().with_message_ids(message_ids[1:2]) ) - counters = self._get_sqs_counters() - delete_counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'DeleteMessage_Count', - } - assert_that(self._get_counter_value(counters, delete_counter_labels, 0), equal_to(1)) - + counters = 
self._get_sqs_counters() + delete_counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'DeleteMessage_Count', + } + assert_that(self._get_counter_value(counters, delete_counter_labels, 0), equal_to(1)) + def test_write_and_read_to_different_groups(self): seq_no = 1 self._create_queue_and_assert(self.queue_name, is_fifo=True) @@ -97,7 +97,7 @@ class SqsFifoMessagingTest(KikimrSqsTestBase): message_ids.append(msg_id) result = self._read_messages_and_assert( - self.queue_url, 10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_n_messages(10) + self.queue_url, 10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_n_messages(10) ) received_message_ids = extract_message_ids(result) assert_that( @@ -117,12 +117,12 @@ class SqsFifoMessagingTest(KikimrSqsTestBase): first_message_ids = extract_message_ids( self._read_messages_and_assert( - self.queue_url, 10, ReadResponseMatcher().with_n_messages(10), visibility_timeout=1000 + self.queue_url, 10, ReadResponseMatcher().with_n_messages(10), visibility_timeout=1000 ) ) second_message_ids = extract_message_ids( self._read_messages_and_assert( - self.queue_url, 10, ReadResponseMatcher().with_n_messages(10), visibility_timeout=1000 + self.queue_url, 10, ReadResponseMatcher().with_n_messages(10), visibility_timeout=1000 ) ) @@ -131,20 +131,20 @@ class SqsFifoMessagingTest(KikimrSqsTestBase): equal_to(len(first_message_ids) + len(second_message_ids)) ) self._read_messages_and_assert( - self.queue_url, 10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_n_messages(0) + self.queue_url, 10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_n_messages(0) ) def test_send_and_read_multiple_messages(self): queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) first_message_id = self._send_message_and_assert( - queue_url, self._msg_body_template.format('0'), seq_no=1, group_id='group' + queue_url, self._msg_body_template.format('0'), seq_no=1, group_id='group' ) time.sleep(5) self._send_message_and_assert( - queue_url, self._msg_body_template.format('1'), seq_no=2, group_id='group' + queue_url, self._msg_body_template.format('1'), seq_no=2, group_id='group' ) self._read_messages_and_assert( - queue_url, messages_count=1, + queue_url, messages_count=1, matcher=ReadResponseMatcher().with_message_ids([first_message_id, ]) ) @@ -162,7 +162,7 @@ class SqsFifoMessagingTest(KikimrSqsTestBase): ) time.sleep(5) self._read_messages_and_assert( - queue_url, messages_count=10, visibility_timeout=1000, + queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids( [first_pack_ids[0], second_pack_ids[0]] ) @@ -172,23 +172,23 @@ class SqsFifoMessagingTest(KikimrSqsTestBase): group_id='3' ) self._read_messages_and_assert( - queue_url, messages_count=10, visibility_timeout=1000, + queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids(third_pack_ids[:1]) ) def test_visibility_timeout_works(self): self._create_queue_send_x_messages_read_y_messages( self.queue_name, send_count=5, read_count=1, visibility_timeout=10, - msg_body_template=self._msg_body_template, is_fifo=True, group_id='1' + msg_body_template=self._msg_body_template, is_fifo=True, group_id='1' ) second_pack_ids = self._send_messages(self.queue_url, 5, group_id='2', is_fifo=True) self._read_messages_and_assert( - self.queue_url, messages_count=5, 
matcher=ReadResponseMatcher().with_these_or_more_message_ids(second_pack_ids[:1]), + self.queue_url, messages_count=5, matcher=ReadResponseMatcher().with_these_or_more_message_ids(second_pack_ids[:1]), visibility_timeout=10 ) time.sleep(12) self._read_messages_and_assert( - self.queue_url, messages_count=5, visibility_timeout=1000, matcher=ReadResponseMatcher().with_these_or_more_message_ids( + self.queue_url, messages_count=5, visibility_timeout=1000, matcher=ReadResponseMatcher().with_these_or_more_message_ids( [self.message_ids[0], second_pack_ids[0]] ) ) @@ -196,7 +196,7 @@ class SqsFifoMessagingTest(KikimrSqsTestBase): def test_delete_message_works(self): self._create_queue_send_x_messages_read_y_messages( self.queue_name, send_count=10, read_count=1, visibility_timeout=1, - msg_body_template=self._msg_body_template, is_fifo=True + msg_body_template=self._msg_body_template, is_fifo=True ) handle = self.read_result[0]['ReceiptHandle'] @@ -205,31 +205,31 @@ class SqsFifoMessagingTest(KikimrSqsTestBase): ) time.sleep(1) self._read_messages_and_assert( - self.queue_url, messages_count=5, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids(self.message_ids[1:2]) - ) - - counters = self._get_sqs_counters() - delete_counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'DeleteMessage_Count', - } - assert_that(self._get_counter_value(counters, delete_counter_labels, 0), equal_to(1)) - - # break a queue and check failure - self._break_queue(self._username, self.queue_name, True) - - handle_2 = self.read_result[0]['ReceiptHandle'] - - def call_delete(): - self._sqs_api.delete_message(self.queue_url, handle_2) - - assert_that( - call_delete, - raises(RuntimeError, pattern='InternalFailure') + self.queue_url, messages_count=5, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids(self.message_ids[1:2]) ) + counters = self._get_sqs_counters() + delete_counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'DeleteMessage_Count', + } + assert_that(self._get_counter_value(counters, delete_counter_labels, 0), equal_to(1)) + + # break a queue and check failure + self._break_queue(self._username, self.queue_name, True) + + handle_2 = self.read_result[0]['ReceiptHandle'] + + def call_delete(): + self._sqs_api.delete_message(self.queue_url, handle_2) + + assert_that( + call_delete, + raises(RuntimeError, pattern='InternalFailure') + ) + def test_write_read_delete_many_groups(self): queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) message_ids = {} @@ -256,204 +256,204 @@ class SqsFifoMessagingTest(KikimrSqsTestBase): ).with_messages_data( [self._msg_body_template.format(1)] + [self._msg_body_template.format(i*5) for i in range(1, 10)] ) - self._read_messages_and_assert(queue_url, 10, matcher=matcher, visibility_timeout=1000) - - def test_queue_attributes(self): - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) - attributes = self._sqs_api.get_queue_attributes(queue_url) - assert_that(attributes, has_items( - 'FifoQueue', - 'ContentBasedDeduplication', - 'ApproximateNumberOfMessages', - 'ApproximateNumberOfMessagesDelayed', - 'ApproximateNumberOfMessagesNotVisible', - 'CreatedTimestamp', - 'DelaySeconds', - 'MaximumMessageSize', - 'MessageRetentionPeriod', - 'ReceiveMessageWaitTimeSeconds', - 'VisibilityTimeout', - )) - assert_that(attributes['VisibilityTimeout'], equal_to('30')) - - 
self._sqs_api.set_queue_attributes(queue_url, {'VisibilityTimeout': '2'}) - attributes = self._sqs_api.get_queue_attributes(queue_url) - assert_that(attributes['VisibilityTimeout'], equal_to('2')) - - def test_validates_group_id(self): - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) - - def send_caller(group_id): - def call_send(): - self._sqs_api.send_message(queue_url, 'body', deduplication_id='42', group_id=group_id) - - return call_send - - def check(group_id): - assert_that( - send_caller(group_id), - raises(RuntimeError, pattern='(MissingParameter|InvalidParameterValue)') - ) - - check('§') - check('') - check(None) # without - - def test_validates_deduplication_id(self): - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) - - def send_caller(deduplication_id): - def call_send(): - self._sqs_api.send_message(queue_url, 'body', deduplication_id=deduplication_id, group_id='42') - - return call_send - - def check(deduplication_id): - assert_that( - send_caller(deduplication_id), - raises(RuntimeError, pattern='(MissingParameter|InvalidParameterValue)') - ) - - check('§') - check('') - check(None) # without - - def test_validates_receive_attempt_id(self): - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) - - def receive_caller(receive_request_attempt_id): - def call_receive(): - self._sqs_api.receive_message(queue_url, 'body', receive_request_attempt_id=receive_request_attempt_id) - - return call_receive - - def check(receive_request_attempt_id): - assert_that( - receive_caller(receive_request_attempt_id), - raises(RuntimeError, pattern='InvalidParameterValue') - ) - - check('§') - - @pytest.mark.parametrize('content_based', [True, False], ids=['content_based', 'by_deduplication_id']) - def test_deduplication(self, content_based): - attributes = {} - if content_based: - attributes['ContentBasedDeduplication'] = 'true' - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True, use_http=True, attributes=attributes) - - if content_based: - deduplication_id = None - other_deduplication_id = None - body_1 = 'body' - body_2 = 'body' - body_3 = 'other_body' - else: - deduplication_id = 'trololo' - other_deduplication_id = '42' - body_1 = 'body_1' - body_2 = 'body_1' - body_3 = 'body_1' - - def get_deduplicated_messages(): - counters = self._get_sqs_counters() - labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'SendMessage_DeduplicationCount', - } - return self._get_counter_value(counters, labels, 0) - - self._sqs_api.send_message(queue_url, body_1, deduplication_id=deduplication_id, group_id='1') - - deduplicated = get_deduplicated_messages() - self._sqs_api.send_message(queue_url, body_2, deduplication_id=deduplication_id, group_id='2') - assert_that(get_deduplicated_messages(), equal_to(deduplicated + 1)) - - self._sqs_api.send_message(queue_url, body_3, deduplication_id=other_deduplication_id, group_id='3') - assert_that(get_deduplicated_messages(), equal_to(deduplicated + 1)) - - self._read_messages_and_assert(queue_url, 10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_n_messages(2), wait_timeout=3) - - @pytest.mark.parametrize('after_crutch_batch', [False, True], ids=['standard_mode', 'after_crutch_batch']) - def test_receive_attempt_reloads_same_messages(self, after_crutch_batch): - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) - + self._read_messages_and_assert(queue_url, 10, matcher=matcher, 
visibility_timeout=1000) + + def test_queue_attributes(self): + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) + attributes = self._sqs_api.get_queue_attributes(queue_url) + assert_that(attributes, has_items( + 'FifoQueue', + 'ContentBasedDeduplication', + 'ApproximateNumberOfMessages', + 'ApproximateNumberOfMessagesDelayed', + 'ApproximateNumberOfMessagesNotVisible', + 'CreatedTimestamp', + 'DelaySeconds', + 'MaximumMessageSize', + 'MessageRetentionPeriod', + 'ReceiveMessageWaitTimeSeconds', + 'VisibilityTimeout', + )) + assert_that(attributes['VisibilityTimeout'], equal_to('30')) + + self._sqs_api.set_queue_attributes(queue_url, {'VisibilityTimeout': '2'}) + attributes = self._sqs_api.get_queue_attributes(queue_url) + assert_that(attributes['VisibilityTimeout'], equal_to('2')) + + def test_validates_group_id(self): + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) + + def send_caller(group_id): + def call_send(): + self._sqs_api.send_message(queue_url, 'body', deduplication_id='42', group_id=group_id) + + return call_send + + def check(group_id): + assert_that( + send_caller(group_id), + raises(RuntimeError, pattern='(MissingParameter|InvalidParameterValue)') + ) + + check('§') + check('') + check(None) # without + + def test_validates_deduplication_id(self): + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) + + def send_caller(deduplication_id): + def call_send(): + self._sqs_api.send_message(queue_url, 'body', deduplication_id=deduplication_id, group_id='42') + + return call_send + + def check(deduplication_id): + assert_that( + send_caller(deduplication_id), + raises(RuntimeError, pattern='(MissingParameter|InvalidParameterValue)') + ) + + check('§') + check('') + check(None) # without + + def test_validates_receive_attempt_id(self): + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) + + def receive_caller(receive_request_attempt_id): + def call_receive(): + self._sqs_api.receive_message(queue_url, 'body', receive_request_attempt_id=receive_request_attempt_id) + + return call_receive + + def check(receive_request_attempt_id): + assert_that( + receive_caller(receive_request_attempt_id), + raises(RuntimeError, pattern='InvalidParameterValue') + ) + + check('§') + + @pytest.mark.parametrize('content_based', [True, False], ids=['content_based', 'by_deduplication_id']) + def test_deduplication(self, content_based): + attributes = {} + if content_based: + attributes['ContentBasedDeduplication'] = 'true' + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True, use_http=True, attributes=attributes) + + if content_based: + deduplication_id = None + other_deduplication_id = None + body_1 = 'body' + body_2 = 'body' + body_3 = 'other_body' + else: + deduplication_id = 'trololo' + other_deduplication_id = '42' + body_1 = 'body_1' + body_2 = 'body_1' + body_3 = 'body_1' + + def get_deduplicated_messages(): + counters = self._get_sqs_counters() + labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'SendMessage_DeduplicationCount', + } + return self._get_counter_value(counters, labels, 0) + + self._sqs_api.send_message(queue_url, body_1, deduplication_id=deduplication_id, group_id='1') + + deduplicated = get_deduplicated_messages() + self._sqs_api.send_message(queue_url, body_2, deduplication_id=deduplication_id, group_id='2') + assert_that(get_deduplicated_messages(), equal_to(deduplicated + 1)) + + self._sqs_api.send_message(queue_url, 
body_3, deduplication_id=other_deduplication_id, group_id='3') + assert_that(get_deduplicated_messages(), equal_to(deduplicated + 1)) + + self._read_messages_and_assert(queue_url, 10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_n_messages(2), wait_timeout=3) + + @pytest.mark.parametrize('after_crutch_batch', [False, True], ids=['standard_mode', 'after_crutch_batch']) + def test_receive_attempt_reloads_same_messages(self, after_crutch_batch): + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) + groups_selection_batch_size = self.config_generator.yaml_config['sqs_config']['group_selection_batch_size'] - assert_that(groups_selection_batch_size, equal_to(100)) - - groups_count = groups_selection_batch_size + 50 if after_crutch_batch else 15 + assert_that(groups_selection_batch_size, equal_to(100)) + + groups_count = groups_selection_batch_size + 50 if after_crutch_batch else 15 for group_number in range(groups_count): - self._send_messages(queue_url, 1, is_fifo=True, group_id='group_{}'.format(group_number)) - - if after_crutch_batch: - first_lock_messages_count = groups_selection_batch_size + 10 - self._read_messages_and_assert( - queue_url, messages_count=first_lock_messages_count, visibility_timeout=1000, - matcher=ReadResponseMatcher().with_n_messages(first_lock_messages_count) - ) - - receive_attempt_id = 'my_attempt' - other_receive_attempt_id = 'my_other_attempt' - - def receive(attempt_id): - read_result = self._sqs_api.receive_message(queue_url, max_number_of_messages=10, visibility_timeout=1000, receive_request_attempt_id=attempt_id) + self._send_messages(queue_url, 1, is_fifo=True, group_id='group_{}'.format(group_number)) + + if after_crutch_batch: + first_lock_messages_count = groups_selection_batch_size + 10 + self._read_messages_and_assert( + queue_url, messages_count=first_lock_messages_count, visibility_timeout=1000, + matcher=ReadResponseMatcher().with_n_messages(first_lock_messages_count) + ) + + receive_attempt_id = 'my_attempt' + other_receive_attempt_id = 'my_other_attempt' + + def receive(attempt_id): + read_result = self._sqs_api.receive_message(queue_url, max_number_of_messages=10, visibility_timeout=1000, receive_request_attempt_id=attempt_id) message_set = set([res['MessageId'] for res in read_result]) - return read_result, message_set - - read_result_1, message_set_1 = receive(receive_attempt_id) - read_result_2, message_set_2 = receive(receive_attempt_id) - read_result_3, message_set_3 = receive(other_receive_attempt_id) - - assert_that(len(read_result_1), greater_than(0)) - assert_that(len(read_result_1), equal_to(len(read_result_2))) - assert_that(len(read_result_1) + len(read_result_3), less_than_or_equal_to(groups_count)) - - assert_that(len(message_set_1 & message_set_2), equal_to(len(message_set_1))) - assert_that(len(message_set_1 & message_set_3), equal_to(0)) - - @pytest.mark.parametrize(**VISIBILITY_CHANGE_METHOD_PARAMS) - def test_visibility_change_disables_receive_attempt_id(self, delete_message): - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) - groups_count = 5 + return read_result, message_set + + read_result_1, message_set_1 = receive(receive_attempt_id) + read_result_2, message_set_2 = receive(receive_attempt_id) + read_result_3, message_set_3 = receive(other_receive_attempt_id) + + assert_that(len(read_result_1), greater_than(0)) + assert_that(len(read_result_1), equal_to(len(read_result_2))) + assert_that(len(read_result_1) + len(read_result_3), 
less_than_or_equal_to(groups_count)) + + assert_that(len(message_set_1 & message_set_2), equal_to(len(message_set_1))) + assert_that(len(message_set_1 & message_set_3), equal_to(0)) + + @pytest.mark.parametrize(**VISIBILITY_CHANGE_METHOD_PARAMS) + def test_visibility_change_disables_receive_attempt_id(self, delete_message): + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) + groups_count = 5 for group_number in range(groups_count): - self._send_messages(queue_url, 1, is_fifo=True, group_id='group_{}'.format(group_number)) - - receive_attempt_id = 'my_attempt' - read_result_1 = self._sqs_api.receive_message(queue_url, max_number_of_messages=10, visibility_timeout=1000, receive_request_attempt_id=receive_attempt_id) - assert_that(len(read_result_1), greater_than(0)) - receipt_handle = read_result_1[0]['ReceiptHandle'] - logging.debug('{} message with receipt handle {}'.format('Delete' if delete_message else 'Change visibility of', receipt_handle)) - if delete_message: - self._sqs_api.delete_message(queue_url, receipt_handle) - else: - self._sqs_api.change_message_visibility(queue_url, receipt_handle, visibility_timeout=1500) - read_result_2 = self._sqs_api.receive_message(queue_url, max_number_of_messages=10, visibility_timeout=1000, receive_request_attempt_id=receive_attempt_id) - if read_result_2 is None: - read_result_2 = [] - assert_that(len(read_result_1) + len(read_result_2), less_than_or_equal_to(groups_count)) - if read_result_2: + self._send_messages(queue_url, 1, is_fifo=True, group_id='group_{}'.format(group_number)) + + receive_attempt_id = 'my_attempt' + read_result_1 = self._sqs_api.receive_message(queue_url, max_number_of_messages=10, visibility_timeout=1000, receive_request_attempt_id=receive_attempt_id) + assert_that(len(read_result_1), greater_than(0)) + receipt_handle = read_result_1[0]['ReceiptHandle'] + logging.debug('{} message with receipt handle {}'.format('Delete' if delete_message else 'Change visibility of', receipt_handle)) + if delete_message: + self._sqs_api.delete_message(queue_url, receipt_handle) + else: + self._sqs_api.change_message_visibility(queue_url, receipt_handle, visibility_timeout=1500) + read_result_2 = self._sqs_api.receive_message(queue_url, max_number_of_messages=10, visibility_timeout=1000, receive_request_attempt_id=receive_attempt_id) + if read_result_2 is None: + read_result_2 = [] + assert_that(len(read_result_1) + len(read_result_2), less_than_or_equal_to(groups_count)) + if read_result_2: message_set_1 = set([res['MessageId'] for res in read_result_1]) message_set_2 = set([res['MessageId'] for res in read_result_2]) - assert_that(len(message_set_1 & message_set_2), equal_to(0)) - - def test_crutch_groups_selection_algorithm_selects_second_group_batch(self): - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) + assert_that(len(message_set_1 & message_set_2), equal_to(0)) + + def test_crutch_groups_selection_algorithm_selects_second_group_batch(self): + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) groups_selection_batch_size = self.config_generator.yaml_config['sqs_config']['group_selection_batch_size'] - groups_count = groups_selection_batch_size + 50 + groups_count = groups_selection_batch_size + 50 for group_number in range(groups_count): - self._send_messages(queue_url, 1, is_fifo=True, group_id='group_{}'.format(group_number)) - - self._read_messages_and_assert( - queue_url, messages_count=groups_count, visibility_timeout=1000, - 
matcher=ReadResponseMatcher().with_n_messages(groups_count) - ) - - -class TestSqsFifoMessagingWithTenant(get_test_with_sqs_tenant_installation(SqsFifoMessagingTest)): - pass - - -class TestSqsFifoMessagingWithPath(get_test_with_sqs_installation_by_path(SqsFifoMessagingTest)): - pass + self._send_messages(queue_url, 1, is_fifo=True, group_id='group_{}'.format(group_number)) + + self._read_messages_and_assert( + queue_url, messages_count=groups_count, visibility_timeout=1000, + matcher=ReadResponseMatcher().with_n_messages(groups_count) + ) + + +class TestSqsFifoMessagingWithTenant(get_test_with_sqs_tenant_installation(SqsFifoMessagingTest)): + pass + + +class TestSqsFifoMessagingWithPath(get_test_with_sqs_installation_by_path(SqsFifoMessagingTest)): + pass diff --git a/ydb/tests/functional/sqs/test_garbage_collection.py b/ydb/tests/functional/sqs/test_garbage_collection.py index 992c8a65138..9cf07543383 100644 --- a/ydb/tests/functional/sqs/test_garbage_collection.py +++ b/ydb/tests/functional/sqs/test_garbage_collection.py @@ -2,225 +2,225 @@ # -*- coding: utf-8 -*- import logging import time -import multiprocessing -import random +import multiprocessing +import random import pytest -from hamcrest import assert_that, equal_to, less_than_or_equal_to +from hamcrest import assert_that, equal_to, less_than_or_equal_to from sqs_requests_client import SqsHttpApi -from sqs_matchers import ReadResponseMatcher - +from sqs_matchers import ReadResponseMatcher + from sqs_test_base import to_bytes -from sqs_test_base import KikimrSqsTestBase, VISIBILITY_CHANGE_METHOD_PARAMS, IS_FIFO_PARAMS - +from sqs_test_base import KikimrSqsTestBase, VISIBILITY_CHANGE_METHOD_PARAMS, IS_FIFO_PARAMS + -def send_message(server, username, queue_url, sqs_port, body, seq_no, group_id): +def send_message(server, username, queue_url, sqs_port, body, seq_no, group_id): is_fifo = to_bytes(queue_url).endswith(to_bytes('.fifo')) - api = SqsHttpApi( - server, - sqs_port, - username, - raise_on_error=True, - timeout=None - ) - api.send_message( - queue_url, - body, - group_id=group_id if is_fifo else None, - deduplication_id=seq_no if is_fifo else None) - - -def send_message_pack(args): - server, username, queue_url, sqs_port, body, seq_no, group_id = args - send_message(server, username, queue_url, sqs_port, body, seq_no, group_id) - - -def delete_message(server, username, queue_url, sqs_port, receipt_handle): - api = SqsHttpApi( - server, - sqs_port, - username, - raise_on_error=True, - timeout=None - ) + api = SqsHttpApi( + server, + sqs_port, + username, + raise_on_error=True, + timeout=None + ) + api.send_message( + queue_url, + body, + group_id=group_id if is_fifo else None, + deduplication_id=seq_no if is_fifo else None) + + +def send_message_pack(args): + server, username, queue_url, sqs_port, body, seq_no, group_id = args + send_message(server, username, queue_url, sqs_port, body, seq_no, group_id) + + +def delete_message(server, username, queue_url, sqs_port, receipt_handle): + api = SqsHttpApi( + server, + sqs_port, + username, + raise_on_error=True, + timeout=None + ) api.delete_message(to_bytes(queue_url), receipt_handle) - - -def delete_message_pack(args): - server, username, queue_url, sqs_port, receipt_handle = args - delete_message(server, username, queue_url, sqs_port, receipt_handle) - - -class TestSqsGarbageCollection(KikimrSqsTestBase): - @classmethod - def _setup_config_generator(cls): - config_generator = super(TestSqsGarbageCollection, cls)._setup_config_generator() + + +def delete_message_pack(args): + 
server, username, queue_url, sqs_port, receipt_handle = args + delete_message(server, username, queue_url, sqs_port, receipt_handle) + + +class TestSqsGarbageCollection(KikimrSqsTestBase): + @classmethod + def _setup_config_generator(cls): + config_generator = super(TestSqsGarbageCollection, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['min_message_retention_period_ms'] = 3000 config_generator.yaml_config['sqs_config']['cleanup_batch_size'] = 100 config_generator.yaml_config['sqs_config']['cleanup_period_ms'] = 2000 config_generator.yaml_config['sqs_config']['deduplication_period_ms'] = 2000 config_generator.yaml_config['sqs_config']['groups_read_attempt_ids_period_ms'] = 2000 - return config_generator - - def fill_queue_with_messages(self, number_of_mesages_to_write, is_fifo, processes=None, random_groups_max_count=50): - args = [ - ( + return config_generator + + def fill_queue_with_messages(self, number_of_mesages_to_write, is_fifo, processes=None, random_groups_max_count=50): + args = [ + ( self.cluster.nodes[1].host, - self._username, - self.queue_url, - self.cluster_nodes[0].sqs_port, - self._msg_body_template.format(i), - self.seq_no + i, - 'group_{}'.format(random.randint(1, random_groups_max_count)) + self._username, + self.queue_url, + self.cluster_nodes[0].sqs_port, + self._msg_body_template.format(i), + self.seq_no + i, + 'group_{}'.format(random.randint(1, random_groups_max_count)) ) for i in range(number_of_mesages_to_write) - ] - self.seq_no += number_of_mesages_to_write - processes_to_write = processes - if processes_to_write is None: - processes_to_write = 50 - pool = multiprocessing.Pool(processes=processes_to_write) - pool.map(send_message_pack, args) - - def delete_messages(self, queue_url, receipt_handles): - args = [ - ( + ] + self.seq_no += number_of_mesages_to_write + processes_to_write = processes + if processes_to_write is None: + processes_to_write = 50 + pool = multiprocessing.Pool(processes=processes_to_write) + pool.map(send_message_pack, args) + + def delete_messages(self, queue_url, receipt_handles): + args = [ + ( self.cluster.nodes[1].host, - self._username, - self.queue_url, - self.cluster_nodes[0].sqs_port, - receipt_handle - ) for receipt_handle in receipt_handles - ] - pool = multiprocessing.Pool(processes=50) - pool.map(delete_message_pack, args) - - def wait_fifo_table_empty(self, table_name): - queue_version = self._get_queue_version_number(self._username, self.queue_name) - table_path = self._smart_make_table_path(self._username, self.queue_name, queue_version, None, table_name) - attempts = 150 - while attempts: - attempts -= 1 - - lines_count = self._get_table_lines_count(table_path) - logging.debug('Received "{}" table rows count: {}'.format(table_name, lines_count)) - if lines_count > 0 and attempts: - time.sleep(0.2) - continue - - assert_that(lines_count, equal_to(0)) - break - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_removes_messages_by_retention_time(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - - number_of_mesages_to_write = 110 if is_fifo else 220 - processes = 10 if is_fifo else 30 - self.fill_queue_with_messages(number_of_mesages_to_write, is_fifo, processes=processes) - time_after_send = time.time() - - attempts = 150 - while attempts: - attempts -= 1 - attributes = self._sqs_api.get_queue_attributes(self.queue_url) - number_of_messages = int(attributes['ApproximateNumberOfMessages']) - - if 
abs(number_of_messages - number_of_mesages_to_write) > processes and attempts: - time.sleep(0.2) - continue - - assert_that( - abs(number_of_messages - number_of_mesages_to_write), - less_than_or_equal_to(processes) - ) - - # Read a message. It will trigger moving a batch of messages from one shard to infly table (for std queue). - self._read_messages_and_assert( - self.queue_url, 1, ReadResponseMatcher().with_n_messages(1) - ) - - retention = 3 - self._sqs_api.set_queue_attributes(self.queue_url, {'MessageRetentionPeriod': str(retention)}) - assert_that( - self._sqs_api.get_queue_attributes(self.queue_url)['MessageRetentionPeriod'], - equal_to(str(retention)) - ) - - now = time.time() - if now < time_after_send + retention: - time.sleep(time_after_send + retention - now) - - number_of_messages = None + self._username, + self.queue_url, + self.cluster_nodes[0].sqs_port, + receipt_handle + ) for receipt_handle in receipt_handles + ] + pool = multiprocessing.Pool(processes=50) + pool.map(delete_message_pack, args) + + def wait_fifo_table_empty(self, table_name): + queue_version = self._get_queue_version_number(self._username, self.queue_name) + table_path = self._smart_make_table_path(self._username, self.queue_name, queue_version, None, table_name) + attempts = 150 + while attempts: + attempts -= 1 + + lines_count = self._get_table_lines_count(table_path) + logging.debug('Received "{}" table rows count: {}'.format(table_name, lines_count)) + if lines_count > 0 and attempts: + time.sleep(0.2) + continue + + assert_that(lines_count, equal_to(0)) + break + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_removes_messages_by_retention_time(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + + number_of_mesages_to_write = 110 if is_fifo else 220 + processes = 10 if is_fifo else 30 + self.fill_queue_with_messages(number_of_mesages_to_write, is_fifo, processes=processes) + time_after_send = time.time() + + attempts = 150 + while attempts: + attempts -= 1 + attributes = self._sqs_api.get_queue_attributes(self.queue_url) + number_of_messages = int(attributes['ApproximateNumberOfMessages']) + + if abs(number_of_messages - number_of_mesages_to_write) > processes and attempts: + time.sleep(0.2) + continue + + assert_that( + abs(number_of_messages - number_of_mesages_to_write), + less_than_or_equal_to(processes) + ) + + # Read a message. It will trigger moving a batch of messages from one shard to infly table (for std queue). 
+ self._read_messages_and_assert( + self.queue_url, 1, ReadResponseMatcher().with_n_messages(1) + ) + + retention = 3 + self._sqs_api.set_queue_attributes(self.queue_url, {'MessageRetentionPeriod': str(retention)}) + assert_that( + self._sqs_api.get_queue_attributes(self.queue_url)['MessageRetentionPeriod'], + equal_to(str(retention)) + ) + + now = time.time() + if now < time_after_send + retention: + time.sleep(time_after_send + retention - now) + + number_of_messages = None for i in range(100): - attributes = self._sqs_api.get_queue_attributes(self.queue_url) - number_of_messages = int(attributes['ApproximateNumberOfMessages']) - if number_of_messages == 0: - break - else: - time.sleep(0.5) - - assert_that(number_of_messages, equal_to(0)) - self._check_queue_tables_are_empty() - - def test_cleanups_deduplication_table(self): - self.queue_name = self.queue_name + '.fifo' - self._create_queue_and_assert(self.queue_name, is_fifo=True) - - # do the same again to ensure that this process will not stop + attributes = self._sqs_api.get_queue_attributes(self.queue_url) + number_of_messages = int(attributes['ApproximateNumberOfMessages']) + if number_of_messages == 0: + break + else: + time.sleep(0.5) + + assert_that(number_of_messages, equal_to(0)) + self._check_queue_tables_are_empty() + + def test_cleanups_deduplication_table(self): + self.queue_name = self.queue_name + '.fifo' + self._create_queue_and_assert(self.queue_name, is_fifo=True) + + # do the same again to ensure that this process will not stop for i in range(2): - self.fill_queue_with_messages(150, True) - time.sleep(2) - self.wait_fifo_table_empty('Deduplication') - - @pytest.mark.parametrize('random_groups_max_count', [30, 200]) - def test_cleanups_reads_table(self, random_groups_max_count): - self.queue_name = self.queue_name + '.fifo' - self._create_queue_and_assert(self.queue_name, is_fifo=True) - - # do the same again to ensure that this process will not stop + self.fill_queue_with_messages(150, True) + time.sleep(2) + self.wait_fifo_table_empty('Deduplication') + + @pytest.mark.parametrize('random_groups_max_count', [30, 200]) + def test_cleanups_reads_table(self, random_groups_max_count): + self.queue_name = self.queue_name + '.fifo' + self._create_queue_and_assert(self.queue_name, is_fifo=True) + + # do the same again to ensure that this process will not stop for i in range(2): - self.fill_queue_with_messages(150, True, processes=None, random_groups_max_count=random_groups_max_count) - - read_result = self._read_messages_and_assert(self.queue_url, 500, matcher=ReadResponseMatcher().with_n_or_more_messages(1), visibility_timeout=1000, wait_timeout=1) - - time.sleep(2) - self.wait_fifo_table_empty('Reads') - - # delete received messages - self.delete_messages(self.queue_url, [r['ReceiptHandle'] for r in read_result]) - - @pytest.mark.parametrize(**VISIBILITY_CHANGE_METHOD_PARAMS) - def test_visibility_change_cleanups_proper_receive_attempt_id(self, delete_message): - self.queue_name = self.queue_name + '.fifo' - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) - groups_count = 5 + self.fill_queue_with_messages(150, True, processes=None, random_groups_max_count=random_groups_max_count) + + read_result = self._read_messages_and_assert(self.queue_url, 500, matcher=ReadResponseMatcher().with_n_or_more_messages(1), visibility_timeout=1000, wait_timeout=1) + + time.sleep(2) + self.wait_fifo_table_empty('Reads') + + # delete received messages + self.delete_messages(self.queue_url, [r['ReceiptHandle'] for r in 
read_result]) + + @pytest.mark.parametrize(**VISIBILITY_CHANGE_METHOD_PARAMS) + def test_visibility_change_cleanups_proper_receive_attempt_id(self, delete_message): + self.queue_name = self.queue_name + '.fifo' + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=True) + groups_count = 5 for group_number in range(groups_count): - self._send_messages(queue_url, 1, is_fifo=True, group_id='group_{}'.format(group_number)) - - receive_attempt_id = 'my_attempt' - read_result_1 = self._sqs_api.receive_message(queue_url, max_number_of_messages=1, visibility_timeout=1000, receive_request_attempt_id=receive_attempt_id) - assert_that(len(read_result_1), equal_to(1)) - receipt_handle = read_result_1[0]['ReceiptHandle'] - time.sleep(2) # oversleep reads valid period - time_before_read = time.time() - read_result_2 = self._sqs_api.receive_message(queue_url, max_number_of_messages=10, visibility_timeout=1000, receive_request_attempt_id=receive_attempt_id) - if read_result_2 is None: - read_result_2 = [] - assert_that(len(read_result_1) + len(read_result_2), less_than_or_equal_to(groups_count)) - if delete_message: - self._sqs_api.delete_message(queue_url, receipt_handle) - else: - self._sqs_api.change_message_visibility(queue_url, receipt_handle, visibility_timeout=1500) - read_result_3 = self._sqs_api.receive_message(queue_url, max_number_of_messages=10, visibility_timeout=1000, receive_request_attempt_id=receive_attempt_id) - time_after_read = time.time() - if read_result_3 is None: - read_result_3 = [] - + self._send_messages(queue_url, 1, is_fifo=True, group_id='group_{}'.format(group_number)) + + receive_attempt_id = 'my_attempt' + read_result_1 = self._sqs_api.receive_message(queue_url, max_number_of_messages=1, visibility_timeout=1000, receive_request_attempt_id=receive_attempt_id) + assert_that(len(read_result_1), equal_to(1)) + receipt_handle = read_result_1[0]['ReceiptHandle'] + time.sleep(2) # oversleep reads valid period + time_before_read = time.time() + read_result_2 = self._sqs_api.receive_message(queue_url, max_number_of_messages=10, visibility_timeout=1000, receive_request_attempt_id=receive_attempt_id) + if read_result_2 is None: + read_result_2 = [] + assert_that(len(read_result_1) + len(read_result_2), less_than_or_equal_to(groups_count)) + if delete_message: + self._sqs_api.delete_message(queue_url, receipt_handle) + else: + self._sqs_api.change_message_visibility(queue_url, receipt_handle, visibility_timeout=1500) + read_result_3 = self._sqs_api.receive_message(queue_url, max_number_of_messages=10, visibility_timeout=1000, receive_request_attempt_id=receive_attempt_id) + time_after_read = time.time() + if read_result_3 is None: + read_result_3 = [] + if read_result_2 and (time_after_read - time_before_read) <= (self.config_generator.yaml_config['sqs_config']['groups_read_attempt_ids_period_ms'] / 1000): message_set_2 = set([res['MessageId'] for res in read_result_2]) message_set_3 = set([res['MessageId'] for res in read_result_3]) - assert_that(len(message_set_2 & message_set_3), equal_to(len(message_set_2))) + assert_that(len(message_set_2 & message_set_3), equal_to(len(message_set_2))) diff --git a/ydb/tests/functional/sqs/test_generic_messaging.py b/ydb/tests/functional/sqs/test_generic_messaging.py index 3f6b2371bab..d6c8ce66b69 100644 --- a/ydb/tests/functional/sqs/test_generic_messaging.py +++ b/ydb/tests/functional/sqs/test_generic_messaging.py @@ -1,315 +1,315 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -import base64 +import base64 import logging import 
time -from collections import OrderedDict +from collections import OrderedDict import pytest -from hamcrest import assert_that, equal_to, not_none, greater_than, has_item, has_items, raises, is_not, not_, empty, instance_of +from hamcrest import assert_that, equal_to, not_none, greater_than, has_item, has_items, raises, is_not, not_, empty, instance_of from sqs_requests_client import SqsMessageAttribute, SqsSendMessageParams, SqsChangeMessageVisibilityParams from sqs_matchers import ReadResponseMatcher, extract_message_ids from sqs_test_base import to_bytes -from sqs_test_base import KikimrSqsTestBase, get_test_with_sqs_installation_by_path, get_test_with_sqs_tenant_installation, IS_FIFO_PARAMS +from sqs_test_base import KikimrSqsTestBase, get_test_with_sqs_installation_by_path, get_test_with_sqs_tenant_installation, IS_FIFO_PARAMS -class SqsGenericMessagingTest(KikimrSqsTestBase): - @pytest.mark.parametrize(**IS_FIFO_PARAMS) +class SqsGenericMessagingTest(KikimrSqsTestBase): + @pytest.mark.parametrize(**IS_FIFO_PARAMS) def test_send_message(self, is_fifo): if is_fifo: self.queue_name = self.queue_name + '.fifo' created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - self.seq_no += 1 - self._send_message_and_assert(created_queue_url, 'test_send_message', seq_no=self.seq_no if is_fifo else None, group_id='group' if is_fifo else None) - - # break a queue and check failure - self._break_queue(self._username, self.queue_name, is_fifo) - - def call_send(): - group = 'trololo' if is_fifo else None - dedup = group - self._sqs_api.send_message( - created_queue_url, 'body', deduplication_id=dedup, group_id=group - ) - - assert_that( - call_send, - raises( - RuntimeError, - pattern='InternalFailure' - ) - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_send_message_batch(self, is_fifo): - queue_attributes = {} - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - queue_attributes['ContentBasedDeduplication'] = 'true' - created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo, use_http=True, attributes=queue_attributes) - group_id = 'group' if is_fifo else None - group_id_2 = 'group2' if is_fifo else None - batch_request = [ - SqsSendMessageParams('body 1', attributes=[SqsMessageAttribute('a', 'String', 'b')], group_id=group_id), - SqsSendMessageParams('body 2', delay_seconds=100500, group_id=group_id), - SqsSendMessageParams('body 3', attributes=[SqsMessageAttribute('x', 'String', 'y')], group_id=group_id), - SqsSendMessageParams('body 4', group_id=group_id_2), - ] - send_message_batch_result = self._sqs_api.send_message_batch(created_queue_url, batch_request) - logging.debug('SendMessageBatch result: {}'.format(send_message_batch_result)) - - assert_that(len(send_message_batch_result), equal_to(4)) - - assert_that(send_message_batch_result[1]['BatchResultErrorEntry']['Code'], equal_to('InvalidParameterValue')) - - assert_that(send_message_batch_result[2]['SendMessageBatchResultEntry']['MD5OfMessageAttributes'], - not_(equal_to(send_message_batch_result[0]['SendMessageBatchResultEntry']['MD5OfMessageAttributes']))) - - assert_that(send_message_batch_result[2]['SendMessageBatchResultEntry']['MD5OfMessageBody'], - not_(equal_to(send_message_batch_result[0]['SendMessageBatchResultEntry']['MD5OfMessageBody']))) - - assert_that(send_message_batch_result[3]['SendMessageBatchResultEntry']['MD5OfMessageBody'], - not_(equal_to(send_message_batch_result[0]['SendMessageBatchResultEntry']['MD5OfMessageBody']))) - - message_ids = ( - 
send_message_batch_result[0]['SendMessageBatchResultEntry']['MessageId'], - send_message_batch_result[2]['SendMessageBatchResultEntry']['MessageId'], - send_message_batch_result[3]['SendMessageBatchResultEntry']['MessageId'], - ) + self.seq_no += 1 + self._send_message_and_assert(created_queue_url, 'test_send_message', seq_no=self.seq_no if is_fifo else None, group_id='group' if is_fifo else None) + + # break a queue and check failure + self._break_queue(self._username, self.queue_name, is_fifo) + + def call_send(): + group = 'trololo' if is_fifo else None + dedup = group + self._sqs_api.send_message( + created_queue_url, 'body', deduplication_id=dedup, group_id=group + ) + + assert_that( + call_send, + raises( + RuntimeError, + pattern='InternalFailure' + ) + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_send_message_batch(self, is_fifo): + queue_attributes = {} + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + queue_attributes['ContentBasedDeduplication'] = 'true' + created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo, use_http=True, attributes=queue_attributes) + group_id = 'group' if is_fifo else None + group_id_2 = 'group2' if is_fifo else None + batch_request = [ + SqsSendMessageParams('body 1', attributes=[SqsMessageAttribute('a', 'String', 'b')], group_id=group_id), + SqsSendMessageParams('body 2', delay_seconds=100500, group_id=group_id), + SqsSendMessageParams('body 3', attributes=[SqsMessageAttribute('x', 'String', 'y')], group_id=group_id), + SqsSendMessageParams('body 4', group_id=group_id_2), + ] + send_message_batch_result = self._sqs_api.send_message_batch(created_queue_url, batch_request) + logging.debug('SendMessageBatch result: {}'.format(send_message_batch_result)) + + assert_that(len(send_message_batch_result), equal_to(4)) + + assert_that(send_message_batch_result[1]['BatchResultErrorEntry']['Code'], equal_to('InvalidParameterValue')) + + assert_that(send_message_batch_result[2]['SendMessageBatchResultEntry']['MD5OfMessageAttributes'], + not_(equal_to(send_message_batch_result[0]['SendMessageBatchResultEntry']['MD5OfMessageAttributes']))) + + assert_that(send_message_batch_result[2]['SendMessageBatchResultEntry']['MD5OfMessageBody'], + not_(equal_to(send_message_batch_result[0]['SendMessageBatchResultEntry']['MD5OfMessageBody']))) + + assert_that(send_message_batch_result[3]['SendMessageBatchResultEntry']['MD5OfMessageBody'], + not_(equal_to(send_message_batch_result[0]['SendMessageBatchResultEntry']['MD5OfMessageBody']))) + + message_ids = ( + send_message_batch_result[0]['SendMessageBatchResultEntry']['MessageId'], + send_message_batch_result[2]['SendMessageBatchResultEntry']['MessageId'], + send_message_batch_result[3]['SendMessageBatchResultEntry']['MessageId'], + ) for i in range(3): - msgs = self._sqs_api.receive_message(created_queue_url, max_number_of_messages=1, - visibility_timeout=1000) - assert_that(len(msgs), equal_to(1)) - assert_that(message_ids, has_item(msgs[0]['MessageId'])) - assert_that( - self._sqs_api.delete_message(created_queue_url, msgs[0]['ReceiptHandle']), not_none() - ) - - msgs = self._sqs_api.receive_message(created_queue_url, max_number_of_messages=10, - visibility_timeout=1000) - assert_that(len([] if msgs is None else msgs), equal_to(0)) - - # break a queue and check failure - self._break_queue(self._username, self.queue_name, is_fifo) - - group = 'trololo' if is_fifo else None - dedup = group - errors = self._sqs_api.send_message_batch( - created_queue_url, 
[SqsSendMessageParams('other body', group_id=group, deduplication_id=dedup)] - ) - assert_that(len(errors), equal_to(1)) - assert_that(errors[0]['BatchResultErrorEntry']['Code'], equal_to('InternalFailure')) - - # Test error handling - def call_send_message_batch_with_greater_than_ten_messages(): - requests = [] + msgs = self._sqs_api.receive_message(created_queue_url, max_number_of_messages=1, + visibility_timeout=1000) + assert_that(len(msgs), equal_to(1)) + assert_that(message_ids, has_item(msgs[0]['MessageId'])) + assert_that( + self._sqs_api.delete_message(created_queue_url, msgs[0]['ReceiptHandle']), not_none() + ) + + msgs = self._sqs_api.receive_message(created_queue_url, max_number_of_messages=10, + visibility_timeout=1000) + assert_that(len([] if msgs is None else msgs), equal_to(0)) + + # break a queue and check failure + self._break_queue(self._username, self.queue_name, is_fifo) + + group = 'trololo' if is_fifo else None + dedup = group + errors = self._sqs_api.send_message_batch( + created_queue_url, [SqsSendMessageParams('other body', group_id=group, deduplication_id=dedup)] + ) + assert_that(len(errors), equal_to(1)) + assert_that(errors[0]['BatchResultErrorEntry']['Code'], equal_to('InternalFailure')) + + # Test error handling + def call_send_message_batch_with_greater_than_ten_messages(): + requests = [] for i in range(11): - requests.append(SqsSendMessageParams('body', attributes=[], group_id=group_id)) - self._sqs_api.send_message_batch(created_queue_url, requests) - - assert_that( - call_send_message_batch_with_greater_than_ten_messages, - raises( - RuntimeError, - pattern='AWS.SimpleQueueService.TooManyEntriesInBatchRequest' - ) - ) - - def call_send_message_batch_with_no_messages(): - requests = [] - self._sqs_api.send_message_batch(created_queue_url, requests) - - assert_that( - call_send_message_batch_with_no_messages, - raises( - RuntimeError, - pattern='AWS.SimpleQueueService.EmptyBatchRequest' - ) - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) + requests.append(SqsSendMessageParams('body', attributes=[], group_id=group_id)) + self._sqs_api.send_message_batch(created_queue_url, requests) + + assert_that( + call_send_message_batch_with_greater_than_ten_messages, + raises( + RuntimeError, + pattern='AWS.SimpleQueueService.TooManyEntriesInBatchRequest' + ) + ) + + def call_send_message_batch_with_no_messages(): + requests = [] + self._sqs_api.send_message_batch(created_queue_url, requests) + + assert_that( + call_send_message_batch_with_no_messages, + raises( + RuntimeError, + pattern='AWS.SimpleQueueService.EmptyBatchRequest' + ) + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) def test_send_and_read_message(self, is_fifo): if is_fifo: self.queue_name = self.queue_name + '.fifo' created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - body = '<' + self._msg_body_template.format('trololo') + '<' # to ensure that we have correct xml - attributes = { - SqsMessageAttribute('a', 'String', 'xyz'), - SqsMessageAttribute('b', 'Number', 42), + body = '<' + self._msg_body_template.format('trololo') + '<' # to ensure that we have correct xml + attributes = { + SqsMessageAttribute('a', 'String', 'xyz'), + SqsMessageAttribute('b', 'Number', 42), SqsMessageAttribute('c', 'Binary', base64.b64encode(to_bytes('binary_data'))), - } - if is_fifo: - self.seq_no += 1 - message_id = self._send_message_and_assert(created_queue_url, body, group_id='3', attributes=attributes, seq_no=self.seq_no) - else: - message_id = 
self._send_message_and_assert(created_queue_url, body, attributes=attributes) - read_message_result = self._read_while_not_empty(created_queue_url, 1) + } + if is_fifo: + self.seq_no += 1 + message_id = self._send_message_and_assert(created_queue_url, body, group_id='3', attributes=attributes, seq_no=self.seq_no) + else: + message_id = self._send_message_and_assert(created_queue_url, body, attributes=attributes) + read_message_result = self._read_while_not_empty(created_queue_url, 1) assert_that( read_message_result, ReadResponseMatcher().with_message_ids([message_id, ]) ) - message_attributes = read_message_result[0]['MessageAttribute'] - message_attributes_by_name = {} - for ma in message_attributes: - assert_that( - ma, has_item('Name') - ) - message_attributes_by_name[ma['Name']] = ma - assert_that( - message_attributes_by_name, - has_items('a', 'b', 'c') - ) - assert_that( - message_attributes_by_name['a']['Value']['DataType'], - equal_to('String') - ) - assert_that( - message_attributes_by_name['a']['Value']['StringValue'], - equal_to('xyz') - ) - assert_that( - message_attributes_by_name['b']['Value']['DataType'], - equal_to('Number') - ) - assert_that( - message_attributes_by_name['b']['Value']['StringValue'], - equal_to('42') - ) - assert_that( - message_attributes_by_name['c']['Value']['DataType'], - equal_to('Binary') - ) - assert_that( + message_attributes = read_message_result[0]['MessageAttribute'] + message_attributes_by_name = {} + for ma in message_attributes: + assert_that( + ma, has_item('Name') + ) + message_attributes_by_name[ma['Name']] = ma + assert_that( + message_attributes_by_name, + has_items('a', 'b', 'c') + ) + assert_that( + message_attributes_by_name['a']['Value']['DataType'], + equal_to('String') + ) + assert_that( + message_attributes_by_name['a']['Value']['StringValue'], + equal_to('xyz') + ) + assert_that( + message_attributes_by_name['b']['Value']['DataType'], + equal_to('Number') + ) + assert_that( + message_attributes_by_name['b']['Value']['StringValue'], + equal_to('42') + ) + assert_that( + message_attributes_by_name['c']['Value']['DataType'], + equal_to('Binary') + ) + assert_that( to_bytes(message_attributes_by_name['c']['Value']['BinaryValue']), equal_to(base64.b64encode(to_bytes('binary_data'))) - ) - - attributes = read_message_result[0]['Attribute'] - attributes_by_name = {} - for a in attributes: - assert_that( - a, has_item('Name') - ) - attributes_by_name[a['Name']] = a - - def assert_has_nonempty_attribute(name): - assert_that( - attributes_by_name, - has_item(name) - ) - assert_that(attributes_by_name[name]['Value'], not_(empty())) - - assert_has_nonempty_attribute('ApproximateFirstReceiveTimestamp') - assert_has_nonempty_attribute('ApproximateReceiveCount') - assert_has_nonempty_attribute('SentTimestamp') - - if is_fifo: - assert_has_nonempty_attribute('MessageDeduplicationId') - assert_has_nonempty_attribute('MessageGroupId') - assert_has_nonempty_attribute('SequenceNumber') - else: - assert_that( - attributes_by_name, - not_(has_item('MessageDeduplicationId')) - ) - assert_that( - attributes_by_name, - not_(has_item('MessageGroupId')) - ) - assert_that( - attributes_by_name, - not_(has_item('SequenceNumber')) - ) - - counters = self._get_sqs_counters() - send_counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'SendMessage_Count', - } - receive_counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'ReceiveMessage_Count', - } - 
assert_that(self._get_counter_value(counters, send_counter_labels, 0), equal_to(1)) - assert_that(self._get_counter_value(counters, receive_counter_labels, 0), equal_to(1)) - - def test_validates_message_attributes(self): - created_queue_url = self._create_queue_and_assert(self.queue_name) - - def call_send(attributes): - self._sqs_api.send_message( - created_queue_url, 'msg_body', attributes=attributes - ) - - def assert_invalid_attributes(attributes): - def call_with_attrs(): - call_send(attributes) - assert_that( - call_with_attrs, - raises( - RuntimeError, - pattern='InvalidParameterValue|InvalidParameterCombination' - ) - ) - - attributes1 = { - SqsMessageAttribute('!a', 'String', 'xyz'), - } - assert_invalid_attributes(attributes1) - - attributes2 = { - SqsMessageAttribute('a', 'String', 'xyz'), - SqsMessageAttribute('a', 'String', 'xyz2'), - } - assert_invalid_attributes(attributes2) - - attributes3 = { - SqsMessageAttribute('ya.reserved-prefix', 'String', 'xyz') - } - assert_invalid_attributes(attributes3) - - def test_send_to_nonexistent_queue(self): - created_queue_url = self._create_queue_and_assert(self.queue_name) - - def call_send(): - self._sqs_api.send_message( + ) + + attributes = read_message_result[0]['Attribute'] + attributes_by_name = {} + for a in attributes: + assert_that( + a, has_item('Name') + ) + attributes_by_name[a['Name']] = a + + def assert_has_nonempty_attribute(name): + assert_that( + attributes_by_name, + has_item(name) + ) + assert_that(attributes_by_name[name]['Value'], not_(empty())) + + assert_has_nonempty_attribute('ApproximateFirstReceiveTimestamp') + assert_has_nonempty_attribute('ApproximateReceiveCount') + assert_has_nonempty_attribute('SentTimestamp') + + if is_fifo: + assert_has_nonempty_attribute('MessageDeduplicationId') + assert_has_nonempty_attribute('MessageGroupId') + assert_has_nonempty_attribute('SequenceNumber') + else: + assert_that( + attributes_by_name, + not_(has_item('MessageDeduplicationId')) + ) + assert_that( + attributes_by_name, + not_(has_item('MessageGroupId')) + ) + assert_that( + attributes_by_name, + not_(has_item('SequenceNumber')) + ) + + counters = self._get_sqs_counters() + send_counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'SendMessage_Count', + } + receive_counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'ReceiveMessage_Count', + } + assert_that(self._get_counter_value(counters, send_counter_labels, 0), equal_to(1)) + assert_that(self._get_counter_value(counters, receive_counter_labels, 0), equal_to(1)) + + def test_validates_message_attributes(self): + created_queue_url = self._create_queue_and_assert(self.queue_name) + + def call_send(attributes): + self._sqs_api.send_message( + created_queue_url, 'msg_body', attributes=attributes + ) + + def assert_invalid_attributes(attributes): + def call_with_attrs(): + call_send(attributes) + assert_that( + call_with_attrs, + raises( + RuntimeError, + pattern='InvalidParameterValue|InvalidParameterCombination' + ) + ) + + attributes1 = { + SqsMessageAttribute('!a', 'String', 'xyz'), + } + assert_invalid_attributes(attributes1) + + attributes2 = { + SqsMessageAttribute('a', 'String', 'xyz'), + SqsMessageAttribute('a', 'String', 'xyz2'), + } + assert_invalid_attributes(attributes2) + + attributes3 = { + SqsMessageAttribute('ya.reserved-prefix', 'String', 'xyz') + } + assert_invalid_attributes(attributes3) + + def test_send_to_nonexistent_queue(self): + 
created_queue_url = self._create_queue_and_assert(self.queue_name) + + def call_send(): + self._sqs_api.send_message( to_bytes(created_queue_url) + to_bytes('1'), to_bytes('42') - ) - - assert_that( - call_send, - raises( - RuntimeError, - pattern='AWS.SimpleQueueService.NonExistentQueue' - ) - ) - - def test_receive_with_very_big_visibility_timeout(self): - queue_url = self._create_queue_and_assert(self.queue_name) - - def call_with_very_big_visibility_timeout(): - self._sqs_api.receive_message(queue_url, visibility_timeout=100500) - - assert_that( - call_with_very_big_visibility_timeout, - raises(RuntimeError, pattern='InvalidParameter') - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) + ) + + assert_that( + call_send, + raises( + RuntimeError, + pattern='AWS.SimpleQueueService.NonExistentQueue' + ) + ) + + def test_receive_with_very_big_visibility_timeout(self): + queue_url = self._create_queue_and_assert(self.queue_name) + + def call_with_very_big_visibility_timeout(): + self._sqs_api.receive_message(queue_url, visibility_timeout=100500) + + assert_that( + call_with_very_big_visibility_timeout, + raises(RuntimeError, pattern='InvalidParameter') + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) def test_create_q_twice(self, is_fifo): if is_fifo: self.queue_name = self.queue_name + '.fifo' created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - self.seq_no += 1 - message_id = self._send_message_and_assert(created_queue_url, self._msg_body_template, seq_no=self.seq_no if is_fifo else None, group_id='group' if is_fifo else None) + self.seq_no += 1 + message_id = self._send_message_and_assert(created_queue_url, self._msg_body_template, seq_no=self.seq_no if is_fifo else None, group_id='group' if is_fifo else None) second_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) assert_that(second_url, equal_to(created_queue_url)) @@ -329,7 +329,7 @@ class SqsGenericMessagingTest(KikimrSqsTestBase): queue_url, self._msg_body_template.format('1') ) self._read_messages_and_assert( - queue_url, messages_count=2, visibility_timeout=1000, + queue_url, messages_count=2, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids( [first_message_id, second_message_id] ).with_messages_data([self._msg_body_template.format('0'), self._msg_body_template.format('1')]) @@ -344,8 +344,8 @@ class SqsGenericMessagingTest(KikimrSqsTestBase): ) self._send_messages(queue_url, message_count=second_pack_size, msg_body_template=self._msg_body_template) self._read_messages_and_assert( - queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher( - ).with_n_or_more_messages(first_pack_size + 1).with_these_or_more_message_ids(first_pack_ids) + queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher( + ).with_n_or_more_messages(first_pack_size + 1).with_these_or_more_message_ids(first_pack_ids) ) def test_multi_read_dont_stall(self): @@ -359,7 +359,7 @@ class SqsGenericMessagingTest(KikimrSqsTestBase): queue_url, message_count=pack_size, msg_body_template=self._msg_body_template )) result = self._read_messages_and_assert( - queue_url, messages_count=10, visibility_timeout=1000, + queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_some_of_message_ids(all_ids - read_ids) ) if result: @@ -374,30 +374,30 @@ class SqsGenericMessagingTest(KikimrSqsTestBase): def test_visibility_timeout_works(self): total_msg_count = 10 - visibility_timeout = 5 - - before_read_time = time.time() 
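# The visibility-timeout tests that follow rest on standard SQS semantics: a received
# message is hidden from further receives until its visibility timeout expires, after
# which it becomes readable again. A tiny in-memory model of just that rule, for
# intuition only (TinyVisibilityQueue is invented for this sketch and is not part of
# the test suite):
import time


class TinyVisibilityQueue:
    def __init__(self):
        self._messages = {}  # message id -> (body, invisible_until timestamp)

    def send(self, msg_id, body):
        self._messages[msg_id] = (body, 0.0)

    def receive(self, visibility_timeout):
        now = time.time()
        for msg_id, (body, invisible_until) in self._messages.items():
            if now >= invisible_until:
                # Hide the message for `visibility_timeout` seconds, as SQS does.
                self._messages[msg_id] = (body, now + visibility_timeout)
                return msg_id, body
        return None  # nothing is currently visible


# q = TinyVisibilityQueue(); q.send('m1', 'hello')
# q.receive(5) -> ('m1', 'hello'); an immediate second q.receive(5) -> None;
# after sleeping past 5 seconds the same message is returned again, which is the
# behaviour test_visibility_timeout_works asserts against the real service.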
+ visibility_timeout = 5 + + before_read_time = time.time() self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=total_msg_count, read_count=1, visibility_timeout=visibility_timeout, - msg_body_template=self._msg_body_template, is_fifo=False + self.queue_name, send_count=total_msg_count, read_count=1, visibility_timeout=visibility_timeout, + msg_body_template=self._msg_body_template, is_fifo=False ) msg_data = [self._msg_body_template.format(i) for i in range(total_msg_count)] already_read_id = extract_message_ids(self.read_result)[0] - read_result = self._read_messages_and_assert( - self.queue_url, messages_count=5, visibility_timeout=visibility_timeout, wait_timeout=1, + read_result = self._read_messages_and_assert( + self.queue_url, messages_count=5, visibility_timeout=visibility_timeout, wait_timeout=1, matcher=ReadResponseMatcher().with_some_of_message_ids(self.message_ids) - .with_n_messages(5) - ) - - read_time_2 = time.time() - if read_time_2 - before_read_time < visibility_timeout: - read_ids = set(extract_message_ids(read_result)) - assert_that(read_ids, not_(has_item(already_read_id))) - - time.sleep(visibility_timeout + 0.1) + .with_n_messages(5) + ) + + read_time_2 = time.time() + if read_time_2 - before_read_time < visibility_timeout: + read_ids = set(extract_message_ids(read_result)) + assert_that(read_ids, not_(has_item(already_read_id))) + + time.sleep(visibility_timeout + 0.1) self._read_messages_and_assert( - self.queue_url, messages_count=10, visibility_timeout=1000, + self.queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids(self.message_ids).with_messages_data(msg_data) ) @@ -412,230 +412,230 @@ class SqsGenericMessagingTest(KikimrSqsTestBase): visibility_timeout=9 ) self._read_messages_and_assert( - queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_some_of_message_ids(message_ids), wait_timeout=10, - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_zero_visibility_timeout_works(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - - self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=1, read_count=1, visibility_timeout=0, - msg_body_template=self._msg_body_template, is_fifo=is_fifo - ) - self._read_messages_and_assert( - self.queue_url, messages_count=1, visibility_timeout=0, wait_timeout=1, - matcher=ReadResponseMatcher().with_n_messages(1) - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_change_visibility_works(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - - self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=1, read_count=1, visibility_timeout=1000, - msg_body_template=self._msg_body_template, is_fifo=is_fifo - ) - receipt_handle = self.read_result[0]['ReceiptHandle'] - logging.debug('First received receipt handle: {}'.format(receipt_handle)) - # decrease - self._sqs_api.change_message_visibility(self.queue_url, receipt_handle, 1) - time.sleep(2) - - before_receive = time.time() - receive_visibility_timeout = 5 - read_result_2 = self._read_messages_and_assert( - self.queue_url, messages_count=1, visibility_timeout=receive_visibility_timeout, wait_timeout=1, - matcher=ReadResponseMatcher().with_n_messages(1) - ) - receipt_handle = read_result_2[0]['ReceiptHandle'] - logging.debug('Second received receipt handle: {}'.format(receipt_handle)) - # increase - changed = False - try: - 
self._sqs_api.change_message_visibility(self.queue_url, receipt_handle, 1000) - changed = True - except Exception as ex: - seconds_passed = time.time() - before_receive - logging.info('Exception while changing message visibility: {}'.format(ex)) - if seconds_passed < receive_visibility_timeout: - raise - elif str(ex).find('AWS.SimpleQueueService.MessageNotInflight') == -1: - raise - - if changed: - time.sleep(receive_visibility_timeout + 0.1) - self._read_messages_and_assert( - self.queue_url, messages_count=1, visibility_timeout=100, wait_timeout=0, - matcher=ReadResponseMatcher().with_n_messages(0) - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_change_visibility_batch_works(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - - self._create_queue_and_assert(self.queue_name, is_fifo) - - if is_fifo: - self._send_messages( - self.queue_url, 1, self._msg_body_template, is_fifo=True, group_id='group_1' - ) - self.seq_no += 1 - self._send_messages( - self.queue_url, 1, self._msg_body_template, is_fifo=True, group_id='group_2' - ) - else: - self._send_messages( - self.queue_url, 2, self._msg_body_template - ) - - read_result = self._read_messages_and_assert( - self.queue_url, messages_count=2, - matcher=ReadResponseMatcher().with_n_messages(2), - visibility_timeout=1000, wait_timeout=10 - ) - - receipt_handle_1 = read_result[0]['ReceiptHandle'] - receipt_handle_2 = read_result[1]['ReceiptHandle'] - self._sqs_api.change_message_visibility_batch(self.queue_url, [ - SqsChangeMessageVisibilityParams(receipt_handle_1, 1), - SqsChangeMessageVisibilityParams(receipt_handle_2, 10), - ]) - time.sleep(2) - read_result = self._read_messages_and_assert( - self.queue_url, messages_count=10, visibility_timeout=1000, wait_timeout=1, - matcher=ReadResponseMatcher().with_n_or_more_messages(1) # test machine may be slow and visibility timeout may expire - ) - if len(read_result) == 1: - time.sleep(9) - self._read_messages_and_assert( - self.queue_url, messages_count=1, visibility_timeout=1000, wait_timeout=1, - matcher=ReadResponseMatcher().with_n_messages(1) - ) - - # Test error handling - def call_change_message_visibility_batch_with_greater_than_ten_handles(): - requests = [] + queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_some_of_message_ids(message_ids), wait_timeout=10, + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_zero_visibility_timeout_works(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + + self._create_queue_send_x_messages_read_y_messages( + self.queue_name, send_count=1, read_count=1, visibility_timeout=0, + msg_body_template=self._msg_body_template, is_fifo=is_fifo + ) + self._read_messages_and_assert( + self.queue_url, messages_count=1, visibility_timeout=0, wait_timeout=1, + matcher=ReadResponseMatcher().with_n_messages(1) + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_change_visibility_works(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + + self._create_queue_send_x_messages_read_y_messages( + self.queue_name, send_count=1, read_count=1, visibility_timeout=1000, + msg_body_template=self._msg_body_template, is_fifo=is_fifo + ) + receipt_handle = self.read_result[0]['ReceiptHandle'] + logging.debug('First received receipt handle: {}'.format(receipt_handle)) + # decrease + self._sqs_api.change_message_visibility(self.queue_url, receipt_handle, 1) + time.sleep(2) + + before_receive = time.time() + receive_visibility_timeout = 5 
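# The change-visibility test above lowers a message's visibility, re-receives it, and
# then tries to raise the timeout to 1000 seconds, tolerating
# AWS.SimpleQueueService.MessageNotInflight only when the receive window may already
# have expired on a slow test machine. The same guard, factored into a standalone
# helper for illustration (the name `try_extend_visibility` is hypothetical and `api`
# stands in for the test's self._sqs_api):
import time


def try_extend_visibility(api, queue_url, receipt_handle, new_timeout,
                          received_at, receive_visibility_timeout):
    """Return True if visibility was extended, False if the message had already
    legitimately left flight because its receive visibility timeout expired."""
    try:
        api.change_message_visibility(queue_url, receipt_handle, new_timeout)
        return True
    except Exception as ex:
        seconds_passed = time.time() - received_at
        if seconds_passed < receive_visibility_timeout:
            raise  # the message should still be in flight: unexpected failure
        if 'AWS.SimpleQueueService.MessageNotInflight' not in str(ex):
            raise  # some other error: do not swallow it
        return False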
+ read_result_2 = self._read_messages_and_assert( + self.queue_url, messages_count=1, visibility_timeout=receive_visibility_timeout, wait_timeout=1, + matcher=ReadResponseMatcher().with_n_messages(1) + ) + receipt_handle = read_result_2[0]['ReceiptHandle'] + logging.debug('Second received receipt handle: {}'.format(receipt_handle)) + # increase + changed = False + try: + self._sqs_api.change_message_visibility(self.queue_url, receipt_handle, 1000) + changed = True + except Exception as ex: + seconds_passed = time.time() - before_receive + logging.info('Exception while changing message visibility: {}'.format(ex)) + if seconds_passed < receive_visibility_timeout: + raise + elif str(ex).find('AWS.SimpleQueueService.MessageNotInflight') == -1: + raise + + if changed: + time.sleep(receive_visibility_timeout + 0.1) + self._read_messages_and_assert( + self.queue_url, messages_count=1, visibility_timeout=100, wait_timeout=0, + matcher=ReadResponseMatcher().with_n_messages(0) + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_change_visibility_batch_works(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + + self._create_queue_and_assert(self.queue_name, is_fifo) + + if is_fifo: + self._send_messages( + self.queue_url, 1, self._msg_body_template, is_fifo=True, group_id='group_1' + ) + self.seq_no += 1 + self._send_messages( + self.queue_url, 1, self._msg_body_template, is_fifo=True, group_id='group_2' + ) + else: + self._send_messages( + self.queue_url, 2, self._msg_body_template + ) + + read_result = self._read_messages_and_assert( + self.queue_url, messages_count=2, + matcher=ReadResponseMatcher().with_n_messages(2), + visibility_timeout=1000, wait_timeout=10 + ) + + receipt_handle_1 = read_result[0]['ReceiptHandle'] + receipt_handle_2 = read_result[1]['ReceiptHandle'] + self._sqs_api.change_message_visibility_batch(self.queue_url, [ + SqsChangeMessageVisibilityParams(receipt_handle_1, 1), + SqsChangeMessageVisibilityParams(receipt_handle_2, 10), + ]) + time.sleep(2) + read_result = self._read_messages_and_assert( + self.queue_url, messages_count=10, visibility_timeout=1000, wait_timeout=1, + matcher=ReadResponseMatcher().with_n_or_more_messages(1) # test machine may be slow and visibility timeout may expire + ) + if len(read_result) == 1: + time.sleep(9) + self._read_messages_and_assert( + self.queue_url, messages_count=1, visibility_timeout=1000, wait_timeout=1, + matcher=ReadResponseMatcher().with_n_messages(1) + ) + + # Test error handling + def call_change_message_visibility_batch_with_greater_than_ten_handles(): + requests = [] for i in range(11): - requests.append(SqsChangeMessageVisibilityParams('Handle{}'.format(i), 1)) - self._sqs_api.change_message_visibility_batch(self.queue_url, requests) - - assert_that( - call_change_message_visibility_batch_with_greater_than_ten_handles, - raises( - RuntimeError, - pattern='AWS.SimpleQueueService.TooManyEntriesInBatchRequest' - ) - ) - - def call_change_message_visibility_batch_with_no_handles(): - requests = [] - self._sqs_api.change_message_visibility_batch(self.queue_url, requests) - - assert_that( - call_change_message_visibility_batch_with_no_handles, - raises( - RuntimeError, - pattern='AWS.SimpleQueueService.EmptyBatchRequest' - ) - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_change_visibility_to_zero_works(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - - self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=1, read_count=1, 
visibility_timeout=1000, - msg_body_template=self._msg_body_template, is_fifo=is_fifo - ) - receipt_handle = self.read_result[0]['ReceiptHandle'] - self._sqs_api.change_message_visibility(self.queue_url, receipt_handle, 0) - self._read_messages_and_assert( - self.queue_url, messages_count=1, visibility_timeout=0, wait_timeout=1, - matcher=ReadResponseMatcher().with_n_messages(1) - ) - - def test_change_message_visibility_with_very_big_timeout(self): - queue_url = self._create_queue_and_assert(self.queue_name) - - self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=2, read_count=2, visibility_timeout=1000, - msg_body_template=self._msg_body_template, is_fifo=False - ) - receipt_handle_1 = self.read_result[0]['ReceiptHandle'] - receipt_handle_2 = self.read_result[1]['ReceiptHandle'] - - def call_with_very_big_visibility_timeout(): - self._sqs_api.change_message_visibility(queue_url, receipt_handle_1, 100500) - - assert_that( - call_with_very_big_visibility_timeout, - raises(RuntimeError, pattern='InvalidParameter') - ) - - result = self._sqs_api.change_message_visibility_batch(self.queue_url, [ - SqsChangeMessageVisibilityParams(receipt_handle_1, 100500), - SqsChangeMessageVisibilityParams(receipt_handle_2, 10), - ]) - assert_that(len(result), equal_to(2)) - assert_that(result[0]['BatchResultErrorEntry']['Code'], equal_to('InvalidParameterValue')) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_does_not_change_visibility_not_in_flight(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - - self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=1, read_count=1, visibility_timeout=1, - msg_body_template=self._msg_body_template, is_fifo=is_fifo - ) - receipt_handle = self.read_result[0]['ReceiptHandle'] - logging.debug('Received receipt handle: {}'.format(receipt_handle)) - time.sleep(2) - - def call_change_visibility(): - self._sqs_api.change_message_visibility(self.queue_url, receipt_handle, 1000) - - assert_that( - call_change_visibility, - raises(RuntimeError, pattern='.*\n.*AWS\\.SimpleQueueService\\.MessageNotInflight') - ) - - # Check that we can receive message after change message visibility to big timeout failed - self._read_messages_and_assert( - self.queue_url, messages_count=1, visibility_timeout=10, wait_timeout=10, - matcher=ReadResponseMatcher().with_n_messages(1) - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_does_not_change_visibility_for_deleted_message(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - - self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=1, read_count=1, visibility_timeout=1000, - msg_body_template=self._msg_body_template, is_fifo=is_fifo - ) - receipt_handle = self.read_result[0]['ReceiptHandle'] - logging.debug('Received receipt handle: {}'.format(receipt_handle)) - - self._sqs_api.delete_message(self.queue_url, receipt_handle) - - def call_change_visibility(): - self._sqs_api.change_message_visibility(self.queue_url, receipt_handle, 10) - - assert_that( - call_change_visibility, - raises(RuntimeError, pattern='InvalidParameterValue') - ) - + requests.append(SqsChangeMessageVisibilityParams('Handle{}'.format(i), 1)) + self._sqs_api.change_message_visibility_batch(self.queue_url, requests) + + assert_that( + call_change_message_visibility_batch_with_greater_than_ten_handles, + raises( + RuntimeError, + pattern='AWS.SimpleQueueService.TooManyEntriesInBatchRequest' + ) + ) + + def 
call_change_message_visibility_batch_with_no_handles(): + requests = [] + self._sqs_api.change_message_visibility_batch(self.queue_url, requests) + + assert_that( + call_change_message_visibility_batch_with_no_handles, + raises( + RuntimeError, + pattern='AWS.SimpleQueueService.EmptyBatchRequest' + ) + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_change_visibility_to_zero_works(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + + self._create_queue_send_x_messages_read_y_messages( + self.queue_name, send_count=1, read_count=1, visibility_timeout=1000, + msg_body_template=self._msg_body_template, is_fifo=is_fifo + ) + receipt_handle = self.read_result[0]['ReceiptHandle'] + self._sqs_api.change_message_visibility(self.queue_url, receipt_handle, 0) + self._read_messages_and_assert( + self.queue_url, messages_count=1, visibility_timeout=0, wait_timeout=1, + matcher=ReadResponseMatcher().with_n_messages(1) + ) + + def test_change_message_visibility_with_very_big_timeout(self): + queue_url = self._create_queue_and_assert(self.queue_name) + + self._create_queue_send_x_messages_read_y_messages( + self.queue_name, send_count=2, read_count=2, visibility_timeout=1000, + msg_body_template=self._msg_body_template, is_fifo=False + ) + receipt_handle_1 = self.read_result[0]['ReceiptHandle'] + receipt_handle_2 = self.read_result[1]['ReceiptHandle'] + + def call_with_very_big_visibility_timeout(): + self._sqs_api.change_message_visibility(queue_url, receipt_handle_1, 100500) + + assert_that( + call_with_very_big_visibility_timeout, + raises(RuntimeError, pattern='InvalidParameter') + ) + + result = self._sqs_api.change_message_visibility_batch(self.queue_url, [ + SqsChangeMessageVisibilityParams(receipt_handle_1, 100500), + SqsChangeMessageVisibilityParams(receipt_handle_2, 10), + ]) + assert_that(len(result), equal_to(2)) + assert_that(result[0]['BatchResultErrorEntry']['Code'], equal_to('InvalidParameterValue')) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_does_not_change_visibility_not_in_flight(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + + self._create_queue_send_x_messages_read_y_messages( + self.queue_name, send_count=1, read_count=1, visibility_timeout=1, + msg_body_template=self._msg_body_template, is_fifo=is_fifo + ) + receipt_handle = self.read_result[0]['ReceiptHandle'] + logging.debug('Received receipt handle: {}'.format(receipt_handle)) + time.sleep(2) + + def call_change_visibility(): + self._sqs_api.change_message_visibility(self.queue_url, receipt_handle, 1000) + + assert_that( + call_change_visibility, + raises(RuntimeError, pattern='.*\n.*AWS\\.SimpleQueueService\\.MessageNotInflight') + ) + + # Check that we can receive message after change message visibility to big timeout failed + self._read_messages_and_assert( + self.queue_url, messages_count=1, visibility_timeout=10, wait_timeout=10, + matcher=ReadResponseMatcher().with_n_messages(1) + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_does_not_change_visibility_for_deleted_message(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + + self._create_queue_send_x_messages_read_y_messages( + self.queue_name, send_count=1, read_count=1, visibility_timeout=1000, + msg_body_template=self._msg_body_template, is_fifo=is_fifo + ) + receipt_handle = self.read_result[0]['ReceiptHandle'] + logging.debug('Received receipt handle: {}'.format(receipt_handle)) + + self._sqs_api.delete_message(self.queue_url, receipt_handle) + + 
def call_change_visibility(): + self._sqs_api.change_message_visibility(self.queue_url, receipt_handle, 10) + + assert_that( + call_change_visibility, + raises(RuntimeError, pattern='InvalidParameterValue') + ) + def test_delete_message_works(self): self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=10, read_count=2, visibility_timeout=0, + self.queue_name, send_count=10, read_count=2, visibility_timeout=0, msg_body_template=self._msg_body_template ) handle = self.read_result[0]['ReceiptHandle'] @@ -649,131 +649,131 @@ class SqsGenericMessagingTest(KikimrSqsTestBase): self._sqs_api.delete_message(self.queue_url, handle), not_none() ) self._read_messages_and_assert( - self.queue_url, messages_count=10, visibility_timeout=1000, + self.queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids(self.message_ids) ) - counters = self._get_sqs_counters() - delete_counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'DeleteMessage_Count', - } - assert_that(self._get_counter_value(counters, delete_counter_labels, 0), equal_to(1)) - - # break a queue and check failure - self._break_queue(self._username, self.queue_name, False) - - handle_2 = self.read_result[0]['ReceiptHandle'] - - def call_delete(): - self._sqs_api.delete_message(self.queue_url, handle_2) - - assert_that( - call_delete, - raises(RuntimeError, pattern='InternalFailure') - ) - - def test_delete_message_batch_works(self): - self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=9, read_count=9, - msg_body_template=self._msg_body_template - ) - handles = [] - for result in self.read_result: - handles.append(result['ReceiptHandle']) - handles.insert(5, 'not_a_receipt_handle') - - assert_that(len(handles), equal_to(10)) - - delete_message_batch_result = self._sqs_api.delete_message_batch(self.queue_url, handles) - assert_that(len(delete_message_batch_result), equal_to(10)) - + counters = self._get_sqs_counters() + delete_counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'DeleteMessage_Count', + } + assert_that(self._get_counter_value(counters, delete_counter_labels, 0), equal_to(1)) + + # break a queue and check failure + self._break_queue(self._username, self.queue_name, False) + + handle_2 = self.read_result[0]['ReceiptHandle'] + + def call_delete(): + self._sqs_api.delete_message(self.queue_url, handle_2) + + assert_that( + call_delete, + raises(RuntimeError, pattern='InternalFailure') + ) + + def test_delete_message_batch_works(self): + self._create_queue_send_x_messages_read_y_messages( + self.queue_name, send_count=9, read_count=9, + msg_body_template=self._msg_body_template + ) + handles = [] + for result in self.read_result: + handles.append(result['ReceiptHandle']) + handles.insert(5, 'not_a_receipt_handle') + + assert_that(len(handles), equal_to(10)) + + delete_message_batch_result = self._sqs_api.delete_message_batch(self.queue_url, handles) + assert_that(len(delete_message_batch_result), equal_to(10)) + for i in range(len(delete_message_batch_result)): - res = delete_message_batch_result[i] - if i == 5: - assert_that(res['BatchResultErrorEntry']['Code'], equal_to('ReceiptHandleIsInvalid')) - else: - assert_that(res, has_item('DeleteMessageBatchResultEntry')) - - self._read_messages_and_assert( - self.queue_url, messages_count=10, visibility_timeout=1000, - matcher=ReadResponseMatcher().with_n_messages(0) - ) - - # 
Test error handling - def call_delete_message_batch_with_greater_than_ten_handles(): - handles = [] + res = delete_message_batch_result[i] + if i == 5: + assert_that(res['BatchResultErrorEntry']['Code'], equal_to('ReceiptHandleIsInvalid')) + else: + assert_that(res, has_item('DeleteMessageBatchResultEntry')) + + self._read_messages_and_assert( + self.queue_url, messages_count=10, visibility_timeout=1000, + matcher=ReadResponseMatcher().with_n_messages(0) + ) + + # Test error handling + def call_delete_message_batch_with_greater_than_ten_handles(): + handles = [] for i in range(11): - handles.append('Handle{}'.format(i)) - self._sqs_api.delete_message_batch(self.queue_url, handles) - - assert_that( - call_delete_message_batch_with_greater_than_ten_handles, - raises( - RuntimeError, - pattern='AWS.SimpleQueueService.TooManyEntriesInBatchRequest' - ) - ) - - def call_delete_message_batch_with_no_handles(): - handles = [] - self._sqs_api.delete_message_batch(self.queue_url, handles) - - assert_that( - call_delete_message_batch_with_no_handles, - raises( - RuntimeError, - pattern='AWS.SimpleQueueService.EmptyBatchRequest' - ) - ) - - counters = self._get_sqs_counters() - delete_counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'DeleteMessage_Count', - } - assert_that(self._get_counter_value(counters, delete_counter_labels, 0), equal_to(9)) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_delete_message_batch_deduplicates_receipt_handle(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - - self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=1, read_count=1, - msg_body_template=self._msg_body_template, is_fifo=is_fifo - ) - handle = self.read_result[0]['ReceiptHandle'] + handles.append('Handle{}'.format(i)) + self._sqs_api.delete_message_batch(self.queue_url, handles) + + assert_that( + call_delete_message_batch_with_greater_than_ten_handles, + raises( + RuntimeError, + pattern='AWS.SimpleQueueService.TooManyEntriesInBatchRequest' + ) + ) + + def call_delete_message_batch_with_no_handles(): + handles = [] + self._sqs_api.delete_message_batch(self.queue_url, handles) + + assert_that( + call_delete_message_batch_with_no_handles, + raises( + RuntimeError, + pattern='AWS.SimpleQueueService.EmptyBatchRequest' + ) + ) + + counters = self._get_sqs_counters() + delete_counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'DeleteMessage_Count', + } + assert_that(self._get_counter_value(counters, delete_counter_labels, 0), equal_to(9)) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_delete_message_batch_deduplicates_receipt_handle(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + + self._create_queue_send_x_messages_read_y_messages( + self.queue_name, send_count=1, read_count=1, + msg_body_template=self._msg_body_template, is_fifo=is_fifo + ) + handle = self.read_result[0]['ReceiptHandle'] handles = [handle for i in range(5)] - - delete_message_batch_result = self._sqs_api.delete_message_batch(self.queue_url, handles) - assert_that(len(delete_message_batch_result), equal_to(5)) - + + delete_message_batch_result = self._sqs_api.delete_message_batch(self.queue_url, handles) + assert_that(len(delete_message_batch_result), equal_to(5)) + for i in range(len(delete_message_batch_result)): - res = delete_message_batch_result[i] - assert_that(res, has_item('DeleteMessageBatchResultEntry')) - - labels = 
{ - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'MessagesCount' - } - - attempts = 30 - while attempts: - attempts -= 1 - counters = self._get_sqs_counters() - messages_count_metric = self._get_counter_value(counters, labels) - if (messages_count_metric is None or messages_count_metric != 0) and attempts: - time.sleep(1) - else: - assert_that(messages_count_metric, equal_to(0)) - + res = delete_message_batch_result[i] + assert_that(res, has_item('DeleteMessageBatchResultEntry')) + + labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'MessagesCount' + } + + attempts = 30 + while attempts: + attempts -= 1 + counters = self._get_sqs_counters() + messages_count_metric = self._get_counter_value(counters, labels) + if (messages_count_metric is None or messages_count_metric != 0) and attempts: + time.sleep(1) + else: + assert_that(messages_count_metric, equal_to(0)) + def test_can_read_new_written_data_on_visibility_timeout(self): visibility_timeout = 15 self._create_queue_send_x_messages_read_y_messages( @@ -788,15 +788,15 @@ class SqsGenericMessagingTest(KikimrSqsTestBase): message_ids = set(first_pack_ids) - set(extract_message_ids(self.read_result)) message_ids.update(second_pack_ids) - visibility_timeout_2 = 15 + visibility_timeout_2 = 15 self._read_messages_and_assert( - self.queue_url, messages_count=10, visibility_timeout=visibility_timeout_2, - matcher=ReadResponseMatcher().with_these_or_more_message_ids(message_ids).with_n_or_more_messages(6), + self.queue_url, messages_count=10, visibility_timeout=visibility_timeout_2, + matcher=ReadResponseMatcher().with_these_or_more_message_ids(message_ids).with_n_or_more_messages(6), ) - remaining_time = visibility_timeout - (time.time() - begin_time) # for first pack read - time.sleep(max(remaining_time + 0.1, visibility_timeout_2 + 0.1, 0)) + remaining_time = visibility_timeout - (time.time() - begin_time) # for first pack read + time.sleep(max(remaining_time + 0.1, visibility_timeout_2 + 0.1, 0)) self._read_messages_and_assert( - self.queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids( + self.queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids( first_pack_ids + second_pack_ids ) ) @@ -806,49 +806,49 @@ class SqsGenericMessagingTest(KikimrSqsTestBase): self.queue_name, send_count=10, read_count=5, visibility_timeout=5, msg_body_template=self._msg_body_template ) - handle = self.read_result[4]['ReceiptHandle'] # select the last read message to avoid race with visibility timeout while reading - self.message_ids.remove(self.read_result[4]['MessageId']) + handle = self.read_result[4]['ReceiptHandle'] # select the last read message to avoid race with visibility timeout while reading + self.message_ids.remove(self.read_result[4]['MessageId']) assert_that( self._sqs_api.delete_message(self.queue_url, handle), not_none() ) time.sleep(6) self._read_messages_and_assert( - self.queue_url, messages_count=10, visibility_timeout=1000, + self.queue_url, messages_count=10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids(self.message_ids) ) - def test_wrong_delete_fails(self): - self._create_queue_send_x_messages_read_y_messages( - self.queue_name, send_count=1, read_count=1, visibility_timeout=5, - msg_body_template=self._msg_body_template - ) - handle = self.read_result[0]['ReceiptHandle'] - - def call_delete_invalid_handle(): - 
self._sqs_api.delete_message(self.queue_url, handle + handle), # wrong handle - assert_that( - call_delete_invalid_handle, - raises(RuntimeError, pattern='.*\n.*ReceiptHandleIsInvalid') - ) - - def call_delete_without_handle(): - logging.debug('Calling delete message without receipt handle on queue {}'.format(self.queue_url)) - self._sqs_api.execute_request( - action='DeleteMessage', - extract_result_method=lambda x: x['DeleteMessageResponse']['ResponseMetadata']['RequestId'], - QueueUrl=self.queue_url # no receipt handle - ) - assert_that( - call_delete_without_handle, - raises(RuntimeError) - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_queue_attributes(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + def test_wrong_delete_fails(self): + self._create_queue_send_x_messages_read_y_messages( + self.queue_name, send_count=1, read_count=1, visibility_timeout=5, + msg_body_template=self._msg_body_template + ) + handle = self.read_result[0]['ReceiptHandle'] + + def call_delete_invalid_handle(): + self._sqs_api.delete_message(self.queue_url, handle + handle), # wrong handle + assert_that( + call_delete_invalid_handle, + raises(RuntimeError, pattern='.*\n.*ReceiptHandleIsInvalid') + ) + + def call_delete_without_handle(): + logging.debug('Calling delete message without receipt handle on queue {}'.format(self.queue_url)) + self._sqs_api.execute_request( + action='DeleteMessage', + extract_result_method=lambda x: x['DeleteMessageResponse']['ResponseMetadata']['RequestId'], + QueueUrl=self.queue_url # no receipt handle + ) + assert_that( + call_delete_without_handle, + raises(RuntimeError) + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_queue_attributes(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) # assert empty response when no attribute names are provided attributes = self._sqs_api.get_queue_attributes(queue_url, attributes=[]) @@ -856,252 +856,252 @@ class SqsGenericMessagingTest(KikimrSqsTestBase): # check common case attributes = self._sqs_api.get_queue_attributes(queue_url, attributes=['All']) - if is_fifo: - assert_that(attributes, has_item('FifoQueue')) - assert_that(attributes, has_item('ContentBasedDeduplication')) - else: - assert_that(attributes, is_not(has_item('FifoQueue'))) - assert_that(attributes, is_not(has_item('ContentBasedDeduplication'))) - - assert_that(attributes, has_items( - 'ApproximateNumberOfMessages', - 'ApproximateNumberOfMessagesDelayed', - 'ApproximateNumberOfMessagesNotVisible', - 'CreatedTimestamp', - 'DelaySeconds', - 'MaximumMessageSize', - 'MessageRetentionPeriod', - 'ReceiveMessageWaitTimeSeconds', - 'VisibilityTimeout', - )) - assert_that(attributes['ReceiveMessageWaitTimeSeconds'], equal_to('0')) - if is_fifo: + if is_fifo: + assert_that(attributes, has_item('FifoQueue')) + assert_that(attributes, has_item('ContentBasedDeduplication')) + else: + assert_that(attributes, is_not(has_item('FifoQueue'))) + assert_that(attributes, is_not(has_item('ContentBasedDeduplication'))) + + assert_that(attributes, has_items( + 'ApproximateNumberOfMessages', + 'ApproximateNumberOfMessagesDelayed', + 'ApproximateNumberOfMessagesNotVisible', + 'CreatedTimestamp', + 'DelaySeconds', + 'MaximumMessageSize', + 'MessageRetentionPeriod', + 'ReceiveMessageWaitTimeSeconds', + 'VisibilityTimeout', + )) + 
assert_that(attributes['ReceiveMessageWaitTimeSeconds'], equal_to('0')) + if is_fifo: assert_that(attributes['ContentBasedDeduplication'], equal_to('false')) - - self._sqs_api.set_queue_attributes(queue_url, {'ReceiveMessageWaitTimeSeconds': '10', 'MaximumMessageSize': '111111'}) - attributes = self._sqs_api.get_queue_attributes(queue_url) - assert_that(attributes['ReceiveMessageWaitTimeSeconds'], equal_to('10')) - assert_that(attributes['MaximumMessageSize'], equal_to('111111')) - - if is_fifo: - self._sqs_api.set_queue_attributes(queue_url, {'ContentBasedDeduplication': 'true'}) - attributes = self._sqs_api.get_queue_attributes(queue_url) + + self._sqs_api.set_queue_attributes(queue_url, {'ReceiveMessageWaitTimeSeconds': '10', 'MaximumMessageSize': '111111'}) + attributes = self._sqs_api.get_queue_attributes(queue_url) + assert_that(attributes['ReceiveMessageWaitTimeSeconds'], equal_to('10')) + assert_that(attributes['MaximumMessageSize'], equal_to('111111')) + + if is_fifo: + self._sqs_api.set_queue_attributes(queue_url, {'ContentBasedDeduplication': 'true'}) + attributes = self._sqs_api.get_queue_attributes(queue_url) assert_that(attributes['ContentBasedDeduplication'], equal_to('true')) - - def test_set_very_big_visibility_timeout(self): - queue_url = self._create_queue_and_assert(self.queue_name) - - def call_with_very_big_visibility_timeout(): - self._sqs_api.set_queue_attributes(queue_url, {'VisibilityTimeout': '100500'}) - - assert_that( - call_with_very_big_visibility_timeout, + + def test_set_very_big_visibility_timeout(self): + queue_url = self._create_queue_and_assert(self.queue_name) + + def call_with_very_big_visibility_timeout(): + self._sqs_api.set_queue_attributes(queue_url, {'VisibilityTimeout': '100500'}) + + assert_that( + call_with_very_big_visibility_timeout, raises(RuntimeError, pattern='InvalidAttributeValue') - ) - - def test_wrong_attribute_name(self): - queue_url = self._create_queue_and_assert(self.queue_name) - - def call_with_wrong_attribute_name(): - self._sqs_api.get_queue_attributes(queue_url, ['All', 'UnknownAttributeName']) - - assert_that( - call_with_wrong_attribute_name, - raises(RuntimeError, pattern='InvalidAttributeName') - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_get_queue_attributes_only_runtime_attributes(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - attributes = self._sqs_api.get_queue_attributes(queue_url, ['ApproximateNumberOfMessagesDelayed']) - assert_that(int(attributes['ApproximateNumberOfMessagesDelayed']), equal_to(0)) - - self._send_message_and_assert(queue_url, 'test', delay_seconds=900, seq_no='1' if is_fifo else None, group_id='group' if is_fifo else None) - attributes = self._sqs_api.get_queue_attributes(queue_url, ['ApproximateNumberOfMessagesDelayed']) - assert_that(int(attributes['ApproximateNumberOfMessagesDelayed']), equal_to(1)) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_get_queue_attributes_only_attributes_table(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - attributes = self._sqs_api.get_queue_attributes(queue_url, ['MaximumMessageSize']) - assert_that(int(attributes['MaximumMessageSize']), equal_to(256 * 1024)) - - def test_queue_attributes_batch(self): - # Create > 10 queues to check that private commands for UI will work - queue_urls = 
[self._create_queue_and_assert("{}-{}".format(self.queue_name, i)) for i in range(10)] - created_queue_url2 = self._create_queue_and_assert(self.queue_name + '1.fifo', is_fifo=True) - queue_urls.append(created_queue_url2) + ) + + def test_wrong_attribute_name(self): + queue_url = self._create_queue_and_assert(self.queue_name) + + def call_with_wrong_attribute_name(): + self._sqs_api.get_queue_attributes(queue_url, ['All', 'UnknownAttributeName']) + + assert_that( + call_with_wrong_attribute_name, + raises(RuntimeError, pattern='InvalidAttributeName') + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_get_queue_attributes_only_runtime_attributes(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + attributes = self._sqs_api.get_queue_attributes(queue_url, ['ApproximateNumberOfMessagesDelayed']) + assert_that(int(attributes['ApproximateNumberOfMessagesDelayed']), equal_to(0)) + + self._send_message_and_assert(queue_url, 'test', delay_seconds=900, seq_no='1' if is_fifo else None, group_id='group' if is_fifo else None) + attributes = self._sqs_api.get_queue_attributes(queue_url, ['ApproximateNumberOfMessagesDelayed']) + assert_that(int(attributes['ApproximateNumberOfMessagesDelayed']), equal_to(1)) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_get_queue_attributes_only_attributes_table(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + attributes = self._sqs_api.get_queue_attributes(queue_url, ['MaximumMessageSize']) + assert_that(int(attributes['MaximumMessageSize']), equal_to(256 * 1024)) + + def test_queue_attributes_batch(self): + # Create > 10 queues to check that private commands for UI will work + queue_urls = [self._create_queue_and_assert("{}-{}".format(self.queue_name, i)) for i in range(10)] + created_queue_url2 = self._create_queue_and_assert(self.queue_name + '1.fifo', is_fifo=True) + queue_urls.append(created_queue_url2) queue_urls.append(created_queue_url2 + to_bytes('_nonexistent_queue_url')) - batch_result = self._sqs_api.private_get_queue_attributes_batch(queue_urls) - assert_that( - batch_result['GetQueueAttributesBatchResultEntry'], instance_of(list) - ) - assert_that( - len(batch_result['GetQueueAttributesBatchResultEntry']), equal_to(11) - ) - assert_that( - batch_result['BatchResultErrorEntry'], instance_of(OrderedDict) # that means that we have only one entry for error - ) - - for entry in batch_result['GetQueueAttributesBatchResultEntry']: - assert_that( - entry['__AttributesDict'], - has_items( - 'ReceiveMessageWaitTimeSeconds', - 'VisibilityTimeout', - 'ApproximateNumberOfMessages', - 'ApproximateNumberOfMessagesNotVisible', - 'CreatedTimestamp', - 'MaximumMessageSize', - 'MessageRetentionPeriod', - ) - ) - - def test_create_queue_by_nonexistent_user_fails(self): - api = self._create_api_for_user('unknown_user') - try: - api.create_queue('known_queue_name') - assert False, 'Exception is expected' - except RuntimeError as ex: - # Check that exception pattern will not give us any internal information - assert_that(str(ex).find('.cpp:'), equal_to(-1)) - - def test_list_queues_of_nonexistent_user(self): - api = self._create_api_for_user('unknown_user') - - def call_list_queues(): - api.list_queues() - - assert_that( - call_list_queues, - raises(RuntimeError, pattern='OptInRequired') - ) - - def test_invalid_queue_url(self): - def 
call_with_invalid_queue_url(): - self._sqs_api.get_queue_attributes('invalid_queue_url') - - assert_that( - call_with_invalid_queue_url, - raises(RuntimeError, pattern='InvalidParameterValue') - ) - - def test_empty_queue_url(self): - def call_with_empty_queue_url(): - self._sqs_api.send_message(queue_url='', message_body='body') - - assert_that( - call_with_empty_queue_url, - raises(RuntimeError, pattern='MissingParameter') - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_delay_one_message(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - self._send_message_and_assert(created_queue_url, 'test_delay_message', delay_seconds=900, seq_no='1' if is_fifo else None, group_id='group' if is_fifo else None) - - self._read_messages_and_assert( - created_queue_url, 10, ReadResponseMatcher().with_n_messages(0) - ) - - message_id = self._send_message_and_assert(created_queue_url, 'test_delay_message_2', delay_seconds=2, seq_no='2' if is_fifo else None, group_id='group_2' if is_fifo else None) - time.sleep(3) - self._read_messages_and_assert( - created_queue_url, 10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids([message_id]) - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_delay_message_batch(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - - def get_group_id(i): - group_id = 'group_{}'.format(i) if is_fifo else None - return group_id - - batch_request = [ - SqsSendMessageParams('body 1', group_id=get_group_id(1), deduplication_id='1' if is_fifo else None), - SqsSendMessageParams('body 2', delay_seconds=900, group_id=get_group_id(2), deduplication_id='2' if is_fifo else None), - SqsSendMessageParams('body 3', delay_seconds=2, group_id=get_group_id(3), deduplication_id='3' if is_fifo else None), - ] - send_message_batch_result = self._sqs_api.send_message_batch(created_queue_url, batch_request) - assert_that(len(send_message_batch_result), equal_to(3)) - - assert_that(send_message_batch_result[0], has_item('SendMessageBatchResultEntry')) - assert_that(send_message_batch_result[1], has_item('SendMessageBatchResultEntry')) - assert_that(send_message_batch_result[2], has_item('SendMessageBatchResultEntry')) - - time.sleep(3) - - self._read_messages_and_assert( - created_queue_url, 10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids([ - send_message_batch_result[0]['SendMessageBatchResultEntry']['MessageId'], - send_message_batch_result[2]['SendMessageBatchResultEntry']['MessageId'], - ]) - ) - - def test_validates_message_body(self): - created_queue_url = self._create_queue_and_assert(self.queue_name) - - def call_send(): - self._sqs_api.send_message( - created_queue_url, 'invalid body: \x02' - ) - - assert_that( - call_send, - raises( - RuntimeError, - pattern='InvalidParameterValue' - ) - ) - - def test_validates_message_attribute_value(self): - created_queue_url = self._create_queue_and_assert(self.queue_name) - - def call_send(): - self._sqs_api.send_message( - created_queue_url, 'body', attributes=[SqsMessageAttribute('invalid', 'String', 'invaid value: \x1F')] - ) - - assert_that( - call_send, - raises( - RuntimeError, - pattern='InvalidParameterValue' - ) - ) - - -class TestYandexAttributesPrefix(KikimrSqsTestBase): - @classmethod - def _setup_config_generator(cls): - config_generator = 
super(TestYandexAttributesPrefix, cls)._setup_config_generator() + batch_result = self._sqs_api.private_get_queue_attributes_batch(queue_urls) + assert_that( + batch_result['GetQueueAttributesBatchResultEntry'], instance_of(list) + ) + assert_that( + len(batch_result['GetQueueAttributesBatchResultEntry']), equal_to(11) + ) + assert_that( + batch_result['BatchResultErrorEntry'], instance_of(OrderedDict) # that means that we have only one entry for error + ) + + for entry in batch_result['GetQueueAttributesBatchResultEntry']: + assert_that( + entry['__AttributesDict'], + has_items( + 'ReceiveMessageWaitTimeSeconds', + 'VisibilityTimeout', + 'ApproximateNumberOfMessages', + 'ApproximateNumberOfMessagesNotVisible', + 'CreatedTimestamp', + 'MaximumMessageSize', + 'MessageRetentionPeriod', + ) + ) + + def test_create_queue_by_nonexistent_user_fails(self): + api = self._create_api_for_user('unknown_user') + try: + api.create_queue('known_queue_name') + assert False, 'Exception is expected' + except RuntimeError as ex: + # Check that exception pattern will not give us any internal information + assert_that(str(ex).find('.cpp:'), equal_to(-1)) + + def test_list_queues_of_nonexistent_user(self): + api = self._create_api_for_user('unknown_user') + + def call_list_queues(): + api.list_queues() + + assert_that( + call_list_queues, + raises(RuntimeError, pattern='OptInRequired') + ) + + def test_invalid_queue_url(self): + def call_with_invalid_queue_url(): + self._sqs_api.get_queue_attributes('invalid_queue_url') + + assert_that( + call_with_invalid_queue_url, + raises(RuntimeError, pattern='InvalidParameterValue') + ) + + def test_empty_queue_url(self): + def call_with_empty_queue_url(): + self._sqs_api.send_message(queue_url='', message_body='body') + + assert_that( + call_with_empty_queue_url, + raises(RuntimeError, pattern='MissingParameter') + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_delay_one_message(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + self._send_message_and_assert(created_queue_url, 'test_delay_message', delay_seconds=900, seq_no='1' if is_fifo else None, group_id='group' if is_fifo else None) + + self._read_messages_and_assert( + created_queue_url, 10, ReadResponseMatcher().with_n_messages(0) + ) + + message_id = self._send_message_and_assert(created_queue_url, 'test_delay_message_2', delay_seconds=2, seq_no='2' if is_fifo else None, group_id='group_2' if is_fifo else None) + time.sleep(3) + self._read_messages_and_assert( + created_queue_url, 10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids([message_id]) + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_delay_message_batch(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + + def get_group_id(i): + group_id = 'group_{}'.format(i) if is_fifo else None + return group_id + + batch_request = [ + SqsSendMessageParams('body 1', group_id=get_group_id(1), deduplication_id='1' if is_fifo else None), + SqsSendMessageParams('body 2', delay_seconds=900, group_id=get_group_id(2), deduplication_id='2' if is_fifo else None), + SqsSendMessageParams('body 3', delay_seconds=2, group_id=get_group_id(3), deduplication_id='3' if is_fifo else None), + ] + send_message_batch_result = self._sqs_api.send_message_batch(created_queue_url, batch_request) + 
assert_that(len(send_message_batch_result), equal_to(3)) + + assert_that(send_message_batch_result[0], has_item('SendMessageBatchResultEntry')) + assert_that(send_message_batch_result[1], has_item('SendMessageBatchResultEntry')) + assert_that(send_message_batch_result[2], has_item('SendMessageBatchResultEntry')) + + time.sleep(3) + + self._read_messages_and_assert( + created_queue_url, 10, visibility_timeout=1000, matcher=ReadResponseMatcher().with_message_ids([ + send_message_batch_result[0]['SendMessageBatchResultEntry']['MessageId'], + send_message_batch_result[2]['SendMessageBatchResultEntry']['MessageId'], + ]) + ) + + def test_validates_message_body(self): + created_queue_url = self._create_queue_and_assert(self.queue_name) + + def call_send(): + self._sqs_api.send_message( + created_queue_url, 'invalid body: \x02' + ) + + assert_that( + call_send, + raises( + RuntimeError, + pattern='InvalidParameterValue' + ) + ) + + def test_validates_message_attribute_value(self): + created_queue_url = self._create_queue_and_assert(self.queue_name) + + def call_send(): + self._sqs_api.send_message( + created_queue_url, 'body', attributes=[SqsMessageAttribute('invalid', 'String', 'invaid value: \x1F')] + ) + + assert_that( + call_send, + raises( + RuntimeError, + pattern='InvalidParameterValue' + ) + ) + + +class TestYandexAttributesPrefix(KikimrSqsTestBase): + @classmethod + def _setup_config_generator(cls): + config_generator = super(TestYandexAttributesPrefix, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['allow_yandex_attribute_prefix'] = True - return config_generator - - def test_allows_yandex_message_attribute_prefix(self): - created_queue_url = self._create_queue_and_assert(self.queue_name) - - self._sqs_api.send_message( - created_queue_url, 'msg_body', attributes={ - SqsMessageAttribute('Ya.attr', 'String', 'a'), - SqsMessageAttribute('YC.attr', 'String', 'b'), - SqsMessageAttribute('Yandex.attr', 'String', 'c'), - } - ) - - -class TestSqsGenericMessagingWithTenant(get_test_with_sqs_tenant_installation(SqsGenericMessagingTest)): - pass - - -class TestSqsGenericMessagingWithPath(get_test_with_sqs_installation_by_path(SqsGenericMessagingTest)): - pass + return config_generator + + def test_allows_yandex_message_attribute_prefix(self): + created_queue_url = self._create_queue_and_assert(self.queue_name) + + self._sqs_api.send_message( + created_queue_url, 'msg_body', attributes={ + SqsMessageAttribute('Ya.attr', 'String', 'a'), + SqsMessageAttribute('YC.attr', 'String', 'b'), + SqsMessageAttribute('Yandex.attr', 'String', 'c'), + } + ) + + +class TestSqsGenericMessagingWithTenant(get_test_with_sqs_tenant_installation(SqsGenericMessagingTest)): + pass + + +class TestSqsGenericMessagingWithPath(get_test_with_sqs_installation_by_path(SqsGenericMessagingTest)): + pass diff --git a/ydb/tests/functional/sqs/test_multinode_cluster.py b/ydb/tests/functional/sqs/test_multinode_cluster.py index 047ae56198d..e7037a516a5 100644 --- a/ydb/tests/functional/sqs/test_multinode_cluster.py +++ b/ydb/tests/functional/sqs/test_multinode_cluster.py @@ -2,205 +2,205 @@ # -*- coding: utf-8 -*- import logging import time -import threading +import threading import pytest -from hamcrest import assert_that, equal_to, not_none, raises, not_ +from hamcrest import assert_that, equal_to, not_none, raises, not_ from ydb.tests.library.common.types import Erasure -from sqs_matchers import ReadResponseMatcher +from sqs_matchers import ReadResponseMatcher + +from sqs_test_base import 
KikimrSqsTestBase, STOP_NODE_PARAMS, IS_FIFO_PARAMS -from sqs_test_base import KikimrSqsTestBase, STOP_NODE_PARAMS, IS_FIFO_PARAMS - -class TestSqsMultinodeCluster(KikimrSqsTestBase): - erasure = Erasure.BLOCK_4_2 +class TestSqsMultinodeCluster(KikimrSqsTestBase): + erasure = Erasure.BLOCK_4_2 use_in_memory_pdisks = False - - @classmethod - def _setup_config_generator(cls): - config_generator = super(TestSqsMultinodeCluster, cls)._setup_config_generator() + + @classmethod + def _setup_config_generator(cls): + config_generator = super(TestSqsMultinodeCluster, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['masters_describer_update_time_ms'] = 1000 config_generator.yaml_config['sqs_config']['background_metrics_update_time_ms'] = 1000 - return config_generator - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_sqs_writes_through_proxy_on_each_node(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - message_ids = [] + return config_generator + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_sqs_writes_through_proxy_on_each_node(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + message_ids = [] for i in range(self.cluster_nodes_count * 3): - if is_fifo: - group_id = str(i) - seq_no = self.seq_no - self.seq_no += 1 - else: - group_id = None - seq_no = None - if i: + if is_fifo: + group_id = str(i) + seq_no = self.seq_no + self.seq_no += 1 + else: + group_id = None + seq_no = None + if i: time.sleep(0.3) # sleep such a time that node caches master info in the second request, but doesn't do so in the third one - node_index = i % self.cluster_nodes_count + node_index = i % self.cluster_nodes_count result = self._sqs_apis[node_index].send_message(self.queue_url, self._msg_body_template.format(next(self.counter)), deduplication_id=seq_no, group_id=group_id) - assert_that( - result, not_none() - ) - logging.info('Message with id {} is sent to queue through proxy {}:{}'.format(result, self.server_fqdn, self.sqs_ports[node_index])) - message_ids.append(result) - - self._read_messages_and_assert( - self.queue_url, 50, + assert_that( + result, not_none() + ) + logging.info('Message with id {} is sent to queue through proxy {}:{}'.format(result, self.server_fqdn, self.sqs_ports[node_index])) + message_ids.append(result) + + self._read_messages_and_assert( + self.queue_url, 50, ReadResponseMatcher().with_message_ids(message_ids), - wait_timeout=1, + wait_timeout=1, visibility_timeout=1000 - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - @pytest.mark.parametrize(**STOP_NODE_PARAMS) - def test_has_messages_counters(self, is_fifo, stop_node): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - node_index = self._get_queue_master_node_index() - logging.debug('Master node for queue "{}" is {}'.format(self.queue_name, node_index)) - - # send one message - if is_fifo: - group_id = "1" - seq_no = self.seq_no - self.seq_no += 1 - else: - group_id = None - seq_no = None + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + @pytest.mark.parametrize(**STOP_NODE_PARAMS) + def test_has_messages_counters(self, is_fifo, stop_node): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + node_index = self._get_queue_master_node_index() + 
logging.debug('Master node for queue "{}" is {}'.format(self.queue_name, node_index)) + + # send one message + if is_fifo: + group_id = "1" + seq_no = self.seq_no + self.seq_no += 1 + else: + group_id = None + seq_no = None message_id = self._sqs_api.send_message(self.queue_url, self._msg_body_template.format(next(self.counter)), deduplication_id=seq_no, group_id=group_id) - assert_that( - message_id, not_none() - ) - - def get_messages_counters(node_index): - counters = self._get_sqs_counters(node_index) - - labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - } - - labels['sensor'] = 'MessagesCount' - messages = self._get_counter(counters, labels) - - labels['sensor'] = 'InflyMessagesCount' - infly_messages = self._get_counter(counters, labels) - - labels['sensor'] = 'OldestMessageAgeSeconds' - age = self._get_counter(counters, labels) - - return (messages, infly_messages, age) - - def check_master_node_counters(node_index): - attempts = 100 - while attempts: - attempts -= 1 - messages, infly_messages, age = get_messages_counters(node_index) - - if messages is None or messages['value'] != 1 or not is_fifo and infly_messages is None or not is_fifo and infly_messages['value'] != 0 or age is None or age['value'] == 0: - assert attempts, 'No attempts left to see right counter values for queue' - time.sleep(0.5) - else: - break - - check_master_node_counters(node_index) - - def check_no_message_counters(node_index): - attempts = 100 - while attempts: - attempts -= 1 - messages, infly_messages, age = get_messages_counters(node_index) - - if messages is not None or infly_messages is not None or age is not None: - assert attempts, 'No attempts left to see no counter values for queue' - time.sleep(0.5) - else: - break - - check_no_message_counters(self._other_node(node_index)) - - # remove master from node - if stop_node: - self.cluster.nodes[node_index + 1].stop() # nodes indices are one-based - logging.debug('Killed node {}'.format(node_index + 1)) - else: - self._kick_tablets_from_node(node_index) - - new_node_index = self._get_queue_master_node_index() - logging.debug('Previous master node: {}. 
New master node: {}'.format(node_index, new_node_index)) - assert_that(new_node_index, not_(equal_to(node_index))) - - if not stop_node: - check_no_message_counters(node_index) - - logging.debug('New master node for queue "{}" is {}'.format(self.queue_name, new_node_index)) - check_master_node_counters(new_node_index) - - @pytest.mark.parametrize(**STOP_NODE_PARAMS) - def test_reassign_master(self, stop_node): - self._create_queue_and_assert(self.queue_name) - node_index = self._get_queue_master_node_index() - proxy_node_index = self._other_node(node_index) - assert_that(proxy_node_index, not_(equal_to(node_index))) + assert_that( + message_id, not_none() + ) + + def get_messages_counters(node_index): + counters = self._get_sqs_counters(node_index) + + labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + } + + labels['sensor'] = 'MessagesCount' + messages = self._get_counter(counters, labels) + + labels['sensor'] = 'InflyMessagesCount' + infly_messages = self._get_counter(counters, labels) + + labels['sensor'] = 'OldestMessageAgeSeconds' + age = self._get_counter(counters, labels) + + return (messages, infly_messages, age) + + def check_master_node_counters(node_index): + attempts = 100 + while attempts: + attempts -= 1 + messages, infly_messages, age = get_messages_counters(node_index) + + if messages is None or messages['value'] != 1 or not is_fifo and infly_messages is None or not is_fifo and infly_messages['value'] != 0 or age is None or age['value'] == 0: + assert attempts, 'No attempts left to see right counter values for queue' + time.sleep(0.5) + else: + break + + check_master_node_counters(node_index) + + def check_no_message_counters(node_index): + attempts = 100 + while attempts: + attempts -= 1 + messages, infly_messages, age = get_messages_counters(node_index) + + if messages is not None or infly_messages is not None or age is not None: + assert attempts, 'No attempts left to see no counter values for queue' + time.sleep(0.5) + else: + break + + check_no_message_counters(self._other_node(node_index)) + + # remove master from node + if stop_node: + self.cluster.nodes[node_index + 1].stop() # nodes indices are one-based + logging.debug('Killed node {}'.format(node_index + 1)) + else: + self._kick_tablets_from_node(node_index) + + new_node_index = self._get_queue_master_node_index() + logging.debug('Previous master node: {}. 
New master node: {}'.format(node_index, new_node_index)) + assert_that(new_node_index, not_(equal_to(node_index))) + + if not stop_node: + check_no_message_counters(node_index) + + logging.debug('New master node for queue "{}" is {}'.format(self.queue_name, new_node_index)) + check_master_node_counters(new_node_index) + + @pytest.mark.parametrize(**STOP_NODE_PARAMS) + def test_reassign_master(self, stop_node): + self._create_queue_and_assert(self.queue_name) + node_index = self._get_queue_master_node_index() + proxy_node_index = self._other_node(node_index) + assert_that(proxy_node_index, not_(equal_to(node_index))) result = self._sqs_apis[proxy_node_index].send_message(self.queue_url, self._msg_body_template.format(next(self.counter))) - assert_that( - result, not_none() - ) - - # remove master from node - if stop_node: - self.cluster.nodes[node_index + 1].stop() # nodes indices are one-based - logging.debug('Killed node {}'.format(node_index + 1)) - else: - self._kick_tablets_from_node(node_index) - - retries = 50 - while retries: - retries -= 1 - time.sleep(0.5) - try: + assert_that( + result, not_none() + ) + + # remove master from node + if stop_node: + self.cluster.nodes[node_index + 1].stop() # nodes indices are one-based + logging.debug('Killed node {}'.format(node_index + 1)) + else: + self._kick_tablets_from_node(node_index) + + retries = 50 + while retries: + retries -= 1 + time.sleep(0.5) + try: result = self._sqs_apis[proxy_node_index].send_message(self.queue_url, self._msg_body_template.format(next(self.counter))) - assert_that( - result, not_none() - ) - break - except RuntimeError: - continue - - def _run_receive_message(self): - logging.debug('_run_receive_message started') - node_index = self._get_queue_master_node_index() - proxy_node_index = self._other_node(node_index) - - def call_receive(): - self._sqs_apis[proxy_node_index].receive_message( - self.queue_url, max_number_of_messages=10, - visibility_timeout=1000, wait_timeout=20 - ) - - assert_that( - call_receive, + assert_that( + result, not_none() + ) + break + except RuntimeError: + continue + + def _run_receive_message(self): + logging.debug('_run_receive_message started') + node_index = self._get_queue_master_node_index() + proxy_node_index = self._other_node(node_index) + + def call_receive(): + self._sqs_apis[proxy_node_index].receive_message( + self.queue_url, max_number_of_messages=10, + visibility_timeout=1000, wait_timeout=20 + ) + + assert_that( + call_receive, raises(RuntimeError, pattern='failed with status 50.*\n.*Queue leader session error.') - ) - logging.debug('_run_receive_message finished') - self.receive_message_finished = True - - def test_ends_request_after_kill(self): - self._create_queue_and_assert(self.queue_name) - node_index = self._get_queue_master_node_index() - self.receive_message_finished = False - thread = threading.Thread(target=self._run_receive_message) - thread.start() + ) + logging.debug('_run_receive_message finished') + self.receive_message_finished = True + + def test_ends_request_after_kill(self): + self._create_queue_and_assert(self.queue_name) + node_index = self._get_queue_master_node_index() + self.receive_message_finished = False + thread = threading.Thread(target=self._run_receive_message) + thread.start() time.sleep(3) - self.cluster.nodes[node_index + 1].stop() # nodes indices are one-based - thread.join() + self.cluster.nodes[node_index + 1].stop() # nodes indices are one-based + thread.join() assert_that( self.receive_message_finished, equal_to( diff --git 
a/ydb/tests/functional/sqs/test_ping.py b/ydb/tests/functional/sqs/test_ping.py index 7ee604ce561..a832686d3ac 100644 --- a/ydb/tests/functional/sqs/test_ping.py +++ b/ydb/tests/functional/sqs/test_ping.py @@ -1,32 +1,32 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -import requests - -from hamcrest import assert_that, equal_to - -from sqs_test_base import KikimrSqsTestBase - - -class TestPing(KikimrSqsTestBase): - def test_ping(self): - result = requests.get('http://localhost:{}/private/ping'.format(self.sqs_port)) - assert_that(result.status_code, equal_to(200)) - assert_that(result.text, equal_to('pong')) - - result = requests.get('http://localhost:{}/private/ping/'.format(self.sqs_port)) - assert_that(result.status_code, equal_to(200)) - assert_that(result.text, equal_to('pong')) - - def test_error_on_cgi_parameters(self): - result = requests.get('http://localhost:{}/private/ping?cgi=1'.format(self.sqs_port)) - assert_that(result.status_code, equal_to(400)) - - def test_error_on_non_ping_path(self): - result = requests.get('http://localhost:{}/private/ping/a'.format(self.sqs_port)) - assert_that(result.status_code, equal_to(400)) - - result = requests.get('http://localhost:{}/private/a/ping'.format(self.sqs_port)) - assert_that(result.status_code, equal_to(400)) - - result = requests.get('http://localhost:{}/a/private/ping'.format(self.sqs_port)) - assert_that(result.status_code, equal_to(400)) +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import requests + +from hamcrest import assert_that, equal_to + +from sqs_test_base import KikimrSqsTestBase + + +class TestPing(KikimrSqsTestBase): + def test_ping(self): + result = requests.get('http://localhost:{}/private/ping'.format(self.sqs_port)) + assert_that(result.status_code, equal_to(200)) + assert_that(result.text, equal_to('pong')) + + result = requests.get('http://localhost:{}/private/ping/'.format(self.sqs_port)) + assert_that(result.status_code, equal_to(200)) + assert_that(result.text, equal_to('pong')) + + def test_error_on_cgi_parameters(self): + result = requests.get('http://localhost:{}/private/ping?cgi=1'.format(self.sqs_port)) + assert_that(result.status_code, equal_to(400)) + + def test_error_on_non_ping_path(self): + result = requests.get('http://localhost:{}/private/ping/a'.format(self.sqs_port)) + assert_that(result.status_code, equal_to(400)) + + result = requests.get('http://localhost:{}/private/a/ping'.format(self.sqs_port)) + assert_that(result.status_code, equal_to(400)) + + result = requests.get('http://localhost:{}/a/private/ping'.format(self.sqs_port)) + assert_that(result.status_code, equal_to(400)) diff --git a/ydb/tests/functional/sqs/test_polling.py b/ydb/tests/functional/sqs/test_polling.py index dba0556e7d4..53d0397490d 100644 --- a/ydb/tests/functional/sqs/test_polling.py +++ b/ydb/tests/functional/sqs/test_polling.py @@ -1,13 +1,13 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest -from hamcrest import assert_that, equal_to - -from sqs_matchers import ReadResponseMatcher - -from sqs_test_base import KikimrSqsTestBase, POLLING_PARAMS, IS_FIFO_PARAMS +from hamcrest import assert_that, equal_to +from sqs_matchers import ReadResponseMatcher +from sqs_test_base import KikimrSqsTestBase, POLLING_PARAMS, IS_FIFO_PARAMS + + class TestSqsPolling(KikimrSqsTestBase): @classmethod def _setup_config_generator(cls): @@ -28,7 +28,7 @@ class TestSqsPolling(KikimrSqsTestBase): read_result = self._read_messages_and_assert( created_queue_url, messages_count=1, visibility_timeout=1000, 
wait_timeout=polling_wait_timeout, - matcher=ReadResponseMatcher().with_n_messages(1) + matcher=ReadResponseMatcher().with_n_messages(1) ) assert_that(read_result[0]['Body'], equal_to('test_send_message')) diff --git a/ydb/tests/functional/sqs/test_queue_attributes_validation.py b/ydb/tests/functional/sqs/test_queue_attributes_validation.py index 27ec79c9ee7..5e4102f801d 100644 --- a/ydb/tests/functional/sqs/test_queue_attributes_validation.py +++ b/ydb/tests/functional/sqs/test_queue_attributes_validation.py @@ -1,11 +1,11 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest -from hamcrest import assert_that, equal_to - -from sqs_test_base import KikimrSqsTestBase, IS_FIFO_PARAMS +from hamcrest import assert_that, equal_to +from sqs_test_base import KikimrSqsTestBase, IS_FIFO_PARAMS + class TestQueueAttributesInCompatibilityMode(KikimrSqsTestBase): @classmethod def _setup_config_generator(cls): @@ -28,7 +28,7 @@ class TestQueueAttributesInCompatibilityMode(KikimrSqsTestBase): # previously set attributes should be right there assert_that(self._sqs_api.get_queue_attributes(queue_url)['MaximumMessageSize'], equal_to('1024')) try: - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo, use_http=True, attributes={'MaximumMessageSize': 'troll'}, retries=1) + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo, use_http=True, attributes={'MaximumMessageSize': 'troll'}, retries=1) except: pass else: @@ -167,7 +167,7 @@ class TestQueueAttributesValidation(KikimrSqsTestBase): assert_that(attributes[attr], equal_to(custom_attributes[attr])) # get arn by default - assert_that(attributes['QueueArn'], equal_to('yrn:ya:sqs:ru-central1:' + self._username + ':' + self.queue_name)) + assert_that(attributes['QueueArn'], equal_to('yrn:ya:sqs:ru-central1:' + self._username + ':' + self.queue_name)) # okay, now we'll try to break it for attr in custom_attributes: diff --git a/ydb/tests/functional/sqs/test_queues_managing.py b/ydb/tests/functional/sqs/test_queues_managing.py index bcfd91ca677..5f5f3a27796 100644 --- a/ydb/tests/functional/sqs/test_queues_managing.py +++ b/ydb/tests/functional/sqs/test_queues_managing.py @@ -1,37 +1,37 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- import time -from collections import OrderedDict +from collections import OrderedDict import pytest from hamcrest import assert_that, equal_to, greater_than, not_none, none, has_item, has_items, raises, empty, instance_of -from sqs_matchers import ReadResponseMatcher +from sqs_matchers import ReadResponseMatcher -from sqs_test_base import KikimrSqsTestBase, get_test_with_sqs_installation_by_path, get_test_with_sqs_tenant_installation, IS_FIFO_PARAMS +from sqs_test_base import KikimrSqsTestBase, get_test_with_sqs_installation_by_path, get_test_with_sqs_tenant_installation, IS_FIFO_PARAMS from sqs_test_base import to_bytes from ydb import issues as ydb_issues - - -class QueuesManagingTest(KikimrSqsTestBase): - @classmethod - def _setup_config_generator(cls): - config_generator = super(QueuesManagingTest, cls)._setup_config_generator() + + +class QueuesManagingTest(KikimrSqsTestBase): + @classmethod + def _setup_config_generator(cls): + config_generator = super(QueuesManagingTest, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['account_settings_defaults'] = {'max_queues_count': 10} - return config_generator - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) + return config_generator + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) def test_create_queue(self, 
is_fifo): - attributes = {} + attributes = {} if is_fifo: self.queue_name = self.queue_name + '.fifo' - attributes['ContentBasedDeduplication'] = 'true' - attributes['DelaySeconds'] = '506' - attributes['MaximumMessageSize'] = '10003' - attributes['MessageRetentionPeriod'] = '502000' - attributes['ReceiveMessageWaitTimeSeconds'] = '11' - attributes['VisibilityTimeout'] = '42' - created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo, use_http=True) + attributes['ContentBasedDeduplication'] = 'true' + attributes['DelaySeconds'] = '506' + attributes['MaximumMessageSize'] = '10003' + attributes['MessageRetentionPeriod'] = '502000' + attributes['ReceiveMessageWaitTimeSeconds'] = '11' + attributes['VisibilityTimeout'] = '42' + created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo, use_http=True) existing_queues = self._sqs_api.list_queues() assert_that( created_queue_url in existing_queues @@ -41,25 +41,25 @@ class QueuesManagingTest(KikimrSqsTestBase): got_queue_url, equal_to(created_queue_url) ) - created_attributes = self._sqs_api.get_queue_attributes(got_queue_url) - assert_that(equal_to(created_attributes.get('DelaySeconds')), attributes['DelaySeconds']) - assert_that(equal_to(created_attributes.get('MaximumMessageSize')), attributes['MaximumMessageSize']) - assert_that(equal_to(created_attributes.get('MessageRetentionPeriod')), attributes['MessageRetentionPeriod']) - assert_that(equal_to(created_attributes.get('ReceiveMessageWaitTimeSeconds')), attributes['ReceiveMessageWaitTimeSeconds']) - assert_that(equal_to(created_attributes.get('VisibilityTimeout')), attributes['VisibilityTimeout']) - if is_fifo: - assert_that(created_attributes.get('ContentBasedDeduplication'), 'true') - + created_attributes = self._sqs_api.get_queue_attributes(got_queue_url) + assert_that(equal_to(created_attributes.get('DelaySeconds')), attributes['DelaySeconds']) + assert_that(equal_to(created_attributes.get('MaximumMessageSize')), attributes['MaximumMessageSize']) + assert_that(equal_to(created_attributes.get('MessageRetentionPeriod')), attributes['MessageRetentionPeriod']) + assert_that(equal_to(created_attributes.get('ReceiveMessageWaitTimeSeconds')), attributes['ReceiveMessageWaitTimeSeconds']) + assert_that(equal_to(created_attributes.get('VisibilityTimeout')), attributes['VisibilityTimeout']) + if is_fifo: + assert_that(created_attributes.get('ContentBasedDeduplication'), 'true') + def test_create_fifo_queue_wo_postfix(self): - def call_create(): - self.called = True - self._sqs_api.create_queue(self.queue_name, is_fifo=True) - + def call_create(): + self.called = True + self._sqs_api.create_queue(self.queue_name, is_fifo=True) + assert_that( - call_create, + call_create, raises( RuntimeError, - pattern='failed with status 400.*\n.*FIFO queue should end with "\\.fifo"' + pattern='failed with status 400.*\n.*FIFO queue should end with "\\.fifo"' ) ) @@ -82,36 +82,36 @@ class QueuesManagingTest(KikimrSqsTestBase): self._sqs_api.delete_queue(queue_url) assert_that(self._get_table_lines_count(table_path), greater_than(lines_count)) - def test_create_queue_with_invalid_name(self): - def call_create(): - self._sqs_api.create_queue('invalid_queue_name!') - - assert_that( - call_create, - raises( - RuntimeError, - pattern='Invalid queue name' - ) - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_create_queue_with_invalid_name(self): + def call_create(): + self._sqs_api.create_queue('invalid_queue_name!') + + assert_that( + call_create, + raises( + 
RuntimeError, + pattern='Invalid queue name' + ) + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) def test_delete_queue(self, is_fifo): if is_fifo: self.queue_name = self.queue_name + '.fifo' created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) self._sqs_api.list_queues() - self._sqs_api.send_message(created_queue_url, 'body', group_id='group' if is_fifo else None, deduplication_id='123' if is_fifo else None) - - send_message_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'SendMessage_Count', - } - counters = self._get_sqs_counters() - sends = self._get_counter_value(counters, send_message_labels) - assert_that(sends, equal_to(1)) - + self._sqs_api.send_message(created_queue_url, 'body', group_id='group' if is_fifo else None, deduplication_id='123' if is_fifo else None) + + send_message_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'SendMessage_Count', + } + counters = self._get_sqs_counters() + sends = self._get_counter_value(counters, send_message_labels) + assert_that(sends, equal_to(1)) + delete_result = self._sqs_api.delete_queue(created_queue_url) assert_that( delete_result, not_none() @@ -123,218 +123,218 @@ class QueuesManagingTest(KikimrSqsTestBase): "Deleted queue appears in list_queues()" ) - time.sleep(2) - counters = self._get_sqs_counters() - sends = self._get_counter_value(counters, send_message_labels) - assert_that(sends, none()) - - def describe_queue_path(): + time.sleep(2) + counters = self._get_sqs_counters() + sends = self._get_counter_value(counters, send_message_labels) + assert_that(sends, none()) + + def describe_queue_path(): self._driver.scheme_client.describe_path('{}/{}/{}'.format(self.sqs_root, self._username, self.queue_name)) - - assert_that( - describe_queue_path, - raises( - ydb_issues.SchemeError - ) - ) - - def test_delete_queue_batch(self): - existing_queues = self._sqs_api.list_queues() - assert_that( - existing_queues, empty() - ) - url1 = self._create_queue_and_assert('{}_1.fifo'.format(self.queue_name), is_fifo=True) - url2 = self._create_queue_and_assert('{}_2'.format(self.queue_name), is_fifo=False) - url3 = self._create_queue_and_assert('{}_3'.format(self.queue_name), is_fifo=False) + + assert_that( + describe_queue_path, + raises( + ydb_issues.SchemeError + ) + ) + + def test_delete_queue_batch(self): + existing_queues = self._sqs_api.list_queues() + assert_that( + existing_queues, empty() + ) + url1 = self._create_queue_and_assert('{}_1.fifo'.format(self.queue_name), is_fifo=True) + url2 = self._create_queue_and_assert('{}_2'.format(self.queue_name), is_fifo=False) + url3 = self._create_queue_and_assert('{}_3'.format(self.queue_name), is_fifo=False) url4 = to_bytes(url2) + to_bytes('_incorrect_url') existing_queues = [to_bytes(y) for y in self._sqs_api.list_queues()] - assert_that( - len(existing_queues), equal_to(3) - ) - assert_that( + assert_that( + len(existing_queues), equal_to(3) + ) + assert_that( existing_queues, has_items(to_bytes(url1), to_bytes(url2)) - ) - - delete_queue_batch_result = self._sqs_api.private_delete_queue_batch([url1, url2, url4]) - assert_that( - delete_queue_batch_result['DeleteQueueBatchResultEntry'], instance_of(list) - ) - assert_that( - len(delete_queue_batch_result['DeleteQueueBatchResultEntry']), equal_to(2) - ) - assert_that( - delete_queue_batch_result['BatchResultErrorEntry'], instance_of(OrderedDict) # that means that we have only one entry for error - ) - + ) + + 
delete_queue_batch_result = self._sqs_api.private_delete_queue_batch([url1, url2, url4]) + assert_that( + delete_queue_batch_result['DeleteQueueBatchResultEntry'], instance_of(list) + ) + assert_that( + len(delete_queue_batch_result['DeleteQueueBatchResultEntry']), equal_to(2) + ) + assert_that( + delete_queue_batch_result['BatchResultErrorEntry'], instance_of(OrderedDict) # that means that we have only one entry for error + ) + existing_queues = [to_bytes(y) for y in self._sqs_api.list_queues()] - assert_that( - len(existing_queues), equal_to(1) - ) - assert_that( + assert_that( + len(existing_queues), equal_to(1) + ) + assert_that( existing_queues, has_item(to_bytes(url3)) - ) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_purge_queue(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - if is_fifo: - group_id = 'group' - else: - group_id = None - self._send_messages(created_queue_url, 10, self._msg_body_template, is_fifo=is_fifo, group_id=group_id) - self._sqs_api.purge_queue(created_queue_url) - - tries = 20 - while tries: - tries -= 1 - time.sleep(0.5) - - ret = self._read_while_not_empty(created_queue_url, 10, visibility_timeout=0) - if len(ret) > 0 and tries: - continue - assert_that(len(ret), equal_to(0)) - break - - self._check_queue_tables_are_empty() - - # We can continue working after purge - message_ids = self._send_messages(created_queue_url, 1, self._msg_body_template, is_fifo=is_fifo, group_id=group_id) - self._read_messages_and_assert(created_queue_url, 1, matcher=ReadResponseMatcher().with_message_ids(message_ids)) - - def test_purge_queue_batch(self): - created_queue_url1 = self._create_queue_and_assert(self.queue_name) - created_queue_url2 = self._create_queue_and_assert(self.queue_name + '1') + ) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_purge_queue(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + if is_fifo: + group_id = 'group' + else: + group_id = None + self._send_messages(created_queue_url, 10, self._msg_body_template, is_fifo=is_fifo, group_id=group_id) + self._sqs_api.purge_queue(created_queue_url) + + tries = 20 + while tries: + tries -= 1 + time.sleep(0.5) + + ret = self._read_while_not_empty(created_queue_url, 10, visibility_timeout=0) + if len(ret) > 0 and tries: + continue + assert_that(len(ret), equal_to(0)) + break + + self._check_queue_tables_are_empty() + + # We can continue working after purge + message_ids = self._send_messages(created_queue_url, 1, self._msg_body_template, is_fifo=is_fifo, group_id=group_id) + self._read_messages_and_assert(created_queue_url, 1, matcher=ReadResponseMatcher().with_message_ids(message_ids)) + + def test_purge_queue_batch(self): + created_queue_url1 = self._create_queue_and_assert(self.queue_name) + created_queue_url2 = self._create_queue_and_assert(self.queue_name + '1') created_queue_url3 = to_bytes(created_queue_url2) + to_bytes('_nonexistent_queue_url') - self._send_messages(created_queue_url1, 10, self._msg_body_template) - self._send_messages(created_queue_url2, 10, self._msg_body_template) - purge_queue_batch_result = self._sqs_api.private_purge_queue_batch([created_queue_url1, created_queue_url2, created_queue_url3]) - assert_that( - purge_queue_batch_result['PurgeQueueBatchResultEntry'], instance_of(list) - ) - assert_that( - 
len(purge_queue_batch_result['PurgeQueueBatchResultEntry']), equal_to(2) - ) - assert_that( - purge_queue_batch_result['BatchResultErrorEntry'], instance_of(OrderedDict) # that means that we have only one entry for error - ) - - def check_purged_queue(queue_url): - tries = 20 - while tries: - tries -= 1 - - ret = self._read_while_not_empty(queue_url, 10, visibility_timeout=0) - if len(ret) > 0 and tries: - time.sleep(0.5) - continue - assert_that(len(ret), equal_to(0)) - break - - self._check_queue_tables_are_empty() - - check_purged_queue(created_queue_url1) - check_purged_queue(created_queue_url2) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) + self._send_messages(created_queue_url1, 10, self._msg_body_template) + self._send_messages(created_queue_url2, 10, self._msg_body_template) + purge_queue_batch_result = self._sqs_api.private_purge_queue_batch([created_queue_url1, created_queue_url2, created_queue_url3]) + assert_that( + purge_queue_batch_result['PurgeQueueBatchResultEntry'], instance_of(list) + ) + assert_that( + len(purge_queue_batch_result['PurgeQueueBatchResultEntry']), equal_to(2) + ) + assert_that( + purge_queue_batch_result['BatchResultErrorEntry'], instance_of(OrderedDict) # that means that we have only one entry for error + ) + + def check_purged_queue(queue_url): + tries = 20 + while tries: + tries -= 1 + + ret = self._read_while_not_empty(queue_url, 10, visibility_timeout=0) + if len(ret) > 0 and tries: + time.sleep(0.5) + continue + assert_that(len(ret), equal_to(0)) + break + + self._check_queue_tables_are_empty() + + check_purged_queue(created_queue_url1) + check_purged_queue(created_queue_url2) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) def test_delete_and_create_queue(self, is_fifo): if is_fifo: self.queue_name = self.queue_name + '.fifo' - created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo, use_http=True) - self.seq_no += 1 - self._send_message_and_assert(created_queue_url, self._msg_body_template.format(1), seq_no=self.seq_no if is_fifo else None, group_id='group' if is_fifo else None) + created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo, use_http=True) + self.seq_no += 1 + self._send_message_and_assert(created_queue_url, self._msg_body_template.format(1), seq_no=self.seq_no if is_fifo else None, group_id='group' if is_fifo else None) delete_result = self._sqs_api.delete_queue(created_queue_url) assert_that( delete_result, not_none() ) - created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo, use_http=True) - - master_is_updated = False + created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo, use_http=True) + + master_is_updated = False for i in range(100): - try: - self._read_messages_and_assert( - created_queue_url, 10, ReadResponseMatcher().with_n_messages(0) - ) - master_is_updated = True - break - except RuntimeError as ex: - assert str(ex).find('master session error') != -1 - time.sleep(0.5) # wait master update time - - assert_that(master_is_updated) - self.seq_no += 1 - msg_id = self._send_message_and_assert(created_queue_url, self._msg_body_template.format(2), seq_no=self.seq_no if is_fifo else None, group_id='group' if is_fifo else None) + try: + self._read_messages_and_assert( + created_queue_url, 10, ReadResponseMatcher().with_n_messages(0) + ) + master_is_updated = True + break + except RuntimeError as ex: + assert str(ex).find('master session error') != -1 + time.sleep(0.5) # wait master update time + + 
assert_that(master_is_updated) + self.seq_no += 1 + msg_id = self._send_message_and_assert(created_queue_url, self._msg_body_template.format(2), seq_no=self.seq_no if is_fifo else None, group_id='group' if is_fifo else None) self._read_messages_and_assert( - created_queue_url, 10, ReadResponseMatcher().with_message_ids([msg_id, ]) + created_queue_url, 10, ReadResponseMatcher().with_message_ids([msg_id, ]) ) - + def test_ya_count_queues(self): assert_that(self._sqs_api.private_count_queues(), equal_to('0')) q_url = self._create_queue_and_assert('new_q') self._create_queue_and_assert('new_q_2') - time.sleep(2.1) + time.sleep(2.1) assert_that(self._sqs_api.private_count_queues(), equal_to('2')) self._sqs_api.delete_queue(q_url) - time.sleep(2.1) + time.sleep(2.1) assert_that(self._sqs_api.private_count_queues(), equal_to('1')) - def test_queues_count_over_limit(self): - urls = [] + def test_queues_count_over_limit(self): + urls = [] for i in range(10): - urls.append(self._create_queue_and_assert('queue_{}'.format(i), shards=1, retries=1)) - - def call_create(): - self._sqs_api.create_queue('extra_queue') - - assert_that( - call_create, - raises( - RuntimeError, - pattern='OverLimit' - ) - ) - - def set_max_queues_count(count): + urls.append(self._create_queue_and_assert('queue_{}'.format(i), shards=1, retries=1)) + + def call_create(): + self._sqs_api.create_queue('extra_queue') + + assert_that( + call_create, + raises( + RuntimeError, + pattern='OverLimit' + ) + ) + + def set_max_queues_count(count): self._execute_yql_query('UPSERT INTO `{}/.Settings` (Account, Name, Value) VALUES (\'{}\', \'MaxQueuesCount\', \'{}\')' .format(self.sqs_root, self._username, count)) - - set_max_queues_count(12) - + + set_max_queues_count(12) + for i in range(10, 12): - urls.append(self._create_queue_and_assert('queue_{}'.format(i), shards=1, retries=1)) - - assert_that( - call_create, - raises( - RuntimeError, - pattern='OverLimit' - ) - ) - - self._sqs_api.delete_queue(urls[5]) - self._create_queue_and_assert('new_queue', shards=1, retries=1) - - self._sqs_api.delete_queue(urls[1]) - self._sqs_api.delete_queue(urls[10]) - - set_max_queues_count(10) - - assert_that( - call_create, - raises( - RuntimeError, - pattern='OverLimit' - ) - ) - - -class TestQueuesManagingWithTenant(get_test_with_sqs_tenant_installation(QueuesManagingTest)): - pass - - + urls.append(self._create_queue_and_assert('queue_{}'.format(i), shards=1, retries=1)) + + assert_that( + call_create, + raises( + RuntimeError, + pattern='OverLimit' + ) + ) + + self._sqs_api.delete_queue(urls[5]) + self._create_queue_and_assert('new_queue', shards=1, retries=1) + + self._sqs_api.delete_queue(urls[1]) + self._sqs_api.delete_queue(urls[10]) + + set_max_queues_count(10) + + assert_that( + call_create, + raises( + RuntimeError, + pattern='OverLimit' + ) + ) + + +class TestQueuesManagingWithTenant(get_test_with_sqs_tenant_installation(QueuesManagingTest)): + pass + + class TestQueuesManagingWithPathTestQueuesManagingWithPath(get_test_with_sqs_installation_by_path(QueuesManagingTest)): - pass + pass diff --git a/ydb/tests/functional/sqs/test_quoting.py b/ydb/tests/functional/sqs/test_quoting.py index e35e8f4a54f..8d2f7de4d50 100644 --- a/ydb/tests/functional/sqs/test_quoting.py +++ b/ydb/tests/functional/sqs/test_quoting.py @@ -1,60 +1,60 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -import logging -import time - -import pytest -from hamcrest import assert_that, raises, greater_than, contains_string, equal_to, instance_of - +#!/usr/bin/env python +# 
-*- coding: utf-8 -*- +import logging +import time + +import pytest +from hamcrest import assert_that, raises, greater_than, contains_string, equal_to, instance_of + from sqs_requests_client import SqsSendMessageParams - -from sqs_test_base import KikimrSqsTestBase, IS_FIFO_PARAMS - + +from sqs_test_base import KikimrSqsTestBase, IS_FIFO_PARAMS + from ydb import issues as ydb_issues - - -class TestSqsQuotingWithKesus(KikimrSqsTestBase): - @classmethod - def _setup_config_generator(cls): - config_generator = super(TestSqsQuotingWithKesus, cls)._setup_config_generator() + + +class TestSqsQuotingWithKesus(KikimrSqsTestBase): + @classmethod + def _setup_config_generator(cls): + config_generator = super(TestSqsQuotingWithKesus, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['quoting_config'] = { 'enable_quoting': True, 'kesus_quoter_config': {'default_limits': {'std_send_message_rate': 1000}} } - return config_generator - - def test_creates_quoter(self): + return config_generator + + def test_creates_quoter(self): quoter_description = self._driver.scheme_client.describe_path('{}/{}/.Quoter'.format(self.sqs_root, self._username)) - assert_that(quoter_description.is_coordination_node()) - - # Check that user is properly deleted - self._sqs_api.delete_user(self._username) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_properly_creates_and_deletes_queue(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - self._sqs_api.delete_queue(created_queue_url) - + assert_that(quoter_description.is_coordination_node()) + + # Check that user is properly deleted + self._sqs_api.delete_user(self._username) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_properly_creates_and_deletes_queue(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + created_queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + self._sqs_api.delete_queue(created_queue_url) + quoter_description = self._driver.scheme_client.describe_path('{}/{}/.Quoter'.format(self.sqs_root, self._username)) - assert_that(quoter_description.is_coordination_node()) - - def describe_queue_path(): + assert_that(quoter_description.is_coordination_node()) + + def describe_queue_path(): self._driver.scheme_client.describe_path('{}/{}/{}'.format(self.sqs_root, self._username, self.queue_name)) - - assert_that( - describe_queue_path, - raises( - ydb_issues.SchemeError - ) - ) - - -class TestSqsQuotingWithLocalRateLimiter(KikimrSqsTestBase): - @classmethod - def _setup_config_generator(cls): - config_generator = super(TestSqsQuotingWithLocalRateLimiter, cls)._setup_config_generator() + + assert_that( + describe_queue_path, + raises( + ydb_issues.SchemeError + ) + ) + + +class TestSqsQuotingWithLocalRateLimiter(KikimrSqsTestBase): + @classmethod + def _setup_config_generator(cls): + config_generator = super(TestSqsQuotingWithLocalRateLimiter, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['quoting_config'] = { 'enable_quoting': True, 'quota_deadline_ms': 150, @@ -74,111 +74,111 @@ class TestSqsQuotingWithLocalRateLimiter(KikimrSqsTestBase): } } } - return config_generator - - def test_does_not_create_kesus(self): - def call_describe(): + return config_generator + + def test_does_not_create_kesus(self): + def call_describe(): self._driver.scheme_client.describe_path('{}/{}/.Quoter'.format(self.sqs_root, self._username)) - - assert_that( - 
call_describe, - raises( - ydb_issues.SchemeError - ) - ) - - # Check that user is properly deleted - self._sqs_api.delete_user(self._username) - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_does_actions_with_queue(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - self._create_queue_send_x_messages_read_y_messages(self.queue_name, - send_count=1, - read_count=1, - msg_body_template=self._msg_body_template, - visibility_timeout=1000, - is_fifo=is_fifo) - self._sqs_api.get_queue_attributes(self.queue_url) - self._sqs_api.delete_queue(self.queue_url) - - def call_n_times_except_throttling(self, count, func): - throttling_times = 0 - i = 0 - while i < count: - try: - func(i) - i += 1 - except RuntimeError as err: - assert_that(str(err), contains_string("ThrottlingException")) - throttling_times += 1 - return throttling_times - - def test_send_message_rate(self): - self._create_queue_and_assert(self.queue_name) - - counters = self._get_sqs_counters() - throttling_counter_labels = { - 'subsystem': 'core', - 'user': self._username, - 'queue': self.queue_name, - 'sensor': 'RequestsThrottled', - } - prev_throttling_counter_value = self._get_counter_value(counters, throttling_counter_labels, 0) - - def call_send(i): - if i % 2: - self._sqs_api.send_message(self.queue_url, 'data') - else: - self._sqs_api.send_message_batch(self.queue_url, [SqsSendMessageParams('data'), SqsSendMessageParams('data')]) - - start_time = time.time() - throttling_times = self.call_n_times_except_throttling(50, call_send) - duration = time.time() - start_time - logging.debug('Duration: {}'.format(duration)) - assert_that(duration, greater_than(4)) # reserve 1 second for test stability - - if throttling_times: - counters = self._get_sqs_counters() - throttling_counter_value = self._get_counter_value(counters, throttling_counter_labels, 0) - assert_that(throttling_counter_value - prev_throttling_counter_value, equal_to(throttling_times)) - - def test_create_queue_rate(self): - queue_urls = [] - - def call_create_queue(i): - url = self._sqs_api.create_queue('{}_{}.fifo'.format(self.queue_name, i), is_fifo=True) - queue_urls.append(url) - - start_time = time.time() - self.call_n_times_except_throttling(6, call_create_queue) - duration = time.time() - start_time - logging.debug('Duration: {}'.format(duration)) - assert_that(duration, greater_than(2)) # reserve 1 second for test stability - - # test that one delete queue batch can delete all the queues despite that delete objects rate is only 2: - assert_that(len(queue_urls), equal_to(6)) - delete_queue_batch_result = self._sqs_api.private_delete_queue_batch(queue_urls) - logging.debug('Delete queue batch result: {}'.format(delete_queue_batch_result)) - assert_that( - delete_queue_batch_result['DeleteQueueBatchResultEntry'], instance_of(list) - ) - assert_that( - len(delete_queue_batch_result['DeleteQueueBatchResultEntry']), equal_to(6) # no errors, all items are results - ) - - def test_other_requests_rate(self): - self._create_queue_and_assert(self.queue_name) - - def call(i): - if i % 2: - self._sqs_api.get_queue_url(self.queue_name) - else: - self._sqs_api.get_queue_attributes(self.queue_url) - - start_time = time.time() - self.call_n_times_except_throttling(30, call) - duration = time.time() - start_time - logging.debug('Duration: {}'.format(duration)) - assert_that(duration, greater_than(5)) # reserve 1 second for test stability + + assert_that( + call_describe, + raises( + ydb_issues.SchemeError + ) + ) + + # Check that user is 
properly deleted + self._sqs_api.delete_user(self._username) + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_does_actions_with_queue(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + self._create_queue_send_x_messages_read_y_messages(self.queue_name, + send_count=1, + read_count=1, + msg_body_template=self._msg_body_template, + visibility_timeout=1000, + is_fifo=is_fifo) + self._sqs_api.get_queue_attributes(self.queue_url) + self._sqs_api.delete_queue(self.queue_url) + + def call_n_times_except_throttling(self, count, func): + throttling_times = 0 + i = 0 + while i < count: + try: + func(i) + i += 1 + except RuntimeError as err: + assert_that(str(err), contains_string("ThrottlingException")) + throttling_times += 1 + return throttling_times + + def test_send_message_rate(self): + self._create_queue_and_assert(self.queue_name) + + counters = self._get_sqs_counters() + throttling_counter_labels = { + 'subsystem': 'core', + 'user': self._username, + 'queue': self.queue_name, + 'sensor': 'RequestsThrottled', + } + prev_throttling_counter_value = self._get_counter_value(counters, throttling_counter_labels, 0) + + def call_send(i): + if i % 2: + self._sqs_api.send_message(self.queue_url, 'data') + else: + self._sqs_api.send_message_batch(self.queue_url, [SqsSendMessageParams('data'), SqsSendMessageParams('data')]) + + start_time = time.time() + throttling_times = self.call_n_times_except_throttling(50, call_send) + duration = time.time() - start_time + logging.debug('Duration: {}'.format(duration)) + assert_that(duration, greater_than(4)) # reserve 1 second for test stability + + if throttling_times: + counters = self._get_sqs_counters() + throttling_counter_value = self._get_counter_value(counters, throttling_counter_labels, 0) + assert_that(throttling_counter_value - prev_throttling_counter_value, equal_to(throttling_times)) + + def test_create_queue_rate(self): + queue_urls = [] + + def call_create_queue(i): + url = self._sqs_api.create_queue('{}_{}.fifo'.format(self.queue_name, i), is_fifo=True) + queue_urls.append(url) + + start_time = time.time() + self.call_n_times_except_throttling(6, call_create_queue) + duration = time.time() - start_time + logging.debug('Duration: {}'.format(duration)) + assert_that(duration, greater_than(2)) # reserve 1 second for test stability + + # test that one delete queue batch can delete all the queues despite that delete objects rate is only 2: + assert_that(len(queue_urls), equal_to(6)) + delete_queue_batch_result = self._sqs_api.private_delete_queue_batch(queue_urls) + logging.debug('Delete queue batch result: {}'.format(delete_queue_batch_result)) + assert_that( + delete_queue_batch_result['DeleteQueueBatchResultEntry'], instance_of(list) + ) + assert_that( + len(delete_queue_batch_result['DeleteQueueBatchResultEntry']), equal_to(6) # no errors, all items are results + ) + + def test_other_requests_rate(self): + self._create_queue_and_assert(self.queue_name) + + def call(i): + if i % 2: + self._sqs_api.get_queue_url(self.queue_name) + else: + self._sqs_api.get_queue_attributes(self.queue_url) + + start_time = time.time() + self.call_n_times_except_throttling(30, call) + duration = time.time() - start_time + logging.debug('Duration: {}'.format(duration)) + assert_that(duration, greater_than(5)) # reserve 1 second for test stability diff --git a/ydb/tests/functional/sqs/test_recompiles_requests.py b/ydb/tests/functional/sqs/test_recompiles_requests.py index 367003323f6..601ca732de5 100644 --- 
a/ydb/tests/functional/sqs/test_recompiles_requests.py +++ b/ydb/tests/functional/sqs/test_recompiles_requests.py @@ -1,49 +1,49 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest -from hamcrest import assert_that, not_none +from hamcrest import assert_that, not_none from ydb.tests.library.common.types import Erasure -from sqs_test_base import KikimrSqsTestBase, IS_FIFO_PARAMS - - -class TestSqsRecompilesRequestsForOtherQueue(KikimrSqsTestBase): - erasure = Erasure.BLOCK_4_2 - - @classmethod - def _setup_config_generator(cls): - config_generator = super(TestSqsRecompilesRequestsForOtherQueue, cls)._setup_config_generator() +from sqs_test_base import KikimrSqsTestBase, IS_FIFO_PARAMS + + +class TestSqsRecompilesRequestsForOtherQueue(KikimrSqsTestBase): + erasure = Erasure.BLOCK_4_2 + + @classmethod + def _setup_config_generator(cls): + config_generator = super(TestSqsRecompilesRequestsForOtherQueue, cls)._setup_config_generator() config_generator.yaml_config['sqs_config']['enable_queue_master'] = False - return config_generator - - @pytest.mark.parametrize(**IS_FIFO_PARAMS) - def test_recompiles_queries(self, is_fifo): - if is_fifo: - self.queue_name = self.queue_name + '.fifo' - queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - - def send(node_index): - if is_fifo: - group_id = "0" - seq_no = self.seq_no - self.seq_no += 1 - else: - group_id = None - seq_no = None + return config_generator + + @pytest.mark.parametrize(**IS_FIFO_PARAMS) + def test_recompiles_queries(self, is_fifo): + if is_fifo: + self.queue_name = self.queue_name + '.fifo' + queue_url = self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + + def send(node_index): + if is_fifo: + group_id = "0" + seq_no = self.seq_no + self.seq_no += 1 + else: + group_id = None + seq_no = None result = self._sqs_apis[node_index].send_message( self.queue_url, self._msg_body_template.format(next(self.counter)), deduplication_id=seq_no, group_id=group_id) - assert_that( - result, not_none() - ) - - # cache write requests on each node - for i in range(self.cluster_nodes_count): - send(i) - - # delete and create queue through node 0 - self._sqs_api.delete_queue(queue_url) - self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) - - # send through node 1 - send(1) + assert_that( + result, not_none() + ) + + # cache write requests on each node + for i in range(self.cluster_nodes_count): + send(i) + + # delete and create queue through node 0 + self._sqs_api.delete_queue(queue_url) + self._create_queue_and_assert(self.queue_name, is_fifo=is_fifo) + + # send through node 1 + send(1) diff --git a/ydb/tests/functional/sqs/ya.make b/ydb/tests/functional/sqs/ya.make index bbb0a57bb89..bee2d570474 100644 --- a/ydb/tests/functional/sqs/ya.make +++ b/ydb/tests/functional/sqs/ya.make @@ -1,29 +1,29 @@ -OWNER( - g:sqs - g:kikimr -) +OWNER( + g:sqs + g:kikimr +) PY3TEST() ENV(YDB_DRIVER_BINARY="ydb/apps/ydbd/ydbd") TEST_SRCS( sqs_requests_client.py - sqs_matchers.py - sqs_test_base.py - test_account_actions.py - test_acl.py - test_counters.py - test_garbage_collection.py - test_generic_messaging.py - test_fifo_messaging.py - test_multinode_cluster.py + sqs_matchers.py + sqs_test_base.py + test_account_actions.py + test_acl.py + test_counters.py + test_garbage_collection.py + test_generic_messaging.py + test_fifo_messaging.py + test_multinode_cluster.py test_multiplexing_tables_format.py - test_ping.py - test_polling.py - test_queue_attributes_validation.py - test_queues_managing.py - test_quoting.py 
- test_recompiles_requests.py + test_ping.py + test_polling.py + test_queue_attributes_validation.py + test_queues_managing.py + test_quoting.py + test_recompiles_requests.py ) IF (SANITIZER_TYPE) @@ -56,13 +56,13 @@ PEERDIR( contrib/python/botocore ) -FORK_SUBTESTS() +FORK_SUBTESTS() -# SQS tests are not CPU or disk intensive, -# but they use sleeping for some events, -# so it would be secure to increase split factor. -# This increasing of split factor reduces test time -# to 15-20 seconds. +# SQS tests are not CPU or disk intensive, +# but they use sleeping for some events, +# so it would be secure to increase split factor. +# This increasing of split factor reduces test time +# to 15-20 seconds. SPLIT_FACTOR(60) - + END() diff --git a/ydb/tests/library/harness/kikimr_port_allocator.py b/ydb/tests/library/harness/kikimr_port_allocator.py index a32e3708a86..fcdc26bd4b7 100644 --- a/ydb/tests/library/harness/kikimr_port_allocator.py +++ b/ydb/tests/library/harness/kikimr_port_allocator.py @@ -1,145 +1,145 @@ -# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- import abc import os import ydb.tests.library.common.yatest_common as yatest_common - - -class KikimrNodePortAllocatorInterface(object): + + +class KikimrNodePortAllocatorInterface(object): __metaclass__ = abc.ABCMeta - - def __init__(self): - pass - + + def __init__(self): + pass + @abc.abstractproperty def grpc_ssl_port(self): pass @abc.abstractproperty - def mon_port(self): - pass - + def mon_port(self): + pass + @abc.abstractproperty - def grpc_port(self): - pass - + def grpc_port(self): + pass + @abc.abstractproperty - def mbus_port(self): - pass - + def mbus_port(self): + pass + @abc.abstractproperty - def ic_port(self): - pass - + def ic_port(self): + pass + @abc.abstractproperty - def sqs_port(self): - pass - - -class KikimrPortAllocatorInterface(object): + def sqs_port(self): + pass + + +class KikimrPortAllocatorInterface(object): __metaclass__ = abc.ABCMeta - - def __init__(self): - pass - + + def __init__(self): + pass + @abc.abstractmethod - def get_node_port_allocator(self, node_index): - """ - Returns KikimrNodePortAllocatorInterface object - """ - pass - + def get_node_port_allocator(self, node_index): + """ + Returns KikimrNodePortAllocatorInterface object + """ + pass + @abc.abstractmethod - def get_slot_port_allocator(self, slot_index): - """ - Returns KikimrNodePortAllocatorInterface object - """ - pass - + def get_slot_port_allocator(self, slot_index): + """ + Returns KikimrNodePortAllocatorInterface object + """ + pass + @abc.abstractmethod - def release_ports(self): - pass - -# -# Port manager allocator -# - - -class KikimrPortManagerNodePortAllocator(KikimrNodePortAllocatorInterface): - def __init__(self, port_manager): - super(KikimrPortManagerNodePortAllocator, self).__init__() - self.__port_manager = port_manager + def release_ports(self): + pass + +# +# Port manager allocator +# + + +class KikimrPortManagerNodePortAllocator(KikimrNodePortAllocatorInterface): + def __init__(self, port_manager): + super(KikimrPortManagerNodePortAllocator, self).__init__() + self.__port_manager = port_manager self.__mon_port = None self.__grpc_port = None self.__mbus_port = None self.__ic_port = None self.__sqs_port = None self.__grpc_ssl_port = None - - @property - def mon_port(self): + + @property + def mon_port(self): if self.__mon_port is None: self.__mon_port = self.__port_manager.get_port() - return self.__mon_port - - @property - def grpc_port(self): + return self.__mon_port + + @property + def grpc_port(self): if 
self.__grpc_port is None: self.__grpc_port = self.__port_manager.get_port() - return self.__grpc_port - - @property - def mbus_port(self): + return self.__grpc_port + + @property + def mbus_port(self): if self.__mbus_port is None: self.__mbus_port = self.__port_manager.get_port() - return self.__mbus_port - - @property - def ic_port(self): + return self.__mbus_port + + @property + def ic_port(self): if self.__ic_port is None: self.__ic_port = self.__port_manager.get_port() - return self.__ic_port - - @property + return self.__ic_port + + @property def grpc_ssl_port(self): if self.__grpc_ssl_port is None: self.__grpc_ssl_port = self.__port_manager.get_port() return self.__grpc_ssl_port @property - def sqs_port(self): + def sqs_port(self): if self.__sqs_port is None: self.__sqs_port = self.__port_manager.get_port() - return self.__sqs_port - - -class KikimrPortManagerPortAllocator(KikimrPortAllocatorInterface): - def __init__(self, port_manager=None): - super(KikimrPortManagerPortAllocator, self).__init__() - self.__port_manager = yatest_common.PortManager() if port_manager is None else port_manager - self.__nodes_allocators = [] - self.__slots_allocators = [] - - def get_node_port_allocator(self, node_index): - while len(self.__nodes_allocators) <= node_index: - self.__nodes_allocators.append(KikimrPortManagerNodePortAllocator(self.__port_manager)) - return self.__nodes_allocators[node_index] - - def get_slot_port_allocator(self, slot_index): - while len(self.__slots_allocators) <= slot_index: - self.__slots_allocators.append(KikimrPortManagerNodePortAllocator(self.__port_manager)) - return self.__slots_allocators[slot_index] - - def release_ports(self): - self.__port_manager.release() - - -# -# Fixed port allocator -# - -class KikimrFixedNodePortAllocator(KikimrNodePortAllocatorInterface): + return self.__sqs_port + + +class KikimrPortManagerPortAllocator(KikimrPortAllocatorInterface): + def __init__(self, port_manager=None): + super(KikimrPortManagerPortAllocator, self).__init__() + self.__port_manager = yatest_common.PortManager() if port_manager is None else port_manager + self.__nodes_allocators = [] + self.__slots_allocators = [] + + def get_node_port_allocator(self, node_index): + while len(self.__nodes_allocators) <= node_index: + self.__nodes_allocators.append(KikimrPortManagerNodePortAllocator(self.__port_manager)) + return self.__nodes_allocators[node_index] + + def get_slot_port_allocator(self, slot_index): + while len(self.__slots_allocators) <= slot_index: + self.__slots_allocators.append(KikimrPortManagerNodePortAllocator(self.__port_manager)) + return self.__slots_allocators[slot_index] + + def release_ports(self): + self.__port_manager.release() + + +# +# Fixed port allocator +# + +class KikimrFixedNodePortAllocator(KikimrNodePortAllocatorInterface): def __init__(self, mon_port=8765, grpc_port=2135, mbus_port=2134, ic_port=19001, sqs_port=8771, grpc_ssl_port=2137): - super(KikimrFixedNodePortAllocator, self).__init__() + super(KikimrFixedNodePortAllocator, self).__init__() if os.getenv('MON_PORT') is not None: self.__mon_port = int(os.getenv('MON_PORT')) else: @@ -148,62 +148,62 @@ class KikimrFixedNodePortAllocator(KikimrNodePortAllocatorInterface): self.__grpc_port = int(os.getenv('GRPC_PORT')) else: self.__grpc_port = grpc_port - self.__mbus_port = mbus_port + self.__mbus_port = mbus_port if os.getenv('IC_PORT') is not None: self.__ic_port = int(os.getenv('IC_PORT')) else: self.__ic_port = ic_port - self.__sqs_port = sqs_port + self.__sqs_port = sqs_port if 
os.getenv('GRPC_TLS_PORT') is not None: self.__grpc_ssl_port = int(os.getenv('GRPC_TLS_PORT')) else: self.__grpc_ssl_port = grpc_ssl_port - - @property - def mon_port(self): - return self.__mon_port - - @property + + @property + def mon_port(self): + return self.__mon_port + + @property def grpc_ssl_port(self): return self.__grpc_ssl_port @property - def grpc_port(self): - return self.__grpc_port - - @property - def mbus_port(self): - return self.__mbus_port - - @property - def ic_port(self): - return self.__ic_port - - @property - def sqs_port(self): - return self.__sqs_port - - -class KikimrFixedPortAllocator(KikimrPortAllocatorInterface): - def __init__(self, - nodes_port_allocators_list=(), - slots_port_allocators_list=()): - super(KikimrFixedPortAllocator, self).__init__() - self.__nodes_port_allocators_list = nodes_port_allocators_list - self.__slots_port_allocators_list = slots_port_allocators_list - self.__default_value = KikimrFixedNodePortAllocator() - - def get_node_port_allocator(self, node_index): + def grpc_port(self): + return self.__grpc_port + + @property + def mbus_port(self): + return self.__mbus_port + + @property + def ic_port(self): + return self.__ic_port + + @property + def sqs_port(self): + return self.__sqs_port + + +class KikimrFixedPortAllocator(KikimrPortAllocatorInterface): + def __init__(self, + nodes_port_allocators_list=(), + slots_port_allocators_list=()): + super(KikimrFixedPortAllocator, self).__init__() + self.__nodes_port_allocators_list = nodes_port_allocators_list + self.__slots_port_allocators_list = slots_port_allocators_list + self.__default_value = KikimrFixedNodePortAllocator() + + def get_node_port_allocator(self, node_index): if node_index <= len(self.__nodes_port_allocators_list): return self.__nodes_port_allocators_list[node_index - 1] - else: - return self.__default_value - - def get_slot_port_allocator(self, slot_index): + else: + return self.__default_value + + def get_slot_port_allocator(self, slot_index): if slot_index <= len(self.__slots_port_allocators_list): return self.__slots_port_allocators_list[slot_index - 1] - else: - return self.__default_value - - def release_ports(self): - pass + else: + return self.__default_value + + def release_ports(self): + pass diff --git a/ydb/tests/library/harness/kikimr_runner.py b/ydb/tests/library/harness/kikimr_runner.py index 251f485b5db..f888542e740 100644 --- a/ydb/tests/library/harness/kikimr_runner.py +++ b/ydb/tests/library/harness/kikimr_runner.py @@ -46,7 +46,7 @@ def join(a, b): class KiKiMRNode(daemon.Daemon, kikimr_node_interface.NodeInterface): - def __init__(self, node_idx, config_path, port_allocator, cluster_name, configurator, + def __init__(self, node_idx, config_path, port_allocator, cluster_name, configurator, udfs_dir=None, role='node', node_broker_port=None, tenant_affiliation=None, encryption_key=None): super(kikimr_node_interface.NodeInterface, self).__init__() @@ -71,7 +71,7 @@ class KiKiMRNode(daemon.Daemon, kikimr_node_interface.NodeInterface): self.__node_broker_port = node_broker_port self.__log_file = tempfile.NamedTemporaryFile(dir=self.cwd, prefix="logfile_", suffix=".log", delete=False) self.__cms_config_cache_file = tempfile.NamedTemporaryFile( - dir=self.cwd, + dir=self.cwd, prefix="cms_config_cache_", delete=False ) @@ -212,7 +212,7 @@ class KiKiMR(kikimr_cluster_interface.KiKiMRClusterInterface): self.__common_udfs_dir = None self.__cluster_name = cluster_name self.__configurator = kikimr_config.KikimrConfigGenerator() if configurator is None else configurator 
- self.__port_allocator = self.__configurator.port_allocator + self.__port_allocator = self.__configurator.port_allocator self._nodes = {} self._slots = {} self.__server = 'localhost' @@ -327,7 +327,7 @@ class KiKiMR(kikimr_cluster_interface.KiKiMRClusterInterface): self._nodes[node_index] = KiKiMRNode( node_index, self.config_path, - port_allocator=self.__port_allocator.get_node_port_allocator(node_index), + port_allocator=self.__port_allocator.get_node_port_allocator(node_index), cluster_name=self.__cluster_name, configurator=self.__configurator, udfs_dir=self.__common_udfs_dir, @@ -355,7 +355,7 @@ class KiKiMR(kikimr_cluster_interface.KiKiMRClusterInterface): self._slots[slot_index] = KiKiMRNode( slot_index, self.config_path, - port_allocator=self.__port_allocator.get_slot_port_allocator(slot_index), + port_allocator=self.__port_allocator.get_slot_port_allocator(slot_index), cluster_name=self.__cluster_name, configurator=self.__configurator, udfs_dir=self.__common_udfs_dir, @@ -392,7 +392,7 @@ class KiKiMR(kikimr_cluster_interface.KiKiMRClusterInterface): if exception is not None: saved_exceptions.append(exception) - self.__port_allocator.release_ports() + self.__port_allocator.release_ports() if saved_exceptions: raise daemon.SeveralDaemonErrors(saved_exceptions) |
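The quoting tests above do not treat a throttled call as a failure: call_n_times_except_throttling keeps invoking the callable until it has succeeded the requested number of times, counting every RuntimeError whose text mentions ThrottlingException as a retry of the same iteration. A standalone restatement of that loop is sketched below; the send stub and the use of random are illustrative stand-ins for the real SQS API calls, and unexpected errors are re-raised here instead of being checked with the hamcrest assertion the test uses.

import random

def call_n_times_except_throttling(count, func):
    # Keep calling func(i) until `count` calls have succeeded; a RuntimeError
    # mentioning "ThrottlingException" means the rate limiter rejected the call
    # and the same iteration is retried on the next pass.
    throttling_times = 0
    i = 0
    while i < count:
        try:
            func(i)
            i += 1  # advance only after a successful call
        except RuntimeError as err:
            if "ThrottlingException" not in str(err):
                raise  # the real test asserts on the message instead of re-raising
            throttling_times += 1
    return throttling_times

def send(i):
    # Illustrative stand-in for self._sqs_api.send_message(...): fail ~30% of calls.
    if random.random() < 0.3:
        raise RuntimeError("ThrottlingException: rate exceeded")

if __name__ == '__main__':
    print(call_n_times_except_throttling(10, send))

Because throttled attempts are retried rather than skipped, the elapsed time of the whole loop reflects the configured rate limit, which is why test_send_message_rate and test_other_requests_rate assert on duration with greater_than.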
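In KikimrFixedNodePortAllocator, four of the ports can be overridden through the environment (MON_PORT, GRPC_PORT, IC_PORT, GRPC_TLS_PORT), while mbus_port and sqs_port always keep their constructor defaults. The minimal sketch below mirrors that resolution pattern; resolve_port is an illustrative helper, not part of the harness.

import os

def resolve_port(env_name, default):
    # The override pattern from KikimrFixedNodePortAllocator: an environment
    # variable, when present, wins over the hard-coded default.
    value = os.getenv(env_name)
    return int(value) if value is not None else default

# Only these four ports honour environment overrides in the class above;
# mbus_port (2134) and sqs_port (8771) are taken from the constructor as-is.
mon_port = resolve_port('MON_PORT', 8765)
grpc_port = resolve_port('GRPC_PORT', 2135)
ic_port = resolve_port('IC_PORT', 19001)
grpc_ssl_port = resolve_port('GRPC_TLS_PORT', 2137)
print(mon_port, grpc_port, ic_port, grpc_ssl_port)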
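The two allocator families in kikimr_port_allocator.py differ only in where port numbers come from: KikimrPortManagerPortAllocator leases them lazily from yatest's PortManager on first property access, while KikimrFixedPortAllocator hands out pre-configured (or default) fixed ports. A hedged usage sketch follows, assuming the ydb test library is importable; make_port_allocator and the use_fixed_ports flag are invented for illustration, since the harness itself takes the allocator from KikimrConfigGenerator.port_allocator as shown in the kikimr_runner.py hunks above.

# Illustrative only: constructing allocators directly to show the interface.
from ydb.tests.library.harness.kikimr_port_allocator import (
    KikimrFixedNodePortAllocator,
    KikimrFixedPortAllocator,
    KikimrPortManagerPortAllocator,
)

def make_port_allocator(use_fixed_ports=False):
    if use_fixed_ports:
        # One node on the default well-known ports (8765/2135/19001/...),
        # still overridable through the environment variables shown above.
        return KikimrFixedPortAllocator([KikimrFixedNodePortAllocator()])
    # Dynamic ports leased from yatest's PortManager, allocated lazily on
    # first access to each property.
    return KikimrPortManagerPortAllocator()

allocator = make_port_allocator()
node_ports = allocator.get_node_port_allocator(1)  # node index is illustrative
print(node_ports.grpc_port, node_ports.mon_port, node_ports.ic_port)
allocator.release_ports()

The dynamic variant is what multi-node tests rely on to avoid port clashes; the fixed variant is convenient for local debugging where stable, well-known ports matter more than isolation.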