aboutsummaryrefslogtreecommitdiffstats
path: root/util/thread/lfqueue.h
diff options
context:
space:
mode:
authorAlexander Smirnov <alex@ydb.tech>2024-11-20 11:14:58 +0000
committerAlexander Smirnov <alex@ydb.tech>2024-11-20 11:14:58 +0000
commit31773f157bf8164364649b5f470f52dece0a4317 (patch)
tree33d0f7eef45303ab68cf08ab381ce5e5e36c5240 /util/thread/lfqueue.h
parent2c7938962d8689e175574fc1e817c05049f27905 (diff)
parenteff600952d5dfe17942f38f510a8ac2b203bb3a5 (diff)
downloadydb-31773f157bf8164364649b5f470f52dece0a4317.tar.gz
Merge branch 'rightlib' into mergelibs-241120-1113
Diffstat (limited to 'util/thread/lfqueue.h')
-rw-r--r--util/thread/lfqueue.h24
1 file changed, 16 insertions, 8 deletions
diff --git a/util/thread/lfqueue.h b/util/thread/lfqueue.h
index bfe88013fc..089f234741 100644
--- a/util/thread/lfqueue.h
+++ b/util/thread/lfqueue.h
@@ -72,8 +72,9 @@ class TLockFreeQueue: public TNonCopyable {
void TryToFreeAsyncMemory() {
const auto keepCounter = FreeingTaskCounter.load();
TRootNode* current = FreePtr.load(std::memory_order_acquire);
- if (current == nullptr)
+ if (current == nullptr) {
return;
+ }
if (FreememCounter.load() == 1) {
// we are the last thread, try to cleanup
// check if another thread have cleaned up
@@ -103,8 +104,9 @@ class TLockFreeQueue: public TNonCopyable {
toDelete->ToDelete.store(lst, std::memory_order_release);
for (auto freePtr = FreePtr.load();;) {
toDelete->NextFree.store(freePtr, std::memory_order_release);
- if (FreePtr.compare_exchange_weak(freePtr, toDelete))
+ if (FreePtr.compare_exchange_weak(freePtr, toDelete)) {
break;
+ }
}
}
void AsyncUnref(TRootNode* toDelete, TListNode* lst) {
@@ -148,15 +150,17 @@ class TLockFreeQueue: public TNonCopyable {
Tail->Next.store(newCopy, std::memory_order_release);
newCopy = Copy;
Copy = nullptr; // do not destroy prev try
- if (!newTail)
+ if (!newTail) {
newTail = Tail; // tried to invert same list
+ }
break;
}
TListNode* newElem = new TListNode(ptr->Data, newCopy);
newCopy = newElem;
ptr = ptr->Next.load(std::memory_order_acquire);
- if (!newTail)
+ if (!newTail) {
newTail = newElem;
+ }
}
EraseList(Copy); // copy was useless
Copy = newCopy;
@@ -176,8 +180,9 @@ class TLockFreeQueue: public TNonCopyable {
for (TListNode* node = head;; node = node->Next.load(std::memory_order_acquire)) {
newRoot->IncCount(node->Data);
- if (node == tail)
+ if (node == tail) {
break;
+ }
}
if (JobQueue.compare_exchange_weak(curRoot, newRoot)) {
@@ -251,8 +256,9 @@ public:
}
template <typename TIter>
void EnqueueAll(TIter dataBegin, TIter dataEnd) {
- if (dataBegin == dataEnd)
+ if (dataBegin == dataEnd) {
return;
+ }
TIter i = dataBegin;
TListNode* node = new TListNode(*i);
@@ -272,8 +278,9 @@ public:
TListNode* tail = curRoot->PopQueue.load(std::memory_order_acquire);
if (tail) {
// has elems to pop
- if (!newRoot)
+ if (!newRoot) {
newRoot = new TRootNode;
+ }
newRoot->PushQueue.store(curRoot->PushQueue.load(std::memory_order_acquire), std::memory_order_release);
newRoot->PopQueue.store(tail->Next.load(std::memory_order_acquire), std::memory_order_release);
@@ -294,8 +301,9 @@ public:
return false; // no elems to pop
}
- if (!newRoot)
+ if (!newRoot) {
newRoot = new TRootNode;
+ }
newRoot->PushQueue.store(nullptr, std::memory_order_release);
listInvertor.DoCopy(curRoot->PushQueue.load(std::memory_order_acquire));
newRoot->PopQueue.store(listInvertor.Copy, std::memory_order_release);