// yql/essentials/utils/multi_resource_lock.cpp
#include "multi_resource_lock.h"

using namespace NYql;

TMultiResourceLock::TResourceLock TMultiResourceLock::Acquire(TString resourceId) {
    TLock::TPtr lock = ProvideResourceLock(resourceId);

    // the resource-specific mutex must be acquired outside of the Guard_ lock:
    // waiting on a busy resource while holding Guard_ would block Acquire/TryCleanup
    // for every other resource
    return { *this, std::move(lock), std::move(resourceId) };
}
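
// A minimal usage sketch (assumption: TResourceLock, declared in
// multi_resource_lock.h, is an RAII guard that holds the resource's Mutex_ for
// its lifetime and triggers TryCleanup for its resource id on destruction):
//
//     TMultiResourceLock locks;
//     {
//         auto guard = locks.Acquire("some-resource-id");
//         // ... exclusive work with "some-resource-id" ...
//     } // guard released here; the map entry is dropped if nobody else holds it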

TMultiResourceLock::~TMultiResourceLock() {
    with_lock(Guard_) {
        Y_ABORT_UNLESS(Locks_.empty(), "~TMultiResourceLock: we still have %zu unreleased locks", Locks_.size());
    }
}

TMultiResourceLock::TLock::TPtr TMultiResourceLock::ProvideResourceLock(const TString& resourceId) {
    with_lock(Guard_) {
        auto it = Locks_.find(resourceId);
        if (it == Locks_.end()) {
            it = Locks_.emplace(resourceId, MakeIntrusive<TLock>()).first;
        }

        // important: the ref count of the returned TIntrusivePtr is incremented while
        // Guard_ is still held; this guarantees that TryCleanup cannot erase this entry
        // in the window between returning from this method and the caller acquiring
        // lock->Mutex_
        return it->second;
    }
}

void TMultiResourceLock::TryCleanup(const TString& resourceId) {
    with_lock(Guard_) {
        auto it = Locks_.find(resourceId);
        if (it == Locks_.end()) {
            return;
        }

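        // IsUnique() presumably reports a reference count of 1, i.e. the Locks_
        // map holds the only reference: new references are handed out only under
        // Guard_ (which we hold here), so no one owns or waits on this lock and
        // the entry can be safely dropped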
        if (it->second->IsUnique()) {
            Locks_.erase(it);
        }
    }
}