| author | Daniil Cherednik <dan.cherednik@gmail.com> | 2022-11-24 13:14:34 +0300 |
|---|---|---|
| committer | Daniil Cherednik <dan.cherednik@gmail.com> | 2022-11-24 14:46:00 +0300 |
| commit | 87f7fceed34bcafb8aaff351dd493a35c916986f (patch) | |
| tree | 26809ec8f550aba8eb019e59adc3d48e51913eb2 /contrib/go/_std_1.18/src/sync/rwmutex.go | |
| parent | 11bc4015b8010ae201bf3eb33db7dba425aca35e (diff) | |
| download | ydb-87f7fceed34bcafb8aaff351dd493a35c916986f.tar.gz | |
Ydb stable 22-4-43 (22.4.43)
x-stable-origin-commit: 8d49d46cc834835bf3e50870516acd7376a63bcf
Diffstat (limited to 'contrib/go/_std_1.18/src/sync/rwmutex.go')
| -rw-r--r-- | contrib/go/_std_1.18/src/sync/rwmutex.go | 223 |
1 file changed, 223 insertions, 0 deletions
diff --git a/contrib/go/_std_1.18/src/sync/rwmutex.go b/contrib/go/_std_1.18/src/sync/rwmutex.go
new file mode 100644
index 0000000000..f0d4c9771a
--- /dev/null
+++ b/contrib/go/_std_1.18/src/sync/rwmutex.go
@@ -0,0 +1,223 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+	"internal/race"
+	"sync/atomic"
+	"unsafe"
+)
+
+// There is a modified copy of this file in runtime/rwmutex.go.
+// If you make any changes here, see if you should make them there.
+
+// A RWMutex is a reader/writer mutual exclusion lock.
+// The lock can be held by an arbitrary number of readers or a single writer.
+// The zero value for a RWMutex is an unlocked mutex.
+//
+// A RWMutex must not be copied after first use.
+//
+// If a goroutine holds a RWMutex for reading and another goroutine might
+// call Lock, no goroutine should expect to be able to acquire a read lock
+// until the initial read lock is released. In particular, this prohibits
+// recursive read locking. This is to ensure that the lock eventually becomes
+// available; a blocked Lock call excludes new readers from acquiring the
+// lock.
+type RWMutex struct {
+	w           Mutex  // held if there are pending writers
+	writerSem   uint32 // semaphore for writers to wait for completing readers
+	readerSem   uint32 // semaphore for readers to wait for completing writers
+	readerCount int32  // number of pending readers
+	readerWait  int32  // number of departing readers
+}
+
+const rwmutexMaxReaders = 1 << 30
+
+// Happens-before relationships are indicated to the race detector via:
+// - Unlock  -> Lock:  readerSem
+// - Unlock  -> RLock: readerSem
+// - RUnlock -> Lock:  writerSem
+//
+// The methods below temporarily disable handling of race synchronization
+// events in order to provide the more precise model above to the race
+// detector.
+//
+// For example, atomic.AddInt32 in RLock should not appear to provide
+// acquire-release semantics, which would incorrectly synchronize racing
+// readers, thus potentially missing races.
+
+// RLock locks rw for reading.
+//
+// It should not be used for recursive read locking; a blocked Lock
+// call excludes new readers from acquiring the lock. See the
+// documentation on the RWMutex type.
+func (rw *RWMutex) RLock() {
+	if race.Enabled {
+		_ = rw.w.state
+		race.Disable()
+	}
+	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
+		// A writer is pending, wait for it.
+		runtime_SemacquireMutex(&rw.readerSem, false, 0)
+	}
+	if race.Enabled {
+		race.Enable()
+		race.Acquire(unsafe.Pointer(&rw.readerSem))
+	}
+}
+
+// TryRLock tries to lock rw for reading and reports whether it succeeded.
+//
+// Note that while correct uses of TryRLock do exist, they are rare,
+// and use of TryRLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (rw *RWMutex) TryRLock() bool {
+	if race.Enabled {
+		_ = rw.w.state
+		race.Disable()
+	}
+	for {
+		c := atomic.LoadInt32(&rw.readerCount)
+		if c < 0 {
+			if race.Enabled {
+				race.Enable()
+			}
+			return false
+		}
+		if atomic.CompareAndSwapInt32(&rw.readerCount, c, c+1) {
+			if race.Enabled {
+				race.Enable()
+				race.Acquire(unsafe.Pointer(&rw.readerSem))
+			}
+			return true
+		}
+	}
+}
+
+// RUnlock undoes a single RLock call;
+// it does not affect other simultaneous readers.
+// It is a run-time error if rw is not locked for reading
+// on entry to RUnlock.
+func (rw *RWMutex) RUnlock() {
+	if race.Enabled {
+		_ = rw.w.state
+		race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
+		race.Disable()
+	}
+	if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
+		// Outlined slow-path to allow the fast-path to be inlined
+		rw.rUnlockSlow(r)
+	}
+	if race.Enabled {
+		race.Enable()
+	}
+}
+
+func (rw *RWMutex) rUnlockSlow(r int32) {
+	if r+1 == 0 || r+1 == -rwmutexMaxReaders {
+		race.Enable()
+		throw("sync: RUnlock of unlocked RWMutex")
+	}
+	// A writer is pending.
+	if atomic.AddInt32(&rw.readerWait, -1) == 0 {
+		// The last reader unblocks the writer.
+		runtime_Semrelease(&rw.writerSem, false, 1)
+	}
+}
+
+// Lock locks rw for writing.
+// If the lock is already locked for reading or writing,
+// Lock blocks until the lock is available.
+func (rw *RWMutex) Lock() {
+	if race.Enabled {
+		_ = rw.w.state
+		race.Disable()
+	}
+	// First, resolve competition with other writers.
+	rw.w.Lock()
+	// Announce to readers there is a pending writer.
+	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
+	// Wait for active readers.
+	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
+		runtime_SemacquireMutex(&rw.writerSem, false, 0)
+	}
+	if race.Enabled {
+		race.Enable()
+		race.Acquire(unsafe.Pointer(&rw.readerSem))
+		race.Acquire(unsafe.Pointer(&rw.writerSem))
+	}
+}
+
+// TryLock tries to lock rw for writing and reports whether it succeeded.
+//
+// Note that while correct uses of TryLock do exist, they are rare,
+// and use of TryLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (rw *RWMutex) TryLock() bool {
+	if race.Enabled {
+		_ = rw.w.state
+		race.Disable()
+	}
+	if !rw.w.TryLock() {
+		if race.Enabled {
+			race.Enable()
+		}
+		return false
+	}
+	if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
+		rw.w.Unlock()
+		if race.Enabled {
+			race.Enable()
+		}
+		return false
+	}
+	if race.Enabled {
+		race.Enable()
+		race.Acquire(unsafe.Pointer(&rw.readerSem))
+		race.Acquire(unsafe.Pointer(&rw.writerSem))
+	}
+	return true
+}
+
+// Unlock unlocks rw for writing. It is a run-time error if rw is
+// not locked for writing on entry to Unlock.
+//
+// As with Mutexes, a locked RWMutex is not associated with a particular
+// goroutine. One goroutine may RLock (Lock) a RWMutex and then
+// arrange for another goroutine to RUnlock (Unlock) it.
+func (rw *RWMutex) Unlock() {
+	if race.Enabled {
+		_ = rw.w.state
+		race.Release(unsafe.Pointer(&rw.readerSem))
+		race.Disable()
+	}
+
+	// Announce to readers there is no active writer.
+	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
+	if r >= rwmutexMaxReaders {
+		race.Enable()
+		throw("sync: Unlock of unlocked RWMutex")
+	}
+	// Unblock blocked readers, if any.
+	for i := 0; i < int(r); i++ {
+		runtime_Semrelease(&rw.readerSem, false, 0)
+	}
+	// Allow other writers to proceed.
+	rw.w.Unlock()
+	if race.Enabled {
+		race.Enable()
+	}
+}
+
+// RLocker returns a Locker interface that implements
+// the Lock and Unlock methods by calling rw.RLock and rw.RUnlock.
+func (rw *RWMutex) RLocker() Locker {
+	return (*rlocker)(rw)
+}
+
+type rlocker RWMutex
+
+func (r *rlocker) Lock()   { (*RWMutex)(r).RLock() }
+func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }
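The file added above is the Go 1.18 standard library sync/rwmutex.go vendored into contrib. As context for its doc comments, here is a minimal usage sketch (not part of the commit): any number of goroutines may hold the read lock via RLock/RUnlock, a writer gets exclusive access via Lock/Unlock, and RLocker adapts the read side to the plain Locker interface. The `counter` type and the goroutine counts below are illustrative assumptions; only the sync.RWMutex API itself comes from the file above.

```go
package main

import (
	"fmt"
	"sync"
)

// counter is a hypothetical read-mostly value guarded by an RWMutex.
type counter struct {
	mu sync.RWMutex
	n  int
}

// Get takes the read lock; concurrent readers do not block one another.
func (c *counter) Get() int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.n
}

// Inc takes the write lock, excluding readers and other writers.
func (c *counter) Inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}

func main() {
	var c counter
	var wg sync.WaitGroup

	// A few writers and several readers running concurrently.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 100; j++ {
				c.Inc()
			}
		}()
	}
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = c.Get() // readers may proceed in parallel with each other
		}()
	}
	wg.Wait()

	// RLocker exposes the read side as a plain sync.Locker,
	// e.g. for APIs such as sync.Cond that accept only a Locker.
	var l sync.Locker = c.mu.RLocker()
	l.Lock() // equivalent to c.mu.RLock()
	fmt.Println("final value:", c.n)
	l.Unlock() // equivalent to c.mu.RUnlock()
}
```

Internally, Lock announces itself by pushing readerCount negative by rwmutexMaxReaders, which is why a blocked Lock call keeps new RLock calls from succeeding until the writer has finished; this is also the reason the doc comment warns against recursive read locking.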