// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Each active thread has a ThreadIdentity that may represent the thread in
// various-level interfaces.  ThreadIdentity objects are never deallocated.
// When a thread terminates, its ThreadIdentity object may be reused for a
// thread created later.

#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_

#ifndef _WIN32
#include <pthread.h>
// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
// supported.
#include <unistd.h>
#endif

#include <atomic>
#include <cstdint>

#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h" 

namespace absl {
ABSL_NAMESPACE_BEGIN

struct SynchLocksHeld;
struct SynchWaitParams;

namespace base_internal {

class SpinLock;
struct ThreadIdentity;

// Used by the implementation of absl::Mutex and absl::CondVar.
struct PerThreadSynch {
  // The internal representations of absl::Mutex and absl::CondVar rely
  // on the alignment of PerThreadSynch. Both store the address of the
  // PerThreadSynch in the high-order bits of their internal state,
  // which means the low kLowZeroBits bits of the address of PerThreadSynch
  // must be zero.
  static constexpr int kLowZeroBits = 8;
  static constexpr int kAlignment = 1 << kLowZeroBits;
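  // A minimal sketch (not actual Mutex code) of the packing this enables: a
  // suitably aligned pointer can carry flag bits in its low byte and be
  // recovered by masking them off.  kSomeFlagBits below is hypothetical.
  //
  //   PerThreadSynch* s = ...;
  //   auto v = reinterpret_cast<intptr_t>(s);
  //   assert((v & (PerThreadSynch::kAlignment - 1)) == 0);  // low 8 bits free
  //   intptr_t packed = v | kSomeFlagBits;                  // stash flags
  //   auto* recovered = reinterpret_cast<PerThreadSynch*>(
  //       packed & ~intptr_t{PerThreadSynch::kAlignment - 1});
  //   assert(recovered == s);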

  // Returns the associated ThreadIdentity.
  // This can be implemented as a cast because we guarantee
  // PerThreadSynch is the first element of ThreadIdentity.
  ThreadIdentity* thread_identity() {
    return reinterpret_cast<ThreadIdentity*>(this);
  }

  PerThreadSynch *next;  // Circular waiter queue; initialized to 0.
  PerThreadSynch *skip;  // If non-zero, all entries in the Mutex queue
                         // up to and including "skip" have the same
                         // condition as this one, and will be woken later.
  bool may_skip;         // If false while on a mutex queue, a mutex unlocker
                         // is using this PerThreadSynch as a terminator.  Its
                         // skip field must not be filled in because the loop
                         // might then skip over the terminator.
  bool wake;             // This thread is to be woken from a Mutex. 
  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the 
  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. 
  // 
  // The value of "x->cond_waiter" is meaningless if "x" is not on a 
  // Mutex waiter list. 
  bool cond_waiter; 
  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
                         // true if UnlockSlow could be searching
                         // for a waiter to wake.  Used for an optimization
                         // in Enqueue().  true is always a valid value.
                         // Can be reset to false when the unlocker or any
                         // writer releases the lock, or a reader fully
                         // releases the lock.  It may not be set to false
                         // by a reader that decrements the count to
                         // non-zero.  Protected by the Mutex spinlock.
  bool suppress_fatal_errors;  // If true, try to proceed even in the face 
                               // of broken invariants.  This is used within 
                               // fatal signal handlers to improve the 
                               // chances of debug logging information being 
                               // output successfully. 
  int priority;                // Priority of thread (updated every so often). 

  // State values:
  //   kAvailable: This PerThreadSynch is available.
  //   kQueued: This PerThreadSynch is unavailable; it's currently queued on a
  //            Mutex or CondVar waiter list.
  //
  // Transitions from kQueued to kAvailable require a release
  // barrier. This is needed as a waiter may use "state" to
  // independently observe that it's no longer queued.
  //
  // Transitions from kAvailable to kQueued require no barrier, they
  // are externally ordered by the Mutex.
  enum State {
    kAvailable,
    kQueued
  };
  std::atomic<State> state;
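
  // A sketch of the memory-order pairing described above (assumed usage, not
  // the actual Mutex/CondVar code):
  //
  //   // Dequeuing side: release-store so prior queue manipulation is
  //   // visible to the waiter once it sees kAvailable.
  //   s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
  //
  //   // Waiter side: an acquire load lets it independently observe that it
  //   // is no longer queued.
  //   while (s->state.load(std::memory_order_acquire) !=
  //          PerThreadSynch::kAvailable) {
  //     // spin or block
  //   }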

  // The wait parameters of the current wait.  waitp is null if the 
  // thread is not waiting. Transitions from null to non-null must 
  // occur before the enqueue commit point (state = kQueued in 
  // Enqueue() and CondVarEnqueue()). Transitions from non-null to 
  // null must occur after the wait is finished (state = kAvailable in 
  // Mutex::Block() and CondVar::WaitCommon()). This field may be 
  // changed only by the thread that describes this PerThreadSynch.  A 
  // special case is Fer(), which calls Enqueue() on another thread, 
  // but with an identical SynchWaitParams pointer, thus leaving the 
  // pointer unchanged. 
  SynchWaitParams* waitp; 
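
  // A sketch of the ordering constraints above (assumed usage, not the actual
  // implementation):
  //
  //   s->waitp = &params;   // becomes non-null before the enqueue commit
  //                         // point, i.e. before state is set to kQueued
  //   ...                   // enqueue, block, be woken; state -> kAvailable
  //   s->waitp = nullptr;   // cleared only after the wait has finished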

  intptr_t readers;     // Number of readers in mutex. 

  // When priority will next be read (cycles). 
  int64_t next_priority_read_cycles; 

  // Locks held; used during deadlock detection.
  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
  SynchLocksHeld *all_locks;
};

// The instances of this class are allocated in NewThreadIdentity() with an 
// alignment of PerThreadSynch::kAlignment. 
struct ThreadIdentity {
  // Must be the first member.  The Mutex implementation requires that
  // the PerThreadSynch object associated with each thread is
  // PerThreadSynch::kAlignment aligned.  We provide this alignment on
  // ThreadIdentity itself.
  PerThreadSynch per_thread_synch;

  // Private: Reserved for absl::synchronization_internal::Waiter.
  struct WaiterState {
    alignas(void*) char data[128]; 
  } waiter_state;

  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
  std::atomic<int>* blocked_count_ptr;

  // The following variables are mostly read/written just by the
  // thread itself.  The only exception is that these are read by
  // a ticker thread as a hint.
  std::atomic<int> ticker;      // Tick counter, incremented once per second.
  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
  std::atomic<bool> is_idle;    // Has thread become idle yet?

  ThreadIdentity* next;
};
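
// A hedged sketch of the layout property that thread_identity() relies on:
// because per_thread_synch is the first member, a ThreadIdentity and its
// PerThreadSynch share the same address.
//
//   ThreadIdentity* id = ...;
//   assert(static_cast<void*>(&id->per_thread_synch) ==
//          static_cast<void*>(id));
//   assert(id->per_thread_synch.thread_identity() == id);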

// Returns the ThreadIdentity object representing the calling thread; this is
// guaranteed to be unique for the calling thread's lifetime.  The returned
// object remains valid for the program's lifetime, although it may be
// re-assigned to a subsequent thread.  If no ThreadIdentity exists for the
// calling thread yet, returns nullptr instead.
//
// Does not malloc(*), and is async-signal safe.
// [*] Technically pthread_setspecific() does malloc on first use; however this
// is handled internally within tcmalloc's initialization already.
//
// New ThreadIdentity objects can be constructed and associated with a thread
// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
ThreadIdentity* CurrentThreadIdentityIfPresent();

using ThreadIdentityReclaimerFunction = void (*)(void*);

// Sets the current thread identity to the given value.  'reclaimer' is a
// pointer to the global function for cleaning up instances on thread
// destruction.
void SetCurrentThreadIdentity(ThreadIdentity* identity,
                              ThreadIdentityReclaimerFunction reclaimer);

// Removes the currently associated ThreadIdentity from the running thread.
// This must be called from inside the ThreadIdentityReclaimerFunction, and only
// from that function.
void ClearCurrentThreadIdentity();
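
// A hedged usage sketch for the three functions above.  ReclaimThreadIdentity
// and NewThreadIdentity are assumed helpers (both are mentioned in comments in
// this header), not entities declared here.
//
//   void ReclaimThreadIdentity(void* v) {
//     // Runs on thread destruction for the identity registered below.
//     ClearCurrentThreadIdentity();  // only legal from inside the reclaimer
//     // Recycle the ThreadIdentity; such objects are never deallocated.
//   }
//
//   ThreadIdentity* id = NewThreadIdentity();
//   SetCurrentThreadIdentity(id, ReclaimThreadIdentity);
//   assert(CurrentThreadIdentityIfPresent() == id);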

// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
// index>
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
#else
#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
#endif

#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
#else
#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
#endif

#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
#else
#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
#endif

#ifdef ABSL_THREAD_IDENTITY_MODE
#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
#elif defined(_WIN32) && !defined(__MINGW32__)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) &&        \
    (__GOOGLE_GRTE_VERSION__ >= 20140228L)
// Support for async-safe TLS was specifically added in GRTEv4.  It's not
// present in the upstream eglibc.
// Note:  Current default for production systems.
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
#else
#define ABSL_THREAD_IDENTITY_MODE \
  ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#endif
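
// For example, to force the C++11 thread_local implementation (mode index 2,
// i.e. ABSL_THREAD_IDENTITY_MODE_USE_CPP11 above), a build could pass the
// following flag (assumed invocation):
//
//   -DABSL_FORCE_THREAD_IDENTITY_MODE=2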

#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11

#if ABSL_PER_THREAD_TLS
ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
    thread_identity_ptr;
#elif defined(ABSL_HAVE_THREAD_LOCAL)
ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
#else
#error Thread-local storage not detected on this platform
#endif

// thread_local variables cannot be in headers exposed by DLLs or in certain 
// build configurations on Apple platforms. However, it is important for 
// performance reasons in general that `CurrentThreadIdentityIfPresent` be 
// inlined. In the other cases we opt to have the function not be inlined. Note 
// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
// this entire inline definition. 
#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
    !defined(ABSL_CONSUME_DLL)
#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
#endif

#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
  return thread_identity_ptr;
}
#endif

#elif ABSL_THREAD_IDENTITY_MODE != \
    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#error Unknown ABSL_THREAD_IDENTITY_MODE
#endif

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_