//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"
#include "string_utils.h"

namespace scudo {
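
// Per-thread state for the exclusive TSD model: whether memory initialization
// is disabled for this thread, and where the thread is in the TSD lifecycle
// (not yet initialized, initialized, or torn down).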
struct ThreadState {
  bool DisableMemInit : 1;
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};
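
// Thread-exit hook registered as the pthread key destructor; defined at the
// bottom of this file.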
template <class Allocator> void teardownThread(void *Ptr);
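
// TSD registry that gives each thread its own exclusive, thread_local TSD.
// Threads that have fully initialized use their own TSD without taking a lock;
// everything else (minimal init, disabled state) falls back to a single
// mutex-protected FallbackTSD.
//
// Illustrative caller pattern (names other than this registry's own members
// are placeholders):
//   bool UnlockRequired;
//   Registry.initThreadMaybe(Instance, /*MinimalInit=*/false);
//   TSD<Allocator> *CurrentTSD = Registry.getTSDAndLock(&UnlockRequired);
//   // ... use the TSD's local cache ...
//   if (UnlockRequired)
//     CurrentTSD->unlock();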
template <class Allocator> struct TSDRegistryExT {
  void init(Allocator *Instance) REQUIRES(Mutex) {
    DCHECK(!Initialized);
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }
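
  // Test-only teardown: commits back and resets the current thread's TSD (if
  // one was set) and the fallback TSD, destroys the pthread key, and marks the
  // registry as uninitialized.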
  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      ThreadTSD = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    State = {};
    ScopedLock L(Mutex);
    Initialized = false;
  }

  void drainCaches(Allocator *Instance) {
    // We don't have a way to iterate over all thread local `ThreadTSD`s, so we
    // only drain the current thread's `ThreadTSD` and the `FallbackTSD`.
    Instance->drainCache(&ThreadTSD);
    FallbackTSD.lock();
    Instance->drainCache(&FallbackTSD);
    FallbackTSD.unlock();
  }
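
  // Fast-path check: only take the slow path (initThread) if this thread's TSD
  // has not been initialized yet.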
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

  // TODO(chiahungduan): Consider removing the argument `UnlockRequired` by
  // embedding the logic into TSD or always locking the TSD. That would let us
  // properly add thread annotations here and add runtime assertions in the
  // member functions of TSD, for example asserting that the lock is acquired
  // before calling TSD::commitBack().
  ALWAYS_INLINE TSD<Allocator> *
  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local one.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }
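
  // Only the per-thread memory-init behavior is configurable here. The
  // exclusive registry has exactly one TSD per thread, so MaxTSDsCount is not
  // supported and is reported as a failure.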
  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

  void getStats(ScopedString *Str) {
    // We don't have a way to iterate over all thread local `ThreadTSD`s.
    // Printing only the current thread's `ThreadTSD` could be misleading, so
    // we just skip it.
    Str->append("Exclusive TSD doesn't support iterating over each TSD\n");
  }

private:
  // Using minimal initialization allows for global initialization while
  // keeping the thread specific structure untouched. The fallback structure
  // will be used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }
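
  // `Disabled` routes even fully-initialized threads to the (locked) fallback
  // TSD while disable() is in effect. `State` and `ThreadTSD` are the
  // per-thread exclusive structures; everything else is shared registry state.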
  pthread_key_t PThreadKey = {};
  bool Initialized GUARDED_BY(Mutex) = false;
  atomic_u8 Disabled = {};
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;
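
// Pthread key destructor: invoked at thread exit for every thread that set a
// TSD via pthread_setspecific() in initThread().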
template <class Allocator>
void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until the last of the PTHREAD_DESTRUCTOR_ITERATIONS
  // passes before draining the quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_