path: root/contrib/libs/clang16-rt/lib/scudo/standalone/tsd_exclusive.h
//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

namespace scudo {

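// Per-thread state, packed into a bitfield: whether memory initialization is
// disabled for this thread, and where the thread is in its TSD lifecycle.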
struct ThreadState {
  bool DisableMemInit : 1;
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};

template <class Allocator> void teardownThread(void *Ptr);

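// Registry handing each thread an exclusive TSD through a thread_local
// variable, with a single shared, mutex-protected fallback TSD used while a
// thread is not fully initialized or while the registry is disabled.
//
// Usage sketch (illustrative only, not part of this header; `TSDRegistry`
// names whatever registry instance the allocator owns): the allocator front
// end typically brackets a cache operation as follows:
//
//   bool UnlockRequired;
//   auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
//   // ... operate on the TSD's cache / quarantine cache (see tsd.h) ...
//   if (UnlockRequired)
//     TSD->unlock();
//
// UnlockRequired is only set when the shared fallback TSD was handed out;
// the thread-exclusive TSD needs no locking.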
template <class Allocator> struct TSDRegistryExT {
  void init(Allocator *Instance) {
    DCHECK(!Initialized);
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      ThreadTSD = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    State = {};
    Initialized = false;
  }

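  // Called on the allocation fast path: performs the out-of-line thread
  // initialization only while this thread is still NotInitialized; once it
  // is Initialized or TornDown, this is a no-op.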
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

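  // Fast path: if this thread's TSD is initialized and the registry has not
  // been disabled, hand out the thread-local TSD without taking any lock.
  // Otherwise fall back to the shared TSD, which is returned locked and must
  // be unlocked by the caller (signalled through *UnlockRequired).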
  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback TSD
  // and force all threads to attempt to use it instead of their local one.
  void disable() {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

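  // Releases the locks in the reverse order they were acquired in disable().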
  void enable() {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }

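  // ThreadDisableMemInit is tracked per thread; MaxTSDsCount does not apply
  // to the exclusive registry (there is exactly one TSD per thread) and is
  // rejected.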
  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

private:
  // Using minimal initialization allows for global initialization while keeping
  // the thread-specific structure untouched. The fallback structure will be
  // used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

  pthread_key_t PThreadKey = {};
  bool Initialized = false;
  atomic_u8 Disabled = {};
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

template <class Allocator> void teardownThread(void *Ptr) {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_