blob: eef4a7ba1e656ea8db4f7fbc5df31e63ad573eb8 (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
|
//===-- scudo_tsd.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// Scudo thread specific data definition.
/// Implementation will differ based on the thread local storage primitives
/// offered by the underlying platform.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TSD_H_
#define SCUDO_TSD_H_
#include "scudo_allocator.h"
#include "scudo_utils.h"
#include <pthread.h>
namespace __scudo {
// Per-thread data for the allocator: a local allocation cache plus a
// placeholder for the quarantine cache. Padded/aligned to a cache line to
// avoid false sharing between threads' TSDs.
struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
  AllocatorCacheT Cache;
  uptr QuarantineCachePlaceHolder[4];

  void init();
  void commitBack();

  // Attempts to acquire this TSD's mutex without blocking. On failure, stamps
  // Precedence (once) with a coarse timestamp so callers can identify which
  // contended TSD has been waited on the longest.
  inline bool tryLock() SANITIZER_TRY_ACQUIRE(true, Mutex) {
    if (!Mutex.TryLock()) {
      // Only record the first failure: a non-zero Precedence means the
      // contention timestamp is already set.
      if (atomic_load_relaxed(&Precedence) == 0) {
        const uptr Timestamp = static_cast<uptr>(
            MonotonicNanoTime() >> FIRST_32_SECOND_64(16, 0));
        atomic_store_relaxed(&Precedence, Timestamp);
      }
      return false;
    }
    // Acquired: clear any stale contention timestamp.
    atomic_store_relaxed(&Precedence, 0);
    return true;
  }

  // Blocking acquire; resets the contention timestamp first since we are
  // committed to taking this TSD.
  inline void lock() SANITIZER_ACQUIRE(Mutex) {
    atomic_store_relaxed(&Precedence, 0);
    Mutex.Lock();
  }

  inline void unlock() SANITIZER_RELEASE(Mutex) { Mutex.Unlock(); }

  // 0 means "uncontended"; otherwise the (shifted) time of the first failed
  // tryLock since the last successful acquisition.
  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }

 private:
  StaticSpinMutex Mutex;
  atomic_uintptr_t Precedence;
};
void initThread(bool MinimalInit);
// TSD model specific fastpath functions definitions.
#include "scudo_tsd_exclusive.inc"
#include "scudo_tsd_shared.inc"
} // namespace __scudo
#endif // SCUDO_TSD_H_
|