//===-- tsan_defs.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_DEFS_H
#define TSAN_DEFS_H
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "ubsan/ubsan_platform.h"
#ifndef TSAN_VECTORIZE
# define TSAN_VECTORIZE __SSE4_2__
#endif
#if TSAN_VECTORIZE
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers in the tsan runtime.
// So we do this dirty trick.
# define _MM_MALLOC_H_INCLUDED
# define __MM_MALLOC_H
# include <emmintrin.h>
# include <smmintrin.h>
# define VECTOR_ALIGNED ALIGNED(16)
typedef __m128i m128;
#else
# define VECTOR_ALIGNED
#endif
// Setup defaults for compile definitions.
#ifndef TSAN_NO_HISTORY
# define TSAN_NO_HISTORY 0
#endif
#ifndef TSAN_CONTAINS_UBSAN
# if CAN_SANITIZE_UB && !SANITIZER_GO
# define TSAN_CONTAINS_UBSAN 1
# else
# define TSAN_CONTAINS_UBSAN 0
# endif
#endif
namespace __tsan {
constexpr uptr kByteBits = 8;
// Thread slot ID.
enum class Sid : u8 {};
constexpr uptr kThreadSlotCount = 256;
constexpr Sid kFreeSid = static_cast<Sid>(255);
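// Note: Sid is a scoped enum, so slot IDs cannot be implicitly mixed with
// other integer IDs; a u8 covers all 256 slots, and the top value (255) is
// reserved as kFreeSid.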
// Abstract time unit, vector clock element.
enum class Epoch : u16 {};
constexpr uptr kEpochBits = 14;
constexpr Epoch kEpochZero = static_cast<Epoch>(0);
constexpr Epoch kEpochOver = static_cast<Epoch>(1 << kEpochBits);
constexpr Epoch kEpochLast = static_cast<Epoch>((1 << kEpochBits) - 1);
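// Note: with kEpochBits == 14, kEpochOver == 16384 and kEpochLast == 16383.
// Keeping the epoch to 14 bits leaves room for it to be packed together with
// a Sid and access bits into the 32-bit RawShadow value declared below.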
inline Epoch EpochInc(Epoch epoch) {
return static_cast<Epoch>(static_cast<u16>(epoch) + 1);
}
inline bool EpochOverflow(Epoch epoch) { return epoch == kEpochOver; }
const uptr kShadowStackSize = 64 * 1024;
// Count of shadow values in a shadow cell.
const uptr kShadowCnt = 4;
// That many user bytes are mapped onto a single shadow cell.
const uptr kShadowCell = 8;
// Single shadow value.
enum class RawShadow : u32 {};
const uptr kShadowSize = sizeof(RawShadow);
// Shadow memory is kShadowMultiplier times larger than user memory.
const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
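// With the values above this is 4 (bytes per RawShadow) * 4 (values per cell)
// / 8 (user bytes per cell) = 2, i.e. shadow occupies twice the application
// memory.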
// That many user bytes are mapped onto a single meta shadow cell.
// Must be less than or equal to the minimal memory allocator alignment.
const uptr kMetaShadowCell = 8;
// Size of a single meta shadow value (u32).
const uptr kMetaShadowSize = 4;
// All addresses and PCs are assumed to be compressible to that many bits.
const uptr kCompressedAddrBits = 44;
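// For scale: 2^44 bytes == 16 TiB.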
#if TSAN_NO_HISTORY
const bool kCollectHistory = false;
#else
const bool kCollectHistory = true;
#endif
// The following "build consistency" machinery ensures that all source files
// are built in the same configuration. Inconsistent builds lead to
// hard-to-debug crashes.
#if SANITIZER_DEBUG
void build_consistency_debug();
#else
void build_consistency_release();
#endif
static inline void USED build_consistency() {
#if SANITIZER_DEBUG
build_consistency_debug();
#else
build_consistency_release();
#endif
}
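// Only the variant matching this build's SANITIZER_DEBUG setting is defined
// in the runtime, so linking objects compiled with inconsistent settings
// fails with an undefined symbol rather than misbehaving at run time.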
template<typename T>
T min(T a, T b) {
return a < b ? a : b;
}
template<typename T>
T max(T a, T b) {
return a > b ? a : b;
}
template<typename T>
T RoundUp(T p, u64 align) {
DCHECK_EQ(align & (align - 1), 0);
return (T)(((u64)p + align - 1) & ~(align - 1));
}
template<typename T>
T RoundDown(T p, u64 align) {
DCHECK_EQ(align & (align - 1), 0);
return (T)((u64)p & ~(align - 1));
}
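// Example: RoundUp(13, 8) == 16 and RoundDown(13, 8) == 8. 'align' must be a
// power of two, which is what the DCHECKs above verify.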
// Zeroes the high part and returns the low 'bits' bits.
template<typename T>
T GetLsb(T v, int bits) {
return (T)((u64)v & ((1ull << bits) - 1));
}
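// Example: GetLsb(0xABCDu, 8) == 0xCDu.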
struct MD5Hash {
u64 hash[2];
bool operator==(const MD5Hash &other) const;
};
MD5Hash md5_hash(const void *data, uptr size);
struct Processor;
struct ThreadState;
class ThreadContext;
struct TidSlot;
struct Context;
struct ReportStack;
class ReportDesc;
class RegionAlloc;
struct Trace;
struct TracePart;
typedef uptr AccessType;
enum : AccessType {
kAccessWrite = 0,
kAccessRead = 1 << 0,
kAccessAtomic = 1 << 1,
  kAccessVptr = 1 << 2,  // read or write of an object's virtual table pointer
kAccessFree = 1 << 3, // synthetic memory access during memory freeing
kAccessExternalPC = 1 << 4, // access PC can have kExternalPCBit set
kAccessCheckOnly = 1 << 5, // check for races, but don't store
kAccessNoRodata = 1 << 6, // don't check for .rodata marker
kAccessSlotLocked = 1 << 7, // memory access with TidSlot locked
};
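// The flags combine with bitwise OR, e.g. an atomic read is
// (kAccessRead | kAccessAtomic); a plain write passes kAccessWrite (0),
// i.e. no flags.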
// Descriptor of user's memory block.
struct MBlock {
u64 siz : 48;
u64 tag : 16;
StackID stk;
Tid tid;
};
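// The two bitfields share a single u64 (48 + 16 bits); assuming the 32-bit
// StackID and Tid typedefs from sanitizer_common, the struct is
// 8 + 4 + 4 = 16 bytes, which the check below enforces.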
COMPILER_CHECK(sizeof(MBlock) == 16);
enum ExternalTag : uptr {
kExternalTagNone = 0,
kExternalTagSwiftModifyingAccess = 1,
kExternalTagFirstUserAvailable = 2,
kExternalTagMax = 1024,
  // Don't set kExternalTagMax over 65,536, since MBlock only stores tags
  // as 16-bit values; see the MBlock definition above.
};
enum {
MutexTypeReport = MutexLastCommon,
MutexTypeSyncVar,
MutexTypeAnnotations,
MutexTypeAtExit,
MutexTypeFired,
MutexTypeRacy,
MutexTypeGlobalProc,
MutexTypeInternalAlloc,
MutexTypeTrace,
MutexTypeSlot,
MutexTypeSlots,
};
} // namespace __tsan
#endif // TSAN_DEFS_H