#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/seq.h"

typedef struct hooks_internal_s hooks_internal_t;
struct hooks_internal_s {
	hooks_t hooks;
	bool in_use;
};

seq_define(hooks_internal_t, hooks)

static atomic_u_t nhooks = ATOMIC_INIT(0);
static seq_hooks_t hooks[HOOK_MAX];
static malloc_mutex_t hooks_mu;
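
/*
 * A note on the synchronization scheme (descriptive only): seq_define()
 * above generates the seq-protected wrapper type (seq_hooks_t) together
 * with the seq_store_hooks()/seq_try_load_hooks() pair used throughout this
 * file. Writers serialize on hooks_mu and publish via seq_store_hooks();
 * readers load slots without the lock and simply skip any slot whose read
 * was torn by a concurrent write. nhooks is maintained with relaxed atomics
 * and exists only to give hook invocation a cheap empty-set fast path.
 */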

bool
hook_boot() {
	return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
	    malloc_mutex_rank_exclusive);
}

static void *
hook_install_locked(hooks_t *to_install) {
	hooks_internal_t hooks_internal;
	for (int i = 0; i < HOOK_MAX; i++) {
		bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
		/* We hold mu; no concurrent access. */
		assert(success);
		if (!hooks_internal.in_use) {
			hooks_internal.hooks = *to_install;
			hooks_internal.in_use = true;
			seq_store_hooks(&hooks[i], &hooks_internal);
			atomic_store_u(&nhooks,
			    atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
			    ATOMIC_RELAXED);
			return &hooks[i];
		}
	}
	return NULL;
}

void *
hook_install(tsdn_t *tsdn, hooks_t *to_install) {
	malloc_mutex_lock(tsdn, &hooks_mu);
	void *ret = hook_install_locked(to_install);
	if (ret != NULL) {
		tsd_global_slow_inc(tsdn);
	}
	malloc_mutex_unlock(tsdn, &hooks_mu);
	return ret;
}
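
/*
 * Usage sketch (illustrative only; my_alloc_hook and my_extra are
 * hypothetical caller-side names, not part of this file):
 *
 *	static void
 *	my_alloc_hook(void *extra, hook_alloc_t type, void *result,
 *	    uintptr_t result_raw, uintptr_t args_raw[3]) {
 *		(void)extra;	// Count or log the allocation here.
 *	}
 *
 *	hooks_t h = {.alloc_hook = my_alloc_hook, .extra = my_extra};
 *	void *handle = hook_install(tsdn, &h);
 *	if (handle == NULL) {
 *		// All HOOK_MAX slots were already in use.
 *	}
 *	...
 *	hook_remove(tsdn, handle);
 *
 * Note that a successful install bumps the global slow-path state
 * (tsd_global_slow_inc() above), steering threads off the allocation fast
 * paths that would otherwise bypass the hook invocation points.
 */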

static void
hook_remove_locked(seq_hooks_t *to_remove) {
	hooks_internal_t hooks_internal;
	bool success = seq_try_load_hooks(&hooks_internal, to_remove);
	/* We hold mu; no concurrent access. */
	assert(success);
	/* Should only remove hooks that were added. */
	assert(hooks_internal.in_use);
	hooks_internal.in_use = false;
	seq_store_hooks(to_remove, &hooks_internal);
	atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
	    ATOMIC_RELAXED);
}

void
hook_remove(tsdn_t *tsdn, void *opaque) {
	if (config_debug) {
		char *hooks_begin = (char *)&hooks[0];
		char *hooks_end = (char *)&hooks[HOOK_MAX];
		char *hook = (char *)opaque;
		assert(hooks_begin <= hook && hook < hooks_end
		    && (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
	}
	malloc_mutex_lock(tsdn, &hooks_mu);
	hook_remove_locked((seq_hooks_t *)opaque);
	tsd_global_slow_dec(tsdn);
	malloc_mutex_unlock(tsdn, &hooks_mu);
}

#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr)				\
for (int for_each_hook_counter = 0;					\
    for_each_hook_counter < HOOK_MAX;					\
    for_each_hook_counter++) {						\
	bool for_each_hook_success = seq_try_load_hooks(		\
	    (hooks_internal_ptr), &hooks[for_each_hook_counter]);	\
	if (!for_each_hook_success) {					\
		continue;						\
	}								\
	if (!(hooks_internal_ptr)->in_use) {				\
		continue;						\
	}

#define FOR_EACH_HOOK_END						\
}
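
/*
 * Note that iteration is lock-free: a reader racing with a writer sees
 * seq_try_load_hooks() fail and skips that slot for this invocation instead
 * of blocking on hooks_mu.
 */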

static bool *
hook_reentrantp() {
	/*
	 * We prevent user reentrancy within hooks. This is basically just a
	 * thread-local bool that triggers an early-exit.
	 *
	 * We don't fold in_hook into reentrancy. There are two reasons for
	 * this:
	 * - Right now, we turn on reentrancy during things like extent hook
	 *   execution. Allocating during extent hooks is not officially
	 *   supported, but we don't want to break it for the time being.
	 *   These sorts of allocations should probably still be hooked,
	 *   though.
	 * - If a hook allocates, we may want it to be relatively fast (after
	 *   all, it executes on every allocator operation). Turning on
	 *   reentrancy is a fairly heavyweight mode (disabling tcache,
	 *   redirecting to arena 0, etc.). It's possible we may one day want
	 *   to turn on reentrant mode here, if it proves too difficult to
	 *   keep this working. But that's fairly easy for us to see; OTOH,
	 *   people not using hooks because they're too slow is easy for us
	 *   to miss.
	 *
	 * The tricky part is that this code might get invoked even if we
	 * don't have access to tsd. This function mimics getting a pointer
	 * to thread-local data, except that it might secretly return a
	 * pointer to some global data if we know that the caller will take
	 * the early-exit path. If we return a bool that indicates that we
	 * are reentrant, then the caller will go down the early exit path,
	 * leaving the global untouched.
	 */
	static bool in_hook_global = true;
	tsdn_t *tsdn = tsdn_fetch();
	bool *in_hook = tsdn_in_hookp_get(tsdn);
	if (in_hook != NULL) {
		return in_hook;
	}
	return &in_hook_global;
}

#define HOOK_PROLOGUE							\
	if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {	\
		return;							\
	}								\
	bool *in_hook = hook_reentrantp();				\
	if (*in_hook) {							\
		return;							\
	}								\
	*in_hook = true;

#define HOOK_EPILOGUE							\
	*in_hook = false;
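
/*
 * Every hook_invoke_* entry point below follows the same skeleton:
 * HOOK_PROLOGUE (the relaxed nhooks check as the common-case early exit,
 * then the reentrancy guard), a FOR_EACH_HOOK_BEGIN/END loop that calls the
 * matching callback when it is non-NULL, and HOOK_EPILOGUE to clear the
 * guard.
 */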

void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
    uintptr_t args_raw[3]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_alloc h = hook.hooks.alloc_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, result, result_raw,
			    args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}

void
hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_dalloc h = hook.hooks.dalloc_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, address, args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}

void
hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
    size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_expand h = hook.hooks.expand_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, address, old_usize,
			    new_usize, result_raw, args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}
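
/*
 * Illustrative call site (hypothetical; the real call sites live in the
 * allocator entry points, not in this file). After a malloc(size) that
 * returned ret, one would expect roughly:
 *
 *	uintptr_t args_raw[3] = {(uintptr_t)size, 0, 0};
 *	hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args_raw);
 */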