#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif
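/*
 * This file hooks jemalloc into Darwin's malloc zone facility: it defines a
 * malloc_zone_t whose callbacks forward to jemalloc, registers that zone with
 * the system, and then promotes it to be the default zone so that allocations
 * made through the zone interface are served by jemalloc.
 */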
/* The definitions of the following structs in malloc/malloc.h may be too old
* for the built binary to run correctly on newer versions of OSX, so declare
* the newest possible versions of those structs here.
*/
typedef struct _malloc_zone_t {
void *reserved1;
void *reserved2;
size_t (*size)(struct _malloc_zone_t *, const void *);
void *(*malloc)(struct _malloc_zone_t *, size_t);
void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
void *(*valloc)(struct _malloc_zone_t *, size_t);
void (*free)(struct _malloc_zone_t *, void *);
void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
void (*destroy)(struct _malloc_zone_t *);
const char *zone_name;
unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
struct malloc_introspection_t *introspect;
unsigned version;
void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
} malloc_zone_t;
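/*
 * The version field above (set to 9 in zone_init() below) tells the system
 * which of the trailing callbacks may be consulted; older zone versions end
 * the struct earlier, so memalign, free_definite_size and pressure_relief are
 * only used with sufficiently new versions.
 */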
typedef struct {
vm_address_t address;
vm_size_t size;
} vm_range_t;
typedef struct malloc_statistics_t {
unsigned blocks_in_use;
size_t size_in_use;
size_t max_size_in_use;
size_t size_allocated;
} malloc_statistics_t;
typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
typedef struct malloc_introspection_t {
kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
size_t (*good_size)(malloc_zone_t *, size_t);
boolean_t (*check)(malloc_zone_t *);
void (*print)(malloc_zone_t *, boolean_t);
void (*log)(malloc_zone_t *, void *);
void (*force_lock)(malloc_zone_t *);
void (*force_unlock)(malloc_zone_t *);
void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
boolean_t (*zone_locked)(malloc_zone_t *);
boolean_t (*enable_discharge_checking)(malloc_zone_t *);
boolean_t (*disable_discharge_checking)(malloc_zone_t *);
void (*discharge)(malloc_zone_t *, void *);
#ifdef __BLOCKS__
void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
#else
void *enumerate_unavailable_without_blocks;
#endif
void (*reinit_lock)(malloc_zone_t *);
} malloc_introspection_t;
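/*
 * The introspection callbacks are used by the system's heap-inspection tools
 * and, importantly for this file, around fork(): the system locks every
 * registered zone before forking and unlocks it afterwards through
 * force_lock/force_unlock (and reinit_lock in the child for newer zone
 * versions). See zone_force_lock()/zone_force_unlock() below.
 */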
extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
extern malloc_zone_t *malloc_default_zone(void);
extern void malloc_zone_register(malloc_zone_t *zone);
extern void malloc_zone_unregister(malloc_zone_t *zone);
/*
* The malloc_default_purgeable_zone() function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;
static pid_t zone_force_lock_pid = -1;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t zone_size(malloc_zone_t *zone, const void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
static void zone_destroy(malloc_zone_t *zone);
static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
void **results, unsigned num_requested);
static void zone_batch_free(struct _malloc_zone_t *zone,
void **to_be_freed, unsigned num_to_be_freed);
static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask,
vm_address_t zone_address, memory_reader_t reader,
vm_range_recorder_t recorder);
static boolean_t zone_check(malloc_zone_t *zone);
static void zone_print(malloc_zone_t *zone, boolean_t verbose);
static void zone_log(malloc_zone_t *zone, void *address);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
static void zone_statistics(malloc_zone_t *zone,
malloc_statistics_t *stats);
static boolean_t zone_locked(malloc_zone_t *zone);
static void zone_reinit_lock(malloc_zone_t *zone);
/******************************************************************************/
/*
* Functions.
*/
static size_t
zone_size(malloc_zone_t *zone, const void *ptr) {
/*
* There appear to be places within Darwin (such as setenv(3)) that
* cause calls to this function with pointers that *no* zone owns. If
* we knew that all pointers were owned by *some* zone, we could split
* our zone into two parts, and use one as the default allocator and
* the other as the default deallocator/reallocator. Since that will
* not work in practice, we must check all pointers to ensure that they
* reside within a mapped extent before determining size.
*/
return ivsalloc(tsdn_fetch(), ptr);
}
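/*
 * Returning 0 from the size() callback above is how a zone tells the system
 * that it does not own a pointer; zone_free() and zone_realloc() below rely
 * on the same ivsalloc() check to decide whether a pointer belongs to
 * jemalloc or must be handed back to the system allocator.
 */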
static void *
zone_malloc(malloc_zone_t *zone, size_t size) {
return je_malloc(size);
}
static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
return je_calloc(num, size);
}
static void *
zone_valloc(malloc_zone_t *zone, size_t size) {
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, PAGE, size);
return ret;
}
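/*
 * Pointers that jemalloc does not own (e.g. ones handed out by the original
 * default zone before the jemalloc zone was promoted) are passed through to
 * the system's free()/realloc() rather than jemalloc's.
 */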
static void
zone_free(malloc_zone_t *zone, void *ptr) {
if (ivsalloc(tsdn_fetch(), ptr) != 0) {
je_free(ptr);
return;
}
free(ptr);
}
static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
if (ivsalloc(tsdn_fetch(), ptr) != 0) {
return je_realloc(ptr, size);
}
return realloc(ptr, size);
}
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, alignment, size);
return ret;
}
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
size_t alloc_size;
alloc_size = ivsalloc(tsdn_fetch(), ptr);
if (alloc_size != 0) {
assert(alloc_size == size);
je_free(ptr);
return;
}
free(ptr);
}
static void
zone_destroy(malloc_zone_t *zone) {
/* This function should never be called. */
not_reached();
}
static unsigned
zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
unsigned num_requested) {
unsigned i;
for (i = 0; i < num_requested; i++) {
results[i] = je_malloc(size);
if (!results[i])
break;
}
return i;
}
static void
zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
unsigned num_to_be_freed) {
unsigned i;
for (i = 0; i < num_to_be_freed; i++) {
zone_free(zone, to_be_freed[i]);
to_be_freed[i] = NULL;
}
}
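/*
 * pressure_relief is the hook the system can call when it wants zones to
 * release memory; the return value is nominally the number of bytes freed
 * back. No reclamation is attempted here, so we report zero.
 */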
static size_t
zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
return 0;
}
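/*
 * good_size reports the usable size that a request would be rounded up to.
 * For example, with jemalloc's default size classes a 100 byte request maps
 * to the 112 byte class (illustrative; the exact classes depend on
 * configuration).
 */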
static size_t
zone_good_size(malloc_zone_t *zone, size_t size) {
if (size == 0) {
size = 1;
}
return sz_s2u(size);
}
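/*
 * Heap enumeration is not supported for the jemalloc zone: the enumerator
 * reports success without recording any ranges, so introspection tools see
 * no allocations attributed to this zone.
 */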
static kern_return_t
zone_enumerator(task_t task, void *data, unsigned type_mask,
vm_address_t zone_address, memory_reader_t reader,
vm_range_recorder_t recorder) {
return KERN_SUCCESS;
}
static boolean_t
zone_check(malloc_zone_t *zone) {
return true;
}
static void
zone_print(malloc_zone_t *zone, boolean_t verbose) {
}
static void
zone_log(malloc_zone_t *zone, void *address) {
}
static void
zone_force_lock(malloc_zone_t *zone) {
if (isthreaded) {
/*
* See the note in zone_force_unlock, below, for why we need
* this.
*/
assert(zone_force_lock_pid == -1);
zone_force_lock_pid = getpid();
jemalloc_prefork();
}
}
static void
zone_force_unlock(malloc_zone_t *zone) {
/*
* zone_force_lock and zone_force_unlock are the entry points to the
* forking machinery on OS X. The tricky thing is, the child is not
* allowed to unlock mutexes locked in the parent, even if owned by the
* forking thread (and the mutex type we use in OS X will fail an assert
* if we try). In the child, we can get away with reinitializing all
* the mutexes, which has the effect of unlocking them. In the parent,
* doing this would mean we wouldn't wake any waiters blocked on the
* mutexes we unlock. So, we record the pid of the forking process in
* zone_force_lock, and use that to detect whether we're in the parent or
* the child here, to decide which unlock logic we need.
*/
if (isthreaded) {
assert(zone_force_lock_pid != -1);
if (getpid() == zone_force_lock_pid) {
jemalloc_postfork_parent();
} else {
jemalloc_postfork_child();
}
zone_force_lock_pid = -1;
}
}
static void
zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
/* We make no effort to actually fill the values. */
stats->blocks_in_use = 0;
stats->size_in_use = 0;
stats->max_size_in_use = 0;
stats->size_allocated = 0;
}
static boolean_t
zone_locked(malloc_zone_t *zone) {
/* Pretend no lock is being held. */
return false;
}
static void
zone_reinit_lock(malloc_zone_t *zone) {
/* As of OSX 10.12, this function is only used when force_unlock would
* be used if the zone version were < 9. So just use force_unlock. */
zone_force_unlock(zone);
}
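/*
 * Taken together, the fork-time sequence driven by the system allocator is
 * roughly:
 *
 *   pre-fork:            force_lock   -> jemalloc_prefork()
 *   post-fork (parent):  force_unlock -> jemalloc_postfork_parent()
 *   post-fork (child):   reinit_lock for zone version >= 9 (older systems
 *                        call force_unlock) -> jemalloc_postfork_child()
 *
 * zone_force_lock_pid is what lets zone_force_unlock() distinguish the
 * parent from the child on systems that call it in both.
 */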
static void
zone_init(void) {
jemalloc_zone.size = zone_size;
jemalloc_zone.malloc = zone_malloc;
jemalloc_zone.calloc = zone_calloc;
jemalloc_zone.valloc = zone_valloc;
jemalloc_zone.free = zone_free;
jemalloc_zone.realloc = zone_realloc;
jemalloc_zone.destroy = zone_destroy;
jemalloc_zone.zone_name = "jemalloc_zone";
jemalloc_zone.batch_malloc = zone_batch_malloc;
jemalloc_zone.batch_free = zone_batch_free;
jemalloc_zone.introspect = &jemalloc_zone_introspect;
jemalloc_zone.version = 9;
jemalloc_zone.memalign = zone_memalign;
jemalloc_zone.free_definite_size = zone_free_definite_size;
jemalloc_zone.pressure_relief = zone_pressure_relief;
jemalloc_zone_introspect.enumerator = zone_enumerator;
jemalloc_zone_introspect.good_size = zone_good_size;
jemalloc_zone_introspect.check = zone_check;
jemalloc_zone_introspect.print = zone_print;
jemalloc_zone_introspect.log = zone_log;
jemalloc_zone_introspect.force_lock = zone_force_lock;
jemalloc_zone_introspect.force_unlock = zone_force_unlock;
jemalloc_zone_introspect.statistics = zone_statistics;
jemalloc_zone_introspect.zone_locked = zone_locked;
jemalloc_zone_introspect.enable_discharge_checking = NULL;
jemalloc_zone_introspect.disable_discharge_checking = NULL;
jemalloc_zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
#else
jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
}
static malloc_zone_t *
zone_default_get(void) {
malloc_zone_t **zones = NULL;
unsigned int num_zones = 0;
/*
* On OSX 10.12, malloc_default_zone returns a special zone that is not
* present in the list of registered zones. That zone uses a "lite zone"
* if one is present (apparently enabled when malloc stack logging is
* enabled), or the first registered zone otherwise. In practice this
* means unless malloc stack logging is enabled, the first registered
* zone is the default. So get the list of zones to get the first one,
* instead of relying on malloc_default_zone.
*/
if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
(vm_address_t**)&zones, &num_zones)) {
/*
* Reset the value in case the failure happened after it was
* set.
*/
num_zones = 0;
}
if (num_zones) {
return zones[0];
}
return malloc_default_zone();
}
/* As written, this function can only promote jemalloc_zone. */
static void
zone_promote(void) {
malloc_zone_t *zone;
do {
/*
* Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it
* at the location of the specified zone. Unregistering the
* default zone thus makes the last registered one the default.
* On OSX < 10.6, unregistering shifts all registered zones.
* The first registered zone then becomes the default.
*/
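/*
 * For example (hypothetical zone list, 10.6+ semantics): starting from
 * [default, A, B, jemalloc], unregister(default) moves the last zone into
 * its slot, giving [jemalloc, A, B]; register(default) appends it again,
 * giving [jemalloc, A, B, default]. The first registered zone, and thus
 * the effective default, is now jemalloc.
 */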
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
/*
* On OSX 10.6, having the default purgeable zone appear before
* the default zone makes some things crash because it thinks it
* owns pointers allocated by the default zone. We thus
* unregister/re-register it in order to ensure it's always
* after the default zone. On OSX < 10.6, there is no purgeable
* zone, so this does nothing. On OSX >= 10.6, unregistering
* replaces the purgeable zone with the last registered zone
* above, i.e. the default zone. Registering it again then puts
* it at the end, obviously after the default zone.
*/
if (purgeable_zone != NULL) {
malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone);
}
zone = zone_default_get();
} while (zone != &jemalloc_zone);
}
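/*
 * JEMALLOC_ATTR(constructor) makes zone_register() run automatically when the
 * image containing jemalloc is loaded, before main(), so the zone is
 * registered and promoted without any explicit call from the application.
 */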
JEMALLOC_ATTR(constructor)
void
zone_register(void) {
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
default_zone = zone_default_get();
if (!default_zone->zone_name || strcmp(default_zone->zone_name,
"DefaultMallocZone") != 0) {
return;
}
/*
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone() is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
zone_init();
malloc_zone_register(&jemalloc_zone);
/* Promote the custom zone to be the default. */
zone_promote();
}