Diffstat (limited to 'contrib/libs/jemalloc/src/prof.c')
-rw-r--r--  contrib/libs/jemalloc/src/prof.c  1504
1 file changed, 752 insertions(+), 752 deletions(-)
diff --git a/contrib/libs/jemalloc/src/prof.c b/contrib/libs/jemalloc/src/prof.c index da834b54fb..ca8945f6db 100644 --- a/contrib/libs/jemalloc/src/prof.c +++ b/contrib/libs/jemalloc/src/prof.c @@ -9,44 +9,44 @@ #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/emitter.h" -/******************************************************************************/ - -#ifdef JEMALLOC_PROF_LIBUNWIND +/******************************************************************************/ + +#ifdef JEMALLOC_PROF_LIBUNWIND #define UNW_LOCAL_ONLY #include <libunwind.h> -#endif - -#ifdef JEMALLOC_PROF_LIBGCC +#endif + +#ifdef JEMALLOC_PROF_LIBGCC /* * We have a circular dependency -- jemalloc_internal.h tells us if we should * use libgcc's unwinding functionality, but after we've included that, we've * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. */ #undef _Unwind_Backtrace -#include <unwind.h> +#include <unwind.h> #define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) -#endif - -/******************************************************************************/ -/* Data. */ - -bool opt_prof = false; -bool opt_prof_active = true; +#endif + +/******************************************************************************/ +/* Data. */ + +bool opt_prof = false; +bool opt_prof_active = true; bool opt_prof_thread_active_init = true; -size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; -ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; -bool opt_prof_gdump = false; +size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; +ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; +bool opt_prof_gdump = false; bool opt_prof_final = false; -bool opt_prof_leak = false; -bool opt_prof_accum = false; +bool opt_prof_leak = false; +bool opt_prof_accum = false; bool opt_prof_log = false; -char opt_prof_prefix[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; - +char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + /* * Initialized as opt_prof_active, and accessed via * prof_active_[gs]et{_unlocked,}(). @@ -68,8 +68,8 @@ static malloc_mutex_t prof_thread_active_init_mtx; bool prof_gdump_val; static malloc_mutex_t prof_gdump_mtx; -uint64_t prof_interval = 0; - +uint64_t prof_interval = 0; + size_t lg_prof_sample; typedef enum prof_logging_state_e prof_logging_state_t; @@ -79,7 +79,7 @@ enum prof_logging_state_e { prof_logging_state_dumping }; -/* +/* * - stopped: log_start never called, or previous log_stop has completed. * - started: log_start called, log_stop not called yet. Allocations are logged. * - dumping: log_stop called but not finished; samples are not logged anymore. @@ -170,13 +170,13 @@ static malloc_mutex_t log_mtx; * Table of mutexes that are shared among gctx's. These are leaf locks, so * there is no problem with using them for more than one gctx at the same time. * The primary motivation for this sharing though is that gctx's are ephemeral, - * and destroying mutexes causes complications for systems that allocate when - * creating/destroying mutexes. - */ + * and destroying mutexes causes complications for systems that allocate when + * creating/destroying mutexes. + */ static malloc_mutex_t *gctx_locks; static atomic_u_t cum_gctxs; /* Atomic counter. */ - -/* + +/* * Table of mutexes that are shared among tdata's. 
No operations require * holding multiple tdata locks, so there is no problem with using them for more * than one tdata at the same time, even though a gctx lock may be acquired @@ -186,12 +186,12 @@ static malloc_mutex_t *tdata_locks; /* * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data - * structure that knows about all backtraces currently captured. - */ + * structure that knows about all backtraces currently captured. + */ static ckh_t bt2gctx; /* Non static to enable profiling. */ malloc_mutex_t bt2gctx_mtx; - + /* * Tree of all extant prof_tdata_t structures, regardless of state, * {attached,detached,expired}. @@ -202,37 +202,37 @@ static malloc_mutex_t tdatas_mtx; static uint64_t next_thr_uid; static malloc_mutex_t next_thr_uid_mtx; -static malloc_mutex_t prof_dump_seq_mtx; -static uint64_t prof_dump_seq; -static uint64_t prof_dump_iseq; -static uint64_t prof_dump_mseq; -static uint64_t prof_dump_useq; - -/* - * This buffer is rather large for stack allocation, so use a single buffer for - * all profile dumps. - */ -static malloc_mutex_t prof_dump_mtx; -static char prof_dump_buf[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PROF_DUMP_BUFSIZE -#else - 1 -#endif -]; +static malloc_mutex_t prof_dump_seq_mtx; +static uint64_t prof_dump_seq; +static uint64_t prof_dump_iseq; +static uint64_t prof_dump_mseq; +static uint64_t prof_dump_useq; + +/* + * This buffer is rather large for stack allocation, so use a single buffer for + * all profile dumps. + */ +static malloc_mutex_t prof_dump_mtx; +static char prof_dump_buf[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PROF_DUMP_BUFSIZE +#else + 1 +#endif +]; static size_t prof_dump_buf_end; -static int prof_dump_fd; - -/* Do not dump any profiles until bootstrapping is complete. */ -static bool prof_booted = false; - -/******************************************************************************/ +static int prof_dump_fd; + +/* Do not dump any profiles until bootstrapping is complete. */ +static bool prof_booted = false; + +/******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ - + static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, @@ -309,12 +309,12 @@ rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, /******************************************************************************/ -void +void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { prof_tdata_t *tdata; - - cassert(config_prof); - + + cassert(config_prof); + if (updated) { /* * Compute a new sample threshold. 
This isn't very important in @@ -337,8 +337,8 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } } -} - +} + void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { @@ -440,10 +440,10 @@ prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { } } -static void +static void prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - + prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false); if (cons_tdata == NULL) { /* @@ -453,7 +453,7 @@ prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { */ return; } - + malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); if (prof_logging_state != prof_logging_state_started) { @@ -515,58 +515,58 @@ prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { label_done: malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); -} - +} + void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); - + assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; - + prof_try_log(tsd, ptr, usize, tctx); - + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); } else { malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } -} - +} + void bt_init(prof_bt_t *bt, void **vec) { cassert(config_prof); - + bt->vec = vec; bt->len = 0; } static void prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { - cassert(config_prof); + cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); - + if (tdata != NULL) { assert(!tdata->enq); tdata->enq = true; } - + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); -} - +} + static void prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { - cassert(config_prof); + cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); - + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - + if (tdata != NULL) { bool idump, gdump; - + assert(tdata->enq); tdata->enq = false; idump = tdata->enq_idump; @@ -581,257 +581,257 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { prof_gdump(tsd_tsdn(tsd)); } } -} - -#ifdef JEMALLOC_PROF_LIBUNWIND -void +} + +#ifdef JEMALLOC_PROF_LIBUNWIND +void prof_backtrace(prof_bt_t *bt) { - unw_context_t uc; - unw_cursor_t cursor; - unsigned i; - int err; - - cassert(config_prof); - assert(bt->len == 0); - assert(bt->vec != NULL); - - unw_getcontext(&uc); - unw_init_local(&cursor, &uc); - - /* - * Iterate over stack frames until there are no more, or until no space - * remains in bt. - */ - for (i = 0; i < PROF_BT_MAX; i++) { - unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); - bt->len++; - err = unw_step(&cursor); - if (err <= 0) - break; - } -} -#elif (defined(JEMALLOC_PROF_LIBGCC)) -static _Unwind_Reason_Code + unw_context_t uc; + unw_cursor_t cursor; + unsigned i; + int err; + + cassert(config_prof); + assert(bt->len == 0); + assert(bt->vec != NULL); + + unw_getcontext(&uc); + unw_init_local(&cursor, &uc); + + /* + * Iterate over stack frames until there are no more, or until no space + * remains in bt. 
+ */ + for (i = 0; i < PROF_BT_MAX; i++) { + unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); + bt->len++; + err = unw_step(&cursor); + if (err <= 0) + break; + } +} +#elif (defined(JEMALLOC_PROF_LIBGCC)) +static _Unwind_Reason_Code prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { - cassert(config_prof); - + cassert(config_prof); + return _URC_NO_REASON; -} - -static _Unwind_Reason_Code +} + +static _Unwind_Reason_Code prof_unwind_callback(struct _Unwind_Context *context, void *arg) { - prof_unwind_data_t *data = (prof_unwind_data_t *)arg; + prof_unwind_data_t *data = (prof_unwind_data_t *)arg; void *ip; - - cassert(config_prof); - + + cassert(config_prof); + ip = (void *)_Unwind_GetIP(context); if (ip == NULL) { return _URC_END_OF_STACK; - } + } data->bt->vec[data->bt->len] = ip; data->bt->len++; if (data->bt->len == data->max) { return _URC_END_OF_STACK; } - + return _URC_NO_REASON; -} - -void +} + +void prof_backtrace(prof_bt_t *bt) { prof_unwind_data_t data = {bt, PROF_BT_MAX}; - - cassert(config_prof); - - _Unwind_Backtrace(prof_unwind_callback, &data); -} -#elif (defined(JEMALLOC_PROF_GCC)) -void + + cassert(config_prof); + + _Unwind_Backtrace(prof_unwind_callback, &data); +} +#elif (defined(JEMALLOC_PROF_GCC)) +void prof_backtrace(prof_bt_t *bt) { #define BT_FRAME(i) \ if ((i) < PROF_BT_MAX) { \ - void *p; \ + void *p; \ if (__builtin_frame_address(i) == 0) { \ - return; \ + return; \ } \ - p = __builtin_return_address(i); \ + p = __builtin_return_address(i); \ if (p == NULL) { \ - return; \ - } \ + return; \ + } \ bt->vec[(i)] = p; \ bt->len = (i) + 1; \ } else { \ return; \ } - - cassert(config_prof); - - BT_FRAME(0) - BT_FRAME(1) - BT_FRAME(2) - BT_FRAME(3) - BT_FRAME(4) - BT_FRAME(5) - BT_FRAME(6) - BT_FRAME(7) - BT_FRAME(8) - BT_FRAME(9) - - BT_FRAME(10) - BT_FRAME(11) - BT_FRAME(12) - BT_FRAME(13) - BT_FRAME(14) - BT_FRAME(15) - BT_FRAME(16) - BT_FRAME(17) - BT_FRAME(18) - BT_FRAME(19) - - BT_FRAME(20) - BT_FRAME(21) - BT_FRAME(22) - BT_FRAME(23) - BT_FRAME(24) - BT_FRAME(25) - BT_FRAME(26) - BT_FRAME(27) - BT_FRAME(28) - BT_FRAME(29) - - BT_FRAME(30) - BT_FRAME(31) - BT_FRAME(32) - BT_FRAME(33) - BT_FRAME(34) - BT_FRAME(35) - BT_FRAME(36) - BT_FRAME(37) - BT_FRAME(38) - BT_FRAME(39) - - BT_FRAME(40) - BT_FRAME(41) - BT_FRAME(42) - BT_FRAME(43) - BT_FRAME(44) - BT_FRAME(45) - BT_FRAME(46) - BT_FRAME(47) - BT_FRAME(48) - BT_FRAME(49) - - BT_FRAME(50) - BT_FRAME(51) - BT_FRAME(52) - BT_FRAME(53) - BT_FRAME(54) - BT_FRAME(55) - BT_FRAME(56) - BT_FRAME(57) - BT_FRAME(58) - BT_FRAME(59) - - BT_FRAME(60) - BT_FRAME(61) - BT_FRAME(62) - BT_FRAME(63) - BT_FRAME(64) - BT_FRAME(65) - BT_FRAME(66) - BT_FRAME(67) - BT_FRAME(68) - BT_FRAME(69) - - BT_FRAME(70) - BT_FRAME(71) - BT_FRAME(72) - BT_FRAME(73) - BT_FRAME(74) - BT_FRAME(75) - BT_FRAME(76) - BT_FRAME(77) - BT_FRAME(78) - BT_FRAME(79) - - BT_FRAME(80) - BT_FRAME(81) - BT_FRAME(82) - BT_FRAME(83) - BT_FRAME(84) - BT_FRAME(85) - BT_FRAME(86) - BT_FRAME(87) - BT_FRAME(88) - BT_FRAME(89) - - BT_FRAME(90) - BT_FRAME(91) - BT_FRAME(92) - BT_FRAME(93) - BT_FRAME(94) - BT_FRAME(95) - BT_FRAME(96) - BT_FRAME(97) - BT_FRAME(98) - BT_FRAME(99) - - BT_FRAME(100) - BT_FRAME(101) - BT_FRAME(102) - BT_FRAME(103) - BT_FRAME(104) - BT_FRAME(105) - BT_FRAME(106) - BT_FRAME(107) - BT_FRAME(108) - BT_FRAME(109) - - BT_FRAME(110) - BT_FRAME(111) - BT_FRAME(112) - BT_FRAME(113) - BT_FRAME(114) - BT_FRAME(115) - BT_FRAME(116) - BT_FRAME(117) - BT_FRAME(118) - BT_FRAME(119) - - BT_FRAME(120) - BT_FRAME(121) - 
BT_FRAME(122) - BT_FRAME(123) - BT_FRAME(124) - BT_FRAME(125) - BT_FRAME(126) - BT_FRAME(127) -#undef BT_FRAME -} -#else -void + + cassert(config_prof); + + BT_FRAME(0) + BT_FRAME(1) + BT_FRAME(2) + BT_FRAME(3) + BT_FRAME(4) + BT_FRAME(5) + BT_FRAME(6) + BT_FRAME(7) + BT_FRAME(8) + BT_FRAME(9) + + BT_FRAME(10) + BT_FRAME(11) + BT_FRAME(12) + BT_FRAME(13) + BT_FRAME(14) + BT_FRAME(15) + BT_FRAME(16) + BT_FRAME(17) + BT_FRAME(18) + BT_FRAME(19) + + BT_FRAME(20) + BT_FRAME(21) + BT_FRAME(22) + BT_FRAME(23) + BT_FRAME(24) + BT_FRAME(25) + BT_FRAME(26) + BT_FRAME(27) + BT_FRAME(28) + BT_FRAME(29) + + BT_FRAME(30) + BT_FRAME(31) + BT_FRAME(32) + BT_FRAME(33) + BT_FRAME(34) + BT_FRAME(35) + BT_FRAME(36) + BT_FRAME(37) + BT_FRAME(38) + BT_FRAME(39) + + BT_FRAME(40) + BT_FRAME(41) + BT_FRAME(42) + BT_FRAME(43) + BT_FRAME(44) + BT_FRAME(45) + BT_FRAME(46) + BT_FRAME(47) + BT_FRAME(48) + BT_FRAME(49) + + BT_FRAME(50) + BT_FRAME(51) + BT_FRAME(52) + BT_FRAME(53) + BT_FRAME(54) + BT_FRAME(55) + BT_FRAME(56) + BT_FRAME(57) + BT_FRAME(58) + BT_FRAME(59) + + BT_FRAME(60) + BT_FRAME(61) + BT_FRAME(62) + BT_FRAME(63) + BT_FRAME(64) + BT_FRAME(65) + BT_FRAME(66) + BT_FRAME(67) + BT_FRAME(68) + BT_FRAME(69) + + BT_FRAME(70) + BT_FRAME(71) + BT_FRAME(72) + BT_FRAME(73) + BT_FRAME(74) + BT_FRAME(75) + BT_FRAME(76) + BT_FRAME(77) + BT_FRAME(78) + BT_FRAME(79) + + BT_FRAME(80) + BT_FRAME(81) + BT_FRAME(82) + BT_FRAME(83) + BT_FRAME(84) + BT_FRAME(85) + BT_FRAME(86) + BT_FRAME(87) + BT_FRAME(88) + BT_FRAME(89) + + BT_FRAME(90) + BT_FRAME(91) + BT_FRAME(92) + BT_FRAME(93) + BT_FRAME(94) + BT_FRAME(95) + BT_FRAME(96) + BT_FRAME(97) + BT_FRAME(98) + BT_FRAME(99) + + BT_FRAME(100) + BT_FRAME(101) + BT_FRAME(102) + BT_FRAME(103) + BT_FRAME(104) + BT_FRAME(105) + BT_FRAME(106) + BT_FRAME(107) + BT_FRAME(108) + BT_FRAME(109) + + BT_FRAME(110) + BT_FRAME(111) + BT_FRAME(112) + BT_FRAME(113) + BT_FRAME(114) + BT_FRAME(115) + BT_FRAME(116) + BT_FRAME(117) + BT_FRAME(118) + BT_FRAME(119) + + BT_FRAME(120) + BT_FRAME(121) + BT_FRAME(122) + BT_FRAME(123) + BT_FRAME(124) + BT_FRAME(125) + BT_FRAME(126) + BT_FRAME(127) +#undef BT_FRAME +} +#else +void prof_backtrace(prof_bt_t *bt) { - cassert(config_prof); - not_reached(); -} -#endif - -static malloc_mutex_t * + cassert(config_prof); + not_reached(); +} +#endif + +static malloc_mutex_t * prof_gctx_mutex_choose(void) { unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); - + return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; -} - +} + static malloc_mutex_t * prof_tdata_mutex_choose(uint64_t thr_uid) { return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; } - + static prof_gctx_t * prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { - /* + /* * Create a single allocation that has space for vec of length bt->len. */ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); @@ -843,9 +843,9 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { } gctx->lock = prof_gctx_mutex_choose(); /* - * Set nlimbo to 1, in order to avoid a race condition with + * Set nlimbo to 1, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ + */ gctx->nlimbo = 1; tctx_tree_new(&gctx->tctxs); /* Duplicate bt. 
*/ @@ -853,43 +853,43 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { gctx->bt.vec = gctx->vec; gctx->bt.len = bt->len; return gctx; -} - -static void +} + +static void prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, prof_tdata_t *tdata) { - cassert(config_prof); - - /* + cassert(config_prof); + + /* * Check that gctx is still unused by any thread cache before destroying * it. prof_lookup() increments gctx->nlimbo in order to avoid a race * condition with this function, as does prof_tctx_destroy() in order to * avoid a race between the main body of prof_tctx_destroy() and entry - * into this function. - */ + * into this function. + */ prof_enter(tsd, tdata_self); malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. */ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { - not_reached(); + not_reached(); } prof_leave(tsd, tdata_self); /* Destroy gctx. */ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); - } else { - /* + } else { + /* * Compensate for increment in prof_tctx_destroy() or - * prof_lookup(). - */ + * prof_lookup(). + */ gctx->nlimbo--; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_leave(tsd, tdata_self); - } -} - + } +} + static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); @@ -920,14 +920,14 @@ prof_gctx_should_destroy(prof_gctx_t *gctx) { return true; } -static void +static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { prof_tdata_t *tdata = tctx->tdata; prof_gctx_t *gctx = tctx->gctx; bool destroy_tdata, destroy_tctx, destroy_gctx; - + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - + assert(tctx->cnts.curobjs == 0); assert(tctx->cnts.curbytes == 0); assert(!opt_prof_accum); @@ -965,11 +965,11 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { } break; case prof_tctx_state_dumping: - /* + /* * A dumping thread needs tctx to remain valid until dumping * has finished. Change state such that the dumping thread will * complete destruction during a late dump iteration phase. - */ + */ tctx->state = prof_tctx_state_purgatory; destroy_tctx = false; destroy_gctx = false; @@ -994,29 +994,29 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { if (destroy_tctx) { idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); } -} - -static bool +} + +static bool prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { - union { + union { prof_gctx_t *p; - void *v; + void *v; } gctx, tgctx; - union { - prof_bt_t *p; - void *v; - } btkey; + union { + prof_bt_t *p; + void *v; + } btkey; bool new_gctx; - + prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { - /* bt has never been seen before. Insert it. */ + /* bt has never been seen before. Insert it. 
*/ prof_leave(tsd, tdata); tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); if (tgctx.v == NULL) { return true; - } + } prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { gctx.p = tgctx.p; @@ -1031,17 +1031,17 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, new_gctx = true; } else { new_gctx = false; - } - } else { + } + } else { tgctx.v = NULL; new_gctx = false; } if (!new_gctx) { - /* - * Increment nlimbo, in order to avoid a race condition with + /* + * Increment nlimbo, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ + */ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); gctx.p->nlimbo++; malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); @@ -1052,31 +1052,31 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, true); } - } + } prof_leave(tsd, tdata); - - *p_btkey = btkey.v; + + *p_btkey = btkey.v; *p_gctx = gctx.p; *p_new_gctx = new_gctx; return false; -} - +} + prof_tctx_t * prof_lookup(tsd_t *tsd, prof_bt_t *bt) { - union { + union { prof_tctx_t *p; - void *v; - } ret; + void *v; + } ret; prof_tdata_t *tdata; bool not_found; - - cassert(config_prof); - + + cassert(config_prof); + tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { return NULL; } - + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); if (!not_found) { /* Note double negative! */ @@ -1084,19 +1084,19 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (not_found) { - void *btkey; + void *btkey; prof_gctx_t *gctx; bool new_gctx, error; - - /* - * This thread's cache lacks bt. Look for it in the global - * cache. - */ + + /* + * This thread's cache lacks bt. Look for it in the global + * cache. + */ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, &new_gctx)) { return NULL; } - + /* Link a prof_tctx_t into gctx for this thread. 
*/ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, @@ -1104,13 +1104,13 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { if (ret.p == NULL) { if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } + } return NULL; - } + } ret.p->tdata = tdata; ret.p->thr_uid = tdata->thr_uid; ret.p->thr_discrim = tdata->thr_discrim; - memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); + memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); ret.p->gctx = gctx; ret.p->tctx_uid = tdata->tctx_uid_next++; ret.p->prepared = true; @@ -1124,17 +1124,17 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { } idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); return NULL; - } + } malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); ret.p->state = prof_tctx_state_nominal; tctx_tree_insert(&gctx->tctxs, ret.p); gctx->nlimbo--; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - } - + } + return ret.p; -} - +} + /* * The bodies of this function and prof_leakcheck() are compiled out unless heap * profiling is enabled, so that it is possible to compile jemalloc with @@ -1191,7 +1191,7 @@ prof_sample_threshold_update(prof_tdata_t *tdata) { #endif } -#ifdef JEMALLOC_JET +#ifdef JEMALLOC_JET static prof_tdata_t * prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { @@ -1202,7 +1202,7 @@ prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, return NULL; } -size_t +size_t prof_tdata_count(void) { size_t tdata_count = 0; tsdn_t *tsdn; @@ -1218,127 +1218,127 @@ prof_tdata_count(void) { size_t prof_bt_count(void) { - size_t bt_count; + size_t bt_count; tsd_t *tsd; prof_tdata_t *tdata; - + tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { return 0; } - + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); bt_count = ckh_count(&bt2gctx); malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - + return bt_count; -} -#endif - -static int +} +#endif + +static int prof_dump_open_impl(bool propagate_err, const char *filename) { - int fd; - - fd = creat(filename, 0644); + int fd; + + fd = creat(filename, 0644); if (fd == -1 && !propagate_err) { - malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n", - filename); + malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n", + filename); if (opt_abort) { - abort(); + abort(); } - } - + } + return fd; -} +} prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; - -static bool + +static bool prof_dump_flush(bool propagate_err) { - bool ret = false; - ssize_t err; - - cassert(config_prof); - + bool ret = false; + ssize_t err; + + cassert(config_prof); + err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); - if (err == -1) { + if (err == -1) { if (!propagate_err) { - malloc_write("<jemalloc>: write() failed during heap " - "profile flush\n"); + malloc_write("<jemalloc>: write() failed during heap " + "profile flush\n"); if (opt_abort) { - abort(); + abort(); } - } - ret = true; - } - prof_dump_buf_end = 0; - + } + ret = true; + } + prof_dump_buf_end = 0; + return ret; -} - -static bool +} + +static bool prof_dump_close(bool propagate_err) { - bool ret; - - assert(prof_dump_fd != -1); - ret = prof_dump_flush(propagate_err); - close(prof_dump_fd); - prof_dump_fd = -1; - + bool ret; + + assert(prof_dump_fd != -1); + ret = prof_dump_flush(propagate_err); + close(prof_dump_fd); + prof_dump_fd = -1; + return ret; -} - -static bool +} + +static bool prof_dump_write(bool propagate_err, const char *s) { size_t i, slen, n; - - cassert(config_prof); - - i = 0; - 
slen = strlen(s); - while (i < slen) { - /* Flush the buffer if it is full. */ + + cassert(config_prof); + + i = 0; + slen = strlen(s); + while (i < slen) { + /* Flush the buffer if it is full. */ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { if (prof_dump_flush(propagate_err) && propagate_err) { return true; } } - + if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) { - /* Finish writing. */ - n = slen - i; - } else { - /* Write as much of s as will fit. */ - n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; - } - memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); - prof_dump_buf_end += n; - i += n; - } + /* Finish writing. */ + n = slen - i; + } else { + /* Write as much of s as will fit. */ + n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; + } + memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); + prof_dump_buf_end += n; + i += n; + } assert(i == slen); - + return false; -} - +} + JEMALLOC_FORMAT_PRINTF(2, 3) -static bool +static bool prof_dump_printf(bool propagate_err, const char *format, ...) { - bool ret; - va_list ap; - char buf[PROF_PRINTF_BUFSIZE]; - - va_start(ap, format); - malloc_vsnprintf(buf, sizeof(buf), format, ap); - va_end(ap); - ret = prof_dump_write(propagate_err, buf); - + bool ret; + va_list ap; + char buf[PROF_PRINTF_BUFSIZE]; + + va_start(ap, format); + malloc_vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); + ret = prof_dump_write(propagate_err, buf); + return ret; -} - -static void +} + +static void prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - + malloc_mutex_lock(tsdn, tctx->gctx->lock); switch (tctx->state) { @@ -1460,33 +1460,33 @@ label_return: static void prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { - cassert(config_prof); - + cassert(config_prof); + malloc_mutex_lock(tsdn, gctx->lock); - - /* + + /* * Increment nlimbo so that gctx won't go away before dump. * Additionally, link gctx into the dump list so that it is included in - * prof_dump()'s second pass. - */ + * prof_dump()'s second pass. 
+ */ gctx->nlimbo++; gctx_tree_insert(gctxs, gctx); - + memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - + malloc_mutex_unlock(tsdn, gctx->lock); } - + struct prof_gctx_merge_iter_arg_s { tsdn_t *tsdn; size_t leak_ngctx; }; - + static prof_gctx_t * prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { struct prof_gctx_merge_iter_arg_s *arg = (struct prof_gctx_merge_iter_arg_s *)opaque; - + malloc_mutex_lock(arg->tsdn, gctx->lock); tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, (void *)arg->tsdn); @@ -1494,7 +1494,7 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { arg->leak_ngctx++; } malloc_mutex_unlock(arg->tsdn, gctx->lock); - + return NULL; } @@ -1532,7 +1532,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { next = NULL; } } while (next != NULL); - } + } gctx->nlimbo--; if (prof_gctx_should_destroy(gctx)) { gctx->nlimbo++; @@ -1541,20 +1541,20 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { } else { malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } - } + } } - + struct prof_tdata_merge_iter_arg_s { tsdn_t *tsdn; prof_cnt_t cnt_all; }; - + static prof_tdata_t * prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *opaque) { struct prof_tdata_merge_iter_arg_s *arg = (struct prof_tdata_merge_iter_arg_s *)opaque; - + malloc_mutex_lock(arg->tsdn, tdata->lock); if (!tdata->expired) { size_t tabind; @@ -1562,33 +1562,33 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, prof_tctx_t *p; void *v; } tctx; - + tdata->dumping = true; memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, &tctx.v);) { prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); } - + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; if (opt_prof_accum) { arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; } - } else { + } else { tdata->dumping = false; - } + } malloc_mutex_unlock(arg->tsdn, tdata->lock); - + return NULL; -} - +} + static prof_tdata_t * prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { bool propagate_err = *(bool *)arg; - + if (!tdata->dumping) { return NULL; } @@ -1603,13 +1603,13 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, return tdata; } return NULL; -} - +} + static bool prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { bool ret; - + if (prof_dump_printf(propagate_err, "heap_v2/%"FMTu64"\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", @@ -1623,19 +1623,19 @@ prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, (void *)&propagate_err) != NULL); malloc_mutex_unlock(tsdn, &tdatas_mtx); return ret; -} +} prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; - -static bool + +static bool prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { - bool ret; - unsigned i; + bool ret; + unsigned i; struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; - - cassert(config_prof); + + cassert(config_prof); malloc_mutex_assert_owner(tsdn, gctx->lock); - + /* Avoid dumping such gctx's that have no useful data. 
*/ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { @@ -1643,31 +1643,31 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, assert(gctx->cnt_summed.curbytes == 0); assert(gctx->cnt_summed.accumobjs == 0); assert(gctx->cnt_summed.accumbytes == 0); - ret = false; - goto label_return; - } - + ret = false; + goto label_return; + } + if (prof_dump_printf(propagate_err, "@")) { - ret = true; - goto label_return; - } - for (i = 0; i < bt->len; i++) { + ret = true; + goto label_return; + } + for (i = 0; i < bt->len; i++) { if (prof_dump_printf(propagate_err, " %#"FMTxPTR, - (uintptr_t)bt->vec[i])) { - ret = true; - goto label_return; - } - } - + (uintptr_t)bt->vec[i])) { + ret = true; + goto label_return; + } + } + if (prof_dump_printf(propagate_err, "\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { - ret = true; - goto label_return; - } - + ret = true; + goto label_return; + } + prof_tctx_dump_iter_arg.tsdn = tsdn; prof_tctx_dump_iter_arg.propagate_err = propagate_err; if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, @@ -1676,11 +1676,11 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, goto label_return; } - ret = false; -label_return: + ret = false; +label_return: return ret; -} - +} + #ifndef _WIN32 JEMALLOC_FORMAT_PRINTF(1, 2) static int @@ -1715,17 +1715,17 @@ prof_getpid(void) { #endif } -static bool +static bool prof_dump_maps(bool propagate_err) { - bool ret; - int mfd; - - cassert(config_prof); -#ifdef __FreeBSD__ + bool ret; + int mfd; + + cassert(config_prof); +#ifdef __FreeBSD__ mfd = prof_open_maps("/proc/curproc/map"); #elif defined(_WIN32) mfd = -1; // Not implemented -#else +#else { int pid = prof_getpid(); @@ -1734,48 +1734,48 @@ prof_dump_maps(bool propagate_err) { mfd = prof_open_maps("/proc/%d/maps", pid); } } -#endif - if (mfd != -1) { - ssize_t nread; - - if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && - propagate_err) { - ret = true; - goto label_return; - } - nread = 0; - do { - prof_dump_buf_end += nread; - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { - /* Make space in prof_dump_buf before read(). */ - if (prof_dump_flush(propagate_err) && - propagate_err) { - ret = true; - goto label_return; - } - } +#endif + if (mfd != -1) { + ssize_t nread; + + if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && + propagate_err) { + ret = true; + goto label_return; + } + nread = 0; + do { + prof_dump_buf_end += nread; + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + /* Make space in prof_dump_buf before read(). */ + if (prof_dump_flush(propagate_err) && + propagate_err) { + ret = true; + goto label_return; + } + } nread = malloc_read_fd(mfd, &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE - prof_dump_buf_end); - } while (nread > 0); - } else { - ret = true; - goto label_return; - } - - ret = false; -label_return: + } while (nread > 0); + } else { + ret = true; + goto label_return; + } + + ret = false; +label_return: if (mfd != -1) { - close(mfd); + close(mfd); } return ret; -} - +} + /* * See prof_sample_threshold_update() comment for why the body of this function * is conditionally compiled. 
*/ -static void +static void prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, const char *filename) { #ifdef JEMALLOC_PROF @@ -1794,18 +1794,18 @@ prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, * scale_factor); uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * scale_factor); - + malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64 " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : ""); - malloc_printf( + malloc_printf( "<jemalloc>: Run jeprof on \"%s\" for leak detail\n", - filename); - } + filename); + } #endif -} - +} + struct prof_gctx_dump_iter_arg_s { tsdn_t *tsdn; bool propagate_err; @@ -1836,14 +1836,14 @@ prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, prof_gctx_tree_t *gctxs) { - size_t tabind; - union { + size_t tabind; + union { prof_gctx_t *p; - void *v; + void *v; } gctx; - + prof_enter(tsd, tdata); - + /* * Put gctx's in limbo and clear their counters in preparation for * summing. @@ -1852,7 +1852,7 @@ prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); } - + /* * Iterate over tdatas, and for the non-expired ones snapshot their tctx * stats and merge them into the associated gctx's. @@ -1863,13 +1863,13 @@ prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)prof_tdata_merge_iter_arg); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - + /* Merge tctx stats into gctx's. */ prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); prof_gctx_merge_iter_arg->leak_ngctx = 0; gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, (void *)prof_gctx_merge_iter_arg); - + prof_leave(tsd, tdata); } @@ -1880,40 +1880,40 @@ prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, prof_gctx_tree_t *gctxs) { - /* Create dump file. */ + /* Create dump file. */ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { return true; } - - /* Dump profile header. */ + + /* Dump profile header. */ if (prof_dump_header(tsd_tsdn(tsd), propagate_err, &prof_tdata_merge_iter_arg->cnt_all)) { - goto label_write_error; + goto label_write_error; } - + /* Dump per gctx profile stats. */ prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); prof_gctx_dump_iter_arg->propagate_err = propagate_err; if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, (void *)prof_gctx_dump_iter_arg) != NULL) { goto label_write_error; - } - - /* Dump /proc/<pid>/maps if possible. */ + } + + /* Dump /proc/<pid>/maps if possible. 
*/ if (prof_dump_maps(propagate_err)) { - goto label_write_error; + goto label_write_error; } - + if (prof_dump_close(propagate_err)) { return true; } - + return false; -label_write_error: - prof_dump_close(propagate_err); +label_write_error: + prof_dump_close(propagate_err); return true; -} - +} + static bool prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) { @@ -2002,39 +2002,39 @@ prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, #define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) #define VSEQ_INVALID UINT64_C(0xffffffffffffffff) -static void +static void prof_dump_filename(char *filename, char v, uint64_t vseq) { - cassert(config_prof); - - if (vseq != VSEQ_INVALID) { - /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + cassert(config_prof); + + if (vseq != VSEQ_INVALID) { + /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c%"FMTu64".heap", opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); - } else { - /* "<prefix>.<pid>.<seq>.<v>.heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + } else { + /* "<prefix>.<pid>.<seq>.<v>.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c.heap", opt_prof_prefix, prof_getpid(), prof_dump_seq, v); - } - prof_dump_seq++; -} - -static void + } + prof_dump_seq++; +} + +static void prof_fdump(void) { tsd_t *tsd; - char filename[DUMP_FILENAME_BUFSIZE]; - - cassert(config_prof); + char filename[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); assert(opt_prof_final); assert(opt_prof_prefix[0] != '\0'); - + if (!prof_booted) { - return; + return; } tsd = tsd_fetch(); assert(tsd_reentrancy_level_get(tsd) == 0); - + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'f', VSEQ_INVALID); malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); @@ -2049,133 +2049,133 @@ prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { return true; - } + } prof_accum->accumbytes = 0; #else atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); #endif return false; -} - -void +} + +void prof_idump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; - - cassert(config_prof); - + + cassert(config_prof); + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { - return; + return; } tsd = tsdn_tsd(tsdn); if (tsd_reentrancy_level_get(tsd) > 0) { - return; + return; } tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { - return; - } + return; + } if (tdata->enq) { tdata->enq_idump = true; return; } - - if (opt_prof_prefix[0] != '\0') { + + if (opt_prof_prefix[0] != '\0') { char filename[PATH_MAX + 1]; malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump_filename(filename, 'i', prof_dump_iseq); - prof_dump_iseq++; + prof_dump_filename(filename, 'i', prof_dump_iseq); + prof_dump_iseq++; malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); - } -} - -bool + } +} + +bool prof_mdump(tsd_t *tsd, const char *filename) { - cassert(config_prof); + cassert(config_prof); assert(tsd_reentrancy_level_get(tsd) == 0); - + if (!opt_prof || !prof_booted) { return true; } char filename_buf[DUMP_FILENAME_BUFSIZE]; - if (filename == NULL) { - /* No filename specified, so automatically generate one. */ + if (filename == NULL) { + /* No filename specified, so automatically generate one. 
*/ if (opt_prof_prefix[0] == '\0') { return true; } malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump_filename(filename_buf, 'm', prof_dump_mseq); - prof_dump_mseq++; + prof_dump_filename(filename_buf, 'm', prof_dump_mseq); + prof_dump_mseq++; malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - filename = filename_buf; - } + filename = filename_buf; + } return prof_dump(tsd, true, filename, false); -} - -void +} + +void prof_gdump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; - - cassert(config_prof); - + + cassert(config_prof); + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { - return; + return; } tsd = tsdn_tsd(tsdn); if (tsd_reentrancy_level_get(tsd) > 0) { - return; + return; } tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { - return; - } + return; + } if (tdata->enq) { tdata->enq_gdump = true; return; } - - if (opt_prof_prefix[0] != '\0') { + + if (opt_prof_prefix[0] != '\0') { char filename[DUMP_FILENAME_BUFSIZE]; malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); - prof_dump_filename(filename, 'u', prof_dump_useq); - prof_dump_useq++; + prof_dump_filename(filename, 'u', prof_dump_useq); + prof_dump_useq++; malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); - } -} - -static void + } +} + +static void prof_bt_hash(const void *key, size_t r_hash[2]) { - prof_bt_t *bt = (prof_bt_t *)key; - - cassert(config_prof); - - hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); -} - -static bool + prof_bt_t *bt = (prof_bt_t *)key; + + cassert(config_prof); + + hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); +} + +static bool prof_bt_keycomp(const void *k1, const void *k2) { - const prof_bt_t *bt1 = (prof_bt_t *)k1; - const prof_bt_t *bt2 = (prof_bt_t *)k2; - - cassert(config_prof); - + const prof_bt_t *bt1 = (prof_bt_t *)k1; + const prof_bt_t *bt2 = (prof_bt_t *)k2; + + cassert(config_prof); + if (bt1->len != bt2->len) { return false; } - return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); -} - + return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); +} + static void prof_bt_node_hash(const void *key, size_t r_hash[2]) { const prof_bt_node_t *bt_node = (prof_bt_node_t *)key; prof_bt_hash((void *)(&bt_node->bt), r_hash); } - + static bool prof_bt_node_keycomp(const void *k1, const void *k2) { const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; @@ -2214,16 +2214,16 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, char *thread_name, bool active) { prof_tdata_t *tdata; - cassert(config_prof); - - /* Initialize an empty cache for this thread. */ + cassert(config_prof); + + /* Initialize an empty cache for this thread. 
*/ tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (tdata == NULL) { return NULL; } - + tdata->lock = prof_tdata_mutex_choose(thr_uid); tdata->thr_uid = thr_uid; tdata->thr_discrim = thr_discrim; @@ -2236,8 +2236,8 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, prof_bt_keycomp)) { idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); return NULL; - } - + } + tdata->prng_state = (uint64_t)(uintptr_t)tdata; prof_sample_threshold_update(tdata); @@ -2265,26 +2265,26 @@ static bool prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { if (tdata->attached && !even_if_attached) { return false; - } + } if (ckh_count(&tdata->bt2tctx) != 0) { return false; } return true; } - + static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsdn, tdata->lock); - + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); } - + static void prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); - + tdata_tree_remove(&tdatas, tdata); assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); @@ -2295,40 +2295,40 @@ prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, } ckh_delete(tsd, &tdata->bt2tctx); idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); -} - +} + static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); prof_tdata_destroy_locked(tsd, tdata, even_if_attached); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); } - + static void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { bool destroy_tdata; - + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); if (tdata->attached) { destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, true); - /* + /* * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. - */ + */ if (!destroy_tdata) { tdata->attached = false; - } + } tsd_prof_tdata_set(tsd, NULL); } else { destroy_tdata = false; - } + } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, true); } -} - +} + prof_tdata_t * prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { uint64_t thr_uid = tdata->thr_uid; @@ -2367,10 +2367,10 @@ prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); } -void +void prof_reset(tsd_t *tsd, size_t lg_sample) { prof_tdata_t *next; - + assert(lg_sample < (sizeof(uint64_t) << 3)); malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); @@ -2930,57 +2930,57 @@ prof_gdump_set(tsdn_t *tsdn, bool gdump) { void prof_boot0(void) { - cassert(config_prof); - - memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, - sizeof(PROF_PREFIX_DEFAULT)); -} - -void + cassert(config_prof); + + memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, + sizeof(PROF_PREFIX_DEFAULT)); +} + +void prof_boot1(void) { - cassert(config_prof); - - /* + cassert(config_prof); + + /* * opt_prof must be in its final state before any arenas are * initialized, so this function must be executed early. - */ - + */ + if (opt_prof_leak && !opt_prof) { - /* - * Enable opt_prof, but in such a way that profiles are never - * automatically dumped. 
- */ - opt_prof = true; - opt_prof_gdump = false; - } else if (opt_prof) { - if (opt_lg_prof_interval >= 0) { - prof_interval = (((uint64_t)1U) << - opt_lg_prof_interval); - } - } -} - -bool + /* + * Enable opt_prof, but in such a way that profiles are never + * automatically dumped. + */ + opt_prof = true; + opt_prof_gdump = false; + } else if (opt_prof) { + if (opt_lg_prof_interval >= 0) { + prof_interval = (((uint64_t)1U) << + opt_lg_prof_interval); + } + } +} + +bool prof_boot2(tsd_t *tsd) { - cassert(config_prof); - - if (opt_prof) { - unsigned i; - + cassert(config_prof); + + if (opt_prof) { + unsigned i; + lg_prof_sample = opt_lg_prof_sample; prof_active = opt_prof_active; if (malloc_mutex_init(&prof_active_mtx, "prof_active", WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { return true; - } - + } + prof_gdump_val = opt_prof_gdump; if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { return true; } - + prof_thread_active_init = opt_prof_thread_active_init; if (malloc_mutex_init(&prof_thread_active_init_mtx, "prof_thread_active_init", @@ -3021,12 +3021,12 @@ prof_boot2(tsd_t *tsd) { if (opt_prof_final && opt_prof_prefix[0] != '\0' && atexit(prof_fdump) != 0) { - malloc_write("<jemalloc>: Error in atexit()\n"); + malloc_write("<jemalloc>: Error in atexit()\n"); if (opt_abort) { - abort(); + abort(); } - } - + } + if (opt_prof_log) { prof_log_start(tsd_tsdn(tsd), NULL); } @@ -3062,14 +3062,14 @@ prof_boot2(tsd_t *tsd) { if (gctx_locks == NULL) { return true; } - for (i = 0; i < PROF_NCTX_LOCKS; i++) { + for (i = 0; i < PROF_NCTX_LOCKS; i++) { if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", WITNESS_RANK_PROF_GCTX, malloc_mutex_rank_exclusive)) { return true; } - } - + } + tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), CACHELINE); @@ -3083,24 +3083,24 @@ prof_boot2(tsd_t *tsd) { return true; } } -#ifdef JEMALLOC_PROF_LIBGCC +#ifdef JEMALLOC_PROF_LIBGCC /* * Cause the backtracing machinery to allocate its internal * state before enabling profiling. 
*/ _Unwind_Backtrace(prof_unwind_init_callback, NULL); -#endif +#endif } - prof_booted = true; - + prof_booted = true; + return false; -} - -void +} + +void prof_prefork0(tsdn_t *tsdn) { if (config_prof && opt_prof) { - unsigned i; - + unsigned i; + malloc_mutex_prefork(tsdn, &prof_dump_mtx); malloc_mutex_prefork(tsdn, &bt2gctx_mtx); malloc_mutex_prefork(tsdn, &tdatas_mtx); @@ -3110,10 +3110,10 @@ prof_prefork0(tsdn_t *tsdn) { for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_prefork(tsdn, &gctx_locks[i]); } - } -} - -void + } +} + +void prof_prefork1(tsdn_t *tsdn) { if (config_prof && opt_prof) { malloc_mutex_prefork(tsdn, &prof_active_mtx); @@ -3123,12 +3123,12 @@ prof_prefork1(tsdn_t *tsdn) { malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); } } - + void prof_postfork_parent(tsdn_t *tsdn) { if (config_prof && opt_prof) { - unsigned i; - + unsigned i; + malloc_mutex_postfork_parent(tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); @@ -3144,14 +3144,14 @@ prof_postfork_parent(tsdn_t *tsdn) { malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); - } -} - -void + } +} + +void prof_postfork_child(tsdn_t *tsdn) { if (config_prof && opt_prof) { - unsigned i; - + unsigned i; + malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); @@ -3166,7 +3166,7 @@ prof_postfork_child(tsdn_t *tsdn) { malloc_mutex_postfork_child(tsdn, &tdatas_mtx); malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); - } -} - -/******************************************************************************/ + } +} + +/******************************************************************************/ |
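
A note on the sampling machinery referenced above: prof_alloc_rollback() and the comment pointing at prof_sample_threshold_update() rely on drawing each sampling interval as a geometric random variable with mean 2^lg_prof_sample bytes. The following is a minimal standalone sketch of that draw, assuming the standard geometric formula floor(log(u)/log(1-p)) + 1; the function name is illustrative, and jemalloc itself uses its internal PRNG rather than rand():

#include <math.h>
#include <stdint.h>
#include <stdlib.h>

/*
 * Draw the number of bytes to allocate before the next sample, as a
 * geometric random variable with success probability p = 2^-lg_sample
 * (so the mean interval is 2^lg_sample bytes). Illustrative sketch,
 * not jemalloc's exact code.
 */
static uint64_t
next_sample_threshold(unsigned lg_sample) {
	/* u uniform in (0, 1]; the +1 keeps log(u) finite. */
	double u = ((double)rand() + 1.0) / ((double)RAND_MAX + 1.0);
	double p = 1.0 / (double)((uint64_t)1U << lg_sample);
	/* Geometric draw: floor(log(u) / log(1 - p)) + 1. */
	return (uint64_t)(log(u) / log(1.0 - p)) + 1;
}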
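
The dump path (prof_dump_write(), prof_dump_flush(), prof_dump_maps()) all share one pattern: append into the single static prof_dump_buf and flush it to the dump file descriptor whenever it fills, so a dump never needs a large stack buffer. A hedged sketch of that buffered-writer pattern, with illustrative names (buf, buf_end, flush) and error handling omitted where the real code propagates or aborts:

#include <string.h>
#include <unistd.h>

#define BUFSIZE 65536	/* Stand-in for PROF_DUMP_BUFSIZE. */

static char buf[BUFSIZE];
static size_t buf_end;
static int dump_fd;

static void
flush(void) {
	/* Real code checks for write() failure and may abort. */
	(void)write(dump_fd, buf, buf_end);
	buf_end = 0;
}

static void
buffered_write(const char *s) {
	size_t i = 0, slen = strlen(s);
	while (i < slen) {
		/* Flush the buffer if it is full. */
		if (buf_end == BUFSIZE) {
			flush();
		}
		/* Copy as much of the remainder as fits. */
		size_t n = slen - i;
		if (n > BUFSIZE - buf_end) {
			n = BUFSIZE - buf_end;
		}
		memcpy(&buf[buf_end], &s[i], n);
		buf_end += n;
		i += n;
	}
}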
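
On the hand-unrolled BT_FRAME(0) through BT_FRAME(127) sequence in the JEMALLOC_PROF_GCC fallback: __builtin_frame_address() and __builtin_return_address() accept only integer constant arguments, so the frame index cannot come from a loop variable, hence one macro expansion per frame up to PROF_BT_MAX. A two-frame illustration of the same walk (hypothetical helper; the real code stops at the first null frame or return address exactly as below):

/* Capture up to two return addresses into vec; returns frames captured. */
static int
capture_two_frames(void **vec) {
	/* The builtins' arguments must be compile-time constants. */
	if (__builtin_frame_address(0) == 0) {
		return 0;
	}
	vec[0] = __builtin_return_address(0);
	if (vec[0] == NULL) {
		return 0;
	}
	if (__builtin_frame_address(1) == 0) {
		return 1;
	}
	vec[1] = __builtin_return_address(1);
	if (vec[1] == NULL) {
		return 1;
	}
	return 2;
}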