size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
-bool opt_prof_final = true;
+bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;
+/*
+ * Initialized as opt_prof_gdump, and accessed via
+ * prof_gdump_[gs]et{_unlocked,}().
+ */
+bool prof_gdump_val;
+static malloc_mutex_t prof_gdump_mtx;
+
uint64_t prof_interval = 0;
size_t lg_prof_sample;
1
#endif
];
-static unsigned prof_dump_buf_end;
+static size_t prof_dump_buf_end;
static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
JEMALLOC_INLINE_C int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
{
-	uint64_t a_uid = a->tdata->thr_uid;
-	uint64_t b_uid = b->tdata->thr_uid;
-
-	return ((a_uid > b_uid) - (a_uid < b_uid));
+	/*
+	 * Total order on tctx's: thr_uid first, then thr_discrim, then
+	 * tctx_uid.  Each step uses (a > b) - (a < b), which yields exactly
+	 * -1, 0, or 1.  The keys are copied into the tctx itself (rather
+	 * than read through tctx->tdata as before), so the comparator no
+	 * longer dereferences the owning tdata.
+	 */
+	uint64_t a_thr_uid = a->thr_uid;
+	uint64_t b_thr_uid = b->thr_uid;
+	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+	if (ret == 0) {
+		uint64_t a_thr_discrim = a->thr_discrim;
+		uint64_t b_thr_discrim = b->thr_discrim;
+		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+		    b_thr_discrim);
+		if (ret == 0) {
+			/* Final tie-break: per-tdata monotonic tctx_uid. */
+			uint64_t a_tctx_uid = a->tctx_uid;
+			uint64_t b_tctx_uid = b->tctx_uid;
+			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+			    b_tctx_uid);
+		}
+	}
+	return (ret);
}
rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
*/
tdata = prof_tdata_get(tsd, true);
if (tdata != NULL)
- prof_sample_threshold_update(tctx->tdata);
+ prof_sample_threshold_update(tdata);
}
if ((uintptr_t)tctx > (uintptr_t)1U) {
}
void
-prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) {
- prof_tctx_set(ptr, tctx);
+prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
+{
+
+ prof_tctx_set(ptr, usize, tctx);
malloc_mutex_lock(tctx->tdata->lock);
tctx->cnts.curobjs++;
bt->len = 0;
}
-static inline void
-prof_enter(prof_tdata_t *tdata)
+JEMALLOC_INLINE_C void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
{
cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
- assert(!tdata->enq);
- tdata->enq = true;
+ if (tdata != NULL) {
+ assert(!tdata->enq);
+ tdata->enq = true;
+ }
malloc_mutex_lock(&bt2gctx_mtx);
}
-static inline void
-prof_leave(prof_tdata_t *tdata)
+JEMALLOC_INLINE_C void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
{
- bool idump, gdump;
cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
malloc_mutex_unlock(&bt2gctx_mtx);
- assert(tdata->enq);
- tdata->enq = false;
- idump = tdata->enq_idump;
- tdata->enq_idump = false;
- gdump = tdata->enq_gdump;
- tdata->enq_gdump = false;
+ if (tdata != NULL) {
+ bool idump, gdump;
+
+ assert(tdata->enq);
+ tdata->enq = false;
+ idump = tdata->enq_idump;
+ tdata->enq_idump = false;
+ gdump = tdata->enq_gdump;
+ tdata->enq_gdump = false;
- if (idump)
- prof_idump();
- if (gdump)
- prof_gdump();
+ if (idump)
+ prof_idump();
+ if (gdump)
+ prof_gdump();
+ }
}
#ifdef JEMALLOC_PROF_LIBUNWIND
/*
* Create a single allocation that has space for vec of length bt->len.
*/
- prof_gctx_t *gctx = (prof_gctx_t *)imalloc(tsd, offsetof(prof_gctx_t,
- vec) + (bt->len * sizeof(void *)));
+ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, size,
+ size2index(size), false, tcache_get(tsd, true), true, NULL, true);
if (gctx == NULL)
return (NULL);
gctx->lock = prof_gctx_mutex_choose();
}
static void
-prof_gctx_try_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
+prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
+ prof_tdata_t *tdata)
{
cassert(config_prof);
* avoid a race between the main body of prof_tctx_destroy() and entry
* into this function.
*/
- prof_enter(tdata);
+ prof_enter(tsd, tdata_self);
malloc_mutex_lock(gctx->lock);
assert(gctx->nlimbo != 0);
if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
/* Remove gctx from bt2gctx. */
if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
not_reached();
- prof_leave(tdata);
+ prof_leave(tsd, tdata_self);
/* Destroy gctx. */
malloc_mutex_unlock(gctx->lock);
- idalloc(tsd, gctx);
+ idalloctm(tsd, gctx, tcache_get(tsd, false), true, true);
} else {
/*
* Compensate for increment in prof_tctx_destroy() or
*/
gctx->nlimbo--;
malloc_mutex_unlock(gctx->lock);
- prof_leave(tdata);
+ prof_leave(tsd, tdata_self);
}
}
{
prof_tdata_t *tdata = tctx->tdata;
prof_gctx_t *gctx = tctx->gctx;
- bool destroy_tdata, destroy_gctx;
+ bool destroy_tdata, destroy_tctx, destroy_gctx;
assert(tctx->cnts.curobjs == 0);
assert(tctx->cnts.curbytes == 0);
malloc_mutex_unlock(tdata->lock);
malloc_mutex_lock(gctx->lock);
- tctx_tree_remove(&gctx->tctxs, tctx);
- if (prof_gctx_should_destroy(gctx)) {
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ tctx_tree_remove(&gctx->tctxs, tctx);
+ destroy_tctx = true;
+ if (prof_gctx_should_destroy(gctx)) {
+ /*
+ * Increment gctx->nlimbo in order to keep another
+ * thread from winning the race to destroy gctx while
+ * this one has gctx->lock dropped. Without this, it
+ * would be possible for another thread to:
+ *
+ * 1) Sample an allocation associated with gctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_gctx_try_destroy(gctx).
+ *
+ * The result would be that gctx no longer exists by the
+ * time this thread accesses it in
+ * prof_gctx_try_destroy().
+ */
+ gctx->nlimbo++;
+ destroy_gctx = true;
+ } else
+ destroy_gctx = false;
+ break;
+ case prof_tctx_state_dumping:
/*
- * Increment gctx->nlimbo in order to keep another thread from
- * winning the race to destroy gctx while this one has
- * gctx->lock dropped. Without this, it would be possible for
- * another thread to:
- *
- * 1) Sample an allocation associated with gctx.
- * 2) Deallocate the sampled object.
- * 3) Successfully prof_gctx_try_destroy(gctx).
- *
- * The result would be that gctx no longer exists by the time
- * this thread accesses it in prof_gctx_try_destroy().
+ * A dumping thread needs tctx to remain valid until dumping
+ * has finished. Change state such that the dumping thread will
+ * complete destruction during a late dump iteration phase.
*/
- gctx->nlimbo++;
- destroy_gctx = true;
- } else
+ tctx->state = prof_tctx_state_purgatory;
+ destroy_tctx = false;
destroy_gctx = false;
+ break;
+ default:
+ not_reached();
+ destroy_tctx = false;
+ destroy_gctx = false;
+ }
malloc_mutex_unlock(gctx->lock);
- if (destroy_gctx)
- prof_gctx_try_destroy(tsd, gctx, tdata);
+ if (destroy_gctx) {
+ prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
+ tdata);
+ }
if (destroy_tdata)
prof_tdata_destroy(tsd, tdata, false);
- idalloc(tsd, tctx);
+ if (destroy_tctx)
+ idalloctm(tsd, tctx, tcache_get(tsd, false), true, true);
}
static bool
} btkey;
bool new_gctx;
- prof_enter(tdata);
+ prof_enter(tsd, tdata);
if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */
gctx.p = prof_gctx_create(tsd, bt);
if (gctx.v == NULL) {
- prof_leave(tdata);
+ prof_leave(tsd, tdata);
return (true);
}
btkey.p = &gctx.p->bt;
if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
/* OOM. */
- prof_leave(tdata);
- idalloc(tsd, gctx.v);
+ prof_leave(tsd, tdata);
+ idalloctm(tsd, gctx.v, tcache_get(tsd, false), true,
+ true);
return (true);
}
new_gctx = true;
malloc_mutex_unlock(gctx.p->lock);
new_gctx = false;
}
- prof_leave(tdata);
+ prof_leave(tsd, tdata);
*p_btkey = btkey.v;
*p_gctx = gctx.p;
ret.p->prepared = true;
malloc_mutex_unlock(tdata->lock);
if (not_found) {
+ tcache_t *tcache;
void *btkey;
prof_gctx_t *gctx;
bool new_gctx, error;
return (NULL);
/* Link a prof_tctx_t into gctx for this thread. */
- ret.v = imalloc(tsd, sizeof(prof_tctx_t));
+ tcache = tcache_get(tsd, true);
+ ret.v = iallocztm(tsd, sizeof(prof_tctx_t),
+ size2index(sizeof(prof_tctx_t)), false, tcache, true, NULL,
+ true);
if (ret.p == NULL) {
if (new_gctx)
- prof_gctx_try_destroy(tsd, gctx, tdata);
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
return (NULL);
}
ret.p->tdata = tdata;
+ ret.p->thr_uid = tdata->thr_uid;
+ ret.p->thr_discrim = tdata->thr_discrim;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
ret.p->gctx = gctx;
+ ret.p->tctx_uid = tdata->tctx_uid_next++;
ret.p->prepared = true;
ret.p->state = prof_tctx_state_initializing;
malloc_mutex_lock(tdata->lock);
malloc_mutex_unlock(tdata->lock);
if (error) {
if (new_gctx)
- prof_gctx_try_destroy(tsd, gctx, tdata);
- idalloc(tsd, ret.v);
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ idalloctm(tsd, ret.v, tcache, true, true);
return (NULL);
}
malloc_mutex_lock(gctx->lock);
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
*/
- prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
- UINT64_C(1442695040888963407));
+ r = prng_lg_range(&tdata->prng_state, 53);
u = (double)r * (1.0/9007199254740992.0L);
tdata->bytes_until_sample = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
if (tdata == NULL)
return (0);
- prof_enter(tdata);
+ malloc_mutex_lock(&bt2gctx_mtx);
bt_count = ckh_count(&bt2gctx);
- prof_leave(tdata);
+ malloc_mutex_unlock(&bt2gctx_mtx);
return (bt_count);
}
static bool
prof_dump_write(bool propagate_err, const char *s)
{
- unsigned i, slen, n;
+ size_t i, slen, n;
cassert(config_prof);
return (false);
}
-JEMALLOC_ATTR(format(printf, 2, 3))
+JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{
{
malloc_mutex_lock(tctx->gctx->lock);
- if (tctx->state == prof_tctx_state_initializing) {
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
malloc_mutex_unlock(tctx->gctx->lock);
return;
- }
- assert(tctx->state == prof_tctx_state_nominal);
- tctx->state = prof_tctx_state_dumping;
- malloc_mutex_unlock(tctx->gctx->lock);
+ case prof_tctx_state_nominal:
+ tctx->state = prof_tctx_state_dumping;
+ malloc_mutex_unlock(tctx->gctx->lock);
- memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
+ memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
- tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
- tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
- if (opt_prof_accum) {
- tdata->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
- tdata->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
+ tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ if (opt_prof_accum) {
+ tdata->cnt_summed.accumobjs +=
+ tctx->dump_cnts.accumobjs;
+ tdata->cnt_summed.accumbytes +=
+ tctx->dump_cnts.accumbytes;
+ }
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ not_reached();
}
}
{
bool propagate_err = *(bool *)arg;
- if (prof_dump_printf(propagate_err,
- " t%"PRIu64": %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]\n",
- tctx->tdata->thr_uid, tctx->dump_cnts.curobjs,
- tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
- tctx->dump_cnts.accumbytes))
- return (tctx);
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ case prof_tctx_state_nominal:
+ /* Not captured by this dump. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ if (prof_dump_printf(propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
+ "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
+ tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
+ tctx->dump_cnts.accumbytes))
+ return (tctx);
+ break;
+ default:
+ not_reached();
+ }
return (NULL);
}
to_destroy);
tctx_tree_remove(&gctx->tctxs,
to_destroy);
- idalloc(tsd, to_destroy);
+ idalloctm(tsd, to_destroy,
+ tcache_get(tsd, false), true, true);
} else
next = NULL;
} while (next != NULL);
if (prof_gctx_should_destroy(gctx)) {
gctx->nlimbo++;
malloc_mutex_unlock(gctx->lock);
- prof_gctx_try_destroy(tsd, gctx, tdata);
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
} else
malloc_mutex_unlock(gctx->lock);
}
return (NULL);
if (prof_dump_printf(propagate_err,
- " t%"PRIu64": %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]%s%s\n",
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
tdata->thr_uid, tdata->cnt_summed.curobjs,
tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
tdata->cnt_summed.accumbytes,
bool ret;
if (prof_dump_printf(propagate_err,
- "heap_v2/%"PRIu64"\n"
- " t*: %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]\n",
+ "heap_v2/%"FMTu64"\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
return (true);
goto label_return;
}
for (i = 0; i < bt->len; i++) {
- if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
+ if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
(uintptr_t)bt->vec[i])) {
ret = true;
goto label_return;
if (prof_dump_printf(propagate_err,
"\n"
- " t*: %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]\n",
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
ret = true;
return (ret);
}
+#ifndef _WIN32
+/*
+ * Format a /proc-style path from printf-style arguments and open it
+ * read-only.  Returns the file descriptor, or -1 if open() fails
+ * (e.g. the path does not exist on this kernel).
+ */
+JEMALLOC_FORMAT_PRINTF(1, 2)
+static int
+prof_open_maps(const char *format, ...)
+{
+	int mfd;
+	va_list ap;
+	char filename[PATH_MAX + 1];
+
+	va_start(ap, format);
+	malloc_vsnprintf(filename, sizeof(filename), format, ap);
+	va_end(ap);
+	mfd = open(filename, O_RDONLY);
+
+	return (mfd);
+}
+#endif
+
+/*
+ * Portable process-id accessor: GetCurrentProcessId() on Windows,
+ * getpid() elsewhere.  Used when composing dump filenames and
+ * /proc paths.
+ */
+static int
+prof_getpid(void)
+{
+
+#ifdef _WIN32
+	return (GetCurrentProcessId());
+#else
+	return (getpid());
+#endif
+}
+
static bool
prof_dump_maps(bool propagate_err)
{
bool ret;
int mfd;
- char filename[PATH_MAX + 1];
cassert(config_prof);
#ifdef __FreeBSD__
- malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
+ mfd = prof_open_maps("/proc/curproc/map");
+#elif defined(_WIN32)
+ mfd = -1; // Not implemented
#else
- malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
- (int)getpid());
+ {
+ int pid = prof_getpid();
+
+ mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
+ if (mfd == -1)
+ mfd = prof_open_maps("/proc/%d/maps", pid);
+ }
#endif
- mfd = open(filename, O_RDONLY);
if (mfd != -1) {
ssize_t nread;
{
if (cnt_all->curbytes != 0) {
- malloc_printf("<jemalloc>: Leak summary: %"PRIu64" byte%s, %"
- PRIu64" object%s, %zu context%s\n",
+ malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
+ FMTu64" object%s, %zu context%s\n",
cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf(
- "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
+ "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename);
}
}
return (true);
malloc_mutex_lock(&prof_dump_mtx);
- prof_enter(tdata);
+ prof_enter(tsd, tdata);
/*
* Put gctx's in limbo and clear their counters in preparation for
leak_ngctx = 0;
gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
- prof_leave(tdata);
+ prof_leave(tsd, tdata);
/* Create dump file. */
if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
if (vseq != VSEQ_INVALID) {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"PRIu64".%c%"PRIu64".heap",
- opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
+ "%s.%d.%"FMTu64".%c%"FMTu64".heap",
+ opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"PRIu64".%c.heap",
- opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
+ "%s.%d.%"FMTu64".%c.heap",
+ opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
}
prof_dump_seq++;
}
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
+ assert(opt_prof_final);
+ assert(opt_prof_prefix[0] != '\0');
if (!prof_booted)
return;
tsd = tsd_fetch();
- if (opt_prof_final && opt_prof_prefix[0] != '\0') {
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, opt_prof_leak);
- }
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'f', VSEQ_INVALID);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, opt_prof_leak);
}
void
{
tsd_t *tsd;
prof_tdata_t *tdata;
- char filename[PATH_MAX + 1];
cassert(config_prof);
}
if (opt_prof_prefix[0] != '\0') {
+ char filename[PATH_MAX + 1];
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
{
tsd_t *tsd;
prof_tdata_t *tdata;
- char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
}
if (opt_prof_prefix[0] != '\0') {
+ char filename[DUMP_FILENAME_BUFSIZE];
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
char *thread_name, bool active)
{
prof_tdata_t *tdata;
+ tcache_t *tcache;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
- tdata = (prof_tdata_t *)imalloc(tsd, sizeof(prof_tdata_t));
+ tcache = tcache_get(tsd, true);
+ tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t),
+ size2index(sizeof(prof_tdata_t)), false, tcache, true, NULL, true);
if (tdata == NULL)
return (NULL);
tdata->thread_name = thread_name;
tdata->attached = true;
tdata->expired = false;
+ tdata->tctx_uid_next = 0;
if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
prof_bt_hash, prof_bt_keycomp)) {
- idalloc(tsd, tdata);
+ idalloctm(tsd, tdata, tcache, true, true);
return (NULL);
}
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached)
{
+ tcache_t *tcache;
assert(prof_tdata_should_destroy(tdata, even_if_attached));
assert(tsd_prof_tdata_get(tsd) != tdata);
tdata_tree_remove(&tdatas, tdata);
+ tcache = tcache_get(tsd, false);
if (tdata->thread_name != NULL)
- idalloc(tsd, tdata->thread_name);
+ idalloctm(tsd, tdata->thread_name, tcache, true, true);
ckh_delete(tsd, &tdata->bt2tctx);
- idalloc(tsd, tdata);
+ idalloctm(tsd, tdata, tcache, true, true);
}
static void
if (size == 1)
return ("");
- ret = imalloc(tsd, size);
+ ret = iallocztm(tsd, size, size2index(size), false, tcache_get(tsd,
+ true), true, NULL, true);
if (ret == NULL)
return (NULL);
memcpy(ret, thread_name, size);
return (EAGAIN);
if (tdata->thread_name != NULL) {
- idalloc(tsd, tdata->thread_name);
+ idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
+ true, true);
tdata->thread_name = NULL;
}
if (strlen(s) > 0)
return (active_init_old);
}
+/* Read the current gdump setting under prof_gdump_mtx. */
+bool
+prof_gdump_get(void)
+{
+	bool prof_gdump_current;
+
+	malloc_mutex_lock(&prof_gdump_mtx);
+	prof_gdump_current = prof_gdump_val;
+	malloc_mutex_unlock(&prof_gdump_mtx);
+	return (prof_gdump_current);
+}
+
+/*
+ * Replace the gdump setting and return the previous value; the
+ * read-modify-write is performed while holding prof_gdump_mtx.
+ */
+bool
+prof_gdump_set(bool gdump)
+{
+	bool prof_gdump_old;
+
+	malloc_mutex_lock(&prof_gdump_mtx);
+	prof_gdump_old = prof_gdump_val;
+	prof_gdump_val = gdump;
+	malloc_mutex_unlock(&prof_gdump_mtx);
+	return (prof_gdump_old);
+}
+
void
prof_boot0(void)
{
if (malloc_mutex_init(&prof_active_mtx))
return (true);
+ prof_gdump_val = opt_prof_gdump;
+ if (malloc_mutex_init(&prof_gdump_mtx))
+ return (true);
+
prof_thread_active_init = opt_prof_thread_active_init;
if (malloc_mutex_init(&prof_thread_active_init_mtx))
return (true);
if (malloc_mutex_init(&prof_dump_mtx))
return (true);
- if (atexit(prof_fdump) != 0) {
+ if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
+ atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
if (opt_abort)
abort();