/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct tcache_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t	nrequests;
};

struct malloc_bin_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the bin.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting in many increments to nrequests, but only one
	 * each to nmalloc and ndalloc.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin.  This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/*
	 * Current number of regions of this size class, including regions
	 * currently cached by tcache.
	 */
	size_t		curregs;

	/* Number of tcache fills from this bin. */
	uint64_t	nfills;

	/* Number of tcache flushes to this bin. */
	uint64_t	nflushes;

	/* Total number of runs created for this bin's size class. */
	uint64_t	nruns;

	/*
	 * Total number of runs reused by extracting them from the runs tree
	 * for this bin's size class.
	 */
	uint64_t	reruns;

	/* Current number of runs in this bin. */
	size_t		curruns;
};

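/*
 * Illustrative sketch of how these counters interact, assuming a small
 * request size that maps to this bin and an enabled tcache (the exact
 * accounting points live in the tcache and arena code):
 *
 *	void *p = malloc(8);	// tcache miss: bin fill, nmalloc++
 *	free(p);		// cached in tcache; bin counters untouched
 *	p = malloc(8);		// tcache hit: bin counters untouched
 *	free(p);		// flushed back eventually: ndalloc++
 *
 * tcache tallies requests locally and merges them into nrequests only
 * periodically, so over many such cycles nrequests grows much faster than
 * nmalloc/ndalloc.
 */
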
struct malloc_large_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.  Note that tcache may allocate an object, then recycle
	 * it many times, resulting in many increments to nrequests, but only
	 * one each to nmalloc and ndalloc.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/*
	 * Current number of runs of this size class, including runs currently
	 * cached by tcache.
	 */
	size_t		curruns;
};

struct malloc_huge_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/* Current number of (multi-)chunk allocations of this size class. */
	size_t		curhchunks;
};

struct arena_stats_s {
	/* Number of bytes currently mapped. */
	size_t		mapped;

	/*
	 * Number of bytes currently retained as a side effect of munmap()
	 * being disabled/bypassed.  Retained bytes are technically mapped
	 * (though always decommitted or purged), but they are excluded from
	 * the mapped statistic (above).
	 */
	size_t		retained;

	/*
	 * Total number of purge sweeps, total number of madvise calls made,
	 * and total pages purged in order to keep dirty unused memory under
	 * control.
	 */
	uint64_t	npurge;
	uint64_t	nmadvise;
	uint64_t	purged;

	/*
	 * Number of bytes currently mapped purely for metadata purposes, and
	 * number of bytes currently allocated for internal metadata.
	 */
	size_t		metadata_mapped;
	size_t		metadata_allocated; /* Protected via atomic_*_z(). */

	/* Per-size-category statistics. */
	size_t		allocated_large;
	uint64_t	nmalloc_large;
	uint64_t	ndalloc_large;
	uint64_t	nrequests_large;

	size_t		allocated_huge;
	uint64_t	nmalloc_huge;
	uint64_t	ndalloc_huge;

	/* One element for each large size class. */
	malloc_large_stats_t	*lstats;

	/* One element for each huge size class. */
	malloc_huge_stats_t	*hstats;
};

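/*
 * Usage sketch: these per-arena counters surface through the public
 * mallctl() namespace (e.g. "stats.arenas.<i>.mapped").  A minimal reader,
 * assuming the unprefixed public API and statistics enabled at configure
 * time:
 *
 *	#include <stdio.h>
 *	#include <jemalloc/jemalloc.h>
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);	// refresh cached stats
 *
 *	size_t mapped;
 *	sz = sizeof(mapped);
 *	if (mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0) == 0)
 *		printf("arena 0 mapped: %zu bytes\n", mapped);
 */
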
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_stats_print;

extern size_t	stats_cactive;

void	stats_print(void (*write)(void *, const char *), void *cbopaque,
    const char *opts);

#endif /* JEMALLOC_H_EXTERNS */
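
/*
 * Usage sketch: stats_print() backs the public malloc_stats_print() entry
 * point, which shares this callback-based signature.  opts is a string of
 * single-character flags that suppress report sections (e.g. "a" omits
 * per-arena statistics); passing NULL prints everything:
 *
 *	static void
 *	write_cb(void *cbopaque, const char *s)
 *	{
 *		fputs(s, (FILE *)cbopaque);
 *	}
 *
 *	malloc_stats_print(write_cb, stderr, NULL);
 */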
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t	stats_cactive_get(void);
void	stats_cactive_add(size_t size);
void	stats_cactive_sub(size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{

	return (atomic_read_z(&stats_cactive));
}

JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{

	assert(size > 0);
	assert((size & chunksize_mask) == 0);

	atomic_add_z(&stats_cactive, size);
}

JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{

	assert(size > 0);
	assert((size & chunksize_mask) == 0);

	atomic_sub_z(&stats_cactive, size);
}
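
/*
 * Usage sketch: call sites pair these helpers around chunk mapping and
 * unmapping; per the asserts above, size must be a non-zero multiple of the
 * chunk size.  The chunk_alloc()/chunk_dalloc() names below are hypothetical
 * stand-ins for the real call sites:
 *
 *	void *chunk = chunk_alloc(chunksize);		// hypothetical
 *	if (chunk != NULL)
 *		stats_cactive_add(chunksize);
 *	...
 *	stats_cactive_sub(chunksize);
 *	chunk_dalloc(chunk, chunksize);			// hypothetical
 */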
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/