/*
 * This radix tree implementation is tailored to the singular purpose of
 * associating metadata with chunks that are currently owned by jemalloc.
 *
 *******************************************************************************
 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_node_elm_s rtree_node_elm_t;
typedef struct rtree_level_s rtree_level_t;
typedef struct rtree_s rtree_t;

/*
 * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
 * machine address width.
 */
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
#define RTREE_HEIGHT_MAX \
    ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
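
/*
 * Illustrative arithmetic (not part of the build): on an LP64 system,
 * LG_SIZEOF_PTR is 3, so the defaults above work out to
 *
 *   RTREE_BITS_PER_LEVEL = ZU(1) << 4            = 16
 *   RTREE_HEIGHT_MAX     = (ZU(1) << (3+3)) / 16 = 64 / 16 = 4
 *
 * i.e. each level distinguishes 16 key bits, and a tree covering full 64-bit
 * keys is at most 4 levels deep.
 */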

/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
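
/*
 * A sketch of the presumed protocol (the slow-path initializers live in
 * rtree.c, so this is illustrative rather than authoritative): a writer that
 * finds a NULL slot first installs RTREE_NODE_INITIALIZING, then allocates and
 * zeroes the node, and finally publishes the real pointer.  Readers (see
 * rtree_node_valid() below) treat both NULL and RTREE_NODE_INITIALIZING as
 * "not yet usable"; the tryread fast paths return such values as-is, and the
 * *_read() wrappers fall back to the *_read_hard() slow paths.
 *
 *   slot value                  meaning for readers
 *   --------------------------  -----------------------------------
 *   NULL                        never initialized
 *   RTREE_NODE_INITIALIZING     another thread is mid-initialization
 *   any other pointer           fully initialized node
 */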

/*
 * The node allocation callback function's argument is the number of contiguous
 * rtree_node_elm_t structures to allocate, and the resulting memory must be
 * zeroed.
 */
typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
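
/*
 * A minimal sketch of a conforming callback pair (hypothetical, for
 * illustration only; jemalloc's real callbacks are defined elsewhere).
 * calloc() satisfies the requirement that the returned memory be zeroed:
 *
 *   static rtree_node_elm_t *
 *   example_node_alloc(size_t nelms)
 *   {
 *       return ((rtree_node_elm_t *)calloc(nelms,
 *           sizeof(rtree_node_elm_t)));
 *   }
 *
 *   static void
 *   example_node_dalloc(rtree_node_elm_t *node)
 *   {
 *       free(node);
 *   }
 */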

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct rtree_node_elm_s {
    union {
        void             *pun;
        rtree_node_elm_t *child;
        extent_node_t    *val;
    };
};

struct rtree_level_s {
    /*
     * A non-NULL subtree points to a subtree rooted along the hypothetical
     * path to the leaf node corresponding to key 0.  Depending on what keys
     * have been used to store to the tree, an arbitrary combination of
     * subtree pointers may remain NULL.
     *
     * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
     * This results in a 3-level tree, and the leftmost leaf can be directly
     * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
     * 0x00000000) can be accessed via subtrees[1], and the remainder of the
     * tree can be accessed via subtrees[0].
     *
     *   levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
     *
     *   levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ...]
     *
     *   levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
     *
     * This has practical implications on x64, which currently uses only the
     * lower 47 bits of virtual address space in userland, thus leaving
     * subtrees[0] unused and avoiding a level of tree traversal.
     */
    union {
        void             *subtree_pun;
        rtree_node_elm_t *subtree;
    };
    /* Number of key bits distinguished by this level. */
    unsigned bits;
    /*
     * Cumulative number of key bits distinguished by traversing to the
     * corresponding tree level.
     */
    unsigned cumbits;
};

struct rtree_s {
    rtree_node_alloc_t  *alloc;
    rtree_node_dalloc_t *dalloc;
    unsigned            height;
    /*
     * Precomputed table used to convert from the number of leading 0 key
     * bits to which subtree level to start at.
     */
    unsigned      start_level[RTREE_HEIGHT_MAX];
    rtree_level_t levels[RTREE_HEIGHT_MAX];
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
    rtree_node_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
    unsigned level);
rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
    rtree_node_elm_t *elm, unsigned level);
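
/*
 * Construction sketch (illustrative, not part of jemalloc; example_node_alloc
 * and example_node_dalloc are the hypothetical callbacks above).  The bits
 * argument is presumably the number of significant high-order key bits,
 * consistent with the rtree_level_s comment and rtree_subkey() below; e.g. a
 * tree keyed on 2 MiB-aligned chunk addresses on a 64-bit system would use
 * 64 - 21 = 43 bits.  The usual jemalloc convention of returning true on
 * failure is assumed here.
 *
 *   rtree_t example_rtree;
 *
 *   if (rtree_new(&example_rtree, 43, example_node_alloc,
 *       example_node_dalloc))
 *       abort();
 *   ...
 *   rtree_delete(&example_rtree);
 */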

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);

bool rtree_node_valid(rtree_node_elm_t *node);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
    unsigned level);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
    bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
    const extent_node_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);

extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
    unsigned start_level;

    if (unlikely(key == 0))
        return (rtree->height - 1);

    start_level = rtree->start_level[lg_floor(key) >>
        LG_RTREE_BITS_PER_LEVEL];
    assert(start_level < rtree->height);
    return (start_level);
}
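
/*
 * Illustration (assuming the 3-level, 48-significant-bit configuration from
 * the rtree_level_s comment, with the levels distinguishing the top 16,
 * middle 16, and next 16 key bits): a key below 2^48 necessarily has a zero
 * subkey at level 0, so the lookup can begin at levels[1].subtree, and a key
 * below 2^32 can begin directly at the leaf level.  On x64, where userland
 * addresses fit in 47 bits, every nonzero key therefore skips level 0, which
 * is how subtrees[0] ends up unused.
 */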

JEMALLOC_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{

    return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
        rtree->levels[level].cumbits)) & ((ZU(1) <<
        rtree->levels[level].bits) - 1));
}
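
/*
 * Worked example (same assumed 3-level configuration as above, i.e.
 * bits = 16 and cumbits = 16/32/48 for levels 0/1/2 on a 64-bit system):
 * for key == 0x0002000300040000,
 *
 *   level 0: (key >> (64 - 16)) & 0xffff == 0x0002
 *   level 1: (key >> (64 - 32)) & 0xffff == 0x0003
 *   level 2: (key >> (64 - 48)) & 0xffff == 0x0004
 *
 * The low 16 bits of the key fall below the leaf level's cumbits and are
 * never examined.
 */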

JEMALLOC_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{

    return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}

JEMALLOC_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm)
{
    rtree_node_elm_t *child;

    /* Double-checked read (first read may be stale). */
    child = elm->child;
    if (!rtree_node_valid(child))
        child = atomic_read_p(&elm->pun);
    return (child);
}

JEMALLOC_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
    rtree_node_elm_t *child;

    child = rtree_child_tryread(elm);
    if (unlikely(!rtree_node_valid(child)))
        child = rtree_child_read_hard(rtree, elm, level);
    return (child);
}

JEMALLOC_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{

    if (dependent) {
        /*
         * Reading a val on behalf of a pointer to a valid allocation is
         * guaranteed to be a clean read even without synchronization,
         * because the rtree update became visible in memory before the
         * pointer came into existence.
         */
        return (elm->val);
    } else {
        /*
         * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
         * dependent on a previous rtree write, which means a stale read
         * could result if synchronization were omitted here.
         */
        return (atomic_read_p(&elm->pun));
    }
}

JEMALLOC_INLINE void
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
{

    atomic_write_p(&elm->pun, val);
}

JEMALLOC_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level)
{
    rtree_node_elm_t *subtree;

    /* Double-checked read (first read may be stale). */
    subtree = rtree->levels[level].subtree;
    if (!rtree_node_valid(subtree))
        subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
    return (subtree);
}

JEMALLOC_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level)
{
    rtree_node_elm_t *subtree;

    subtree = rtree_subtree_tryread(rtree, level);
    if (unlikely(!rtree_node_valid(subtree)))
        subtree = rtree_subtree_read_hard(rtree, level);
    return (subtree);
}

JEMALLOC_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
    uintptr_t subkey;
    unsigned i, start_level;
    rtree_node_elm_t *node, *child;

    start_level = rtree_start_level(rtree, key);

    for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
        /**/; i++, node = child) {
        if (!dependent && unlikely(!rtree_node_valid(node)))
            return (NULL);
        subkey = rtree_subkey(rtree, key, i);
        if (i == rtree->height - 1) {
            /*
             * node is a leaf, so it contains values rather than
             * child pointers.
             */
            return (rtree_val_read(rtree, &node[subkey],
                dependent));
        }
        assert(i < rtree->height - 1);
        child = rtree_child_tryread(&node[subkey]);
    }
    not_reached();
}

JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
{
    uintptr_t subkey;
    unsigned i, start_level;
    rtree_node_elm_t *node, *child;

    start_level = rtree_start_level(rtree, key);

    node = rtree_subtree_read(rtree, start_level);
    if (node == NULL)
        return (true);
    for (i = start_level; /**/; i++, node = child) {
        subkey = rtree_subkey(rtree, key, i);
        if (i == rtree->height - 1) {
            /*
             * node is a leaf, so it contains values rather than
             * child pointers.
             */
            rtree_val_write(rtree, &node[subkey], val);
            return (false);
        }
        assert(i + 1 < rtree->height);
        child = rtree_child_read(rtree, &node[subkey], i);
        if (child == NULL)
            return (true);
    }
    not_reached();
}
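
/*
 * Usage sketch (illustrative only; the real call sites live in jemalloc's
 * chunk management code).  Register a chunk's metadata node under its
 * address, then look it up again; example_rtree is the hypothetical tree
 * from the rtree_new() sketch above.
 *
 *   extent_node_t *node = ...;            metadata describing the chunk
 *   uintptr_t key = (uintptr_t)chunk;     chunk address used as the key
 *
 *   if (rtree_set(&example_rtree, key, node))
 *       ...                               node allocation failed
 *
 *   assert(rtree_get(&example_rtree, key, true) == node);
 *
 * Passing dependent == true is only safe when the caller already knows a
 * valid mapping exists (e.g. the key came from a live allocation); an
 * arbitrary lookup should pass false and treat a NULL result as "no mapping".
 */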
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/