1a59d1b8 | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
1da177e4 LT |
2 | /* |
3 | Red Black Trees | |
4 | (C) 1999 Andrea Arcangeli <andrea@suse.de> | |
5 | ||
1da177e4 LT |
6 | |
7 | linux/include/linux/rbtree.h | |
8 | ||
  To use rbtrees you'll have to implement your own insert and search cores.
  This avoids the use of callbacks and the dramatic performance penalty they
  would incur. It is not the cleanest approach, but it is how you get both
  performance and genericity in C (as opposed to C++).
13 | ||
14bbe3e3 | 14 | See Documentation/core-api/rbtree.rst for documentation and samples. |
1da177e4 LT |
15 | */ |
16 | ||
17 | #ifndef _LINUX_RBTREE_H | |
18 | #define _LINUX_RBTREE_H | |
19 | ||
20 | #include <linux/kernel.h> | |
21 | #include <linux/stddef.h> | |
d72da4a4 | 22 | #include <linux/rcupdate.h> |
1da177e4 | 23 | |
/*
 * Embedded tree-node header.
 *
 * __rb_parent_color packs the parent pointer together with the node's
 * colour in the two low bits (see rb_parent() below, which masks them
 * off).  The sizeof(long) alignment guarantees those two bits of the
 * node's address are always zero and thus free for the colour.
 */
struct rb_node {
	unsigned long  __rb_parent_color;
	struct rb_node *rb_right;
	struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
    /* The alignment might seem pointless, but allegedly CRIS needs it */
1da177e4 | 30 | |
/* Root of an rbtree: a single pointer to the topmost node (NULL if empty). */
struct rb_root {
	struct rb_node *rb_node;
};
34 | ||
/* Parent pointer is stored with the colour in the two low bits; mask them. */
#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))

#define RB_ROOT	(struct rb_root) { NULL, }

/* Map an embedded rb_node back to its containing structure. */
#define	rb_entry(ptr, type, member) container_of(ptr, type, member)

#define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)

/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node)  \
	((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node)  \
	((node)->__rb_parent_color = (unsigned long)(node))
dd67d051 | 47 | |
88d19cf3 | 48 | |
/* Rebalance/recolour the tree after a new node has been rb_link_node()ed. */
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);


/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);

/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
			    struct rb_root *root);
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
				struct rb_root *root);
1da177e4 | 68 | |
d72da4a4 PZ |
69 | static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, |
70 | struct rb_node **rb_link) | |
1da177e4 | 71 | { |
bf7ad8ee | 72 | node->__rb_parent_color = (unsigned long)parent; |
1da177e4 LT |
73 | node->rb_left = node->rb_right = NULL; |
74 | ||
75 | *rb_link = node; | |
76 | } | |
77 | ||
d72da4a4 PZ |
/*
 * RCU variant of rb_link_node(): the node is fully initialised first and
 * then published with rcu_assign_pointer(), so lockless readers that
 * traverse the tree under RCU never observe a half-initialised node.
 */
static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
				    struct rb_node **rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;

	rcu_assign_pointer(*rb_link, node);
}
86 | ||
1310a5a9 JK |
/* Like rb_entry(), but maps a NULL @ptr to NULL; evaluates @ptr only once. */
#define rb_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
	})
91 | ||
/**
 * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
 * given type allowing the backing memory of @pos to be invalidated
 *
 * @pos:	the 'type *' to use as a loop cursor.
 * @n:		another 'type *' to use as temporary storage
 * @root:	'rb_root *' of the rbtree.
 * @field:	the name of the rb_node field within 'type'.
 *
 * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
 * list_for_each_entry_safe() and allows the iteration to continue independent
 * of changes to @pos by the body of the loop.
 *
 * Note, however, that it cannot handle other modifications that re-order the
 * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
 * rb_erase() may rebalance the tree, causing us to miss some nodes.
 */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
			typeof(*pos), field); 1; }); \
	     pos = n)
2b529089 | 114 | |
9f973cb3 ML |
/*
 * Leftmost-cached rbtrees.
 *
 * We do not cache the rightmost node based on footprint
 * size vs number of potential users that could benefit
 * from O(1) rb_last(). Just not worth it, users that want
 * this feature can always implement the logic explicitly.
 * Furthermore, users that want to cache both pointers may
 * find it a bit asymmetric, but that's ok.
 */
struct rb_root_cached {
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;	/* cached leftmost node; NULL when empty */
};

#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }

/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost
134 | ||
135 | static inline void rb_insert_color_cached(struct rb_node *node, | |
136 | struct rb_root_cached *root, | |
137 | bool leftmost) | |
138 | { | |
139 | if (leftmost) | |
140 | root->rb_leftmost = node; | |
141 | rb_insert_color(node, &root->rb_root); | |
142 | } | |
143 | ||
8ecca394 PZ |
144 | |
145 | static inline struct rb_node * | |
146 | rb_erase_cached(struct rb_node *node, struct rb_root_cached *root) | |
9f973cb3 | 147 | { |
8ecca394 PZ |
148 | struct rb_node *leftmost = NULL; |
149 | ||
9f973cb3 | 150 | if (root->rb_leftmost == node) |
8ecca394 PZ |
151 | leftmost = root->rb_leftmost = rb_next(node); |
152 | ||
9f973cb3 | 153 | rb_erase(node, &root->rb_root); |
8ecca394 PZ |
154 | |
155 | return leftmost; | |
9f973cb3 ML |
156 | } |
157 | ||
158 | static inline void rb_replace_node_cached(struct rb_node *victim, | |
159 | struct rb_node *new, | |
160 | struct rb_root_cached *root) | |
161 | { | |
162 | if (root->rb_leftmost == victim) | |
163 | root->rb_leftmost = new; | |
164 | rb_replace_node(victim, new, &root->rb_root); | |
165 | } | |
166 | ||
2d24dd57 PZ |
167 | /* |
168 | * The below helper functions use 2 operators with 3 different | |
169 | * calling conventions. The operators are related like: | |
170 | * | |
171 | * comp(a->key,b) < 0 := less(a,b) | |
172 | * comp(a->key,b) > 0 := less(b,a) | |
173 | * comp(a->key,b) == 0 := !less(a,b) && !less(b,a) | |
174 | * | |
175 | * If these operators define a partial order on the elements we make no | |
176 | * guarantee on which of the elements matching the key is found. See | |
177 | * rb_find(). | |
178 | * | |
179 | * The reason for this is to allow the find() interface without requiring an | |
180 | * on-stack dummy object, which might not be feasible due to object size. | |
181 | */ | |
182 | ||
183 | /** | |
184 | * rb_add_cached() - insert @node into the leftmost cached tree @tree | |
185 | * @node: node to insert | |
186 | * @tree: leftmost cached tree to insert @node into | |
187 | * @less: operator defining the (partial) node order | |
8ecca394 PZ |
188 | * |
189 | * Returns @node when it is the new leftmost, or NULL. | |
2d24dd57 | 190 | */ |
8ecca394 | 191 | static __always_inline struct rb_node * |
2d24dd57 PZ |
192 | rb_add_cached(struct rb_node *node, struct rb_root_cached *tree, |
193 | bool (*less)(struct rb_node *, const struct rb_node *)) | |
194 | { | |
195 | struct rb_node **link = &tree->rb_root.rb_node; | |
196 | struct rb_node *parent = NULL; | |
197 | bool leftmost = true; | |
198 | ||
199 | while (*link) { | |
200 | parent = *link; | |
201 | if (less(node, parent)) { | |
202 | link = &parent->rb_left; | |
203 | } else { | |
204 | link = &parent->rb_right; | |
205 | leftmost = false; | |
206 | } | |
207 | } | |
208 | ||
209 | rb_link_node(node, parent, link); | |
210 | rb_insert_color_cached(node, tree, leftmost); | |
8ecca394 PZ |
211 | |
212 | return leftmost ? node : NULL; | |
2d24dd57 PZ |
213 | } |
214 | ||
215 | /** | |
216 | * rb_add() - insert @node into @tree | |
217 | * @node: node to insert | |
218 | * @tree: tree to insert @node into | |
219 | * @less: operator defining the (partial) node order | |
220 | */ | |
221 | static __always_inline void | |
222 | rb_add(struct rb_node *node, struct rb_root *tree, | |
223 | bool (*less)(struct rb_node *, const struct rb_node *)) | |
224 | { | |
225 | struct rb_node **link = &tree->rb_node; | |
226 | struct rb_node *parent = NULL; | |
227 | ||
228 | while (*link) { | |
229 | parent = *link; | |
230 | if (less(node, parent)) | |
231 | link = &parent->rb_left; | |
232 | else | |
233 | link = &parent->rb_right; | |
234 | } | |
235 | ||
236 | rb_link_node(node, parent, link); | |
237 | rb_insert_color(node, tree); | |
238 | } | |
239 | ||
240 | /** | |
241 | * rb_find_add() - find equivalent @node in @tree, or add @node | |
242 | * @node: node to look-for / insert | |
243 | * @tree: tree to search / modify | |
244 | * @cmp: operator defining the node order | |
245 | * | |
246 | * Returns the rb_node matching @node, or NULL when no match is found and @node | |
247 | * is inserted. | |
248 | */ | |
249 | static __always_inline struct rb_node * | |
250 | rb_find_add(struct rb_node *node, struct rb_root *tree, | |
251 | int (*cmp)(struct rb_node *, const struct rb_node *)) | |
252 | { | |
253 | struct rb_node **link = &tree->rb_node; | |
254 | struct rb_node *parent = NULL; | |
255 | int c; | |
256 | ||
257 | while (*link) { | |
258 | parent = *link; | |
259 | c = cmp(node, parent); | |
260 | ||
261 | if (c < 0) | |
262 | link = &parent->rb_left; | |
263 | else if (c > 0) | |
264 | link = &parent->rb_right; | |
265 | else | |
266 | return parent; | |
267 | } | |
268 | ||
269 | rb_link_node(node, parent, link); | |
270 | rb_insert_color(node, tree); | |
271 | return NULL; | |
272 | } | |
273 | ||
274 | /** | |
275 | * rb_find() - find @key in tree @tree | |
276 | * @key: key to match | |
277 | * @tree: tree to search | |
278 | * @cmp: operator defining the node order | |
279 | * | |
280 | * Returns the rb_node matching @key or NULL. | |
281 | */ | |
282 | static __always_inline struct rb_node * | |
283 | rb_find(const void *key, const struct rb_root *tree, | |
284 | int (*cmp)(const void *key, const struct rb_node *)) | |
285 | { | |
286 | struct rb_node *node = tree->rb_node; | |
287 | ||
288 | while (node) { | |
289 | int c = cmp(key, node); | |
290 | ||
291 | if (c < 0) | |
292 | node = node->rb_left; | |
293 | else if (c > 0) | |
294 | node = node->rb_right; | |
295 | else | |
296 | return node; | |
297 | } | |
298 | ||
299 | return NULL; | |
300 | } | |
301 | ||
302 | /** | |
303 | * rb_find_first() - find the first @key in @tree | |
304 | * @key: key to match | |
305 | * @tree: tree to search | |
306 | * @cmp: operator defining node order | |
307 | * | |
308 | * Returns the leftmost node matching @key, or NULL. | |
309 | */ | |
310 | static __always_inline struct rb_node * | |
311 | rb_find_first(const void *key, const struct rb_root *tree, | |
312 | int (*cmp)(const void *key, const struct rb_node *)) | |
313 | { | |
314 | struct rb_node *node = tree->rb_node; | |
315 | struct rb_node *match = NULL; | |
316 | ||
317 | while (node) { | |
318 | int c = cmp(key, node); | |
319 | ||
320 | if (c <= 0) { | |
321 | if (!c) | |
322 | match = node; | |
323 | node = node->rb_left; | |
324 | } else if (c > 0) { | |
325 | node = node->rb_right; | |
326 | } | |
327 | } | |
328 | ||
329 | return match; | |
330 | } | |
331 | ||
332 | /** | |
333 | * rb_next_match() - find the next @key in @tree | |
334 | * @key: key to match | |
335 | * @tree: tree to search | |
336 | * @cmp: operator defining node order | |
337 | * | |
338 | * Returns the next node matching @key, or NULL. | |
339 | */ | |
340 | static __always_inline struct rb_node * | |
341 | rb_next_match(const void *key, struct rb_node *node, | |
342 | int (*cmp)(const void *key, const struct rb_node *)) | |
343 | { | |
344 | node = rb_next(node); | |
345 | if (node && cmp(key, node)) | |
346 | node = NULL; | |
347 | return node; | |
348 | } | |
349 | ||
/**
 * rb_for_each() - iterates a subtree matching @key
 * @node: iterator
 * @key: key to match
 * @tree: tree to search
 * @cmp: operator defining node order
 *
 * Visits every node matching @key, leftmost first, via
 * rb_find_first() / rb_next_match().
 */
#define rb_for_each(node, key, tree, cmp) \
	for ((node) = rb_find_first((key), (tree), (cmp)); \
	     (node); (node) = rb_next_match((key), (node), (cmp)))
360 | ||
1da177e4 | 361 | #endif /* _LINUX_RBTREE_H */ |