/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 01 - internal entry
 * 10 - exceptional entry
 * 11 - this bit combination is currently unused/reserved
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that you cannot store NULL in the tree as a value for the index.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	1UL

/*
 * Most users of the radix tree store pointers but shmem/tmpfs stores swap
 * entries in the same tree.  They are marked as exceptional entries to
 * distinguish them from pointers to struct page.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2
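
/*
 * Illustrative sketch, not part of this header's API: a small value (such
 * as a swap offset in shmem) can be packed into an exceptional entry by
 * shifting it past the type bits and setting the exceptional bit.  The
 * helper names below are hypothetical:
 *
 *	static inline void *encode_exceptional(unsigned long val)
 *	{
 *		return (void *)((val << RADIX_TREE_EXCEPTIONAL_SHIFT) |
 *				RADIX_TREE_EXCEPTIONAL_ENTRY);
 *	}
 *
 *	static inline unsigned long decode_exceptional(void *entry)
 *	{
 *		return (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 *	}
 */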

static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

#ifndef RADIX_TREE_MAP_SHIFT
#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
#endif

#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_TAG_LONGS	\
	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))
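
/*
 * For example, with the default RADIX_TREE_MAP_SHIFT of 6 on a 64-bit
 * machine: each node has RADIX_TREE_MAP_SIZE = 64 slots, the 64 tag bits
 * per tag fit in RADIX_TREE_TAG_LONGS = 1 unsigned long, and a maximally
 * tall tree needs RADIX_TREE_MAX_PATH = DIV_ROUND_UP(64, 6) = 11 levels.
 */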

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is an exceptional entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @exceptional is the count of every element in ->slots which is
 * either radix_tree_exceptional_entry() or is a sibling entry for an
 * exceptional entry.
 */
struct radix_tree_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	exceptional;	/* Exceptional entry count */
	struct radix_tree_node *parent;		/* Used when ascending tree */
	struct radix_tree_root *root;		/* The tree we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};

/* The top bits of gfp_mask are used to store the root tags and the IDR flag */
#define ROOT_IS_IDR	((__force gfp_t)(1 << __GFP_BITS_SHIFT))
#define ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT + 1)

struct radix_tree_root {
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};

#define RADIX_TREE_INIT(mask)	{					\
	.gfp_mask = (mask),						\
	.rnode = NULL,							\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(mask)

#define INIT_RADIX_TREE(root, mask)					\
do {									\
	(root)->gfp_mask = (mask);					\
	(root)->rnode = NULL;						\
} while (0)
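
/*
 * A minimal usage sketch (the tree name is hypothetical, not part of this
 * API).  A tree may be defined at compile time:
 *
 *	static RADIX_TREE(my_tree, GFP_KERNEL);
 *
 * or initialized at run time:
 *
 *	struct radix_tree_root my_tree;
 *	INIT_RADIX_TREE(&my_tree, GFP_KERNEL);
 *
 * The gfp mask recorded here is used later when inserts need to allocate
 * nodes (GFP_KERNEL assumes the user includes <linux/gfp.h>).
 */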

static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
	return root->rnode == NULL;
}

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @node:	node that contains current slot
 * @shift:	shift for the node that holds our slots
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is
 * a subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
	struct radix_tree_node *node;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	unsigned int	shift;
#endif
};

static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	return iter->shift;
#else
	return 0;
#endif
}

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes must be completely managed by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions are able to be called locklessly, using RCU.  The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions.  Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items.  So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the radix tree *and* a synchronize_rcu() grace
 * period).  A sketch of this pattern follows the comment.
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */
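
/*
 * A sketch of the lockless lookup pattern described above (the tree and
 * helper are hypothetical).  The RCU read lock keeps the tree structure
 * safe to walk; item lifetime remains the caller's problem:
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item)
 *		item = try_to_get_ref(item);
 *	rcu_read_unlock();
 *
 * where try_to_get_ref() stands for whatever per-item lifetime rule the
 * user chose, e.g. a refcount acquisition that fails once the item has
 * started being freed.
 */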

/**
 * radix_tree_deref_slot - dereference a slot
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference.  Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot(void __rcu **slot)
{
	return rcu_dereference(*slot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot with tree lock held
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * Similar to radix_tree_deref_slot.  The caller does not hold the RCU read
 * lock but it must hold the tree lock to prevent parallel updates.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry - check radix_tree_deref_slot
 * @arg: pointer returned by radix_tree_deref_slot
 * Returns: 0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}
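
/*
 * Sketch of dereferencing a slot under RCU only (the tree, index, and
 * label are hypothetical).  A concurrent modification may leave a moved
 * entry in the slot, which radix_tree_deref_retry() detects, and the
 * lookup must then be restarted:
 *
 *	rcu_read_lock();
 * repeat:
 *	item = NULL;
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot) {
 *		item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item))
 *			goto repeat;
 *	}
 *	rcu_read_unlock();
 */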

/**
 * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
 * @arg: value returned by radix_tree_deref_slot
 * Returns: 0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception - radix_tree_deref_slot returned either exception?
 * @arg: value returned by radix_tree_deref_slot
 * Returns: 0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}
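
/*
 * Sketch of classifying a looked-up entry (the entry's origin and the
 * handler are hypothetical; "repeat" is the restart label from the
 * previous sketch).  radix_tree_exception() is the cheap filter tested
 * first; the two kinds of exception are then told apart:
 *
 *	if (radix_tree_exception(entry)) {
 *		if (radix_tree_deref_retry(entry))
 *			goto repeat;
 *		if (radix_tree_exceptional_entry(entry))
 *			return handle_exceptional(entry);
 *	}
 *
 * where handle_exceptional() stands in for user code such as shmem's
 * swap-entry handling.
 */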

int __radix_tree_create(struct radix_tree_root *, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void __rcu ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
			unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root,
			unsigned long index, void *entry)
{
	return __radix_tree_insert(root, index, 0, entry);
}
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
			  struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
					unsigned long index);
typedef void (*radix_tree_update_node_t)(struct radix_tree_node *);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
			  void __rcu **slot, void *entry,
			  radix_tree_update_node_t update_node);
void radix_tree_iter_replace(struct radix_tree_root *,
		const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
			     void __rcu **slot, void *entry);
void __radix_tree_delete_node(struct radix_tree_root *,
			      struct radix_tree_node *,
			      radix_tree_update_node_t update_node);
void radix_tree_iter_delete(struct radix_tree_root *,
			struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
			   void __rcu **slot);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
			void **results, unsigned long first_index,
			unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
			void __rcu ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void radix_tree_iter_tag_set(struct radix_tree_root *,
		const struct radix_tree_iter *iter, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
		const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
		void **results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
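
/*
 * Sketch of the preload pattern (the lock and tree are hypothetical).
 * radix_tree_preload() pre-allocates nodes with a sleeping allocation and
 * returns with preemption disabled on success, so the insert done under
 * the spinlock cannot fail for lack of memory:
 *
 *	error = radix_tree_preload(GFP_KERNEL);
 *	if (error)
 *		return error;
 *	spin_lock(&my_lock);
 *	error = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */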

int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
int radix_tree_split(struct radix_tree_root *, unsigned long index,
			unsigned new_order);
int radix_tree_join(struct radix_tree_root *, unsigned long index,
			unsigned new_order, void *);

void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max);
static inline void __rcu **idr_get_free(struct radix_tree_root *root,
					struct radix_tree_iter *iter,
					gfp_t gfp,
					int end)
{
	return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX);
}

static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root,
					    struct radix_tree_iter *iter,
					    gfp_t gfp,
					    unsigned long end)
{
	return idr_get_free_cmn(root, iter, gfp, end - 1);
}

enum {
	RADIX_TREE_ITER_TAG_MASK = 0x0f,	/* tag index in lower nybble */
	RADIX_TREE_ITER_TAGGED   = 0x10,	/* lookup tagged slots */
	RADIX_TREE_ITER_CONTIG   = 0x20,	/* stop at first hole */
};

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized.  radix_tree_next_chunk() will fill
	 * it in the case of a successful tagged chunk lookup.  If the lookup
	 * was unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iterating (tags).
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_lookup - look up an index in the radix tree
 * @root: radix tree root
 * @iter: iterator state
 * @index: key to look up
 *
 * If @index is present in the radix tree, this function returns the slot
 * containing it and updates @iter to describe the entry.  If @index is not
 * present, it returns NULL.
 */
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}

/**
 * radix_tree_iter_find - find a present entry
 * @root: radix tree root
 * @iter: iterator state
 * @index: start location
 *
 * This function returns the slot containing the entry with the lowest index
 * which is at least @index.  If @index is larger than any present entry, this
 * function returns NULL.  The @iter is updated to describe the entry found.
 */
static inline void __rcu **
radix_tree_iter_find(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, 0);
}

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + (slots << iter_shift(iter));
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot: pointer to current slot
 * @iter: iterator state
 * Returns: New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter);
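
/*
 * Sketch of dropping a lock mid-iteration (the names are hypothetical).
 * Calling radix_tree_iter_resume() before unlocking makes the next loop
 * step refetch a chunk instead of trusting possibly stale slots:
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		process(radix_tree_deref_slot_protected(slot, &my_lock));
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			spin_unlock(&my_lock);
 *			cond_resched();
 *			spin_lock(&my_lock);
 *		}
 *	}
 */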

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return (iter->next_index - iter->index) >> iter_shift(iter);
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
void __rcu **__radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags);
#else
/* Can't happen without sibling entries, but the compiler can't tell that */
static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	return slot;
}
#endif

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_resume() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			slot++;
			goto found;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset++;
			iter->index = __radix_tree_iter_add(iter, offset);
			slot += offset;
			goto found;
		}
	} else {
		long count = radix_tree_chunk_size(iter);

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (likely(*slot))
				goto found;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;

 found:
	if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
		return __radix_tree_next_slot(slot, iter, flags);
	return slot;
}

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))
597 | /** | |
598 | * radix_tree_for_each_contig - iterate over contiguous slots | |
599 | * | |
600 | * @slot: the void** variable for pointer to slot | |
601 | * @root: the struct radix_tree_root pointer | |
602 | * @iter: the struct radix_tree_iter pointer | |
603 | * @start: iteration starting index | |
604 | * | |
605 | * @slot points to radix tree slot, @iter->index contains its index. | |
606 | */ | |
607 | #define radix_tree_for_each_contig(slot, root, iter, start) \ | |
608 | for (slot = radix_tree_iter_init(iter, start) ; \ | |
609 | slot || (slot = radix_tree_next_chunk(root, iter, \ | |
610 | RADIX_TREE_ITER_CONTIG)) ; \ | |
611 | slot = radix_tree_next_slot(slot, iter, \ | |
612 | RADIX_TREE_ITER_CONTIG)) | |

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED | tag))
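
/*
 * Sketch of tag usage (MY_TAG, the lock, and the tree are hypothetical;
 * the tag index must be below RADIX_TREE_MAX_TAGS).  A slot is tagged
 * with radix_tree_tag_set() and later found by tagged iteration, here
 * under the tree lock rather than RCU:
 *
 *	radix_tree_tag_set(&my_tree, index, MY_TAG);
 *
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, MY_TAG) {
 *		void *item = radix_tree_deref_slot_protected(slot, &my_lock);
 *		radix_tree_tag_clear(&my_tree, iter.index, MY_TAG);
 *		process(item);
 *	}
 *
 * The page cache, for instance, uses its tags this way to track dirty
 * and writeback pages.
 */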

#endif /* _LINUX_RADIX_TREE_H */