Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (C) 2001 Momchil Velikov | |
3 | * Portions Copyright (C) 2001 Christoph Hellwig | |
cde53535 | 4 | * Copyright (C) 2005 SGI, Christoph Lameter |
7cf9c2c7 | 5 | * Copyright (C) 2006 Nick Piggin |
78c1d784 | 6 | * Copyright (C) 2012 Konstantin Khlebnikov |
6b053b8e MW |
7 | * Copyright (C) 2016 Intel, Matthew Wilcox |
8 | * Copyright (C) 2016 Intel, Ross Zwisler | |
1da177e4 LT |
9 | * |
10 | * This program is free software; you can redistribute it and/or | |
11 | * modify it under the terms of the GNU General Public License as | |
12 | * published by the Free Software Foundation; either version 2, or (at | |
13 | * your option) any later version. | |
14 | * | |
15 | * This program is distributed in the hope that it will be useful, but | |
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
18 | * General Public License for more details. | |
19 | * | |
20 | * You should have received a copy of the GNU General Public License | |
21 | * along with this program; if not, write to the Free Software | |
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
23 | */ | |
24 | ||
25 | #include <linux/errno.h> | |
26 | #include <linux/init.h> | |
27 | #include <linux/kernel.h> | |
8bc3bcc9 | 28 | #include <linux/export.h> |
1da177e4 LT |
29 | #include <linux/radix-tree.h> |
30 | #include <linux/percpu.h> | |
31 | #include <linux/slab.h> | |
ce80b067 | 32 | #include <linux/kmemleak.h> |
1da177e4 LT |
33 | #include <linux/notifier.h> |
34 | #include <linux/cpu.h> | |
1da177e4 LT |
35 | #include <linux/string.h> |
36 | #include <linux/bitops.h> | |
7cf9c2c7 | 37 | #include <linux/rcupdate.h> |
92cf2118 | 38 | #include <linux/preempt.h> /* in_interrupt() */ |
1da177e4 LT |
39 | |
40 | ||
26fb1589 JM |
41 | /* |
42 | * The height_to_maxindex array needs to be one deeper than the maximum | |
43 | * path as height 0 holds only 1 entry. | |
44 | */ | |
45 | static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly; | |
1da177e4 LT |
46 | |
47 | /* | |
48 | * Radix tree node cache. | |
49 | */ | |
e18b890b | 50 | static struct kmem_cache *radix_tree_node_cachep; |
1da177e4 | 51 | |
55368052 NP |
52 | /* |
53 | * The radix tree is variable-height, so an insert operation not only has | |
54 | * to build the branch to its corresponding item, it also has to build the | |
55 | * branch to existing items if the size has to be increased (by | |
56 | * radix_tree_extend). | |
57 | * | |
58 | * The worst case is a zero height tree with just a single item at index 0, | |
59 | * and then inserting an item at index ULONG_MAX. This requires 2 new branches | |
60 | * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared. | |
61 | * Hence: | |
62 | */ | |
63 | #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) | |
64 | ||
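/*
 * A worked example (illustrative only; the exact numbers depend on the
 * configuration): on 64-bit with RADIX_TREE_MAP_SHIFT == 6,
 * RADIX_TREE_MAX_PATH = DIV_ROUND_UP(64, 6) = 11, so
 * RADIX_TREE_PRELOAD_SIZE = 2 * 11 - 1 = 21 preallocated nodes per CPU.
 */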
1da177e4 LT |
65 | /* |
66 | * Per-cpu pool of preloaded nodes | |
67 | */ | |
68 | struct radix_tree_preload { | |
69 | int nr; | |
9d2a8da0 KS |
70 | /* nodes->private_data points to next preallocated node */ |
71 | struct radix_tree_node *nodes; | |
1da177e4 | 72 | }; |
8cef7d57 | 73 | static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; |
1da177e4 | 74 | |
27d20fdd NP |
75 | static inline void *ptr_to_indirect(void *ptr) |
76 | { | |
77 | return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR); | |
78 | } | |
79 | ||
afe0e395 MW |
80 | #define RADIX_TREE_RETRY ptr_to_indirect(NULL) |
81 | ||
db050f29 MW |
82 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
83 | /* Sibling slots point directly to another slot in the same node */ | |
84 | static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) | |
85 | { | |
86 | void **ptr = node; | |
87 | return (parent->slots <= ptr) && | |
88 | (ptr < parent->slots + RADIX_TREE_MAP_SIZE); | |
89 | } | |
90 | #else | |
91 | static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) | |
92 | { | |
93 | return false; | |
94 | } | |
95 | #endif | |
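/*
 * For example (illustrative, assuming RADIX_TREE_MAP_SHIFT == 6): an
 * order-3 entry at index 8 occupies the aligned group of slots 8..15 in
 * its node; slot 8 holds the canonical entry and slots 9..15 hold
 * sibling pointers back to &slots[8], tagged as indirect pointers.
 */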
96 | ||
97 | static inline unsigned long get_slot_offset(struct radix_tree_node *parent, | |
98 | void **slot) | |
99 | { | |
100 | return slot - parent->slots; | |
101 | } | |
102 | ||
103 | static unsigned radix_tree_descend(struct radix_tree_node *parent, | |
104 | struct radix_tree_node **nodep, unsigned offset) | |
105 | { | |
106 | void **entry = rcu_dereference_raw(parent->slots[offset]); | |
107 | ||
108 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | |
109 | if (radix_tree_is_indirect_ptr(entry)) { | |
110 | unsigned long siboff = get_slot_offset(parent, entry); | |
111 | if (siboff < RADIX_TREE_MAP_SIZE) { | |
112 | offset = siboff; | |
113 | entry = rcu_dereference_raw(parent->slots[offset]); | |
114 | } | |
115 | } | |
116 | #endif | |
117 | ||
118 | *nodep = (void *)entry; | |
119 | return offset; | |
120 | } | |
121 | ||
612d6c19 NP |
122 | static inline gfp_t root_gfp_mask(struct radix_tree_root *root) |
123 | { | |
124 | return root->gfp_mask & __GFP_BITS_MASK; | |
125 | } | |
126 | ||
643b52b9 NP |
127 | static inline void tag_set(struct radix_tree_node *node, unsigned int tag, |
128 | int offset) | |
129 | { | |
130 | __set_bit(offset, node->tags[tag]); | |
131 | } | |
132 | ||
133 | static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, | |
134 | int offset) | |
135 | { | |
136 | __clear_bit(offset, node->tags[tag]); | |
137 | } | |
138 | ||
139 | static inline int tag_get(struct radix_tree_node *node, unsigned int tag, | |
140 | int offset) | |
141 | { | |
142 | return test_bit(offset, node->tags[tag]); | |
143 | } | |
144 | ||
145 | static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) | |
146 | { | |
147 | root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); | |
148 | } | |
149 | ||
150 | static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag) | |
151 | { | |
152 | root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); | |
153 | } | |
154 | ||
155 | static inline void root_tag_clear_all(struct radix_tree_root *root) | |
156 | { | |
157 | root->gfp_mask &= __GFP_BITS_MASK; | |
158 | } | |
159 | ||
160 | static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) | |
161 | { | |
162 | return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); | |
163 | } | |
164 | ||
7b60e9ad MW |
165 | static inline unsigned root_tags_get(struct radix_tree_root *root) |
166 | { | |
167 | return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT; | |
168 | } | |
169 | ||
643b52b9 NP |
170 | /* |
171 | * Returns 1 if any slot in the node has this tag set. | |
172 | * Otherwise returns 0. | |
173 | */ | |
174 | static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) | |
175 | { | |
176 | int idx; | |
177 | for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { | |
178 | if (node->tags[tag][idx]) | |
179 | return 1; | |
180 | } | |
181 | return 0; | |
182 | } | |
78c1d784 KK |
183 | |
184 | /** | |
185 | * radix_tree_find_next_bit - find the next set bit in a memory region | |
186 | * | |
187 | * @addr: The address to base the search on | |
188 | * @size: The bitmap size in bits | |
189 | * @offset: The bitnumber to start searching at | |
190 | * | |
191 | * Unrollable variant of find_next_bit() for constant size arrays. | |
192 | * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero. | |
193 | * Returns next bit offset, or size if nothing found. | |
194 | */ | |
195 | static __always_inline unsigned long | |
196 | radix_tree_find_next_bit(const unsigned long *addr, | |
197 | unsigned long size, unsigned long offset) | |
198 | { | |
199 | if (!__builtin_constant_p(size)) | |
200 | return find_next_bit(addr, size, offset); | |
201 | ||
202 | if (offset < size) { | |
203 | unsigned long tmp; | |
204 | ||
205 | addr += offset / BITS_PER_LONG; | |
206 | tmp = *addr >> (offset % BITS_PER_LONG); | |
207 | if (tmp) | |
208 | return __ffs(tmp) + offset; | |
209 | offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1); | |
210 | while (offset < size) { | |
211 | tmp = *++addr; | |
212 | if (tmp) | |
213 | return __ffs(tmp) + offset; | |
214 | offset += BITS_PER_LONG; | |
215 | } | |
216 | } | |
217 | return size; | |
218 | } | |
219 | ||
0796c583 RZ |
220 | #ifndef __KERNEL__ |
221 | static void dump_node(struct radix_tree_node *node, unsigned offset, | |
222 | unsigned shift, unsigned long index) | |
7cf19af4 | 223 | { |
0796c583 | 224 | unsigned long i; |
7cf19af4 | 225 | |
7cf19af4 | 226 | pr_debug("radix node: %p offset %d tags %lx %lx %lx path %x count %d parent %p\n", |
0796c583 RZ |
227 | node, offset, |
228 | node->tags[0][0], node->tags[1][0], node->tags[2][0], | |
229 | node->path, node->count, node->parent); | |
230 | ||
231 | for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { | |
232 | unsigned long first = index | (i << shift); | |
233 | unsigned long last = first | ((1UL << shift) - 1); | |
234 | void *entry = node->slots[i]; | |
235 | if (!entry) | |
236 | continue; | |
237 | if (is_sibling_entry(node, entry)) { | |
238 | pr_debug("radix sblng %p offset %ld val %p indices %ld-%ld\n", | |
239 | entry, i, | |
240 | *(void **)indirect_to_ptr(entry), | |
241 | first, last); | |
242 | } else if (!radix_tree_is_indirect_ptr(entry)) { | |
243 | pr_debug("radix entry %p offset %ld indices %ld-%ld\n", | |
244 | entry, i, first, last); | |
245 | } else { | |
246 | dump_node(indirect_to_ptr(entry), i, | |
247 | shift - RADIX_TREE_MAP_SHIFT, first); | |
248 | } | |
249 | } | |
7cf19af4 MW |
250 | } |
251 | ||
252 | /* For debug */ | |
253 | static void radix_tree_dump(struct radix_tree_root *root) | |
254 | { | |
255 | pr_debug("radix root: %p height %d rnode %p tags %x\n", | |
256 | root, root->height, root->rnode, | |
257 | root->gfp_mask >> __GFP_BITS_SHIFT); | |
258 | if (!radix_tree_is_indirect_ptr(root->rnode)) | |
259 | return; | |
0796c583 RZ |
260 | dump_node(indirect_to_ptr(root->rnode), 0, |
261 | (root->height - 1) * RADIX_TREE_MAP_SHIFT, 0); | |
7cf19af4 MW |
262 | } |
263 | #endif | |
264 | ||
1da177e4 LT |
265 | /* |
266 | * This assumes that the caller has performed appropriate preallocation, and | |
267 | * that the caller has pinned this thread of control to the current CPU. | |
268 | */ | |
269 | static struct radix_tree_node * | |
270 | radix_tree_node_alloc(struct radix_tree_root *root) | |
271 | { | |
e2848a0e | 272 | struct radix_tree_node *ret = NULL; |
612d6c19 | 273 | gfp_t gfp_mask = root_gfp_mask(root); |
1da177e4 | 274 | |
5e4c0d97 JK |
275 | /* |
276 | * Preload code isn't irq safe and it doesn't make sense to use
277 | * preloading in interrupt context anyway, as all the allocations have to
278 | * be atomic. So just do a normal allocation when in interrupt context.
279 | */ | |
d0164adc | 280 | if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) { |
1da177e4 LT |
281 | struct radix_tree_preload *rtp; |
282 | ||
58e698af VD |
283 | /* |
284 | * Even if the caller has preloaded, try to allocate from the | |
285 | * cache first for the new node to get accounted. | |
286 | */ | |
287 | ret = kmem_cache_alloc(radix_tree_node_cachep, | |
288 | gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN); | |
289 | if (ret) | |
290 | goto out; | |
291 | ||
e2848a0e NP |
292 | /* |
293 | * Provided the caller has preloaded here, we will always | |
294 | * succeed in getting a node here (and never reach | |
295 | * kmem_cache_alloc) | |
296 | */ | |
7c8e0181 | 297 | rtp = this_cpu_ptr(&radix_tree_preloads); |
1da177e4 | 298 | if (rtp->nr) { |
9d2a8da0 KS |
299 | ret = rtp->nodes; |
300 | rtp->nodes = ret->private_data; | |
301 | ret->private_data = NULL; | |
1da177e4 LT |
302 | rtp->nr--; |
303 | } | |
ce80b067 CM |
304 | /* |
305 | * Update the allocation stack trace as this is more useful | |
306 | * for debugging. | |
307 | */ | |
308 | kmemleak_update_trace(ret); | |
58e698af | 309 | goto out; |
1da177e4 | 310 | } |
58e698af VD |
311 | ret = kmem_cache_alloc(radix_tree_node_cachep, |
312 | gfp_mask | __GFP_ACCOUNT); | |
313 | out: | |
c0bc9875 | 314 | BUG_ON(radix_tree_is_indirect_ptr(ret)); |
1da177e4 LT |
315 | return ret; |
316 | } | |
317 | ||
7cf9c2c7 NP |
318 | static void radix_tree_node_rcu_free(struct rcu_head *head) |
319 | { | |
320 | struct radix_tree_node *node = | |
321 | container_of(head, struct radix_tree_node, rcu_head); | |
b6dd0865 | 322 | int i; |
643b52b9 NP |
323 | |
324 | /* | |
325 | * must only free zeroed nodes into the slab. radix_tree_shrink | |
326 | * can leave us with a non-NULL entry in the first slot, so clear | |
327 | * that here to make sure. | |
328 | */ | |
b6dd0865 DC |
329 | for (i = 0; i < RADIX_TREE_MAX_TAGS; i++) |
330 | tag_clear(node, i, 0); | |
331 | ||
643b52b9 NP |
332 | node->slots[0] = NULL; |
333 | node->count = 0; | |
334 | ||
7cf9c2c7 NP |
335 | kmem_cache_free(radix_tree_node_cachep, node); |
336 | } | |
337 | ||
1da177e4 LT |
338 | static inline void |
339 | radix_tree_node_free(struct radix_tree_node *node) | |
340 | { | |
7cf9c2c7 | 341 | call_rcu(&node->rcu_head, radix_tree_node_rcu_free); |
1da177e4 LT |
342 | } |
343 | ||
344 | /* | |
345 | * Load up this CPU's radix_tree_node buffer with sufficient objects to | |
346 | * ensure that the addition of a single element in the tree cannot fail. On | |
347 | * success, return zero, with preemption disabled. On error, return -ENOMEM | |
348 | * with preemption not disabled. | |
b34df792 DH |
349 | * |
350 | * To make use of this facility, the radix tree must be initialised without | |
d0164adc | 351 | * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). |
1da177e4 | 352 | */ |
5e4c0d97 | 353 | static int __radix_tree_preload(gfp_t gfp_mask) |
1da177e4 LT |
354 | { |
355 | struct radix_tree_preload *rtp; | |
356 | struct radix_tree_node *node; | |
357 | int ret = -ENOMEM; | |
358 | ||
359 | preempt_disable(); | |
7c8e0181 | 360 | rtp = this_cpu_ptr(&radix_tree_preloads); |
9d2a8da0 | 361 | while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) { |
1da177e4 | 362 | preempt_enable(); |
488514d1 | 363 | node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
1da177e4 LT |
364 | if (node == NULL) |
365 | goto out; | |
366 | preempt_disable(); | |
7c8e0181 | 367 | rtp = this_cpu_ptr(&radix_tree_preloads); |
9d2a8da0 KS |
368 | if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) { |
369 | node->private_data = rtp->nodes; | |
370 | rtp->nodes = node; | |
371 | rtp->nr++; | |
372 | } else { | |
1da177e4 | 373 | kmem_cache_free(radix_tree_node_cachep, node); |
9d2a8da0 | 374 | } |
1da177e4 LT |
375 | } |
376 | ret = 0; | |
377 | out: | |
378 | return ret; | |
379 | } | |
5e4c0d97 JK |
380 | |
381 | /* | |
382 | * Load up this CPU's radix_tree_node buffer with sufficient objects to | |
383 | * ensure that the addition of a single element in the tree cannot fail. On | |
384 | * success, return zero, with preemption disabled. On error, return -ENOMEM | |
385 | * with preemption not disabled. | |
386 | * | |
387 | * To make use of this facility, the radix tree must be initialised without | |
d0164adc | 388 | * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). |
5e4c0d97 JK |
389 | */ |
390 | int radix_tree_preload(gfp_t gfp_mask) | |
391 | { | |
392 | /* Warn on nonsensical use... */ |
d0164adc | 393 | WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); |
5e4c0d97 JK |
394 | return __radix_tree_preload(gfp_mask); |
395 | } | |
d7f0923d | 396 | EXPORT_SYMBOL(radix_tree_preload); |
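/*
 * A minimal usage sketch (illustrative only, not part of this file;
 * example_insert() and its lock parameter are hypothetical): preload
 * outside the lock, insert with the lock held, then re-enable preemption
 * with radix_tree_preload_end().
 */
static int example_insert(struct radix_tree_root *root, unsigned long index,
			  void *item, spinlock_t *lock)
{
	int error = radix_tree_preload(GFP_KERNEL);

	if (error)
		return error;
	spin_lock(lock);
	error = radix_tree_insert(root, index, item);
	spin_unlock(lock);
	radix_tree_preload_end();
	return error;
}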
1da177e4 | 397 | |
5e4c0d97 JK |
398 | /* |
399 | * The same as above function, except we don't guarantee preloading happens. | |
400 | * We do it, if we decide it helps. On success, return zero with preemption | |
401 | * disabled. On error, return -ENOMEM with preemption not disabled. | |
402 | */ | |
403 | int radix_tree_maybe_preload(gfp_t gfp_mask) | |
404 | { | |
d0164adc | 405 | if (gfpflags_allow_blocking(gfp_mask)) |
5e4c0d97 JK |
406 | return __radix_tree_preload(gfp_mask); |
407 | /* Preloading doesn't help anything with this gfp mask, skip it */ | |
408 | preempt_disable(); | |
409 | return 0; | |
410 | } | |
411 | EXPORT_SYMBOL(radix_tree_maybe_preload); | |
412 | ||
1da177e4 LT |
413 | /* |
414 | * Return the maximum key which can be stored in a
415 | * radix tree with height HEIGHT. | |
416 | */ | |
417 | static inline unsigned long radix_tree_maxindex(unsigned int height) | |
418 | { | |
419 | return height_to_maxindex[height]; | |
420 | } | |
421 | ||
1456a439 MW |
422 | static inline unsigned long node_maxindex(struct radix_tree_node *node) |
423 | { | |
424 | return radix_tree_maxindex(node->path & RADIX_TREE_HEIGHT_MASK); | |
425 | } | |
426 | ||
427 | static unsigned radix_tree_load_root(struct radix_tree_root *root, | |
428 | struct radix_tree_node **nodep, unsigned long *maxindex) | |
429 | { | |
430 | struct radix_tree_node *node = rcu_dereference_raw(root->rnode); | |
431 | ||
432 | *nodep = node; | |
433 | ||
434 | if (likely(radix_tree_is_indirect_ptr(node))) { | |
435 | node = indirect_to_ptr(node); | |
436 | *maxindex = node_maxindex(node); | |
437 | return (node->path & RADIX_TREE_HEIGHT_MASK) * | |
438 | RADIX_TREE_MAP_SHIFT; | |
439 | } | |
440 | ||
441 | *maxindex = 0; | |
442 | return 0; | |
443 | } | |
444 | ||
1da177e4 LT |
445 | /* |
446 | * Extend a radix tree so it can store key @index. | |
447 | */ | |
e6145236 | 448 | static int radix_tree_extend(struct radix_tree_root *root, |
49ea6ebc | 449 | unsigned long index) |
1da177e4 LT |
450 | { |
451 | struct radix_tree_node *node; | |
e2bdb933 | 452 | struct radix_tree_node *slot; |
1da177e4 | 453 | unsigned int height; |
1da177e4 LT |
454 | int tag; |
455 | ||
456 | /* Figure out what the height should be. */ | |
457 | height = root->height + 1; | |
458 | while (index > radix_tree_maxindex(height)) | |
459 | height++; | |
460 | ||
49ea6ebc | 461 | if (root->rnode == NULL) { |
1da177e4 LT |
462 | root->height = height; |
463 | goto out; | |
464 | } | |
465 | ||
1da177e4 | 466 | do { |
7cf9c2c7 | 467 | unsigned int newheight; |
1da177e4 LT |
468 | if (!(node = radix_tree_node_alloc(root))) |
469 | return -ENOMEM; | |
470 | ||
1da177e4 | 471 | /* Propagate the aggregated tag info into the new root */ |
daff89f3 | 472 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { |
612d6c19 | 473 | if (root_tag_get(root, tag)) |
1da177e4 LT |
474 | tag_set(node, tag, 0); |
475 | } | |
476 | ||
e2bdb933 | 477 | /* Increase the height. */ |
7cf9c2c7 | 478 | newheight = root->height+1; |
449dd698 JW |
479 | BUG_ON(newheight & ~RADIX_TREE_HEIGHT_MASK); |
480 | node->path = newheight; | |
1da177e4 | 481 | node->count = 1; |
e2bdb933 HD |
482 | node->parent = NULL; |
483 | slot = root->rnode; | |
49ea6ebc | 484 | if (radix_tree_is_indirect_ptr(slot)) { |
e2bdb933 HD |
485 | slot = indirect_to_ptr(slot); |
486 | slot->parent = node; | |
339e6353 | 487 | slot = ptr_to_indirect(slot); |
e2bdb933 HD |
488 | } |
489 | node->slots[0] = slot; | |
27d20fdd | 490 | node = ptr_to_indirect(node); |
7cf9c2c7 NP |
491 | rcu_assign_pointer(root->rnode, node); |
492 | root->height = newheight; | |
1da177e4 LT |
493 | } while (height > root->height); |
494 | out: | |
49ea6ebc | 495 | return height * RADIX_TREE_MAP_SHIFT; |
1da177e4 LT |
496 | } |
497 | ||
498 | /** | |
139e5616 | 499 | * __radix_tree_create - create a slot in a radix tree |
1da177e4 LT |
500 | * @root: radix tree root |
501 | * @index: index key | |
e6145236 | 502 | * @order: index occupies 2^order aligned slots |
139e5616 JW |
503 | * @nodep: returns node |
504 | * @slotp: returns slot | |
1da177e4 | 505 | * |
139e5616 JW |
506 | * Create, if necessary, and return the node and slot for an item |
507 | * at position @index in the radix tree @root. | |
508 | * | |
509 | * Until there is more than one item in the tree, no nodes are | |
510 | * allocated and @root->rnode is used as a direct slot instead of | |
511 | * pointing to a node, in which case *@nodep will be NULL. | |
512 | * | |
513 | * Returns -ENOMEM, or 0 for success. | |
1da177e4 | 514 | */ |
139e5616 | 515 | int __radix_tree_create(struct radix_tree_root *root, unsigned long index, |
e6145236 MW |
516 | unsigned order, struct radix_tree_node **nodep, |
517 | void ***slotp) | |
1da177e4 | 518 | { |
201b6264 | 519 | struct radix_tree_node *node = NULL, *slot; |
49ea6ebc | 520 | unsigned long maxindex; |
139e5616 | 521 | unsigned int height, shift, offset; |
49ea6ebc MW |
522 | unsigned long max = index | ((1UL << order) - 1); |
523 | ||
524 | shift = radix_tree_load_root(root, &slot, &maxindex); | |
1da177e4 LT |
525 | |
526 | /* Make sure the tree is high enough. */ | |
49ea6ebc MW |
527 | if (max > maxindex) { |
528 | int error = radix_tree_extend(root, max); | |
529 | if (error < 0) | |
1da177e4 | 530 | return error; |
49ea6ebc MW |
531 | shift = error; |
532 | slot = root->rnode; | |
533 | if (order == shift) { | |
534 | shift += RADIX_TREE_MAP_SHIFT; | |
535 | root->height++; | |
536 | } | |
1da177e4 LT |
537 | } |
538 | ||
1da177e4 | 539 | height = root->height; |
1da177e4 LT |
540 | |
541 | offset = 0; /* uninitialised var warning */ | |
e6145236 | 542 | while (shift > order) { |
201b6264 | 543 | if (slot == NULL) { |
1da177e4 | 544 | /* Have to add a child node. */ |
201b6264 | 545 | if (!(slot = radix_tree_node_alloc(root))) |
1da177e4 | 546 | return -ENOMEM; |
449dd698 | 547 | slot->path = height; |
e2bdb933 | 548 | slot->parent = node; |
201b6264 | 549 | if (node) { |
339e6353 MW |
550 | rcu_assign_pointer(node->slots[offset], |
551 | ptr_to_indirect(slot)); | |
1da177e4 | 552 | node->count++; |
449dd698 | 553 | slot->path |= offset << RADIX_TREE_HEIGHT_SHIFT; |
201b6264 | 554 | } else |
339e6353 MW |
555 | rcu_assign_pointer(root->rnode, |
556 | ptr_to_indirect(slot)); | |
e6145236 MW |
557 | } else if (!radix_tree_is_indirect_ptr(slot)) |
558 | break; | |
1da177e4 LT |
559 | |
560 | /* Go a level down */ | |
e6145236 | 561 | height--; |
0070e28d | 562 | shift -= RADIX_TREE_MAP_SHIFT; |
e6145236 | 563 | node = indirect_to_ptr(slot); |
8a14f4d8 MW |
564 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
565 | offset = radix_tree_descend(node, &slot, offset); | |
e6145236 MW |
566 | } |
567 | ||
57578c2e | 568 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
e6145236 | 569 | /* Insert pointers to the canonical entry */ |
3b8c00f6 MW |
570 | if (order > shift) { |
571 | int i, n = 1 << (order - shift); | |
e6145236 MW |
572 | offset = offset & ~(n - 1); |
573 | slot = ptr_to_indirect(&node->slots[offset]); | |
574 | for (i = 0; i < n; i++) { | |
575 | if (node->slots[offset + i]) | |
576 | return -EEXIST; | |
577 | } | |
578 | ||
579 | for (i = 1; i < n; i++) { | |
580 | rcu_assign_pointer(node->slots[offset + i], slot); | |
581 | node->count++; | |
582 | } | |
612d6c19 | 583 | } |
57578c2e | 584 | #endif |
1da177e4 | 585 | |
139e5616 JW |
586 | if (nodep) |
587 | *nodep = node; | |
588 | if (slotp) | |
589 | *slotp = node ? node->slots + offset : (void **)&root->rnode; | |
590 | return 0; | |
591 | } | |
592 | ||
593 | /** | |
e6145236 | 594 | * __radix_tree_insert - insert into a radix tree |
139e5616 JW |
595 | * @root: radix tree root |
596 | * @index: index key | |
e6145236 | 597 | * @order: key covers the 2^order indices around index |
139e5616 JW |
598 | * @item: item to insert |
599 | * | |
600 | * Insert an item into the radix tree at position @index. | |
601 | */ | |
e6145236 MW |
602 | int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, |
603 | unsigned order, void *item) | |
139e5616 JW |
604 | { |
605 | struct radix_tree_node *node; | |
606 | void **slot; | |
607 | int error; | |
608 | ||
609 | BUG_ON(radix_tree_is_indirect_ptr(item)); | |
610 | ||
e6145236 | 611 | error = __radix_tree_create(root, index, order, &node, &slot); |
139e5616 JW |
612 | if (error) |
613 | return error; | |
614 | if (*slot != NULL) | |
1da177e4 | 615 | return -EEXIST; |
139e5616 | 616 | rcu_assign_pointer(*slot, item); |
201b6264 | 617 | |
612d6c19 | 618 | if (node) { |
7b60e9ad | 619 | unsigned offset = get_slot_offset(node, slot); |
612d6c19 | 620 | node->count++; |
7b60e9ad MW |
621 | BUG_ON(tag_get(node, 0, offset)); |
622 | BUG_ON(tag_get(node, 1, offset)); | |
623 | BUG_ON(tag_get(node, 2, offset)); | |
612d6c19 | 624 | } else { |
7b60e9ad | 625 | BUG_ON(root_tags_get(root)); |
612d6c19 | 626 | } |
1da177e4 | 627 | |
1da177e4 LT |
628 | return 0; |
629 | } | |
e6145236 | 630 | EXPORT_SYMBOL(__radix_tree_insert); |
1da177e4 | 631 | |
139e5616 JW |
632 | /** |
633 | * __radix_tree_lookup - lookup an item in a radix tree | |
634 | * @root: radix tree root | |
635 | * @index: index key | |
636 | * @nodep: returns node | |
637 | * @slotp: returns slot | |
638 | * | |
639 | * Lookup and return the item at position @index in the radix | |
640 | * tree @root. | |
641 | * | |
642 | * Until there is more than one item in the tree, no nodes are | |
643 | * allocated and @root->rnode is used as a direct slot instead of | |
644 | * pointing to a node, in which case *@nodep will be NULL. | |
7cf9c2c7 | 645 | */ |
139e5616 JW |
646 | void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, |
647 | struct radix_tree_node **nodep, void ***slotp) | |
1da177e4 | 648 | { |
139e5616 | 649 | struct radix_tree_node *node, *parent; |
85829954 MW |
650 | unsigned long maxindex; |
651 | unsigned int shift; | |
139e5616 | 652 | void **slot; |
612d6c19 | 653 | |
85829954 MW |
654 | restart: |
655 | parent = NULL; | |
656 | slot = (void **)&root->rnode; | |
657 | shift = radix_tree_load_root(root, &node, &maxindex); | |
658 | if (index > maxindex) | |
1da177e4 LT |
659 | return NULL; |
660 | ||
85829954 MW |
661 | while (radix_tree_is_indirect_ptr(node)) { |
662 | unsigned offset; | |
1da177e4 | 663 | |
85829954 MW |
664 | if (node == RADIX_TREE_RETRY) |
665 | goto restart; | |
666 | parent = indirect_to_ptr(node); | |
1da177e4 | 667 | shift -= RADIX_TREE_MAP_SHIFT; |
85829954 MW |
668 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
669 | offset = radix_tree_descend(parent, &node, offset); | |
670 | slot = parent->slots + offset; | |
671 | } | |
1da177e4 | 672 | |
139e5616 JW |
673 | if (nodep) |
674 | *nodep = parent; | |
675 | if (slotp) | |
676 | *slotp = slot; | |
677 | return node; | |
b72b71c6 HS |
678 | } |
679 | ||
680 | /** | |
681 | * radix_tree_lookup_slot - lookup a slot in a radix tree | |
682 | * @root: radix tree root | |
683 | * @index: index key | |
684 | * | |
685 | * Returns: the slot corresponding to the position @index in the | |
686 | * radix tree @root. This is useful for update-if-exists operations. | |
687 | * | |
688 | * This function can be called under rcu_read_lock iff the slot is not | |
689 | * modified by radix_tree_replace_slot, otherwise it must be called | |
690 | * exclusive from other writers. Any dereference of the slot must be done | |
691 | * using radix_tree_deref_slot. | |
692 | */ | |
693 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | |
694 | { | |
139e5616 JW |
695 | void **slot; |
696 | ||
697 | if (!__radix_tree_lookup(root, index, NULL, &slot)) | |
698 | return NULL; | |
699 | return slot; | |
a4331366 | 700 | } |
a4331366 HR |
701 | EXPORT_SYMBOL(radix_tree_lookup_slot); |
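/*
 * A minimal update-if-exists sketch (illustrative only, not part of this
 * file; example_replace() is hypothetical). The caller is assumed to
 * exclude other writers, as required by radix_tree_lookup_slot(); a
 * reader wanting the old value would use radix_tree_deref_slot().
 */
static void example_replace(struct radix_tree_root *root,
			    unsigned long index, void *new_item)
{
	void **slot = radix_tree_lookup_slot(root, index);

	if (slot)
		radix_tree_replace_slot(slot, new_item);
}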
702 | ||
703 | /** | |
704 | * radix_tree_lookup - perform lookup operation on a radix tree | |
705 | * @root: radix tree root | |
706 | * @index: index key | |
707 | * | |
708 | * Lookup the item at the position @index in the radix tree @root. | |
7cf9c2c7 NP |
709 | * |
710 | * This function can be called under rcu_read_lock, however the caller | |
711 | * must manage lifetimes of leaf nodes (eg. RCU may also be used to free | |
712 | * them safely). No RCU barriers are required to access or modify the | |
713 | * returned item, however. | |
a4331366 HR |
714 | */ |
715 | void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) | |
716 | { | |
139e5616 | 717 | return __radix_tree_lookup(root, index, NULL, NULL); |
1da177e4 LT |
718 | } |
719 | EXPORT_SYMBOL(radix_tree_lookup); | |
720 | ||
721 | /** | |
722 | * radix_tree_tag_set - set a tag on a radix tree node | |
723 | * @root: radix tree root | |
724 | * @index: index key | |
725 | * @tag: tag index | |
726 | * | |
daff89f3 JC |
727 | * Set the search tag (which must be < RADIX_TREE_MAX_TAGS) |
728 | * corresponding to @index in the radix tree, from
1da177e4 LT |
729 | * the root all the way down to the leaf node. |
730 | * | |
731 | * Returns the address of the tagged item. Setting a tag on a not-present | |
732 | * item is a bug. | |
733 | */ | |
734 | void *radix_tree_tag_set(struct radix_tree_root *root, | |
daff89f3 | 735 | unsigned long index, unsigned int tag) |
1da177e4 | 736 | { |
fb969909 RZ |
737 | struct radix_tree_node *node, *parent; |
738 | unsigned long maxindex; | |
739 | unsigned int shift; | |
1da177e4 | 740 | |
fb969909 RZ |
741 | shift = radix_tree_load_root(root, &node, &maxindex); |
742 | BUG_ON(index > maxindex); | |
1da177e4 | 743 | |
fb969909 RZ |
744 | while (radix_tree_is_indirect_ptr(node)) { |
745 | unsigned offset; | |
1da177e4 | 746 | |
1da177e4 | 747 | shift -= RADIX_TREE_MAP_SHIFT; |
fb969909 RZ |
748 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
749 | ||
750 | parent = indirect_to_ptr(node); | |
751 | offset = radix_tree_descend(parent, &node, offset); | |
752 | BUG_ON(!node); | |
753 | ||
754 | if (!tag_get(parent, tag, offset)) | |
755 | tag_set(parent, tag, offset); | |
1da177e4 LT |
756 | } |
757 | ||
612d6c19 | 758 | /* set the root's tag bit */ |
fb969909 | 759 | if (!root_tag_get(root, tag)) |
612d6c19 NP |
760 | root_tag_set(root, tag); |
761 | ||
fb969909 | 762 | return node; |
1da177e4 LT |
763 | } |
764 | EXPORT_SYMBOL(radix_tree_tag_set); | |
765 | ||
766 | /** | |
767 | * radix_tree_tag_clear - clear a tag on a radix tree node | |
768 | * @root: radix tree root | |
769 | * @index: index key | |
770 | * @tag: tag index | |
771 | * | |
daff89f3 JC |
772 | * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) |
773 | * corresponding to @index in the radix tree. If | |
1da177e4 LT |
774 | * this causes the leaf node to have no tags set then clear the tag in the |
775 | * next-to-leaf node, etc. | |
776 | * | |
777 | * Returns the address of the tagged item on success, else NULL, i.e. it
778 | * has the same return value and semantics as radix_tree_lookup().
779 | */ | |
780 | void *radix_tree_tag_clear(struct radix_tree_root *root, | |
daff89f3 | 781 | unsigned long index, unsigned int tag) |
1da177e4 | 782 | { |
00f47b58 RZ |
783 | struct radix_tree_node *node, *parent; |
784 | unsigned long maxindex; | |
785 | unsigned int shift; | |
e2bdb933 | 786 | int uninitialized_var(offset); |
1da177e4 | 787 | |
00f47b58 RZ |
788 | shift = radix_tree_load_root(root, &node, &maxindex); |
789 | if (index > maxindex) | |
790 | return NULL; | |
1da177e4 | 791 | |
00f47b58 | 792 | parent = NULL; |
1da177e4 | 793 | |
00f47b58 | 794 | while (radix_tree_is_indirect_ptr(node)) { |
e2bdb933 | 795 | shift -= RADIX_TREE_MAP_SHIFT; |
1da177e4 | 796 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
00f47b58 RZ |
797 | |
798 | parent = indirect_to_ptr(node); | |
799 | offset = radix_tree_descend(parent, &node, offset); | |
1da177e4 LT |
800 | } |
801 | ||
00f47b58 | 802 | if (node == NULL) |
1da177e4 LT |
803 | goto out; |
804 | ||
00f47b58 RZ |
805 | index >>= shift; |
806 | ||
807 | while (parent) { | |
808 | if (!tag_get(parent, tag, offset)) | |
d5274261 | 809 | goto out; |
00f47b58 RZ |
810 | tag_clear(parent, tag, offset); |
811 | if (any_tag_set(parent, tag)) | |
6e954b9e | 812 | goto out; |
e2bdb933 HD |
813 | |
814 | index >>= RADIX_TREE_MAP_SHIFT; | |
815 | offset = index & RADIX_TREE_MAP_MASK; | |
00f47b58 | 816 | parent = parent->parent; |
612d6c19 NP |
817 | } |
818 | ||
819 | /* clear the root's tag bit */ | |
820 | if (root_tag_get(root, tag)) | |
821 | root_tag_clear(root, tag); | |
822 | ||
1da177e4 | 823 | out: |
00f47b58 | 824 | return node; |
1da177e4 LT |
825 | } |
826 | EXPORT_SYMBOL(radix_tree_tag_clear); | |
827 | ||
1da177e4 | 828 | /** |
32605a18 MT |
829 | * radix_tree_tag_get - get a tag on a radix tree node |
830 | * @root: radix tree root | |
831 | * @index: index key | |
daff89f3 | 832 | * @tag: tag index (< RADIX_TREE_MAX_TAGS) |
1da177e4 | 833 | * |
32605a18 | 834 | * Return values: |
1da177e4 | 835 | * |
612d6c19 NP |
836 | * 0: tag not present or not set |
837 | * 1: tag set | |
ce82653d DH |
838 | * |
839 | * Note that the return value of this function may not be relied on, even if | |
840 | * the RCU lock is held, unless tag modification and node deletion are excluded | |
841 | * from concurrency. | |
1da177e4 LT |
842 | */ |
843 | int radix_tree_tag_get(struct radix_tree_root *root, | |
daff89f3 | 844 | unsigned long index, unsigned int tag) |
1da177e4 | 845 | { |
4589ba6d RZ |
846 | struct radix_tree_node *node, *parent; |
847 | unsigned long maxindex; | |
848 | unsigned int shift; | |
1da177e4 | 849 | |
612d6c19 NP |
850 | if (!root_tag_get(root, tag)) |
851 | return 0; | |
852 | ||
4589ba6d RZ |
853 | shift = radix_tree_load_root(root, &node, &maxindex); |
854 | if (index > maxindex) | |
855 | return 0; | |
7cf9c2c7 NP |
856 | if (node == NULL) |
857 | return 0; | |
858 | ||
4589ba6d RZ |
859 | while (radix_tree_is_indirect_ptr(node)) { |
860 | int offset; | |
612d6c19 | 861 | |
4589ba6d RZ |
862 | shift -= RADIX_TREE_MAP_SHIFT; |
863 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | |
1da177e4 | 864 | |
4589ba6d RZ |
865 | parent = indirect_to_ptr(node); |
866 | offset = radix_tree_descend(parent, &node, offset); | |
1da177e4 | 867 | |
4589ba6d | 868 | if (!node) |
1da177e4 | 869 | return 0; |
4589ba6d | 870 | if (!tag_get(parent, tag, offset)) |
3fa36acb | 871 | return 0; |
4589ba6d RZ |
872 | if (node == RADIX_TREE_RETRY) |
873 | break; | |
1da177e4 | 874 | } |
4589ba6d RZ |
875 | |
876 | return 1; | |
1da177e4 LT |
877 | } |
878 | EXPORT_SYMBOL(radix_tree_tag_get); | |
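/*
 * A small illustrative sketch (not part of this file; example_mark_and_check()
 * and the tag value 0 are placeholders): tags may only be set on present
 * items and are queried per index, with the root tag acting as a fast
 * "is any tag set?" check.
 */
static bool example_mark_and_check(struct radix_tree_root *root,
				   unsigned long index)
{
	if (radix_tree_lookup(root, index))
		radix_tree_tag_set(root, index, 0);
	return radix_tree_tag_get(root, index, 0);
}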
1da177e4 | 879 | |
21ef5339 RZ |
880 | static inline void __set_iter_shift(struct radix_tree_iter *iter, |
881 | unsigned int shift) | |
882 | { | |
883 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | |
884 | iter->shift = shift; | |
885 | #endif | |
886 | } | |
887 | ||
78c1d784 KK |
888 | /** |
889 | * radix_tree_next_chunk - find next chunk of slots for iteration | |
890 | * | |
891 | * @root: radix tree root | |
892 | * @iter: iterator state | |
893 | * @flags: RADIX_TREE_ITER_* flags and tag index | |
894 | * Returns: pointer to chunk first slot, or NULL if iteration is over | |
895 | */ | |
896 | void **radix_tree_next_chunk(struct radix_tree_root *root, | |
897 | struct radix_tree_iter *iter, unsigned flags) | |
898 | { | |
899 | unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK; | |
900 | struct radix_tree_node *rnode, *node; | |
21ef5339 | 901 | unsigned long index, offset, maxindex; |
78c1d784 KK |
902 | |
903 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) | |
904 | return NULL; | |
905 | ||
906 | /* | |
907 | * Catch next_index overflow after ~0UL. iter->index never overflows | |
908 | * during iterating; it can be zero only at the beginning. | |
909 | * And we cannot overflow iter->next_index in a single step, | |
910 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. | |
fffaee36 KK |
911 | * |
912 | * This condition is also used by radix_tree_next_slot() to stop
913 | * contiguous iterating and forbid switching to the next chunk.
78c1d784 KK |
914 | */ |
915 | index = iter->next_index; | |
916 | if (!index && iter->index) | |
917 | return NULL; | |
918 | ||
21ef5339 RZ |
919 | restart: |
920 | shift = radix_tree_load_root(root, &rnode, &maxindex); | |
921 | if (index > maxindex) | |
922 | return NULL; | |
923 | ||
78c1d784 KK |
924 | if (radix_tree_is_indirect_ptr(rnode)) { |
925 | rnode = indirect_to_ptr(rnode); | |
21ef5339 | 926 | } else if (rnode) { |
78c1d784 | 927 | /* Single-slot tree */ |
21ef5339 RZ |
928 | iter->index = index; |
929 | iter->next_index = maxindex + 1; | |
78c1d784 | 930 | iter->tags = 1; |
21ef5339 | 931 | __set_iter_shift(iter, shift); |
78c1d784 KK |
932 | return (void **)&root->rnode; |
933 | } else | |
934 | return NULL; | |
935 | ||
21ef5339 | 936 | shift -= RADIX_TREE_MAP_SHIFT; |
78c1d784 KK |
937 | offset = index >> shift; |
938 | ||
78c1d784 KK |
939 | node = rnode; |
940 | while (1) { | |
e6145236 | 941 | struct radix_tree_node *slot; |
21ef5339 RZ |
942 | unsigned new_off = radix_tree_descend(node, &slot, offset); |
943 | ||
944 | if (new_off < offset) { | |
945 | offset = new_off; | |
946 | index &= ~((RADIX_TREE_MAP_SIZE << shift) - 1); | |
947 | index |= offset << shift; | |
948 | } | |
949 | ||
78c1d784 | 950 | if ((flags & RADIX_TREE_ITER_TAGGED) ? |
21ef5339 | 951 | !tag_get(node, tag, offset) : !slot) { |
78c1d784 KK |
952 | /* Hole detected */ |
953 | if (flags & RADIX_TREE_ITER_CONTIG) | |
954 | return NULL; | |
955 | ||
956 | if (flags & RADIX_TREE_ITER_TAGGED) | |
957 | offset = radix_tree_find_next_bit( | |
958 | node->tags[tag], | |
959 | RADIX_TREE_MAP_SIZE, | |
960 | offset + 1); | |
961 | else | |
962 | while (++offset < RADIX_TREE_MAP_SIZE) { | |
21ef5339 RZ |
963 | void *slot = node->slots[offset]; |
964 | if (is_sibling_entry(node, slot)) | |
965 | continue; | |
966 | if (slot) | |
78c1d784 KK |
967 | break; |
968 | } | |
969 | index &= ~((RADIX_TREE_MAP_SIZE << shift) - 1); | |
970 | index += offset << shift; | |
971 | /* Overflow after ~0UL */ | |
972 | if (!index) | |
973 | return NULL; | |
974 | if (offset == RADIX_TREE_MAP_SIZE) | |
975 | goto restart; | |
21ef5339 | 976 | slot = rcu_dereference_raw(node->slots[offset]); |
78c1d784 KK |
977 | } |
978 | ||
21ef5339 | 979 | if ((slot == NULL) || (slot == RADIX_TREE_RETRY)) |
78c1d784 | 980 | goto restart; |
e6145236 MW |
981 | if (!radix_tree_is_indirect_ptr(slot)) |
982 | break; | |
21ef5339 | 983 | |
e6145236 | 984 | node = indirect_to_ptr(slot); |
78c1d784 KK |
985 | shift -= RADIX_TREE_MAP_SHIFT; |
986 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | |
987 | } | |
988 | ||
989 | /* Update the iterator state */ | |
21ef5339 RZ |
990 | iter->index = index & ~((1 << shift) - 1); |
991 | iter->next_index = (index | ((RADIX_TREE_MAP_SIZE << shift) - 1)) + 1; | |
992 | __set_iter_shift(iter, shift); | |
78c1d784 KK |
993 | |
994 | /* Construct iter->tags bit-mask from node->tags[tag] array */ | |
995 | if (flags & RADIX_TREE_ITER_TAGGED) { | |
996 | unsigned tag_long, tag_bit; | |
997 | ||
998 | tag_long = offset / BITS_PER_LONG; | |
999 | tag_bit = offset % BITS_PER_LONG; | |
1000 | iter->tags = node->tags[tag][tag_long] >> tag_bit; | |
1001 | /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ | |
1002 | if (tag_long < RADIX_TREE_TAG_LONGS - 1) { | |
1003 | /* Pick tags from next element */ | |
1004 | if (tag_bit) | |
1005 | iter->tags |= node->tags[tag][tag_long + 1] << | |
1006 | (BITS_PER_LONG - tag_bit); | |
1007 | /* Clip chunk size, here only BITS_PER_LONG tags */ | |
1008 | iter->next_index = index + BITS_PER_LONG; | |
1009 | } | |
1010 | } | |
1011 | ||
1012 | return node->slots + offset; | |
1013 | } | |
1014 | EXPORT_SYMBOL(radix_tree_next_chunk); | |
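/*
 * radix_tree_next_chunk() is normally reached through the iteration
 * macros rather than called directly. A minimal illustrative sketch
 * (example_count() is hypothetical; the caller is assumed to hold
 * rcu_read_lock() or to otherwise exclude modification):
 */
static unsigned long example_count(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long nr = 0;

	radix_tree_for_each_slot(slot, root, &iter, 0)
		nr++;
	return nr;
}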
1015 | ||
ebf8aa44 JK |
1016 | /** |
1017 | * radix_tree_range_tag_if_tagged - for each item in given range set given | |
1018 | * tag if item has another tag set | |
1019 | * @root: radix tree root | |
1020 | * @first_indexp: pointer to a starting index of a range to scan | |
1021 | * @last_index: last index of a range to scan | |
1022 | * @nr_to_tag: maximum number items to tag | |
1023 | * @iftag: tag index to test | |
1024 | * @settag: tag index to set if tested tag is set | |
1025 | * | |
1026 | * This function scans the range of the radix tree from first_index to last_index
1027 | * (inclusive). For each item in the range that has iftag set, the function also
1028 | * sets settag. The function stops either after tagging nr_to_tag items or
1029 | * after reaching last_index. | |
1030 | * | |
144dcfc0 DC |
1031 | * The tags must be set from the leaf level only and propagated back up the |
1032 | * path to the root. We must do this so that we resolve the full path before | |
1033 | * setting any tags on intermediate nodes. If we set tags as we descend, then | |
1034 | * we can get to the leaf node and find that the index that has the iftag | |
1035 | * set is outside the range we are scanning. This results in dangling tags and
1036 | * can lead to problems with later tag operations (e.g. livelocks on lookups). | |
1037 | * | |
ebf8aa44 JK |
1038 | * The function returns number of leaves where the tag was set and sets |
1039 | * *first_indexp to the first unscanned index. | |
d5ed3a4a JK |
1040 | * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must |
1041 | * be prepared to handle that. | |
ebf8aa44 JK |
1042 | */ |
1043 | unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |
1044 | unsigned long *first_indexp, unsigned long last_index, | |
1045 | unsigned long nr_to_tag, | |
1046 | unsigned int iftag, unsigned int settag) | |
1047 | { | |
070c5ac2 MW |
1048 | struct radix_tree_node *slot, *node = NULL; |
1049 | unsigned long maxindex; | |
1050 | unsigned int shift = radix_tree_load_root(root, &slot, &maxindex); | |
144dcfc0 DC |
1051 | unsigned long tagged = 0; |
1052 | unsigned long index = *first_indexp; | |
ebf8aa44 | 1053 | |
070c5ac2 | 1054 | last_index = min(last_index, maxindex); |
ebf8aa44 JK |
1055 | if (index > last_index) |
1056 | return 0; | |
1057 | if (!nr_to_tag) | |
1058 | return 0; | |
1059 | if (!root_tag_get(root, iftag)) { | |
1060 | *first_indexp = last_index + 1; | |
1061 | return 0; | |
1062 | } | |
070c5ac2 | 1063 | if (!radix_tree_is_indirect_ptr(slot)) { |
ebf8aa44 JK |
1064 | *first_indexp = last_index + 1; |
1065 | root_tag_set(root, settag); | |
1066 | return 1; | |
1067 | } | |
1068 | ||
070c5ac2 MW |
1069 | node = indirect_to_ptr(slot); |
1070 | shift -= RADIX_TREE_MAP_SHIFT; | |
ebf8aa44 JK |
1071 | |
1072 | for (;;) { | |
e2bdb933 | 1073 | unsigned long upindex; |
070c5ac2 | 1074 | unsigned offset; |
ebf8aa44 JK |
1075 | |
1076 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | |
070c5ac2 MW |
1077 | offset = radix_tree_descend(node, &slot, offset); |
1078 | if (!slot) | |
ebf8aa44 | 1079 | goto next; |
070c5ac2 | 1080 | if (!tag_get(node, iftag, offset)) |
ebf8aa44 | 1081 | goto next; |
070c5ac2 MW |
1082 | /* Sibling slots never have tags set on them */ |
1083 | if (radix_tree_is_indirect_ptr(slot)) { | |
1084 | node = indirect_to_ptr(slot); | |
1085 | shift -= RADIX_TREE_MAP_SHIFT; | |
1086 | continue; | |
144dcfc0 DC |
1087 | } |
1088 | ||
1089 | /* tag the leaf */ | |
070c5ac2 MW |
1090 | tagged++; |
1091 | tag_set(node, settag, offset); | |
144dcfc0 | 1092 | |
070c5ac2 | 1093 | slot = node->parent; |
144dcfc0 | 1094 | /* walk back up the path tagging interior nodes */ |
070c5ac2 MW |
1095 | upindex = index >> shift; |
1096 | while (slot) { | |
e2bdb933 HD |
1097 | upindex >>= RADIX_TREE_MAP_SHIFT; |
1098 | offset = upindex & RADIX_TREE_MAP_MASK; | |
1099 | ||
144dcfc0 | 1100 | /* stop if we find a node with the tag already set */ |
070c5ac2 | 1101 | if (tag_get(slot, settag, offset)) |
144dcfc0 | 1102 | break; |
070c5ac2 MW |
1103 | tag_set(slot, settag, offset); |
1104 | slot = slot->parent; | |
ebf8aa44 | 1105 | } |
144dcfc0 | 1106 | |
070c5ac2 | 1107 | next: |
ebf8aa44 JK |
1108 | /* Go to next item at level determined by 'shift' */ |
1109 | index = ((index >> shift) + 1) << shift; | |
d5ed3a4a JK |
1110 | /* Overflow can happen when last_index is ~0UL... */ |
1111 | if (index > last_index || !index) | |
ebf8aa44 | 1112 | break; |
070c5ac2 MW |
1113 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
1114 | while (offset == 0) { | |
ebf8aa44 JK |
1115 | /* |
1116 | * We've fully scanned this node. Go up. Because | |
1117 | * last_index is guaranteed to be in the tree, what | |
1118 | * we do below cannot wander astray. | |
1119 | */ | |
070c5ac2 | 1120 | node = node->parent; |
ebf8aa44 | 1121 | shift += RADIX_TREE_MAP_SHIFT; |
070c5ac2 | 1122 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
ebf8aa44 | 1123 | } |
070c5ac2 MW |
1124 | if (is_sibling_entry(node, node->slots[offset])) |
1125 | goto next; | |
1126 | if (tagged >= nr_to_tag) | |
1127 | break; | |
ebf8aa44 JK |
1128 | } |
1129 | /* | |
ac15ee69 TO |
1130 | * There is no need to set the root tag if no slot within the range from
1131 | * *first_indexp to last_index had settag set on it.
ebf8aa44 | 1132 | */ |
ac15ee69 TO |
1133 | if (tagged > 0) |
1134 | root_tag_set(root, settag); | |
ebf8aa44 JK |
1135 | *first_indexp = index; |
1136 | ||
1137 | return tagged; | |
1138 | } | |
1139 | EXPORT_SYMBOL(radix_tree_range_tag_if_tagged); | |
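/*
 * An illustrative batched caller, modelled loosely on how page writeback
 * propagates tags (not part of this file; the lock, the batch size of 1024
 * and the tag numbers 0/1 are placeholders). 'start' is advanced by the
 * function itself, and the !start check handles wrap when end == ~0UL.
 */
static void example_retag_range(struct radix_tree_root *root, spinlock_t *lock,
				unsigned long start, unsigned long end)
{
	unsigned long tagged;

	do {
		spin_lock(lock);
		tagged = radix_tree_range_tag_if_tagged(root, &start, end,
							1024, 0, 1);
		spin_unlock(lock);
		cond_resched();
	} while (tagged >= 1024 && start);
}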
1140 | ||
1da177e4 LT |
1141 | /** |
1142 | * radix_tree_gang_lookup - perform multiple lookups on a radix tree
1143 | * @root: radix tree root | |
1144 | * @results: where the results of the lookup are placed | |
1145 | * @first_index: start the lookup from this key | |
1146 | * @max_items: place up to this many items at *results | |
1147 | * | |
1148 | * Performs an index-ascending scan of the tree for present items. Places | |
1149 | * them at *@results and returns the number of items which were placed at | |
1150 | * *@results. | |
1151 | * | |
1152 | * The implementation is naive. | |
7cf9c2c7 NP |
1153 | * |
1154 | * Like radix_tree_lookup, radix_tree_gang_lookup may be called under | |
1155 | * rcu_read_lock. In this case, rather than the returned results being | |
1156 | * an atomic snapshot of the tree at a single point in time, the semantics | |
1157 | * of an RCU protected gang lookup are as though multiple radix_tree_lookups | |
1158 | * have been issued in individual locks, and results stored in 'results'. | |
1da177e4 LT |
1159 | */ |
1160 | unsigned int | |
1161 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |
1162 | unsigned long first_index, unsigned int max_items) | |
1163 | { | |
cebbd29e KK |
1164 | struct radix_tree_iter iter; |
1165 | void **slot; | |
1166 | unsigned int ret = 0; | |
7cf9c2c7 | 1167 | |
cebbd29e | 1168 | if (unlikely(!max_items)) |
7cf9c2c7 | 1169 | return 0; |
1da177e4 | 1170 | |
cebbd29e | 1171 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
46437f9a | 1172 | results[ret] = rcu_dereference_raw(*slot); |
cebbd29e KK |
1173 | if (!results[ret]) |
1174 | continue; | |
46437f9a MW |
1175 | if (radix_tree_is_indirect_ptr(results[ret])) { |
1176 | slot = radix_tree_iter_retry(&iter); | |
1177 | continue; | |
1178 | } | |
cebbd29e | 1179 | if (++ret == max_items) |
1da177e4 | 1180 | break; |
1da177e4 | 1181 | } |
7cf9c2c7 | 1182 | |
1da177e4 LT |
1183 | return ret; |
1184 | } | |
1185 | EXPORT_SYMBOL(radix_tree_gang_lookup); | |
1186 | ||
47feff2c NP |
1187 | /** |
1188 | * radix_tree_gang_lookup_slot - perform multiple slot lookups on a radix tree
1189 | * @root: radix tree root | |
1190 | * @results: where the results of the lookup are placed | |
6328650b | 1191 | * @indices: where their indices should be placed (but usually NULL) |
47feff2c NP |
1192 | * @first_index: start the lookup from this key |
1193 | * @max_items: place up to this many items at *results | |
1194 | * | |
1195 | * Performs an index-ascending scan of the tree for present items. Places | |
1196 | * their slots at *@results and returns the number of items which were | |
1197 | * placed at *@results. | |
1198 | * | |
1199 | * The implementation is naive. | |
1200 | * | |
1201 | * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must | |
1202 | * be dereferenced with radix_tree_deref_slot, and if using only RCU | |
1203 | * protection, radix_tree_deref_slot may fail requiring a retry. | |
1204 | */ | |
1205 | unsigned int | |
6328650b HD |
1206 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, |
1207 | void ***results, unsigned long *indices, | |
47feff2c NP |
1208 | unsigned long first_index, unsigned int max_items) |
1209 | { | |
cebbd29e KK |
1210 | struct radix_tree_iter iter; |
1211 | void **slot; | |
1212 | unsigned int ret = 0; | |
47feff2c | 1213 | |
cebbd29e | 1214 | if (unlikely(!max_items)) |
47feff2c NP |
1215 | return 0; |
1216 | ||
cebbd29e KK |
1217 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
1218 | results[ret] = slot; | |
6328650b | 1219 | if (indices) |
cebbd29e KK |
1220 | indices[ret] = iter.index; |
1221 | if (++ret == max_items) | |
47feff2c | 1222 | break; |
47feff2c NP |
1223 | } |
1224 | ||
1225 | return ret; | |
1226 | } | |
1227 | EXPORT_SYMBOL(radix_tree_gang_lookup_slot); | |
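/*
 * An illustrative batched scan (not part of this file; example_scan() is
 * hypothetical). The caller is assumed to hold rcu_read_lock() and, if
 * relying only on RCU protection, to cope with radix_tree_deref_retry().
 */
static void example_scan(struct radix_tree_root *root)
{
	void **slots[16];
	unsigned long indices[16];
	unsigned long start = 0;
	unsigned int i, nr;

	do {
		nr = radix_tree_gang_lookup_slot(root, slots, indices,
						 start, 16);
		for (i = 0; i < nr; i++) {
			void *item = radix_tree_deref_slot(slots[i]);

			(void)item;	/* process item at indices[i] */
		}
		if (nr)
			start = indices[nr - 1] + 1;
		/* start == 0 here means we wrapped past ~0UL: stop */
	} while (nr == 16 && start);
}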
1228 | ||
1da177e4 LT |
1229 | /** |
1230 | * radix_tree_gang_lookup_tag - perform multiple lookups on a radix tree
1231 | * based on a tag | |
1232 | * @root: radix tree root | |
1233 | * @results: where the results of the lookup are placed | |
1234 | * @first_index: start the lookup from this key | |
1235 | * @max_items: place up to this many items at *results | |
daff89f3 | 1236 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) |
1da177e4 LT |
1237 | * |
1238 | * Performs an index-ascending scan of the tree for present items which | |
1239 | * have the tag indexed by @tag set. Places the items at *@results and | |
1240 | * returns the number of items which were placed at *@results. | |
1241 | */ | |
1242 | unsigned int | |
1243 | radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |
daff89f3 JC |
1244 | unsigned long first_index, unsigned int max_items, |
1245 | unsigned int tag) | |
1da177e4 | 1246 | { |
cebbd29e KK |
1247 | struct radix_tree_iter iter; |
1248 | void **slot; | |
1249 | unsigned int ret = 0; | |
612d6c19 | 1250 | |
cebbd29e | 1251 | if (unlikely(!max_items)) |
7cf9c2c7 NP |
1252 | return 0; |
1253 | ||
cebbd29e | 1254 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
46437f9a | 1255 | results[ret] = rcu_dereference_raw(*slot); |
cebbd29e KK |
1256 | if (!results[ret]) |
1257 | continue; | |
46437f9a MW |
1258 | if (radix_tree_is_indirect_ptr(results[ret])) { |
1259 | slot = radix_tree_iter_retry(&iter); | |
1260 | continue; | |
1261 | } | |
cebbd29e | 1262 | if (++ret == max_items) |
1da177e4 | 1263 | break; |
1da177e4 | 1264 | } |
7cf9c2c7 | 1265 | |
1da177e4 LT |
1266 | return ret; |
1267 | } | |
1268 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); | |
1269 | ||
47feff2c NP |
1270 | /** |
1271 | * radix_tree_gang_lookup_tag_slot - perform multiple slot lookups on a
1272 | * radix tree based on a tag | |
1273 | * @root: radix tree root | |
1274 | * @results: where the results of the lookup are placed | |
1275 | * @first_index: start the lookup from this key | |
1276 | * @max_items: place up to this many items at *results | |
1277 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) | |
1278 | * | |
1279 | * Performs an index-ascending scan of the tree for present items which | |
1280 | * have the tag indexed by @tag set. Places the slots at *@results and | |
1281 | * returns the number of slots which were placed at *@results. | |
1282 | */ | |
1283 | unsigned int | |
1284 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | |
1285 | unsigned long first_index, unsigned int max_items, | |
1286 | unsigned int tag) | |
1287 | { | |
cebbd29e KK |
1288 | struct radix_tree_iter iter; |
1289 | void **slot; | |
1290 | unsigned int ret = 0; | |
47feff2c | 1291 | |
cebbd29e | 1292 | if (unlikely(!max_items)) |
47feff2c NP |
1293 | return 0; |
1294 | ||
cebbd29e KK |
1295 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
1296 | results[ret] = slot; | |
1297 | if (++ret == max_items) | |
47feff2c | 1298 | break; |
47feff2c NP |
1299 | } |
1300 | ||
1301 | return ret; | |
1302 | } | |
1303 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | |
1304 | ||
e504f3fd HD |
1305 | #if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP) |
1306 | #include <linux/sched.h> /* for cond_resched() */ | |
1307 | ||
0a2efc6c MW |
1308 | struct locate_info { |
1309 | unsigned long found_index; | |
1310 | bool stop; | |
1311 | }; | |
1312 | ||
e504f3fd HD |
1313 | /* |
1314 | * This linear search is at present only useful to shmem_unuse_inode(). | |
1315 | */ | |
1316 | static unsigned long __locate(struct radix_tree_node *slot, void *item, | |
0a2efc6c | 1317 | unsigned long index, struct locate_info *info) |
e504f3fd HD |
1318 | { |
1319 | unsigned int shift, height; | |
1320 | unsigned long i; | |
1321 | ||
449dd698 | 1322 | height = slot->path & RADIX_TREE_HEIGHT_MASK; |
0a2efc6c | 1323 | shift = height * RADIX_TREE_MAP_SHIFT; |
e504f3fd | 1324 | |
0a2efc6c MW |
1325 | do { |
1326 | shift -= RADIX_TREE_MAP_SHIFT; | |
e504f3fd | 1327 | |
0a2efc6c MW |
1328 | for (i = (index >> shift) & RADIX_TREE_MAP_MASK; |
1329 | i < RADIX_TREE_MAP_SIZE; | |
1330 | i++, index += (1UL << shift)) { | |
1331 | struct radix_tree_node *node = | |
1332 | rcu_dereference_raw(slot->slots[i]); | |
1333 | if (node == RADIX_TREE_RETRY) | |
1334 | goto out; | |
1335 | if (!radix_tree_is_indirect_ptr(node)) { | |
1336 | if (node == item) { | |
1337 | info->found_index = index; | |
1338 | info->stop = true; | |
1339 | goto out; | |
1340 | } | |
1341 | continue; | |
e6145236 | 1342 | } |
0a2efc6c MW |
1343 | node = indirect_to_ptr(node); |
1344 | if (is_sibling_entry(slot, node)) | |
1345 | continue; | |
1346 | slot = node; | |
1347 | break; | |
e6145236 | 1348 | } |
0a2efc6c MW |
1349 | if (i == RADIX_TREE_MAP_SIZE) |
1350 | break; | |
1351 | } while (shift); | |
e504f3fd | 1352 | |
e504f3fd | 1353 | out: |
0a2efc6c MW |
1354 | if ((index == 0) && (i == RADIX_TREE_MAP_SIZE)) |
1355 | info->stop = true; | |
e504f3fd HD |
1356 | return index; |
1357 | } | |
1358 | ||
1359 | /** | |
1360 | * radix_tree_locate_item - search through radix tree for item | |
1361 | * @root: radix tree root | |
1362 | * @item: item to be found | |
1363 | * | |
1364 | * Returns index where item was found, or -1 if not found. | |
1365 | * Caller must hold no lock (since this time-consuming function needs | |
1366 | * to be preemptible), and must check afterwards if item is still there. | |
1367 | */ | |
1368 | unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | |
1369 | { | |
1370 | struct radix_tree_node *node; | |
1371 | unsigned long max_index; | |
1372 | unsigned long cur_index = 0; | |
0a2efc6c MW |
1373 | struct locate_info info = { |
1374 | .found_index = -1, | |
1375 | .stop = false, | |
1376 | }; | |
e504f3fd HD |
1377 | |
1378 | do { | |
1379 | rcu_read_lock(); | |
1380 | node = rcu_dereference_raw(root->rnode); | |
1381 | if (!radix_tree_is_indirect_ptr(node)) { | |
1382 | rcu_read_unlock(); | |
1383 | if (node == item) | |
0a2efc6c | 1384 | info.found_index = 0; |
e504f3fd HD |
1385 | break; |
1386 | } | |
1387 | ||
1388 | node = indirect_to_ptr(node); | |
0a2efc6c MW |
1389 | |
1390 | max_index = node_maxindex(node); | |
5f30fc94 HD |
1391 | if (cur_index > max_index) { |
1392 | rcu_read_unlock(); | |
e504f3fd | 1393 | break; |
5f30fc94 | 1394 | } |
e504f3fd | 1395 | |
0a2efc6c | 1396 | cur_index = __locate(node, item, cur_index, &info); |
e504f3fd HD |
1397 | rcu_read_unlock(); |
1398 | cond_resched(); | |
0a2efc6c | 1399 | } while (!info.stop && cur_index <= max_index); |
e504f3fd | 1400 | |
0a2efc6c | 1401 | return info.found_index; |
e504f3fd HD |
1402 | } |
1403 | #else | |
1404 | unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | |
1405 | { | |
1406 | return -1; | |
1407 | } | |
1408 | #endif /* CONFIG_SHMEM && CONFIG_SWAP */ | |
47feff2c | 1409 | |
a5f51c96 NP |
1410 | /** |
1411 | * radix_tree_shrink - shrink the height of a radix tree to the minimum
1412 | * @root: radix tree root
1413 | */ | |
1414 | static inline void radix_tree_shrink(struct radix_tree_root *root) | |
1415 | { | |
1416 | /* try to shrink tree height */ | |
c0bc9875 | 1417 | while (root->height > 0) { |
a5f51c96 | 1418 | struct radix_tree_node *to_free = root->rnode; |
e2bdb933 | 1419 | struct radix_tree_node *slot; |
a5f51c96 | 1420 | |
c0bc9875 | 1421 | BUG_ON(!radix_tree_is_indirect_ptr(to_free)); |
27d20fdd | 1422 | to_free = indirect_to_ptr(to_free); |
c0bc9875 NP |
1423 | |
1424 | /* | |
1425 | * If the candidate node has more than one child, or its child
e6145236 MW |
1426 | * is not at the leftmost slot, or it is a multiorder entry, |
1427 | * we cannot shrink. | |
c0bc9875 NP |
1428 | */ |
1429 | if (to_free->count != 1) | |
1430 | break; | |
339e6353 MW |
1431 | slot = to_free->slots[0]; |
1432 | if (!slot) | |
c0bc9875 | 1433 | break; |
afe0e395 MW |
1434 | if (!radix_tree_is_indirect_ptr(slot) && (root->height > 1)) |
1435 | break; | |
1436 | ||
1437 | if (radix_tree_is_indirect_ptr(slot)) { | |
1438 | slot = indirect_to_ptr(slot); | |
1439 | slot->parent = NULL; | |
1440 | slot = ptr_to_indirect(slot); | |
1441 | } | |
c0bc9875 | 1442 | |
7cf9c2c7 NP |
1443 | /* |
1444 | * We don't need rcu_assign_pointer(), since we are simply | |
27d20fdd NP |
1445 | * moving the node from one part of the tree to another: if it |
1446 | * was safe to dereference the old pointer to it | |
7cf9c2c7 | 1447 | * (to_free->slots[0]), it will be safe to dereference the new |
27d20fdd | 1448 | * one (root->rnode) as far as dependent read barriers go. |
7cf9c2c7 | 1449 | */ |
e2bdb933 | 1450 | root->rnode = slot; |
a5f51c96 | 1451 | root->height--; |
27d20fdd NP |
1452 | |
1453 | /* | |
1454 | * We have a dilemma here. The node's slot[0] must not be | |
1455 | * NULLed in case there are concurrent lookups expecting to | |
1456 | * find the item. However if this was a bottom-level node, | |
1457 | * then it may be subject to the slot pointer being visible | |
1458 | * to callers dereferencing it. If item corresponding to | |
1459 | * slot[0] is subsequently deleted, these callers would expect | |
1460 | * their slot to become empty sooner or later. | |
1461 | * | |
1462 | * For example, lockless pagecache will look up a slot, deref | |
1463 | * the page pointer, and if the page has a zero refcount it means | 
1464 | * it was concurrently deleted from pagecache, so try the deref | 
1465 | * again. Fortunately there is already a requirement for logic | |
1466 | * to retry the entire slot lookup -- the indirect pointer | |
1467 | * problem (replacing direct root node with an indirect pointer | |
1468 | * also results in a stale slot). So tag the slot as indirect | |
1469 | * to force callers to retry. | |
1470 | */ | |
afe0e395 MW |
1471 | if (!radix_tree_is_indirect_ptr(slot)) |
1472 | to_free->slots[0] = RADIX_TREE_RETRY; | |
27d20fdd | 1473 | |
a5f51c96 NP |
1474 | radix_tree_node_free(to_free); |
1475 | } | |
1476 | } | |
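The retry convention the comment above relies on, as a hedged reader-side sketch: a lockless lookup dereferences its slot with radix_tree_deref_slot() and restarts the whole lookup when radix_tree_deref_retry() reports an indirect value (such as RADIX_TREE_RETRY). This mirrors the lockless pagecache pattern but is illustrative only:

	static void *lockless_lookup(struct radix_tree_root *root, unsigned long index)
	{
		void **slot;
		void *entry;

		rcu_read_lock();
	repeat:
		entry = NULL;
		slot = radix_tree_lookup_slot(root, index);
		if (slot) {
			entry = radix_tree_deref_slot(slot);
			/* Stale slot (e.g. after a shrink): redo the lookup. */
			if (radix_tree_deref_retry(entry))
				goto repeat;
		}
		rcu_read_unlock();
		return entry;
	}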
1477 | ||
139e5616 JW |
1478 | /** |
1479 | * __radix_tree_delete_node - try to free node after clearing a slot | |
1480 | * @root: radix tree root | |
139e5616 JW |
1481 | * @node: node containing the slot that was cleared | 
1482 | * | 
1483 | * After clearing a slot in @node of the radix tree rooted | 
1484 | * at @root, call this function to attempt freeing the | 
1485 | * node and shrinking the tree. | |
1486 | * | |
1487 | * Returns %true if @node was freed, %false otherwise. | |
1488 | */ | |
449dd698 | 1489 | bool __radix_tree_delete_node(struct radix_tree_root *root, |
139e5616 JW |
1490 | struct radix_tree_node *node) |
1491 | { | |
1492 | bool deleted = false; | |
1493 | ||
1494 | do { | |
1495 | struct radix_tree_node *parent; | |
1496 | ||
1497 | if (node->count) { | |
1498 | if (node == indirect_to_ptr(root->rnode)) { | |
1499 | radix_tree_shrink(root); | |
1500 | if (root->height == 0) | |
1501 | deleted = true; | |
1502 | } | |
1503 | return deleted; | |
1504 | } | |
1505 | ||
1506 | parent = node->parent; | |
1507 | if (parent) { | |
449dd698 | 1508 | unsigned int offset; |
139e5616 | 1509 | |
449dd698 JW |
1510 | offset = node->path >> RADIX_TREE_HEIGHT_SHIFT; |
1511 | parent->slots[offset] = NULL; | |
139e5616 JW |
1512 | parent->count--; |
1513 | } else { | |
1514 | root_tag_clear_all(root); | |
1515 | root->height = 0; | |
1516 | root->rnode = NULL; | |
1517 | } | |
1518 | ||
1519 | radix_tree_node_free(node); | |
1520 | deleted = true; | |
1521 | ||
1522 | node = parent; | |
1523 | } while (node); | |
1524 | ||
1525 | return deleted; | |
1526 | } | |
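A hedged sketch of the calling pattern (the same one radix_tree_delete_item() uses below): look the entry up with __radix_tree_lookup(), clear its slot by hand, then let __radix_tree_delete_node() reap empty nodes and shrink the tree. Tag clearing, locking and the direct-entry case are simplified away here:

	static void clear_slot_and_reap(struct radix_tree_root *root,
					unsigned long index)
	{
		struct radix_tree_node *node;
		void **slot;

		if (!__radix_tree_lookup(root, index, &node, &slot))
			return;
		if (!node)	/* entry stored directly in root->rnode; */
			return;	/* handled as in radix_tree_delete_item() */
		radix_tree_replace_slot(slot, NULL);
		node->count--;
		__radix_tree_delete_node(root, node);
	}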
1527 | ||
57578c2e MW |
1528 | static inline void delete_sibling_entries(struct radix_tree_node *node, |
1529 | void *ptr, unsigned offset) | |
1530 | { | |
1531 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | |
1532 | int i; | |
1533 | for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { | |
1534 | if (node->slots[offset + i] != ptr) | |
1535 | break; | |
1536 | node->slots[offset + i] = NULL; | |
1537 | node->count--; | |
1538 | } | |
1539 | #endif | |
1540 | } | |
1541 | ||
1da177e4 | 1542 | /** |
53c59f26 | 1543 | * radix_tree_delete_item - delete an item from a radix tree |
1da177e4 LT |
1544 | * @root: radix tree root |
1545 | * @index: index key | |
53c59f26 | 1546 | * @item: expected item |
1da177e4 | 1547 | * |
53c59f26 | 1548 | * Remove @item at @index from the radix tree rooted at @root. |
1da177e4 | 1549 | * |
53c59f26 JW |
1550 | * Returns the address of the deleted item, or NULL if it was not present |
1551 | * or the entry at the given @index was not @item. | |
1da177e4 | 1552 | */ |
53c59f26 JW |
1553 | void *radix_tree_delete_item(struct radix_tree_root *root, |
1554 | unsigned long index, void *item) | |
1da177e4 | 1555 | { |
139e5616 | 1556 | struct radix_tree_node *node; |
57578c2e | 1557 | unsigned int offset; |
139e5616 JW |
1558 | void **slot; |
1559 | void *entry; | |
d5274261 | 1560 | int tag; |
1da177e4 | 1561 | |
139e5616 JW |
1562 | entry = __radix_tree_lookup(root, index, &node, &slot); |
1563 | if (!entry) | |
1564 | return NULL; | |
1da177e4 | 1565 | |
139e5616 JW |
1566 | if (item && entry != item) |
1567 | return NULL; | |
1568 | ||
1569 | if (!node) { | |
612d6c19 NP |
1570 | root_tag_clear_all(root); |
1571 | root->rnode = NULL; | |
139e5616 | 1572 | return entry; |
612d6c19 | 1573 | } |
1da177e4 | 1574 | |
29e0967c | 1575 | offset = get_slot_offset(node, slot); |
53c59f26 | 1576 | |
1da177e4 | 1577 | /* |
e2bdb933 HD |
1578 | * Clear all tags associated with the item to be deleted. |
1579 | * Clearing them one by one is inefficient, but tags are seldom set. | 
1da177e4 | 1580 | */ |
daff89f3 | 1581 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { |
e2bdb933 | 1582 | if (tag_get(node, tag, offset)) |
612d6c19 | 1583 | radix_tree_tag_clear(root, index, tag); |
d5274261 | 1584 | } |
1da177e4 | 1585 | |
57578c2e | 1586 | delete_sibling_entries(node, ptr_to_indirect(slot), offset); |
139e5616 JW |
1587 | node->slots[offset] = NULL; |
1588 | node->count--; | |
e2bdb933 | 1589 | |
449dd698 | 1590 | __radix_tree_delete_node(root, node); |
612d6c19 | 1591 | |
139e5616 | 1592 | return entry; |
1da177e4 | 1593 | } |
53c59f26 JW |
1594 | EXPORT_SYMBOL(radix_tree_delete_item); |
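A small usage sketch: with @item supplied, the call becomes a conditional delete that removes the entry only if it has not been replaced in the meantime. The helper name is illustrative and locking is the caller's responsibility:

	/* Returns true if @old was still present at @index and is now gone. */
	static bool remove_if_unchanged(struct radix_tree_root *root,
					unsigned long index, void *old)
	{
		return radix_tree_delete_item(root, index, old) == old;
	}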
1595 | ||
1596 | /** | |
1597 | * radix_tree_delete - delete an item from a radix tree | |
1598 | * @root: radix tree root | |
1599 | * @index: index key | |
1600 | * | |
1601 | * Remove the item at @index from the radix tree rooted at @root. | |
1602 | * | |
1603 | * Returns the address of the deleted item, or NULL if it was not present. | |
1604 | */ | |
1605 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |
1606 | { | |
1607 | return radix_tree_delete_item(root, index, NULL); | |
1608 | } | |
1da177e4 LT |
1609 | EXPORT_SYMBOL(radix_tree_delete); |
1610 | ||
1611 | /** | |
1612 | * radix_tree_tagged - test whether any items in the tree are tagged | |
1613 | * @root: radix tree root | |
1614 | * @tag: tag to test | |
1615 | */ | |
daff89f3 | 1616 | int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) |
1da177e4 | 1617 | { |
612d6c19 | 1618 | return root_tag_get(root, tag); |
1da177e4 LT |
1619 | } |
1620 | EXPORT_SYMBOL(radix_tree_tagged); | |
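A brief sketch of how the root tags answer this query cheaply: set a tag on an existing entry with radix_tree_tag_set(), then radix_tree_tagged() reports in O(1) whether any entry carries that tag. MY_TAG and start_scan() are illustrative; the tag number must be below RADIX_TREE_MAX_TAGS:

	#define MY_TAG	0	/* illustrative tag, < RADIX_TREE_MAX_TAGS */

	static void mark_and_check(struct radix_tree_root *root, unsigned long index)
	{
		/* The entry at @index must already exist in the tree. */
		radix_tree_tag_set(root, index, MY_TAG);
		if (radix_tree_tagged(root, MY_TAG))
			start_scan(root);	/* illustrative */
	}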
1621 | ||
1622 | static void | |
449dd698 | 1623 | radix_tree_node_ctor(void *arg) |
1da177e4 | 1624 | { |
449dd698 JW |
1625 | struct radix_tree_node *node = arg; |
1626 | ||
1627 | memset(node, 0, sizeof(*node)); | |
1628 | INIT_LIST_HEAD(&node->private_list); | |
1da177e4 LT |
1629 | } |
1630 | ||
1631 | static __init unsigned long __maxindex(unsigned int height) | |
1632 | { | |
430d275a PL |
1633 | unsigned int width = height * RADIX_TREE_MAP_SHIFT; |
1634 | int shift = RADIX_TREE_INDEX_BITS - width; | |
1635 | ||
1636 | if (shift < 0) | |
1637 | return ~0UL; | |
1638 | if (shift >= BITS_PER_LONG) | |
1639 | return 0UL; | |
1640 | return ~0UL >> shift; | |
1da177e4 LT |
1641 | } |
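A worked example of the values this feeds into height_to_maxindex[], assuming the common RADIX_TREE_MAP_SHIFT of 6 and BITS_PER_LONG of 64 (both are configuration-dependent):

	__maxindex(0):  shift = 64 >= BITS_PER_LONG -> 0      (root holds a single entry)
	__maxindex(1):  shift = 58                  -> 63     (2^6 - 1)
	__maxindex(2):  shift = 52                  -> 4095   (2^12 - 1)
	__maxindex(11): shift = -2 < 0              -> ~0UL   (covers the full index space)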
1642 | ||
1643 | static __init void radix_tree_init_maxindex(void) | |
1644 | { | |
1645 | unsigned int i; | |
1646 | ||
1647 | for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++) | |
1648 | height_to_maxindex[i] = __maxindex(i); | |
1649 | } | |
1650 | ||
1da177e4 LT |
1651 | static int radix_tree_callback(struct notifier_block *nfb, |
1652 | unsigned long action, | |
1653 | void *hcpu) | |
1654 | { | |
1655 | int cpu = (long)hcpu; | |
1656 | struct radix_tree_preload *rtp; | |
9d2a8da0 | 1657 | struct radix_tree_node *node; |
1da177e4 LT |
1658 | |
1659 | /* Free per-cpu pool of preloaded nodes */ | 
8bb78442 | 1660 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { |
1da177e4 LT |
1661 | rtp = &per_cpu(radix_tree_preloads, cpu); |
1662 | while (rtp->nr) { | |
9d2a8da0 KS |
1663 | node = rtp->nodes; |
1664 | rtp->nodes = node->private_data; | |
1665 | kmem_cache_free(radix_tree_node_cachep, node); | |
1666 | rtp->nr--; | |
1da177e4 LT |
1667 | } |
1668 | } | |
1669 | return NOTIFY_OK; | |
1670 | } | |
1da177e4 LT |
1671 | |
1672 | void __init radix_tree_init(void) | |
1673 | { | |
1674 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", | |
1675 | sizeof(struct radix_tree_node), 0, | |
488514d1 CL |
1676 | SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, |
1677 | radix_tree_node_ctor); | |
1da177e4 LT |
1678 | radix_tree_init_maxindex(); |
1679 | hotcpu_notifier(radix_tree_callback, 0); | |
1680 | } |
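radix_tree_init() only sets up the node cache and the maxindex table; a typical user then pairs the preload API with an insert under its own lock. A hedged sketch with my_tree and my_lock as illustrative names (a non-blocking gfp mask such as GFP_ATOMIC in the tree declaration is what makes a locked insert fall back to the preloaded per-cpu nodes):

	static RADIX_TREE(my_tree, GFP_ATOMIC);		/* illustrative tree */
	static DEFINE_SPINLOCK(my_lock);		/* illustrative lock */

	static int store_item(unsigned long index, void *item)
	{
		int err;

		/* Allocate nodes up front, while we are still allowed to sleep. */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			return err;

		spin_lock(&my_lock);
		err = radix_tree_insert(&my_tree, index, item);
		spin_unlock(&my_lock);

		radix_tree_preload_end();	/* re-enables preemption */
		return err;
	}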