/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/preempt.h>		/* in_interrupt() */


/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
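
/*
 * For example, with the default RADIX_TREE_MAP_SHIFT of 6 on a 64-bit
 * machine, RADIX_TREE_MAX_PATH is DIV_ROUND_UP(64, 6) == 11, so each CPU
 * preloads up to 2 * 11 - 1 == 21 nodes.
 */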

/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->private_data points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	node_to_entry(NULL)
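
/*
 * RADIX_TREE_RETRY is stored into a slot of a node that has been shrunk
 * out of the tree (see radix_tree_shrink() below); an RCU lookup that
 * finds it restarts from the root.
 */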
75 | ||
76 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | |
77 | /* Sibling slots point directly to another slot in the same node */ | |
78 | static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) | |
79 | { | |
80 | void **ptr = node; | |
81 | return (parent->slots <= ptr) && | |
82 | (ptr < parent->slots + RADIX_TREE_MAP_SIZE); | |
83 | } | |
84 | #else | |
85 | static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) | |
86 | { | |
87 | return false; | |
88 | } | |
89 | #endif | |
90 | ||
91 | static inline unsigned long get_slot_offset(struct radix_tree_node *parent, | |
92 | void **slot) | |
93 | { | |
94 | return slot - parent->slots; | |
95 | } | |
96 | ||
97 | static unsigned radix_tree_descend(struct radix_tree_node *parent, | |
98 | struct radix_tree_node **nodep, unsigned offset) | |
99 | { | |
100 | void **entry = rcu_dereference_raw(parent->slots[offset]); | |
101 | ||
102 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | |
103 | if (radix_tree_is_internal_node(entry)) { | |
104 | unsigned long siboff = get_slot_offset(parent, entry); | |
105 | if (siboff < RADIX_TREE_MAP_SIZE) { | |
106 | offset = siboff; | |
107 | entry = rcu_dereference_raw(parent->slots[offset]); | |
108 | } | |
109 | } | |
110 | #endif | |
111 | ||
112 | *nodep = (void *)entry; | |
113 | return offset; | |
114 | } | |
115 | ||
116 | static inline gfp_t root_gfp_mask(struct radix_tree_root *root) | |
117 | { | |
118 | return root->gfp_mask & __GFP_BITS_MASK; | |
119 | } | |
120 | ||
121 | static inline void tag_set(struct radix_tree_node *node, unsigned int tag, | |
122 | int offset) | |
123 | { | |
124 | __set_bit(offset, node->tags[tag]); | |
125 | } | |
126 | ||
127 | static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, | |
128 | int offset) | |
129 | { | |
130 | __clear_bit(offset, node->tags[tag]); | |
131 | } | |
132 | ||
133 | static inline int tag_get(struct radix_tree_node *node, unsigned int tag, | |
134 | int offset) | |
135 | { | |
136 | return test_bit(offset, node->tags[tag]); | |
137 | } | |
138 | ||
139 | static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) | |
140 | { | |
141 | root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); | |
142 | } | |
143 | ||
144 | static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) | |
145 | { | |
146 | root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); | |
147 | } | |
148 | ||
149 | static inline void root_tag_clear_all(struct radix_tree_root *root) | |
150 | { | |
151 | root->gfp_mask &= __GFP_BITS_MASK; | |
152 | } | |
153 | ||
154 | static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) | |
155 | { | |
156 | return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); | |
157 | } | |
158 | ||
159 | static inline unsigned root_tags_get(struct radix_tree_root *root) | |
160 | { | |
161 | return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT; | |
162 | } | |
163 | ||
164 | /* | |
165 | * Returns 1 if any slot in the node has this tag set. | |
166 | * Otherwise returns 0. | |
167 | */ | |
168 | static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) | |
169 | { | |
170 | unsigned idx; | |
171 | for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { | |
172 | if (node->tags[tag][idx]) | |
173 | return 1; | |
174 | } | |
175 | return 0; | |
176 | } | |
177 | ||
178 | /** | |
179 | * radix_tree_find_next_bit - find the next set bit in a memory region | |
180 | * | |
181 | * @addr: The address to base the search on | |
182 | * @size: The bitmap size in bits | |
183 | * @offset: The bitnumber to start searching at | |
184 | * | |
185 | * Unrollable variant of find_next_bit() for constant size arrays. | |
186 | * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero. | |
187 | * Returns next bit offset, or size if nothing found. | |
188 | */ | |
189 | static __always_inline unsigned long | |
190 | radix_tree_find_next_bit(const unsigned long *addr, | |
191 | unsigned long size, unsigned long offset) | |
192 | { | |
193 | if (!__builtin_constant_p(size)) | |
194 | return find_next_bit(addr, size, offset); | |
195 | ||
196 | if (offset < size) { | |
197 | unsigned long tmp; | |
198 | ||
199 | addr += offset / BITS_PER_LONG; | |
200 | tmp = *addr >> (offset % BITS_PER_LONG); | |
201 | if (tmp) | |
202 | return __ffs(tmp) + offset; | |
203 | offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1); | |
204 | while (offset < size) { | |
205 | tmp = *++addr; | |
206 | if (tmp) | |
207 | return __ffs(tmp) + offset; | |
208 | offset += BITS_PER_LONG; | |
209 | } | |
210 | } | |
211 | return size; | |
212 | } | |
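
/*
 * Worked example (illustrative): with BITS_PER_LONG == 64,
 * addr[] == { 0, 0x4 }, size == 128 and offset == 0, the first word is
 * empty, the scan advances to addr[1], and __ffs(0x4) + 64 returns
 * bit 66.
 */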

#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
	unsigned long i;

	pr_debug("radix node: %p offset %d tags %lx %lx %lx shift %d count %d parent %p\n",
		node, node->offset,
		node->tags[0][0], node->tags[1][0], node->tags[2][0],
		node->shift, node->count, node->parent);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		unsigned long first = index | (i << node->shift);
		unsigned long last = first | ((1UL << node->shift) - 1);
		void *entry = node->slots[i];
		if (!entry)
			continue;
		if (is_sibling_entry(node, entry)) {
			pr_debug("radix sblng %p offset %ld val %p indices %ld-%ld\n",
					entry, i,
					*(void **)entry_to_node(entry),
					first, last);
		} else if (!radix_tree_is_internal_node(entry)) {
			pr_debug("radix entry %p offset %ld indices %ld-%ld\n",
					entry, i, first, last);
		} else {
			dump_node(entry_to_node(entry), first);
		}
	}
}

/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
	pr_debug("radix root: %p rnode %p tags %x\n",
			root, root->rnode,
			root->gfp_mask >> __GFP_BITS_SHIFT);
	if (!radix_tree_is_internal_node(root->rnode))
		return;
	dump_node(entry_to_node(root->rnode), 0);
}
#endif

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
	struct radix_tree_node *ret = NULL;
	gfp_t gfp_mask = root_gfp_mask(root);

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->private_data;
			ret->private_data = NULL;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep,
			       gfp_mask | __GFP_ACCOUNT);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	return ret;
}

static void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);
	int i;

	/*
	 * must only free zeroed nodes into the slab. radix_tree_shrink
	 * can leave us with a non-NULL entry in the first slot, so clear
	 * that here to make sure.
	 */
	for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
		tag_clear(node, i, 0);

	node->slots[0] = NULL;
	node->count = 0;

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static int __radix_tree_preload(gfp_t gfp_mask)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
			node->private_data = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on nonsensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask);
}
EXPORT_SYMBOL(radix_tree_preload);
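
/*
 * Typical calling convention (a sketch, not taken from this file; see
 * radix_tree_preload_end() in <linux/radix-tree.h>, which re-enables
 * preemption):
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock_irq(&mapping->tree_lock);
 *	error = radix_tree_insert(&mapping->page_tree, index, page);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	radix_tree_preload_end();
 */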

/*
 * Same as radix_tree_preload(), except we don't guarantee that preloading
 * happens; we do it if we decide it helps.  On success, return zero with
 * preemption disabled.  On error, return -ENOMEM with preemption not
 * disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}
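
/*
 * For example, with RADIX_TREE_MAP_SHIFT == 6 (so RADIX_TREE_MAP_SIZE ==
 * 64), shift_maxindex(0) == 63 for a leaf node and shift_maxindex(6) ==
 * 4095 for the node one level above it.
 */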

static inline unsigned long node_maxindex(struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

static unsigned radix_tree_load_root(struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root,
				unsigned long index, unsigned int shift)
{
	struct radix_tree_node *slot;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be.  */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	slot = root->rnode;
	if (!slot)
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(root);

		if (!node)
			return -ENOMEM;

		/* Propagate the aggregated tag info into the new root */
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
			if (root_tag_get(root, tag))
				tag_set(node, tag, 0);
		}

		BUG_ON(shift > BITS_PER_LONG);
		node->shift = shift;
		node->offset = 0;
		node->count = 1;
		node->parent = NULL;
		if (radix_tree_is_internal_node(slot))
			entry_to_node(slot)->parent = node;
		node->slots[0] = slot;
		slot = node_to_entry(node);
		rcu_assign_pointer(root->rnode, slot);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}
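
/*
 * Example (illustrative, assuming RADIX_TREE_MAP_SHIFT == 6): a tree whose
 * root node has shift 0 covers indices 0-63.  Extending it for index 4096
 * pushes two new nodes above the old root, with shifts 6 and 12, after
 * which the tree covers indices 0-262143.
 */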

/**
 * __radix_tree_create - create a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 * @order: index occupies 2^order aligned slots
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Create, if necessary, and return the node and slot for an item
 * at position @index in the radix tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->rnode is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 *
 * Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp)
{
	struct radix_tree_node *node = NULL, *slot;
	unsigned long maxindex;
	unsigned int shift, offset;
	unsigned long max = index | ((1UL << order) - 1);

	shift = radix_tree_load_root(root, &slot, &maxindex);

	/* Make sure the tree is high enough.  */
	if (max > maxindex) {
		int error = radix_tree_extend(root, max, shift);
		if (error < 0)
			return error;
		shift = error;
		slot = root->rnode;
		if (order == shift)
			shift += RADIX_TREE_MAP_SHIFT;
	}

	offset = 0;			/* uninitialised var warning */
	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (slot == NULL) {
			/* Have to add a child node.  */
			slot = radix_tree_node_alloc(root);
			if (!slot)
				return -ENOMEM;
			slot->shift = shift;
			slot->offset = offset;
			slot->parent = node;
			if (node) {
				rcu_assign_pointer(node->slots[offset],
							node_to_entry(slot));
				node->count++;
			} else
				rcu_assign_pointer(root->rnode,
							node_to_entry(slot));
		} else if (!radix_tree_is_internal_node(slot))
			break;

		/* Go a level down */
		node = entry_to_node(slot);
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		offset = radix_tree_descend(node, &slot, offset);
	}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	/* Insert pointers to the canonical entry */
	if (order > shift) {
		int i, n = 1 << (order - shift);
		offset = offset & ~(n - 1);
		slot = node_to_entry(&node->slots[offset]);
		for (i = 0; i < n; i++) {
			if (node->slots[offset + i])
				return -EEXIST;
		}

		for (i = 1; i < n; i++) {
			rcu_assign_pointer(node->slots[offset + i], slot);
			node->count++;
		}
	}
#endif

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = node ? node->slots + offset : (void **)&root->rnode;
	return 0;
}

/**
 * __radix_tree_insert - insert into a radix tree
 * @root: radix tree root
 * @index: index key
 * @order: key covers the 2^order indices around index
 * @item: item to insert
 *
 * Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;
	if (*slot != NULL)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		node->count++;
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);
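
/*
 * Note: radix_tree_insert() in <linux/radix-tree.h> is a static inline
 * wrapper that calls __radix_tree_insert(root, index, 0, item), i.e. an
 * order-0 (single slot) insertion.
 */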

/**
 * __radix_tree_lookup - lookup an item in a radix tree
 * @root: radix tree root
 * @index: index key
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Lookup and return the item at position @index in the radix
 * tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->rnode is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;
	void **slot;

restart:
	parent = NULL;
	slot = (void **)&root->rnode;
	shift = radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = entry_to_node(node);
		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		offset = radix_tree_descend(parent, &node, offset);
		slot = parent->slots + offset;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}

/**
 * radix_tree_lookup_slot - lookup a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Returns: the slot corresponding to the position @index in the
 * radix tree @root. This is useful for update-if-exists operations.
 *
 * This function can be called under rcu_read_lock iff the slot is not
 * modified by radix_tree_replace_slot, otherwise it must be called
 * exclusive from other writers. Any dereference of the slot must be done
 * using radix_tree_deref_slot.
 */
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
	void **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
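
/*
 * Example (a sketch): replace an existing entry in place, assuming the
 * caller excludes other writers:
 *
 *	void **slot = radix_tree_lookup_slot(root, index);
 *	if (slot)
 *		radix_tree_replace_slot(slot, new_item);
 */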

/**
 * radix_tree_lookup - perform lookup operation on a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Lookup the item at the position @index in the radix tree @root.
 *
 * This function can be called under rcu_read_lock, however the caller
 * must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 * them safely). No RCU barriers are required to access or modify the
 * returned item, however.
 */
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);

/**
 * radix_tree_tag_set - set a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree, from the root
 * all the way down to the leaf node.
 *
 * Returns the address of the tagged item.  Setting a tag on a not-present
 * item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;

	shift = radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, offset);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);

/**
 * radix_tree_tag_clear - clear a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree.  If this causes
 * the leaf node to have no tags set then clear the tag in the
 * next-to-leaf node, etc.
 *
 * Returns the address of the tagged item on success, else NULL.  I.e. it
 * has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;
	int uninitialized_var(offset);

	shift = radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, offset);
	}

	if (node == NULL)
		goto out;

	index >>= shift;

	while (parent) {
		if (!tag_get(parent, tag, offset))
			goto out;
		tag_clear(parent, tag, offset);
		if (any_tag_set(parent, tag))
			goto out;

		index >>= RADIX_TREE_MAP_SHIFT;
		offset = index & RADIX_TREE_MAP_MASK;
		parent = parent->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);

out:
	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);

/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;

	if (!root_tag_get(root, tag))
		return 0;

	shift = radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;
	if (node == NULL)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		int offset;

		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, offset);

		if (!node)
			return 0;
		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);

static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root: radix tree root
 * @iter: iterator state
 * @flags: RADIX_TREE_ITER_* flags and tag index
 * Returns: pointer to chunk first slot, or NULL if iteration is over
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *rnode, *node;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL. iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and to forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

restart:
	shift = radix_tree_load_root(root, &rnode, &maxindex);
	if (index > maxindex)
		return NULL;

	if (radix_tree_is_internal_node(rnode)) {
		rnode = entry_to_node(rnode);
	} else if (rnode) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		__set_iter_shift(iter, shift);
		return (void **)&root->rnode;
	} else
		return NULL;

	shift -= RADIX_TREE_MAP_SHIFT;
	offset = index >> shift;

	node = rnode;
	while (1) {
		struct radix_tree_node *slot;
		unsigned new_off = radix_tree_descend(node, &slot, offset);

		if (new_off < offset) {
			offset = new_off;
			index &= ~((RADIX_TREE_MAP_SIZE << shift) - 1);
			index |= offset << shift;
		}

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !slot) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(
						node->tags[tag],
						RADIX_TREE_MAP_SIZE,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = node->slots[offset];
					if (is_sibling_entry(node, slot))
						continue;
					if (slot)
						break;
				}
			index &= ~((RADIX_TREE_MAP_SIZE << shift) - 1);
			index += offset << shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			slot = rcu_dereference_raw(node->slots[offset]);
		}

		if ((slot == NULL) || (slot == RADIX_TREE_RETRY))
			goto restart;
		if (!radix_tree_is_internal_node(slot))
			break;

		node = entry_to_node(slot);
		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
	}

	/* Update the iterator state */
	iter->index = index & ~((1 << shift) - 1);
	iter->next_index = (index | ((RADIX_TREE_MAP_SIZE << shift) - 1)) + 1;
	__set_iter_shift(iter, shift);

	/* Construct iter->tags bit-mask from node->tags[tag] array */
	if (flags & RADIX_TREE_ITER_TAGGED) {
		unsigned tag_long, tag_bit;

		tag_long = offset / BITS_PER_LONG;
		tag_bit  = offset % BITS_PER_LONG;
		iter->tags = node->tags[tag][tag_long] >> tag_bit;
		/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
		if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
			/* Pick tags from next element */
			if (tag_bit)
				iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
			/* Clip chunk size, here only BITS_PER_LONG tags */
			iter->next_index = index + BITS_PER_LONG;
		}
	}

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);
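
/*
 * Example (a sketch): the iteration macros in <linux/radix-tree.h> are
 * built on radix_tree_next_chunk(), e.g.:
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, root, &iter, 0) {
 *		void *entry = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(entry)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		(use entry here; iter.index is its index)
 *	}
 *	rcu_read_unlock();
 */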

/**
 * radix_tree_range_tag_if_tagged - for each item in given range set given
 *				   tag if item has another tag set
 * @root: radix tree root
 * @first_indexp: pointer to a starting index of a range to scan
 * @last_index: last index of a range to scan
 * @nr_to_tag: maximum number of items to tag
 * @iftag: tag index to test
 * @settag: tag index to set if tested tag is set
 *
 * This function scans the range of the radix tree from first_index to
 * last_index (inclusive).  For each item in the range that has iftag set,
 * the function also sets settag.  The function stops either after tagging
 * nr_to_tag items or after reaching last_index.
 *
 * The tags must be set from the leaf level only and propagated back up the
 * path to the root. We must do this so that we resolve the full path before
 * setting any tags on intermediate nodes. If we set tags as we descend, then
 * we can get to the leaf node and find that the index that has the iftag
 * set is outside the range we are scanning. This results in dangling tags and
 * can lead to problems with later tag operations (e.g. livelocks on lookups).
 *
 * The function returns the number of leaves where the tag was set and sets
 * *first_indexp to the first unscanned index.
 * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
 * be prepared to handle that.
 */
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int iftag, unsigned int settag)
{
	struct radix_tree_node *slot, *node = NULL;
	unsigned long maxindex;
	unsigned int shift = radix_tree_load_root(root, &slot, &maxindex);
	unsigned long tagged = 0;
	unsigned long index = *first_indexp;

	last_index = min(last_index, maxindex);
	if (index > last_index)
		return 0;
	if (!nr_to_tag)
		return 0;
	if (!root_tag_get(root, iftag)) {
		*first_indexp = last_index + 1;
		return 0;
	}
	if (!radix_tree_is_internal_node(slot)) {
		*first_indexp = last_index + 1;
		root_tag_set(root, settag);
		return 1;
	}

	node = entry_to_node(slot);
	shift -= RADIX_TREE_MAP_SHIFT;

	for (;;) {
		unsigned long upindex;
		unsigned offset;

		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		offset = radix_tree_descend(node, &slot, offset);
		if (!slot)
			goto next;
		if (!tag_get(node, iftag, offset))
			goto next;
		/* Sibling slots never have tags set on them */
		if (radix_tree_is_internal_node(slot)) {
			node = entry_to_node(slot);
			shift -= RADIX_TREE_MAP_SHIFT;
			continue;
		}

		/* tag the leaf */
		tagged++;
		tag_set(node, settag, offset);

		slot = node->parent;
		/* walk back up the path tagging interior nodes */
		upindex = index >> shift;
		while (slot) {
			upindex >>= RADIX_TREE_MAP_SHIFT;
			offset = upindex & RADIX_TREE_MAP_MASK;

			/* stop if we find a node with the tag already set */
			if (tag_get(slot, settag, offset))
				break;
			tag_set(slot, settag, offset);
			slot = slot->parent;
		}

next:
		/* Go to next item at level determined by 'shift' */
		index = ((index >> shift) + 1) << shift;
		/* Overflow can happen when last_index is ~0UL... */
		if (index > last_index || !index)
			break;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		while (offset == 0) {
			/*
			 * We've fully scanned this node. Go up. Because
			 * last_index is guaranteed to be in the tree, what
			 * we do below cannot wander astray.
			 */
			node = node->parent;
			shift += RADIX_TREE_MAP_SHIFT;
			offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		}
		if (is_sibling_entry(node, node->slots[offset]))
			goto next;
		if (tagged >= nr_to_tag)
			break;
	}
	/*
	 * There is no need to set the root tag if nothing within the range
	 * from *first_indexp to last_index was tagged with settag.
	 */
	if (tagged > 0)
		root_tag_set(root, settag);
	*first_indexp = index;

	return tagged;
}
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
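
/*
 * Example (a sketch, modelled loosely on tag_pages_for_writeback()): mark
 * dirty pages in a range as "to write":
 *
 *	unsigned long start = 0;
 *	tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
 *			&start, end, ULONG_MAX,
 *			PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
 */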

/**
 * radix_tree_gang_lookup - perform multiple lookup on a radix tree
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items.  Places
 * them at *@results and returns the number of items which were placed at
 * *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 * rcu_read_lock. In this case, rather than the returned results being
 * an atomic snapshot of the tree at a single point in time, the
 * semantics of an RCU protected gang lookup are as though multiple
 * radix_tree_lookups have been issued under individual locks, and results
 * stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
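
/*
 * Example (a sketch): collect up to 16 present items starting at index 0:
 *
 *	void *items[16];
 *	unsigned int n;
 *
 *	rcu_read_lock();
 *	n = radix_tree_gang_lookup(root, items, 0, ARRAY_SIZE(items));
 *	rcu_read_unlock();
 */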

/**
 * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @indices: where their indices should be placed (but usually NULL)
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items.  Places
 * their slots at *@results and returns the number of items which were
 * placed at *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
 * be dereferenced with radix_tree_deref_slot, and if using only RCU
 * protection, radix_tree_deref_slot may fail requiring a retry.
 */
unsigned int
radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = slot;
		if (indices)
			indices[ret] = iter.index;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_slot);

/**
 * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *				based on a tag
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set.  Places the items at *@results and
 * returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);

/**
 * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *					radix tree based on a tag
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set.  Places the slots at *@results and
 * returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = slot;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);

#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
#include <linux/sched.h> /* for cond_resched() */

struct locate_info {
	unsigned long found_index;
	bool stop;
};

/*
 * This linear search is at present only useful to shmem_unuse_inode().
 */
static unsigned long __locate(struct radix_tree_node *slot, void *item,
			unsigned long index, struct locate_info *info)
{
	unsigned int shift;
	unsigned long i;

	shift = slot->shift + RADIX_TREE_MAP_SHIFT;

	do {
		shift -= RADIX_TREE_MAP_SHIFT;

		for (i = (index >> shift) & RADIX_TREE_MAP_MASK;
		     i < RADIX_TREE_MAP_SIZE;
		     i++, index += (1UL << shift)) {
			struct radix_tree_node *node =
					rcu_dereference_raw(slot->slots[i]);
			if (node == RADIX_TREE_RETRY)
				goto out;
			if (!radix_tree_is_internal_node(node)) {
				if (node == item) {
					info->found_index = index;
					info->stop = true;
					goto out;
				}
				continue;
			}
			node = entry_to_node(node);
			if (is_sibling_entry(slot, node))
				continue;
			slot = node;
			break;
		}
		if (i == RADIX_TREE_MAP_SIZE)
			break;
	} while (shift);

out:
	if ((index == 0) && (i == RADIX_TREE_MAP_SIZE))
		info->stop = true;
	return index;
}

/**
 * radix_tree_locate_item - search through radix tree for item
 * @root: radix tree root
 * @item: item to be found
 *
 * Returns index where item was found, or -1 if not found.
 * Caller must hold no lock (since this time-consuming function needs
 * to be preemptible), and must check afterwards if item is still there.
 */
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
	struct radix_tree_node *node;
	unsigned long max_index;
	unsigned long cur_index = 0;
	struct locate_info info = {
		.found_index = -1,
		.stop = false,
	};

	do {
		rcu_read_lock();
		node = rcu_dereference_raw(root->rnode);
		if (!radix_tree_is_internal_node(node)) {
			rcu_read_unlock();
			if (node == item)
				info.found_index = 0;
			break;
		}

		node = entry_to_node(node);

		max_index = node_maxindex(node);
		if (cur_index > max_index) {
			rcu_read_unlock();
			break;
		}

		cur_index = __locate(node, item, cur_index, &info);
		rcu_read_unlock();
		cond_resched();
	} while (!info.stop && cur_index <= max_index);

	return info.found_index;
}
#else
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
	return -1;
}
#endif /* CONFIG_SHMEM && CONFIG_SWAP */

/**
 * radix_tree_shrink - shrink radix tree to minimum height
 * @root: radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = root->rnode;
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its
		 * child is not at the leftmost slot, or the child is a
		 * multiorder entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = node->slots[0];
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->rnode) as far as dependent read barriers go.
		 */
		root->rnode = child;

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		if (!radix_tree_is_internal_node(child))
			node->slots[0] = RADIX_TREE_RETRY;

		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}

/**
 * __radix_tree_delete_node - try to free node after clearing a slot
 * @root: radix tree root
 * @node: node containing @index
 *
 * After clearing the slot at @index in @node from radix tree
 * rooted at @root, call this function to attempt freeing the
 * node and shrinking the tree.
 *
 * Returns %true if @node was freed, %false otherwise.
 */
bool __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node == entry_to_node(root->rnode))
				deleted |= radix_tree_shrink(root);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			root_tag_clear_all(root);
			root->rnode = NULL;
		}

		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}

static inline void delete_sibling_entries(struct radix_tree_node *node,
					void *ptr, unsigned offset)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	int i;
	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[offset + i] != ptr)
			break;
		node->slots[offset + i] = NULL;
		node->count--;
	}
#endif
}

/**
 * radix_tree_delete_item - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 * @item: expected item
 *
 * Remove @item at @index from the radix tree rooted at @root.
 *
 * Returns the address of the deleted item, or NULL if it was not present
 * or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node;
	unsigned int offset;
	void **slot;
	void *entry;
	int tag;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!entry)
		return NULL;

	if (item && entry != item)
		return NULL;

	if (!node) {
		root_tag_clear_all(root);
		root->rnode = NULL;
		return entry;
	}

	offset = get_slot_offset(node, slot);

	/*
	 * Clear all tags associated with the item to be deleted.
	 * This way of doing it would be inefficient, but seldom is any tag set.
	 */
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (tag_get(node, tag, offset))
			radix_tree_tag_clear(root, index, tag);
	}

	delete_sibling_entries(node, node_to_entry(slot), offset);
	node->slots[offset] = NULL;
	node->count--;

	__radix_tree_delete_node(root, node);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);

/**
 * radix_tree_delete - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Remove the item at @index from the radix tree rooted at @root.
 *
 * Returns the address of the deleted item, or NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);

/**
 * radix_tree_tagged - test whether any items in the tree are tagged
 * @root: radix tree root
 * @tag: tag to test
 */
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);

static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}

static int radix_tree_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		rtp = &per_cpu(radix_tree_preloads, cpu);
		while (rtp->nr) {
			node = rtp->nodes;
			rtp->nodes = node->private_data;
			kmem_cache_free(radix_tree_node_cachep, node);
			rtp->nr--;
		}
	}
	return NOTIFY_OK;
}

void __init radix_tree_init(void)
{
	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	hotcpu_notifier(radix_tree_callback, 0);
}