1 /*
2 * Copyright (C) 2001 Momchil Velikov
3 * Portions Copyright (C) 2001 Christoph Hellwig
4 * Copyright (C) 2005 SGI, Christoph Lameter
5 * Copyright (C) 2006 Nick Piggin
6 * Copyright (C) 2012 Konstantin Khlebnikov
7 * Copyright (C) 2016 Intel, Matthew Wilcox
8 * Copyright (C) 2016 Intel, Ross Zwisler
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25 #include <linux/bitmap.h>
26 #include <linux/bitops.h>
27 #include <linux/cpu.h>
28 #include <linux/errno.h>
29 #include <linux/export.h>
30 #include <linux/idr.h>
31 #include <linux/init.h>
32 #include <linux/kernel.h>
33 #include <linux/kmemleak.h>
34 #include <linux/percpu.h>
35 #include <linux/preempt.h> /* in_interrupt() */
36 #include <linux/radix-tree.h>
37 #include <linux/rcupdate.h>
38 #include <linux/slab.h>
39 #include <linux/string.h>
40
41
42 /* Number of nodes in fully populated tree of given height */
43 static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
44
45 /*
46 * Radix tree node cache.
47 */
48 static struct kmem_cache *radix_tree_node_cachep;
49
50 /*
51 * The radix tree is variable-height, so an insert operation not only has
52 * to build the branch to its corresponding item, it also has to build the
53 * branch to existing items if the size has to be increased (by
54 * radix_tree_extend).
55 *
56 * The worst case is a zero height tree with just a single item at index 0,
57 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
58 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
59 * Hence:
60 */
61 #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
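/*
 * For example (an illustrative calculation, assuming the common
 * configuration where RADIX_TREE_MAP_SHIFT is 6 and longs are 64 bits):
 * RADIX_TREE_MAX_PATH = DIV_ROUND_UP(64, 6) = 11, so
 * RADIX_TREE_PRELOAD_SIZE = 11 * 2 - 1 = 21 nodes per CPU.
 */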
62
63 /*
64 * The IDR does not have to be as high as the radix tree since it uses
65 * signed integers, not unsigned longs.
66 */
67 #define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
68 #define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
69 RADIX_TREE_MAP_SHIFT))
70 #define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
71
72 /*
73 * The IDA is even shorter since it uses a bitmap at the last level.
74 */
75 #define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
76 #define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
77 RADIX_TREE_MAP_SHIFT))
78 #define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
79
80 /*
81 * Per-cpu pool of preloaded nodes
82 */
83 struct radix_tree_preload {
84 unsigned nr;
85 /* nodes->parent points to next preallocated node */
86 struct radix_tree_node *nodes;
87 };
88 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
89
90 static inline struct radix_tree_node *entry_to_node(void *ptr)
91 {
92 return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
93 }
94
95 static inline void *node_to_entry(void *ptr)
96 {
97 return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
98 }
99
100 #define RADIX_TREE_RETRY node_to_entry(NULL)
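/*
 * A sketch of the round-trip between the two encodings (the tag bit
 * itself, RADIX_TREE_INTERNAL_NODE, is defined in radix-tree.h):
 *
 *	struct radix_tree_node *node = ...;
 *	void *entry = node_to_entry(node);
 *	BUG_ON(!radix_tree_is_internal_node(entry));
 *	BUG_ON(entry_to_node(entry) != node);
 *
 * RADIX_TREE_RETRY is thus the internal-node encoding of NULL: a lookup
 * that sees it knows the entry has moved and restarts from the root.
 */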
101
102 #ifdef CONFIG_RADIX_TREE_MULTIORDER
103 /* Sibling slots point directly to another slot in the same node */
104 static inline
105 bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
106 {
107 void **ptr = node;
108 return (parent->slots <= ptr) &&
109 (ptr < parent->slots + RADIX_TREE_MAP_SIZE);
110 }
111 #else
112 static inline
113 bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
114 {
115 return false;
116 }
117 #endif
118
119 static inline
120 unsigned long get_slot_offset(const struct radix_tree_node *parent, void **slot)
121 {
122 return slot - parent->slots;
123 }
124
125 static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
126 struct radix_tree_node **nodep, unsigned long index)
127 {
128 unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
129 void **entry = rcu_dereference_raw(parent->slots[offset]);
130
131 #ifdef CONFIG_RADIX_TREE_MULTIORDER
132 if (radix_tree_is_internal_node(entry)) {
133 if (is_sibling_entry(parent, entry)) {
134 void **sibentry = (void **) entry_to_node(entry);
135 offset = get_slot_offset(parent, sibentry);
136 entry = rcu_dereference_raw(*sibentry);
137 }
138 }
139 #endif
140
141 *nodep = (void *)entry;
142 return offset;
143 }
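/*
 * Worked example (assuming RADIX_TREE_MAP_SHIFT == 6, so
 * RADIX_TREE_MAP_MASK == 63): for a parent with shift == 6 and
 * index == 70, offset = (70 >> 6) & 63 = 1, and *nodep is set to
 * whatever parent->slots[1] holds, after chasing a sibling entry
 * if CONFIG_RADIX_TREE_MULTIORDER placed one there.
 */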
144
145 static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
146 {
147 return root->gfp_mask & __GFP_BITS_MASK;
148 }
149
150 static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
151 int offset)
152 {
153 __set_bit(offset, node->tags[tag]);
154 }
155
156 static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
157 int offset)
158 {
159 __clear_bit(offset, node->tags[tag]);
160 }
161
162 static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
163 int offset)
164 {
165 return test_bit(offset, node->tags[tag]);
166 }
167
168 static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
169 {
170 root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
171 }
172
173 static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
174 {
175 root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
176 }
177
178 static inline void root_tag_clear_all(struct radix_tree_root *root)
179 {
180 root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
181 }
182
183 static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
184 {
185 return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
186 }
187
188 static inline unsigned root_tags_get(const struct radix_tree_root *root)
189 {
190 return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
191 }
192
193 static inline bool is_idr(const struct radix_tree_root *root)
194 {
195 return !!(root->gfp_mask & ROOT_IS_IDR);
196 }
197
198 /*
199 * Returns 1 if any slot in the node has this tag set.
200 * Otherwise returns 0.
201 */
202 static inline int any_tag_set(const struct radix_tree_node *node,
203 unsigned int tag)
204 {
205 unsigned idx;
206 for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
207 if (node->tags[tag][idx])
208 return 1;
209 }
210 return 0;
211 }
212
213 static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
214 {
215 bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
216 }
217
218 /**
219 * radix_tree_find_next_bit - find the next set bit in a memory region
220 *
221 * @node: the radix tree node whose tag bitmap to search
222 * @tag: the tag index whose bitmap to search
223 * @offset: the bit number to start searching at
224 *
225 * Unrollable variant of find_next_bit() for constant size arrays.
226 * Tail bits from RADIX_TREE_MAP_SIZE to the next BITS_PER_LONG boundary must
227 * be zero. Returns the next set bit's offset, or RADIX_TREE_MAP_SIZE if none.
228 */
229 static __always_inline unsigned long
230 radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
231 unsigned long offset)
232 {
233 const unsigned long *addr = node->tags[tag];
234
235 if (offset < RADIX_TREE_MAP_SIZE) {
236 unsigned long tmp;
237
238 addr += offset / BITS_PER_LONG;
239 tmp = *addr >> (offset % BITS_PER_LONG);
240 if (tmp)
241 return __ffs(tmp) + offset;
242 offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
243 while (offset < RADIX_TREE_MAP_SIZE) {
244 tmp = *++addr;
245 if (tmp)
246 return __ffs(tmp) + offset;
247 offset += BITS_PER_LONG;
248 }
249 }
250 return RADIX_TREE_MAP_SIZE;
251 }
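/*
 * Worked example: if node->tags[tag][0] == 0x50 (bits 4 and 6 set) and
 * offset == 3, then tmp == 0x50 >> 3 == 0xa, __ffs(0xa) == 1, and the
 * function returns 1 + 3 == 4. Called again with offset == 5 it returns
 * 6; with offset == 7 it scans the remaining words and returns
 * RADIX_TREE_MAP_SIZE because no further bit is set.
 */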
252
253 static unsigned int iter_offset(const struct radix_tree_iter *iter)
254 {
255 return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
256 }
257
258 /*
259 * The maximum index which can be stored in a radix tree
260 */
261 static inline unsigned long shift_maxindex(unsigned int shift)
262 {
263 return (RADIX_TREE_MAP_SIZE << shift) - 1;
264 }
265
266 static inline unsigned long node_maxindex(const struct radix_tree_node *node)
267 {
268 return shift_maxindex(node->shift);
269 }
270
271 static unsigned long next_index(unsigned long index,
272 const struct radix_tree_node *node,
273 unsigned long offset)
274 {
275 return (index & ~node_maxindex(node)) + (offset << node->shift);
276 }
277
278 #ifndef __KERNEL__
279 static void dump_node(struct radix_tree_node *node, unsigned long index)
280 {
281 unsigned long i;
282
283 pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
284 node, node->offset, index, index | node_maxindex(node),
285 node->parent,
286 node->tags[0][0], node->tags[1][0], node->tags[2][0],
287 node->shift, node->count, node->exceptional);
288
289 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
290 unsigned long first = index | (i << node->shift);
291 unsigned long last = first | ((1UL << node->shift) - 1);
292 void *entry = node->slots[i];
293 if (!entry)
294 continue;
295 if (entry == RADIX_TREE_RETRY) {
296 pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
297 i, first, last, node);
298 } else if (!radix_tree_is_internal_node(entry)) {
299 pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
300 entry, i, first, last, node);
301 } else if (is_sibling_entry(node, entry)) {
302 pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
303 entry, i, first, last, node,
304 *(void **)entry_to_node(entry));
305 } else {
306 dump_node(entry_to_node(entry), first);
307 }
308 }
309 }
310
311 /* For debug */
312 static void radix_tree_dump(struct radix_tree_root *root)
313 {
314 pr_debug("radix root: %p rnode %p tags %x\n",
315 root, root->rnode,
316 root->gfp_mask >> ROOT_TAG_SHIFT);
317 if (!radix_tree_is_internal_node(root->rnode))
318 return;
319 dump_node(entry_to_node(root->rnode), 0);
320 }
321
322 static void dump_ida_node(void *entry, unsigned long index)
323 {
324 unsigned long i;
325
326 if (!entry)
327 return;
328
329 if (radix_tree_is_internal_node(entry)) {
330 struct radix_tree_node *node = entry_to_node(entry);
331
332 pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
333 node, node->offset, index * IDA_BITMAP_BITS,
334 ((index | node_maxindex(node)) + 1) *
335 IDA_BITMAP_BITS - 1,
336 node->parent, node->tags[0][0], node->shift,
337 node->count);
338 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
339 dump_ida_node(node->slots[i],
340 index | (i << node->shift));
341 } else if (radix_tree_exceptional_entry(entry)) {
342 pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
343 entry, (int)(index & RADIX_TREE_MAP_MASK),
344 index * IDA_BITMAP_BITS,
345 index * IDA_BITMAP_BITS + BITS_PER_LONG -
346 RADIX_TREE_EXCEPTIONAL_SHIFT,
347 (unsigned long)entry >>
348 RADIX_TREE_EXCEPTIONAL_SHIFT);
349 } else {
350 struct ida_bitmap *bitmap = entry;
351
352 pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
353 (int)(index & RADIX_TREE_MAP_MASK),
354 index * IDA_BITMAP_BITS,
355 (index + 1) * IDA_BITMAP_BITS - 1);
356 for (i = 0; i < IDA_BITMAP_LONGS; i++)
357 pr_cont(" %lx", bitmap->bitmap[i]);
358 pr_cont("\n");
359 }
360 }
361
362 static void ida_dump(struct ida *ida)
363 {
364 struct radix_tree_root *root = &ida->ida_rt;
365 pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
366 root->gfp_mask >> ROOT_TAG_SHIFT);
367 dump_ida_node(root->rnode, 0);
368 }
369 #endif
370
371 /*
372 * This assumes that the caller has performed appropriate preallocation, and
373 * that the caller has pinned this thread of control to the current CPU.
374 */
375 static struct radix_tree_node *
376 radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
377 struct radix_tree_root *root,
378 unsigned int shift, unsigned int offset,
379 unsigned int count, unsigned int exceptional)
380 {
381 struct radix_tree_node *ret = NULL;
382
383 /*
384 * Preload code isn't irq safe and it doesn't make sense to use
385 * preloading during an interrupt anyway as all the allocations have
386 * to be atomic. So just do normal allocation when in interrupt.
387 */
388 if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
389 struct radix_tree_preload *rtp;
390
391 /*
392 * Even if the caller has preloaded, try to allocate from the
393 * cache first for the new node to get accounted to the memory
394 * cgroup.
395 */
396 ret = kmem_cache_alloc(radix_tree_node_cachep,
397 gfp_mask | __GFP_NOWARN);
398 if (ret)
399 goto out;
400
401 /*
402 * Provided the caller has preloaded, we will always succeed
403 * in getting a node from the per-cpu pool here (the
404 * opportunistic kmem_cache_alloc() above may have failed).
405 */
406 rtp = this_cpu_ptr(&radix_tree_preloads);
407 if (rtp->nr) {
408 ret = rtp->nodes;
409 rtp->nodes = ret->parent;
410 rtp->nr--;
411 }
412 /*
413 * Update the allocation stack trace as this is more useful
414 * for debugging.
415 */
416 kmemleak_update_trace(ret);
417 goto out;
418 }
419 ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
420 out:
421 BUG_ON(radix_tree_is_internal_node(ret));
422 if (ret) {
423 ret->shift = shift;
424 ret->offset = offset;
425 ret->count = count;
426 ret->exceptional = exceptional;
427 ret->parent = parent;
428 ret->root = root;
429 }
430 return ret;
431 }
432
433 static void radix_tree_node_rcu_free(struct rcu_head *head)
434 {
435 struct radix_tree_node *node =
436 container_of(head, struct radix_tree_node, rcu_head);
437
438 /*
439 * Must only free zeroed nodes into the slab. We can be left with
440 * non-NULL entries by radix_tree_free_nodes, so clear the entries
441 * and tags here.
442 */
443 memset(node->slots, 0, sizeof(node->slots));
444 memset(node->tags, 0, sizeof(node->tags));
445 INIT_LIST_HEAD(&node->private_list);
446
447 kmem_cache_free(radix_tree_node_cachep, node);
448 }
449
450 static inline void
451 radix_tree_node_free(struct radix_tree_node *node)
452 {
453 call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
454 }
455
456 /*
457 * Load up this CPU's radix_tree_node buffer with sufficient objects to
458 * ensure that the addition of a single element in the tree cannot fail. On
459 * success, return zero, with preemption disabled. On error, return -ENOMEM
460 * with preemption not disabled.
461 *
462 * To make use of this facility, the radix tree must be initialised without
463 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
464 */
465 static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
466 {
467 struct radix_tree_preload *rtp;
468 struct radix_tree_node *node;
469 int ret = -ENOMEM;
470
471 /*
472 * Nodes preloaded by one cgroup can be used by another cgroup, so
473 * they should never be accounted to any particular memory cgroup.
474 */
475 gfp_mask &= ~__GFP_ACCOUNT;
476
477 preempt_disable();
478 rtp = this_cpu_ptr(&radix_tree_preloads);
479 while (rtp->nr < nr) {
480 preempt_enable();
481 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
482 if (node == NULL)
483 goto out;
484 preempt_disable();
485 rtp = this_cpu_ptr(&radix_tree_preloads);
486 if (rtp->nr < nr) {
487 node->parent = rtp->nodes;
488 rtp->nodes = node;
489 rtp->nr++;
490 } else {
491 kmem_cache_free(radix_tree_node_cachep, node);
492 }
493 }
494 ret = 0;
495 out:
496 return ret;
497 }
498
499 /*
500 * Load up this CPU's radix_tree_node buffer with sufficient objects to
501 * ensure that the addition of a single element in the tree cannot fail. On
502 * success, return zero, with preemption disabled. On error, return -ENOMEM
503 * with preemption not disabled.
504 *
505 * To make use of this facility, the radix tree must be initialised without
506 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
507 */
508 int radix_tree_preload(gfp_t gfp_mask)
509 {
510 /* Warn on nonsensical use... */
511 WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
512 return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
513 }
514 EXPORT_SYMBOL(radix_tree_preload);
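/*
 * Typical usage, as a sketch (tree, lock, index and item are the
 * caller's; radix_tree_preload_end() from radix-tree.h re-enables
 * preemption):
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&lock);
 *	error = radix_tree_insert(&tree, index, item);
 *	spin_unlock(&lock);
 *	radix_tree_preload_end();
 */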
515
516 /*
517 * The same as the function above, except we don't guarantee preloading
518 * happens. We do it if we decide it helps. On success, return zero with preemption
519 * disabled. On error, return -ENOMEM with preemption not disabled.
520 */
521 int radix_tree_maybe_preload(gfp_t gfp_mask)
522 {
523 if (gfpflags_allow_blocking(gfp_mask))
524 return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
525 /* Preloading doesn't help anything with this gfp mask, skip it */
526 preempt_disable();
527 return 0;
528 }
529 EXPORT_SYMBOL(radix_tree_maybe_preload);
530
531 #ifdef CONFIG_RADIX_TREE_MULTIORDER
532 /*
533 * Preload with enough objects to ensure that we can split a single entry
534 * of order @old_order into many entries of size @new_order
535 */
536 int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
537 gfp_t gfp_mask)
538 {
539 unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
540 unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
541 (new_order / RADIX_TREE_MAP_SHIFT);
542 unsigned nr = 0;
543
544 WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
545 BUG_ON(new_order >= old_order);
546
547 while (layers--)
548 nr = nr * RADIX_TREE_MAP_SIZE + 1;
549 return __radix_tree_preload(gfp_mask, top * nr);
550 }
551 #endif
552
553 /*
554 * The same as the function above, but preloads the number of nodes required
555 * to insert (1 << order) contiguous naturally-aligned elements.
556 */
557 int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
558 {
559 unsigned long nr_subtrees;
560 int nr_nodes, subtree_height;
561
562 /* Preloading doesn't help anything with this gfp mask, skip it */
563 if (!gfpflags_allow_blocking(gfp_mask)) {
564 preempt_disable();
565 return 0;
566 }
567
568 /*
569 * Calculate number and height of fully populated subtrees it takes to
570 * store (1 << order) elements.
571 */
572 nr_subtrees = 1 << order;
573 for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
574 subtree_height++)
575 nr_subtrees >>= RADIX_TREE_MAP_SHIFT;
576
577 /*
578 * The worst case is zero height tree with a single item at index 0 and
579 * then inserting items starting at ULONG_MAX - (1 << order).
580 *
581 * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
582 * 0-index item.
583 */
584 nr_nodes = RADIX_TREE_MAX_PATH;
585
586 /* Plus branch to fully populated subtrees. */
587 nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;
588
589 /* Root node is shared. */
590 nr_nodes--;
591
592 /* Plus nodes required to build subtrees. */
593 nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];
594
595 return __radix_tree_preload(gfp_mask, nr_nodes);
596 }
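/*
 * For example, preloading for an order-9 entry (a 2MB THP with 4K
 * pages), assuming RADIX_TREE_MAP_SHIFT == 6: nr_subtrees starts at
 * 512; one pass through the loop leaves nr_subtrees == 8 with
 * subtree_height == 1. The preload then covers one RADIX_TREE_MAX_PATH
 * branch to index 0, a second branch shortened by the subtree height,
 * minus the shared root, plus 8 fully populated height-1 subtrees.
 */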
597
598 static unsigned radix_tree_load_root(const struct radix_tree_root *root,
599 struct radix_tree_node **nodep, unsigned long *maxindex)
600 {
601 struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
602
603 *nodep = node;
604
605 if (likely(radix_tree_is_internal_node(node))) {
606 node = entry_to_node(node);
607 *maxindex = node_maxindex(node);
608 return node->shift + RADIX_TREE_MAP_SHIFT;
609 }
610
611 *maxindex = 0;
612 return 0;
613 }
614
615 /*
616 * Extend a radix tree so it can store key @index.
617 */
618 static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
619 unsigned long index, unsigned int shift)
620 {
621 struct radix_tree_node *slot;
622 unsigned int maxshift;
623 int tag;
624
625 /* Figure out what the shift should be. */
626 maxshift = shift;
627 while (index > shift_maxindex(maxshift))
628 maxshift += RADIX_TREE_MAP_SHIFT;
629
630 slot = rcu_dereference_raw(root->rnode);
631 if (!slot && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
632 goto out;
633
634 do {
635 struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
636 root, shift, 0, 1, 0);
637 if (!node)
638 return -ENOMEM;
639
640 if (is_idr(root)) {
641 all_tag_set(node, IDR_FREE);
642 if (!root_tag_get(root, IDR_FREE)) {
643 tag_clear(node, IDR_FREE, 0);
644 root_tag_set(root, IDR_FREE);
645 }
646 } else {
647 /* Propagate the aggregated tag info to the new child */
648 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
649 if (root_tag_get(root, tag))
650 tag_set(node, tag, 0);
651 }
652 }
653
654 BUG_ON(shift > BITS_PER_LONG);
655 if (radix_tree_is_internal_node(slot)) {
656 entry_to_node(slot)->parent = node;
657 } else if (radix_tree_exceptional_entry(slot)) {
658 /* Moving an exceptional root->rnode to a node */
659 node->exceptional = 1;
660 }
661 node->slots[0] = slot;
662 slot = node_to_entry(node);
663 rcu_assign_pointer(root->rnode, slot);
664 shift += RADIX_TREE_MAP_SHIFT;
665 } while (shift <= maxshift);
666 out:
667 return maxshift + RADIX_TREE_MAP_SHIFT;
668 }
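/*
 * For example (assuming RADIX_TREE_MAP_SHIFT == 6): a tree whose only
 * entry sits directly in root->rnode (maxindex == 0), extended for
 * index == 1000, computes maxshift == 6, stacks a shift-0 node holding
 * the old entry in slot 0 under a new shift-6 node, and returns 12 as
 * the shift for the caller to start descending from.
 */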
669
670 /**
671 * radix_tree_shrink - shrink radix tree to minimum height
672 * @root: radix tree root
673 */
674 static inline bool radix_tree_shrink(struct radix_tree_root *root,
675 radix_tree_update_node_t update_node,
676 void *private)
677 {
678 bool shrunk = false;
679
680 for (;;) {
681 struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
682 struct radix_tree_node *child;
683
684 if (!radix_tree_is_internal_node(node))
685 break;
686 node = entry_to_node(node);
687
688 /*
689 * If the candidate node has more than one child, or its child
690 * is not at the leftmost slot, or the child is a multiorder
691 * entry, we cannot shrink.
692 */
693 if (node->count != 1)
694 break;
695 child = rcu_dereference_raw(node->slots[0]);
696 if (!child)
697 break;
698 if (!radix_tree_is_internal_node(child) && node->shift)
699 break;
700
701 if (radix_tree_is_internal_node(child))
702 entry_to_node(child)->parent = NULL;
703
704 /*
705 * We don't need rcu_assign_pointer(), since we are simply
706 * moving the node from one part of the tree to another: if it
707 * was safe to dereference the old pointer to it
708 * (node->slots[0]), it will be safe to dereference the new
709 * one (root->rnode) as far as dependent read barriers go.
710 */
711 root->rnode = child;
712 if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
713 root_tag_clear(root, IDR_FREE);
714
715 /*
716 * We have a dilemma here. The node's slot[0] must not be
717 * NULLed in case there are concurrent lookups expecting to
718 * find the item. However if this was a bottom-level node,
719 * then it may be subject to the slot pointer being visible
720 * to callers dereferencing it. If the item corresponding to
721 * slot[0] is subsequently deleted, these callers would expect
722 * their slot to become empty sooner or later.
723 *
724 * For example, lockless pagecache will look up a slot, deref
725 * the page pointer, and if the page has 0 refcount it means it
726 * was concurrently deleted from pagecache so try the deref
727 * again. Fortunately there is already a requirement for logic
728 * to retry the entire slot lookup -- the indirect pointer
729 * problem (replacing direct root node with an indirect pointer
730 * also results in a stale slot). So tag the slot as indirect
731 * to force callers to retry.
732 */
733 node->count = 0;
734 if (!radix_tree_is_internal_node(child)) {
735 node->slots[0] = RADIX_TREE_RETRY;
736 if (update_node)
737 update_node(node, private);
738 }
739
740 WARN_ON_ONCE(!list_empty(&node->private_list));
741 radix_tree_node_free(node);
742 shrunk = true;
743 }
744
745 return shrunk;
746 }
747
748 static bool delete_node(struct radix_tree_root *root,
749 struct radix_tree_node *node,
750 radix_tree_update_node_t update_node, void *private)
751 {
752 bool deleted = false;
753
754 do {
755 struct radix_tree_node *parent;
756
757 if (node->count) {
758 if (node_to_entry(node) ==
759 rcu_dereference_raw(root->rnode))
760 deleted |= radix_tree_shrink(root, update_node,
761 private);
762 return deleted;
763 }
764
765 parent = node->parent;
766 if (parent) {
767 parent->slots[node->offset] = NULL;
768 parent->count--;
769 } else {
770 /*
771 * Shouldn't the tags already have all been cleared
772 * by the caller?
773 */
774 if (!is_idr(root))
775 root_tag_clear_all(root);
776 root->rnode = NULL;
777 }
778
779 WARN_ON_ONCE(!list_empty(&node->private_list));
780 radix_tree_node_free(node);
781 deleted = true;
782
783 node = parent;
784 } while (node);
785
786 return deleted;
787 }
788
789 /**
790 * __radix_tree_create - create a slot in a radix tree
791 * @root: radix tree root
792 * @index: index key
793 * @order: index occupies 2^order aligned slots
794 * @nodep: returns node
795 * @slotp: returns slot
796 *
797 * Create, if necessary, and return the node and slot for an item
798 * at position @index in the radix tree @root.
799 *
800 * Until there is more than one item in the tree, no nodes are
801 * allocated and @root->rnode is used as a direct slot instead of
802 * pointing to a node, in which case *@nodep will be NULL.
803 *
804 * Returns -ENOMEM, or 0 for success.
805 */
806 int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
807 unsigned order, struct radix_tree_node **nodep,
808 void ***slotp)
809 {
810 struct radix_tree_node *node = NULL, *child;
811 void **slot = (void **)&root->rnode;
812 unsigned long maxindex;
813 unsigned int shift, offset = 0;
814 unsigned long max = index | ((1UL << order) - 1);
815 gfp_t gfp = root_gfp_mask(root);
816
817 shift = radix_tree_load_root(root, &child, &maxindex);
818
819 /* Make sure the tree is high enough. */
820 if (order > 0 && max == ((1UL << order) - 1))
821 max++;
822 if (max > maxindex) {
823 int error = radix_tree_extend(root, gfp, max, shift);
824 if (error < 0)
825 return error;
826 shift = error;
827 child = rcu_dereference_raw(root->rnode);
828 }
829
830 while (shift > order) {
831 shift -= RADIX_TREE_MAP_SHIFT;
832 if (child == NULL) {
833 /* Have to add a child node. */
834 child = radix_tree_node_alloc(gfp, node, root, shift,
835 offset, 0, 0);
836 if (!child)
837 return -ENOMEM;
838 rcu_assign_pointer(*slot, node_to_entry(child));
839 if (node)
840 node->count++;
841 } else if (!radix_tree_is_internal_node(child))
842 break;
843
844 /* Go a level down */
845 node = entry_to_node(child);
846 offset = radix_tree_descend(node, &child, index);
847 slot = &node->slots[offset];
848 }
849
850 if (nodep)
851 *nodep = node;
852 if (slotp)
853 *slotp = slot;
854 return 0;
855 }
856
857 /*
858 * Free any nodes below this node. The tree is presumed to not need
859 * shrinking, and any user data in the tree is presumed to not need a
860 * destructor called on it. If we need to add a destructor, we can
861 * add that functionality later. Note that we may not clear tags or
862 * slots from the tree as an RCU walker may still have a pointer into
863 * this subtree. We could replace the entries with RADIX_TREE_RETRY,
864 * but we'll still have to clear those in rcu_free.
865 */
866 static void radix_tree_free_nodes(struct radix_tree_node *node)
867 {
868 unsigned offset = 0;
869 struct radix_tree_node *child = entry_to_node(node);
870
871 for (;;) {
872 void *entry = rcu_dereference_raw(child->slots[offset]);
873 if (radix_tree_is_internal_node(entry) &&
874 !is_sibling_entry(child, entry)) {
875 child = entry_to_node(entry);
876 offset = 0;
877 continue;
878 }
879 offset++;
880 while (offset == RADIX_TREE_MAP_SIZE) {
881 struct radix_tree_node *old = child;
882 offset = child->offset + 1;
883 child = child->parent;
884 WARN_ON_ONCE(!list_empty(&old->private_list));
885 radix_tree_node_free(old);
886 if (old == entry_to_node(node))
887 return;
888 }
889 }
890 }
891
892 #ifdef CONFIG_RADIX_TREE_MULTIORDER
893 static inline int insert_entries(struct radix_tree_node *node, void **slot,
894 void *item, unsigned order, bool replace)
895 {
896 struct radix_tree_node *child;
897 unsigned i, n, tag, offset, tags = 0;
898
899 if (node) {
900 if (order > node->shift)
901 n = 1 << (order - node->shift);
902 else
903 n = 1;
904 offset = get_slot_offset(node, slot);
905 } else {
906 n = 1;
907 offset = 0;
908 }
909
910 if (n > 1) {
911 offset = offset & ~(n - 1);
912 slot = &node->slots[offset];
913 }
914 child = node_to_entry(slot);
915
916 for (i = 0; i < n; i++) {
917 if (slot[i]) {
918 if (replace) {
919 node->count--;
920 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
921 if (tag_get(node, tag, offset + i))
922 tags |= 1 << tag;
923 } else
924 return -EEXIST;
925 }
926 }
927
928 for (i = 0; i < n; i++) {
929 struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
930 if (i) {
931 rcu_assign_pointer(slot[i], child);
932 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
933 if (tags & (1 << tag))
934 tag_clear(node, tag, offset + i);
935 } else {
936 rcu_assign_pointer(slot[i], item);
937 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
938 if (tags & (1 << tag))
939 tag_set(node, tag, offset);
940 }
941 if (radix_tree_is_internal_node(old) &&
942 !is_sibling_entry(node, old) &&
943 (old != RADIX_TREE_RETRY))
944 radix_tree_free_nodes(old);
945 if (radix_tree_exceptional_entry(old))
946 node->exceptional--;
947 }
948 if (node) {
949 node->count += n;
950 if (radix_tree_exceptional_entry(item))
951 node->exceptional += n;
952 }
953 return n;
954 }
955 #else
956 static inline int insert_entries(struct radix_tree_node *node, void **slot,
957 void *item, unsigned order, bool replace)
958 {
959 if (*slot)
960 return -EEXIST;
961 rcu_assign_pointer(*slot, item);
962 if (node) {
963 node->count++;
964 if (radix_tree_exceptional_entry(item))
965 node->exceptional++;
966 }
967 return 1;
968 }
969 #endif
970
971 /**
972 * __radix_tree_insert - insert into a radix tree
973 * @root: radix tree root
974 * @index: index key
975 * @order: key covers the 2^order indices around index
976 * @item: item to insert
977 *
978 * Insert an item into the radix tree at position @index.
979 */
980 int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
981 unsigned order, void *item)
982 {
983 struct radix_tree_node *node;
984 void **slot;
985 int error;
986
987 BUG_ON(radix_tree_is_internal_node(item));
988
989 error = __radix_tree_create(root, index, order, &node, &slot);
990 if (error)
991 return error;
992
993 error = insert_entries(node, slot, item, order, false);
994 if (error < 0)
995 return error;
996
997 if (node) {
998 unsigned offset = get_slot_offset(node, slot);
999 BUG_ON(tag_get(node, 0, offset));
1000 BUG_ON(tag_get(node, 1, offset));
1001 BUG_ON(tag_get(node, 2, offset));
1002 } else {
1003 BUG_ON(root_tags_get(root));
1004 }
1005
1006 return 0;
1007 }
1008 EXPORT_SYMBOL(__radix_tree_insert);
1009
1010 /**
1011 * __radix_tree_lookup - lookup an item in a radix tree
1012 * @root: radix tree root
1013 * @index: index key
1014 * @nodep: returns node
1015 * @slotp: returns slot
1016 *
1017 * Lookup and return the item at position @index in the radix
1018 * tree @root.
1019 *
1020 * Until there is more than one item in the tree, no nodes are
1021 * allocated and @root->rnode is used as a direct slot instead of
1022 * pointing to a node, in which case *@nodep will be NULL.
1023 */
1024 void *__radix_tree_lookup(const struct radix_tree_root *root,
1025 unsigned long index, struct radix_tree_node **nodep,
1026 void ***slotp)
1027 {
1028 struct radix_tree_node *node, *parent;
1029 unsigned long maxindex;
1030 void **slot;
1031
1032 restart:
1033 parent = NULL;
1034 slot = (void **)&root->rnode;
1035 radix_tree_load_root(root, &node, &maxindex);
1036 if (index > maxindex)
1037 return NULL;
1038
1039 while (radix_tree_is_internal_node(node)) {
1040 unsigned offset;
1041
1042 if (node == RADIX_TREE_RETRY)
1043 goto restart;
1044 parent = entry_to_node(node);
1045 offset = radix_tree_descend(parent, &node, index);
1046 slot = parent->slots + offset;
1047 }
1048
1049 if (nodep)
1050 *nodep = parent;
1051 if (slotp)
1052 *slotp = slot;
1053 return node;
1054 }
1055
1056 /**
1057 * radix_tree_lookup_slot - lookup a slot in a radix tree
1058 * @root: radix tree root
1059 * @index: index key
1060 *
1061 * Returns: the slot corresponding to the position @index in the
1062 * radix tree @root. This is useful for update-if-exists operations.
1063 *
1064 * This function can be called under rcu_read_lock iff the slot is not
1065 * modified by radix_tree_replace_slot, otherwise it must be called with
1066 * exclusion from other writers. Any dereference of the slot must be done
1067 * using radix_tree_deref_slot.
1068 */
1069 void **radix_tree_lookup_slot(const struct radix_tree_root *root,
1070 unsigned long index)
1071 {
1072 void **slot;
1073
1074 if (!__radix_tree_lookup(root, index, NULL, &slot))
1075 return NULL;
1076 return slot;
1077 }
1078 EXPORT_SYMBOL(radix_tree_lookup_slot);
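/*
 * Typical update-if-exists pattern, as a sketch (the caller's lock
 * excludes other writers):
 *
 *	spin_lock(&lock);
 *	slot = radix_tree_lookup_slot(&tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(&tree, slot, new_item);
 *	spin_unlock(&lock);
 *
 * Note the restriction documented at radix_tree_replace_slot(): this
 * must not switch an entry between the empty, regular and exceptional
 * kinds, as that needs accounting in the node.
 */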
1079
1080 /**
1081 * radix_tree_lookup - perform lookup operation on a radix tree
1082 * @root: radix tree root
1083 * @index: index key
1084 *
1085 * Lookup the item at the position @index in the radix tree @root.
1086 *
1087 * This function can be called under rcu_read_lock, however the caller
1088 * must manage lifetimes of leaf nodes (eg. RCU may also be used to free
1089 * them safely). No RCU barriers are required to access or modify the
1090 * returned item, however.
1091 */
1092 void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
1093 {
1094 return __radix_tree_lookup(root, index, NULL, NULL);
1095 }
1096 EXPORT_SYMBOL(radix_tree_lookup);
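/*
 * Typical RCU-side usage, as a sketch (the caller guarantees the item
 * stays valid across the read-side section, e.g. by freeing items via
 * RCU or taking a reference before use):
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&tree, index);
 *	if (item)
 *		use(item);
 *	rcu_read_unlock();
 */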
1097
1098 static inline void replace_sibling_entries(struct radix_tree_node *node,
1099 void **slot, int count, int exceptional)
1100 {
1101 #ifdef CONFIG_RADIX_TREE_MULTIORDER
1102 void *ptr = node_to_entry(slot);
1103 unsigned offset = get_slot_offset(node, slot) + 1;
1104
1105 while (offset < RADIX_TREE_MAP_SIZE) {
1106 if (rcu_dereference_raw(node->slots[offset]) != ptr)
1107 break;
1108 if (count < 0) {
1109 node->slots[offset] = NULL;
1110 node->count--;
1111 }
1112 node->exceptional += exceptional;
1113 offset++;
1114 }
1115 #endif
1116 }
1117
1118 static void replace_slot(void **slot, void *item, struct radix_tree_node *node,
1119 int count, int exceptional)
1120 {
1121 if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
1122 return;
1123
1124 if (node && (count || exceptional)) {
1125 node->count += count;
1126 node->exceptional += exceptional;
1127 replace_sibling_entries(node, slot, count, exceptional);
1128 }
1129
1130 rcu_assign_pointer(*slot, item);
1131 }
1132
1133 static bool node_tag_get(const struct radix_tree_root *root,
1134 const struct radix_tree_node *node,
1135 unsigned int tag, unsigned int offset)
1136 {
1137 if (node)
1138 return tag_get(node, tag, offset);
1139 return root_tag_get(root, tag);
1140 }
1141
1142 /*
1143 * IDR users want to be able to store NULL in the tree, so if the slot isn't
1144 * free, don't adjust the count, even if it's transitioning between NULL and
1145 * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
1146 * have empty bits, but it only stores NULL in slots when they're being
1147 * deleted.
1148 */
1149 static int calculate_count(struct radix_tree_root *root,
1150 struct radix_tree_node *node, void **slot,
1151 void *item, void *old)
1152 {
1153 if (is_idr(root)) {
1154 unsigned offset = get_slot_offset(node, slot);
1155 bool free = node_tag_get(root, node, IDR_FREE, offset);
1156 if (!free)
1157 return 0;
1158 if (!old)
1159 return 1;
1160 }
1161 return !!item - !!old;
1162 }
1163
1164 /**
1165 * __radix_tree_replace - replace item in a slot
1166 * @root: radix tree root
1167 * @node: pointer to tree node
1168 * @slot: pointer to slot in @node
1169 * @item: new item to store in the slot.
1170 * @update_node: callback for changing leaf nodes
1171 * @private: private data to pass to @update_node
1172 *
1173 * For use with __radix_tree_lookup(). Caller must hold tree write locked
1174 * across slot lookup and replacement.
1175 */
1176 void __radix_tree_replace(struct radix_tree_root *root,
1177 struct radix_tree_node *node,
1178 void **slot, void *item,
1179 radix_tree_update_node_t update_node, void *private)
1180 {
1181 void *old = rcu_dereference_raw(*slot);
1182 int exceptional = !!radix_tree_exceptional_entry(item) -
1183 !!radix_tree_exceptional_entry(old);
1184 int count = calculate_count(root, node, slot, item, old);
1185
1186 /*
1187 * This function supports replacing exceptional entries and
1188 * deleting entries, but that needs accounting against the
1189 * node unless the slot is root->rnode.
1190 */
1191 WARN_ON_ONCE(!node && (slot != (void **)&root->rnode) &&
1192 (count || exceptional));
1193 replace_slot(slot, item, node, count, exceptional);
1194
1195 if (!node)
1196 return;
1197
1198 if (update_node)
1199 update_node(node, private);
1200
1201 delete_node(root, node, update_node, private);
1202 }
1203
1204 /**
1205 * radix_tree_replace_slot - replace item in a slot
1206 * @root: radix tree root
1207 * @slot: pointer to slot
1208 * @item: new item to store in the slot.
1209 *
1210 * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(),
1211 * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
1212 * across slot lookup and replacement.
1213 *
1214 * NOTE: This cannot be used to switch between non-entries (empty slots),
1215 * regular entries, and exceptional entries, as that requires accounting
1216 * inside the radix tree node. When switching between entry types, or when
1217 * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
1218 * radix_tree_iter_replace().
1219 */
1220 void radix_tree_replace_slot(struct radix_tree_root *root,
1221 void **slot, void *item)
1222 {
1223 __radix_tree_replace(root, NULL, slot, item, NULL, NULL);
1224 }
1225
1226 /**
1227 * radix_tree_iter_replace - replace item in a slot
1228 * @root: radix tree root
1229 * @slot: pointer to slot
1230 * @item: new item to store in the slot.
1231 *
1232 * For use with radix_tree_split() and radix_tree_for_each_slot().
1233 * Caller must hold tree write locked across split and replacement.
1234 */
1235 void radix_tree_iter_replace(struct radix_tree_root *root,
1236 const struct radix_tree_iter *iter, void **slot, void *item)
1237 {
1238 __radix_tree_replace(root, iter->node, slot, item, NULL, NULL);
1239 }
1240
1241 #ifdef CONFIG_RADIX_TREE_MULTIORDER
1242 /**
1243 * radix_tree_join - replace multiple entries with one multiorder entry
1244 * @root: radix tree root
1245 * @index: an index inside the new entry
1246 * @order: order of the new entry
1247 * @item: new entry
1248 *
1249 * Call this function to replace several entries with one larger entry.
1250 * The existing entries are presumed to not need freeing as a result of
1251 * this call.
1252 *
1253 * The replacement entry will have all the tags set on it that were set
1254 * on any of the entries it is replacing.
1255 */
1256 int radix_tree_join(struct radix_tree_root *root, unsigned long index,
1257 unsigned order, void *item)
1258 {
1259 struct radix_tree_node *node;
1260 void **slot;
1261 int error;
1262
1263 BUG_ON(radix_tree_is_internal_node(item));
1264
1265 error = __radix_tree_create(root, index, order, &node, &slot);
1266 if (!error)
1267 error = insert_entries(node, slot, item, order, true);
1268 if (error > 0)
1269 error = 0;
1270
1271 return error;
1272 }
1273
1274 /**
1275 * radix_tree_split - Split an entry into smaller entries
1276 * @root: radix tree root
1277 * @index: An index within the large entry
1278 * @order: Order of new entries
1279 *
1280 * Call this function as the first step in replacing a multiorder entry
1281 * with several entries of lower order. After this function returns,
1282 * loop over the relevant portion of the tree using radix_tree_for_each_slot()
1283 * and call radix_tree_iter_replace() to set up each new entry.
1284 *
1285 * The tags from this entry are replicated to all the new entries.
1286 *
1287 * The radix tree should be locked against modification during the entire
1288 * replacement operation. Lock-free lookups will see RADIX_TREE_RETRY which
1289 * should prompt RCU walkers to restart the lookup from the root.
1290 */
1291 int radix_tree_split(struct radix_tree_root *root, unsigned long index,
1292 unsigned order)
1293 {
1294 struct radix_tree_node *parent, *node, *child;
1295 void **slot;
1296 unsigned int offset, end;
1297 unsigned n, tag, tags = 0;
1298 gfp_t gfp = root_gfp_mask(root);
1299
1300 if (!__radix_tree_lookup(root, index, &parent, &slot))
1301 return -ENOENT;
1302 if (!parent)
1303 return -ENOENT;
1304
1305 offset = get_slot_offset(parent, slot);
1306
1307 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1308 if (tag_get(parent, tag, offset))
1309 tags |= 1 << tag;
1310
1311 for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
1312 if (!is_sibling_entry(parent,
1313 rcu_dereference_raw(parent->slots[end])))
1314 break;
1315 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1316 if (tags & (1 << tag))
1317 tag_set(parent, tag, end);
1318 /* rcu_assign_pointer ensures tags are set before RETRY */
1319 rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY);
1320 }
1321 rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY);
1322 parent->exceptional -= (end - offset);
1323
1324 if (order == parent->shift)
1325 return 0;
1326 if (order > parent->shift) {
1327 while (offset < end)
1328 offset += insert_entries(parent, &parent->slots[offset],
1329 RADIX_TREE_RETRY, order, true);
1330 return 0;
1331 }
1332
1333 node = parent;
1334
1335 for (;;) {
1336 if (node->shift > order) {
1337 child = radix_tree_node_alloc(gfp, node, root,
1338 node->shift - RADIX_TREE_MAP_SHIFT,
1339 offset, 0, 0);
1340 if (!child)
1341 goto nomem;
1342 if (node != parent) {
1343 node->count++;
1344 rcu_assign_pointer(node->slots[offset],
1345 node_to_entry(child));
1346 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1347 if (tags & (1 << tag))
1348 tag_set(node, tag, offset);
1349 }
1350
1351 node = child;
1352 offset = 0;
1353 continue;
1354 }
1355
1356 n = insert_entries(node, &node->slots[offset],
1357 RADIX_TREE_RETRY, order, false);
1358 BUG_ON(n > RADIX_TREE_MAP_SIZE);
1359
1360 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1361 if (tags & (1 << tag))
1362 tag_set(node, tag, offset);
1363 offset += n;
1364
1365 while (offset == RADIX_TREE_MAP_SIZE) {
1366 if (node == parent)
1367 break;
1368 offset = node->offset;
1369 child = node;
1370 node = node->parent;
1371 rcu_assign_pointer(node->slots[offset],
1372 node_to_entry(child));
1373 offset++;
1374 }
1375 if ((node == parent) && (offset == end))
1376 return 0;
1377 }
1378
1379 nomem:
1380 /* Shouldn't happen; did user forget to preload? */
1381 /* TODO: free all the allocated nodes */
1382 WARN_ON(1);
1383 return -ENOMEM;
1384 }
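/*
 * The replacement sequence described above, as a sketch (the caller has
 * preloaded with radix_tree_split_preload(), holds the tree write-locked
 * throughout, and bounds the loop to the range the old entry covered):
 *
 *	radix_tree_split(&tree, index, new_order);
 *	radix_tree_for_each_slot(slot, &tree, &iter, index) {
 *		radix_tree_iter_replace(&tree, &iter, slot, new_item);
 *	}
 */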
1385 #endif
1386
1387 static void node_tag_set(struct radix_tree_root *root,
1388 struct radix_tree_node *node,
1389 unsigned int tag, unsigned int offset)
1390 {
1391 while (node) {
1392 if (tag_get(node, tag, offset))
1393 return;
1394 tag_set(node, tag, offset);
1395 offset = node->offset;
1396 node = node->parent;
1397 }
1398
1399 if (!root_tag_get(root, tag))
1400 root_tag_set(root, tag);
1401 }
1402
1403 /**
1404 * radix_tree_tag_set - set a tag on a radix tree node
1405 * @root: radix tree root
1406 * @index: index key
1407 * @tag: tag index
1408 *
1409 * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
1410 * corresponding to @index in the radix tree, from
1411 * the root all the way down to the leaf node.
1412 *
1413 * Returns the address of the tagged item. Setting a tag on a not-present
1414 * item is a bug.
1415 */
1416 void *radix_tree_tag_set(struct radix_tree_root *root,
1417 unsigned long index, unsigned int tag)
1418 {
1419 struct radix_tree_node *node, *parent;
1420 unsigned long maxindex;
1421
1422 radix_tree_load_root(root, &node, &maxindex);
1423 BUG_ON(index > maxindex);
1424
1425 while (radix_tree_is_internal_node(node)) {
1426 unsigned offset;
1427
1428 parent = entry_to_node(node);
1429 offset = radix_tree_descend(parent, &node, index);
1430 BUG_ON(!node);
1431
1432 if (!tag_get(parent, tag, offset))
1433 tag_set(parent, tag, offset);
1434 }
1435
1436 /* set the root's tag bit */
1437 if (!root_tag_get(root, tag))
1438 root_tag_set(root, tag);
1439
1440 return node;
1441 }
1442 EXPORT_SYMBOL(radix_tree_tag_set);
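/*
 * Example of tag usage, as a sketch (tag 0 here plays the role
 * PAGECACHE_TAG_DIRTY plays for the page cache; the caller holds the
 * tree write-locked around the modifications):
 *
 *	radix_tree_insert(&tree, index, item);
 *	radix_tree_tag_set(&tree, index, 0);
 *	...
 *	if (radix_tree_tag_get(&tree, index, 0))
 *		radix_tree_tag_clear(&tree, index, 0);
 */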
1443
1444 /**
1445 * radix_tree_iter_tag_set - set a tag on the current iterator entry
1446 * @root: radix tree root
1447 * @iter: iterator state
1448 * @tag: tag to set
1449 */
1450 void radix_tree_iter_tag_set(struct radix_tree_root *root,
1451 const struct radix_tree_iter *iter, unsigned int tag)
1452 {
1453 node_tag_set(root, iter->node, tag, iter_offset(iter));
1454 }
1455
1456 static void node_tag_clear(struct radix_tree_root *root,
1457 struct radix_tree_node *node,
1458 unsigned int tag, unsigned int offset)
1459 {
1460 while (node) {
1461 if (!tag_get(node, tag, offset))
1462 return;
1463 tag_clear(node, tag, offset);
1464 if (any_tag_set(node, tag))
1465 return;
1466
1467 offset = node->offset;
1468 node = node->parent;
1469 }
1470
1471 /* clear the root's tag bit */
1472 if (root_tag_get(root, tag))
1473 root_tag_clear(root, tag);
1474 }
1475
1476 /**
1477 * radix_tree_tag_clear - clear a tag on a radix tree node
1478 * @root: radix tree root
1479 * @index: index key
1480 * @tag: tag index
1481 *
1482 * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
1483 * corresponding to @index in the radix tree. If this causes
1484 * the leaf node to have no tags set then clear the tag in the
1485 * next-to-leaf node, etc.
1486 *
1487 * Returns the address of the tagged item on success, else NULL; i.e. it
1488 * has the same return value and semantics as radix_tree_lookup().
1489 */
1490 void *radix_tree_tag_clear(struct radix_tree_root *root,
1491 unsigned long index, unsigned int tag)
1492 {
1493 struct radix_tree_node *node, *parent;
1494 unsigned long maxindex;
1495 int uninitialized_var(offset);
1496
1497 radix_tree_load_root(root, &node, &maxindex);
1498 if (index > maxindex)
1499 return NULL;
1500
1501 parent = NULL;
1502
1503 while (radix_tree_is_internal_node(node)) {
1504 parent = entry_to_node(node);
1505 offset = radix_tree_descend(parent, &node, index);
1506 }
1507
1508 if (node)
1509 node_tag_clear(root, parent, tag, offset);
1510
1511 return node;
1512 }
1513 EXPORT_SYMBOL(radix_tree_tag_clear);
1514
1515 /**
1516 * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
1517 * @root: radix tree root
1518 * @iter: iterator state
1519 * @tag: tag to clear
1520 */
1521 void radix_tree_iter_tag_clear(struct radix_tree_root *root,
1522 const struct radix_tree_iter *iter, unsigned int tag)
1523 {
1524 node_tag_clear(root, iter->node, tag, iter_offset(iter));
1525 }
1526
1527 /**
1528 * radix_tree_tag_get - get a tag on a radix tree node
1529 * @root: radix tree root
1530 * @index: index key
1531 * @tag: tag index (< RADIX_TREE_MAX_TAGS)
1532 *
1533 * Return values:
1534 *
1535 * 0: tag not present or not set
1536 * 1: tag set
1537 *
1538 * Note that the return value of this function may not be relied on, even if
1539 * the RCU lock is held, unless tag modification and node deletion are excluded
1540 * from concurrency.
1541 */
1542 int radix_tree_tag_get(const struct radix_tree_root *root,
1543 unsigned long index, unsigned int tag)
1544 {
1545 struct radix_tree_node *node, *parent;
1546 unsigned long maxindex;
1547
1548 if (!root_tag_get(root, tag))
1549 return 0;
1550
1551 radix_tree_load_root(root, &node, &maxindex);
1552 if (index > maxindex)
1553 return 0;
1554
1555 while (radix_tree_is_internal_node(node)) {
1556 unsigned offset;
1557
1558 parent = entry_to_node(node);
1559 offset = radix_tree_descend(parent, &node, index);
1560
1561 if (!tag_get(parent, tag, offset))
1562 return 0;
1563 if (node == RADIX_TREE_RETRY)
1564 break;
1565 }
1566
1567 return 1;
1568 }
1569 EXPORT_SYMBOL(radix_tree_tag_get);
1570
1571 static inline void __set_iter_shift(struct radix_tree_iter *iter,
1572 unsigned int shift)
1573 {
1574 #ifdef CONFIG_RADIX_TREE_MULTIORDER
1575 iter->shift = shift;
1576 #endif
1577 }
1578
1579 /* Construct iter->tags bit-mask from node->tags[tag] array */
1580 static void set_iter_tags(struct radix_tree_iter *iter,
1581 struct radix_tree_node *node, unsigned offset,
1582 unsigned tag)
1583 {
1584 unsigned tag_long = offset / BITS_PER_LONG;
1585 unsigned tag_bit = offset % BITS_PER_LONG;
1586
1587 if (!node) {
1588 iter->tags = 1;
1589 return;
1590 }
1591
1592 iter->tags = node->tags[tag][tag_long] >> tag_bit;
1593
1594 /* This never happens if RADIX_TREE_TAG_LONGS == 1 */
1595 if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
1596 /* Pick tags from next element */
1597 if (tag_bit)
1598 iter->tags |= node->tags[tag][tag_long + 1] <<
1599 (BITS_PER_LONG - tag_bit);
1600 /* Clip chunk size, here only BITS_PER_LONG tags */
1601 iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
1602 }
1603 }
1604
1605 #ifdef CONFIG_RADIX_TREE_MULTIORDER
1606 static void **skip_siblings(struct radix_tree_node **nodep,
1607 void **slot, struct radix_tree_iter *iter)
1608 {
1609 void *sib = node_to_entry(slot - 1);
1610
1611 while (iter->index < iter->next_index) {
1612 *nodep = rcu_dereference_raw(*slot);
1613 if (*nodep && *nodep != sib)
1614 return slot;
1615 slot++;
1616 iter->index = __radix_tree_iter_add(iter, 1);
1617 iter->tags >>= 1;
1618 }
1619
1620 *nodep = NULL;
1621 return NULL;
1622 }
1623
1624 void **__radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
1625 unsigned flags)
1626 {
1627 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
1628 struct radix_tree_node *node = rcu_dereference_raw(*slot);
1629
1630 slot = skip_siblings(&node, slot, iter);
1631
1632 while (radix_tree_is_internal_node(node)) {
1633 unsigned offset;
1634 unsigned long next_index;
1635
1636 if (node == RADIX_TREE_RETRY)
1637 return slot;
1638 node = entry_to_node(node);
1639 iter->node = node;
1640 iter->shift = node->shift;
1641
1642 if (flags & RADIX_TREE_ITER_TAGGED) {
1643 offset = radix_tree_find_next_bit(node, tag, 0);
1644 if (offset == RADIX_TREE_MAP_SIZE)
1645 return NULL;
1646 slot = &node->slots[offset];
1647 iter->index = __radix_tree_iter_add(iter, offset);
1648 set_iter_tags(iter, node, offset, tag);
1649 node = rcu_dereference_raw(*slot);
1650 } else {
1651 offset = 0;
1652 slot = &node->slots[0];
1653 for (;;) {
1654 node = rcu_dereference_raw(*slot);
1655 if (node)
1656 break;
1657 slot++;
1658 offset++;
1659 if (offset == RADIX_TREE_MAP_SIZE)
1660 return NULL;
1661 }
1662 iter->index = __radix_tree_iter_add(iter, offset);
1663 }
1664 if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
1665 goto none;
1666 next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
1667 if (next_index < iter->next_index)
1668 iter->next_index = next_index;
1669 }
1670
1671 return slot;
1672 none:
1673 iter->next_index = 0;
1674 return NULL;
1675 }
1676 EXPORT_SYMBOL(__radix_tree_next_slot);
1677 #else
1678 static void **skip_siblings(struct radix_tree_node **nodep,
1679 void **slot, struct radix_tree_iter *iter)
1680 {
1681 return slot;
1682 }
1683 #endif
1684
1685 void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter)
1686 {
1687 struct radix_tree_node *node;
1688
1689 slot++;
1690 iter->index = __radix_tree_iter_add(iter, 1);
1691 skip_siblings(&node, slot, iter);
1692 iter->next_index = iter->index;
1693 iter->tags = 0;
1694 return NULL;
1695 }
1696 EXPORT_SYMBOL(radix_tree_iter_resume);
1697
1698 /**
1699 * radix_tree_next_chunk - find next chunk of slots for iteration
1700 *
1701 * @root: radix tree root
1702 * @iter: iterator state
1703 * @flags: RADIX_TREE_ITER_* flags and tag index
1704 * Returns: pointer to chunk first slot, or NULL if iteration is over
1705 */
1706 void **radix_tree_next_chunk(const struct radix_tree_root *root,
1707 struct radix_tree_iter *iter, unsigned flags)
1708 {
1709 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
1710 struct radix_tree_node *node, *child;
1711 unsigned long index, offset, maxindex;
1712
1713 if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
1714 return NULL;
1715
1716 /*
1717 * Catch next_index overflow after ~0UL. iter->index never overflows
1718 * during iterating; it can be zero only at the beginning.
1719 * And we cannot overflow iter->next_index in a single step,
1720 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
1721 *
1722 * This condition is also used by radix_tree_next_slot() to stop
1723 * contiguous iterating, and forbid switching to the next chunk.
1724 */
1725 index = iter->next_index;
1726 if (!index && iter->index)
1727 return NULL;
1728
1729 restart:
1730 radix_tree_load_root(root, &child, &maxindex);
1731 if (index > maxindex)
1732 return NULL;
1733 if (!child)
1734 return NULL;
1735
1736 if (!radix_tree_is_internal_node(child)) {
1737 /* Single-slot tree */
1738 iter->index = index;
1739 iter->next_index = maxindex + 1;
1740 iter->tags = 1;
1741 iter->node = NULL;
1742 __set_iter_shift(iter, 0);
1743 return (void **)&root->rnode;
1744 }
1745
1746 do {
1747 node = entry_to_node(child);
1748 offset = radix_tree_descend(node, &child, index);
1749
1750 if ((flags & RADIX_TREE_ITER_TAGGED) ?
1751 !tag_get(node, tag, offset) : !child) {
1752 /* Hole detected */
1753 if (flags & RADIX_TREE_ITER_CONTIG)
1754 return NULL;
1755
1756 if (flags & RADIX_TREE_ITER_TAGGED)
1757 offset = radix_tree_find_next_bit(node, tag,
1758 offset + 1);
1759 else
1760 while (++offset < RADIX_TREE_MAP_SIZE) {
1761 void *slot = rcu_dereference_raw(
1762 node->slots[offset]);
1763 if (is_sibling_entry(node, slot))
1764 continue;
1765 if (slot)
1766 break;
1767 }
1768 index &= ~node_maxindex(node);
1769 index += offset << node->shift;
1770 /* Overflow after ~0UL */
1771 if (!index)
1772 return NULL;
1773 if (offset == RADIX_TREE_MAP_SIZE)
1774 goto restart;
1775 child = rcu_dereference_raw(node->slots[offset]);
1776 }
1777
1778 if (!child)
1779 goto restart;
1780 if (child == RADIX_TREE_RETRY)
1781 break;
1782 } while (radix_tree_is_internal_node(child));
1783
1784 /* Update the iterator state */
1785 iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
1786 iter->next_index = (index | node_maxindex(node)) + 1;
1787 iter->node = node;
1788 __set_iter_shift(iter, node->shift);
1789
1790 if (flags & RADIX_TREE_ITER_TAGGED)
1791 set_iter_tags(iter, node, offset, tag);
1792
1793 return node->slots + offset;
1794 }
1795 EXPORT_SYMBOL(radix_tree_next_chunk);
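/*
 * radix_tree_next_chunk() is normally reached through the iterator
 * macros in radix-tree.h rather than called directly. A sketch:
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	radix_tree_for_each_slot(slot, &tree, &iter, start) {
 *		item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		use(iter.index, item);
 *	}
 */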
1796
1797 /**
1798 * radix_tree_gang_lookup - perform multiple lookup on a radix tree
1799 * @root: radix tree root
1800 * @results: where the results of the lookup are placed
1801 * @first_index: start the lookup from this key
1802 * @max_items: place up to this many items at *results
1803 *
1804 * Performs an index-ascending scan of the tree for present items. Places
1805 * them at *@results and returns the number of items which were placed at
1806 * *@results.
1807 *
1808 * The implementation is naive.
1809 *
1810 * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
1811 * rcu_read_lock. In this case, rather than the returned results being
1812 * an atomic snapshot of the tree at a single point in time, the
1813 * semantics of an RCU protected gang lookup are as though multiple
1814 * radix_tree_lookups have been issued in individual locks, and results
1815 * stored in 'results'.
1816 */
1817 unsigned int
1818 radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
1819 unsigned long first_index, unsigned int max_items)
1820 {
1821 struct radix_tree_iter iter;
1822 void **slot;
1823 unsigned int ret = 0;
1824
1825 if (unlikely(!max_items))
1826 return 0;
1827
1828 radix_tree_for_each_slot(slot, root, &iter, first_index) {
1829 results[ret] = rcu_dereference_raw(*slot);
1830 if (!results[ret])
1831 continue;
1832 if (radix_tree_is_internal_node(results[ret])) {
1833 slot = radix_tree_iter_retry(&iter);
1834 continue;
1835 }
1836 if (++ret == max_items)
1837 break;
1838 }
1839
1840 return ret;
1841 }
1842 EXPORT_SYMBOL(radix_tree_gang_lookup);
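/*
 * Example, as a sketch (up to 16 consecutive present items copied out
 * under RCU protection):
 *
 *	void *batch[16];
 *	unsigned int n;
 *
 *	rcu_read_lock();
 *	n = radix_tree_gang_lookup(&tree, batch, first, ARRAY_SIZE(batch));
 *	rcu_read_unlock();
 */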
1843
1844 /**
1845 * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
1846 * @root: radix tree root
1847 * @results: where the results of the lookup are placed
1848 * @indices: where their indices should be placed (but usually NULL)
1849 * @first_index: start the lookup from this key
1850 * @max_items: place up to this many items at *results
1851 *
1852 * Performs an index-ascending scan of the tree for present items. Places
1853 * their slots at *@results and returns the number of items which were
1854 * placed at *@results.
1855 *
1856 * The implementation is naive: it scans the tree one slot at a time.
1857 *
1858 * The RCU and locking rules are the same as for radix_tree_gang_lookup.
1859 * Slots must be dereferenced with radix_tree_deref_slot, and if using
1860 * only RCU protection, radix_tree_deref_slot may fail, requiring a retry.
1861 */
1862 unsigned int
1863 radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
1864 void ***results, unsigned long *indices,
1865 unsigned long first_index, unsigned int max_items)
1866 {
1867 struct radix_tree_iter iter;
1868 void **slot;
1869 unsigned int ret = 0;
1870
1871 if (unlikely(!max_items))
1872 return 0;
1873
1874 radix_tree_for_each_slot(slot, root, &iter, first_index) {
1875 results[ret] = slot;
1876 if (indices)
1877 indices[ret] = iter.index;
1878 if (++ret == max_items)
1879 break;
1880 }
1881
1882 return ret;
1883 }
1884 EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
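
/*
 * Illustrative sketch (editor's addition) of the retry protocol the
 * comment above describes: under RCU-only protection an entry may be
 * moved by a concurrent tree modification, in which case
 * radix_tree_deref_slot() returns a retry entry and the lookup must
 * be redone from that index. The helper name is hypothetical.
 */
static void __maybe_unused example_lookup_slots(struct radix_tree_root *root)
{
	void **slots[16];
	unsigned long indices[16];
	unsigned long start = 0;
	unsigned int i, nr;

	rcu_read_lock();
restart:
	nr = radix_tree_gang_lookup_slot(root, slots, indices, start,
					 ARRAY_SIZE(slots));
	for (i = 0; i < nr; i++) {
		void *item = radix_tree_deref_slot(slots[i]);

		if (radix_tree_deref_retry(item)) {
			start = indices[i];
			goto restart;
		}
		if (!item)	/* slot was emptied after the lookup */
			continue;
		pr_debug("index %lu -> %p\n", indices[i], item);
	}
	rcu_read_unlock();
}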
1885
1886 /**
1887 * radix_tree_gang_lookup_tag - perform multiple lookups on a radix tree
1888 * based on a tag
1889 * @root: radix tree root
1890 * @results: where the results of the lookup are placed
1891 * @first_index: start the lookup from this key
1892 * @max_items: place up to this many items at *results
1893 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
1894 *
1895 * Performs an index-ascending scan of the tree for present items which
1896 * have the tag indexed by @tag set. Places the items at *@results and
1897 * returns the number of items which were placed at *@results.
1898 */
1899 unsigned int
1900 radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
1901 unsigned long first_index, unsigned int max_items,
1902 unsigned int tag)
1903 {
1904 struct radix_tree_iter iter;
1905 void **slot;
1906 unsigned int ret = 0;
1907
1908 if (unlikely(!max_items))
1909 return 0;
1910
1911 radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
1912 results[ret] = rcu_dereference_raw(*slot);
1913 if (!results[ret])
1914 continue;
1915 if (radix_tree_is_internal_node(results[ret])) {
1916 slot = radix_tree_iter_retry(&iter);
1917 continue;
1918 }
1919 if (++ret == max_items)
1920 break;
1921 }
1922
1923 return ret;
1924 }
1925 EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
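
/*
 * Illustrative sketch (editor's addition): tag-based gang lookup is the
 * pattern writeback-style code uses to find e.g. dirty entries. Tag 0
 * stands in for a subsystem-defined tag such as PAGECACHE_TAG_DIRTY;
 * the helper name is hypothetical.
 */
static void __maybe_unused example_lookup_tagged(struct radix_tree_root *root)
{
	void *results[16];
	unsigned int i, nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup_tag(root, results, 0,
					ARRAY_SIZE(results), 0);
	for (i = 0; i < nr; i++)
		pr_debug("tagged item: %p\n", results[i]);
	rcu_read_unlock();
}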
1926
1927 /**
1928 * radix_tree_gang_lookup_tag_slot - perform multiple slot lookups on a
1929 * radix tree based on a tag
1930 * @root: radix tree root
1931 * @results: where the results of the lookup are placed
1932 * @first_index: start the lookup from this key
1933 * @max_items: place up to this many items at *results
1934 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
1935 *
1936 * Performs an index-ascending scan of the tree for present items which
1937 * have the tag indexed by @tag set. Places the slots at *@results and
1938 * returns the number of slots which were placed at *@results.
1939 */
1940 unsigned int
1941 radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
1942 void ***results, unsigned long first_index,
1943 unsigned int max_items, unsigned int tag)
1944 {
1945 struct radix_tree_iter iter;
1946 void **slot;
1947 unsigned int ret = 0;
1948
1949 if (unlikely(!max_items))
1950 return 0;
1951
1952 radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
1953 results[ret] = slot;
1954 if (++ret == max_items)
1955 break;
1956 }
1957
1958 return ret;
1959 }
1960 EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
1961
1962 /**
1963 * __radix_tree_delete_node - try to free node after clearing a slot
1964 * @root: radix tree root
1965 * @node: node containing the slot that was cleared
1966 * @update_node: callback for changing leaf nodes
1967 * @private: private data to pass to @update_node
1968 *
1969 * After clearing a slot in @node of the radix tree rooted
1970 * at @root, call this function to attempt to free the
1971 * node and shrink the tree.
1972 */
1973 void __radix_tree_delete_node(struct radix_tree_root *root,
1974 struct radix_tree_node *node,
1975 radix_tree_update_node_t update_node,
1976 void *private)
1977 {
1978 delete_node(root, node, update_node, private);
1979 }
1980
1981 static bool __radix_tree_delete(struct radix_tree_root *root,
1982 struct radix_tree_node *node, void **slot)
1983 {
1984 void *old = rcu_dereference_raw(*slot);
1985 int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0;
1986 unsigned offset = get_slot_offset(node, slot);
1987 int tag;
1988
1989 if (is_idr(root))
1990 node_tag_set(root, node, IDR_FREE, offset);
1991 else
1992 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1993 node_tag_clear(root, node, tag, offset);
1994
1995 replace_slot(slot, NULL, node, -1, exceptional);
1996 return node && delete_node(root, node, NULL, NULL);
1997 }
1998
1999 /**
2000 * radix_tree_iter_delete - delete the entry at this iterator position
2001 * @root: radix tree root
2002 * @iter: iterator state
2003 * @slot: pointer to slot
2004 *
2005 * Delete the entry at the position currently pointed to by the iterator.
2006 * This may result in the current node being freed; if it is, the iterator
2007 * is advanced so that it will not reference the freed memory. This
2008 * function may be called without any locking if there are no other threads
2009 * which can access this tree.
2010 */
2011 void radix_tree_iter_delete(struct radix_tree_root *root,
2012 struct radix_tree_iter *iter, void **slot)
2013 {
2014 if (__radix_tree_delete(root, iter->node, slot))
2015 iter->index = iter->next_index;
2016 }
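
/*
 * Illustrative sketch (editor's addition): clearing a whole tree by
 * deleting during iteration. radix_tree_iter_delete() fixes up the
 * iterator if it frees the current node, so the loop body stays
 * trivial. This assumes no concurrent modifiers, per the comment
 * above; the helper name is hypothetical.
 */
static void __maybe_unused example_clear_tree(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;

	radix_tree_for_each_slot(slot, root, &iter, 0)
		radix_tree_iter_delete(root, &iter, slot);
}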
2017
2018 /**
2019 * radix_tree_delete_item - delete an item from a radix tree
2020 * @root: radix tree root
2021 * @index: index key
2022 * @item: expected item
2023 *
2024 * Remove @item at @index from the radix tree rooted at @root.
2025 *
2026 * Return: the deleted entry, or %NULL if it was not present
2027 * or the entry at the given @index was not @item.
2028 */
2029 void *radix_tree_delete_item(struct radix_tree_root *root,
2030 unsigned long index, void *item)
2031 {
2032 struct radix_tree_node *node = NULL;
2033 void **slot;
2034 void *entry;
2035
2036 entry = __radix_tree_lookup(root, index, &node, &slot);
2037 if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
2038 get_slot_offset(node, slot))))
2039 return NULL;
2040
2041 if (item && entry != item)
2042 return NULL;
2043
2044 __radix_tree_delete(root, node, slot);
2045
2046 return entry;
2047 }
2048 EXPORT_SYMBOL(radix_tree_delete_item);
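
/*
 * Illustrative sketch (editor's addition): passing a non-NULL @item
 * makes this a compare-and-delete, useful when the slot may have been
 * repopulated with a different entry since it was looked up. Locking
 * is the caller's responsibility; the helper name is hypothetical.
 */
static bool __maybe_unused
example_delete_if_unchanged(struct radix_tree_root *root,
			    unsigned long index, void *expected)
{
	return radix_tree_delete_item(root, index, expected) == expected;
}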
2049
2050 /**
2051 * radix_tree_delete - delete an entry from a radix tree
2052 * @root: radix tree root
2053 * @index: index key
2054 *
2055 * Remove the entry at @index from the radix tree rooted at @root.
2056 *
2057 * Return: The deleted entry, or %NULL if it was not present.
2058 */
2059 void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
2060 {
2061 return radix_tree_delete_item(root, index, NULL);
2062 }
2063 EXPORT_SYMBOL(radix_tree_delete);
2064
2065 void radix_tree_clear_tags(struct radix_tree_root *root,
2066 struct radix_tree_node *node,
2067 void **slot)
2068 {
2069 if (node) {
2070 unsigned int tag, offset = get_slot_offset(node, slot);
2071 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
2072 node_tag_clear(root, node, tag, offset);
2073 } else {
2074 root_tag_clear_all(root);
2075 }
2076 }
2077
2078 /**
2079 * radix_tree_tagged - test whether any items in the tree are tagged
2080 * @root: radix tree root
2081 * @tag: tag to test
2082 */
2083 int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
2084 {
2085 return root_tag_get(root, tag);
2086 }
2087 EXPORT_SYMBOL(radix_tree_tagged);
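
/*
 * Illustrative sketch (editor's addition): because tags are propagated
 * up to the root by radix_tree_tag_set(), radix_tree_tagged() is an
 * O(1) "anything tagged at all?" test, cheap to run before paying for
 * a full tagged walk. Assumes an entry exists at @index; the helper
 * name and the use of tag 0 are hypothetical.
 */
static void __maybe_unused example_tag_check(struct radix_tree_root *root,
					     unsigned long index)
{
	radix_tree_tag_set(root, index, 0);
	WARN_ON(!radix_tree_tagged(root, 0));
}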
2088
2089 /**
2090 * idr_preload - preload for idr_alloc()
2091 * @gfp_mask: allocation mask to use for preloading
2092 *
2093 * Preallocate memory to use for the next call to idr_alloc(). This function
2094 * returns with preemption disabled. It will be enabled by idr_preload_end().
2095 */
2096 void idr_preload(gfp_t gfp_mask)
2097 {
2098 __radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
2099 }
2100 EXPORT_SYMBOL(idr_preload);
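
/*
 * Illustrative sketch (editor's addition) of the canonical pattern:
 * preload with a sleeping allocation outside the lock, then allocate
 * the ID atomically under a spinlock with GFP_NOWAIT. 'lock' and the
 * helper name are hypothetical.
 */
static int __maybe_unused example_idr_alloc(struct idr *idr,
					    spinlock_t *lock, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(lock);
	id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);	/* end == 0: no limit */
	spin_unlock(lock);
	idr_preload_end();	/* re-enables preemption */

	return id;	/* new ID, or -ENOMEM/-ENOSPC on failure */
}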
2101
2102 /**
2103 * ida_pre_get - reserve resources for ida allocation
2104 * @ida: ida handle
2105 * @gfp: memory allocation flags
2106 *
2107 * This function should be called before calling ida_get_new_above(). If it
2108 * is unable to allocate memory, it will return %0. On success, it returns %1.
2109 */
2110 int ida_pre_get(struct ida *ida, gfp_t gfp)
2111 {
2112 __radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
2113 /*
2114 * The IDA API has no preload_end() equivalent. Instead,
2115 * ida_get_new() can return -EAGAIN, prompting the caller
2116 * to return to the ida_pre_get() step.
2117 */
2118 preempt_enable();
2119
2120 if (!this_cpu_read(ida_bitmap)) {
2121 struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
2122 if (!bitmap)
2123 return 0;
2124 bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
2125 kfree(bitmap);
2126 }
2127
2128 return 1;
2129 }
2130 EXPORT_SYMBOL(ida_pre_get);
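
/*
 * Illustrative sketch (editor's addition) of the retry loop described
 * in the comment above: -EAGAIN from ida_get_new() sends the caller
 * back to ida_pre_get(). The helper name is hypothetical; real users
 * must also serialize ida_get_new() calls themselves.
 */
static int __maybe_unused example_ida_alloc(struct ida *ida)
{
	int id, err;

again:
	if (!ida_pre_get(ida, GFP_KERNEL))
		return -ENOMEM;
	err = ida_get_new(ida, &id);
	if (err == -EAGAIN)
		goto again;	/* preloaded memory was used up; refill */

	return err ? err : id;
}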
2131
2132 void **idr_get_free(struct radix_tree_root *root,
2133 struct radix_tree_iter *iter, gfp_t gfp, int end)
2134 {
2135 struct radix_tree_node *node = NULL, *child;
2136 void **slot = (void **)&root->rnode;
2137 unsigned long maxindex, start = iter->next_index;
2138 unsigned long max = end > 0 ? end - 1 : INT_MAX;
2139 unsigned int shift, offset = 0;
2140
2141 grow:
2142 shift = radix_tree_load_root(root, &child, &maxindex);
2143 if (!radix_tree_tagged(root, IDR_FREE))
2144 start = max(start, maxindex + 1);
2145 if (start > max)
2146 return ERR_PTR(-ENOSPC);
2147
2148 if (start > maxindex) {
2149 int error = radix_tree_extend(root, gfp, start, shift);
2150 if (error < 0)
2151 return ERR_PTR(error);
2152 shift = error;
2153 child = rcu_dereference_raw(root->rnode);
2154 }
2155
2156 while (shift) {
2157 shift -= RADIX_TREE_MAP_SHIFT;
2158 if (child == NULL) {
2159 /* Have to add a child node. */
2160 child = radix_tree_node_alloc(gfp, node, root, shift,
2161 offset, 0, 0);
2162 if (!child)
2163 return ERR_PTR(-ENOMEM);
2164 all_tag_set(child, IDR_FREE);
2165 rcu_assign_pointer(*slot, node_to_entry(child));
2166 if (node)
2167 node->count++;
2168 } else if (!radix_tree_is_internal_node(child))
2169 break;
2170
2171 node = entry_to_node(child);
2172 offset = radix_tree_descend(node, &child, start);
2173 if (!tag_get(node, IDR_FREE, offset)) {
2174 offset = radix_tree_find_next_bit(node, IDR_FREE,
2175 offset + 1);
2176 start = next_index(start, node, offset);
2177 if (start > max)
2178 return ERR_PTR(-ENOSPC);
2179 while (offset == RADIX_TREE_MAP_SIZE) {
2180 offset = node->offset + 1;
2181 node = node->parent;
2182 if (!node)
2183 goto grow;
2184 shift = node->shift;
2185 }
2186 child = rcu_dereference_raw(node->slots[offset]);
2187 }
2188 slot = &node->slots[offset];
2189 }
2190
2191 iter->index = start;
2192 if (node)
2193 iter->next_index = 1 + min(max, (start | node_maxindex(node)));
2194 else
2195 iter->next_index = 1;
2196 iter->node = node;
2197 __set_iter_shift(iter, shift);
2198 set_iter_tags(iter, node, offset, IDR_FREE);
2199
2200 return slot;
2201 }
2202
2203 /**
2204 * idr_destroy - release all internal memory from an IDR
2205 * @idr: idr handle
2206 *
2207 * After this function is called, the IDR is empty, and may be reused or
2208 * the data structure containing it may be freed.
2209 *
2210 * A typical clean-up sequence for objects stored in an idr tree will use
2211 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
2212 * free the memory used to keep track of those objects.
2213 */
2214 void idr_destroy(struct idr *idr)
2215 {
2216 struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode);
2217 if (radix_tree_is_internal_node(node))
2218 radix_tree_free_nodes(node);
2219 idr->idr_rt.rnode = NULL;
2220 root_tag_set(&idr->idr_rt, IDR_FREE);
2221 }
2222 EXPORT_SYMBOL(idr_destroy);
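
/*
 * Illustrative sketch (editor's addition) of the clean-up sequence the
 * comment above describes: free every stored object with
 * idr_for_each(), then release the IDR's internal nodes. The object
 * lifetime policy (plain kfree()) and the names are hypothetical.
 */
static int example_free_one(int id, void *p, void *data)
{
	kfree(p);
	return 0;
}

static void __maybe_unused example_idr_teardown(struct idr *idr)
{
	idr_for_each(idr, example_free_one, NULL);
	idr_destroy(idr);
}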
2223
2224 static void
2225 radix_tree_node_ctor(void *arg)
2226 {
2227 struct radix_tree_node *node = arg;
2228
2229 memset(node, 0, sizeof(*node));
2230 INIT_LIST_HEAD(&node->private_list);
2231 }
2232
2233 static __init unsigned long __maxindex(unsigned int height)
2234 {
2235 unsigned int width = height * RADIX_TREE_MAP_SHIFT;
2236 int shift = RADIX_TREE_INDEX_BITS - width;
2237
2238 if (shift < 0)
2239 return ~0UL;
2240 if (shift >= BITS_PER_LONG)
2241 return 0UL;
2242 return ~0UL >> shift;
2243 }
2244
2245 static __init void radix_tree_init_maxnodes(void)
2246 {
2247 unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
2248 unsigned int i, j;
2249
2250 for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
2251 height_to_maxindex[i] = __maxindex(i);
2252 for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
2253 for (j = i; j > 0; j--)
2254 height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
2255 }
2256 }
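
/*
 * Worked example (editor's addition, assuming 64-bit longs and
 * RADIX_TREE_MAP_SHIFT == 6, i.e. 64 slots per node):
 * __maxindex(0) = 0, __maxindex(1) = 63, __maxindex(2) = 4095, so
 * height_to_maxnodes[] starts 0, 1, 1 + 64 = 65, 65 + 4096 = 4161.
 * Each level of a fully populated tree of height i contributes
 * (__maxindex(level) + 1) nodes to the total.
 */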
2257
2258 static int radix_tree_cpu_dead(unsigned int cpu)
2259 {
2260 struct radix_tree_preload *rtp;
2261 struct radix_tree_node *node;
2262
2263 /* Free per-cpu pool of preloaded nodes */
2264 rtp = &per_cpu(radix_tree_preloads, cpu);
2265 while (rtp->nr) {
2266 node = rtp->nodes;
2267 rtp->nodes = node->parent;
2268 kmem_cache_free(radix_tree_node_cachep, node);
2269 rtp->nr--;
2270 }
2271 kfree(per_cpu(ida_bitmap, cpu));
2272 per_cpu(ida_bitmap, cpu) = NULL;
2273 return 0;
2274 }
2275
2276 void __init radix_tree_init(void)
2277 {
2278 int ret;
2279 radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
2280 sizeof(struct radix_tree_node), 0,
2281 SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
2282 radix_tree_node_ctor);
2283 radix_tree_init_maxnodes();
2284 ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
2285 NULL, radix_tree_cpu_dead);
2286 WARN_ON(ret < 0);
2287 }