/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h>		/* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;
/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
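
/*
 * Worked example (a sketch, assuming BITS_PER_LONG == 64 and
 * RADIX_TREE_MAP_SHIFT == 6, i.e. 64-slot nodes):
 * RADIX_TREE_MAX_PATH = DIV_ROUND_UP(64, 6) = 11, so the preload pool
 * must hold 2 * 11 - 1 = 21 nodes to cover the worst-case insert.
 */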
/*
 * The IDR does not have to be as high as the radix tree since it uses
 * signed integers, not unsigned longs.
 */
#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)
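
/*
 * For the same example configuration (64-slot nodes): IDR_INDEX_BITS is
 * 31, so IDR_MAX_PATH = DIV_ROUND_UP(31, 6) = 6 and IDR_PRELOAD_SIZE is
 * 2 * 6 - 1 = 11 nodes.
 */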
/*
 * The IDA is even shorter since it uses a bitmap at the last level.
 */
#define IDA_INDEX_BITS		(8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
#define IDA_MAX_PATH		(DIV_ROUND_UP(IDA_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE	(IDA_MAX_PATH * 2 - 1)
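
/*
 * Illustrative numbers only: if IDA_BITMAP_BITS were 1024, then
 * ilog2(IDA_BITMAP_BITS) = 10, IDA_INDEX_BITS = 31 - 10 = 21,
 * IDA_MAX_PATH = DIV_ROUND_UP(21, 6) = 4 and IDA_PRELOAD_SIZE = 7.
 */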
/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->parent points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}
static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	node_to_entry(NULL)
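
/*
 * Entry-tagging sketch (assuming RADIX_TREE_INTERNAL_NODE == 1): a node
 * allocated at, say, 0x...45670 is stored in a slot as 0x...45671, and
 * entry_to_node() masks the bit off again.  This works because nodes are
 * at least 4-byte aligned, so the low bits of a real node pointer are
 * always clear and can carry type information.
 */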
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline
bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{
	void **ptr = node;
	return (parent->slots <= ptr) &&
			(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
static inline
bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{
	return false;
}
#endif
static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void **slot)
{
	return slot - parent->slots;
}
static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	if (radix_tree_is_internal_node(entry)) {
		if (is_sibling_entry(parent, entry)) {
			void **sibentry = (void **) entry_to_node(entry);
			offset = get_slot_offset(parent, sibentry);
			entry = rcu_dereference_raw(*sibentry);
		}
	}
#endif

	*nodep = (void *)entry;
	return offset;
}
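
/*
 * Descend example (assuming RADIX_TREE_MAP_SHIFT == 6): for index 70 in
 * a node with shift 6, offset = (70 >> 6) & 63 = 1; in its child with
 * shift 0, offset = 70 & 63 = 6.  With multiorder support, a sibling
 * entry redirects both the offset and the returned entry to the head
 * slot of the multiorder entry it aliases.
 */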
static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
	return root->gfp_mask & __GFP_BITS_MASK;
}
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}
static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}
static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}
static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
}
static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
	return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
}
static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
	return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
}
static inline bool is_idr(const struct radix_tree_root *root)
{
	return !!(root->gfp_mask & ROOT_IS_IDR);
}
/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(const struct radix_tree_node *node,
							unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}
static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}
/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @addr: The address to base the search on
 * @size: The bitmap size in bits
 * @offset: The bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}
static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
}
/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}
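
/*
 * E.g. with RADIX_TREE_MAP_SIZE == 64: shift_maxindex(0) == 63,
 * shift_maxindex(6) == 4095 and shift_maxindex(12) == 262143; each
 * additional level multiplies the index space by RADIX_TREE_MAP_SIZE.
 */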
static unsigned long next_index(unsigned long index,
				const struct radix_tree_node *node,
				unsigned long offset)
{
	return (index & ~node_maxindex(node)) + (offset << node->shift);
}
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
	unsigned long i;

	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
		node, node->offset, index, index | node_maxindex(node),
		node->parent,
		node->tags[0][0], node->tags[1][0], node->tags[2][0],
		node->shift, node->count, node->exceptional);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		unsigned long first = index | (i << node->shift);
		unsigned long last = first | ((1UL << node->shift) - 1);
		void *entry = node->slots[i];
		if (!entry)
			continue;
		if (entry == RADIX_TREE_RETRY) {
			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
					i, first, last, node);
		} else if (!radix_tree_is_internal_node(entry)) {
			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
					entry, i, first, last, node);
		} else if (is_sibling_entry(node, entry)) {
			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
					entry, i, first, last, node,
					*(void **)entry_to_node(entry));
		} else {
			dump_node(entry_to_node(entry), first);
		}
	}
}
static void radix_tree_dump(struct radix_tree_root *root)
{
	pr_debug("radix root: %p rnode %p tags %x\n",
			root, root->rnode,
			root->gfp_mask >> ROOT_TAG_SHIFT);
	if (!radix_tree_is_internal_node(root->rnode))
		return;
	dump_node(entry_to_node(root->rnode), 0);
}
static void dump_ida_node(void *entry, unsigned long index)
{
	unsigned long i;

	if (!entry)
		return;

	if (radix_tree_is_internal_node(entry)) {
		struct radix_tree_node *node = entry_to_node(entry);

		pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
			node, node->offset, index * IDA_BITMAP_BITS,
			((index | node_maxindex(node)) + 1) *
				IDA_BITMAP_BITS - 1,
			node->parent, node->tags[0][0], node->shift,
			node->count);
		for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
			dump_ida_node(node->slots[i],
					index | (i << node->shift));
	} else if (radix_tree_exceptional_entry(entry)) {
		pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
				entry, (int)(index & RADIX_TREE_MAP_MASK),
				index * IDA_BITMAP_BITS,
				index * IDA_BITMAP_BITS + BITS_PER_LONG -
					RADIX_TREE_EXCEPTIONAL_SHIFT,
				(unsigned long)entry >>
					RADIX_TREE_EXCEPTIONAL_SHIFT);
	} else {
		struct ida_bitmap *bitmap = entry;

		pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
				(int)(index & RADIX_TREE_MAP_MASK),
				index * IDA_BITMAP_BITS,
				(index + 1) * IDA_BITMAP_BITS - 1);
		for (i = 0; i < IDA_BITMAP_LONGS; i++)
			pr_cont(" %lx", bitmap->bitmap[i]);
		pr_cont("\n");
	}
}
static void ida_dump(struct ida *ida)
{
	struct radix_tree_root *root = &ida->ida_rt;
	pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
				root->gfp_mask >> ROOT_TAG_SHIFT);
	dump_ida_node(root->rnode, 0);
}
/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
			struct radix_tree_root *root,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int exceptional)
{
	struct radix_tree_node *ret = NULL;

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->parent;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->exceptional = exceptional;
		ret->parent = parent;
	}
	return ret;
}
static void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab.  We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}
static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}
/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);
/*
 * The same as above function, except we don't guarantee preloading happens.
 * We do it, if we decide it helps. On success, return zero with preemption
 * disabled. On error, return -ENOMEM with preemption not disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/*
 * Preload with enough objects to ensure that we can split a single entry
 * of order @old_order into many entries of size @new_order
 */
int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
				gfp_t gfp_mask)
{
	unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
	unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
				(new_order / RADIX_TREE_MAP_SHIFT);
	unsigned nr = 0;

	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	BUG_ON(new_order >= old_order);

	while (layers--)
		nr = nr * RADIX_TREE_MAP_SIZE + 1;
	return __radix_tree_preload(gfp_mask, top * nr);
}
#endif
/*
 * The same as function above, but preload number of nodes required to insert
 * (1 << order) continuous naturally-aligned elements.
 */
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
{
	unsigned long nr_subtrees;
	int nr_nodes, subtree_height;

	/* Preloading doesn't help anything with this gfp mask, skip it */
	if (!gfpflags_allow_blocking(gfp_mask)) {
		preempt_disable();
		return 0;
	}

	/*
	 * Calculate number and height of fully populated subtrees it takes to
	 * store (1 << order) elements.
	 */
	nr_subtrees = 1 << order;
	for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
			subtree_height++)
		nr_subtrees >>= RADIX_TREE_MAP_SHIFT;

	/*
	 * The worst case is zero height tree with a single item at index 0 and
	 * then inserting items starting at ULONG_MAX - (1 << order).
	 *
	 * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
	 * 0-index item.
	 */
	nr_nodes = RADIX_TREE_MAX_PATH;

	/* Plus branch to fully populated subtrees. */
	nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;

	/* Root node is shared. */
	nr_nodes--;

	/* Plus nodes required to build subtrees. */
	nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];

	return __radix_tree_preload(gfp_mask, nr_nodes);
}
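
/*
 * Worked example (assuming 64-bit longs and RADIX_TREE_MAP_SHIFT == 6):
 * for order == 9 the 512 naturally-aligned elements form 8 fully
 * populated subtrees of height 1, so
 * nr_nodes = 11 + (11 - 1) - 1 + 8 * height_to_maxnodes[1] = 28
 * preloaded nodes suffice.
 */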
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}
/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
				unsigned long index, unsigned int shift)
{
	struct radix_tree_node *slot;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be.  */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	slot = rcu_dereference_raw(root->rnode);
	if (!slot && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
							root, shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		if (is_idr(root)) {
			all_tag_set(node, IDR_FREE);
			if (!root_tag_get(root, IDR_FREE)) {
				tag_clear(node, IDR_FREE, 0);
				root_tag_set(root, IDR_FREE);
			}
		} else {
			/* Propagate the aggregated tag info to the new child */
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
				if (root_tag_get(root, tag))
					tag_set(node, tag, 0);
			}
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(slot)) {
			entry_to_node(slot)->parent = node;
		} else if (radix_tree_exceptional_entry(slot)) {
			/* Moving an exceptional root->rnode to a node */
			node->exceptional = 1;
		}
		node->slots[0] = slot;
		slot = node_to_entry(node);
		rcu_assign_pointer(root->rnode, slot);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}
/**
 *	radix_tree_shrink    -    shrink radix tree to minimum height
 *	@root		radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root,
				     radix_tree_update_node_t update_node,
				     void *private)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * The candidate node has more than one child, or its child
		 * is not at the leftmost slot, or the child is a multiorder
		 * entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = rcu_dereference_raw(node->slots[0]);
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->rnode) as far as dependent read barriers go.
		 */
		root->rnode = child;
		if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
			root_tag_clear(root, IDR_FREE);

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = RADIX_TREE_RETRY;
			if (update_node)
				update_node(node, private);
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}
static bool delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node,
			radix_tree_update_node_t update_node, void *private)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node_to_entry(node) ==
					rcu_dereference_raw(root->rnode))
				deleted |= radix_tree_shrink(root, update_node,
								private);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			/*
			 * Shouldn't the tags already have all been cleared
			 * by the caller?
			 */
			if (!is_idr(root))
				root_tag_clear_all(root);
			root->rnode = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}
/**
 *	__radix_tree_create	-	create a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		index occupies 2^order aligned slots
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Create, if necessary, and return the node and slot for an item
 *	at position @index in the radix tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 *
 *	Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void **slot = (void **)&root->rnode;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index | ((1UL << order) - 1);
	gfp_t gfp = root_gfp_mask(root);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough.  */
	if (order > 0 && max == ((1UL << order) - 1))
		max++;
	if (max > maxindex) {
		int error = radix_tree_extend(root, gfp, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = rcu_dereference_raw(root->rnode);
	}

	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node.  */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}
/*
 * Free any nodes below this node.  The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it.  If we need to add a destructor, we can
 * add that functionality later.  Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = rcu_dereference_raw(child->slots[offset]);
		if (radix_tree_is_internal_node(entry) &&
					!is_sibling_entry(child, entry)) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;
			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
static inline int insert_entries(struct radix_tree_node *node, void **slot,
				void *item, unsigned order, bool replace)
{
	struct radix_tree_node *child;
	unsigned i, n, tag, offset, tags = 0;

	if (node) {
		if (order > node->shift)
			n = 1 << (order - node->shift);
		else
			n = 1;
		offset = get_slot_offset(node, slot);
	} else {
		n = 1;
		offset = 0;
	}

	if (n > 1) {
		offset = offset & ~(n - 1);
		slot = &node->slots[offset];
	}
	child = node_to_entry(slot);

	for (i = 0; i < n; i++) {
		if (slot[i]) {
			if (replace) {
				node->count--;
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tag_get(node, tag, offset + i))
						tags |= 1 << tag;
			} else
				return -EEXIST;
		}
	}

	for (i = 0; i < n; i++) {
		struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
		if (i) {
			rcu_assign_pointer(slot[i], child);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_clear(node, tag, offset + i);
		} else {
			rcu_assign_pointer(slot[i], item);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_set(node, tag, offset);
		}
		if (radix_tree_is_internal_node(old) &&
					!is_sibling_entry(node, old) &&
					(old != RADIX_TREE_RETRY))
			radix_tree_free_nodes(old);
		if (radix_tree_exceptional_entry(old))
			node->exceptional--;
	}
	if (node) {
		node->count += n;
		if (radix_tree_exceptional_entry(item))
			node->exceptional += n;
	}
	return n;
}
#else
static inline int insert_entries(struct radix_tree_node *node, void **slot,
				void *item, unsigned order, bool replace)
{
	if (*slot)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);
	if (node) {
		node->count++;
		if (radix_tree_exceptional_entry(item))
			node->exceptional++;
	}
	return 1;
}
#endif
/**
 *	__radix_tree_insert    -    insert into a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		key covers the 2^order indices around index
 *	@item:		item to insert
 *
 *	Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;

	error = insert_entries(node, slot, item, order, false);
	if (error < 0)
		return error;

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);
/**
 *	__radix_tree_lookup	-	lookup an item in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Lookup and return the item at position @index in the radix
 *	tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(const struct radix_tree_root *root,
			  unsigned long index, struct radix_tree_node **nodep,
			  void ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	void **slot;

 restart:
	parent = NULL;
	slot = (void **)&root->rnode;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		slot = parent->slots + offset;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}
/**
 *	radix_tree_lookup_slot    -    lookup a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Returns:  the slot corresponding to the position @index in the
 *	radix tree @root. This is useful for update-if-exists operations.
 *
 *	This function can be called under rcu_read_lock iff the slot is not
 *	modified by radix_tree_replace_slot, otherwise it must be called
 *	exclusive from other writers. Any dereference of the slot must be done
 *	using radix_tree_deref_slot.
 */
void **radix_tree_lookup_slot(const struct radix_tree_root *root,
				unsigned long index)
{
	void **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
/**
 *	radix_tree_lookup    -    perform lookup operation on a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Lookup the item at the position @index in the radix tree @root.
 *
 *	This function can be called under rcu_read_lock, however the caller
 *	must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 *	them safely). No RCU barriers are required to access or modify the
 *	returned item, however.
 */
void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);
static inline void replace_sibling_entries(struct radix_tree_node *node,
				void **slot, int count, int exceptional)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	void *ptr = node_to_entry(slot);
	unsigned offset = get_slot_offset(node, slot) + 1;

	while (offset < RADIX_TREE_MAP_SIZE) {
		if (rcu_dereference_raw(node->slots[offset]) != ptr)
			break;
		if (count < 0) {
			node->slots[offset] = NULL;
			node->count--;
		}
		node->exceptional += exceptional;
		offset++;
	}
#endif
}
static void replace_slot(void **slot, void *item, struct radix_tree_node *node,
			 int count, int exceptional)
{
	if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
		return;

	if (node && (count || exceptional)) {
		node->count += count;
		node->exceptional += exceptional;
		replace_sibling_entries(node, slot, count, exceptional);
	}

	rcu_assign_pointer(*slot, item);
}
static bool node_tag_get(const struct radix_tree_root *root,
				const struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	if (node)
		return tag_get(node, tag, offset);
	return root_tag_get(root, tag);
}
/*
 * IDR users want to be able to store NULL in the tree, so if the slot isn't
 * free, don't adjust the count, even if it's transitioning between NULL and
 * non-NULL.  For the IDA, we mark slots as being IDR_FREE while they still
 * have empty bits, but it only stores NULL in slots when they're being
 * deleted.
 */
static int calculate_count(struct radix_tree_root *root,
				struct radix_tree_node *node, void **slot,
				void *item, void *old)
{
	if (is_idr(root)) {
		unsigned offset = get_slot_offset(node, slot);
		bool free = node_tag_get(root, node, IDR_FREE, offset);
		if (!free)
			return 0;
		if (!old)
			return 1;
	}
	return !!item - !!old;
}
/**
 * __radix_tree_replace		- replace item in a slot
 * @root:		radix tree root
 * @node:		pointer to tree node
 * @slot:		pointer to slot in @node
 * @item:		new item to store in the slot.
 * @update_node:	callback for changing leaf nodes
 * @private:		private data to pass to @update_node
 *
 * For use with __radix_tree_lookup().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
void __radix_tree_replace(struct radix_tree_root *root,
			  struct radix_tree_node *node,
			  void **slot, void *item,
			  radix_tree_update_node_t update_node, void *private)
{
	void *old = rcu_dereference_raw(*slot);
	int exceptional = !!radix_tree_exceptional_entry(item) -
				!!radix_tree_exceptional_entry(old);
	int count = calculate_count(root, node, slot, item, old);

	/*
	 * This function supports replacing exceptional entries and
	 * deleting entries, but that needs accounting against the
	 * node unless the slot is root->rnode.
	 */
	WARN_ON_ONCE(!node && (slot != (void **)&root->rnode) &&
			(count || exceptional));
	replace_slot(slot, item, node, count, exceptional);

	if (!node)
		return;

	if (update_node)
		update_node(node, private);

	delete_node(root, node, update_node, private);
}
/**
 * radix_tree_replace_slot	- replace item in a slot
 * @root:	radix tree root
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(),
 * radix_tree_gang_lookup_tag_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 *
 * NOTE: This cannot be used to switch between non-entries (empty slots),
 * regular entries, and exceptional entries, as that requires accounting
 * inside the radix tree node. When switching from one type of entry or
 * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
 * radix_tree_iter_replace().
 */
void radix_tree_replace_slot(struct radix_tree_root *root,
			     void **slot, void *item)
{
	__radix_tree_replace(root, NULL, slot, item, NULL, NULL);
}
/**
 * radix_tree_iter_replace - replace item in a slot
 * @root:	radix tree root
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_split() and radix_tree_for_each_slot().
 * Caller must hold tree write locked across split and replacement.
 */
void radix_tree_iter_replace(struct radix_tree_root *root,
		const struct radix_tree_iter *iter, void **slot, void *item)
{
	__radix_tree_replace(root, iter->node, slot, item, NULL, NULL);
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/**
 * radix_tree_join - replace multiple entries with one multiorder entry
 * @root: radix tree root
 * @index: an index inside the new entry
 * @order: order of the new entry
 * @item: new entry
 *
 * Call this function to replace several entries with one larger entry.
 * The existing entries are presumed to not need freeing as a result of
 * this call.
 *
 * The replacement entry will have all the tags set on it that were set
 * on any of the entries it is replacing.
 */
int radix_tree_join(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (!error)
		error = insert_entries(node, slot, item, order, true);
	if (error > 0)
		error = 0;

	return error;
}
/**
 * radix_tree_split - Split an entry into smaller entries
 * @root: radix tree root
 * @index: An index within the large entry
 * @order: Order of new entries
 *
 * Call this function as the first step in replacing a multiorder entry
 * with several entries of lower order.  After this function returns,
 * loop over the relevant portion of the tree using radix_tree_for_each_slot()
 * and call radix_tree_iter_replace() to set up each new entry.
 *
 * The tags from this entry are replicated to all the new entries.
 *
 * The radix tree should be locked against modification during the entire
 * replacement operation.  Lock-free lookups will see RADIX_TREE_RETRY which
 * should prompt RCU walkers to restart the lookup from the root.
 */
int radix_tree_split(struct radix_tree_root *root, unsigned long index,
				unsigned order)
{
	struct radix_tree_node *parent, *node, *child;
	void **slot;
	unsigned int offset, end;
	unsigned n, tag, tags = 0;
	gfp_t gfp = root_gfp_mask(root);

	if (!__radix_tree_lookup(root, index, &parent, &slot))
		return -ENOENT;
	if (!parent)
		return -ENOENT;

	offset = get_slot_offset(parent, slot);

	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
		if (tag_get(parent, tag, offset))
			tags |= 1 << tag;

	for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
		if (!is_sibling_entry(parent,
				rcu_dereference_raw(parent->slots[end])))
			break;
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			if (tags & (1 << tag))
				tag_set(parent, tag, end);
		/* rcu_assign_pointer ensures tags are set before RETRY */
		rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY);
	}
	rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY);
	parent->exceptional -= (end - offset);

	if (order == parent->shift)
		return 0;
	if (order > parent->shift) {
		while (offset < end)
			offset += insert_entries(parent, &parent->slots[offset],
					RADIX_TREE_RETRY, order, true);
		return 0;
	}

	node = parent;

	for (;;) {
		if (node->shift > order) {
			child = radix_tree_node_alloc(gfp, node, root,
					node->shift - RADIX_TREE_MAP_SHIFT,
					offset, 0, 0);
			if (!child)
				goto nomem;
			if (node != parent) {
				node->count++;
				rcu_assign_pointer(node->slots[offset],
							node_to_entry(child));
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tags & (1 << tag))
						tag_set(node, tag, offset);
			}

			node = child;
			offset = 0;
			continue;
		}

		n = insert_entries(node, &node->slots[offset],
					RADIX_TREE_RETRY, order, false);
		BUG_ON(n > RADIX_TREE_MAP_SIZE);

		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			if (tags & (1 << tag))
				tag_set(node, tag, offset);
		offset += n;

		while (offset == RADIX_TREE_MAP_SIZE) {
			if (node == parent)
				break;
			offset = node->offset;
			child = node;
			node = node->parent;
			rcu_assign_pointer(node->slots[offset],
						node_to_entry(child));
			offset++;
		}
		if ((node == parent) && (offset == end))
			return 0;
	}

 nomem:
	/* Shouldn't happen; did user forget to preload? */
	/* TODO: free all the allocated nodes */
	return -ENOMEM;
}
#endif
static void node_tag_set(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (tag_get(node, tag, offset))
			return;
		tag_set(node, tag, offset);
		offset = node->offset;
		node = node->parent;
	}

	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);
}
/**
 *	radix_tree_tag_set - set a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree.  From
 *	the root all the way down to the leaf node.
 *
 *	Returns the address of the tagged item.  Setting a tag on a not-present
 *	item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);
/**
 * radix_tree_iter_tag_set - set a tag on the current iterator entry
 * @root:	radix tree root
 * @iter:	iterator state
 * @tag:	tag to set
 */
void radix_tree_iter_tag_set(struct radix_tree_root *root,
			const struct radix_tree_iter *iter, unsigned int tag)
{
	node_tag_set(root, iter->node, tag, iter_offset(iter));
}
static void node_tag_clear(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (!tag_get(node, tag, offset))
			return;
		tag_clear(node, tag, offset);
		if (any_tag_set(node, tag))
			return;

		offset = node->offset;
		node = node->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);
}
/**
 *	radix_tree_tag_clear - clear a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree.  If this causes
 *	the leaf node to have no tags set then clear the tag in the
 *	next-to-leaf node, etc.
 *
 *	Returns the address of the tagged item on success, else NULL.  ie:
 *	has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	int uninitialized_var(offset);

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
	}

	if (node)
		node_tag_clear(root, parent, tag, offset);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);
/**
 * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
 * @root:	radix tree root
 * @iter:	iterator state
 * @tag:	tag to clear
 */
void radix_tree_iter_tag_clear(struct radix_tree_root *root,
			const struct radix_tree_iter *iter, unsigned int tag)
{
	node_tag_clear(root, iter->node, tag, iter_offset(iter));
}
/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:		radix tree root
 * @index:		index key
 * @tag:		tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(const struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	if (!root_tag_get(root, tag))
		return 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);

		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);
static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}
/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
				struct radix_tree_node *node, unsigned offset,
				unsigned tag)
{
	unsigned tag_long = offset / BITS_PER_LONG;
	unsigned tag_bit  = offset % BITS_PER_LONG;

	if (!node) {
		iter->tags = 1;
		return;
	}

	iter->tags = node->tags[tag][tag_long] >> tag_bit;

	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
		/* Pick tags from next element */
		if (tag_bit)
			iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
		/* Clip chunk size, here only BITS_PER_LONG tags */
		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
	}
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
static void **skip_siblings(struct radix_tree_node **nodep,
			void **slot, struct radix_tree_iter *iter)
{
	void *sib = node_to_entry(slot - 1);

	while (iter->index < iter->next_index) {
		*nodep = rcu_dereference_raw(*slot);
		if (*nodep && *nodep != sib)
			return slot;
		slot++;
		iter->index = __radix_tree_iter_add(iter, 1);
	}

	*nodep = NULL;
	return NULL;
}
void **__radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
					unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node = rcu_dereference_raw(*slot);

	slot = skip_siblings(&node, slot, iter);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;
		unsigned long next_index;

		if (node == RADIX_TREE_RETRY)
			return slot;
		node = entry_to_node(node);
		iter->node = node;
		iter->shift = node->shift;

		if (flags & RADIX_TREE_ITER_TAGGED) {
			offset = radix_tree_find_next_bit(node, tag, 0);
			if (offset == RADIX_TREE_MAP_SIZE)
				return NULL;
			slot = &node->slots[offset];
			iter->index = __radix_tree_iter_add(iter, offset);
			set_iter_tags(iter, node, offset, tag);
			node = rcu_dereference_raw(*slot);
		} else {
			offset = 0;
			slot = &node->slots[0];
			for (;;) {
				node = rcu_dereference_raw(*slot);
				if (node)
					break;
				slot++;
				offset++;
				if (offset == RADIX_TREE_MAP_SIZE)
					return NULL;
			}
			iter->index = __radix_tree_iter_add(iter, offset);
			if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
				goto none;
			next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
			if (next_index < iter->next_index)
				iter->next_index = next_index;
		}
	}

	return slot;
 none:
	iter->next_index = 0;
	return NULL;
}
EXPORT_SYMBOL(__radix_tree_next_slot);
#else
static void **skip_siblings(struct radix_tree_node **nodep,
			void **slot, struct radix_tree_iter *iter)
{
	return slot;
}
#endif
void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter)
{
	struct radix_tree_node *node;

	slot++;
	iter->index = __radix_tree_iter_add(iter, 1);
	skip_siblings(&node, slot, iter);
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
EXPORT_SYMBOL(radix_tree_iter_resume);
/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if iteration is over
 */
void **radix_tree_next_chunk(const struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node, *child;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL. iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

 restart:
	radix_tree_load_root(root, &child, &maxindex);
	if (index > maxindex)
		return NULL;
	if (!child)
		return NULL;

	if (!radix_tree_is_internal_node(child)) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		iter->node = NULL;
		__set_iter_shift(iter, 0);
		return (void **)&root->rnode;
	}

	do {
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !child) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(node, tag,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = rcu_dereference_raw(
							node->slots[offset]);
					if (is_sibling_entry(node, slot))
						continue;
					if (slot)
						break;
				}
			index &= ~node_maxindex(node);
			index += offset << node->shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			child = rcu_dereference_raw(node->slots[offset]);
		}

		if (!child)
			goto restart;
		if (child == RADIX_TREE_RETRY)
			break;
	} while (radix_tree_is_internal_node(child));

	/* Update the iterator state */
	iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
	iter->next_index = (index | node_maxindex(node)) + 1;
	iter->node = node;
	__set_iter_shift(iter, node->shift);

	if (flags & RADIX_TREE_ITER_TAGGED)
		set_iter_tags(iter, node, offset, tag);

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);
/**
 *	radix_tree_gang_lookup - perform multiple lookup on a radix tree
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *
 *	Performs an index-ascending scan of the tree for present items.  Places
 *	them at *@results and returns the number of items which were placed at
 *	*@results.
 *
 *	The implementation is naive.
 *
 *	Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 *	rcu_read_lock. In this case, rather than the returned results being
 *	an atomic snapshot of the tree at a single point in time, the
 *	semantics of an RCU protected gang lookup are as though multiple
 *	radix_tree_lookups have been issued in individual locks, and results
 *	stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
/**
 *	radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@indices:	where their indices should be placed (but usually NULL)
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *
 *	Performs an index-ascending scan of the tree for present items.  Places
 *	their slots at *@results and returns the number of items which were
 *	placed at *@results.
 *
 *	The implementation is naive.
 *
 *	Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
 *	be dereferenced with radix_tree_deref_slot, and if using only RCU
 *	protection, radix_tree_deref_slot may fail requiring a retry.
 */
unsigned int
radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = slot;
		if (indices)
			indices[ret] = iter.index;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
/**
 *	radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *				     based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the items at *@results and
 *	returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
/**
 *	radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *					  radix tree based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the slots at *@results and
 *	returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
		void ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = slot;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
/**
 *	__radix_tree_delete_node    -    try to free node after clearing a slot
 *	@root:		radix tree root
 *	@node:		node containing @index
 *	@update_node:	callback for changing leaf nodes
 *	@private:	private data to pass to @update_node
 *
 *	After clearing the slot at @index in @node from radix tree
 *	rooted at @root, call this function to attempt freeing the
 *	node and shrinking the tree.
 */
void __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node,
			      radix_tree_update_node_t update_node,
			      void *private)
{
	delete_node(root, node, update_node, private);
}
static bool __radix_tree_delete(struct radix_tree_root *root,
				struct radix_tree_node *node, void **slot)
{
	void *old = rcu_dereference_raw(*slot);
	int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0;
	unsigned offset = get_slot_offset(node, slot);
	int tag;

	if (is_idr(root))
		node_tag_set(root, node, IDR_FREE, offset);
	else
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);

	replace_slot(slot, NULL, node, -1, exceptional);
	return node && delete_node(root, node, NULL, NULL);
}
/**
 * radix_tree_iter_delete - delete the entry at this iterator position
 * @root: radix tree root
 * @iter: iterator state
 * @slot: pointer to slot
 *
 * Delete the entry at the position currently pointed to by the iterator.
 * This may result in the current node being freed; if it is, the iterator
 * is advanced so that it will not reference the freed memory.  This
 * function may be called without any locking if there are no other threads
 * which can access this tree.
 */
void radix_tree_iter_delete(struct radix_tree_root *root,
				struct radix_tree_iter *iter, void **slot)
{
	if (__radix_tree_delete(root, iter->node, slot))
		iter->index = iter->next_index;
}
/**
 * radix_tree_delete_item - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 * @item: expected item
 *
 * Remove @item at @index from the radix tree rooted at @root.
 *
 * Return: the deleted entry, or %NULL if it was not present
 * or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node = NULL;
	void **slot;
	void *entry;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
						get_slot_offset(node, slot))))
		return NULL;

	if (item && entry != item)
		return NULL;

	__radix_tree_delete(root, node, slot);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);
/**
 * radix_tree_delete - delete an entry from a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Remove the entry at @index from the radix tree rooted at @root.
 *
 * Return: The deleted entry, or %NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);
void radix_tree_clear_tags(struct radix_tree_root *root,
			   struct radix_tree_node *node,
			   void **slot)
{
	if (node) {
		unsigned int tag, offset = get_slot_offset(node, slot);
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);
	} else {
		root_tag_clear_all(root);
	}
}
/**
 *	radix_tree_tagged - test whether any items in the tree are tagged
 *	@root:		radix tree root
 *	@tag:		tag to test
 */
int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preallocate memory to use for the next call to idr_alloc().  This function
 * returns with preemption disabled.  It will be enabled by idr_preload_end().
 */
void idr_preload(gfp_t gfp_mask)
{
	__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
}
EXPORT_SYMBOL(idr_preload);
/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp: memory allocation flags
 *
 * This function should be called before calling ida_get_new_above().  If it
 * is unable to allocate memory, it will return %0.  On success, it returns %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp)
{
	__radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
	/*
	 * The IDA API has no preload_end() equivalent.  Instead,
	 * ida_get_new() can return -EAGAIN, prompting the caller
	 * to return to the ida_pre_get() step.
	 */
	preempt_enable();

	if (!this_cpu_read(ida_bitmap)) {
		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
		if (!bitmap)
			return 0;
		bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
		kfree(bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);
void **idr_get_free(struct radix_tree_root *root,
			struct radix_tree_iter *iter, gfp_t gfp, int end)
{
	struct radix_tree_node *node = NULL, *child;
	void **slot = (void **)&root->rnode;
	unsigned long maxindex, start = iter->next_index;
	unsigned long max = end > 0 ? end - 1 : INT_MAX;
	unsigned int shift, offset = 0;

 grow:
	shift = radix_tree_load_root(root, &child, &maxindex);
	if (!radix_tree_tagged(root, IDR_FREE))
		start = max(start, maxindex + 1);
	if (start > max)
		return ERR_PTR(-ENOSPC);

	if (start > maxindex) {
		int error = radix_tree_extend(root, gfp, start, shift);
		if (error < 0)
			return ERR_PTR(error);
		shift = error;
		child = rcu_dereference_raw(root->rnode);
	}

	while (shift) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node.  */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return ERR_PTR(-ENOMEM);
			all_tag_set(child, IDR_FREE);
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, start);
		if (!tag_get(node, IDR_FREE, offset)) {
			offset = radix_tree_find_next_bit(node, IDR_FREE,
							offset + 1);
			start = next_index(start, node, offset);
			if (start > max)
				return ERR_PTR(-ENOSPC);
			while (offset == RADIX_TREE_MAP_SIZE) {
				offset = node->offset + 1;
				node = node->parent;
				if (!node)
					goto grow;
				shift = node->shift;
			}
			child = rcu_dereference_raw(node->slots[offset]);
		}
		slot = &node->slots[offset];
	}

	iter->index = start;
	if (node)
		iter->next_index = 1 + min(max, (start | node_maxindex(node)));
	else
		iter->next_index = 1;
	iter->node = node;
	__set_iter_shift(iter, shift);
	set_iter_tags(iter, node, offset, IDR_FREE);

	return slot;
}
/**
 * idr_destroy - release all internal memory from an IDR
 * @idr: idr handle
 *
 * After this function is called, the IDR is empty, and may be reused or
 * the data structure containing it may be freed.
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free the memory used to keep track of those objects.
 */
void idr_destroy(struct idr *idr)
{
	struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode);
	if (radix_tree_is_internal_node(node))
		radix_tree_free_nodes(node);
	idr->idr_rt.rnode = NULL;
	root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);
static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}
static __init unsigned long __maxindex(unsigned int height)
{
	unsigned int width = height * RADIX_TREE_MAP_SHIFT;
	int shift = RADIX_TREE_INDEX_BITS - width;

	if (shift < 0)
		return ~0UL;
	if (shift >= BITS_PER_LONG)
		return 0UL;
	return ~0UL >> shift;
}
static __init void radix_tree_init_maxnodes(void)
{
	unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
		height_to_maxindex[i] = __maxindex(i);
	for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
		for (j = i; j > 0; j--)
			height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
	}
}
static int radix_tree_cpu_dead(unsigned int cpu)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	rtp = &per_cpu(radix_tree_preloads, cpu);
	while (rtp->nr) {
		node = rtp->nodes;
		rtp->nodes = node->parent;
		kmem_cache_free(radix_tree_node_cachep, node);
		rtp->nr--;
	}
	kfree(per_cpu(ida_bitmap, cpu));
	per_cpu(ida_bitmap, cpu) = NULL;
	return 0;
}
void __init radix_tree_init(void)
{
	int ret;

	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	radix_tree_init_maxnodes();
	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
					NULL, radix_tree_cpu_dead);
	WARN_ON(ret < 0);
}