/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/preempt.h>		/* in_interrupt() */


/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;

/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)

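/*
 * For example, assuming the common configuration of RADIX_TREE_MAP_SHIFT == 6
 * on a 64-bit machine, RADIX_TREE_MAX_PATH is DIV_ROUND_UP(64, 6) == 11, so a
 * full preload stocks at most 2 * 11 - 1 == 21 nodes per CPU.
 */
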
/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->private_data points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	node_to_entry(NULL)

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	void **ptr = node;
	return (parent->slots <= ptr) &&
			(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	return false;
}
#endif

static inline unsigned long get_slot_offset(struct radix_tree_node *parent,
						 void **slot)
{
	return slot - parent->slots;
}

static unsigned int radix_tree_descend(struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	if (radix_tree_is_internal_node(entry)) {
		if (is_sibling_entry(parent, entry)) {
			void **sibentry = (void **) entry_to_node(entry);
			offset = get_slot_offset(parent, sibentry);
			entry = rcu_dereference_raw(*sibentry);
		}
	}
#endif

	*nodep = (void *)entry;
	return offset;
}

static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
{
	return root->gfp_mask & __GFP_BITS_MASK;
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
{
	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->gfp_mask &= __GFP_BITS_MASK;
}

static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
{
	return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
}

static inline unsigned root_tags_get(struct radix_tree_root *root)
{
	return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT;
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @node: the node whose tag bitmap is searched
 * @tag: the tag index
 * @offset: the bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or RADIX_TREE_MAP_SIZE if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
	unsigned long i;

	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
		node, node->offset, index, index | node_maxindex(node),
		node->parent,
		node->tags[0][0], node->tags[1][0], node->tags[2][0],
		node->shift, node->count, node->exceptional);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		unsigned long first = index | (i << node->shift);
		unsigned long last = first | ((1UL << node->shift) - 1);
		void *entry = node->slots[i];
		if (!entry)
			continue;
		if (entry == RADIX_TREE_RETRY) {
			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
					i, first, last, node);
		} else if (!radix_tree_is_internal_node(entry)) {
			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
					entry, i, first, last, node);
		} else if (is_sibling_entry(node, entry)) {
			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
					entry, i, first, last, node,
					*(void **)entry_to_node(entry));
		} else {
			dump_node(entry_to_node(entry), first);
		}
	}
}

/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
	pr_debug("radix root: %p rnode %p tags %x\n",
			root, root->rnode,
			root->gfp_mask >> __GFP_BITS_SHIFT);
	if (!radix_tree_is_internal_node(root->rnode))
		return;
	dump_node(entry_to_node(root->rnode), 0);
}
#endif

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
	struct radix_tree_node *ret = NULL;
	gfp_t gfp_mask = root_gfp_mask(root);

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->private_data;
			ret->private_data = NULL;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	return ret;
}

static void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);
	int i;

	/*
	 * must only free zeroed nodes into the slab. radix_tree_shrink
	 * can leave us with a non-NULL entry in the first slot, so clear
	 * that here to make sure.
	 */
	for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
		tag_clear(node, i, 0);

	node->slots[0] = NULL;
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static int __radix_tree_preload(gfp_t gfp_mask, int nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->private_data = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);
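
/*
 * Example (sketch): the canonical way to pair radix_tree_preload() with an
 * insertion.  On success the preload leaves preemption disabled, so it must
 * be followed by radix_tree_preload_end() (defined in <linux/radix-tree.h>).
 * "my_tree", "my_lock" and "item" are hypothetical caller state:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */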

/*
 * The same as the function above, except we don't guarantee preloading
 * happens.  We do it if we decide it helps.  On success, return zero with
 * preemption disabled.  On error, return -ENOMEM with preemption not
 * disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

/*
 * The same as the function above, but preloads the number of nodes required
 * to insert (1 << order) continuous naturally-aligned elements.
 */
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
{
	unsigned long nr_subtrees;
	int nr_nodes, subtree_height;

	/* Preloading doesn't help anything with this gfp mask, skip it */
	if (!gfpflags_allow_blocking(gfp_mask)) {
		preempt_disable();
		return 0;
	}

	/*
	 * Calculate number and height of fully populated subtrees it takes to
	 * store (1 << order) elements.
	 */
	nr_subtrees = 1 << order;
	for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
			subtree_height++)
		nr_subtrees >>= RADIX_TREE_MAP_SHIFT;

	/*
	 * The worst case is zero height tree with a single item at index 0 and
	 * then inserting items starting at ULONG_MAX - (1 << order).
	 *
	 * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
	 * 0-index item.
	 */
	nr_nodes = RADIX_TREE_MAX_PATH;

	/* Plus branch to fully populated subtrees. */
	nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;

	/* Root node is shared. */
	nr_nodes--;

	/* Plus nodes required to build subtrees. */
	nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];

	return __radix_tree_preload(gfp_mask, nr_nodes);
}

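/*
 * Worked example (sketch): with RADIX_TREE_MAP_SHIFT == 6 on a 64-bit
 * machine, an order-9 entry (512 slots, e.g. a PMD-sized page) needs eight
 * fully populated subtrees of height 1, so the function above preloads
 * roughly RADIX_TREE_MAX_PATH + (RADIX_TREE_MAX_PATH - 1) - 1 + 8 == 28
 * nodes for such an insert.
 */
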
static unsigned radix_tree_load_root(struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root,
				unsigned long index, unsigned int shift)
{
	struct radix_tree_node *slot;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be. */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	slot = root->rnode;
	if (!slot)
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(root);

		if (!node)
			return -ENOMEM;

		/* Propagate the aggregated tag info into the new root */
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
			if (root_tag_get(root, tag))
				tag_set(node, tag, 0);
		}

		BUG_ON(shift > BITS_PER_LONG);
		node->shift = shift;
		node->offset = 0;
		node->count = 1;
		node->parent = NULL;
		if (radix_tree_is_internal_node(slot)) {
			entry_to_node(slot)->parent = node;
		} else {
			/* Moving an exceptional root->rnode to a node */
			if (radix_tree_exceptional_entry(slot))
				node->exceptional = 1;
		}
		node->slots[0] = slot;
		slot = node_to_entry(node);
		rcu_assign_pointer(root->rnode, slot);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}

/**
 * radix_tree_shrink - shrink radix tree to minimum height
 * @root: radix tree root
 */
static inline void radix_tree_shrink(struct radix_tree_root *root,
				     radix_tree_update_node_t update_node,
				     void *private)
{
	for (;;) {
		struct radix_tree_node *node = root->rnode;
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its child
		 * is not at the leftmost slot, or the child is a multiorder
		 * entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = node->slots[0];
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->rnode) as far as dependent read barriers go.
		 */
		root->rnode = child;

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = RADIX_TREE_RETRY;
			if (update_node)
				update_node(node, private);
		}

		radix_tree_node_free(node);
	}
}

static void delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node,
			radix_tree_update_node_t update_node, void *private)
{
	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node == entry_to_node(root->rnode))
				radix_tree_shrink(root, update_node, private);
			return;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			root_tag_clear_all(root);
			root->rnode = NULL;
		}

		radix_tree_node_free(node);

		node = parent;
	} while (node);
}

/**
 * __radix_tree_create - create a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 * @order: index occupies 2^order aligned slots
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Create, if necessary, and return the node and slot for an item
 * at position @index in the radix tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->rnode is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 *
 * Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void **slot = (void **)&root->rnode;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index | ((1UL << order) - 1);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough. */
	if (max > maxindex) {
		int error = radix_tree_extend(root, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = root->rnode;
		if (order == shift)
			shift += RADIX_TREE_MAP_SHIFT;
	}

	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node. */
			child = radix_tree_node_alloc(root);
			if (!child)
				return -ENOMEM;
			child->shift = shift;
			child->offset = offset;
			child->parent = node;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	/* Insert pointers to the canonical entry */
	if (order > shift) {
		unsigned i, n = 1 << (order - shift);
		offset = offset & ~(n - 1);
		slot = &node->slots[offset];
		child = node_to_entry(slot);
		for (i = 0; i < n; i++) {
			if (slot[i])
				return -EEXIST;
		}

		for (i = 1; i < n; i++) {
			rcu_assign_pointer(slot[i], child);
			node->count++;
		}
	}
#endif

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

/**
 * __radix_tree_insert - insert into a radix tree
 * @root: radix tree root
 * @index: index key
 * @order: key covers the 2^order indices around index
 * @item: item to insert
 *
 * Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;
	if (*slot != NULL)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		node->count++;
		if (radix_tree_exceptional_entry(item))
			node->exceptional++;
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);

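/*
 * Example (sketch): a plain single-order insertion normally goes through the
 * radix_tree_insert() wrapper from <linux/radix-tree.h>, which boils down to
 * __radix_tree_insert(root, index, 0, item).  The caller handles the return
 * values; "my_tree" and "item" are hypothetical:
 *
 *	err = radix_tree_insert(&my_tree, index, item);
 *	if (err == -EEXIST)
 *		handle an already-occupied index
 *	else if (err == -ENOMEM)
 *		handle allocation failure, see radix_tree_preload() above
 */
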
/**
 * __radix_tree_lookup - lookup an item in a radix tree
 * @root: radix tree root
 * @index: index key
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Lookup and return the item at position @index in the radix
 * tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->rnode is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	void **slot;

restart:
	parent = NULL;
	slot = (void **)&root->rnode;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		slot = parent->slots + offset;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}

/**
 * radix_tree_lookup_slot - lookup a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Returns: the slot corresponding to the position @index in the
 * radix tree @root. This is useful for update-if-exists operations.
 *
 * This function can be called under rcu_read_lock iff the slot is not
 * modified by radix_tree_replace_slot, otherwise it must be called
 * exclusive from other writers. Any dereference of the slot must be done
 * using radix_tree_deref_slot.
 */
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
	void **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);

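/*
 * Example (sketch): an RCU reader combining radix_tree_lookup_slot() with
 * radix_tree_deref_slot() and radix_tree_deref_retry() from
 * <linux/radix-tree.h>.  "my_tree" is hypothetical caller state and the
 * caller is responsible for the lifetime of the item it finds:
 *
 *	rcu_read_lock();
 * repeat:
 *	item = NULL;
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot) {
 *		item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item))
 *			goto repeat;
 *	}
 *	rcu_read_unlock();
 */
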
/**
 * radix_tree_lookup - perform lookup operation on a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Lookup the item at the position @index in the radix tree @root.
 *
 * This function can be called under rcu_read_lock, however the caller
 * must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 * them safely). No RCU barriers are required to access or modify the
 * returned item, however.
 */
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);

static void replace_slot(struct radix_tree_root *root,
			 struct radix_tree_node *node,
			 void **slot, void *item,
			 bool warn_typeswitch)
{
	void *old = rcu_dereference_raw(*slot);
	int count, exceptional;

	WARN_ON_ONCE(radix_tree_is_internal_node(item));

	count = !!item - !!old;
	exceptional = !!radix_tree_exceptional_entry(item) -
		      !!radix_tree_exceptional_entry(old);

	WARN_ON_ONCE(warn_typeswitch && (count || exceptional));

	if (node) {
		node->count += count;
		node->exceptional += exceptional;
	}

	rcu_assign_pointer(*slot, item);
}

/**
 * __radix_tree_replace - replace item in a slot
 * @root: radix tree root
 * @node: pointer to tree node
 * @slot: pointer to slot in @node
 * @item: new item to store in the slot.
 * @update_node: callback for changing leaf nodes
 * @private: private data to pass to @update_node
 *
 * For use with __radix_tree_lookup().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
void __radix_tree_replace(struct radix_tree_root *root,
			  struct radix_tree_node *node,
			  void **slot, void *item,
			  radix_tree_update_node_t update_node, void *private)
{
	/*
	 * This function supports replacing exceptional entries and
	 * deleting entries, but that needs accounting against the
	 * node unless the slot is root->rnode.
	 */
	replace_slot(root, node, slot, item,
		     !node && slot != (void **)&root->rnode);

	if (!node)
		return;

	if (update_node)
		update_node(node, private);

	delete_node(root, node, update_node, private);
}

/**
 * radix_tree_replace_slot - replace item in a slot
 * @root: radix tree root
 * @slot: pointer to slot
 * @item: new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(),
 * radix_tree_gang_lookup_tag_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 *
 * NOTE: This cannot be used to switch between non-entries (empty slots),
 * regular entries, and exceptional entries, as that requires accounting
 * inside the radix tree node. When switching from one type of entry or
 * deleting, use __radix_tree_lookup() and __radix_tree_replace().
 */
void radix_tree_replace_slot(struct radix_tree_root *root,
			     void **slot, void *item)
{
	replace_slot(root, NULL, slot, item, true);
}

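/*
 * Example (sketch): an update-if-exists operation built from
 * radix_tree_lookup_slot() and radix_tree_replace_slot(), done under the
 * caller's tree lock as required above.  "my_tree", "my_lock" and
 * "new_item" are hypothetical; old and new entries must be of the same
 * type (see the NOTE above):
 *
 *	spin_lock(&my_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot && radix_tree_deref_slot_protected(slot, &my_lock))
 *		radix_tree_replace_slot(&my_tree, slot, new_item);
 *	spin_unlock(&my_lock);
 */
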
/**
 * radix_tree_tag_set - set a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree.  From
 * the root all the way down to the leaf node.
 *
 * Returns the address of the tagged item.  Setting a tag on a not-present
 * item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);

static void node_tag_clear(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (!tag_get(node, tag, offset))
			return;
		tag_clear(node, tag, offset);
		if (any_tag_set(node, tag))
			return;

		offset = node->offset;
		node = node->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);
}

static void node_tag_set(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (tag_get(node, tag, offset))
			return;
		tag_set(node, tag, offset);
		offset = node->offset;
		node = node->parent;
	}

	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);
}

/**
 * radix_tree_tag_clear - clear a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree.  If this causes
 * the leaf node to have no tags set then clear the tag in the
 * next-to-leaf node, etc.
 *
 * Returns the address of the tagged item on success, else NULL.  ie:
 * has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	int uninitialized_var(offset);

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
	}

	if (node)
		node_tag_clear(root, parent, tag, offset);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);

/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	if (!root_tag_get(root, tag))
		return 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;
	if (node == NULL)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);

		if (!node)
			return 0;
		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);

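/*
 * Example (sketch): tags are typically set and cleared under the caller's
 * tree lock right after the corresponding item changes state, and tested
 * either directly or via a tagged iteration/gang lookup.  "my_tree" and
 * "my_lock" are hypothetical, and tag 0 is used purely for illustration:
 *
 *	spin_lock(&my_lock);
 *	radix_tree_tag_set(&my_tree, index, 0);
 *	spin_unlock(&my_lock);
 *
 *	if (radix_tree_tag_get(&my_tree, index, 0))
 *		the item at index currently carries tag 0
 */
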
static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}

/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
				struct radix_tree_node *node, unsigned offset,
				unsigned tag)
{
	unsigned tag_long = offset / BITS_PER_LONG;
	unsigned tag_bit  = offset % BITS_PER_LONG;

	iter->tags = node->tags[tag][tag_long] >> tag_bit;

	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
		/* Pick tags from next element */
		if (tag_bit)
			iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
		/* Clip chunk size, here only BITS_PER_LONG tags */
		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
	}
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
static void **skip_siblings(struct radix_tree_node **nodep,
			void **slot, struct radix_tree_iter *iter)
{
	void *sib = node_to_entry(slot - 1);

	while (iter->index < iter->next_index) {
		*nodep = rcu_dereference_raw(*slot);
		if (*nodep && *nodep != sib)
			return slot;
		slot++;
		iter->index = __radix_tree_iter_add(iter, 1);
		iter->tags >>= 1;
	}

	*nodep = NULL;
	return NULL;
}

void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
					unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node = rcu_dereference_raw(*slot);

	slot = skip_siblings(&node, slot, iter);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;
		unsigned long next_index;

		if (node == RADIX_TREE_RETRY)
			return slot;
		node = entry_to_node(node);
		iter->shift = node->shift;

		if (flags & RADIX_TREE_ITER_TAGGED) {
			offset = radix_tree_find_next_bit(node, tag, 0);
			if (offset == RADIX_TREE_MAP_SIZE)
				return NULL;
			slot = &node->slots[offset];
			iter->index = __radix_tree_iter_add(iter, offset);
			set_iter_tags(iter, node, offset, tag);
			node = rcu_dereference_raw(*slot);
		} else {
			offset = 0;
			slot = &node->slots[0];
			for (;;) {
				node = rcu_dereference_raw(*slot);
				if (node)
					break;
				slot++;
				offset++;
				if (offset == RADIX_TREE_MAP_SIZE)
					return NULL;
			}
			iter->index = __radix_tree_iter_add(iter, offset);
		}
		if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
			goto none;
		next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
		if (next_index < iter->next_index)
			iter->next_index = next_index;
	}

	return slot;
none:
	iter->next_index = 0;
	return NULL;
}
EXPORT_SYMBOL(__radix_tree_next_slot);
#else
static void **skip_siblings(struct radix_tree_node **nodep,
			void **slot, struct radix_tree_iter *iter)
{
	return slot;
}
#endif

void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter)
{
	struct radix_tree_node *node;

	slot++;
	iter->index = __radix_tree_iter_add(iter, 1);
	node = rcu_dereference_raw(*slot);
	skip_siblings(&node, slot, iter);
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
EXPORT_SYMBOL(radix_tree_iter_resume);

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root: radix tree root
 * @iter: iterator state
 * @flags: RADIX_TREE_ITER_* flags and tag index
 * Returns: pointer to chunk first slot, or NULL if iteration is over
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node, *child;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL. iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

restart:
	radix_tree_load_root(root, &child, &maxindex);
	if (index > maxindex)
		return NULL;
	if (!child)
		return NULL;

	if (!radix_tree_is_internal_node(child)) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		__set_iter_shift(iter, 0);
		return (void **)&root->rnode;
	}

	do {
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !child) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(node, tag,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = node->slots[offset];
					if (is_sibling_entry(node, slot))
						continue;
					if (slot)
						break;
				}
			index &= ~node_maxindex(node);
			index += offset << node->shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			child = rcu_dereference_raw(node->slots[offset]);
		}

		if ((child == NULL) || (child == RADIX_TREE_RETRY))
			goto restart;
	} while (radix_tree_is_internal_node(child));

	/* Update the iterator state */
	iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
	iter->next_index = (index | node_maxindex(node)) + 1;
	__set_iter_shift(iter, node->shift);

	if (flags & RADIX_TREE_ITER_TAGGED)
		set_iter_tags(iter, node, offset, tag);

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);

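/*
 * Example (sketch): radix_tree_next_chunk() is rarely called directly;
 * callers normally use the radix_tree_for_each_slot() /
 * radix_tree_for_each_tagged() macros from <linux/radix-tree.h>, which wrap
 * it.  A typical RCU-side walk over a hypothetical "my_tree" looks like:
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		use item at iter.index
 *	}
 *	rcu_read_unlock();
 */
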
/**
 * radix_tree_range_tag_if_tagged - for each item in given range set given
 *				    tag if item has another tag set
 * @root: radix tree root
 * @first_indexp: pointer to a starting index of a range to scan
 * @last_index: last index of a range to scan
 * @nr_to_tag: maximum number items to tag
 * @iftag: tag index to test
 * @settag: tag index to set if tested tag is set
 *
 * This function scans range of radix tree from first_index to last_index
 * (inclusive).  For each item in the range if iftag is set, the function sets
 * also settag. The function stops either after tagging nr_to_tag items or
 * after reaching last_index.
 *
 * The tags must be set from the leaf level only and propagated back up the
 * path to the root. We must do this so that we resolve the full path before
 * setting any tags on intermediate nodes. If we set tags as we descend, then
 * we can get to the leaf node and find that the index that has the iftag
 * set is outside the range we are scanning. This results in dangling tags and
 * can lead to problems with later tag operations (e.g. livelocks on lookups).
 *
 * The function returns the number of leaves where the tag was set and sets
 * *first_indexp to the first unscanned index.
 * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
 * be prepared to handle that.
 */
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int iftag, unsigned int settag)
{
	struct radix_tree_node *node, *child;
	unsigned long maxindex;
	unsigned long tagged = 0;
	unsigned long index = *first_indexp;

	radix_tree_load_root(root, &child, &maxindex);
	last_index = min(last_index, maxindex);
	if (index > last_index)
		return 0;
	if (!nr_to_tag)
		return 0;
	if (!root_tag_get(root, iftag)) {
		*first_indexp = last_index + 1;
		return 0;
	}
	if (!radix_tree_is_internal_node(child)) {
		*first_indexp = last_index + 1;
		root_tag_set(root, settag);
		return 1;
	}

	node = entry_to_node(child);

	for (;;) {
		unsigned offset = radix_tree_descend(node, &child, index);
		if (!child)
			goto next;
		if (!tag_get(node, iftag, offset))
			goto next;
		/* Sibling slots never have tags set on them */
		if (radix_tree_is_internal_node(child)) {
			node = entry_to_node(child);
			continue;
		}

		tagged++;
		node_tag_set(root, node, settag, offset);
next:
		/* Go to next entry in node */
		index = ((index >> node->shift) + 1) << node->shift;
		/* Overflow can happen when last_index is ~0UL... */
		if (index > last_index || !index)
			break;
		offset = (index >> node->shift) & RADIX_TREE_MAP_MASK;
		while (offset == 0) {
			/*
			 * We've fully scanned this node. Go up. Because
			 * last_index is guaranteed to be in the tree, what
			 * we do below cannot wander astray.
			 */
			node = node->parent;
			offset = (index >> node->shift) & RADIX_TREE_MAP_MASK;
		}
		if (is_sibling_entry(node, node->slots[offset]))
			goto next;
		if (tagged >= nr_to_tag)
			break;
	}

	*first_indexp = index;

	return tagged;
}
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);

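/*
 * Example (sketch): the best-known user of this interface is the page cache
 * writeback path, which upgrades a bounded number of dirty-tagged entries to
 * a "to write" tag before writing them out.  With hypothetical tag indices
 * IFTAG and SETTAG and a hypothetical "my_tree"/"my_lock", a caller might do:
 *
 *	unsigned long start = 0;
 *
 *	spin_lock_irq(&my_lock);
 *	radix_tree_range_tag_if_tagged(&my_tree, &start, ULONG_MAX,
 *				       128, IFTAG, SETTAG);
 *	spin_unlock_irq(&my_lock);
 */
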
1da177e4 LT |
1417 | /** |
1418 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree | |
1419 | * @root: radix tree root | |
1420 | * @results: where the results of the lookup are placed | |
1421 | * @first_index: start the lookup from this key | |
1422 | * @max_items: place up to this many items at *results | |
1423 | * | |
1424 | * Performs an index-ascending scan of the tree for present items. Places | |
1425 | * them at *@results and returns the number of items which were placed at | |
1426 | * *@results. | |
1427 | * | |
1428 | * The implementation is naive. | |
7cf9c2c7 NP |
1429 | * |
1430 | * Like radix_tree_lookup, radix_tree_gang_lookup may be called under | |
1431 | * rcu_read_lock. In this case, rather than the returned results being | |
2fcd9005 MW |
1432 | * an atomic snapshot of the tree at a single point in time, the |
1433 | * semantics of an RCU protected gang lookup are as though multiple | |
1434 | * radix_tree_lookups had been issued under individual locks, and the results |
1435 | * stored in 'results'. | |
1da177e4 LT |
1436 | */ |
1437 | unsigned int | |
1438 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |
1439 | unsigned long first_index, unsigned int max_items) | |
1440 | { | |
cebbd29e KK |
1441 | struct radix_tree_iter iter; |
1442 | void **slot; | |
1443 | unsigned int ret = 0; | |
7cf9c2c7 | 1444 | |
cebbd29e | 1445 | if (unlikely(!max_items)) |
7cf9c2c7 | 1446 | return 0; |
1da177e4 | 1447 | |
cebbd29e | 1448 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
46437f9a | 1449 | results[ret] = rcu_dereference_raw(*slot); |
cebbd29e KK |
1450 | if (!results[ret]) |
1451 | continue; | |
b194d16c | 1452 | if (radix_tree_is_internal_node(results[ret])) { |
46437f9a MW |
1453 | slot = radix_tree_iter_retry(&iter); |
1454 | continue; | |
1455 | } | |
cebbd29e | 1456 | if (++ret == max_items) |
1da177e4 | 1457 | break; |
1da177e4 | 1458 | } |
7cf9c2c7 | 1459 | |
1da177e4 LT |
1460 | return ret; |
1461 | } | |
1462 | EXPORT_SYMBOL(radix_tree_gang_lookup); | |
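/*
 * A minimal usage sketch (not from the original file): a lockless batch
 * lookup under rcu_read_lock().  The tree and batch size are assumptions;
 * each returned pointer is only a snapshot and must be revalidated if the
 * caller needs stronger guarantees than RCU provides.
 */
static unsigned int example_gang_lookup(struct radix_tree_root *tree,
					unsigned long start)
{
	void *batch[16];
	unsigned int i, nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup(tree, batch, start, ARRAY_SIZE(batch));
	for (i = 0; i < nr; i++) {
		/* use batch[i] while still inside the RCU read-side section */
	}
	rcu_read_unlock();
	return nr;
}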
1463 | ||
47feff2c NP |
1464 | /** |
1465 | * radix_tree_gang_lookup_slot - perform multiple slot lookups on a radix tree |
1466 | * @root: radix tree root | |
1467 | * @results: where the results of the lookup are placed | |
6328650b | 1468 | * @indices: where their indices should be placed (but usually NULL) |
47feff2c NP |
1469 | * @first_index: start the lookup from this key |
1470 | * @max_items: place up to this many items at *results | |
1471 | * | |
1472 | * Performs an index-ascending scan of the tree for present items. Places | |
1473 | * their slots at *@results and returns the number of items which were | |
1474 | * placed at *@results. | |
1475 | * | |
1476 | * The implementation is naive. | |
1477 | * | |
1478 | * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must | |
1479 | * be dereferenced with radix_tree_deref_slot, and if using only RCU | |
1480 | * protection, radix_tree_deref_slot may fail requiring a retry. | |
1481 | */ | |
1482 | unsigned int | |
6328650b HD |
1483 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, |
1484 | void ***results, unsigned long *indices, | |
47feff2c NP |
1485 | unsigned long first_index, unsigned int max_items) |
1486 | { | |
cebbd29e KK |
1487 | struct radix_tree_iter iter; |
1488 | void **slot; | |
1489 | unsigned int ret = 0; | |
47feff2c | 1490 | |
cebbd29e | 1491 | if (unlikely(!max_items)) |
47feff2c NP |
1492 | return 0; |
1493 | ||
cebbd29e KK |
1494 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
1495 | results[ret] = slot; | |
6328650b | 1496 | if (indices) |
cebbd29e KK |
1497 | indices[ret] = iter.index; |
1498 | if (++ret == max_items) | |
47feff2c | 1499 | break; |
47feff2c NP |
1500 | } |
1501 | ||
1502 | return ret; | |
1503 | } | |
1504 | EXPORT_SYMBOL(radix_tree_gang_lookup_slot); | |
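/*
 * A minimal usage sketch (not from the original file): dereferencing the
 * slots returned by radix_tree_gang_lookup_slot() under RCU only, where
 * radix_tree_deref_retry() signals that an entry moved and must be looked
 * up again.  The tree and batch size are assumptions for illustration.
 */
static void example_gang_lookup_slot(struct radix_tree_root *tree)
{
	void **slots[16];
	unsigned long indices[16];
	unsigned int i, nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup_slot(tree, slots, indices, 0,
					 ARRAY_SIZE(slots));
	for (i = 0; i < nr; i++) {
		void *entry = radix_tree_deref_slot(slots[i]);

		if (radix_tree_deref_retry(entry))
			continue;	/* restart the lookup at indices[i] */
		/* entry (possibly NULL by now) lives at index indices[i] */
	}
	rcu_read_unlock();
}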
1505 | ||
1da177e4 LT |
1506 | /** |
1507 | * radix_tree_gang_lookup_tag - perform multiple lookups on a radix tree |
1508 | * based on a tag | |
1509 | * @root: radix tree root | |
1510 | * @results: where the results of the lookup are placed | |
1511 | * @first_index: start the lookup from this key | |
1512 | * @max_items: place up to this many items at *results | |
daff89f3 | 1513 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) |
1da177e4 LT |
1514 | * |
1515 | * Performs an index-ascending scan of the tree for present items which | |
1516 | * have the tag indexed by @tag set. Places the items at *@results and | |
1517 | * returns the number of items which were placed at *@results. | |
1518 | */ | |
1519 | unsigned int | |
1520 | radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |
daff89f3 JC |
1521 | unsigned long first_index, unsigned int max_items, |
1522 | unsigned int tag) | |
1da177e4 | 1523 | { |
cebbd29e KK |
1524 | struct radix_tree_iter iter; |
1525 | void **slot; | |
1526 | unsigned int ret = 0; | |
612d6c19 | 1527 | |
cebbd29e | 1528 | if (unlikely(!max_items)) |
7cf9c2c7 NP |
1529 | return 0; |
1530 | ||
cebbd29e | 1531 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
46437f9a | 1532 | results[ret] = rcu_dereference_raw(*slot); |
cebbd29e KK |
1533 | if (!results[ret]) |
1534 | continue; | |
b194d16c | 1535 | if (radix_tree_is_internal_node(results[ret])) { |
46437f9a MW |
1536 | slot = radix_tree_iter_retry(&iter); |
1537 | continue; | |
1538 | } | |
cebbd29e | 1539 | if (++ret == max_items) |
1da177e4 | 1540 | break; |
1da177e4 | 1541 | } |
7cf9c2c7 | 1542 | |
1da177e4 LT |
1543 | return ret; |
1544 | } | |
1545 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); | |
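/*
 * A minimal usage sketch (not from the original file): collecting one batch
 * of entries that carry a given tag, as a dirty-entry scan might do.  The
 * tree, starting index and batch size are assumptions for illustration.
 */
static unsigned int example_gang_lookup_tag(struct radix_tree_root *tree,
					    unsigned int tag)
{
	void *batch[16];
	unsigned int nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup_tag(tree, batch, 0, ARRAY_SIZE(batch), tag);
	rcu_read_unlock();
	return nr;	/* entries found at or after index 0 with @tag set */
}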
1546 | ||
47feff2c NP |
1547 | /** |
1548 | * radix_tree_gang_lookup_tag_slot - perform multiple slot lookups on a |
1549 | * radix tree based on a tag | |
1550 | * @root: radix tree root | |
1551 | * @results: where the results of the lookup are placed | |
1552 | * @first_index: start the lookup from this key | |
1553 | * @max_items: place up to this many items at *results | |
1554 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) | |
1555 | * | |
1556 | * Performs an index-ascending scan of the tree for present items which | |
1557 | * have the tag indexed by @tag set. Places the slots at *@results and | |
1558 | * returns the number of slots which were placed at *@results. | |
1559 | */ | |
1560 | unsigned int | |
1561 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | |
1562 | unsigned long first_index, unsigned int max_items, | |
1563 | unsigned int tag) | |
1564 | { | |
cebbd29e KK |
1565 | struct radix_tree_iter iter; |
1566 | void **slot; | |
1567 | unsigned int ret = 0; | |
47feff2c | 1568 | |
cebbd29e | 1569 | if (unlikely(!max_items)) |
47feff2c NP |
1570 | return 0; |
1571 | ||
cebbd29e KK |
1572 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
1573 | results[ret] = slot; | |
1574 | if (++ret == max_items) | |
47feff2c | 1575 | break; |
47feff2c NP |
1576 | } |
1577 | ||
1578 | return ret; | |
1579 | } | |
1580 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | |
1581 | ||
e504f3fd HD |
1582 | #if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP) |
1583 | #include <linux/sched.h> /* for cond_resched() */ | |
1584 | ||
0a2efc6c MW |
1585 | struct locate_info { |
1586 | unsigned long found_index; | |
1587 | bool stop; | |
1588 | }; | |
1589 | ||
e504f3fd HD |
1590 | /* |
1591 | * This linear search is at present only useful to shmem_unuse_inode(). | |
1592 | */ | |
1593 | static unsigned long __locate(struct radix_tree_node *slot, void *item, | |
0a2efc6c | 1594 | unsigned long index, struct locate_info *info) |
e504f3fd | 1595 | { |
e504f3fd HD |
1596 | unsigned long i; |
1597 | ||
0a2efc6c | 1598 | do { |
9e85d811 | 1599 | unsigned int shift = slot->shift; |
e504f3fd | 1600 | |
0a2efc6c MW |
1601 | for (i = (index >> shift) & RADIX_TREE_MAP_MASK; |
1602 | i < RADIX_TREE_MAP_SIZE; | |
1603 | i++, index += (1UL << shift)) { | |
1604 | struct radix_tree_node *node = | |
1605 | rcu_dereference_raw(slot->slots[i]); | |
1606 | if (node == RADIX_TREE_RETRY) | |
1607 | goto out; | |
b194d16c | 1608 | if (!radix_tree_is_internal_node(node)) { |
0a2efc6c MW |
1609 | if (node == item) { |
1610 | info->found_index = index; | |
1611 | info->stop = true; | |
1612 | goto out; | |
1613 | } | |
1614 | continue; | |
e6145236 | 1615 | } |
4dd6c098 | 1616 | node = entry_to_node(node); |
0a2efc6c MW |
1617 | if (is_sibling_entry(slot, node)) |
1618 | continue; | |
1619 | slot = node; | |
1620 | break; | |
e6145236 | 1621 | } |
9e85d811 | 1622 | } while (i < RADIX_TREE_MAP_SIZE); |
e504f3fd | 1623 | |
e504f3fd | 1624 | out: |
0a2efc6c MW |
1625 | if ((index == 0) && (i == RADIX_TREE_MAP_SIZE)) |
1626 | info->stop = true; | |
e504f3fd HD |
1627 | return index; |
1628 | } | |
1629 | ||
1630 | /** | |
1631 | * radix_tree_locate_item - search through radix tree for item | |
1632 | * @root: radix tree root | |
1633 | * @item: item to be found | |
1634 | * | |
1635 | * Returns index where item was found, or -1 if not found. | |
1636 | * Caller must hold no lock (since this time-consuming function needs | |
1637 | * to be preemptible), and must check afterwards if item is still there. | |
1638 | */ | |
1639 | unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | |
1640 | { | |
1641 | struct radix_tree_node *node; | |
1642 | unsigned long max_index; | |
1643 | unsigned long cur_index = 0; | |
0a2efc6c MW |
1644 | struct locate_info info = { |
1645 | .found_index = -1, | |
1646 | .stop = false, | |
1647 | }; | |
e504f3fd HD |
1648 | |
1649 | do { | |
1650 | rcu_read_lock(); | |
1651 | node = rcu_dereference_raw(root->rnode); | |
b194d16c | 1652 | if (!radix_tree_is_internal_node(node)) { |
e504f3fd HD |
1653 | rcu_read_unlock(); |
1654 | if (node == item) | |
0a2efc6c | 1655 | info.found_index = 0; |
e504f3fd HD |
1656 | break; |
1657 | } | |
1658 | ||
4dd6c098 | 1659 | node = entry_to_node(node); |
0a2efc6c MW |
1660 | |
1661 | max_index = node_maxindex(node); | |
5f30fc94 HD |
1662 | if (cur_index > max_index) { |
1663 | rcu_read_unlock(); | |
e504f3fd | 1664 | break; |
5f30fc94 | 1665 | } |
e504f3fd | 1666 | |
0a2efc6c | 1667 | cur_index = __locate(node, item, cur_index, &info); |
e504f3fd HD |
1668 | rcu_read_unlock(); |
1669 | cond_resched(); | |
0a2efc6c | 1670 | } while (!info.stop && cur_index <= max_index); |
e504f3fd | 1671 | |
0a2efc6c | 1672 | return info.found_index; |
e504f3fd HD |
1673 | } |
1674 | #else | |
1675 | unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | |
1676 | { | |
1677 | return -1; | |
1678 | } | |
1679 | #endif /* CONFIG_SHMEM && CONFIG_SWAP */ | |
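/*
 * A minimal usage sketch (not from the original file): how a swapoff-style
 * caller might locate a known entry and then re-check it under its own
 * locking, since radix_tree_locate_item() itself runs unlocked.  The names
 * here are assumptions for illustration.
 */
static bool example_locate_and_check(struct radix_tree_root *tree, void *item)
{
	unsigned long index = radix_tree_locate_item(tree, item);

	if (index == (unsigned long)-1)
		return false;
	/* take the lock protecting @tree here, then verify the entry */
	return radix_tree_lookup(tree, index) == item;
}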
47feff2c | 1680 | |
139e5616 JW |
1681 | /** |
1682 | * __radix_tree_delete_node - try to free node after clearing a slot | |
1683 | * @root: radix tree root | |
139e5616 JW |
1684 | * @node: node whose slot has just been cleared |
1685 | * | |
1686 | * After clearing a slot in @node of the radix tree |
1687 | * rooted at @root, call this function to attempt freeing the |
1688 | * node and shrinking the tree. | |
139e5616 | 1689 | */ |
14b46879 | 1690 | void __radix_tree_delete_node(struct radix_tree_root *root, |
139e5616 JW |
1691 | struct radix_tree_node *node) |
1692 | { | |
14b46879 | 1693 | delete_node(root, node, NULL, NULL); |
139e5616 JW |
1694 | } |
1695 | ||
57578c2e MW |
1696 | static inline void delete_sibling_entries(struct radix_tree_node *node, |
1697 | void *ptr, unsigned offset) | |
1698 | { | |
1699 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | |
1700 | int i; | |
1701 | for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { | |
1702 | if (node->slots[offset + i] != ptr) | |
1703 | break; | |
1704 | node->slots[offset + i] = NULL; | |
1705 | node->count--; | |
1706 | } | |
1707 | #endif | |
1708 | } | |
1709 | ||
1da177e4 | 1710 | /** |
53c59f26 | 1711 | * radix_tree_delete_item - delete an item from a radix tree |
1da177e4 LT |
1712 | * @root: radix tree root |
1713 | * @index: index key | |
53c59f26 | 1714 | * @item: expected item |
1da177e4 | 1715 | * |
53c59f26 | 1716 | * Remove @item at @index from the radix tree rooted at @root. |
1da177e4 | 1717 | * |
53c59f26 JW |
1718 | * Returns the address of the deleted item, or NULL if it was not present |
1719 | * or the entry at the given @index was not @item. | |
1da177e4 | 1720 | */ |
53c59f26 JW |
1721 | void *radix_tree_delete_item(struct radix_tree_root *root, |
1722 | unsigned long index, void *item) | |
1da177e4 | 1723 | { |
139e5616 | 1724 | struct radix_tree_node *node; |
57578c2e | 1725 | unsigned int offset; |
139e5616 JW |
1726 | void **slot; |
1727 | void *entry; | |
d5274261 | 1728 | int tag; |
1da177e4 | 1729 | |
139e5616 JW |
1730 | entry = __radix_tree_lookup(root, index, &node, &slot); |
1731 | if (!entry) | |
1732 | return NULL; | |
1da177e4 | 1733 | |
139e5616 JW |
1734 | if (item && entry != item) |
1735 | return NULL; | |
1736 | ||
1737 | if (!node) { | |
612d6c19 NP |
1738 | root_tag_clear_all(root); |
1739 | root->rnode = NULL; | |
139e5616 | 1740 | return entry; |
612d6c19 | 1741 | } |
1da177e4 | 1742 | |
29e0967c | 1743 | offset = get_slot_offset(node, slot); |
53c59f26 | 1744 | |
d604c324 MW |
1745 | /* Clear all tags associated with the item to be deleted. */ |
1746 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | |
1747 | node_tag_clear(root, node, tag, offset); | |
1da177e4 | 1748 | |
a4db4dce | 1749 | delete_sibling_entries(node, node_to_entry(slot), offset); |
4d693d08 | 1750 | __radix_tree_replace(root, node, slot, NULL, NULL, NULL); |
612d6c19 | 1751 | |
139e5616 | 1752 | return entry; |
1da177e4 | 1753 | } |
53c59f26 JW |
1754 | EXPORT_SYMBOL(radix_tree_delete_item); |
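/*
 * A minimal usage sketch (not from the original file): conditionally
 * removing an entry only while it still holds the value the caller expects,
 * a compare-and-delete style operation.  The caller is assumed to hold the
 * lock that serialises writers of @tree.
 */
static bool example_delete_if_unchanged(struct radix_tree_root *tree,
					unsigned long index, void *expected)
{
	return radix_tree_delete_item(tree, index, expected) == expected;
}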
1755 | ||
1756 | /** | |
1757 | * radix_tree_delete - delete an item from a radix tree | |
1758 | * @root: radix tree root | |
1759 | * @index: index key | |
1760 | * | |
1761 | * Remove the item at @index from the radix tree rooted at @root. | |
1762 | * | |
1763 | * Returns the address of the deleted item, or NULL if it was not present. | |
1764 | */ | |
1765 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |
1766 | { | |
1767 | return radix_tree_delete_item(root, index, NULL); | |
1768 | } | |
1da177e4 LT |
1769 | EXPORT_SYMBOL(radix_tree_delete); |
1770 | ||
d3798ae8 JW |
1771 | void radix_tree_clear_tags(struct radix_tree_root *root, |
1772 | struct radix_tree_node *node, | |
1773 | void **slot) | |
d604c324 | 1774 | { |
d604c324 MW |
1775 | if (node) { |
1776 | unsigned int tag, offset = get_slot_offset(node, slot); | |
1777 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | |
1778 | node_tag_clear(root, node, tag, offset); | |
1779 | } else { | |
1780 | /* Clear root node tags */ | |
1781 | root->gfp_mask &= __GFP_BITS_MASK; | |
1782 | } | |
d604c324 MW |
1783 | } |
1784 | ||
1da177e4 LT |
1785 | /** |
1786 | * radix_tree_tagged - test whether any items in the tree are tagged | |
1787 | * @root: radix tree root | |
1788 | * @tag: tag to test | |
1789 | */ | |
daff89f3 | 1790 | int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) |
1da177e4 | 1791 | { |
612d6c19 | 1792 | return root_tag_get(root, tag); |
1da177e4 LT |
1793 | } |
1794 | EXPORT_SYMBOL(radix_tree_tagged); | |
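/*
 * A minimal usage sketch (not from the original file): skipping a tagged
 * scan entirely when the root tags say nothing in the tree carries @tag,
 * the way writeback avoids walking clean mappings.  Illustration only.
 */
static void example_scan_if_tagged(struct radix_tree_root *tree,
				   unsigned int tag)
{
	if (!radix_tree_tagged(tree, tag))
		return;		/* no entry anywhere in the tree has @tag set */
	/* ...perform a radix_tree_for_each_tagged() walk here... */
}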
1795 | ||
1796 | static void | |
449dd698 | 1797 | radix_tree_node_ctor(void *arg) |
1da177e4 | 1798 | { |
449dd698 JW |
1799 | struct radix_tree_node *node = arg; |
1800 | ||
1801 | memset(node, 0, sizeof(*node)); | |
1802 | INIT_LIST_HEAD(&node->private_list); | |
1da177e4 LT |
1803 | } |
1804 | ||
c78c66d1 KS |
1805 | static __init unsigned long __maxindex(unsigned int height) |
1806 | { | |
1807 | unsigned int width = height * RADIX_TREE_MAP_SHIFT; | |
1808 | int shift = RADIX_TREE_INDEX_BITS - width; | |
1809 | ||
1810 | if (shift < 0) | |
1811 | return ~0UL; | |
1812 | if (shift >= BITS_PER_LONG) | |
1813 | return 0UL; | |
1814 | return ~0UL >> shift; | |
1815 | } | |
1816 | ||
1817 | static __init void radix_tree_init_maxnodes(void) | |
1818 | { | |
1819 | unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1]; | |
1820 | unsigned int i, j; | |
1821 | ||
1822 | for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++) | |
1823 | height_to_maxindex[i] = __maxindex(i); | |
1824 | for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) { | |
1825 | for (j = i; j > 0; j--) | |
1826 | height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1; | |
1827 | } | |
1828 | } | |
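/*
 * Worked example (illustration only, assuming RADIX_TREE_MAP_SHIFT == 6,
 * i.e. 64 slots per node): __maxindex(1) == 63 and __maxindex(2) == 4095,
 * so height_to_maxnodes[2] == (__maxindex(0) + 1) + (__maxindex(1) + 1)
 * == 1 + 64 == 65, i.e. one root node plus 64 fully populated child nodes.
 */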
1829 | ||
d544abd5 | 1830 | static int radix_tree_cpu_dead(unsigned int cpu) |
1da177e4 | 1831 | { |
2fcd9005 MW |
1832 | struct radix_tree_preload *rtp; |
1833 | struct radix_tree_node *node; | |
1834 | ||
1835 | /* Free per-cpu pool of preloaded nodes */ | |
d544abd5 SAS |
1836 | rtp = &per_cpu(radix_tree_preloads, cpu); |
1837 | while (rtp->nr) { | |
1838 | node = rtp->nodes; | |
1839 | rtp->nodes = node->private_data; | |
1840 | kmem_cache_free(radix_tree_node_cachep, node); | |
1841 | rtp->nr--; | |
2fcd9005 | 1842 | } |
d544abd5 | 1843 | return 0; |
1da177e4 | 1844 | } |
1da177e4 LT |
1845 | |
1846 | void __init radix_tree_init(void) | |
1847 | { | |
d544abd5 | 1848 | int ret; |
1da177e4 LT |
1849 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", |
1850 | sizeof(struct radix_tree_node), 0, | |
488514d1 CL |
1851 | SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, |
1852 | radix_tree_node_ctor); | |
c78c66d1 | 1853 | radix_tree_init_maxnodes(); |
d544abd5 SAS |
1854 | ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", |
1855 | NULL, radix_tree_cpu_dead); | |
1856 | WARN_ON(ret < 0); | |
1da177e4 | 1857 | } |