/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h>		/* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/xarray.h>


/*
 * Radix tree node cache.
 */
struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)

/*
 * The IDR does not have to be as high as the radix tree since it uses
 * signed integers, not unsigned longs.
 */
#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)

/*
 * The IDA is even shorter since it uses a bitmap at the last level.
 */
#define IDA_INDEX_BITS		(8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
#define IDA_MAX_PATH		(DIV_ROUND_UP(IDA_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE	(IDA_MAX_PATH * 2 - 1)

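/*
 * A worked example of the sizing above (an illustration, assuming the
 * common RADIX_TREE_MAP_SHIFT of 6, i.e. 64 slots per node, on 64-bit):
 *
 *	RADIX_TREE_MAX_PATH	= DIV_ROUND_UP(64, 6)	= 11
 *	RADIX_TREE_PRELOAD_SIZE	= 11 * 2 - 1		= 21
 *	IDR_INDEX_BITS		= 31
 *	IDR_MAX_PATH		= DIV_ROUND_UP(31, 6)	= 6
 *	IDR_PRELOAD_SIZE	= 6 * 2 - 1		= 11
 */
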
/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->parent points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	XA_RETRY_ENTRY

static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
	return parent ? slot - parent->slots : 0;
}

static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);

	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = rcu_dereference_raw(parent->slots[offset]);
	}

	*nodep = (void *)entry;
	return offset;
}

static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
	return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
}

static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
	return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
}

static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
	return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
}

static inline bool is_idr(const struct radix_tree_root *root)
{
	return !!(root->xa_flags & ROOT_IS_IDR);
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(const struct radix_tree_node *node,
							unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}

/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @node: the node in whose tag bitmap to search
 * @tag: the tag index
 * @offset: the bit number to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from RADIX_TREE_MAP_SIZE up to
 * roundup(RADIX_TREE_MAP_SIZE, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or RADIX_TREE_MAP_SIZE if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}

static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
}

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

static unsigned long next_index(unsigned long index,
				const struct radix_tree_node *node,
				unsigned long offset)
{
	return (index & ~node_maxindex(node)) + (offset << node->shift);
}

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
			struct radix_tree_root *root,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int nr_values)
{
	struct radix_tree_node *ret = NULL;

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->parent;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->nr_values = nr_values;
		ret->parent = parent;
		ret->array = root;
	}
	return ret;
}

void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab.  We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);

/*
 * The same as the function above, except we don't guarantee preloading
 * happens.  We do it if we decide it helps.  On success, return zero with
 * preemption disabled.  On error, return -ENOMEM with preemption not
 * disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

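/*
 * A minimal sketch of the preload pattern the two functions above exist
 * for.  The tree, lock and function names here are illustrative, not part
 * of this file; the tree must be initialised without __GFP_DIRECT_RECLAIM
 * so that the insert under the lock never sleeps:
 *
 *	static RADIX_TREE(my_tree, GFP_ATOMIC);
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	int my_insert(unsigned long index, void *item)
 *	{
 *		int err = radix_tree_preload(GFP_KERNEL);
 *		if (err)
 *			return err;
 *		spin_lock(&my_lock);
 *		err = radix_tree_insert(&my_tree, index, item);
 *		spin_unlock(&my_lock);
 *		radix_tree_preload_end();
 *		return err;
 *	}
 */
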
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
				unsigned long index, unsigned int shift)
{
	void *entry;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be. */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	entry = rcu_dereference_raw(root->xa_head);
	if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
							root, shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		if (is_idr(root)) {
			all_tag_set(node, IDR_FREE);
			if (!root_tag_get(root, IDR_FREE)) {
				tag_clear(node, IDR_FREE, 0);
				root_tag_set(root, IDR_FREE);
			}
		} else {
			/* Propagate the aggregated tag info to the new child */
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
				if (root_tag_get(root, tag))
					tag_set(node, tag, 0);
			}
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(entry)) {
			entry_to_node(entry)->parent = node;
		} else if (xa_is_value(entry)) {
			/* Moving a value entry root->xa_head to a node */
			node->nr_values = 1;
		}
		/*
		 * entry was already in the radix tree, so we do not need
		 * rcu_assign_pointer here
		 */
		node->slots[0] = (void __rcu *)entry;
		entry = node_to_entry(node);
		rcu_assign_pointer(root->xa_head, entry);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}

/**
 *	radix_tree_shrink - shrink radix tree to minimum height
 *	@root:		radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its child
		 * is not at the leftmost slot, or the child is a multiorder
		 * entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = rcu_dereference_raw(node->slots[0]);
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		/*
		 * For an IDR, we must not shrink entry 0 into the root in
		 * case somebody calls idr_replace() with a pointer that
		 * appears to be an internal entry
		 */
		if (!node->shift && is_idr(root))
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->xa_head) as far as dependent read barriers go.
		 */
		root->xa_head = (void __rcu *)child;
		if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
			root_tag_clear(root, IDR_FREE);

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If the item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}

static bool delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node_to_entry(node) ==
					rcu_dereference_raw(root->xa_head))
				deleted |= radix_tree_shrink(root);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			/*
			 * Shouldn't the tags already have all been cleared
			 * by the caller?
			 */
			if (!is_idr(root))
				root_tag_clear_all(root);
			root->xa_head = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}

/**
 *	__radix_tree_create - create a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		index occupies 2^order aligned slots
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Create, if necessary, and return the node and slot for an item
 *	at position @index in the radix tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->xa_head is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 *
 *	Returns -ENOMEM, or 0 for success.
 */
static int __radix_tree_create(struct radix_tree_root *root,
		unsigned long index, unsigned order,
		struct radix_tree_node **nodep, void __rcu ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->xa_head;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index | ((1UL << order) - 1);
	gfp_t gfp = root_gfp_mask(root);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough. */
	if (order > 0 && max == ((1UL << order) - 1))
		max++;
	if (max > maxindex) {
		int error = radix_tree_extend(root, gfp, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = rcu_dereference_raw(root->xa_head);
	}

	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node. */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

/*
 * Free any nodes below this node.  The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it.  If we need to add a destructor, we can
 * add that functionality later.  Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = rcu_dereference_raw(child->slots[offset]);
		if (xa_is_node(entry) && child->shift) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;
			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item, unsigned order, bool replace)
{
	void *sibling;
	unsigned i, n, tag, offset, tags = 0;

	if (node) {
		if (order > node->shift)
			n = 1 << (order - node->shift);
		else
			n = 1;
		offset = get_slot_offset(node, slot);
	} else {
		n = 1;
		offset = 0;
	}

	if (n > 1) {
		offset = offset & ~(n - 1);
		slot = &node->slots[offset];
	}
	sibling = xa_mk_sibling(offset);

	for (i = 0; i < n; i++) {
		if (slot[i]) {
			if (replace) {
				node->count--;
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tag_get(node, tag, offset + i))
						tags |= 1 << tag;
			} else
				return -EEXIST;
		}
	}

	for (i = 0; i < n; i++) {
		struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
		if (i) {
			rcu_assign_pointer(slot[i], sibling);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_clear(node, tag, offset + i);
		} else {
			rcu_assign_pointer(slot[i], item);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_set(node, tag, offset);
		}
		if (xa_is_node(old))
			radix_tree_free_nodes(old);
		if (xa_is_value(old))
			node->nr_values--;
	}
	if (node) {
		node->count += n;
		if (xa_is_value(item))
			node->nr_values += n;
	}
	return n;
}
#else
static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item, unsigned order, bool replace)
{
	if (*slot)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);
	if (node) {
		node->count++;
		if (xa_is_value(item))
			node->nr_values++;
	}
	return 1;
}
#endif

/**
 *	__radix_tree_insert - insert into a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		key covers the 2^order indices around index
 *	@item:		item to insert
 *
 *	Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void __rcu **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;

	error = insert_entries(node, slot, item, order, false);
	if (error < 0)
		return error;

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);

/**
 *	__radix_tree_lookup - lookup an item in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Lookup and return the item at position @index in the radix
 *	tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->xa_head is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(const struct radix_tree_root *root,
			  unsigned long index, struct radix_tree_node **nodep,
			  void __rcu ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	void __rcu **slot;

 restart:
	parent = NULL;
	slot = (void __rcu **)&root->xa_head;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		slot = parent->slots + offset;
		if (parent->shift == 0)
			break;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}

/**
 *	radix_tree_lookup_slot - lookup a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Returns: the slot corresponding to the position @index in the
 *	radix tree @root. This is useful for update-if-exists operations.
 *
 *	This function can be called under rcu_read_lock iff the slot is not
 *	modified by radix_tree_replace_slot, otherwise it must be called
 *	exclusive from other writers. Any dereference of the slot must be done
 *	using radix_tree_deref_slot.
 */
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
				unsigned long index)
{
	void __rcu **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);

/**
 *	radix_tree_lookup - perform lookup operation on a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Lookup the item at the position @index in the radix tree @root.
 *
 *	This function can be called under rcu_read_lock, however the caller
 *	must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 *	them safely). No RCU barriers are required to access or modify the
 *	returned item, however.
 */
void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);

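/*
 * A minimal sketch of an RCU-protected lookup against the illustrative
 * my_tree from the preload example above.  my_get_ref() is a hypothetical
 * stand-in for whatever reference-counting scheme the caller uses to keep
 * the item alive after rcu_read_unlock():
 *
 *	void *my_lookup(unsigned long index)
 *	{
 *		void *item;
 *
 *		rcu_read_lock();
 *		item = radix_tree_lookup(&my_tree, index);
 *		if (item && !my_get_ref(item))
 *			item = NULL;
 *		rcu_read_unlock();
 *		return item;
 *	}
 */
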
static inline void replace_sibling_entries(struct radix_tree_node *node,
				void __rcu **slot, int count, int values)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	unsigned offset = get_slot_offset(node, slot);
	void *ptr = xa_mk_sibling(offset);

	while (++offset < RADIX_TREE_MAP_SIZE) {
		if (rcu_dereference_raw(node->slots[offset]) != ptr)
			break;
		if (count < 0) {
			node->slots[offset] = NULL;
			node->count--;
		}
		node->nr_values += values;
	}
#endif
}

static void replace_slot(void __rcu **slot, void *item,
		struct radix_tree_node *node, int count, int values)
{
	if (node && (count || values)) {
		node->count += count;
		node->nr_values += values;
		replace_sibling_entries(node, slot, count, values);
	}

	rcu_assign_pointer(*slot, item);
}

static bool node_tag_get(const struct radix_tree_root *root,
				const struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	if (node)
		return tag_get(node, tag, offset);
	return root_tag_get(root, tag);
}

/*
 * IDR users want to be able to store NULL in the tree, so if the slot isn't
 * free, don't adjust the count, even if it's transitioning between NULL and
 * non-NULL.  For the IDA, we mark slots as being IDR_FREE while they still
 * have empty bits, but it only stores NULL in slots when they're being
 * deleted.
 */
static int calculate_count(struct radix_tree_root *root,
				struct radix_tree_node *node, void __rcu **slot,
				void *item, void *old)
{
	if (is_idr(root)) {
		unsigned offset = get_slot_offset(node, slot);
		bool free = node_tag_get(root, node, IDR_FREE, offset);
		if (!free)
			return 0;
		if (!old)
			return 1;
	}
	return !!item - !!old;
}

/**
 * __radix_tree_replace - replace item in a slot
 * @root:	radix tree root
 * @node:	pointer to tree node
 * @slot:	pointer to slot in @node
 * @item:	new item to store in the slot.
 *
 * For use with __radix_tree_lookup().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
void __radix_tree_replace(struct radix_tree_root *root,
			  struct radix_tree_node *node,
			  void __rcu **slot, void *item)
{
	void *old = rcu_dereference_raw(*slot);
	int values = !!xa_is_value(item) - !!xa_is_value(old);
	int count = calculate_count(root, node, slot, item, old);

	/*
	 * This function supports replacing value entries and
	 * deleting entries, but that needs accounting against the
	 * node unless the slot is root->xa_head.
	 */
	WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
			(count || values));
	replace_slot(slot, item, node, count, values);

	if (!node)
		return;

	delete_node(root, node);
}

/**
 * radix_tree_replace_slot - replace item in a slot
 * @root:	radix tree root
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot() and
 * radix_tree_gang_lookup_tag_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 *
 * NOTE: This cannot be used to switch between non-entries (empty slots),
 * regular entries, and value entries, as that requires accounting
 * inside the radix tree node. When switching from one type of entry or
 * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
 * radix_tree_iter_replace().
 */
void radix_tree_replace_slot(struct radix_tree_root *root,
			     void __rcu **slot, void *item)
{
	__radix_tree_replace(root, NULL, slot, item);
}
EXPORT_SYMBOL(radix_tree_replace_slot);

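/*
 * A minimal update-if-exists sketch combining radix_tree_lookup_slot()
 * with radix_tree_replace_slot(), under the illustrative my_tree/my_lock
 * from the preload example; per the NOTE above, both the old and the new
 * entry here are assumed to be regular pointers:
 *
 *	void __rcu **slot;
 *
 *	spin_lock(&my_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(&my_tree, slot, new_item);
 *	spin_unlock(&my_lock);
 */
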
/**
 * radix_tree_iter_replace - replace item in a slot
 * @root:	radix tree root
 * @iter:	iterator state
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_for_each_slot().
 * Caller must hold tree write locked.
 */
void radix_tree_iter_replace(struct radix_tree_root *root,
				const struct radix_tree_iter *iter,
				void __rcu **slot, void *item)
{
	__radix_tree_replace(root, iter->node, slot, item);
}

static void node_tag_set(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (tag_get(node, tag, offset))
			return;
		tag_set(node, tag, offset);
		offset = node->offset;
		node = node->parent;
	}

	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);
}

/**
 *	radix_tree_tag_set - set a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree.  From
 *	the root all the way down to the leaf node.
 *
 *	Returns the address of the tagged item.  Setting a tag on a not-present
 *	item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);

static void node_tag_clear(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (!tag_get(node, tag, offset))
			return;
		tag_clear(node, tag, offset);
		if (any_tag_set(node, tag))
			return;

		offset = node->offset;
		node = node->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);
}

/**
 *	radix_tree_tag_clear - clear a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree.  If this causes
 *	the leaf node to have no tags set then clear the tag in the
 *	next-to-leaf node, etc.
 *
 *	Returns the address of the tagged item on success, else NULL.  ie:
 *	has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	int uninitialized_var(offset);

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
	}

	if (node)
		node_tag_clear(root, parent, tag, offset);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);

/**
 * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
 * @root:	radix tree root
 * @iter:	iterator state
 * @tag:	tag to clear
 */
void radix_tree_iter_tag_clear(struct radix_tree_root *root,
			const struct radix_tree_iter *iter, unsigned int tag)
{
	node_tag_clear(root, iter->node, tag, iter_offset(iter));
}

/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:	radix tree root
 * @index:	index key
 * @tag:	tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(const struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	if (!root_tag_get(root, tag))
		return 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);

		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);

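/*
 * A minimal tag sketch against the illustrative my_tree/my_lock above;
 * tag 0 plays the role of e.g. PAGECACHE_TAG_DIRTY in the page cache.
 * Setting and clearing require the tree write locked; radix_tree_tag_get()
 * tolerates RCU with the caveat documented above:
 *
 *	spin_lock(&my_lock);
 *	radix_tree_tag_set(&my_tree, index, 0);
 *	if (radix_tree_tag_get(&my_tree, index, 0))
 *		radix_tree_tag_clear(&my_tree, index, 0);
 *	spin_unlock(&my_lock);
 */
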
static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}

/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
				struct radix_tree_node *node, unsigned offset,
				unsigned tag)
{
	unsigned tag_long = offset / BITS_PER_LONG;
	unsigned tag_bit = offset % BITS_PER_LONG;

	if (!node) {
		iter->tags = 1;
		return;
	}

	iter->tags = node->tags[tag][tag_long] >> tag_bit;

	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
		/* Pick tags from next element */
		if (tag_bit)
			iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
		/* Clip chunk size, here only BITS_PER_LONG tags */
		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
	}
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
static void __rcu **skip_siblings(struct radix_tree_node **nodep,
			void __rcu **slot, struct radix_tree_iter *iter)
{
	while (iter->index < iter->next_index) {
		*nodep = rcu_dereference_raw(*slot);
		if (*nodep && !xa_is_sibling(*nodep))
			return slot;
		slot++;
		iter->index = __radix_tree_iter_add(iter, 1);
		iter->tags >>= 1;
	}

	*nodep = NULL;
	return NULL;
}

void __rcu **__radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node;

	slot = skip_siblings(&node, slot, iter);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;
		unsigned long next_index;

		if (node == RADIX_TREE_RETRY)
			return slot;
		node = entry_to_node(node);
		iter->node = node;
		iter->shift = node->shift;

		if (flags & RADIX_TREE_ITER_TAGGED) {
			offset = radix_tree_find_next_bit(node, tag, 0);
			if (offset == RADIX_TREE_MAP_SIZE)
				return NULL;
			slot = &node->slots[offset];
			iter->index = __radix_tree_iter_add(iter, offset);
			set_iter_tags(iter, node, offset, tag);
			node = rcu_dereference_raw(*slot);
		} else {
			offset = 0;
			slot = &node->slots[0];
			for (;;) {
				node = rcu_dereference_raw(*slot);
				if (node)
					break;
				slot++;
				offset++;
				if (offset == RADIX_TREE_MAP_SIZE)
					return NULL;
			}
			iter->index = __radix_tree_iter_add(iter, offset);
		}
		if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
			goto none;
		next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
		if (next_index < iter->next_index)
			iter->next_index = next_index;
	}

	return slot;
 none:
	iter->next_index = 0;
	return NULL;
}
EXPORT_SYMBOL(__radix_tree_next_slot);
#else
static void __rcu **skip_siblings(struct radix_tree_node **nodep,
			void __rcu **slot, struct radix_tree_iter *iter)
{
	return slot;
}
#endif

void __rcu **radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter)
{
	struct radix_tree_node *node;

	slot++;
	iter->index = __radix_tree_iter_add(iter, 1);
	skip_siblings(&node, slot, iter);
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
EXPORT_SYMBOL(radix_tree_iter_resume);

78c1d784 KK |
1352 | /** |
1353 | * radix_tree_next_chunk - find next chunk of slots for iteration | |
1354 | * | |
1355 | * @root: radix tree root | |
1356 | * @iter: iterator state | |
1357 | * @flags: RADIX_TREE_ITER_* flags and tag index | |
1358 | * Returns: pointer to chunk first slot, or NULL if iteration is over | |
1359 | */ | |
d7b62727 | 1360 | void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, |
78c1d784 KK |
1361 | struct radix_tree_iter *iter, unsigned flags) |
1362 | { | |
9e85d811 | 1363 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; |
8c1244de | 1364 | struct radix_tree_node *node, *child; |
21ef5339 | 1365 | unsigned long index, offset, maxindex; |
78c1d784 KK |
1366 | |
1367 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) | |
1368 | return NULL; | |
1369 | ||
1370 | /* | |
1371 | * Catch next_index overflow after ~0UL. iter->index never overflows | |
1372 | * during iterating; it can be zero only at the beginning. | |
1373 | * And we cannot overflow iter->next_index in a single step, | |
1374 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. | |
fffaee36 KK |
1375 | * |
1376 | * This condition also used by radix_tree_next_slot() to stop | |
91b9677c | 1377 | * contiguous iterating, and forbid switching to the next chunk. |
78c1d784 KK |
1378 | */ |
1379 | index = iter->next_index; | |
1380 | if (!index && iter->index) | |
1381 | return NULL; | |
1382 | ||
21ef5339 | 1383 | restart: |
9e85d811 | 1384 | radix_tree_load_root(root, &child, &maxindex); |
21ef5339 RZ |
1385 | if (index > maxindex) |
1386 | return NULL; | |
8c1244de MW |
1387 | if (!child) |
1388 | return NULL; | |
21ef5339 | 1389 | |
8c1244de | 1390 | if (!radix_tree_is_internal_node(child)) { |
78c1d784 | 1391 | /* Single-slot tree */ |
21ef5339 RZ |
1392 | iter->index = index; |
1393 | iter->next_index = maxindex + 1; | |
78c1d784 | 1394 | iter->tags = 1; |
268f42de | 1395 | iter->node = NULL; |
8c1244de | 1396 | __set_iter_shift(iter, 0); |
f8d5d0cc | 1397 | return (void __rcu **)&root->xa_head; |
8c1244de | 1398 | } |
21ef5339 | 1399 | |
8c1244de MW |
1400 | do { |
1401 | node = entry_to_node(child); | |
9e85d811 | 1402 | offset = radix_tree_descend(node, &child, index); |
21ef5339 | 1403 | |
78c1d784 | 1404 | if ((flags & RADIX_TREE_ITER_TAGGED) ? |
8c1244de | 1405 | !tag_get(node, tag, offset) : !child) { |
78c1d784 KK |
1406 | /* Hole detected */ |
1407 | if (flags & RADIX_TREE_ITER_CONTIG) | |
1408 | return NULL; | |
1409 | ||
1410 | if (flags & RADIX_TREE_ITER_TAGGED) | |
bc412fca | 1411 | offset = radix_tree_find_next_bit(node, tag, |
78c1d784 KK |
1412 | offset + 1); |
1413 | else | |
1414 | while (++offset < RADIX_TREE_MAP_SIZE) { | |
12320d0f MW |
1415 | void *slot = rcu_dereference_raw( |
1416 | node->slots[offset]); | |
02c02bf1 | 1417 | if (xa_is_sibling(slot)) |
21ef5339 RZ |
1418 | continue; |
1419 | if (slot) | |
78c1d784 KK |
1420 | break; |
1421 | } | |
8c1244de | 1422 | index &= ~node_maxindex(node); |
9e85d811 | 1423 | index += offset << node->shift; |
78c1d784 KK |
1424 | /* Overflow after ~0UL */ |
1425 | if (!index) | |
1426 | return NULL; | |
1427 | if (offset == RADIX_TREE_MAP_SIZE) | |
1428 | goto restart; | |
8c1244de | 1429 | child = rcu_dereference_raw(node->slots[offset]); |
78c1d784 KK |
1430 | } |
1431 | ||
e157b555 | 1432 | if (!child) |
78c1d784 | 1433 | goto restart; |
e157b555 MW |
1434 | if (child == RADIX_TREE_RETRY) |
1435 | break; | |
66ee620f | 1436 | } while (node->shift && radix_tree_is_internal_node(child)); |
78c1d784 KK |
1437 | |
1438 | /* Update the iterator state */ | |
8c1244de MW |
1439 | iter->index = (index & ~node_maxindex(node)) | (offset << node->shift); | |
1440 | iter->next_index = (index | node_maxindex(node)) + 1; | |
268f42de | 1441 | iter->node = node; |
9e85d811 | 1442 | __set_iter_shift(iter, node->shift); |
78c1d784 | 1443 | |
148deab2 MW |
1444 | if (flags & RADIX_TREE_ITER_TAGGED) |
1445 | set_iter_tags(iter, node, offset, tag); | |
78c1d784 KK |
1446 | |
1447 | return node->slots + offset; | |
1448 | } | |
1449 | EXPORT_SYMBOL(radix_tree_next_chunk); | |
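
/*
 * Illustrative sketch (not upstream code): radix_tree_next_chunk() is
 * rarely called directly; it backs the radix_tree_for_each_slot() family
 * of iterator macros.  A minimal RCU-protected walk might look like this,
 * where "my_tree" is a hypothetical caller-owned tree:
 */
static void example_walk(struct radix_tree_root *my_tree)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, my_tree, &iter, 0) {
		void *entry = radix_tree_deref_slot(slot);

		/* A concurrent grow/shrink can leave a retry entry. */
		if (radix_tree_deref_retry(entry)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		pr_info("index %lu: %p\n", iter.index, entry);
	}
	rcu_read_unlock();
}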
1450 | ||
1da177e4 LT |
1451 | /** |
1452 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree | |
1453 | * @root: radix tree root | |
1454 | * @results: where the results of the lookup are placed | |
1455 | * @first_index: start the lookup from this key | |
1456 | * @max_items: place up to this many items at *results | |
1457 | * | |
1458 | * Performs an index-ascending scan of the tree for present items. Places | |
1459 | * them at *@results and returns the number of items which were placed at | |
1460 | * *@results. | |
1461 | * | |
1462 | * The implementation is naive: a plain linear walk over the tree. | |
7cf9c2c7 NP |
1463 | * |
1464 | * Like radix_tree_lookup, radix_tree_gang_lookup may be called under | |
1465 | * rcu_read_lock. In this case, rather than the returned results being | |
2fcd9005 MW |
1466 | * an atomic snapshot of the tree at a single point in time, the |
1467 | * semantics of an RCU protected gang lookup are as though multiple | |
1468 | * radix_tree_lookups had been issued one after another, each under | |
1469 | * its own lock, with the results stored in 'results'. | |
1da177e4 LT |
1470 | */ |
1471 | unsigned int | |
35534c86 | 1472 | radix_tree_gang_lookup(const struct radix_tree_root *root, void **results, |
1da177e4 LT |
1473 | unsigned long first_index, unsigned int max_items) |
1474 | { | |
cebbd29e | 1475 | struct radix_tree_iter iter; |
d7b62727 | 1476 | void __rcu **slot; |
cebbd29e | 1477 | unsigned int ret = 0; |
7cf9c2c7 | 1478 | |
cebbd29e | 1479 | if (unlikely(!max_items)) |
7cf9c2c7 | 1480 | return 0; |
1da177e4 | 1481 | |
cebbd29e | 1482 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
46437f9a | 1483 | results[ret] = rcu_dereference_raw(*slot); |
cebbd29e KK |
1484 | if (!results[ret]) |
1485 | continue; | |
b194d16c | 1486 | if (radix_tree_is_internal_node(results[ret])) { |
46437f9a MW |
1487 | slot = radix_tree_iter_retry(&iter); |
1488 | continue; | |
1489 | } | |
cebbd29e | 1490 | if (++ret == max_items) |
1da177e4 | 1491 | break; |
1da177e4 | 1492 | } |
7cf9c2c7 | 1493 | |
1da177e4 LT |
1494 | return ret; |
1495 | } | |
1496 | EXPORT_SYMBOL(radix_tree_gang_lookup); | |
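
/*
 * Illustrative sketch (not upstream code): a batched lookup of the first
 * 16 present items starting at index 0, done under rcu_read_lock().
 * "my_tree" is a hypothetical caller-owned tree.
 */
static unsigned int example_gang_lookup(struct radix_tree_root *my_tree)
{
	void *batch[16];
	unsigned int i, nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup(my_tree, batch, 0, ARRAY_SIZE(batch));
	for (i = 0; i < nr; i++)
		pr_info("found %p\n", batch[i]);
	rcu_read_unlock();

	return nr;
}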
1497 | ||
1da177e4 LT |
1498 | /** |
1499 | * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree | |
1500 | * based on a tag | |
1501 | * @root: radix tree root | |
1502 | * @results: where the results of the lookup are placed | |
1503 | * @first_index: start the lookup from this key | |
1504 | * @max_items: place up to this many items at *results | |
daff89f3 | 1505 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) |
1da177e4 LT |
1506 | * |
1507 | * Performs an index-ascending scan of the tree for present items which | |
1508 | * have the tag indexed by @tag set. Places the items at *@results and | |
1509 | * returns the number of items which were placed at *@results. | |
1510 | */ | |
1511 | unsigned int | |
35534c86 | 1512 | radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results, |
daff89f3 JC |
1513 | unsigned long first_index, unsigned int max_items, |
1514 | unsigned int tag) | |
1da177e4 | 1515 | { |
cebbd29e | 1516 | struct radix_tree_iter iter; |
d7b62727 | 1517 | void __rcu **slot; |
cebbd29e | 1518 | unsigned int ret = 0; |
612d6c19 | 1519 | |
cebbd29e | 1520 | if (unlikely(!max_items)) |
7cf9c2c7 NP |
1521 | return 0; |
1522 | ||
cebbd29e | 1523 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
46437f9a | 1524 | results[ret] = rcu_dereference_raw(*slot); |
cebbd29e KK |
1525 | if (!results[ret]) |
1526 | continue; | |
b194d16c | 1527 | if (radix_tree_is_internal_node(results[ret])) { |
46437f9a MW |
1528 | slot = radix_tree_iter_retry(&iter); |
1529 | continue; | |
1530 | } | |
cebbd29e | 1531 | if (++ret == max_items) |
1da177e4 | 1532 | break; |
1da177e4 | 1533 | } |
7cf9c2c7 | 1534 | |
1da177e4 LT |
1535 | return ret; |
1536 | } | |
1537 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); | |
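
/*
 * Illustrative sketch (not upstream code): scanning every entry carrying
 * a tag in batches.  The resume index must come from the items themselves,
 * since the gang lookup does not report where it stopped; this mirrors how
 * page-cache scans use page->index.  "struct my_item", "MY_TAG" and
 * "my_tree" are hypothetical names.
 */
#define MY_TAG 0
struct my_item { unsigned long index; /* ... payload ... */ };

static void example_tag_scan(struct radix_tree_root *my_tree)
{
	void *batch[16];
	unsigned long start = 0;
	unsigned int i, nr;

	do {
		rcu_read_lock();
		nr = radix_tree_gang_lookup_tag(my_tree, batch, start,
						ARRAY_SIZE(batch), MY_TAG);
		rcu_read_unlock();
		for (i = 0; i < nr; i++) {
			struct my_item *item = batch[i];

			start = item->index + 1;	/* resume point */
			/* ... process item ... */
		}
	} while (nr == ARRAY_SIZE(batch));
}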
1538 | ||
47feff2c NP |
1539 | /** |
1540 | * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a | |
1541 | * radix tree based on a tag | |
1542 | * @root: radix tree root | |
1543 | * @results: where the results of the lookup are placed | |
1544 | * @first_index: start the lookup from this key | |
1545 | * @max_items: place up to this many items at *results | |
1546 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) | |
1547 | * | |
1548 | * Performs an index-ascending scan of the tree for present items which | |
1549 | * have the tag indexed by @tag set. Places the slots at *@results and | |
1550 | * returns the number of slots which were placed at *@results. | |
1551 | */ | |
1552 | unsigned int | |
35534c86 | 1553 | radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root, |
d7b62727 | 1554 | void __rcu ***results, unsigned long first_index, |
35534c86 | 1555 | unsigned int max_items, unsigned int tag) |
47feff2c | 1556 | { |
cebbd29e | 1557 | struct radix_tree_iter iter; |
d7b62727 | 1558 | void __rcu **slot; |
cebbd29e | 1559 | unsigned int ret = 0; |
47feff2c | 1560 | |
cebbd29e | 1561 | if (unlikely(!max_items)) |
47feff2c NP |
1562 | return 0; |
1563 | ||
cebbd29e KK |
1564 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
1565 | results[ret] = slot; | |
1566 | if (++ret == max_items) | |
47feff2c | 1567 | break; |
47feff2c NP |
1568 | } |
1569 | ||
1570 | return ret; | |
1571 | } | |
1572 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | |
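
/*
 * Illustrative sketch (not upstream code): the _slot variant hands back
 * the slot locations themselves, so a caller holding the tree lock can
 * update the entries in place.  "my_tree" and "new_entry" are hypothetical;
 * MY_TAG is reused from the sketch above.
 */
static void example_replace_tagged(struct radix_tree_root *my_tree,
				   void *new_entry)
{
	void __rcu **slots[16];
	unsigned int i, nr;

	nr = radix_tree_gang_lookup_tag_slot(my_tree, slots, 0,
					     ARRAY_SIZE(slots), MY_TAG);
	for (i = 0; i < nr; i++)
		radix_tree_replace_slot(my_tree, slots[i], new_entry);
}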
1573 | ||
0ac398ef | 1574 | static bool __radix_tree_delete(struct radix_tree_root *root, |
d7b62727 | 1575 | struct radix_tree_node *node, void __rcu **slot) |
0ac398ef | 1576 | { |
0a835c4f | 1577 | void *old = rcu_dereference_raw(*slot); |
01959dfe | 1578 | int values = xa_is_value(old) ? -1 : 0; |
0ac398ef MW |
1579 | unsigned offset = get_slot_offset(node, slot); |
1580 | int tag; | |
1581 | ||
0a835c4f MW |
1582 | if (is_idr(root)) |
1583 | node_tag_set(root, node, IDR_FREE, offset); | |
1584 | else | |
1585 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | |
1586 | node_tag_clear(root, node, tag, offset); | |
0ac398ef | 1587 | |
01959dfe | 1588 | replace_slot(slot, NULL, node, -1, values); |
1cf56f9d | 1589 | return node && delete_node(root, node); |
0ac398ef MW |
1590 | } |
1591 | ||
1da177e4 | 1592 | /** |
0ac398ef MW |
1593 | * radix_tree_iter_delete - delete the entry at this iterator position |
1594 | * @root: radix tree root | |
1595 | * @iter: iterator state | |
1596 | * @slot: pointer to slot | |
1da177e4 | 1597 | * |
0ac398ef MW |
1598 | * Delete the entry at the position currently pointed to by the iterator. |
1599 | * This may result in the current node being freed; if it is, the iterator | |
1600 | * is advanced so that it will not reference the freed memory. This | |
1601 | * function may be called without any locking if there are no other threads | |
1602 | * which can access this tree. | |
1603 | */ | |
1604 | void radix_tree_iter_delete(struct radix_tree_root *root, | |
d7b62727 | 1605 | struct radix_tree_iter *iter, void __rcu **slot) |
0ac398ef MW |
1606 | { |
1607 | if (__radix_tree_delete(root, iter->node, slot)) | |
1608 | iter->index = iter->next_index; | |
1609 | } | |
d1b48c1e | 1610 | EXPORT_SYMBOL(radix_tree_iter_delete); |
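
/*
 * Illustrative sketch (not upstream code): draining an entire tree from
 * within an iteration.  This is safe precisely because
 * radix_tree_iter_delete() advances the iterator when the node it points
 * into is freed.  "my_tree" is hypothetical; the caller must exclude all
 * other users of the tree.
 */
static void example_drain(struct radix_tree_root *my_tree)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, my_tree, &iter, 0)
		radix_tree_iter_delete(my_tree, &iter, slot);
}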
0ac398ef MW |
1611 | |
1612 | /** | |
1613 | * radix_tree_delete_item - delete an item from a radix tree | |
1614 | * @root: radix tree root | |
1615 | * @index: index key | |
1616 | * @item: expected item | |
1da177e4 | 1617 | * |
0ac398ef | 1618 | * Remove @item at @index from the radix tree rooted at @root. |
1da177e4 | 1619 | * |
0ac398ef MW |
1620 | * Return: the deleted entry, or %NULL if it was not present |
1621 | * or the entry at the given @index was not @item. | |
1da177e4 | 1622 | */ |
53c59f26 JW |
1623 | void *radix_tree_delete_item(struct radix_tree_root *root, |
1624 | unsigned long index, void *item) | |
1da177e4 | 1625 | { |
0a835c4f | 1626 | struct radix_tree_node *node = NULL; |
7a4deea1 | 1627 | void __rcu **slot = NULL; |
139e5616 | 1628 | void *entry; |
1da177e4 | 1629 | |
139e5616 | 1630 | entry = __radix_tree_lookup(root, index, &node, &slot); |
7a4deea1 MW |
1631 | if (!slot) |
1632 | return NULL; | |
0a835c4f MW |
1633 | if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, |
1634 | get_slot_offset(node, slot)))) | |
139e5616 | 1635 | return NULL; |
1da177e4 | 1636 | |
139e5616 JW |
1637 | if (item && entry != item) |
1638 | return NULL; | |
1639 | ||
0ac398ef | 1640 | __radix_tree_delete(root, node, slot); |
612d6c19 | 1641 | |
139e5616 | 1642 | return entry; |
1da177e4 | 1643 | } |
53c59f26 JW |
1644 | EXPORT_SYMBOL(radix_tree_delete_item); |
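
/*
 * Illustrative sketch (not upstream code): using the @item check to close
 * a lookup/delete race; the entry is freed only if the slot still held the
 * object we expected.  "my_tree", "index" and "item" are hypothetical
 * caller state.
 */
static void example_delete_item(struct radix_tree_root *my_tree,
				unsigned long index, void *item)
{
	if (radix_tree_delete_item(my_tree, index, item) == item)
		kfree(item);	/* we removed it, so we own the last ref */
}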
1645 | ||
1646 | /** | |
0ac398ef MW |
1647 | * radix_tree_delete - delete an entry from a radix tree |
1648 | * @root: radix tree root | |
1649 | * @index: index key | |
53c59f26 | 1650 | * |
0ac398ef | 1651 | * Remove the entry at @index from the radix tree rooted at @root. |
53c59f26 | 1652 | * |
0ac398ef | 1653 | * Return: The deleted entry, or %NULL if it was not present. |
53c59f26 JW |
1654 | */ |
1655 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |
1656 | { | |
1657 | return radix_tree_delete_item(root, index, NULL); | |
1658 | } | |
1da177e4 LT |
1659 | EXPORT_SYMBOL(radix_tree_delete); |
1660 | ||
1661 | /** | |
1662 | * radix_tree_tagged - test whether any items in the tree are tagged | |
1663 | * @root: radix tree root | |
1664 | * @tag: tag to test | |
1665 | */ | |
35534c86 | 1666 | int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag) |
1da177e4 | 1667 | { |
612d6c19 | 1668 | return root_tag_get(root, tag); |
1da177e4 LT |
1669 | } |
1670 | EXPORT_SYMBOL(radix_tree_tagged); | |
1671 | ||
0a835c4f MW |
1672 | /** |
1673 | * idr_preload - preload for idr_alloc() | |
1674 | * @gfp_mask: allocation mask to use for preloading | |
1675 | * | |
1676 | * Preallocate memory to use for the next call to idr_alloc(). This function | |
1677 | * returns with preemption disabled. It will be enabled by idr_preload_end(). | |
1678 | */ | |
1679 | void idr_preload(gfp_t gfp_mask) | |
1680 | { | |
bc9ae224 ED |
1681 | if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE)) |
1682 | preempt_disable(); | |
0a835c4f MW |
1683 | } |
1684 | EXPORT_SYMBOL(idr_preload); | |
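
/*
 * Illustrative sketch (not upstream code) of the usual preload pattern:
 * preallocate with a sleeping GFP mask outside the lock, then allocate
 * atomically (GFP_NOWAIT) while the lock is held.  Note that
 * __radix_tree_preload() leaves preemption disabled only on success,
 * which is why idr_preload() disables it explicitly on failure: the
 * idr_preload_end() above must always have something to pair with.
 * "my_idr", "my_lock" and "obj" are hypothetical caller-owned objects.
 */
static int example_alloc_id(struct idr *my_idr, spinlock_t *my_lock, void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(my_lock);
	id = idr_alloc(my_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(my_lock);
	idr_preload_end();

	return id;	/* negative errno on failure */
}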
1685 | ||
460488c5 | 1686 | void __rcu **idr_get_free(struct radix_tree_root *root, |
388f79fd CM |
1687 | struct radix_tree_iter *iter, gfp_t gfp, |
1688 | unsigned long max) | |
0a835c4f MW |
1689 | { |
1690 | struct radix_tree_node *node = NULL, *child; | |
f8d5d0cc | 1691 | void __rcu **slot = (void __rcu **)&root->xa_head; |
0a835c4f | 1692 | unsigned long maxindex, start = iter->next_index; |
0a835c4f MW |
1693 | unsigned int shift, offset = 0; |
1694 | ||
1695 | grow: | |
1696 | shift = radix_tree_load_root(root, &child, &maxindex); | |
1697 | if (!radix_tree_tagged(root, IDR_FREE)) | |
1698 | start = max(start, maxindex + 1); | |
1699 | if (start > max) | |
1700 | return ERR_PTR(-ENOSPC); | |
1701 | ||
1702 | if (start > maxindex) { | |
1703 | int error = radix_tree_extend(root, gfp, start, shift); | |
1704 | if (error < 0) | |
1705 | return ERR_PTR(error); | |
1706 | shift = error; | |
f8d5d0cc | 1707 | child = rcu_dereference_raw(root->xa_head); |
0a835c4f | 1708 | } |
66ee620f MW |
1709 | if (start == 0 && shift == 0) |
1710 | shift = RADIX_TREE_MAP_SHIFT; | |
0a835c4f MW |
1711 | |
1712 | while (shift) { | |
1713 | shift -= RADIX_TREE_MAP_SHIFT; | |
1714 | if (child == NULL) { | |
1715 | /* Have to add a child node. */ | |
d58275bc MW |
1716 | child = radix_tree_node_alloc(gfp, node, root, shift, |
1717 | offset, 0, 0); | |
0a835c4f MW |
1718 | if (!child) |
1719 | return ERR_PTR(-ENOMEM); | |
1720 | all_tag_set(child, IDR_FREE); | |
1721 | rcu_assign_pointer(*slot, node_to_entry(child)); | |
1722 | if (node) | |
1723 | node->count++; | |
1724 | } else if (!radix_tree_is_internal_node(child)) | |
1725 | break; | |
1726 | ||
1727 | node = entry_to_node(child); | |
1728 | offset = radix_tree_descend(node, &child, start); | |
1729 | if (!tag_get(node, IDR_FREE, offset)) { | |
1730 | offset = radix_tree_find_next_bit(node, IDR_FREE, | |
1731 | offset + 1); | |
1732 | start = next_index(start, node, offset); | |
1733 | if (start > max) | |
1734 | return ERR_PTR(-ENOSPC); | |
1735 | while (offset == RADIX_TREE_MAP_SIZE) { | |
1736 | offset = node->offset + 1; | |
1737 | node = node->parent; | |
1738 | if (!node) | |
1739 | goto grow; | |
1740 | shift = node->shift; | |
1741 | } | |
1742 | child = rcu_dereference_raw(node->slots[offset]); | |
1743 | } | |
1744 | slot = &node->slots[offset]; | |
1745 | } | |
1746 | ||
1747 | iter->index = start; | |
1748 | if (node) | |
1749 | iter->next_index = 1 + min(max, (start | node_maxindex(node))); | |
1750 | else | |
1751 | iter->next_index = 1; | |
1752 | iter->node = node; | |
1753 | __set_iter_shift(iter, shift); | |
1754 | set_iter_tags(iter, node, offset, IDR_FREE); | |
1755 | ||
1756 | return slot; | |
1757 | } | |
1758 | ||
1759 | /** | |
1760 | * idr_destroy - release all internal memory from an IDR | |
1761 | * @idr: idr handle | |
1762 | * | |
1763 | * After this function is called, the IDR is empty, and may be reused or | |
1764 | * the data structure containing it may be freed. | |
1765 | * | |
1766 | * A typical clean-up sequence for objects stored in an idr tree will use | |
1767 | * idr_for_each() to free all objects, if necessary, then idr_destroy() to | |
1768 | * free the memory used to keep track of those objects. | |
1769 | */ | |
1770 | void idr_destroy(struct idr *idr) | |
1771 | { | |
f8d5d0cc | 1772 | struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head); |
0a835c4f MW |
1773 | if (radix_tree_is_internal_node(node)) |
1774 | radix_tree_free_nodes(node); | |
f8d5d0cc | 1775 | idr->idr_rt.xa_head = NULL; |
0a835c4f MW |
1776 | root_tag_set(&idr->idr_rt, IDR_FREE); |
1777 | } | |
1778 | EXPORT_SYMBOL(idr_destroy); | |
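
/*
 * Illustrative sketch (not upstream code) of the clean-up sequence
 * described above: idr_for_each() releases the objects, then
 * idr_destroy() releases the tree nodes.  "my_idr" is a hypothetical
 * caller-owned IDR whose entries were allocated with kmalloc().
 */
static int example_free_fn(int id, void *p, void *data)
{
	kfree(p);
	return 0;
}

static void example_teardown(struct idr *my_idr)
{
	idr_for_each(my_idr, example_free_fn, NULL);
	idr_destroy(my_idr);
}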
1779 | ||
1da177e4 | 1780 | static void |
449dd698 | 1781 | radix_tree_node_ctor(void *arg) |
1da177e4 | 1782 | { |
449dd698 JW |
1783 | struct radix_tree_node *node = arg; |
1784 | ||
1785 | memset(node, 0, sizeof(*node)); | |
1786 | INIT_LIST_HEAD(&node->private_list); | |
1da177e4 LT |
1787 | } |
1788 | ||
d544abd5 | 1789 | static int radix_tree_cpu_dead(unsigned int cpu) |
1da177e4 | 1790 | { |
2fcd9005 MW |
1791 | struct radix_tree_preload *rtp; |
1792 | struct radix_tree_node *node; | |
1793 | ||
1794 | /* Free per-cpu pool of preloaded nodes */ | |
d544abd5 SAS |
1795 | rtp = &per_cpu(radix_tree_preloads, cpu); |
1796 | while (rtp->nr) { | |
1797 | node = rtp->nodes; | |
1293d5c5 | 1798 | rtp->nodes = node->parent; |
d544abd5 SAS |
1799 | kmem_cache_free(radix_tree_node_cachep, node); |
1800 | rtp->nr--; | |
2fcd9005 | 1801 | } |
d544abd5 | 1802 | return 0; |
1da177e4 | 1803 | } |
1da177e4 LT |
1804 | |
1805 | void __init radix_tree_init(void) | |
1806 | { | |
d544abd5 | 1807 | int ret; |
7e784422 MH |
1808 | |
1809 | BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32); | |
fa290cda | 1810 | BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK); |
02c02bf1 | 1811 | BUILD_BUG_ON(XA_CHUNK_SIZE > 255); |
1da177e4 LT |
1812 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", |
1813 | sizeof(struct radix_tree_node), 0, | |
488514d1 CL |
1814 | SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, |
1815 | radix_tree_node_ctor); | |
d544abd5 SAS |
1816 | ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", |
1817 | NULL, radix_tree_cpu_dead); | |
1818 | WARN_ON(ret < 0); | |
1da177e4 | 1819 | } |