/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
/*
 * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
 * than a data item) is signalled by the low bit set in the root->rnode
 * pointer.
 *
 * In this case root->height is > 0, but the indirect pointer tests are
 * needed for RCU lookups (because root->height is unreliable). The only
 * time callers need worry about this is when doing a lookup_slot under
 * RCU.
 *
 * The indirect pointer is also used to tag the last pointer of a node
 * when it is shrunk, before we rcu free the node. See shrink code for
 * details.
 */
#define RADIX_TREE_INDIRECT_PTR		1
/*
 * A common use of the radix tree is to store pointers to struct pages;
 * but shmem/tmpfs also needs to store swap entries in the same tree:
 * those are marked as exceptional entries to distinguish them.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2
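
/*
 * Editor's illustrative sketch, not part of the original header: packing a
 * small integer into an exceptional entry and unpacking it again.  The
 * variable names are hypothetical.
 *
 *	unsigned long val = 42;
 *	void *entry = (void *)((val << RADIX_TREE_EXCEPTIONAL_SHIFT) |
 *			       RADIX_TREE_EXCEPTIONAL_ENTRY);
 *
 *	if (radix_tree_exceptional_entry(entry))
 *		val = (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 */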
#define RADIX_DAX_MASK	0xf
#define RADIX_DAX_SHIFT	4
#define RADIX_DAX_PTE  (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_PMD  (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))
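
/*
 * Editor's illustrative sketch, not part of the original header: a DAX
 * entry round-trips through the macros above ("sector" is a hypothetical
 * value small enough not to overflow the shift).
 *
 *	void *entry = RADIX_DAX_ENTRY(sector, 0);
 *
 *	BUG_ON(RADIX_DAX_TYPE(entry) != RADIX_DAX_PTE);
 *	BUG_ON(RADIX_DAX_SECTOR(entry) != sector);
 */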
static inline int radix_tree_is_indirect_ptr(void *ptr)
{
	return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
}

/*** radix-tree API starts here ***/
#define RADIX_TREE_MAX_TAGS 3

#ifndef RADIX_TREE_MAP_SHIFT
#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
#endif

#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_TAG_LONGS	\
	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/* Internally used bits of node->count */
#define RADIX_TREE_COUNT_SHIFT	(RADIX_TREE_MAP_SHIFT + 1)
#define RADIX_TREE_COUNT_MASK	((1UL << RADIX_TREE_COUNT_SHIFT) - 1)
struct radix_tree_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned int	count;
	union {
		struct {
			/* Used when ascending tree */
			struct radix_tree_node *parent;
			/* For tree user */
			void *private_data;
		};
		/* Used when freeing node */
		struct rcu_head	rcu_head;
	};
	/* For tree user */
	struct list_head private_list;
	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};
#define RADIX_TREE_INIT(mask)	{					\
	.gfp_mask = (mask),						\
	.rnode = NULL,							\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(mask)

#define INIT_RADIX_TREE(root, mask)					\
do {									\
	(root)->gfp_mask = (mask);					\
	(root)->rnode = NULL;						\
} while (0)
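
/*
 * Editor's illustrative sketch, not part of the original header: the two
 * equivalent ways to obtain an initialised, empty tree ("my_tree" and
 * "my_other_tree" are hypothetical names).
 *
 *	RADIX_TREE(my_tree, GFP_KERNEL);
 *
 *	struct radix_tree_root my_other_tree;
 *	INIT_RADIX_TREE(&my_other_tree, GFP_KERNEL);
 */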
static inline bool radix_tree_empty(struct radix_tree_root *root)
{
	return root->rnode == NULL;
}
/*
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronisation (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes must be completely managed by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions are able to be called locklessly, using RCU. The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions. Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and lifetimes
 * of the items. So if RCU lock-free lookups are used, typically this would mean
 * that the items have their own locks, or are amenable to lock-free access; and
 * that the items are freed by RCU (or only freed after having been deleted from
 * the radix tree *and* a synchronize_rcu() grace period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */
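
/*
 * Editor's illustrative sketch, not part of the original header, of the
 * rules above: lock-free lookups run inside an rcu_read_lock() region,
 * while modifications are serialised by a caller-provided lock ("my_lock"
 * and "my_tree" are hypothetical).
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	...use item, subject to the lifetime rules above...
 *	rcu_read_unlock();
 *
 *	spin_lock(&my_lock);
 *	radix_tree_delete(&my_tree, index);
 *	spin_unlock(&my_lock);
 */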
/**
 * radix_tree_deref_slot - dereference a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference. Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 */
static inline void *radix_tree_deref_slot(void **pslot)
{
	return rcu_dereference(*pslot);
}
/**
 * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * Similar to radix_tree_deref_slot but only used during migration when a
 * page's mapping is being moved. The caller does not hold the RCU read lock
 * but it must hold the tree lock to prevent parallel updates.
 */
static inline void *radix_tree_deref_slot_protected(void **pslot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
}
/**
 * radix_tree_deref_retry - check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
}
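
/*
 * Editor's illustrative sketch, not part of the original header: the usual
 * lookup-and-retry pattern under the RCU read lock.
 *
 *	void **slot;
 *	void *item;
 *
 *	rcu_read_lock();
 * repeat:
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	item = slot ? radix_tree_deref_slot(slot) : NULL;
 *	if (radix_tree_deref_retry(item))
 *		goto repeat;
 *	rcu_read_unlock();
 */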
/**
 * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}
/**
 * radix_tree_exception - radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg &
			(RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
}
/**
 * radix_tree_replace_slot - replace item in a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
static inline void radix_tree_replace_slot(void **pslot, void *item)
{
	BUG_ON(radix_tree_is_indirect_ptr(item));
	rcu_assign_pointer(*pslot, item);
}
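
/*
 * Editor's illustrative sketch, not part of the original header: replacing
 * an item in place while holding the tree write-side lock ("my_lock" is
 * hypothetical).
 *
 *	spin_lock(&my_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(slot, new_item);
 *	spin_unlock(&my_lock);
 */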
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
			unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root,
			unsigned long index, void *entry)
{
	return __radix_tree_insert(root, index, 0, entry);
}
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
bool __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
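
/*
 * Editor's illustrative sketch, not part of the original header: the usual
 * insertion pattern.  radix_tree_preload() may sleep, so call it before
 * taking the tree lock; on success it returns with preemption disabled,
 * which radix_tree_preload_end() undoes.
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, my_item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */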
/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @shift:	shift for the node that holds our slots
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	unsigned int	shift;
#endif
};
static inline unsigned int iter_shift(struct radix_tree_iter *iter)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	return iter->shift;
#else
	return 0;
#endif
}
#define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG		0x0200	/* stop at first hole */
/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup.  If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}
/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * Also it fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iterating (tags).
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags);
/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	return NULL;
}
static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + (slots << iter_shift(iter));
}
/**
 * radix_tree_iter_next - resume iterating when the chunk may be invalid
 * @iter:	iterator state
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * to continue the iteration from the next index.
 */
static inline __must_check
void **radix_tree_iter_next(struct radix_tree_iter *iter)
{
	iter->next_index = __radix_tree_iter_add(iter, 1);
	iter->tags = 0;
	return NULL;
}
/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return (iter->next_index - iter->index) >> iter_shift(iter);
}
static inline void *indirect_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
}
/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 */
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		void *canon = slot;

		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
					radix_tree_is_indirect_ptr(slot[1])) {
			if (indirect_to_ptr(slot[1]) == canon) {
				iter->tags >>= 1;
				iter->index = __radix_tree_iter_add(iter, 1);
				slot++;
				continue;
			}
			iter->next_index = __radix_tree_iter_add(iter, 1);
			return NULL;
		}
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			return slot + 1;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset;
			iter->index = __radix_tree_iter_add(iter, offset + 1);
			return slot + offset + 1;
		}
	} else {
		long count = radix_tree_chunk_size(iter);
		void *canon = slot;

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
			    radix_tree_is_indirect_ptr(*slot)) {
				if (indirect_to_ptr(*slot) == canon)
					continue;
				else {
					iter->next_index = iter->index;
					break;
				}
			}

			if (likely(*slot))
				return slot;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;
}
/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to the radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))
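
/*
 * Editor's illustrative sketch, not part of the original header: iterating
 * under the RCU read lock, restarting on a dead slot with
 * radix_tree_iter_retry().
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		...process item at iter.index...
 *	}
 *	rcu_read_unlock();
 */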
/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to the radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))
/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to the radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_TAGGED | tag)) ;	\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED))
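
/*
 * Editor's illustrative sketch, not part of the original header: tagging an
 * entry under the tree lock, then walking only the slots carrying that tag
 * (MY_TAG is a hypothetical tag index below RADIX_TREE_MAX_TAGS).
 *
 *	radix_tree_tag_set(&my_tree, index, MY_TAG);
 *
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, MY_TAG) {
 *		...slot is tagged with MY_TAG, its index is iter.index...
 *	}
 */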
#endif /* _LINUX_RADIX_TREE_H */