/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/*
 * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
 * than a data item) is signalled by the low bit set in the root->rnode
 * pointer.
 *
 * In this case root->height is > 0, but the indirect pointer tests are
 * needed for RCU lookups (because root->height is unreliable). The only
 * time callers need worry about this is when doing a lookup_slot under
 * RCU.
 *
 * The indirect pointer bit is also used to tag the last pointer of a node
 * when it is shrunk, before we rcu free the node. See shrink code for
 * details.
 */
#define RADIX_TREE_INDIRECT_PTR		1
/*
 * A common use of the radix tree is to store pointers to struct pages;
 * but shmem/tmpfs also needs to store swap entries in the same tree:
 * those are marked as exceptional entries to distinguish them.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2
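
/*
 * Illustrative sketch (not part of this header's API; names here are
 * hypothetical): a non-pointer value can be stored by shifting it past the
 * flag bits and setting the exceptional bit, roughly as shmem does for
 * swap entries:
 *
 *	void *entry = (void *)((value << RADIX_TREE_EXCEPTIONAL_SHIFT) |
 *			       RADIX_TREE_EXCEPTIONAL_ENTRY);
 *	unsigned long back = (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 */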

#define RADIX_DAX_MASK	0xf
#define RADIX_DAX_SHIFT	4
#define RADIX_DAX_PTE  (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_PMD  (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))
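
/*
 * Illustrative sketch (hypothetical usage, not part of the API): a DAX
 * entry packs a sector number above the flag bits, with the low bits
 * recording whether the entry maps a PTE- or PMD-sized range:
 *
 *	void *entry = RADIX_DAX_ENTRY(sector, 0);
 *	sector_t s = RADIX_DAX_SECTOR(entry);
 *	bool is_pmd = RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD;
 */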

static inline int radix_tree_is_indirect_ptr(void *ptr)
{
	return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

#ifdef __KERNEL__
#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
#else
#define RADIX_TREE_MAP_SHIFT	3	/* For more stressful testing */
#endif

#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_TAG_LONGS	\
	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/* Height component in node->path */
#define RADIX_TREE_HEIGHT_SHIFT	(RADIX_TREE_MAX_PATH + 1)
#define RADIX_TREE_HEIGHT_MASK	((1UL << RADIX_TREE_HEIGHT_SHIFT) - 1)

/* Internally used bits of node->count */
#define RADIX_TREE_COUNT_SHIFT	(RADIX_TREE_MAP_SHIFT + 1)
#define RADIX_TREE_COUNT_MASK	((1UL << RADIX_TREE_COUNT_SHIFT) - 1)

struct radix_tree_node {
	unsigned int	path;	/* Offset in parent & height from the bottom */
	unsigned int	count;
	union {
		struct {
			/* Used when ascending tree */
			struct radix_tree_node *parent;
			/* For tree user */
			void *private_data;
		};
		/* Used when freeing node */
		struct rcu_head	rcu_head;
	};
	/* For tree user */
	struct list_head private_list;
	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};

/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
	unsigned int		height;
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};

#define RADIX_TREE_INIT(mask)	{					\
	.height = 0,							\
	.gfp_mask = (mask),						\
	.rnode = NULL,							\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(mask)

#define INIT_RADIX_TREE(root, mask)					\
do {									\
	(root)->height = 0;						\
	(root)->gfp_mask = (mask);					\
	(root)->rnode = NULL;						\
} while (0)
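
/*
 * Illustrative sketch (hypothetical names): a tree can be defined and
 * initialised statically with RADIX_TREE(), or at run time with
 * INIT_RADIX_TREE():
 *
 *	static RADIX_TREE(my_tree, GFP_KERNEL);
 *
 *	struct radix_tree_root my_other_tree;
 *	INIT_RADIX_TREE(&my_other_tree, GFP_KERNEL);
 */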

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronisation (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes must be completely managed by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions are able to be called locklessly, using RCU. The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions. Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and lifetimes
 * of the items. So if RCU lock-free lookups are used, typically this would mean
 * that the items have their own locks, or are amenable to lock-free access; and
 * that the items are freed by RCU (or only freed after having been deleted from
 * the radix tree *and* a synchronize_rcu() grace period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held. Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values. If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */
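
/*
 * Illustrative sketch (hypothetical names): a lock-free lookup under RCU.
 * Insertions and deletions elsewhere are serialized by the caller's own
 * lock, and the items themselves must be RCU-protected:
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item)
 *		...
 *	rcu_read_unlock();
 */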

/**
 * radix_tree_deref_slot	- dereference a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference. Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 */
static inline void *radix_tree_deref_slot(void **pslot)
{
	return rcu_dereference(*pslot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * Similar to radix_tree_deref_slot but only used during migration when a
 * page's mapping is being moved. The caller does not hold the RCU read lock
 * but it must hold the tree lock to prevent parallel updates.
 */
static inline void *radix_tree_deref_slot_protected(void **pslot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry	- check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
}
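
/*
 * Illustrative sketch (hypothetical names): the lookup/deref/retry loop
 * used by RCU readers, e.g. in the page cache; the lookup is redone if the
 * tree was reorganized underneath us:
 *
 *	rcu_read_lock();
 * repeat:
 *	pslot = radix_tree_lookup_slot(&my_tree, index);
 *	if (pslot) {
 *		item = radix_tree_deref_slot(pslot);
 *		if (radix_tree_deref_retry(item))
 *			goto repeat;
 *	}
 *	rcu_read_unlock();
 */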

/**
 * radix_tree_exceptional_entry	- radix_tree_deref_slot gave exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception	- radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg &
			(RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
}

/**
 * radix_tree_replace_slot	- replace item in a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
static inline void radix_tree_replace_slot(void **pslot, void *item)
{
	BUG_ON(radix_tree_is_indirect_ptr(item));
	rcu_assign_pointer(*pslot, item);
}
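
/*
 * Illustrative sketch (hypothetical names): replacing an item in place,
 * with the tree write-locked by the caller's own spinlock:
 *
 *	spin_lock_irq(&my_lock);
 *	pslot = radix_tree_lookup_slot(&my_tree, index);
 *	if (pslot)
 *		radix_tree_replace_slot(pslot, new_item);
 *	spin_unlock_irq(&my_lock);
 */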

int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			struct radix_tree_node **nodep, void ***slotp);
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
bool __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
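
/*
 * Illustrative sketch (hypothetical names): a batched lookup of up to 16
 * items starting at a given index:
 *
 *	void *results[16];
 *	unsigned int nr;
 *
 *	rcu_read_lock();
 *	nr = radix_tree_gang_lookup(&my_tree, results, first_index, 16);
 *	rcu_read_unlock();
 */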

int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
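
/*
 * Illustrative sketch (hypothetical names): the preload idiom lets an
 * insertion proceed under a spinlock without an atomic allocation; on
 * success, radix_tree_preload() returns with preemption disabled, which
 * radix_tree_preload_end() re-enables:
 *
 *	error = radix_tree_preload(GFP_KERNEL);
 *	if (!error) {
 *		spin_lock(&my_lock);
 *		error = radix_tree_insert(&my_tree, index, item);
 *		spin_unlock(&my_lock);
 *		radix_tree_preload_end();
 *	}
 */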

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	next-to-last index for this chunk
 * @tags:	bit-mask for tag-iterating
 *
 * This radix tree iterator works in terms of "chunks" of slots. A chunk is a
 * subinterval of slots contained within one radix tree leaf node. It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size. For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
};

#define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG		0x0200	/* stop at first hole */

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup. If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index. It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iteration (tags).
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline unsigned
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 */
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (likely(iter->tags & 1ul)) {
			iter->index++;
			return slot + 1;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset;
			iter->index += offset + 1;
			return slot + offset + 1;
		}
	} else {
		unsigned size = radix_tree_chunk_size(iter) - 1;

		while (size--) {
			slot++;
			iter->index++;
			if (likely(*slot))
				return slot;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;
}

/**
 * radix_tree_for_each_chunk - iterate over chunks
 *
 * @slot:	the void** variable for pointer to chunk first slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @flags:	RADIX_TREE_ITER_* and tag index
 *
 * Locks can be released and reacquired between iterations.
 */
#define radix_tree_for_each_chunk(slot, root, iter, start, flags)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     (slot = radix_tree_next_chunk(root, iter, flags)) ;)

/**
 * radix_tree_for_each_chunk_slot - iterate over slots in one chunk
 *
 * @slot:	the void** variable, at the beginning points to chunk first slot
 * @iter:	the struct radix_tree_iter pointer
 * @flags:	RADIX_TREE_ITER_*, should be constant
 *
 * This macro is designed to be nested inside radix_tree_for_each_chunk().
 * @slot points to the radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_chunk_slot(slot, iter, flags) \
	for (; slot ; slot = radix_tree_next_slot(slot, iter, flags))
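
/*
 * Illustrative sketch (hypothetical names): nesting the two chunk macros;
 * a lock may be dropped and retaken between chunks:
 *
 *	radix_tree_for_each_chunk(slot, &my_tree, &iter, 0, 0) {
 *		radix_tree_for_each_chunk_slot(slot, &iter, 0) {
 *			void *item = radix_tree_deref_slot(slot);
 *			...
 *		}
 *	}
 */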

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))
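
/*
 * Illustrative sketch (hypothetical names): visiting every item under RCU;
 * a full user would also handle radix_tree_deref_retry():
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *item = radix_tree_deref_slot(slot);
 *		...
 *	}
 *	rcu_read_unlock();
 */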

/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED))
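
/*
 * Illustrative sketch (hypothetical names; PAGECACHE_TAG_DIRTY is the tag
 * the page cache uses for dirty pages): visiting only tagged slots:
 *
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0,
 *				   PAGECACHE_TAG_DIRTY) {
 *		void *item = radix_tree_deref_slot(slot);
 *		...
 *	}
 */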

#endif /* _LINUX_RADIX_TREE_H */