/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/*
 * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
 * than a data item) is signalled by the low bit set in the root->rnode
 * pointer.
 *
 * In this case root->height is > 0, but the indirect pointer tests are
 * needed for RCU lookups (because root->height is unreliable). The only
 * time callers need worry about this is when doing a lookup_slot under
 * RCU.
 *
 * The indirect pointer is in fact also used to tag the last pointer of a
 * node when it is shrunk, before we RCU-free the node. See the shrink code
 * for details.
 */
#define RADIX_TREE_INDIRECT_PTR		1
/*
 * A common use of the radix tree is to store pointers to struct pages;
 * but shmem/tmpfs also needs to store swap entries in the same tree:
 * those are marked as exceptional entries to distinguish them.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2
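
/*
 * Illustrative sketch (not part of this API): an exceptional entry is built
 * by shifting the payload past the low flag bits and setting the
 * EXCEPTIONAL bit, so readers can tell it apart from an ordinary pointer.
 * The helper names below are hypothetical, for exposition only; shmem/tmpfs
 * keeps its own swap-entry encode/decode helpers.
 *
 *	static inline void *example_encode_exceptional(unsigned long value)
 *	{
 *		return (void *)((value << RADIX_TREE_EXCEPTIONAL_SHIFT) |
 *				RADIX_TREE_EXCEPTIONAL_ENTRY);
 *	}
 *
 *	static inline unsigned long example_decode_exceptional(void *entry)
 *	{
 *		return (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 *	}
 *
 * radix_tree_exceptional_entry() is then non-zero for such entries.
 */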

static inline int radix_tree_is_indirect_ptr(void *ptr)
{
	return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
	unsigned int		height;
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};

#define RADIX_TREE_INIT(mask)	{		\
	.height = 0,				\
	.gfp_mask = (mask),			\
	.rnode = NULL,				\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(mask)

#define INIT_RADIX_TREE(root, mask)		\
do {						\
	(root)->height = 0;			\
	(root)->gfp_mask = (mask);		\
	(root)->rnode = NULL;			\
} while (0)

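/*
 * Usage sketch: the initializers above are equivalent ways to obtain an
 * empty tree.  The tree names and GFP masks here are assumptions of the
 * example; pick the mask that suits the insertion context.
 *
 *	static RADIX_TREE(my_tree, GFP_ATOMIC);		// file-scope definition
 *
 *	struct radix_tree_root dyn_tree;
 *	INIT_RADIX_TREE(&dyn_tree, GFP_KERNEL);		// run-time initialization
 */
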
/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronisation (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes must be completely managed by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 7 functions are able to be called locklessly, using RCU. The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions. Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and lifetimes
 * of the items. So if RCU lock-free lookups are used, typically this would mean
 * that the items have their own locks, or are amenable to lock-free access; and
 * that the items are freed by RCU (or only freed after having been deleted from
 * the radix tree *and* a synchronize_rcu() grace period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held. Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values. If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */

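/*
 * Sketch of the lockless read side described above (illustrative only; the
 * tree name, index and item handling are assumptions of the example).
 * Page-cache lookups follow this pattern.
 *
 *	void *item;
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item) {
 *		// item must itself be protected by RCU or its own lock
 *	}
 *	rcu_read_unlock();
 *
 * Writers, by contrast, must be serialised by the caller, for example with
 * a spinlock guarding the tree.
 */
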
/**
 * radix_tree_deref_slot - dereference a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference. Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 */
static inline void *radix_tree_deref_slot(void **pslot)
{
	return rcu_dereference(*pslot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * Similar to radix_tree_deref_slot but only used during migration when a
 * page's mapping is being moved. The caller does not hold the RCU read lock
 * but it must hold the tree lock to prevent parallel updates.
 */
static inline void *radix_tree_deref_slot_protected(void **pslot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry - check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
}

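/*
 * Sketch of the retry pattern for RCU readers (illustrative; the tree, slot
 * and index names are assumptions of the example).  If the tree is shrunk
 * concurrently, radix_tree_deref_slot() can return an indirect pointer,
 * which radix_tree_deref_retry() detects so the lookup can be restarted.
 *
 *	void **slot;
 *	void *item;
 *
 *	rcu_read_lock();
 * repeat:
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	item = slot ? radix_tree_deref_slot(slot) : NULL;
 *	if (radix_tree_deref_retry(item))
 *		goto repeat;
 *	// use item, which is still subject to the lifetime rules above
 *	rcu_read_unlock();
 */
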
/**
 * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception - radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg &
		(RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
}

/**
 * radix_tree_replace_slot - replace item in a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
static inline void radix_tree_replace_slot(void **pslot, void *item)
{
	BUG_ON(radix_tree_is_indirect_ptr(item));
	rcu_assign_pointer(*pslot, item);
}
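
/*
 * Usage sketch for radix_tree_replace_slot() (illustrative; the lock, tree
 * and item names are assumptions of the example).  The tree must be write
 * locked by the caller across both the slot lookup and the replacement.
 *
 *	spin_lock(&tree_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(slot, new_item);
 *	spin_unlock(&tree_lock);
 */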

int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
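
/*
 * Sketch of the preload + insert pattern (illustrative; the lock, tree and
 * item names are assumptions of the example).  radix_tree_preload() may
 * sleep, so it is called before taking the lock; it returns with preemption
 * disabled, which radix_tree_preload_end() re-enables.
 *
 *	error = radix_tree_preload(GFP_KERNEL);
 *	if (!error) {
 *		spin_lock(&tree_lock);
 *		error = radix_tree_insert(&my_tree, index, item);
 *		spin_unlock(&tree_lock);
 *		radix_tree_preload_end();
 *	}
 */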

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	next-to-last index for this chunk
 * @tags:	bit-mask for tag-iterating
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
};

#define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG		0x0200	/* stop at first hole */

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup. If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * Also it fills @iter with data about chunk: position in the tree (index),
 * its end (next_index), and constructs a bit mask for tagged iterating (tags).
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline unsigned
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 */
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (likely(iter->tags & 1ul)) {
			iter->index++;
			return slot + 1;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset;
			iter->index += offset + 1;
			return slot + offset + 1;
		}
	} else {
		unsigned size = radix_tree_chunk_size(iter) - 1;

		while (size--) {
			slot++;
			iter->index++;
			if (likely(*slot))
				return slot;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;
}

/**
 * radix_tree_for_each_chunk - iterate over chunks
 *
 * @slot:	the void** variable for pointer to chunk first slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @flags:	RADIX_TREE_ITER_* and tag index
 *
 * Locks can be released and reacquired between iterations.
 */
#define radix_tree_for_each_chunk(slot, root, iter, start, flags)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     (slot = radix_tree_next_chunk(root, iter, flags)) ;)

/**
 * radix_tree_for_each_chunk_slot - iterate over slots in one chunk
 *
 * @slot:	the void** variable, at the beginning points to chunk first slot
 * @iter:	the struct radix_tree_iter pointer
 * @flags:	RADIX_TREE_ITER_*, should be constant
 *
 * This macro is designed to be nested inside radix_tree_for_each_chunk().
 * @slot points to the radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_chunk_slot(slot, iter, flags)		\
	for (; slot ; slot = radix_tree_next_slot(slot, iter, flags))
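
/*
 * Sketch of nested chunk iteration (illustrative; the tree name and the
 * per-item work are assumptions of the example).  The outer loop walks one
 * leaf-node chunk at a time, so a lock guarding the tree could be dropped
 * and retaken between chunks if needed.
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	radix_tree_for_each_chunk(slot, &my_tree, &iter, 0, 0) {
 *		radix_tree_for_each_chunk_slot(slot, &iter, 0) {
 *			void *item = radix_tree_deref_slot(slot);
 *			// process item at index iter.index
 *		}
 *	}
 */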

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))
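
/*
 * Sketch of a full-tree walk with radix_tree_for_each_slot() (illustrative;
 * the tree name is an assumption of the example).  The walk follows the
 * usual reader rules: either hold the tree lock, or run under
 * rcu_read_lock() and re-check dereferenced entries, restarting on a
 * transient indirect pointer as shown.
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	rcu_read_lock();
 * restart:
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *item = radix_tree_deref_slot(slot);
 *		if (unlikely(!item))
 *			continue;
 *		if (radix_tree_deref_retry(item))
 *			goto restart;
 *		// use item stored at index iter.index
 *	}
 *	rcu_read_unlock();
 */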

/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_TAGGED | tag)) ;	\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED))

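/*
 * Sketch of tagged iteration (illustrative; the tree name, lock and tag
 * value are assumptions of the example).  Only slots carrying the given tag
 * are visited; tag indices must be below RADIX_TREE_MAX_TAGS.  With the
 * tree write locked, radix_tree_deref_retry() checking is not needed.
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	spin_lock(&tree_lock);
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, 1) {
 *		void *item = radix_tree_deref_slot(slot);
 *		// item at index iter.index has tag 1 set
 *	}
 *	spin_unlock(&tree_lock);
 */
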
#endif /* _LINUX_RADIX_TREE_H */