/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/*
 * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
 * than a data item) is signalled by the low bit set in the root->rnode
 * pointer.
 *
 * In this case root->height is > 0, but the indirect pointer tests are
 * needed for RCU lookups (because root->height is unreliable). The only
 * time callers need to worry about this is when doing a lookup_slot under
 * RCU.
 *
 * The indirect pointer is in fact also used to tag the last pointer of a
 * node when it is shrunk, before we RCU-free the node. See the shrink code
 * for details.
 */
#define RADIX_TREE_INDIRECT_PTR		1
/*
 * A common use of the radix tree is to store pointers to struct pages;
 * but shmem/tmpfs also needs to store swap entries in the same tree:
 * those are marked as exceptional entries to distinguish them.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2
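
/*
 * Illustrative sketch (not part of the API itself): a user such as shmem can
 * pack a small scalar into an exceptional entry by shifting it past the flag
 * bits, assuming the value fits into the remaining bits of an unsigned long:
 *
 *	entry = (void *)((val << RADIX_TREE_EXCEPTIONAL_SHIFT) |
 *			 RADIX_TREE_EXCEPTIONAL_ENTRY);
 *	val   = (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 */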

static inline int radix_tree_is_indirect_ptr(void *ptr)
{
	return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

#ifdef __KERNEL__
#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
#else
#define RADIX_TREE_MAP_SHIFT	3	/* For more stressful testing */
#endif

#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_TAG_LONGS	\
	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/* Height component in node->path */
#define RADIX_TREE_HEIGHT_SHIFT	(RADIX_TREE_MAX_PATH + 1)
#define RADIX_TREE_HEIGHT_MASK	((1UL << RADIX_TREE_HEIGHT_SHIFT) - 1)

/* Internally used bits of node->count */
#define RADIX_TREE_COUNT_SHIFT	(RADIX_TREE_MAP_SHIFT + 1)
#define RADIX_TREE_COUNT_MASK	((1UL << RADIX_TREE_COUNT_SHIFT) - 1)

struct radix_tree_node {
	unsigned int	path;	/* Offset in parent & height from the bottom */
	unsigned int	count;
	union {
		struct {
			/* Used when ascending tree */
			struct radix_tree_node *parent;
			/* For tree user */
			void *private_data;
		};
		/* Used when freeing node */
		struct rcu_head	rcu_head;
	};
	/* For tree user */
	struct list_head private_list;
	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};

/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
	unsigned int		height;
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};

#define RADIX_TREE_INIT(mask)	{					\
	.height = 0,							\
	.gfp_mask = (mask),						\
	.rnode = NULL,							\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(mask)

#define INIT_RADIX_TREE(root, mask)					\
do {									\
	(root)->height = 0;						\
	(root)->gfp_mask = (mask);					\
	(root)->rnode = NULL;						\
} while (0)

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronisation (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes must be completely managed by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions are able to be called locklessly, using RCU. The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions. Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and lifetimes
 * of the items. So if RCU lock-free lookups are used, typically this would mean
 * that the items have their own locks, or are amenable to lock-free access; and
 * that the items are freed by RCU (or only freed after having been deleted from
 * the radix tree *and* a synchronize_rcu() grace period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */
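
/*
 * Illustrative sketch of the rules above (not part of the API; "my_tree" and
 * "my_lock" are hypothetical, and any lock that excludes other writers would
 * do).  The reader is lockless under RCU, the writer excludes modifications:
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	rcu_read_unlock();
 *
 *	spin_lock(&my_lock);
 *	radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *
 * The lifetime of "item" must still be managed by the caller, as described
 * above.
 */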

/**
 * radix_tree_deref_slot - dereference a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference.  Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 */
static inline void *radix_tree_deref_slot(void **pslot)
{
	return rcu_dereference(*pslot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * Similar to radix_tree_deref_slot but only used during migration when a
 * page's mapping is being moved.  The caller does not hold the RCU read lock
 * but it must hold the tree lock to prevent parallel updates.
 */
static inline void *radix_tree_deref_slot_protected(void **pslot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry - check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
}
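
/*
 * Sketch of the lookup_slot + deref_retry pattern under RCU (illustrative
 * only; "my_tree" is a hypothetical root):
 *
 *	void **slot;
 *	void *item;
 *
 *	rcu_read_lock();
 * repeat:
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	item = slot ? radix_tree_deref_slot(slot) : NULL;
 *	if (radix_tree_deref_retry(item))
 *		goto repeat;
 *	rcu_read_unlock();
 */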

/**
 * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception - radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg &
		(RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
}

/**
 * radix_tree_replace_slot - replace item in a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
static inline void radix_tree_replace_slot(void **pslot, void *item)
{
	BUG_ON(radix_tree_is_indirect_ptr(item));
	rcu_assign_pointer(*pslot, item);
}

int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			struct radix_tree_node **nodep, void ***slotp);
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
bool __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
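
/*
 * Typical insertion sketch (illustrative; "my_tree" and "my_lock" are
 * hypothetical): preload node memory outside the lock so the insertion
 * itself cannot fail with -ENOMEM while the lock is held.  On success,
 * radix_tree_preload() returns with preemption disabled, which
 * radix_tree_preload_end() re-enables.
 *
 *	error = radix_tree_preload(GFP_KERNEL);
 *	if (!error) {
 *		spin_lock(&my_lock);
 *		error = radix_tree_insert(&my_tree, index, item);
 *		spin_unlock(&my_lock);
 *		radix_tree_preload_end();
 *	}
 */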

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	next-to-last index for this chunk
 * @tags:	bit-mask for tag-iterating
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
};

#define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG		0x0200	/* stop at first hole */

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized.  radix_tree_next_chunk() will fill
	 * it in the case of a successful tagged chunk lookup.  If the lookup
	 * was unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to the chunk's first slot, or NULL if there are no
 *		more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iteration (tags).
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline unsigned
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to the next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 */
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (likely(iter->tags & 1ul)) {
			iter->index++;
			return slot + 1;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset;
			iter->index += offset + 1;
			return slot + offset + 1;
		}
	} else {
		unsigned size = radix_tree_chunk_size(iter) - 1;

		while (size--) {
			slot++;
			iter->index++;
			if (likely(*slot))
				return slot;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;
}

/**
 * radix_tree_for_each_chunk - iterate over chunks
 *
 * @slot:	the void** variable for pointer to chunk first slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @flags:	RADIX_TREE_ITER_* and tag index
 *
 * Locks can be released and reacquired between iterations.
 */
#define radix_tree_for_each_chunk(slot, root, iter, start, flags)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     (slot = radix_tree_next_chunk(root, iter, flags)) ;)

/**
 * radix_tree_for_each_chunk_slot - iterate over slots in one chunk
 *
 * @slot:	the void** variable, at the beginning points to chunk first slot
 * @iter:	the struct radix_tree_iter pointer
 * @flags:	RADIX_TREE_ITER_*, should be constant
 *
 * This macro is designed to be nested inside radix_tree_for_each_chunk().
 * @slot points to the radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_chunk_slot(slot, iter, flags)		\
	for (; slot ; slot = radix_tree_next_slot(slot, iter, flags))
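
/*
 * Sketch of nested chunk iteration (illustrative; "my_tree" is hypothetical).
 * The outer loop fetches one leaf-node chunk at a time, the inner loop walks
 * the slots of that chunk; empty slots may still need to be skipped:
 *
 *	radix_tree_for_each_chunk(slot, &my_tree, &iter, start, 0) {
 *		radix_tree_for_each_chunk_slot(slot, &iter, 0) {
 *			void *item = radix_tree_deref_slot(slot);
 *
 *			if (!item)
 *				continue;
 *		}
 *	}
 *
 * Within the inner loop, iter.index holds the index of the current slot.
 */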

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))
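
/*
 * Typical use (sketch only; "my_tree" is a hypothetical root, and the caller
 * provides locking or RCU protection as described above):
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *item = radix_tree_deref_slot(slot);
 *		...
 *	}
 *
 * Each pass visits one non-empty slot; iter.index holds its index.
 */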

/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED))
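
/*
 * Tagged iteration sketch (illustrative; MY_TAG stands for a tag index below
 * RADIX_TREE_MAX_TAGS chosen by the tree's user, e.g. a dirty/writeback style
 * tag):
 *
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, start, MY_TAG) {
 *		void *item = radix_tree_deref_slot(slot);
 *		...
 *	}
 *
 * Only slots tagged with MY_TAG are visited.
 */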

#endif /* _LINUX_RADIX_TREE_H */