/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_XARRAY_H
#define _LINUX_XARRAY_H
/*
 * eXtensible Arrays
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 *
 * See Documentation/core-api/xarray.rst for how to use the XArray.
 */

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * The bottom two bits of the entry determine how the XArray interprets
 * the contents:
 *
 * 00: Pointer entry
 * 10: Internal entry
 * x1: Value entry or tagged pointer
 *
 * Attempting to store internal entries in the XArray is a bug.
 *
 * Most internal entries are pointers to the next node in the tree.
 * The following internal entries have a special meaning:
 *
 * 0-62: Sibling entries
 * 256: Zero entry
 * 257: Retry entry
 *
 * Errors are also represented as internal entries, but use the negative
 * space (-4094 to -2).  They're never stored in the slots array; only
 * returned by the normal API.
 */

#define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)
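
/*
 * Worked example of the encoding above (illustrative sketch, not part
 * of the original header): on a 64-bit machine,
 *
 *	xa_mk_value(2)    == (void *)0x5;	(2 << 1 | 1: value entry)
 *	xa_mk_internal(2) == (void *)0xa;	(2 << 2 | 2: internal entry)
 *
 * while a pointer returned by kmalloc() is at least 4-byte aligned, so
 * its bottom two bits are 00 and it is stored as a plain pointer entry.
 */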
/**
 * xa_mk_value() - Create an XArray entry from an integer.
 * @v: Value to store in XArray.
 *
 * Context: Any context.
 * Return: An entry suitable for storing in the XArray.
 */
static inline void *xa_mk_value(unsigned long v)
{
	WARN_ON((long)v < 0);
	return (void *)((v << 1) | 1);
}

/**
 * xa_to_value() - Get value stored in an XArray entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value stored in the XArray entry.
 */
static inline unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

/**
 * xa_is_value() - Determine if an entry is a value.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: True if the entry is a value, false if it is a pointer.
 */
static inline bool xa_is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}
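
/*
 * Example (illustrative only): value entries round-trip through the
 * helpers above.
 *
 *	void *entry = xa_mk_value(42);
 *
 *	if (xa_is_value(entry))
 *		pr_info("stored %lu\n", xa_to_value(entry));
 *
 * This prints "stored 42"; the integer never touches an allocation.
 */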
/**
 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
 * @p: Plain pointer.
 * @tag: Tag value (0, 1 or 3).
 *
 * If the user of the XArray prefers, they can tag their pointers instead
 * of storing value entries.  Three tags are available (0, 1 and 3).
 * These are distinct from the xa_mark_t as they are not replicated up
 * through the array and cannot be searched for.
 *
 * Context: Any context.
 * Return: An XArray entry.
 */
static inline void *xa_tag_pointer(void *p, unsigned long tag)
{
	return (void *)((unsigned long)p | tag);
}

/**
 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the untagged version of the pointer.
 *
 * Context: Any context.
 * Return: A pointer.
 */
static inline void *xa_untag_pointer(void *entry)
{
	return (void *)((unsigned long)entry & ~3UL);
}

/**
 * xa_pointer_tag() - Get the tag stored in an XArray entry.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the tag of that pointer.
 *
 * Context: Any context.
 * Return: A tag.
 */
static inline unsigned int xa_pointer_tag(void *entry)
{
	return (unsigned long)entry & 3UL;
}
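
/*
 * Example (illustrative only): @obj is a hypothetical pointer with at
 * least 4-byte alignment.
 *
 *	void *entry = xa_tag_pointer(obj, 1);
 *
 *	BUG_ON(xa_pointer_tag(entry) != 1);
 *	BUG_ON(xa_untag_pointer(entry) != obj);
 */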
/*
 * xa_mk_internal() - Create an internal entry.
 * @v: Value to turn into an internal entry.
 *
 * Internal entries are used for a number of purposes.  Entries 0-255 are
 * used for sibling entries (only 0-62 are used by the current code).  256
 * is used for the retry entry.  257 is used for the reserved / zero entry.
 * Negative internal entries are used to represent errnos.  Node pointers
 * are also tagged as internal entries in some situations.
 *
 * Context: Any context.
 * Return: An XArray internal entry corresponding to this value.
 */
static inline void *xa_mk_internal(unsigned long v)
{
	return (void *)((v << 2) | 2);
}

/*
 * xa_to_internal() - Extract the value from an internal entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value which was stored in the internal entry.
 */
static inline unsigned long xa_to_internal(const void *entry)
{
	return (unsigned long)entry >> 2;
}

/*
 * xa_is_internal() - Is the entry an internal entry?
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: %true if the entry is an internal entry.
 */
static inline bool xa_is_internal(const void *entry)
{
	return ((unsigned long)entry & 3) == 2;
}

#define XA_ZERO_ENTRY		xa_mk_internal(257)

/**
 * xa_is_zero() - Is the entry a zero entry?
 * @entry: Entry retrieved from the XArray
 *
 * The normal API will return NULL as the contents of a slot containing
 * a zero entry.  You can only see zero entries by using the advanced API.
 *
 * Return: %true if the entry is a zero entry.
 */
static inline bool xa_is_zero(const void *entry)
{
	return unlikely(entry == XA_ZERO_ENTRY);
}

/**
 * xa_is_err() - Report whether an XArray operation returned an error
 * @entry: Result from calling an XArray function
 *
 * If an XArray operation cannot complete an operation, it will return
 * a special value indicating an error.  This function tells you
 * whether an error occurred; xa_err() tells you which error occurred.
 *
 * Context: Any context.
 * Return: %true if the entry indicates an error.
 */
static inline bool xa_is_err(const void *entry)
{
	return unlikely(xa_is_internal(entry) &&
			entry >= xa_mk_internal(-MAX_ERRNO));
}

/**
 * xa_err() - Turn an XArray result into an errno.
 * @entry: Result from calling an XArray function.
 *
 * If an XArray operation cannot complete an operation, it will return
 * a special pointer value which encodes an errno.  This function extracts
 * the errno from the pointer value, or returns 0 if the pointer does not
 * represent an errno.
 *
 * Context: Any context.
 * Return: A negative errno or 0.
 */
static inline int xa_err(void *entry)
{
	/* xa_to_internal() would not do sign extension. */
	if (xa_is_err(entry))
		return (long)entry >> 2;
	return 0;
}
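
/*
 * Example (illustrative only): a typical error check after a store.
 * @array, @index and @item are hypothetical.
 *
 *	void *curr = xa_store(&array, index, item, GFP_KERNEL);
 *
 *	if (xa_err(curr))
 *		return xa_err(curr);
 */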
/**
 * struct xa_limit - Represents a range of IDs.
 * @min: The lowest ID to allocate (inclusive).
 * @max: The maximum ID to allocate (inclusive).
 *
 * This structure is used either directly or via the XA_LIMIT() macro
 * to communicate the range of IDs that are valid for allocation.
 * Two common ranges are predefined for you:
 * * xa_limit_32b	- [0 - UINT_MAX]
 * * xa_limit_31b	- [0 - INT_MAX]
 */
struct xa_limit {
	u32 max;
	u32 min;
};

#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }

#define xa_limit_32b	XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b	XA_LIMIT(0, INT_MAX)
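
/*
 * Example (illustrative only): limiting allocations to IDs 1-255.
 *
 *	struct xa_limit limit = XA_LIMIT(1, 255);
 */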
typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0		((__force xa_mark_t)0U)
#define XA_MARK_1		((__force xa_mark_t)1U)
#define XA_MARK_2		((__force xa_mark_t)2U)
#define XA_PRESENT		((__force xa_mark_t)8U)
#define XA_MARK_MAX		XA_MARK_2
#define XA_FREE_MARK		XA_MARK_0

enum xa_lock_type {
	XA_LOCK_IRQ = 1,
	XA_LOCK_BH = 2,
};

/*
 * Values for xa_flags.  The radix tree stores its GFP flags in the xa_flags,
 * and we remain compatible with that.
 */
#define XA_FLAGS_LOCK_IRQ	((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
#define XA_FLAGS_TRACK_FREE	((__force gfp_t)4U)
#define XA_FLAGS_ZERO_BUSY	((__force gfp_t)8U)
#define XA_FLAGS_ALLOC_WRAPPED	((__force gfp_t)16U)
#define XA_FLAGS_ACCOUNT	((__force gfp_t)32U)
#define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
						(__force unsigned)(mark)))

/* ALLOC is for a normal 0-based alloc.  ALLOC1 is for an 1-based alloc */
#define XA_FLAGS_ALLOC	(XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
#define XA_FLAGS_ALLOC1	(XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)

/**
 * struct xarray - The anchor of the XArray.
 * @xa_lock: Lock that protects the contents of the XArray.
 *
 * To use the xarray, define it statically or embed it in your data structure.
 * It is a very small data structure, so it does not usually make sense to
 * allocate it separately and keep a pointer to it in your data structure.
 *
 * You may use the xa_lock to protect your own data structures as well.
 */
/*
 * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
 * If the only non-NULL entry in the array is at index 0, @xa_head is that
 * entry.  If any other entry in the array is non-NULL, @xa_head points
 * to an @xa_node.
 */
struct xarray {
	spinlock_t	xa_lock;
/* private: The rest of the data structure is not to be used directly. */
	gfp_t		xa_flags;
	void __rcu *	xa_head;
};

#define XARRAY_INIT(name, flags) {				\
	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),		\
	.xa_flags = flags,					\
	.xa_head = NULL,					\
}

/**
 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
 * @name: A string that names your XArray.
 * @flags: XA_FLAG values.
 *
 * This is intended for file scope definitions of XArrays.  It declares
 * and initialises an empty XArray with the chosen name and flags.  It is
 * equivalent to calling xa_init_flags() on the array, but it does the
 * initialisation at compiletime instead of runtime.
 */
#define DEFINE_XARRAY_FLAGS(name, flags)				\
	struct xarray name = XARRAY_INIT(name, flags)

/**
 * DEFINE_XARRAY() - Define an XArray.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of XArrays.  It declares
 * and initialises an empty XArray with the chosen name.  It is equivalent
 * to calling xa_init() on the array, but it does the initialisation at
 * compiletime instead of runtime.
 */
#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)

/**
 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)

/**
 * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
			void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
void *xa_find(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
void *xa_find_after(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
			unsigned long max, unsigned int n, xa_mark_t);
void xa_destroy(struct xarray *);

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (eg you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;
}

/**
 * xa_init() - Initialise an empty XArray.
 * @xa: XArray.
 *
 * An empty XArray is full of NULL entries.
 *
 * Context: Any context.
 */
static inline void xa_init(struct xarray *xa)
{
	xa_init_flags(xa, 0);
}

/**
 * xa_empty() - Determine if an array has any present entries.
 * @xa: XArray.
 *
 * Context: Any context.
 * Return: %true if the array contains only NULL pointers.
 */
static inline bool xa_empty(const struct xarray *xa)
{
	return xa->xa_head == NULL;
}

/**
 * xa_marked() - Inquire whether any entry in this array has a mark set
 * @xa: Array
 * @mark: Mark value
 *
 * Context: Any context.
 * Return: %true if any entry has this mark set.
 */
static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
{
	return xa->xa_flags & XA_FLAGS_MARK(mark);
}

/**
 * xa_for_each_start() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index.  You may modify @index during the iteration if you
 * want to skip or reprocess indices.  It is safe to modify the array
 * during the iteration.  At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n).  You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_start() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_start().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 */
#define xa_for_each_start(xa, index, entry, start)			\
	for (index = start,						\
	     entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);	\
	     entry;							\
	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))

/**
 * xa_for_each() - Iterate over present entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index.  You may modify @index during the iteration if you want
 * to skip or reprocess indices.  It is safe to modify the array during the
 * iteration.  At the end of the iteration, @entry will be set to NULL and
 * @index will have a value less than or equal to max.
 *
 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n).  You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).  xa_for_each()
 * will spin if it hits a retry entry; if you intend to see retry entries,
 * you should use the xas_for_each() iterator instead.  The xas_for_each()
 * iterator will expand into more inline code than xa_for_each().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 */
#define xa_for_each(xa, index, entry) \
	xa_for_each_start(xa, index, entry, 0)
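
/*
 * Example (illustrative only): walking every present entry.  @array
 * and struct item are hypothetical.
 *
 *	unsigned long index;
 *	struct item *item;
 *
 *	xa_for_each(&array, index, item)
 *		pr_info("entry at %lu\n", index);
 */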
/**
 * xa_for_each_marked() - Iterate over marked entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @filter: Selection criterion.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index.  The iteration will skip all entries in the array
 * which do not match @filter.  You may modify @index during the iteration
 * if you want to skip or reprocess indices.  It is safe to modify the array
 * during the iteration.  At the end of the iteration, @entry will be set to
 * NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
 * You have to handle your own locking with xas_for_each(), and if you have
 * to unlock after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each_marked() iterator
 * instead.  The xas_for_each_marked() iterator will expand into more inline
 * code than xa_for_each_marked().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 */
#define xa_for_each_marked(xa, index, entry, filter) \
	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
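
/*
 * Example (illustrative only): visiting only entries with XA_MARK_0
 * set; process() is a hypothetical helper.
 *
 *	unsigned long index;
 *	void *entry;
 *
 *	xa_for_each_marked(&array, index, entry, XA_MARK_0)
 *		process(entry);
 */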
#define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
#define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
#define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
				spin_unlock_irqrestore(&(xa)->xa_lock, flags)

/*
 * Versions of the normal API which require the caller to hold the
 * xa_lock.  If the GFP flags allow it, they will drop the lock to
 * allocate memory, then reacquire it afterwards.  These functions
 * may also re-enable interrupts if the XArray flags indicate the
 * locking should be interrupt safe.
 */
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
		void *entry, gfp_t);
int __must_check __xa_insert(struct xarray *, unsigned long index,
		void *entry, gfp_t);
int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
		struct xa_limit, gfp_t);
int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
		struct xa_limit, u32 *next, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);

/**
 * xa_store_bh() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock_bh(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_store_irq() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock_irq(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_bh(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_bh(xa);

	return entry;
}

/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_irq(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_irq(xa);

	return entry;
}

/**
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange was successful.
 *
 * Context: Any context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock(xa);

	return curr;
}
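
/*
 * Example (illustrative only): replace an entry only if it has not
 * changed since we loaded it.  @new_item and handle_race() are
 * hypothetical.
 *
 *	void *old = xa_load(&array, index);
 *	void *curr = xa_cmpxchg(&array, index, old, new_item, GFP_KERNEL);
 *
 *	if (curr != old)
 *		handle_race(curr);
 *
 * Remember that @curr may also be an xa_err() value if allocation failed.
 */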
/**
 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock_bh(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock_irq(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_insert() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock.  May sleep if
 * the @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	xa_lock(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock(xa);

	return err;
}

/**
 * xa_insert_bh() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_bh(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	xa_lock_bh(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_insert_irq() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_irq(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	xa_lock_irq(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context.  Takes and releases the xa_lock.  May sleep if
 * the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	xa_lock(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock(xa);

	return err;
}
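
/*
 * Example (illustrative only): allocating an ID for a hypothetical
 * @thing in an array defined with DEFINE_XARRAY_ALLOC().
 *
 *	u32 id;
 *	int err = xa_alloc(&things, &id, thing, xa_limit_31b, GFP_KERNEL);
 *
 *	if (err)
 *		return err;
 *	thing->id = id;
 */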
/**
 * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	xa_lock_bh(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	xa_lock_irq(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Any context.  Takes and releases the xa_lock.  May sleep if
 * the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	xa_lock(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock(xa);

	return err;
}
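
/*
 * Example (illustrative only): cyclic allocation remembers @next
 * across calls so recently-freed IDs are not immediately reused.
 *
 *	static u32 next;
 *	u32 id;
 *	int err = xa_alloc_cyclic(&array, &id, entry, xa_limit_31b,
 *				  &next, GFP_KERNEL);
 *
 *	if (err < 0)
 *		return err;
 *
 * A return of 1 is not an error; it only means the search wrapped.
 */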
/**
 * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	xa_lock_bh(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	xa_lock_irq(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_reserve() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing.  If there was nothing there, the entry is marked as reserved.
 * Loading from a reserved entry returns a %NULL pointer.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_bh() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * A softirq-disabling version of xa_reserve().
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_irq() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * An interrupt-disabling version of xa_reserve().
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_release() - Release a reserved entry.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After calling xa_reserve(), you can call this function to release the
 * reservation.  If the entry at @index has been stored to, this function
 * will do nothing.
 */
static inline void xa_release(struct xarray *xa, unsigned long index)
{
	xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
}
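
/*
 * Example (illustrative only): reserve an index before dropping into a
 * context where allocation is not possible, then use or release it.
 * @used is a hypothetical condition.
 *
 *	if (xa_reserve(&array, index, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	if (used)
 *		xa_store(&array, index, item, GFP_KERNEL);
 *	else
 *		xa_release(&array, index);
 */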
/* Everything below here is the Advanced API.  Proceed with caution. */

/*
 * The xarray is constructed out of a set of 'chunks' of pointers.  Choosing
 * the best chunk size requires some tradeoffs.  A power of two recommends
 * itself so that we can walk the tree based purely on shifts and masks.
 * Generally, the larger the better; as the number of slots per level of the
 * tree increases, the less tall the tree needs to be.  But that needs to be
 * balanced against the memory consumption of each node.  On a 64-bit system,
 * xa_node is currently 576 bytes, and we get 7 of them per 4kB page.  If we
 * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
 */
#ifndef XA_CHUNK_SHIFT
#define XA_CHUNK_SHIFT		(CONFIG_BASE_SMALL ? 4 : 6)
#endif
#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
#define XA_MAX_MARKS		3
#define XA_MARK_LONGS		DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is a value entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @nr_values is the count of every element in ->slots which is
 * either a value entry or a sibling of a value entry.
 */
struct xa_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	nr_values;	/* Value entry count */
	struct xa_node __rcu *parent;	/* NULL at top of tree */
	struct xarray	*array;		/* The array we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[XA_CHUNK_SIZE];
	union {
		unsigned long	tags[XA_MAX_MARKS][XA_MARK_LONGS];
		unsigned long	marks[XA_MAX_MARKS][XA_MARK_LONGS];
	};
};

void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);

#ifdef XA_DEBUG
#define XA_BUG_ON(xa, x) do {					\
		if (x) {					\
			xa_dump(xa);				\
			BUG();					\
		}						\
	} while (0)
#define XA_NODE_BUG_ON(node, x) do {				\
		if (x) {					\
			if (node) xa_dump_node(node);		\
			BUG();					\
		}						\
	} while (0)
#else
#define XA_BUG_ON(xa, x)	do { } while (0)
#define XA_NODE_BUG_ON(node, x)	do { } while (0)
#endif

/* Private */
static inline void *xa_head(const struct xarray *xa)
{
	return rcu_dereference_check(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_head_locked(const struct xarray *xa)
{
	return rcu_dereference_protected(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_check(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry_locked(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_protected(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_check(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_protected(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_mk_node(const struct xa_node *node)
{
	return (void *)((unsigned long)node | 2);
}

/* Private */
static inline struct xa_node *xa_to_node(const void *entry)
{
	return (struct xa_node *)((unsigned long)entry - 2);
}

/* Private */
static inline bool xa_is_node(const void *entry)
{
	return xa_is_internal(entry) && (unsigned long)entry > 4096;
}

/* Private */
static inline void *xa_mk_sibling(unsigned int offset)
{
	return xa_mk_internal(offset);
}

/* Private */
static inline unsigned long xa_to_sibling(const void *entry)
{
	return xa_to_internal(entry);
}

/**
 * xa_is_sibling() - Is the entry a sibling entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a sibling entry.
 */
static inline bool xa_is_sibling(const void *entry)
{
	return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}

#define XA_RETRY_ENTRY		xa_mk_internal(256)

/**
 * xa_is_retry() - Is the entry a retry entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a retry entry.
 */
static inline bool xa_is_retry(const void *entry)
{
	return unlikely(entry == XA_RETRY_ENTRY);
}

/**
 * xa_is_advanced() - Is the entry only permitted for the advanced API?
 * @entry: Entry to be stored in the XArray.
 *
 * Return: %true if the entry cannot be stored by the normal API.
 */
static inline bool xa_is_advanced(const void *entry)
{
	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
}

/**
 * typedef xa_update_node_t - A callback function from the XArray.
 * @node: The node which is being processed
 *
 * This function is called every time the XArray updates the count of
 * present and value entries in a node.  It allows advanced users to
 * maintain the private_list in the node.
 *
 * Context: The xa_lock is held and interrupts may be disabled.
 *	    Implementations should not drop the xa_lock, nor re-enable
 *	    interrupts.
 */
typedef void (*xa_update_node_t)(struct xa_node *node);

/*
 * The xa_state is opaque to its users.  It contains various different pieces
 * of state involved in the current operation on the XArray.  It should be
 * declared on the stack and passed between the various internal routines.
 * The various elements in it should not be accessed directly, but only
 * through the provided accessor functions.  The below documentation is for
 * the benefit of those working on the code, not for users of the XArray.
 *
 * @xa_node usually points to the xa_node containing the slot we're operating
 * on (and @xa_offset is the offset in the slots array).  If there is a
 * single entry in the array at index 0, there are no allocated xa_nodes to
 * point to, and so we store %NULL in @xa_node.  @xa_node is set to
 * the value %XAS_RESTART if the xa_state is not walked to the correct
 * position in the tree of nodes for this operation.  If an error occurs
 * during an operation, it is set to an %XAS_ERROR value.  If we run off the
 * end of the allocated nodes, it is set to %XAS_BOUNDS.
 */
struct xa_state {
	struct xarray *xa;
	unsigned long xa_index;
	unsigned char xa_shift;
	unsigned char xa_sibs;
	unsigned char xa_offset;
	unsigned char xa_pad;		/* Helps gcc generate better code */
	struct xa_node *xa_node;
	struct xa_node *xa_alloc;
	xa_update_node_t xa_update;
};

/*
 * We encode errnos in the xas->xa_node.  If an error has happened, we need to
 * drop the lock to fix it, and once we've done so the xa_state is invalid.
 */
#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
#define XAS_BOUNDS	((struct xa_node *)1UL)
#define XAS_RESTART	((struct xa_node *)3UL)

#define __XA_STATE(array, index, shift, sibs)  {	\
	.xa = array,					\
	.xa_index = index,				\
	.xa_shift = shift,				\
	.xa_sibs = sibs,				\
	.xa_offset = 0,					\
	.xa_pad = 0,					\
	.xa_node = XAS_RESTART,				\
	.xa_alloc = NULL,				\
	.xa_update = NULL				\
}

/**
 * XA_STATE() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 *
 * Declare and initialise an xa_state on the stack.
 */
#define XA_STATE(name, array, index)				\
	struct xa_state name = __XA_STATE(array, index, 0, 0)
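
/*
 * Example (illustrative only): a typical advanced-API walk.  It assumes
 * the xas_for_each() iterator defined later in this header.
 *
 *	XA_STATE(xas, &array, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		...
 *	}
 *	rcu_read_unlock();
 */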
1314 | /** | |
1315 | * XA_STATE_ORDER() - Declare an XArray operation state. | |
1316 | * @name: Name of this operation state (usually xas). | |
1317 | * @array: Array to operate on. | |
1318 | * @index: Initial index of interest. | |
1319 | * @order: Order of entry. | |
1320 | * | |
1321 | * Declare and initialise an xa_state on the stack. This variant of | |
1322 | * XA_STATE() allows you to specify the 'order' of the element you | |
1323 | * want to operate on.` | |
1324 | */ | |
1325 | #define XA_STATE_ORDER(name, array, index, order) \ | |
1326 | struct xa_state name = __XA_STATE(array, \ | |
1327 | (index >> order) << order, \ | |
1328 | order - (order % XA_CHUNK_SHIFT), \ | |
1329 | (1U << (order % XA_CHUNK_SHIFT)) - 1) | |
1330 | ||
1331 | #define xas_marked(xas, mark) xa_marked((xas)->xa, (mark)) | |
1332 | #define xas_trylock(xas) xa_trylock((xas)->xa) | |
1333 | #define xas_lock(xas) xa_lock((xas)->xa) | |
1334 | #define xas_unlock(xas) xa_unlock((xas)->xa) | |
1335 | #define xas_lock_bh(xas) xa_lock_bh((xas)->xa) | |
1336 | #define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa) | |
1337 | #define xas_lock_irq(xas) xa_lock_irq((xas)->xa) | |
1338 | #define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa) | |
1339 | #define xas_lock_irqsave(xas, flags) \ | |
1340 | xa_lock_irqsave((xas)->xa, flags) | |
1341 | #define xas_unlock_irqrestore(xas, flags) \ | |
1342 | xa_unlock_irqrestore((xas)->xa, flags) | |
1343 | ||
/**
 * xas_error() - Return an errno stored in the xa_state.
 * @xas: XArray operation state.
 *
 * Return: 0 if no error has been noted. A negative errno if one has.
 */
static inline int xas_error(const struct xa_state *xas)
{
	return xa_err(xas->xa_node);
}

/**
 * xas_set_err() - Note an error in the xa_state.
 * @xas: XArray operation state.
 * @err: Negative error number.
 *
 * Only call this function with a negative @err; zero or positive errors
 * will probably not behave the way you think they should. If you want
 * to clear the error from an xa_state, use xas_reset().
 */
static inline void xas_set_err(struct xa_state *xas, long err)
{
	xas->xa_node = XA_ERROR(err);
}

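/*
 * Example (an illustrative sketch, not part of the upstream header):
 * noting and reading back an error on an xa_state. The helper name
 * example_note_error() is invented for this demonstration.
 */
static inline int example_note_error(struct xa_state *xas, long err)
{
	if (err < 0)
		xas_set_err(xas, err);	/* only negative errnos are valid */
	return xas_error(xas);		/* 0, or the errno noted above */
}
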
/**
 * xas_invalid() - Is the xas in a retry or error state?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas cannot be used for operations.
 */
static inline bool xas_invalid(const struct xa_state *xas)
{
	return (unsigned long)xas->xa_node & 3;
}

/**
 * xas_valid() - Is the xas a valid cursor into the array?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas can be used for operations.
 */
static inline bool xas_valid(const struct xa_state *xas)
{
	return !xas_invalid(xas);
}

/**
 * xas_is_node() - Does the xas point to a node?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas currently references a node.
 */
static inline bool xas_is_node(const struct xa_state *xas)
{
	return xas_valid(xas) && xas->xa_node;
}

/* True if the pointer is something other than a node */
static inline bool xas_not_node(struct xa_node *node)
{
	return ((unsigned long)node & 3) || !node;
}

/* True if the node represents RESTART or an error */
static inline bool xas_frozen(struct xa_node *node)
{
	return (unsigned long)node & 2;
}

/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
	return node <= XAS_RESTART;
}

/**
 * xas_reset() - Reset an XArray operation state.
 * @xas: XArray operation state.
 *
 * Resets the error or walk state of the @xas so future walks of the
 * array will start from the root. Use this if you have dropped the
 * xarray lock and want to reuse the xa_state.
 *
 * Context: Any context.
 */
static inline void xas_reset(struct xa_state *xas)
{
	xas->xa_node = XAS_RESTART;
}

/**
 * xas_retry() - Retry the operation if appropriate.
 * @xas: XArray operation state.
 * @entry: Entry from xarray.
 *
 * The advanced functions may sometimes return an internal entry, such as
 * a retry entry or a zero entry. This function sets up the @xas to restart
 * the walk from the head of the array if needed.
 *
 * Context: Any context.
 * Return: true if the operation needs to be retried.
 */
static inline bool xas_retry(struct xa_state *xas, const void *entry)
{
	if (xa_is_zero(entry))
		return true;
	if (!xa_is_retry(entry))
		return false;
	xas_reset(xas);
	return true;
}

void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);
void *xas_find_conflict(struct xa_state *);

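/*
 * Example (an illustrative sketch, not part of the upstream header):
 * an RCU lookup which consumes retry entries, combining XA_STATE(),
 * xas_load() and xas_retry(). Zero entries are converted to NULL
 * before the retry check so the loop terminates. example_load() is
 * an invented name.
 */
static inline void *example_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;	/* reserved but empty */
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
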
bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);
void xas_pause(struct xa_state *);

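/*
 * Example (a hedged sketch of the pattern described in
 * Documentation/core-api/xarray.rst): a store which drops the lock and
 * retries after xas_nomem() has allocated the memory the previous
 * attempt needed. example_store() is an invented name.
 */
static inline int example_store(struct xarray *xa, unsigned long index,
		void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* true if it allocated */

	return xas_error(&xas);
}
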
void xas_create_range(struct xa_state *);

/**
 * xas_reload() - Refetch an entry from the xarray.
 * @xas: XArray operation state.
 *
 * Use this function to check that a previously loaded entry still has
 * the same value. This is useful for the lockless pagecache lookup where
 * we walk the array with only the RCU lock to protect us, lock the page,
 * then check that the page hasn't moved since we looked it up.
 *
 * The caller guarantees that @xas is still valid. If it may be in an
 * error or restart state, call xas_load() instead.
 *
 * Return: The entry at this location in the xarray.
 */
static inline void *xas_reload(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (node)
		return xa_entry(xas->xa, node, xas->xa_offset);
	return xa_head(xas->xa);
}

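/*
 * Example (an illustrative sketch of the lookup-lock-recheck pattern
 * described above; struct example_obj and example_lookup() are
 * invented, and internal entries are simply treated as a miss).
 * On success the object is returned with its lock held.
 */
struct example_obj {
	spinlock_t lock;
};

static inline struct example_obj *example_lookup(struct xarray *xa,
		unsigned long index)
{
	XA_STATE(xas, xa, index);
	struct example_obj *obj;

	rcu_read_lock();
	obj = xas_load(&xas);
	if (obj && !xa_is_internal(obj)) {
		spin_lock(&obj->lock);
		if (xas_reload(&xas) != obj) {	/* moved since we loaded it? */
			spin_unlock(&obj->lock);
			obj = NULL;
		}
	} else {
		obj = NULL;
	}
	rcu_read_unlock();

	return obj;
}
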
/**
 * xas_set() - Set up XArray operation state for a different index.
 * @xas: XArray operation state.
 * @index: New index into the XArray.
 *
 * Move the operation state to refer to a different index. This will
 * have the effect of starting a walk from the top; see xas_next()
 * to move to an adjacent index.
 */
static inline void xas_set(struct xa_state *xas, unsigned long index)
{
	xas->xa_index = index;
	xas->xa_node = XAS_RESTART;
}

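/*
 * Example (a hypothetical sketch): reusing one xa_state to look up two
 * unrelated indices; retry handling is omitted for brevity.
 * example_load_either() is an invented name.
 */
static inline void *example_load_either(struct xarray *xa,
		unsigned long a, unsigned long b)
{
	XA_STATE(xas, xa, a);
	void *entry;

	rcu_read_lock();
	entry = xas_load(&xas);
	if (!entry) {
		xas_set(&xas, b);	/* the next walk restarts from the top */
		entry = xas_load(&xas);
	}
	rcu_read_unlock();

	return entry;
}
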
/**
 * xas_set_order() - Set up XArray operation state for a multislot entry.
 * @xas: XArray operation state.
 * @index: Target of the operation.
 * @order: Entry occupies 2^@order indices.
 */
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
		unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	xas->xa_node = XAS_RESTART;
#else
	BUG_ON(order > 0);
	xas_set(xas, index);
#endif
}

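/*
 * Example (a hypothetical sketch): preparing a store which occupies
 * indices 16-19 (order 2). Assumes CONFIG_XARRAY_MULTI for orders
 * above 0; allocation failures are left for the caller to handle via
 * xas_nomem(). example_store_order2() is an invented name.
 */
static inline void example_store_order2(struct xa_state *xas, void *entry)
{
	xas_set_order(xas, 17, 2);	/* the index rounds down to 16 */
	xas_lock(xas);
	xas_store(xas, entry);		/* one entry covering 16-19 */
	xas_unlock(xas);
}
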
/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
	xas->xa_update = update;
}

/**
 * xas_next_entry() - Advance iterator to next present entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * xas_next_entry() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find(), and will call xas_find()
 * for all the hard cases.
 *
 * Return: The next present entry after the one currently referred to by @xas.
 */
static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
{
	struct xa_node *node = xas->xa_node;
	void *entry;

	if (unlikely(xas_not_node(node) || node->shift ||
			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
		return xas_find(xas, max);

	do {
		if (unlikely(xas->xa_index >= max))
			return xas_find(xas, max);
		if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
			return xas_find(xas, max);
		entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
		if (unlikely(xa_is_internal(entry)))
			return xas_find(xas, max);
		xas->xa_offset++;
		xas->xa_index++;
	} while (!entry);

	return entry;
}

/* Private */
static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
		xa_mark_t mark)
{
	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
	unsigned int offset = xas->xa_offset;

	if (advance)
		offset++;
	if (XA_CHUNK_SIZE == BITS_PER_LONG) {
		if (offset < XA_CHUNK_SIZE) {
			unsigned long data = *addr & (~0UL << offset);
			if (data)
				return __ffs(data);
		}
		return XA_CHUNK_SIZE;
	}

	return find_next_bit(addr, XA_CHUNK_SIZE, offset);
}

/**
 * xas_next_marked() - Advance iterator to next marked entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark to search for.
 *
 * xas_next_marked() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find_marked(), and will call
 * xas_find_marked() for all the hard cases.
 *
 * Return: The next marked entry after the one currently referred to by @xas.
 */
static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
		xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	void *entry;
	unsigned int offset;

	if (unlikely(xas_not_node(node) || node->shift))
		return xas_find_marked(xas, max, mark);
	offset = xas_find_chunk(xas, true, mark);
	xas->xa_offset = offset;
	xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
	if (xas->xa_index > max)
		return NULL;
	if (offset == XA_CHUNK_SIZE)
		return xas_find_marked(xas, max, mark);
	entry = xa_entry(xas->xa, node, offset);
	if (!entry)
		return xas_find_marked(xas, max, mark);
	return entry;
}

/*
 * If iterating while holding a lock, drop the lock and reschedule
 * every %XA_CHECK_SCHED loops.
 */
enum {
	XA_CHECK_SCHED = 4096,
};

/**
 * xas_for_each() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 *
 * The loop body will be executed for each entry present in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each(xas, entry, max) \
	for (entry = xas_find(xas, max); entry; \
	     entry = xas_next_entry(xas, max))

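/*
 * Example (a hedged sketch): erasing every entry while periodically
 * dropping the lock, as the %XA_CHECK_SCHED comment above suggests;
 * a real caller might also reschedule while the lock is dropped.
 * example_erase_all() is an invented name.
 */
static inline void example_erase_all(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned int i = 0;
	void *entry;

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, NULL);		/* erase this entry */
		if (++i % XA_CHECK_SCHED)
			continue;
		xas_pause(&xas);		/* make dropping the lock safe */
		xas_unlock(&xas);
		xas_lock(&xas);
	}
	xas_unlock(&xas);
}
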
/**
 * xas_for_each_marked() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 * @mark: Mark to search for.
 *
 * The loop body will be executed for each marked entry in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each_marked(xas, entry, max, mark) \
	for (entry = xas_find_marked(xas, max, mark); entry; \
	     entry = xas_next_marked(xas, max, mark))

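/*
 * Example (a hedged sketch): counting the entries which have XA_MARK_0
 * set, under the RCU read lock. example_count_marked() is an invented
 * name.
 */
static inline unsigned long example_count_marked(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long count = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();

	return count;
}
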
/**
 * xas_for_each_conflict() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 *
 * The loop body will be executed for each entry in the XArray that lies
 * within the range specified by @xas. If the loop completes successfully,
 * any entries that lie in this range will be replaced by @entry. The caller
 * may break out of the loop; if they do so, the contents of the XArray will
 * be unchanged. The operation may fail due to an out of memory condition.
 * The caller may also call xas_set_err() to exit the loop while setting an
 * error to record the reason.
 */
#define xas_for_each_conflict(xas, entry) \
	while ((entry = xas_find_conflict(xas)))

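/*
 * Example (an illustrative sketch): storing @entry only if the range
 * is empty. Noting an error makes xas_find_conflict() return NULL,
 * which ends the loop with the array unchanged. example_insert() is
 * an invented name and assumes errno constants are visible here.
 */
static inline int example_insert(struct xarray *xa, unsigned long index,
		void *entry)
{
	XA_STATE(xas, xa, index);
	void *curr;

	do {
		xas_lock(&xas);
		xas_for_each_conflict(&xas, curr)
			xas_set_err(&xas, -EEXIST);	/* already occupied */
		if (!xas_error(&xas))
			xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}
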
void *__xas_next(struct xa_state *);
void *__xas_prev(struct xa_state *);

/**
 * xas_prev() - Move iterator to previous index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL. If the @xas has never been walked,
 * it will have the effect of calling xas_load(). Otherwise one will be
 * subtracted from the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index 0, this function wraps
 * around to %ULONG_MAX.
 *
 * Return: The entry at the new index. This may be %NULL or an internal
 * entry.
 */
static inline void *xas_prev(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == 0))
		return __xas_prev(xas);

	xas->xa_index--;
	xas->xa_offset--;
	return xa_entry(xas->xa, node, xas->xa_offset);
}

/**
 * xas_next() - Move state to next index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL. If the @xas has never been walked,
 * it will have the effect of calling xas_load(). Otherwise one will be
 * added to the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index %ULONG_MAX, this function wraps
 * around to 0.
 *
 * Return: The entry at the new index. This may be %NULL or an internal
 * entry.
 */
static inline void *xas_next(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == XA_CHUNK_MASK))
		return __xas_next(xas);

	xas->xa_index++;
	xas->xa_offset++;
	return xa_entry(xas->xa, node, xas->xa_offset);
}

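/*
 * Example (a hedged sketch): visiting each index in the half-open
 * window [first, last) with xas_next(), including empty slots.
 * Internal entries are skipped with xas_retry(); example_walk_window()
 * is an invented name.
 */
static inline void example_walk_window(struct xarray *xa,
		unsigned long first, unsigned long last)
{
	XA_STATE(xas, xa, first);
	void *entry;

	rcu_read_lock();
	for (entry = xas_load(&xas); xas.xa_index < last;
	     entry = xas_next(&xas)) {
		if (xas_retry(&xas, entry))
			continue;
		/* process @entry for index xas.xa_index here */
	}
	rcu_read_unlock();
}
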
#endif /* _LINUX_XARRAY_H */