/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * Why is there no list_empty_rcu()?  Because list_empty() serves this
 * purpose.  The list_empty() function fetches the RCU-protected pointer
 * and compares it to the address of the list head, but neither dereferences
 * this pointer itself nor provides this pointer to the caller.  Therefore,
 * it is not necessary to use rcu_dereference(), so that list_empty() can
 * be used anywhere you would want to use a list_empty_rcu().
 */

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
	WRITE_ONCE(list->next, list);
	WRITE_ONCE(list->prev, list);
}

/*
 * Return the ->next pointer of a list_head in an rcu-safe way; we
 * must not access it directly.
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))

/**
 * list_tail_rcu - returns the prev pointer of the head of the list
 * @head: the head of the list
 *
 * Note: This should only be used with the list header, and even then
 * only if list_del() and similar primitives are not also used on the
 * list header.
 */
#define list_tail_rcu(head)	(*((struct list_head __rcu **)(&(head)->prev)))

/*
 * Check during list traversal that we are within an RCU reader
 */

#define check_arg_count_one(dummy)

#ifdef CONFIG_PROVE_RCU_LIST
#define __list_check_rcu(dummy, cond, extra...)				\
	({								\
	check_arg_count_one(extra);					\
	RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(),		\
			 "RCU-list traversed in non-reader section!");	\
	})
#else
#define __list_check_rcu(dummy, cond, extra...)				\
	({ check_arg_count_one(extra); })
#endif

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

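/*
 * Usage sketch for list_add_rcu() ("struct foo", "foo_list", and
 * "foo_lock" are hypothetical names, not part of this API):
 *
 *	struct foo {
 *		int data;
 *		struct list_head list;
 *	};
 *	static LIST_HEAD(foo_list);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	void foo_push(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);	// exclude other updaters
 *		list_add_rcu(&p->list, &foo_list);
 *		spin_unlock(&foo_lock);
 *	}
 */
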
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	entry->prev = LIST_POISON2;
}

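/*
 * Usage sketch for list_del_rcu() with deferred freeing ("struct foo"
 * with an embedded rcu_head and "foo_lock" are hypothetical, as above):
 *
 *	struct foo {
 *		int data;
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	void foo_remove(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&p->list);
 *		spin_unlock(&foo_lock);
 *		kfree_rcu(p, rcu);	// free only after a grace period
 *	}
 */
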
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node returns true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		WRITE_ONCE(n->pprev, NULL);
	}
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	old->prev = LIST_POISON2;
}

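/*
 * Usage sketch for list_replace_rcu() (hypothetical "struct foo" and
 * "foo_lock", as above).  The old entry may be freed only after a
 * grace period:
 *
 *	void foo_update(struct foo *old, struct foo *new)
 *	{
 *		spin_lock(&foo_lock);
 *		list_replace_rcu(&old->list, &new->list);
 *		spin_unlock(&foo_lock);
 *		kfree_rcu(old, rcu);
 *	}
 */
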
/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @prev: points to the last element of the existing list
 * @next: points to the first element of the existing list
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list.  In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created.  But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
					  struct list_head *prev,
					  struct list_head *next,
					  void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	/*
	 * "first" and "last" now track the source list's contents, so
	 * reinitialize the source list head.  RCU readers have access
	 * to this list, so we must use INIT_LIST_HEAD_RCU() instead
	 * of INIT_LIST_HEAD().
	 */

	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list.  Any new readers will see
	 * an empty list.
	 */

	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers.  Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = next;
	rcu_assign_pointer(list_next_rcu(prev), first);
	first->prev = prev;
	next->prev = last;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 *			  designed for stacks.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head, head->next, sync);
}

/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 *			       list, designed for queues.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
					     struct list_head *head,
					     void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head->prev, head, sync);
}

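/*
 * Usage sketch for list_splice_tail_init_rcu() (hypothetical
 * "global_list", "global_mutex", and "local_list").  A mutex rather
 * than a spinlock is used because the sync() callback blocks:
 *
 *	static LIST_HEAD(global_list);
 *	static DEFINE_MUTEX(global_mutex);
 *
 *	void publish_batch(struct list_head *local_list)
 *	{
 *		mutex_lock(&global_mutex);
 *		list_splice_tail_init_rcu(local_list, &global_list,
 *					  synchronize_rcu);
 *		mutex_unlock(&global_mutex);
 *	}
 */
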
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(READ_ONCE(ptr), type, member)

/*
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * Implementing those functions following their counterparts list_empty() and
 * list_first_entry() is not advisable because they lead to subtle race
 * conditions as the following snippet shows:
 *
 * if (!list_empty_rcu(mylist)) {
 *	struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *	do_something(bar);
 * }
 *
 * The list may not be empty when list_empty_rcu() checks it, but it may be
 * when list_first_entry_rcu() rereads the ->next pointer.
 *
 * Rereading the ->next pointer is not a problem for list_empty() and
 * list_first_entry() because they would be protected by a lock that blocks
 * writers.
 *
 * See list_first_or_null_rcu() for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})

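/*
 * Race-free version of the snippet shown above ("struct foo", "mylist",
 * "list_member", and do_something() as in that example):
 *
 *	struct foo *bar;
 *
 *	rcu_read_lock();
 *	bar = list_first_or_null_rcu(&mylist, struct foo, list_member);
 *	if (bar)
 *		do_something(bar);
 *	rcu_read_unlock();
 */
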
/**
 * list_next_or_null_rcu - get the next element from a list
 * @head: the head for the list.
 * @ptr: the list head to take the next element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
	struct list_head *__head = (head); \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__next != __head) ? list_entry_rcu(__next, type, \
						  member) : NULL; \
})

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 * @cond...: optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

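/*
 * Usage sketch for list_for_each_entry_rcu() (hypothetical "struct foo",
 * "foo_list", "foo_lock", and use(), as above):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(p, &foo_list, list)
 *		use(p->data);
 *	rcu_read_unlock();
 *
 * An updater already holding foo_lock can instead pass the optional
 * lockdep expression to keep CONFIG_PROVE_RCU_LIST quiet:
 *
 *	list_for_each_entry_rcu(p, &foo_list, list,
 *				lockdep_is_held(&foo_lock))
 *		use(p->data);
 */
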
/**
 * list_entry_lockless - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked.  Another example is when items are added to the list,
 * but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))READ_ONCE(ptr), type, member)

/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked.  Another example is when items are added to the list,
 * but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position which must have been in the list when the RCU read
 * lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_from_rcu() except
 * this starts after the given position and that one starts at the given
 * position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member)		\
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);					\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from_rcu - iterate over a list from current point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Iterate over the tail of a list starting from a given position,
 * which must have been in the list when the RCU read lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_continue_rcu() except
 * this starts from the given position and that one starts from the position
 * after the given position.
 */
#define list_for_each_entry_from_rcu(pos, head, member)			\
	for (; &(pos)->member != (head);				\
	     pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))

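/*
 * Usage sketch contrasting the two iterators above (hypothetical
 * "struct foo", "foo_list", and use(), as above).  The whole walk must
 * stay within the RCU read-side critical section in which @pos was
 * obtained:
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = list_first_or_null_rcu(&foo_list, struct foo, list);
 *	if (p) {
 *		// resumes *after* p; list_for_each_entry_from_rcu()
 *		// would instead start at p itself:
 *		list_for_each_entry_continue_rcu(p, &foo_list, list)
 *			use(p->data);
 *	}
 *	rcu_read_unlock();
 */
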
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	WRITE_ONCE(n->pprev, LIST_POISON2);
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	WRITE_ONCE(new->pprev, old->pprev);
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		WRITE_ONCE(new->next->pprev, &new->next);
	WRITE_ONCE(old->pprev, LIST_POISON2);
}

/*
 * Return the first or the next element in an RCU-protected hlist.
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	WRITE_ONCE(n->pprev, &h->first);
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		WRITE_ONCE(first->pprev, &n->next);
}

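/*
 * Usage sketch for hlist_add_head_rcu() with a hash table (hypothetical
 * "struct foo" with key and hlist_node fields, plus "foo_table",
 * "FOO_TABLE_SIZE", "foo_lock", and foo_hash()):
 *
 *	static struct hlist_head foo_table[FOO_TABLE_SIZE];
 *
 *	void foo_insert(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		hlist_add_head_rcu(&p->node, &foo_table[foo_hash(p->key)]);
 *		spin_unlock(&foo_lock);
 *	}
 */
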
/**
 * hlist_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *i, *last = NULL;

	/* Note: write side code, so rcu accessors are not needed. */
	for (i = h->first; i; i = i->next)
		last = i;

	if (last) {
		n->next = last->next;
		WRITE_ONCE(n->pprev, &last->next);
		rcu_assign_pointer(hlist_next_rcu(last), n);
	} else {
		hlist_add_head_rcu(n, h);
	}
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	WRITE_ONCE(n->pprev, next->pprev);
	n->next = next;
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	WRITE_ONCE(next->pprev, &n->next);
}

/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	n->next = prev->next;
	WRITE_ONCE(n->pprev, &prev->next);
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		WRITE_ONCE(n->next->pprev, &n->next);
}

#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 * @cond...: optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

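/*
 * Usage sketch for hlist_for_each_entry_rcu(): an RCU-protected hash
 * lookup (hypothetical "struct foo", "foo_table", and foo_hash(), as
 * above).  The caller must be in an RCU read-side critical section and
 * must hold a reference before leaving it if the entry is kept:
 *
 *	struct foo *foo_find(int key)
 *	{
 *		struct foo *p;
 *
 *		hlist_for_each_entry_rcu(p, &foo_table[foo_hash(key)], node)
 *			if (p->key == key)
 *				return p;
 *		return NULL;
 *	}
 */
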
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock_bh().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member)			\
	for (; pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))

#endif	/* __KERNEL__ */
#endif