#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * Why is there no list_empty_rcu()? Because list_empty() serves this
 * purpose. The list_empty() function fetches the RCU-protected pointer
 * and compares it to the address of the list head, but neither dereferences
 * this pointer itself nor provides this pointer to the caller. Therefore,
 * it is not necessary to use rcu_dereference(), so that list_empty() can
 * be used anywhere you would want to use a list_empty_rcu().
 */

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
        WRITE_ONCE(list->next, list);
        WRITE_ONCE(list->prev, list);
}

/*
 * Return the ->next pointer of a list_head in an RCU-safe way; we
 * must not access it directly.
 */
#define list_next_rcu(list)     (*((struct list_head __rcu **)(&(list)->next)))

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
                struct list_head *prev, struct list_head *next)
{
        if (!__list_add_valid(new, prev, next))
                return;

        new->next = next;
        new->prev = prev;
        rcu_assign_pointer(list_next_rcu(prev), new);
        next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}
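
/*
 * Example (illustrative sketch, not part of this API): updaters must
 * serialize with each other, typically via a lock, even though readers
 * need only rcu_read_lock().  The struct foo, foo_list, and foo_lock
 * names below are hypothetical and used only for illustration.
 *
 *      struct foo {
 *              struct list_head list;
 *              int data;
 *      };
 *      static LIST_HEAD(foo_list);
 *      static DEFINE_SPINLOCK(foo_lock);
 *
 *      static void foo_push(struct foo *p)
 *      {
 *              spin_lock(&foo_lock);
 *              list_add_rcu(&p->list, &foo_list);
 *              spin_unlock(&foo_lock);
 *      }
 */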

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                        struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lock-free traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del_entry(entry);
        entry->prev = LIST_POISON2;
}
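
/*
 * Example (illustrative sketch): removing an element and deferring the
 * free until a grace period has elapsed.  The struct foo and foo_lock
 * names are hypothetical; kfree_rcu() requires a struct rcu_head
 * embedded in the enclosing structure.
 *
 *      struct foo {
 *              struct list_head list;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_del(struct foo *p)
 *      {
 *              spin_lock(&foo_lock);
 *              list_del_rcu(&p->list);
 *              spin_unlock(&foo_lock);
 *              kfree_rcu(p, rcu);
 *      }
 *
 * Concurrent readers may still be traversing the element when
 * list_del_rcu() returns, so the memory must not be reused until
 * after the grace period.
 */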

/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on the node returns true after this. It is
 * useful for RCU-based read lock-free traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we cannot poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
        if (!hlist_unhashed(n)) {
                __hlist_del(n);
                n->pprev = NULL;
        }
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
                                struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        rcu_assign_pointer(list_next_rcu(new->prev), new);
        new->next->prev = new;
        old->prev = LIST_POISON2;
}
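
/*
 * Example (illustrative sketch): list_replace_rcu() enables the classic
 * RCU copy-update pattern, in which an element is updated by replacing
 * it with a modified copy.  The struct foo, foo_lock, and field names
 * are hypothetical, as in the earlier examples.
 *
 *      static void foo_update(struct foo *old, int new_data)
 *      {
 *              struct foo *new = kmalloc(sizeof(*new), GFP_KERNEL);
 *
 *              if (!new)
 *                      return;
 *              spin_lock(&foo_lock);
 *              *new = *old;
 *              new->data = new_data;
 *              list_replace_rcu(&old->list, &new->list);
 *              spin_unlock(&foo_lock);
 *              kfree_rcu(old, rcu);
 *      }
 */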

/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @prev: points to the last element of the existing list
 * @next: points to the first element of the existing list
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list. In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created. But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
                                          struct list_head *prev,
                                          struct list_head *next,
                                          void (*sync)(void))
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;

        /*
         * "first" and "last" now track the source list's contents, so
         * it is safe to reinitialize the list head. RCU readers have
         * access to this list, so we must use INIT_LIST_HEAD_RCU()
         * instead of INIT_LIST_HEAD().
         */

        INIT_LIST_HEAD_RCU(list);

        /*
         * At this point, the list body still points to the source list.
         * Wait for any readers to finish using the list before splicing
         * the list body into the new list. Any new readers will see
         * an empty list.
         */

        sync();

        /*
         * Readers are finished with the source list, so perform splice.
         * The order is important if the new list is global and accessible
         * to concurrent RCU readers. Note that RCU readers are not
         * permitted to traverse the prev pointers without excluding
         * this function.
         */

        last->next = next;
        rcu_assign_pointer(list_next_rcu(prev), first);
        first->prev = prev;
        next->prev = last;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 *                        designed for stacks.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
                                        struct list_head *head,
                                        void (*sync)(void))
{
        if (!list_empty(list))
                __list_splice_init_rcu(list, head, head->next, sync);
}

/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 *                             list, designed for queues.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
                                             struct list_head *head,
                                             void (*sync)(void))
{
        if (!list_empty(list))
                __list_splice_init_rcu(list, head->prev, head, sync);
}
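
/*
 * Example (illustrative sketch): draining a producer list into a
 * consumer-private list.  Because @sync is invoked internally, this
 * must run in a context that can block.  The pending_list and
 * local_list names are hypothetical.
 *
 *      static LIST_HEAD(pending_list);
 *
 *      static void drain_pending(void)
 *      {
 *              LIST_HEAD(local_list);
 *
 *              list_splice_init_rcu(&pending_list, &local_list,
 *                                   synchronize_rcu);
 *      }
 *
 * Note that the caller must still exclude other updaters of
 * pending_list for the full duration of the splice.
 */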

/**
 * list_entry_rcu - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
        container_of(lockless_dereference(ptr), type, member)

/**
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * Implementing those functions following their counterparts list_empty() and
 * list_first_entry() is not advisable because they lead to subtle race
 * conditions as the following snippet shows:
 *
 * if (!list_empty_rcu(mylist)) {
 *      struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *      do_something(bar);
 * }
 *
 * The list may not be empty when list_empty_rcu() checks it, but it may be
 * when list_first_entry_rcu() rereads the ->next pointer.
 *
 * Rereading the ->next pointer is not a problem for list_empty() and
 * list_first_entry() because they would be protected by a lock that blocks
 * writers.
 *
 * See list_first_or_null_rcu() for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
        struct list_head *__ptr = (ptr); \
        struct list_head *__next = READ_ONCE(__ptr->next); \
        likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
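
/*
 * Example (illustrative sketch): safely peeking at the head of an
 * RCU-protected list, avoiding the list_empty_rcu()/list_first_entry_rcu()
 * race described above.  The struct foo and foo_list names are
 * hypothetical.
 *
 *      rcu_read_lock();
 *      bar = list_first_or_null_rcu(&foo_list, struct foo, list);
 *      if (bar)
 *              do_something(bar);
 *      rcu_read_unlock();
 */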

/**
 * list_next_or_null_rcu - get the next element from a list
 * @head: the head for the list.
 * @ptr: the list head to take the next element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
        struct list_head *__head = (head); \
        struct list_head *__ptr = (ptr); \
        struct list_head *__next = READ_ONCE(__ptr->next); \
        likely(__next != __head) ? list_entry_rcu(__next, type, \
                                                  member) : NULL; \
})

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
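
/*
 * Example (illustrative sketch): a reader-side lookup.  The traversal
 * must stay inside the rcu_read_lock()/rcu_read_unlock() pair, and the
 * element must not be used after rcu_read_unlock() unless a reference
 * has been taken.  The struct foo, foo_list, and field names are
 * hypothetical.
 *
 *      static int foo_find(int key)
 *      {
 *              struct foo *p;
 *              int ret = -1;
 *
 *              rcu_read_lock();
 *              list_for_each_entry_rcu(p, &foo_list, list) {
 *                      if (p->key == key) {
 *                              ret = p->data;
 *                              break;
 *                      }
 *              }
 *              rcu_read_unlock();
 *              return ret;
 *      }
 */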

/**
 * list_entry_lockless - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends). Another
 * example is when items are added to the list, but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
        container_of((typeof(ptr))lockless_dereference(ptr), type, member)

/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends). Another
 * example is when items are added to the list, but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
        for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = list_entry_lockless(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member) \
        for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lock-free traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                        struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
        if (next)
                new->next->pprev = &new->next;
        old->pprev = LIST_POISON2;
}

/*
 * Return the first or the next element in an RCU-protected hlist.
 */
#define hlist_first_rcu(head)   (*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)    (*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)   (*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                        struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        n->pprev = &h->first;
        rcu_assign_pointer(hlist_first_rcu(h), n);
        if (first)
                first->pprev = &n->next;
}
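
/*
 * Example (illustrative sketch): inserting into one bucket of an
 * RCU-protected hash table.  The foo_table, foo_hash(), and foo_lock
 * names are hypothetical, and struct foo is assumed to embed a
 * struct hlist_node named "node".
 *
 *      #define FOO_TABLE_SIZE 64
 *      static struct hlist_head foo_table[FOO_TABLE_SIZE];
 *
 *      static void foo_insert(struct foo *p)
 *      {
 *              spin_lock(&foo_lock);
 *              hlist_add_head_rcu(&p->node, &foo_table[foo_hash(p->key)]);
 *              spin_unlock(&foo_lock);
 *      }
 */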

/**
 * hlist_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
                                      struct hlist_head *h)
{
        struct hlist_node *i, *last = NULL;

        /* Note: write side code, so rcu accessors are not needed. */
        for (i = h->first; i; i = i->next)
                last = i;

        if (last) {
                n->next = last->next;
                n->pprev = &last->next;
                rcu_assign_pointer(hlist_next_rcu(last), n);
        } else {
                hlist_add_head_rcu(n, h);
        }
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        rcu_assign_pointer(hlist_pprev_rcu(n), n);
        next->pprev = &n->next;
}

/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
                                        struct hlist_node *prev)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        rcu_assign_pointer(hlist_next_rcu(prev), n);
        if (n->next)
                n->next->pprev = &n->next;
}

#define __hlist_for_each_rcu(pos, head) \
        for (pos = rcu_dereference(hlist_first_rcu(head)); \
             pos; \
             pos = rcu_dereference(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member) \
        for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
                        typeof(*(pos)), member); \
             pos; \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))
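
/*
 * Example (illustrative sketch): a reader-side hash-table lookup using
 * the hypothetical foo_table, foo_hash(), and "node" member introduced
 * in the hlist_add_head_rcu() example above.  The data is copied out
 * before rcu_read_unlock(); a real lookup that returns the element
 * would typically take a reference count first.
 *
 *      static bool foo_lookup(int key, int *data)
 *      {
 *              struct foo *p;
 *              bool found = false;
 *
 *              rcu_read_lock();
 *              hlist_for_each_entry_rcu(p, &foo_table[foo_hash(key)], node) {
 *                      if (p->key == key) {
 *                              *data = p->data;
 *                              found = true;
 *                              break;
 *                      }
 *              }
 *              rcu_read_unlock();
 *              return found;
 *      }
 */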

/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
        for (pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
                        typeof(*(pos)), member); \
             pos; \
             pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock_bh().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member) \
        for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
                        typeof(*(pos)), member); \
             pos; \
             pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member) \
        for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member); \
             pos; \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
        for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member); \
             pos; \
             pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member) \
        for (; pos; \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))

#endif  /* __KERNEL__ */
#endif