/*
 * Copyright (c) 2016-2018  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>

#include "atomlist.h"

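/* Implementation note: the atomptr_t values used throughout this file come
 * from atomlist.h; they pack an item pointer together with a low-order
 * marker bit.  atomptr_i() wraps an item pointer, atomptr_p() /
 * atomlist_itemp() / atomsort_itemp() unwrap it, and atomptr_l() tests the
 * ATOMPTR_LOCK marker, which flags an item that is in the middle of being
 * deleted.
 */
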
void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item)
{
        atomptr_t prevval;
        atomptr_t i = atomptr_i(item);

        atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

        /* updating ->last is possible here, but makes the code considerably
         * more complicated... let's not.
         */
        prevval = ATOMPTR_NULL;
        item->next = ATOMPTR_NULL;

        /* head-insert atomically
         * release barrier: item + item->next writes must be completed
         */
        while (!atomic_compare_exchange_weak_explicit(&h->first, &prevval, i,
                        memory_order_release, memory_order_relaxed))
                atomic_store_explicit(&item->next, prevval,
                                memory_order_relaxed);
}

void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item)
{
        atomptr_t prevval = ATOMPTR_NULL;
        atomptr_t i = atomptr_i(item);
        atomptr_t hint;
        struct atomlist_item *prevptr;
        _Atomic atomptr_t *prev;

        item->next = ATOMPTR_NULL;

        atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

        /* place new item into ->last
         * release: item writes completed; acquire: DD barrier on hint
         */
        hint = atomic_exchange_explicit(&h->last, i, memory_order_acq_rel);

        while (1) {
                if (atomptr_p(hint) == NULL)
                        prev = &h->first;
                else
                        prev = &atomlist_itemp(hint)->next;

                do {
                        prevval = atomic_load_explicit(prev,
                                        memory_order_consume);
                        prevptr = atomlist_itemp(prevval);
                        if (prevptr == NULL)
                                break;

                        prev = &prevptr->next;
                } while (prevptr);

                /* last item is being deleted - start over */
                if (atomptr_l(prevval)) {
                        hint = ATOMPTR_NULL;
                        continue;
                }

                /* no barrier - item->next is NULL and was so in xchg above */
                if (!atomic_compare_exchange_strong_explicit(prev, &prevval, i,
                                memory_order_consume,
                                memory_order_consume)) {
                        hint = prevval;
                        continue;
                }
                break;
        }
}

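/* Usage sketch (not part of this file; struct and variable names are
 * hypothetical): callers normally use the typesafe wrappers generated by
 * DECLARE_ATOMLIST() in atomlist.h, but the raw calls above work roughly
 * like this:
 *
 *      struct mystruct {
 *              struct atomlist_item node;
 *              int value;
 *      };
 *      struct atomlist_head head = {};
 *      struct mystruct *a = ..., *b = ...;
 *
 *      atomlist_add_head(&head, &a->node);     push "a" at the front
 *      atomlist_add_tail(&head, &b->node);     append "b" at the back
 *
 * An item must be linked on the list at most once; inserting an item that
 * is already on the list is a caller bug.
 */
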
static void atomlist_del_core(struct atomlist_head *h,
                              struct atomlist_item *item,
                              _Atomic atomptr_t *hint,
                              atomptr_t next)
{
        _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
        atomptr_t prevval, updval;
        struct atomlist_item *prevptr;

        /* drop us off "last" if needed.  no r/w to barrier. */
        prevval = atomptr_i(item);
        atomic_compare_exchange_strong_explicit(&h->last, &prevval,
                        ATOMPTR_NULL,
                        memory_order_relaxed, memory_order_relaxed);

        atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

        /* the following code should be identical (except sort<>list) to
         * atomsort_del_core()
         */
        while (1) {
                upd = NULL;
                updval = ATOMPTR_LOCK;

                do {
                        prevval = atomic_load_explicit(prev,
                                        memory_order_consume);

                        /* track the beginning of a chain of deleted items
                         * this is necessary to make this lock-free; we can
                         * complete deletions started by other threads.
                         */
                        if (!atomptr_l(prevval)) {
                                updval = prevval;
                                upd = prev;
                        }

                        prevptr = atomlist_itemp(prevval);
                        if (prevptr == item)
                                break;

                        prev = &prevptr->next;
                } while (prevptr);

                if (prevptr != item)
                        /* another thread completed our deletion */
                        return;

                if (!upd || atomptr_l(updval)) {
                        /* failed to find non-deleted predecessor...
                         * have to try again
                         */
                        prev = &h->first;
                        continue;
                }

                if (!atomic_compare_exchange_strong_explicit(upd, &updval,
                                next, memory_order_consume,
                                memory_order_consume)) {
                        /* prev doesn't point to item anymore, something
                         * was inserted.  continue at same position forward.
                         */
                        continue;
                }
                break;
        }
}

void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
                       _Atomic atomptr_t *hint)
{
        atomptr_t next;

        /* mark ourselves in-delete - full barrier */
        next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
                        memory_order_acquire);
        assert(!atomptr_l(next));       /* delete race on same item */

        atomlist_del_core(h, item, hint, next);
}

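/* Note on "hint": the hint passed to atomlist_del_hint() is a pointer into
 * the list (a predecessor's ->next), or NULL to start from &h->first; the
 * item must still be reachable from it.  It only shortens the unlink search
 * in atomlist_del_core() and does not change the result.
 */
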
struct atomlist_item *atomlist_pop(struct atomlist_head *h)
{
        struct atomlist_item *item;
        atomptr_t next;

        /* grab the head of the list - no matter what, the head is where we
         * start deleting, because either it's our item, or it's some
         * delete-marked items and then our item.
         */
        next = atomic_load_explicit(&h->first, memory_order_consume);

        do {
                item = atomlist_itemp(next);
                if (!item)
                        return NULL;

                /* try to mark deletion */
                next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
                                memory_order_acquire);

        } while (atomptr_l(next));
        /* if the loop repeats: delete race on the same item (another pop or
         * del) => proceed to the next item.
         * once the loop exits: we have our item selected and marked.
         */
        atomlist_del_core(h, item, &h->first, next);
        return item;
}

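/* Usage sketch (not part of this file; struct and variable names are
 * hypothetical): atomlist_pop() returns the embedded atomlist_item, so
 * callers typically recover their own structure with container_of():
 *
 *      struct atomlist_item *ai;
 *
 *      while ((ai = atomlist_pop(&head))) {
 *              struct mystruct *m = container_of(ai, struct mystruct, node);
 *              ...
 *      }
 *
 * Freeing a popped (or deleted) item is only safe once no other thread can
 * still be traversing the list through it.
 */
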
struct atomsort_item *atomsort_add(struct atomsort_head *h,
                struct atomsort_item *item, int (*cmpfn)(
                        const struct atomsort_item *,
                        const struct atomsort_item *))
{
        _Atomic atomptr_t *prev;
        atomptr_t prevval;
        atomptr_t i = atomptr_i(item);
        struct atomsort_item *previtem;
        int cmpval;

        do {
                prev = &h->first;

                do {
                        prevval = atomic_load_explicit(prev,
                                        memory_order_acquire);
                        previtem = atomptr_p(prevval);

                        if (!previtem || (cmpval = cmpfn(previtem, item)) > 0)
                                break;
                        if (cmpval == 0)
                                return previtem;

                        prev = &previtem->next;
                } while (1);

                if (atomptr_l(prevval))
                        continue;

                item->next = prevval;
                if (atomic_compare_exchange_strong_explicit(prev, &prevval, i,
                                memory_order_release, memory_order_relaxed))
                        break;
        } while (1);

        atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
        return NULL;
}

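/* Usage sketch (not part of this file; struct and field names are
 * hypothetical): atomsort_add() keeps the list ordered by cmpfn and, as
 * coded above, returns the already-present item instead of inserting a
 * duplicate when cmpfn() reports equality (NULL on successful insert).
 * A comparison function might look like:
 *
 *      static int mysort_cmp(const struct atomsort_item *a,
 *                            const struct atomsort_item *b)
 *      {
 *              const struct mystruct *ma, *mb;
 *
 *              ma = container_of(a, struct mystruct, sortitem);
 *              mb = container_of(b, struct mystruct, sortitem);
 *              return (ma->key > mb->key) - (ma->key < mb->key);
 *      }
 */
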
static void atomsort_del_core(struct atomsort_head *h,
                struct atomsort_item *item, _Atomic atomptr_t *hint,
                atomptr_t next)
{
        _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
        atomptr_t prevval, updval;
        struct atomsort_item *prevptr;

        atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

        /* the following code should be identical (except sort<>list) to
         * atomlist_del_core()
         */
        while (1) {
                upd = NULL;
                updval = ATOMPTR_LOCK;

                do {
                        prevval = atomic_load_explicit(prev,
                                        memory_order_consume);

                        /* track the beginning of a chain of deleted items
                         * this is necessary to make this lock-free; we can
                         * complete deletions started by other threads.
                         */
                        if (!atomptr_l(prevval)) {
                                updval = prevval;
                                upd = prev;
                        }

                        prevptr = atomsort_itemp(prevval);
                        if (prevptr == item)
                                break;

                        prev = &prevptr->next;
                } while (prevptr);

                if (prevptr != item)
                        /* another thread completed our deletion */
                        return;

                if (!upd || atomptr_l(updval)) {
                        /* failed to find non-deleted predecessor...
                         * have to try again
                         */
                        prev = &h->first;
                        continue;
                }

                if (!atomic_compare_exchange_strong_explicit(upd, &updval,
                                next, memory_order_relaxed,
                                memory_order_relaxed)) {
                        /* prev doesn't point to item anymore, something
                         * was inserted.  continue at same position forward.
                         */
                        continue;
                }
                break;
        }
}

void atomsort_del_hint(struct atomsort_head *h, struct atomsort_item *item,
                       _Atomic atomptr_t *hint)
{
        atomptr_t next;

        /* mark ourselves in-delete - full barrier */
        next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
                        memory_order_seq_cst);
        assert(!atomptr_l(next));       /* delete race on same item */

        atomsort_del_core(h, item, hint, next);
}

struct atomsort_item *atomsort_pop(struct atomsort_head *h)
{
        struct atomsort_item *item;
        atomptr_t next;

        /* grab the head of the list - no matter what, the head is where we
         * start deleting, because either it's our item, or it's some
         * delete-marked items and then our item.
         */
        next = atomic_load_explicit(&h->first, memory_order_consume);

        do {
                item = atomsort_itemp(next);
                if (!item)
                        return NULL;

                /* try to mark deletion */
                next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
                                memory_order_acquire);

        } while (atomptr_l(next));
        /* if the loop repeats: delete race on the same item (another pop or
         * del) => proceed to the next item.
         * once the loop exits: we have our item selected and marked.
         */
        atomsort_del_core(h, item, &h->first, next);
        return item;
}