/*
 * Copyright (c) 2016-2018 David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>	/* assert() is used below; included here defensively */

#include "atomlist.h"

void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item)
{
	atomptr_t prevval;
	atomptr_t i = atomptr_i(item);

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

	/* updating ->last is possible here, but makes the code considerably
	 * more complicated... let's not.
	 */
	prevval = ATOMPTR_NULL;
	item->next = ATOMPTR_NULL;

	/* head-insert atomically
	 * release barrier: item + item->next writes must be completed
	 */
	while (!atomic_compare_exchange_weak_explicit(&h->first, &prevval, i,
				memory_order_release, memory_order_relaxed))
		atomic_store_explicit(&item->next, prevval,
				memory_order_relaxed);
}

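/* Editor's note: illustrative sketch, not part of the upstream file.
 * "struct sketch_node" and sketch_push() are hypothetical names; real FRR
 * callers normally go through the typesafe DECLARE_ATOMLIST wrappers in
 * atomlist.h rather than calling atomlist_add_head() directly.
 */
#if 0 /* example only */
struct sketch_node {
	struct atomlist_item field;	/* embedded list linkage */
	int value;
};

static void sketch_push(struct atomlist_head *h, struct sketch_node *n)
{
	n->value = 42;			/* plain writes to the item... */
	/* ...made visible to readers by the release CAS inside: */
	atomlist_add_head(h, &n->field);
}
#endif
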
void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item)
{
	atomptr_t prevval = ATOMPTR_NULL;
	atomptr_t i = atomptr_i(item);
	atomptr_t hint;
	struct atomlist_item *prevptr;
	_Atomic atomptr_t *prev;

	item->next = ATOMPTR_NULL;

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

	/* place new item into ->last
	 * release: item writes completed; acquire: data-dependency barrier
	 * on hint
	 */
	hint = atomic_exchange_explicit(&h->last, i, memory_order_acq_rel);

	while (1) {
		if (atomptr_p(hint) == NULL)
			prev = &h->first;
		else
			prev = &atomlist_itemp(hint)->next;

		do {
			prevval = atomic_load_explicit(prev,
					memory_order_consume);
			prevptr = atomlist_itemp(prevval);
			if (prevptr == NULL)
				break;

			prev = &prevptr->next;
		} while (prevptr);

		/* last item is being deleted - start over */
		if (atomptr_l(prevval)) {
			hint = ATOMPTR_NULL;
			continue;
		}

		/* no barrier - item->next is NULL and was so in xchg above */
		if (!atomic_compare_exchange_strong_explicit(prev, &prevval, i,
				memory_order_consume,
				memory_order_consume)) {
			hint = prevval;
			continue;
		}
		break;
	}
}

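/* Editor's note: illustrative sketch, not part of the upstream file.
 * Tail-insert pairs with atomlist_pop() below to form a lock-free
 * multi-producer FIFO; sketch_enqueue() and the reuse of "struct
 * sketch_node" from the previous sketch are hypothetical.
 */
#if 0 /* example only */
static void sketch_enqueue(struct atomlist_head *q, struct sketch_node *n)
{
	/* the acq_rel exchange on q->last both publishes n and hands the
	 * inserter a hint for where to start walking to the true tail
	 */
	atomlist_add_tail(q, &n->field);
}
#endif
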
static void atomlist_del_core(struct atomlist_head *h,
			      struct atomlist_item *item,
			      _Atomic atomptr_t *hint,
			      atomptr_t next)
{
	_Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
	atomptr_t prevval, updval;
	struct atomlist_item *prevptr;

	/* drop us off "last" if needed. no r/w to barrier. */
	prevval = atomptr_i(item);
	atomic_compare_exchange_strong_explicit(&h->last, &prevval,
			ATOMPTR_NULL,
			memory_order_relaxed, memory_order_relaxed);

	atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

	/* the following code should be identical (except sort<>list) to
	 * atomsort_del_core()
	 */
	while (1) {
		upd = NULL;
		updval = ATOMPTR_LOCK;

		do {
			prevval = atomic_load_explicit(prev,
					memory_order_consume);

			/* track the beginning of a chain of deleted items
			 * this is necessary to make this lock-free; we can
			 * complete deletions started by other threads.
			 */
			if (!atomptr_l(prevval)) {
				updval = prevval;
				upd = prev;
			}

			prevptr = atomlist_itemp(prevval);
			if (prevptr == item)
				break;

			prev = &prevptr->next;
		} while (prevptr);

		if (prevptr != item)
			/* another thread completed our deletion */
			return;

		if (!upd || atomptr_l(updval)) {
			/* failed to find non-deleted predecessor...
			 * have to try again
			 */
			prev = &h->first;
			continue;
		}

		if (!atomic_compare_exchange_strong_explicit(upd, &updval,
				next, memory_order_consume,
				memory_order_consume)) {
			/* prev doesn't point to item anymore, something
			 * was inserted. continue at same position forward.
			 */
			continue;
		}
		break;
	}
}

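/* Editor's note: illustrative sketch of the tagged-pointer convention the
 * deletion code relies on.  Assumption (from atomlist.h, not this file):
 * atomptr_t is an integer wide enough for a pointer, with the lowest bit
 * (ATOMPTR_LOCK) marking "in-delete"; atomptr_p()/atomptr_l() take it
 * apart again.  The sketch_* names are hypothetical; uintptr_t and bool
 * come from <stdint.h>/<stdbool.h>.
 */
#if 0 /* example only */
typedef uintptr_t sketch_atomptr;
#define SKETCH_LOCK ((sketch_atomptr)1)

static inline void *sketch_ptr(sketch_atomptr v)
{
	return (void *)(v & ~SKETCH_LOCK);	/* pointer part */
}

static inline bool sketch_locked(sketch_atomptr v)
{
	return (v & SKETCH_LOCK) != 0;		/* delete mark set? */
}
#endif
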
void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
		       _Atomic atomptr_t *hint)
{
	atomptr_t next;

	/* mark ourselves in-delete - full barrier */
	next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
			memory_order_acquire);
	assert(!atomptr_l(next));	/* delete race on same item */

	atomlist_del_core(h, item, hint, next);
}

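/* Editor's note: illustrative sketch of how the hint is meant to be used
 * (hypothetical sketch_del_after()).  A caller that just walked the list
 * and still holds a pointer to the predecessor's ->next field can pass it
 * in so the delete need not re-walk from h->first; NULL falls back to a
 * full walk, and stale hints are handled by the retry logic above.
 */
#if 0 /* example only */
static void sketch_del_after(struct atomlist_head *h,
			     struct atomlist_item *pred,
			     struct atomlist_item *victim)
{
	atomlist_del_hint(h, victim, pred ? &pred->next : NULL);
}
#endif
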
struct atomlist_item *atomlist_pop(struct atomlist_head *h)
{
	struct atomlist_item *item;
	atomptr_t next;

	/* grab head of the list - and remember it in "next" for the
	 * actual delete below. No matter what, the head of the list is
	 * where we start deleting because either it's our item, or it's
	 * some delete-marked items and then our item.
	 */
	next = atomic_load_explicit(&h->first, memory_order_consume);

	do {
		item = atomlist_itemp(next);
		if (!item)
			return NULL;

		/* try to mark deletion */
		next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
				memory_order_acquire);

	} while (atomptr_l(next));
	/* if loop is taken: delete race on same item (another pop or del)
	 * => proceed to next item
	 * if loop exited here: we have our item selected and marked
	 */
	atomlist_del_core(h, item, &h->first, next);
	return item;
}

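/* Editor's note: illustrative consumer side of the FIFO sketch above
 * (hypothetical sketch_drain(), reusing "struct sketch_node"; offsetof
 * needs <stddef.h>).  Once pop returns an item, it is exclusively ours.
 */
#if 0 /* example only */
static void sketch_drain(struct atomlist_head *q)
{
	struct atomlist_item *it;

	while ((it = atomlist_pop(q)) != NULL) {
		struct sketch_node *n = (struct sketch_node *)
			((char *)it - offsetof(struct sketch_node, field));

		/* n is no longer reachable from q; safe to free or reuse */
		(void)n;
	}
}
#endif
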
struct atomsort_item *atomsort_add(struct atomsort_head *h,
		struct atomsort_item *item, int (*cmpfn)(
			const struct atomsort_item *,
			const struct atomsort_item *))
{
	_Atomic atomptr_t *prev;
	atomptr_t prevval;
	atomptr_t i = atomptr_i(item);
	struct atomsort_item *previtem;
	int cmpval;

	do {
		prev = &h->first;

		do {
			prevval = atomic_load_explicit(prev,
					memory_order_acquire);
			previtem = atomptr_p(prevval);

			if (!previtem || (cmpval = cmpfn(previtem, item)) > 0)
				break;
			if (cmpval == 0)
				return previtem;

			prev = &previtem->next;
		} while (1);

		if (atomptr_l(prevval))
			continue;

		item->next = prevval;
		if (atomic_compare_exchange_strong_explicit(prev, &prevval, i,
				memory_order_release, memory_order_relaxed))
			break;
	} while (1);

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
	return NULL;
}

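/* Editor's note: illustrative comparator for atomsort_add() (hypothetical
 * sketch_sorted/sketch_cmp).  The embedded member is placed first so the
 * cast is valid.  Note the return-value contract above: NULL on a
 * successful insert, the existing equal item when cmpfn() returns 0.
 */
#if 0 /* example only */
struct sketch_sorted {
	struct atomsort_item field;	/* must be first for the cast */
	int key;
};

static int sketch_cmp(const struct atomsort_item *a,
		      const struct atomsort_item *b)
{
	const struct sketch_sorted *sa = (const struct sketch_sorted *)a;
	const struct sketch_sorted *sb = (const struct sketch_sorted *)b;

	return (sa->key > sb->key) - (sa->key < sb->key);	/* qsort-style */
}
#endif
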
static void atomsort_del_core(struct atomsort_head *h,
		struct atomsort_item *item, _Atomic atomptr_t *hint,
		atomptr_t next)
{
	_Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
	atomptr_t prevval, updval;
	struct atomsort_item *prevptr;

	atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

	/* the following code should be identical (except sort<>list) to
	 * atomlist_del_core()
	 */
	while (1) {
		upd = NULL;
		updval = ATOMPTR_LOCK;

		do {
			prevval = atomic_load_explicit(prev,
					memory_order_consume);

			/* track the beginning of a chain of deleted items
			 * this is necessary to make this lock-free; we can
			 * complete deletions started by other threads.
			 */
			if (!atomptr_l(prevval)) {
				updval = prevval;
				upd = prev;
			}

			prevptr = atomsort_itemp(prevval);
			if (prevptr == item)
				break;

			prev = &prevptr->next;
		} while (prevptr);

		if (prevptr != item)
			/* another thread completed our deletion */
			return;

		if (!upd || atomptr_l(updval)) {
			/* failed to find non-deleted predecessor...
			 * have to try again
			 */
			prev = &h->first;
			continue;
		}

		if (!atomic_compare_exchange_strong_explicit(upd, &updval,
				next, memory_order_relaxed,
				memory_order_relaxed)) {
			/* prev doesn't point to item anymore, something
			 * was inserted. continue at same position forward.
			 */
			continue;
		}
		break;
	}
}

void atomsort_del_hint(struct atomsort_head *h, struct atomsort_item *item,
		       _Atomic atomptr_t *hint)
{
	atomptr_t next;

	/* mark ourselves in-delete - full barrier */
	next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
			memory_order_seq_cst);
	assert(!atomptr_l(next));	/* delete race on same item */

	atomsort_del_core(h, item, hint, next);
}

struct atomsort_item *atomsort_pop(struct atomsort_head *h)
{
	struct atomsort_item *item;
	atomptr_t next;

	/* grab head of the list - and remember it in "next" for the
	 * actual delete below. No matter what, the head of the list is
	 * where we start deleting because either it's our item, or it's
	 * some delete-marked items and then our item.
	 */
	next = atomic_load_explicit(&h->first, memory_order_consume);

	do {
		item = atomsort_itemp(next);
		if (!item)
			return NULL;

		/* try to mark deletion */
		next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
				memory_order_acquire);

	} while (atomptr_l(next));
	/* if loop is taken: delete race on same item (another pop or del)
	 * => proceed to next item
	 * if loop exited here: we have our item selected and marked
	 */
	atomsort_del_core(h, item, &h->first, next);
	return item;
}
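
/* Editor's note: illustrative sketch, not part of the upstream file.
 * atomsort_add() plus atomsort_pop() behave like a concurrent priority
 * queue: pop always removes the head, which the comparator keeps as the
 * smallest element (hypothetical, reusing sketch_sorted from above).
 */
#if 0 /* example only */
static struct sketch_sorted *sketch_pop_min(struct atomsort_head *h)
{
	struct atomsort_item *it = atomsort_pop(h);

	/* the embedded member is first, so the cast recovers the container */
	return (struct sketch_sorted *)it;
}
#endif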