/*
 * Copyright (c) 2016-2018  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>

#include "atomlist.h"
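
/* Editorial summary (not from the original file): judging from the calls
 * below, atomptr_t carries a pointer plus a low tag bit.  atomptr_i()
 * encodes an item pointer, atomptr_p()/atomlist_itemp()/atomsort_itemp()
 * decode one (masking the tag), and atomptr_l() tests the ATOMPTR_LOCK
 * bit, which marks an item as being deleted.  See atomlist.h for the
 * authoritative definitions.
 */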
void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item)
{
	atomptr_t prevval;
	atomptr_t i = atomptr_i(item);

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

	/* updating ->last is possible here, but makes the code considerably
	 * more complicated... let's not.
	 */
	prevval = ATOMPTR_NULL;
	item->next = ATOMPTR_NULL;

	/* head-insert atomically
	 * release barrier: item + item->next writes must be completed
	 */
	while (!atomic_compare_exchange_weak_explicit(&h->first, &prevval, i,
				memory_order_release, memory_order_relaxed))
		atomic_store_explicit(&item->next, prevval,
				memory_order_relaxed);
}
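
/* Illustrative call sequence (editorial sketch, not part of the original
 * file; real FRR code normally goes through the typesafe DECLARE_ATOMLIST
 * wrappers in atomlist.h rather than calling the raw functions):
 *
 *	struct atomlist_head head = { };
 *	struct atomlist_item item1 = { }, item2 = { };
 *
 *	atomlist_add_head(&head, &item1);	// list: item1
 *	atomlist_add_tail(&head, &item2);	// list: item1, item2
 */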
void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item)
{
	atomptr_t prevval = ATOMPTR_NULL;
	atomptr_t i = atomptr_i(item);
	atomptr_t hint;
	struct atomlist_item *prevptr;
	_Atomic atomptr_t *prev;

	item->next = ATOMPTR_NULL;

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

	/* place new item into ->last
	 * release: item writes completed;  acquire: DD barrier on hint
	 */
	hint = atomic_exchange_explicit(&h->last, i, memory_order_acq_rel);

	while (1) {
		if (atomptr_p(hint) == NULL)
			prev = &h->first;
		else
			prev = &atomlist_itemp(hint)->next;

		do {
			prevval = atomic_load_explicit(prev,
						memory_order_consume);
			prevptr = atomlist_itemp(prevval);
			if (prevptr == NULL)
				break;

			prev = &prevptr->next;
		} while (1);

		/* last item is being deleted - start over */
		if (atomptr_l(prevval)) {
			hint = ATOMPTR_NULL;
			continue;
		}

		/* no barrier - item->next is NULL and was so in xchg above */
		if (!atomic_compare_exchange_strong_explicit(prev, &prevval, i,
					memory_order_consume,
					memory_order_consume)) {
			hint = prevval;
			continue;
		}
		break;
	}
}
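
/* Editorial note: ->last is only a hint.  atomlist_add_head() deliberately
 * does not update it, and atomlist_del_core() below only clears it when the
 * deleted item happens to be the one it points at, so atomlist_add_tail()
 * must be prepared to walk forward from the hint (or from ->first) to find
 * the true end of the list, as the loop above does.
 */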
static void atomlist_del_core(struct atomlist_head *h,
			      struct atomlist_item *item,
			      _Atomic atomptr_t *hint,
			      atomptr_t next)
{
	_Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
	atomptr_t prevval, updval;
	struct atomlist_item *prevptr;

	/* drop us off "last" if needed.  no r/w to barrier. */
	prevval = atomptr_i(item);
	atomic_compare_exchange_strong_explicit(&h->last, &prevval,
			ATOMPTR_NULL,
			memory_order_relaxed, memory_order_relaxed);

	atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

	/* the following code should be identical (except sort<>list) to
	 * atomsort_del_core()
	 */
	while (1) {
		upd = NULL;
		updval = ATOMPTR_LOCK;

		do {
			prevval = atomic_load_explicit(prev,
						memory_order_consume);

			/* track the beginning of a chain of deleted items
			 * this is necessary to make this lock-free; we can
			 * complete deletions started by other threads.
			 */
			if (!atomptr_l(prevval)) {
				updval = prevval;
				upd = prev;
			}

			prevptr = atomlist_itemp(prevval);
			if (prevptr == item)
				break;

			prev = &prevptr->next;
		} while (prevptr);

		if (prevptr == NULL)
			/* another thread completed our deletion */
			return;

		if (!upd || atomptr_l(updval)) {
			/* failed to find non-deleted predecessor...
			 * have to try again
			 */
			prev = &h->first;
			continue;
		}

		if (!atomic_compare_exchange_strong_explicit(upd, &updval,
				next, memory_order_consume,
				memory_order_consume)) {
			/* prev doesn't point to item anymore, something
			 * was inserted.  continue at same position forward.
			 */
			continue;
		}
		break;
	}
}
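
/* Editorial note on the deletion scheme above: an item is deleted by first
 * setting ATOMPTR_LOCK on its own ->next (in the callers below), then
 * splicing the predecessor's pointer past the whole run of lock-marked
 * items.  Tracking upd/updval as the last non-deleted predecessor is what
 * lets any thread complete deletions that other threads have started,
 * which is what keeps the structure lock-free.
 */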
void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
		_Atomic atomptr_t *hint)
{
	atomptr_t next;

	/* mark ourselves in-delete - full barrier */
	next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
			memory_order_acquire);
	assert(!atomptr_l(next));	/* delete race on same item */

	atomlist_del_core(h, item, hint, next);
}
struct atomlist_item *atomlist_pop(struct atomlist_head *h)
{
	struct atomlist_item *item;
	atomptr_t next;

	/* grab head of the list - and remember it in "next" for the
	 * actual delete below.  No matter what, the head of the list is
	 * where we start deleting because either it's our item, or it's
	 * some delete-marked items and then our item.
	 */
	next = atomic_load_explicit(&h->first, memory_order_consume);

	do {
		item = atomlist_itemp(next);
		if (!item)
			return NULL;

		/* try to mark deletion */
		next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
				memory_order_acquire);

	} while (atomptr_l(next));
	/* if loop is taken: delete race on same item (another pop or del)
	 *	=> proceed to next item
	 * if loop exited here: we have our item selected and marked
	 */
	atomlist_del_core(h, item, &h->first, next);

	return item;
}
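
/* Illustrative drain loop (editorial sketch, not part of the original
 * file; handle() is a hypothetical consumer function):
 *
 *	struct atomlist_item *it;
 *
 *	while ((it = atomlist_pop(&head)))
 *		handle(it);
 *
 * atomlist_pop() returns NULL once the list is empty.
 */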
struct atomsort_item *atomsort_add(struct atomsort_head *h,
		struct atomsort_item *item, int (*cmpfn)(
			const struct atomsort_item *,
			const struct atomsort_item *))
{
	_Atomic atomptr_t *prev;
	atomptr_t prevval;
	atomptr_t i = atomptr_i(item);
	struct atomsort_item *previtem;
	int cmpval;

	do {
		prev = &h->first;

		do {
			prevval = atomic_load_explicit(prev,
					memory_order_acquire);
			previtem = atomptr_p(prevval);

			if (!previtem || (cmpval = cmpfn(previtem, item)) > 0)
				break;
			if (cmpval == 0)
				return previtem;

			prev = &previtem->next;
		} while (1);

		if (atomptr_l(prevval))
			continue;

		item->next = prevval;
		if (atomic_compare_exchange_strong_explicit(prev, &prevval, i,
				memory_order_release, memory_order_relaxed))
			break;
	} while (1);

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
	return NULL;
}
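
/* Editorial note: as reconstructed above, atomsort_add() returns NULL on
 * successful insertion and returns the already-present item when cmpfn()
 * reports an equal element, so callers can detect duplicates.
 */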
static void atomsort_del_core(struct atomsort_head *h,
		struct atomsort_item *item, _Atomic atomptr_t *hint,
		atomptr_t next)
{
	_Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
	atomptr_t prevval, updval;
	struct atomsort_item *prevptr;

	atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

	/* the following code should be identical (except sort<>list) to
	 * atomlist_del_core()
	 */
	while (1) {
		upd = NULL;
		updval = ATOMPTR_LOCK;

		do {
			prevval = atomic_load_explicit(prev,
					memory_order_consume);

			/* track the beginning of a chain of deleted items
			 * this is necessary to make this lock-free; we can
			 * complete deletions started by other threads.
			 */
			if (!atomptr_l(prevval)) {
				updval = prevval;
				upd = prev;
			}

			prevptr = atomsort_itemp(prevval);
			if (prevptr == item)
				break;

			prev = &prevptr->next;
		} while (prevptr);

		if (prevptr == NULL)
			/* another thread completed our deletion */
			return;

		if (!upd || atomptr_l(updval)) {
			/* failed to find non-deleted predecessor...
			 * have to try again
			 */
			prev = &h->first;
			continue;
		}

		if (!atomic_compare_exchange_strong_explicit(upd, &updval,
				next, memory_order_relaxed,
				memory_order_relaxed)) {
			/* prev doesn't point to item anymore, something
			 * was inserted.  continue at same position forward.
			 */
			continue;
		}
		break;
	}
}
void atomsort_del_hint(struct atomsort_head *h, struct atomsort_item *item,
		_Atomic atomptr_t *hint)
{
	atomptr_t next;

	/* mark ourselves in-delete - full barrier */
	next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
			memory_order_seq_cst);
	assert(!atomptr_l(next));	/* delete race on same item */

	atomsort_del_core(h, item, hint, next);
}
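
/* Editorial note: unlike atomlist_del_hint() above, which marks the item
 * with a memory_order_acquire fetch_or, atomsort_del_hint() uses
 * memory_order_seq_cst here, even though both carry the same "full
 * barrier" comment in the original source.
 */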
struct atomsort_item *atomsort_pop(struct atomsort_head *h)
{
	struct atomsort_item *item;
	atomptr_t next;

	/* grab head of the list - and remember it in "next" for the
	 * actual delete below.  No matter what, the head of the list is
	 * where we start deleting because either it's our item, or it's
	 * some delete-marked items and then our item.
	 */
	next = atomic_load_explicit(&h->first, memory_order_consume);

	do {
		item = atomsort_itemp(next);
		if (!item)
			return NULL;

		/* try to mark deletion */
		next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
				memory_order_acquire);

	} while (atomptr_l(next));
	/* if loop is taken: delete race on same item (another pop or del)
	 *	=> proceed to next item
	 * if loop exited here: we have our item selected and marked
	 */
	atomsort_del_core(h, item, &h->first, next);

	return item;
}
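
/* Illustrative cmpfn for atomsort_add() (editorial sketch, not part of the
 * original file; struct myitem and myitem_cmp() are hypothetical).  The
 * cast is valid because the atomsort_item is the first member:
 *
 *	struct myitem {
 *		struct atomsort_item si;
 *		int key;
 *	};
 *
 *	static int myitem_cmp(const struct atomsort_item *a,
 *			      const struct atomsort_item *b)
 *	{
 *		const struct myitem *ma = (const struct myitem *)a;
 *		const struct myitem *mb = (const struct myitem *)b;
 *
 *		return (ma->key > mb->key) - (ma->key < mb->key);
 *	}
 */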