/*
 * Copyright (c) 2016-2018  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>

#include "atomlist.h"
void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item)
{
        atomptr_t prevval;
        atomptr_t i = atomptr_i(item);

        atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

        /* updating ->last is possible here, but makes the code considerably
         * more complicated... let's not.
         */
        prevval = ATOMPTR_NULL;
        item->next = ATOMPTR_NULL;

        /* head-insert atomically
         * release barrier: item + item->next writes must be completed
         */
        while (!atomic_compare_exchange_weak_explicit(&h->first, &prevval, i,
                        memory_order_release, memory_order_relaxed))
                atomic_store_explicit(&item->next, prevval,
                                memory_order_relaxed);
}
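
/* Example sketch (illustration only, not part of this file's API): typical
 * producer-side use of atomlist_add_head().  "struct demo_entry",
 * "demo_push", and the ATOMLIST_EXAMPLES guard are hypothetical names for
 * this sketch; the pattern it assumes is that callers embed a
 * struct atomlist_item inside their own structure and pass a pointer to
 * that member.
 */
#ifdef ATOMLIST_EXAMPLES
struct demo_entry {
        struct atomlist_item ali;       /* embedded list linkage */
        int value;
};

static void demo_push(struct atomlist_head *h, struct demo_entry *e)
{
        /* safe to call concurrently from multiple threads; add_head
         * retries its compare-exchange until the head-insert succeeds
         */
        atomlist_add_head(h, &e->ali);
}
#endif /* ATOMLIST_EXAMPLES */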
void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item)
{
        atomptr_t prevval = ATOMPTR_NULL;
        atomptr_t i = atomptr_i(item);
        atomptr_t hint;
        struct atomlist_item *prevptr;
        _Atomic atomptr_t *prev;

        item->next = ATOMPTR_NULL;

        atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

        /* place new item into ->last
         * release: item writes completed;  acquire: DD barrier on hint
         */
        hint = atomic_exchange_explicit(&h->last, i, memory_order_acq_rel);

        while (1) {
                if (atomptr_p(hint) == NULL)
                        prev = &h->first;
                else
                        prev = &atomlist_itemp(hint)->next;

                do {
                        prevval = atomic_load_explicit(prev,
                                        memory_order_consume);
                        prevptr = atomlist_itemp(prevval);
                        if (prevptr == NULL)
                                break;

                        prev = &prevptr->next;
                } while (1);

                /* last item is being deleted - start over */
                if (atomptr_l(prevval)) {
                        hint = ATOMPTR_NULL;
                        continue;
                }

                /* no barrier - item->next is NULL and was so in xchg above */
                if (!atomic_compare_exchange_strong_explicit(prev, &prevval, i,
                                memory_order_consume,
                                memory_order_consume)) {
                        hint = prevval;
                        continue;
                }
                break;
        }
}
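
/* Example sketch (illustration only): the atomptr_t tagging that the code
 * above relies on.  An atomptr_t packs an item pointer together with a low
 * ATOMPTR_LOCK bit that marks an in-progress deletion; atomptr_i() wraps a
 * pointer, atomlist_itemp() unwraps it, and atomptr_l() tests the lock bit.
 * "demo_tagging" is a hypothetical name, and the sketch assumes atomptr_t
 * is a plain integer type that ATOMPTR_LOCK can be OR'd into, matching the
 * atomic_fetch_or_explicit() calls in this file.
 */
#ifdef ATOMLIST_EXAMPLES
static void demo_tagging(struct atomlist_item *item)
{
        atomptr_t a = atomptr_i(item);

        assert(atomlist_itemp(a) == item);      /* round-trips the pointer */
        assert(!atomptr_l(a));                  /* lock bit starts clear */

        a |= ATOMPTR_LOCK;              /* what atomic_fetch_or does */
        assert(atomptr_l(a));           /* now marked for deletion */
}
#endif /* ATOMLIST_EXAMPLES */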
static void atomlist_del_core(struct atomlist_head *h,
                              struct atomlist_item *item,
                              _Atomic atomptr_t *hint,
                              atomptr_t next)
{
        _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
        atomptr_t prevval, updval;
        struct atomlist_item *prevptr;

        /* drop us off "last" if needed.  no r/w to barrier. */
        prevval = atomptr_i(item);
        atomic_compare_exchange_strong_explicit(&h->last, &prevval,
                        ATOMPTR_NULL,
                        memory_order_relaxed, memory_order_relaxed);

        atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

        /* the following code should be identical (except sort<>list) to
         * atomsort_del_hint()
         */
        while (1) {
                upd = NULL;
                updval = ATOMPTR_LOCK;

                do {
                        prevval = atomic_load_explicit(prev,
                                        memory_order_consume);

                        /* track the beginning of a chain of deleted items
                         * this is necessary to make this lock-free; we can
                         * complete deletions started by other threads.
                         */
                        if (!atomptr_l(prevval)) {
                                updval = prevval;
                                upd = prev;
                        }

                        prevptr = atomlist_itemp(prevval);
                        if (prevptr == item)
                                break;

                        prev = &prevptr->next;
                } while (prevptr);

                if (prevptr != item)
                        /* another thread completed our deletion */
                        return;

                if (!upd || atomptr_l(updval)) {
                        /* failed to find non-deleted predecessor...
                         * have to try again
                         */
                        prev = &h->first;
                        continue;
                }

                if (!atomic_compare_exchange_strong_explicit(upd, &updval,
                                next, memory_order_consume,
                                memory_order_consume)) {
                        /* prev doesn't point to item anymore, something
                         * was inserted.  continue at same position forward.
                         */
                        continue;
                }
                break;
        }
}
void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
                       _Atomic atomptr_t *hint)
{
        atomptr_t next;

        /* mark ourselves in-delete - full barrier */
        next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
                        memory_order_acquire);
        assert(!atomptr_l(next));       /* delete race on same item */

        atomlist_del_core(h, item, hint, next);
}
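
/* Example sketch (illustration only): deleting a known item without a
 * predecessor hint.  Passing NULL makes atomlist_del_core() start its
 * search at &h->first.  "demo_remove" is a hypothetical name and reuses
 * the demo_entry type from the first sketch above.
 */
#ifdef ATOMLIST_EXAMPLES
static void demo_remove(struct atomlist_head *h, struct demo_entry *e)
{
        atomlist_del_hint(h, &e->ali, NULL);
}
#endif /* ATOMLIST_EXAMPLES */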
struct atomlist_item *atomlist_pop(struct atomlist_head *h)
{
        struct atomlist_item *item;
        atomptr_t next;

        /* grab head of the list - and remember it in replval for the
         * actual delete below.  No matter what, the head of the list is
         * where we start deleting because either it's our item, or it's
         * some delete-marked items and then our item.
         */
        next = atomic_load_explicit(&h->first, memory_order_consume);

        do {
                item = atomlist_itemp(next);
                if (!item)
                        return NULL;

                /* try to mark deletion */
                next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
                                memory_order_acquire);

        } while (atomptr_l(next));
        /* if loop is taken: delete race on same item (another pop or del)
         * => proceed to next item
         * if loop exited here: we have our item selected and marked
         */
        atomlist_del_core(h, item, &h->first, next);
        return item;
}
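
/* Example sketch (illustration only): draining the list from a consumer
 * thread.  atomlist_pop() returns the embedded atomlist_item, so the
 * caller maps it back to the containing structure; "demo_drain" and the
 * container_of-style offset arithmetic are hypothetical, for this sketch
 * only, and reuse demo_entry from the first sketch.
 */
#ifdef ATOMLIST_EXAMPLES
#include <stddef.h>     /* offsetof */

static void demo_drain(struct atomlist_head *h)
{
        struct atomlist_item *ali;

        while ((ali = atomlist_pop(h)) != NULL) {
                struct demo_entry *e = (struct demo_entry *)
                        ((char *)ali - offsetof(struct demo_entry, ali));
                (void)e;        /* process or free the entry here */
        }
}
#endif /* ATOMLIST_EXAMPLES */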
struct atomsort_item *atomsort_add(struct atomsort_head *h,
                struct atomsort_item *item, int (*cmpfn)(
                        const struct atomsort_item *,
                        const struct atomsort_item *))
{
        _Atomic atomptr_t *prev;
        atomptr_t prevval;
        atomptr_t i = atomptr_i(item);
        struct atomsort_item *previtem;
        int cmpval;

        do {
                prev = &h->first;

                do {
                        prevval = atomic_load_explicit(prev,
                                        memory_order_acquire);
                        previtem = atomptr_p(prevval);

                        if (!previtem || (cmpval = cmpfn(previtem, item)) > 0)
                                break;
                        if (cmpval == 0)
                                return previtem;

                        prev = &previtem->next;
                } while (1);

                if (atomptr_l(prevval))
                        continue;

                item->next = prevval;
                if (atomic_compare_exchange_strong_explicit(prev, &prevval, i,
                                memory_order_release, memory_order_relaxed))
                        break;
        } while (1);

        atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
        return NULL;
}
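
/* Example sketch (illustration only): a comparison callback for
 * atomsort_add().  The callback receives the embedded atomsort_item
 * pointers and must order the containing structures; per the code above,
 * a 0 result makes atomsort_add() skip the insert and return the existing
 * item.  "struct demo_sorted" and "demo_sorted_cmp" are hypothetical names
 * for this sketch only.
 */
#ifdef ATOMLIST_EXAMPLES
struct demo_sorted {
        struct atomsort_item asi;       /* embedded sorted-list linkage */
        int key;
};

static int demo_sorted_cmp(const struct atomsort_item *a,
                           const struct atomsort_item *b)
{
        /* casts work as container_of here because asi is the first member */
        const struct demo_sorted *da = (const struct demo_sorted *)a;
        const struct demo_sorted *db = (const struct demo_sorted *)b;

        if (da->key < db->key)
                return -1;
        return da->key > db->key;
}
#endif /* ATOMLIST_EXAMPLES */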
static void atomsort_del_core(struct atomsort_head *h,
                struct atomsort_item *item, _Atomic atomptr_t *hint,
                atomptr_t next)
{
        _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
        atomptr_t prevval, updval;
        struct atomsort_item *prevptr;

        atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

        /* the following code should be identical (except sort<>list) to
         * atomlist_del_core()
         */
        while (1) {
                upd = NULL;
                updval = ATOMPTR_LOCK;

                do {
                        prevval = atomic_load_explicit(prev,
                                        memory_order_consume);

                        /* track the beginning of a chain of deleted items
                         * this is necessary to make this lock-free; we can
                         * complete deletions started by other threads.
                         */
                        if (!atomptr_l(prevval)) {
                                updval = prevval;
                                upd = prev;
                        }

                        prevptr = atomsort_itemp(prevval);
                        if (prevptr == item)
                                break;

                        prev = &prevptr->next;
                } while (prevptr);

                if (prevptr != item)
                        /* another thread completed our deletion */
                        return;

                if (!upd || atomptr_l(updval)) {
                        /* failed to find non-deleted predecessor...
                         * have to try again
                         */
                        prev = &h->first;
                        continue;
                }

                if (!atomic_compare_exchange_strong_explicit(upd, &updval,
                                next, memory_order_relaxed,
                                memory_order_relaxed)) {
                        /* prev doesn't point to item anymore, something
                         * was inserted.  continue at same position forward.
                         */
                        continue;
                }
                break;
        }
}
void atomsort_del_hint(struct atomsort_head *h, struct atomsort_item *item,
                _Atomic atomptr_t *hint)
{
        atomptr_t next;

        /* mark ourselves in-delete - full barrier */
        next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
                        memory_order_seq_cst);
        assert(!atomptr_l(next));       /* delete race on same item */

        atomsort_del_core(h, item, hint, next);
}
struct atomsort_item *atomsort_pop(struct atomsort_head *h)
{
        struct atomsort_item *item;
        atomptr_t next;

        /* grab head of the list - and remember it in replval for the
         * actual delete below.  No matter what, the head of the list is
         * where we start deleting because either it's our item, or it's
         * some delete-marked items and then our item.
         */
        next = atomic_load_explicit(&h->first, memory_order_consume);

        do {
                item = atomsort_itemp(next);
                if (!item)
                        return NULL;

                /* try to mark deletion */
                next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
                                memory_order_acquire);

        } while (atomptr_l(next));
        /* if loop is taken: delete race on same item (another pop or del)
         * => proceed to next item
         * if loop exited here: we have our item selected and marked
         */
        atomsort_del_core(h, item, &h->first, next);
        return item;
}
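
/* Example sketch (illustration only): inserting into and draining the
 * sorted list.  "demo_sorted_use" is a hypothetical name; it reuses
 * demo_sorted and demo_sorted_cmp from the sketch above, and relies on
 * atomsort_add() returning non-NULL when an equal item already exists.
 */
#ifdef ATOMLIST_EXAMPLES
static void demo_sorted_use(struct atomsort_head *h, struct demo_sorted *s)
{
        if (atomsort_add(h, &s->asi, demo_sorted_cmp) != NULL)
                return;         /* duplicate key: item was not inserted */

        /* ... later, pop items in ascending key order until empty */
        while (atomsort_pop(h))
                ;
}
#endif /* ATOMLIST_EXAMPLES */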