// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2016-2018 David Lamparter, for NetDEF, Inc.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>

#include "atomlist.h"
/* insert "item" at the head of list "h".
 *
 * lock-free: loops on a weak CAS against h->first until our item wins the
 * head position.  h->last is intentionally not updated (see comment below).
 */
void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item)
{
	atomptr_t prevval;
	atomptr_t i = atomptr_i(item);

	/* count is statistical only; relaxed ordering suffices */
	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

	/* updating ->last is possible here, but makes the code considerably
	 * more complicated... let's not.
	 */
	prevval = ATOMPTR_NULL;
	item->next = ATOMPTR_NULL;

	/* head-insert atomically
	 * release barrier: item + item->next writes must be completed
	 */
	while (!atomic_compare_exchange_weak_explicit(&h->first, &prevval, i,
				memory_order_release, memory_order_relaxed))
		/* CAS failure loaded the current head into prevval; chain it
		 * behind our item and retry.
		 */
		atomic_store_explicit(&item->next, prevval,
				memory_order_relaxed);
}
35 | ||
/* insert "item" at the tail of list "h".
 *
 * lock-free: publishes item on h->last first (atomic exchange), then walks
 * forward from the previous ->last value ("hint") to find the real end of
 * the chain and links item in with a CAS.  If the walk runs into an item
 * that is concurrently being deleted (lock bit set), the walk restarts
 * from h->first.
 */
void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item)
{
	atomptr_t prevval = ATOMPTR_NULL;
	atomptr_t i = atomptr_i(item);
	atomptr_t hint;
	struct atomlist_item *prevptr;
	_Atomic atomptr_t *prev;

	item->next = ATOMPTR_NULL;

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

	/* place new item into ->last
	 * release: item writes completed; acquire: DD barrier on hint
	 */
	hint = atomic_exchange_explicit(&h->last, i, memory_order_acq_rel);

	while (1) {
		/* hint == NULL: list was empty (or we restarted); begin
		 * the forward walk at the head pointer.
		 */
		if (atomptr_p(hint) == NULL)
			prev = &h->first;
		else
			prev = &atomlist_itemp(hint)->next;

		/* walk forward to the current end of the chain */
		do {
			prevval = atomic_load_explicit(prev,
					memory_order_consume);
			prevptr = atomlist_itemp(prevval);
			if (prevptr == NULL)
				break;

			prev = &prevptr->next;
		} while (prevptr);

		/* last item is being deleted - start over */
		if (atomptr_l(prevval)) {
			hint = ATOMPTR_NULL;
			continue;
		}

		/* no barrier - item->next is NULL and was so in xchg above */
		if (!atomic_compare_exchange_strong_explicit(prev, &prevval, i,
				memory_order_consume,
				memory_order_consume)) {
			/* someone else appended first; the freshly loaded
			 * prevval is a better hint - continue from there.
			 */
			hint = prevval;
			continue;
		}
		break;
	}
}
85 | ||
/* unlink "item" from list "h".
 *
 * precondition: the caller has already set the lock bit on item->next
 * (marking the item in-deletion); "next" is item's successor as observed
 * at that point.  "hint" optionally points at a predecessor's ->next slot
 * to shorten the search; NULL means start at h->first.
 *
 * lock-free: other threads may complete this deletion concurrently, and
 * this thread may skip over (i.e. complete) chains of items other threads
 * have marked deleted.
 */
static void atomlist_del_core(struct atomlist_head *h,
			      struct atomlist_item *item,
			      _Atomic atomptr_t *hint,
			      atomptr_t next)
{
	_Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
	atomptr_t prevval, updval;
	struct atomlist_item *prevptr;

	/* drop us off "last" if needed. no r/w to barrier. */
	prevval = atomptr_i(item);
	atomic_compare_exchange_strong_explicit(&h->last, &prevval,
			ATOMPTR_NULL,
			memory_order_relaxed, memory_order_relaxed);

	atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

	/* the following code should be identical (except sort<>list) to
	 * atomsort_del_hint()
	 */
	while (1) {
		/* upd/updval: last non-deleted predecessor slot found; the
		 * ATOMPTR_LOCK seed marks "none found yet".
		 */
		upd = NULL;
		updval = ATOMPTR_LOCK;

		do {
			prevval = atomic_load_explicit(prev,
					memory_order_consume);

			/* track the beginning of a chain of deleted items
			 * this is necessary to make this lock-free; we can
			 * complete deletions started by other threads.
			 */
			if (!atomptr_l(prevval)) {
				updval = prevval;
				upd = prev;
			}

			prevptr = atomlist_itemp(prevval);
			if (prevptr == item)
				break;

			prev = &prevptr->next;
		} while (prevptr);

		if (prevptr != item)
			/* another thread completed our deletion */
			return;

		if (!upd || atomptr_l(updval)) {
			/* failed to find non-deleted predecessor...
			 * have to try again
			 */
			prev = &h->first;
			continue;
		}

		/* swing the predecessor past item (and any intervening
		 * deleted items) to "next"
		 */
		if (!atomic_compare_exchange_strong_explicit(upd, &updval,
				next, memory_order_consume,
				memory_order_consume)) {
			/* prev doesn't point to item anymore, something
			 * was inserted. continue at same position forward.
			 */
			continue;
		}
		break;
	}
}
153 | ||
/* delete "item" from list "h", optionally starting the predecessor search
 * at "hint" (a pointer to some predecessor's ->next slot, or NULL).
 *
 * asserts (rather than tolerating) a concurrent delete of the same item:
 * deleting one item from two threads at once is a caller bug.
 */
void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
		       _Atomic atomptr_t *hint)
{
	atomptr_t next;

	/* mark ourselves in-delete - full barrier */
	next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
			memory_order_acquire);
	assert(!atomptr_l(next)); /* delete race on same item */

	atomlist_del_core(h, item, hint, next);
}
166 | ||
/* remove and return the first item of list "h"; NULL if the list is empty.
 *
 * lock-free: repeatedly tries to claim the current head by setting its
 * lock bit; if another pop/del claimed it first (lock bit already set),
 * moves on to that item's successor and tries again.
 */
struct atomlist_item *atomlist_pop(struct atomlist_head *h)
{
	struct atomlist_item *item;
	atomptr_t next;

	/* grab head of the list - and remember it in replval for the
	 * actual delete below. No matter what, the head of the list is
	 * where we start deleting because either it's our item, or it's
	 * some delete-marked items and then our item.
	 */
	next = atomic_load_explicit(&h->first, memory_order_consume);

	do {
		item = atomlist_itemp(next);
		if (!item)
			return NULL;

		/* try to mark deletion */
		next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
				memory_order_acquire);

	} while (atomptr_l(next));
	/* if loop is taken: delete race on same item (another pop or del)
	 * => proceed to next item
	 * if loop exited here: we have our item selected and marked
	 */
	atomlist_del_core(h, item, &h->first, next);
	return item;
}
196 | ||
/* insert "item" into sorted list "h", ordered by "cmpfn".
 *
 * returns NULL on successful insertion, or the already-present item if an
 * equal (cmpfn == 0) item exists - in that case "item" is NOT inserted and
 * the caller keeps ownership of it.
 *
 * lock-free: re-walks the list from the head whenever the intended
 * predecessor slot turns out to be delete-marked or the CAS loses a race.
 */
struct atomsort_item *atomsort_add(struct atomsort_head *h,
		struct atomsort_item *item, int (*cmpfn)(
			const struct atomsort_item *,
			const struct atomsort_item *))
{
	_Atomic atomptr_t *prev;
	atomptr_t prevval;
	atomptr_t i = atomptr_i(item);
	struct atomsort_item *previtem;
	int cmpval;

	do {
		prev = &h->first;

		/* walk to the first item that sorts after "item" (or end) */
		do {
			prevval = atomic_load_explicit(prev,
					memory_order_acquire);
			previtem = atomptr_p(prevval);

			if (!previtem || (cmpval = cmpfn(previtem, item)) > 0)
				break;
			if (cmpval == 0)
				/* duplicate - report it, don't insert */
				return previtem;

			prev = &previtem->next;
		} while (1);

		/* insertion point is being deleted - restart from head */
		if (atomptr_l(prevval))
			continue;

		item->next = prevval;
		/* release: item->next write above must be visible */
		if (atomic_compare_exchange_strong_explicit(prev, &prevval, i,
				memory_order_release, memory_order_relaxed))
			break;
	} while (1);

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
	return NULL;
}
236 | ||
/* unlink "item" from sorted list "h" - see atomlist_del_core() above for
 * the full protocol description; this is the atomsort twin of that code.
 *
 * precondition: caller already set the lock bit on item->next; "next" is
 * the successor observed at that point.  "hint" optionally shortens the
 * predecessor search (NULL = start at h->first).
 *
 * (no ->last handling here - atomsort has no tail pointer.)
 */
static void atomsort_del_core(struct atomsort_head *h,
		struct atomsort_item *item, _Atomic atomptr_t *hint,
		atomptr_t next)
{
	_Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
	atomptr_t prevval, updval;
	struct atomsort_item *prevptr;

	atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

	/* the following code should be identical (except sort<>list) to
	 * atomlist_del_core()
	 */
	while (1) {
		/* upd/updval: last non-deleted predecessor slot found;
		 * ATOMPTR_LOCK seed marks "none found yet".
		 */
		upd = NULL;
		updval = ATOMPTR_LOCK;

		do {
			prevval = atomic_load_explicit(prev,
					memory_order_consume);

			/* track the beginning of a chain of deleted items
			 * this is necessary to make this lock-free; we can
			 * complete deletions started by other threads.
			 */
			if (!atomptr_l(prevval)) {
				updval = prevval;
				upd = prev;
			}

			prevptr = atomsort_itemp(prevval);
			if (prevptr == item)
				break;

			prev = &prevptr->next;
		} while (prevptr);

		if (prevptr != item)
			/* another thread completed our deletion */
			return;

		if (!upd || atomptr_l(updval)) {
			/* failed to find non-deleted predecessor...
			 * have to try again
			 */
			prev = &h->first;
			continue;
		}

		/* NOTE(review): this CAS uses relaxed/relaxed while the
		 * "identical" atomlist_del_core() uses consume/consume -
		 * confirm the asymmetry is intentional.
		 */
		if (!atomic_compare_exchange_strong_explicit(upd, &updval,
				next, memory_order_relaxed,
				memory_order_relaxed)) {
			/* prev doesn't point to item anymore, something
			 * was inserted. continue at same position forward.
			 */
			continue;
		}
		break;
	}
}
297 | ||
/* delete "item" from sorted list "h", optionally starting the predecessor
 * search at "hint" (a pointer to some predecessor's ->next slot, or NULL).
 *
 * asserts against a concurrent delete of the same item (caller bug).
 */
void atomsort_del_hint(struct atomsort_head *h, struct atomsort_item *item,
		       _Atomic atomptr_t *hint)
{
	atomptr_t next;

	/* mark ourselves in-delete - full barrier
	 * NOTE(review): seq_cst here vs. acquire in atomlist_del_hint();
	 * the comment says "full barrier" so seq_cst matches the intent -
	 * confirm which one the twin should use.
	 */
	next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
			memory_order_seq_cst);
	assert(!atomptr_l(next)); /* delete race on same item */

	atomsort_del_core(h, item, hint, next);
}
310 | ||
/* remove and return the first item of sorted list "h"; NULL if empty.
 *
 * lock-free: same claim-by-lock-bit retry loop as atomlist_pop().
 */
struct atomsort_item *atomsort_pop(struct atomsort_head *h)
{
	struct atomsort_item *item;
	atomptr_t next;

	/* grab head of the list - and remember it in replval for the
	 * actual delete below. No matter what, the head of the list is
	 * where we start deleting because either it's our item, or it's
	 * some delete-marked items and then our item.
	 */
	next = atomic_load_explicit(&h->first, memory_order_consume);

	do {
		item = atomsort_itemp(next);
		if (!item)
			return NULL;

		/* try to mark deletion */
		next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
				memory_order_acquire);

	} while (atomptr_l(next));
	/* if loop is taken: delete race on same item (another pop or del)
	 * => proceed to next item
	 * if loop exited here: we have our item selected and marked
	 */
	atomsort_del_core(h, item, &h->first, next);
	return item;
}