/*
 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _FRR_ATOMLIST_H
#define _FRR_ATOMLIST_H

#include "typesafe.h"
#include "frratomic.h"

#ifdef __cplusplus
extern "C" {
#endif

/* pointer with lock/deleted/invalid bit in lowest bit
 *
 * for atomlist/atomsort, "locked" means "this pointer can't be updated, the
 * item is being deleted". it is permissible to assume the item will indeed
 * be deleted (as there are no replace/etc. ops in this).
 *
 * in general, the lowest 2 bits (on 32-bit) / 3 bits (on 64-bit
 * architectures) of a pointer are available for uses like this; the only
 * thing that will really break this is putting an atomlist_item in a struct
 * with the "packed" attribute. (it'll break immediately and consistently.)
 * -- don't do that.
 *
 * ATOMPTR_USER is currently unused (and available for atomic hash or skiplist
 * implementations.)
 */

/* atomic_atomptr_t may look a bit odd; it's for the sake of C++ compat */
typedef uintptr_t atomptr_t;
typedef atomic_uintptr_t atomic_atomptr_t;

#define ATOMPTR_MASK (UINTPTR_MAX - 3)
#define ATOMPTR_LOCK (1)
#define ATOMPTR_USER (2)
#define ATOMPTR_NULL (0)

static inline atomptr_t atomptr_i(void *val)
{
	atomptr_t atomval = (atomptr_t)val;

	assert(!(atomval & ATOMPTR_LOCK));
	return atomval;
}
static inline void *atomptr_p(atomptr_t val)
{
	return (void *)(val & ATOMPTR_MASK);
}
static inline bool atomptr_l(atomptr_t val)
{
	return (bool)(val & ATOMPTR_LOCK);
}
static inline bool atomptr_u(atomptr_t val)
{
	return (bool)(val & ATOMPTR_USER);
}

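/* illustrative sketch (not part of the API): how the tag bits above combine
 * with the helpers. "item" is a hypothetical, suitably aligned object; only
 * atomptr_i/atomptr_p/atomptr_l and ATOMPTR_LOCK come from this header.
 *
 *   struct atomlist_item *item = ...;
 *   atomptr_t val = atomptr_i(item);        // pointer -> tagged value,
 *                                           // asserts the lock bit is clear
 *   atomptr_t locked = val | ATOMPTR_LOCK;  // mark as "being deleted"
 *
 *   assert(atomptr_p(locked) == (void *)item); // payload bits are untouched
 *   assert(atomptr_l(locked));                 // lock bit is observable
 */
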
/* the problem with find(), find_gteq() and find_lt() on atomic lists is that
 * they're neither an "acquire" nor a "release" operation; the element that
 * was found is still on the list and doesn't change ownership. Therefore,
 * an atomic transition in ownership state can't be implemented.
 *
 * Contrast this with add() or pop(): both function calls atomically transfer
 * ownership of an item to or from the list, which makes them "acquire" /
 * "release" operations.
 *
 * What can be implemented atomically is a "find_pop()", i.e. try to locate an
 * item and atomically try to remove it if found. It's not currently
 * implemented but can be added when needed.
 *
 * Either way: generally speaking, if you need to use find() on a list, the
 * whole thing probably isn't well-suited to an atomic implementation, and
 * you'll need extra locks around it to make it work correctly.
 */
#ifdef WNO_ATOMLIST_UNSAFE_FIND
# define atomic_find_warn
#else
# define atomic_find_warn __attribute__((_DEPRECATED( \
	"WARNING: find() on atomic lists cannot be atomic by principle; " \
	"check code to make sure usage pattern is OK and if it is, use " \
	"#define WNO_ATOMLIST_UNSAFE_FIND")))
#endif
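
/* hedged example of the caveat above: "itemlist" / "struct item" are
 * hypothetical names created with PREDECL_ATOMLIST/DECLARE_ATOMLIST below,
 * and owner_lock is a caller-owned pthread mutex; none of this is provided
 * by this header.
 *
 *   pthread_mutex_lock(&owner_lock);          // caller-side serialization
 *   struct item *it = itemlist_first(&head);  // only a snapshot; without the
 *                                             // lock, another thread could
 *                                             // delete "it" right after the
 *                                             // call returns
 *   if (it)
 *           use_item(it);                     // safe only because owner_lock
 *                                             // keeps concurrent deleters out
 *   pthread_mutex_unlock(&owner_lock);
 */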


/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 *
 * all operations are lock-free, but not necessarily wait-free. this means
 * that there is no state in which the system as a whole stops making
 * progress, but it *is* possible that a *particular* thread is delayed by
 * some amount of time.
 *
 * the only way for this to happen is for other threads to continuously make
 * updates. an inactive / blocked / deadlocked other thread cannot cause such
 * delays, and to cause such delays a thread must be heavily hitting the list -
 * it's a rather theoretical concern.
 */

/* don't use these structs directly */
struct atomlist_item {
	atomic_uintptr_t next;
};
#define atomlist_itemp(val) ((struct atomlist_item *)atomptr_p(val))

struct atomlist_head {
	atomic_uintptr_t first, last;
	atomic_size_t count;
};

/* use as:
 *
 * PREDECL_ATOMLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * };
 * DECLARE_ATOMLIST(namelist, struct name, nlitem)
 */
#define PREDECL_ATOMLIST(prefix) \
struct prefix ## _head { struct atomlist_head ah; }; \
struct prefix ## _item { struct atomlist_item ai; };

#define INIT_ATOMLIST(var) { }

#define DECLARE_ATOMLIST(prefix, type, field) \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ atomlist_add_head(&h->ah, &item->field.ai); } \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ atomlist_add_tail(&h->ah, &item->field.ai); } \
macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
		atomic_atomptr_t *hint) \
{ atomlist_del_hint(&h->ah, &item->field.ai, hint); } \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ atomlist_del_hint(&h->ah, &item->field.ai, NULL); \
	/* TODO: Return NULL if not found */ \
	return item; } \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ char *p = (char *)atomlist_pop(&h->ah); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _first(struct prefix##_head *h) \
{ char *p = atomptr_p(atomic_load_explicit(&h->ah.first, \
		memory_order_acquire)); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
{ char *p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
		memory_order_acquire)); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ return item ? prefix##_next(h, item) : NULL; } \
macro_inline size_t prefix ## _count(struct prefix##_head *h) \
{ return atomic_load_explicit(&h->ah.count, memory_order_relaxed); } \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(prefix ## _count(h) == 0); \
	memset(h, 0, sizeof(*h)); \
} \
/* ... */
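
/* worked example of the PREDECL/DECLARE pattern above; "itemlist",
 * "struct item" and the producer/consumer functions are hypothetical names
 * used only for illustration:
 *
 *   PREDECL_ATOMLIST(itemlist)
 *   struct item {
 *           uint32_t val;
 *           struct itemlist_item chain;    // embedded list node
 *   };
 *   DECLARE_ATOMLIST(itemlist, struct item, chain)
 *
 *   static struct itemlist_head head;      // zero-initialized == empty list
 *
 *   void produce(struct item *it)          // any thread, no external lock
 *   {
 *           itemlist_add_tail(&head, it);  // FIFO together with _pop()
 *   }
 *
 *   struct item *consume(void)             // any thread, no external lock
 *   {
 *           return itemlist_pop(&head);    // NULL when the list is empty
 *   }
 */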

/* add_head:
 * - contention on ->first pointer
 * - return implies completion
 */
void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item);

/* add_tail:
 * - concurrent add_tail can cause wait but has progress guarantee
 * - return does NOT imply completion. completion is only guaranteed after
 *   all other add_tail operations that started before this add_tail have
 *   completed as well.
 */
void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item);

/* del/del_hint:
 *
 * OWNER MUST HOLD REFERENCE ON ITEM TO BE DELETED, ENSURING NO OTHER THREAD
 * WILL TRY TO DELETE THE SAME ITEM. DELETING INCLUDES pop().
 *
 * as with all deletions, threads that started reading earlier may still hold
 * pointers to the deleted item. completion is however guaranteed for all
 * reads starting later.
 */
void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
		       atomic_atomptr_t *hint);

/* pop:
 *
 * as with all deletions, threads that started reading earlier may still hold
 * pointers to the deleted item. completion is however guaranteed for all
 * reads starting later.
 */
struct atomlist_item *atomlist_pop(struct atomlist_head *h);


struct atomsort_item {
	atomic_atomptr_t next;
};
#define atomsort_itemp(val) ((struct atomsort_item *)atomptr_p(val))

struct atomsort_head {
	atomic_atomptr_t first;
	atomic_size_t count;
};

#define _PREDECL_ATOMSORT(prefix) \
struct prefix ## _head { struct atomsort_head ah; }; \
struct prefix ## _item { struct atomsort_item ai; };

#define INIT_ATOMSORT_UNIQ(var) { }
#define INIT_ATOMSORT_NONUNIQ(var) { }

#define _DECLARE_ATOMSORT(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->ah.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct atomsort_item *p; \
	p = atomsort_add(&h->ah, &item->field.ai, cmpfn_uq); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct atomsort_item *p; \
	p = atomptr_p(atomic_load_explicit(&h->ah.first, \
			memory_order_acquire)); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct atomsort_item *p; \
	p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
			memory_order_acquire)); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	return item ? prefix##_next(h, item) : NULL; \
} \
atomic_find_warn \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	type *p = prefix ## _first(h); \
	while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
		p = prefix ## _next(h, p); \
	return p; \
} \
atomic_find_warn \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	type *p = prefix ## _first(h), *prev = NULL; \
	while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
		p = prefix ## _next(h, (prev = p)); \
	return prev; \
} \
macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
		atomic_atomptr_t *hint) \
{ \
	atomsort_del_hint(&h->ah, &item->field.ai, hint); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	atomsort_del_hint(&h->ah, &item->field.ai, NULL); \
	/* TODO: Return NULL if not found */ \
	return item; \
} \
macro_inline size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return atomic_load_explicit(&h->ah.count, memory_order_relaxed); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct atomsort_item *p = atomsort_pop(&h->ah); \
	return p ? container_of(p, type, field.ai) : NULL; \
} \
/* ... */

#define PREDECL_ATOMSORT_UNIQ(prefix) \
	_PREDECL_ATOMSORT(prefix)
#define DECLARE_ATOMSORT_UNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	return cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
} \
\
_DECLARE_ATOMSORT(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp) \
\
atomic_find_warn \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	type *p = prefix ## _first(h); \
	int cmpval = 0; \
	while (p && (cmpval = cmpfn(p, item)) < 0) \
		p = prefix ## _next(h, p); \
	if (!p || cmpval > 0) \
		return NULL; \
	return p; \
} \
/* ... */
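
/* worked example for the sorted/unique variant; "nodelist", "struct node"
 * and node_cmp are hypothetical illustration-only names:
 *
 *   PREDECL_ATOMSORT_UNIQ(nodelist)
 *   struct node {
 *           int key;
 *           struct nodelist_item sortnode;
 *   };
 *   static int node_cmp(const struct node *a, const struct node *b)
 *   {
 *           return (a->key > b->key) - (a->key < b->key);
 *   }
 *   DECLARE_ATOMSORT_UNIQ(nodelist, struct node, sortnode, node_cmp)
 *
 *   static struct nodelist_head head;
 *   struct node *n;
 *
 *   nodelist_add(&head, new_node);        // inserted at its sorted position
 *   for (n = nodelist_first(&head); n; n = nodelist_next(&head, n))
 *           use_node(n);                  // visits nodes in node_cmp order
 */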

#define PREDECL_ATOMSORT_NONUNIQ(prefix) \
	_PREDECL_ATOMSORT(prefix)
#define DECLARE_ATOMSORT_NONUNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	return cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
} \
macro_inline int prefix ## __cmp_uq(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
\
_DECLARE_ATOMSORT(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq) \
/* ... */

struct atomsort_item *atomsort_add(struct atomsort_head *h,
		struct atomsort_item *item, int (*cmpfn)(
				const struct atomsort_item *,
				const struct atomsort_item *));

void atomsort_del_hint(struct atomsort_head *h,
		struct atomsort_item *item, atomic_atomptr_t *hint);

struct atomsort_item *atomsort_pop(struct atomsort_head *h);

#ifdef __cplusplus
}
#endif

#endif /* _FRR_ATOMLIST_H */