// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
 */

#ifndef _FRR_ATOMLIST_H
#define _FRR_ATOMLIST_H

#include "typesafe.h"
#include "frratomic.h"

#ifdef __cplusplus
extern "C" {
#endif

/* pointer with lock/deleted/invalid bit in lowest bit
 *
 * for atomlist/atomsort, "locked" means "this pointer can't be updated, the
 * item is being deleted". it is permissible to assume the item will indeed
 * be deleted (as there are no replace/etc. ops in this).
 *
 * in general, lowest 2/3 bits on 32/64bit architectures are available for
 * uses like this; the only thing that will really break this is putting an
 * atomlist_item in a struct with "packed" attribute. (it'll break
 * immediately and consistently.) -- don't do that.
 *
 * ATOMPTR_USER is currently unused (and available for atomic hash or skiplist
 * implementations.)
 */

/* atomic_atomptr_t may look a bit odd, it's for the sake of C++ compat */
typedef uintptr_t atomptr_t;
typedef atomic_uintptr_t atomic_atomptr_t;

#define ATOMPTR_MASK (UINTPTR_MAX - 3)
#define ATOMPTR_LOCK (1)
#define ATOMPTR_USER (2)
#define ATOMPTR_NULL (0)

static inline atomptr_t atomptr_i(void *val)
{
	atomptr_t atomval = (atomptr_t)val;

	assert(!(atomval & ATOMPTR_LOCK));
	return atomval;
}
static inline void *atomptr_p(atomptr_t val)
{
	return (void *)(val & ATOMPTR_MASK);
}
static inline bool atomptr_l(atomptr_t val)
{
	return (bool)(val & ATOMPTR_LOCK);
}
static inline bool atomptr_u(atomptr_t val)
{
	return (bool)(val & ATOMPTR_USER);
}
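
/* example (illustrative sketch, not part of the API): how the tag bits ride
 * along with a pointer.  "item" stands for any sufficiently-aligned object.
 *
 *	struct atomlist_item *item = ...;
 *	atomptr_t val = atomptr_i(item);        // plain pointer, lock bit clear
 *	atomptr_t locked = val | ATOMPTR_LOCK;  // same pointer, marked locked
 *
 *	assert(atomptr_p(locked) == (void *)item);  // payload unchanged
 *	assert(atomptr_l(locked));                  // lock/deleted bit set
 *	assert(!atomptr_u(locked));                 // ATOMPTR_USER bit unused
 */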

/* the problem with find(), find_gteq() and find_lt() on atomic lists is that
 * they're neither an "acquire" nor a "release" operation; the element that
 * was found is still on the list and doesn't change ownership. Therefore,
 * an atomic transition in ownership state can't be implemented.
 *
 * Contrast this with add() or pop(): both function calls atomically transfer
 * ownership of an item to or from the list, which makes them "acquire" /
 * "release" operations.
 *
 * What can be implemented atomically is a "find_pop()", i.e. try to locate an
 * item and atomically try to remove it if found. It's not currently
 * implemented but can be added when needed.
 *
 * Either way - for find(), generally speaking, if you need to use find() on
 * a list then the whole thing probably isn't well-suited to atomic
 * implementation and you'll need to have extra locks around to make it work
 * correctly.
 */
#ifdef WNO_ATOMLIST_UNSAFE_FIND
# define atomic_find_warn
#else
# define atomic_find_warn __attribute__((_DEPRECATED( \
	"WARNING: find() on atomic lists cannot be atomic by principle; " \
	"check code to make sure usage pattern is OK and if it is, use " \
	"#define WNO_ATOMLIST_UNSAFE_FIND")))
#endif

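
/* example (sketch, with a hypothetical "itemlist" atomsort type declared via
 * DECLARE_ATOMSORT_UNIQ further below): one workable "extra locks around"
 * pattern is to funnel every code path that may delete through one ordinary
 * mutex, while readers and add() stay lock-free.  The mutex guarantees the
 * item returned by find() cannot be deleted by another thread between the
 * find() and the del(); the file using find() still needs
 * #define WNO_ATOMLIST_UNSAFE_FIND to silence the warning above.
 *
 *	static pthread_mutex_t itemlist_del_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	void item_remove(struct itemlist_head *head, const struct item *ref)
 *	{
 *		struct item *found;
 *
 *		pthread_mutex_lock(&itemlist_del_lock);
 *		found = itemlist_find(head, ref);
 *		if (found)
 *			itemlist_del(head, found);
 *		pthread_mutex_unlock(&itemlist_del_lock);
 *
 *		// NB: freeing "found" must still wait until readers that
 *		// started before the del() are done with it.
 *	}
 */
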
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 *
 * all operations are lock-free, but not necessarily wait-free. this means
 * that there is no state where the system as a whole stops making progress,
 * but it *is* possible that a *particular* thread is delayed by some time.
 *
 * the only way for this to happen is for other threads to continuously make
 * updates. an inactive / blocked / deadlocked other thread cannot cause such
 * delays, and to cause such delays a thread must be heavily hitting the list -
 * it's a rather theoretical concern.
 */

/* don't use these structs directly */
struct atomlist_item {
	atomic_uintptr_t next;
};
#define atomlist_itemp(val) ((struct atomlist_item *)atomptr_p(val))

struct atomlist_head {
	atomic_uintptr_t first, last;
	atomic_size_t count;
};

/* use as:
 *
 * PREDECL_ATOMLIST(namelist);
 * struct name {
 *	struct namelist_item nlitem;
 * };
 * DECLARE_ATOMLIST(namelist, struct name, nlitem);
 */
#define PREDECL_ATOMLIST(prefix) \
struct prefix ## _head { struct atomlist_head ah; }; \
struct prefix ## _item { struct atomlist_item ai; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_ATOMLIST(var) { }

#define DECLARE_ATOMLIST(prefix, type, field) \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ atomlist_add_head(&h->ah, &item->field.ai); } \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ atomlist_add_tail(&h->ah, &item->field.ai); } \
macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
		atomic_atomptr_t *hint) \
{ atomlist_del_hint(&h->ah, &item->field.ai, hint); } \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ atomlist_del_hint(&h->ah, &item->field.ai, NULL); \
	/* TODO: Return NULL if not found */ \
	return item; } \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ char *p = (char *)atomlist_pop(&h->ah); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _first(struct prefix##_head *h) \
{ char *p = atomptr_p(atomic_load_explicit(&h->ah.first, \
		memory_order_acquire)); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
{ char *p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
		memory_order_acquire)); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ return item ? prefix##_next(h, item) : NULL; } \
macro_inline size_t prefix ## _count(struct prefix##_head *h) \
{ return atomic_load_explicit(&h->ah.count, memory_order_relaxed); } \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(prefix ## _count(h) == 0); \
	memset(h, 0, sizeof(*h)); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
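
/* usage example (sketch; "msg", "msglist" and process() are hypothetical
 * names, not part of this header): a queue where any thread may add_tail()
 * and any thread may drain with pop().
 *
 *	PREDECL_ATOMLIST(msglist);
 *
 *	struct msg {
 *		struct msglist_item mitem;
 *		int payload;
 *	};
 *
 *	DECLARE_ATOMLIST(msglist, struct msg, mitem);
 *
 *	static struct msglist_head queue;	// zero-initialized == empty
 *
 *	void producer(struct msg *m)
 *	{
 *		msglist_add_tail(&queue, m);	// lock-free, any thread
 *	}
 *
 *	void consumer(void)
 *	{
 *		struct msg *m;
 *
 *		while ((m = msglist_pop(&queue)))
 *			process(m);	// pop() transferred ownership to us
 *	}
 */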

/* add_head:
 * - contention on ->first pointer
 * - return implies completion
 */
void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item);

/* add_tail:
 * - concurrent add_tail can cause wait but has progress guarantee
 * - return does NOT imply completion. completion is only guaranteed after
 *   all other add_tail operations that started before this add_tail have
 *   completed as well.
 */
void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item);

/* del/del_hint:
 *
 * OWNER MUST HOLD REFERENCE ON ITEM TO BE DELETED, ENSURING NO OTHER THREAD
 * WILL TRY TO DELETE THE SAME ITEM. DELETING INCLUDES pop().
 *
 * as with all deletions, threads that started reading earlier may still hold
 * pointers to the deleted item. completion is however guaranteed for all
 * reads starting later.
 */
void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
		       atomic_atomptr_t *hint);

/* pop:
 *
 * as with all deletions, threads that started reading earlier may still hold
 * pointers to the deleted item. completion is however guaranteed for all
 * reads starting later.
 */
struct atomlist_item *atomlist_pop(struct atomlist_head *h);

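
/* example (sketch; "conn", "connlist" and MTYPE_CONN are hypothetical):
 * because pop() transfers ownership, a teardown path can drain the list and
 * then release the memory.  Freeing immediately is only safe once no other
 * thread can still be traversing the list (e.g. all other threads have
 * stopped, or an RCU-style grace period has passed).
 *
 *	void connlist_teardown(struct connlist_head *head)
 *	{
 *		struct conn *c;
 *
 *		while ((c = connlist_pop(head)))
 *			XFREE(MTYPE_CONN, c);
 *		connlist_fini(head);	// asserts the list is now empty
 *	}
 */
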
struct atomsort_item {
	atomic_atomptr_t next;
};
#define atomsort_itemp(val) ((struct atomsort_item *)atomptr_p(val))

struct atomsort_head {
	atomic_atomptr_t first;
	atomic_size_t count;
};

#define _PREDECL_ATOMSORT(prefix) \
struct prefix ## _head { struct atomsort_head ah; }; \
struct prefix ## _item { struct atomsort_item ai; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_ATOMSORT_UNIQ(var) { }
#define INIT_ATOMSORT_NONUNIQ(var) { }

#define _DECLARE_ATOMSORT(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->ah.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct atomsort_item *p; \
	p = atomsort_add(&h->ah, &item->field.ai, cmpfn_uq); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct atomsort_item *p; \
	p = atomptr_p(atomic_load_explicit(&h->ah.first, \
			memory_order_acquire)); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct atomsort_item *p; \
	p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
			memory_order_acquire)); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	return item ? prefix##_next(h, item) : NULL; \
} \
atomic_find_warn \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	type *p = prefix ## _first(h); \
	while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
		p = prefix ## _next(h, p); \
	return p; \
} \
atomic_find_warn \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	type *p = prefix ## _first(h), *prev = NULL; \
	while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
		p = prefix ## _next(h, (prev = p)); \
	return prev; \
} \
macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
		atomic_atomptr_t *hint) \
{ \
	atomsort_del_hint(&h->ah, &item->field.ai, hint); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	atomsort_del_hint(&h->ah, &item->field.ai, NULL); \
	/* TODO: Return NULL if not found */ \
	return item; \
} \
macro_inline size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return atomic_load_explicit(&h->ah.count, memory_order_relaxed); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct atomsort_item *p = atomsort_pop(&h->ah); \
	return p ? container_of(p, type, field.ai) : NULL; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */

#define PREDECL_ATOMSORT_UNIQ(prefix) \
	_PREDECL_ATOMSORT(prefix)
#define DECLARE_ATOMSORT_UNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	return cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
} \
 \
_DECLARE_ATOMSORT(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp); \
 \
atomic_find_warn \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	type *p = prefix ## _first(h); \
	int cmpval = 0; \
	while (p && (cmpval = cmpfn(p, item)) < 0) \
		p = prefix ## _next(h, p); \
	if (!p || cmpval > 0) \
		return NULL; \
	return p; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
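
/* usage example (sketch; "route", "rtsort" and route_cmp are hypothetical,
 * not part of this header).  For _UNIQ lists, add() is expected to return
 * NULL on success, or the already-present conflicting item without inserting.
 *
 *	PREDECL_ATOMSORT_UNIQ(rtsort);
 *
 *	struct route {
 *		struct rtsort_item sitem;
 *		uint32_t dest;
 *	};
 *
 *	static int route_cmp(const struct route *a, const struct route *b)
 *	{
 *		if (a->dest < b->dest)
 *			return -1;
 *		if (a->dest > b->dest)
 *			return 1;
 *		return 0;
 *	}
 *
 *	DECLARE_ATOMSORT_UNIQ(rtsort, struct route, sitem, route_cmp);
 *
 *	static struct rtsort_head head;
 *
 *	void route_insert(struct route *new_route)
 *	{
 *		struct route *existing = rtsort_add(&head, new_route);
 *
 *		if (existing)
 *			...	// duplicate key: new_route was not inserted
 *	}
 */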

#define PREDECL_ATOMSORT_NONUNIQ(prefix) \
	_PREDECL_ATOMSORT(prefix)
#define DECLARE_ATOMSORT_NONUNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	return cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
} \
macro_inline int prefix ## __cmp_uq(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
 \
_DECLARE_ATOMSORT(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq); \
MACRO_REQUIRE_SEMICOLON() /* end */

struct atomsort_item *atomsort_add(struct atomsort_head *h,
		struct atomsort_item *item, int (*cmpfn)(
				const struct atomsort_item *,
				const struct atomsort_item *));

void atomsort_del_hint(struct atomsort_head *h,
		struct atomsort_item *item, atomic_atomptr_t *hint);

struct atomsort_item *atomsort_pop(struct atomsort_head *h);

#ifdef __cplusplus
}
#endif

#endif /* _FRR_ATOMLIST_H */