/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #ifndef _FRR_ATOMLIST_H
18 #define _FRR_ATOMLIST_H
21 #include "frratomic.h"
/* pointer with lock/deleted/invalid bit in lowest bit
 *
 * for atomlist/atomsort, "locked" means "this pointer can't be updated, the
 * item is being deleted".  it is permissible to assume the item will indeed
 * be deleted (as there are no replace/etc. ops in this).
 *
 * in general, lowest 2/3 bits on 32/64bit architectures are available for
 * uses like this; the only thing that will really break this is putting an
 * atomlist_item in a struct with "packed" attribute.  (it'll break
 * immediately and consistently.) -- don't do that.
 *
 * ATOMPTR_USER is currently unused (and available for atomic hash or skiplist
 * implementations to use.)
 */
/* atomic_atomptr_t may look a bit odd, it's for the sake of C++ compat */
typedef uintptr_t atomptr_t;
typedef atomic_uintptr_t atomic_atomptr_t;

/* strips the two low flag bits off an atomptr_t value */
#define ATOMPTR_MASK (UINTPTR_MAX - 3)
/* bit 0: pointer locked, item is being deleted */
#define ATOMPTR_LOCK (1)
/* bit 1: reserved for users of this header (currently unused) */
#define ATOMPTR_USER (2)
/* all-zero value doubles as the NULL / end-of-list marker */
#define ATOMPTR_NULL (0)
51 static inline atomptr_t
atomptr_i(void *val
)
53 atomptr_t atomval
= (atomptr_t
)val
;
55 assert(!(atomval
& ATOMPTR_LOCK
));
58 static inline void *atomptr_p(atomptr_t val
)
60 return (void *)(val
& ATOMPTR_MASK
);
62 static inline bool atomptr_l(atomptr_t val
)
64 return (bool)(val
& ATOMPTR_LOCK
);
66 static inline bool atomptr_u(atomptr_t val
)
68 return (bool)(val
& ATOMPTR_USER
);
/* the problem with, find(), find_gteq() and find_lt() on atomic lists is that
 * they're neither an "acquire" nor a "release" operation;  the element that
 * was found is still on the list and doesn't change ownership.  Therefore,
 * an atomic transition in ownership state can't be implemented.
 *
 * Contrast this with add() or pop(): both function calls atomically transfer
 * ownership of an item to or from the list, which makes them "acquire" /
 * "release" operations.
 *
 * What can be implemented atomically is a "find_pop()", i.e. try to locate an
 * item and atomically try to remove it if found.  It's not currently
 * implemented but can be added when needed.
 *
 * Either way - for find(), generally speaking, if you need to use find() on
 * a list then the whole thing probably isn't well-suited to atomic
 * implementation and you'll need to have extra locks around to make it
 * work correctly.
 */
/* deprecation warning attached to the generated find() wrappers; silence it
 * by defining WNO_ATOMLIST_UNSAFE_FIND before including this header.
 * (the #else/#endif were missing, leaving both definitions in one branch.)
 */
#ifdef WNO_ATOMLIST_UNSAFE_FIND
# define atomic_find_warn
#else
# define atomic_find_warn __attribute__((_DEPRECATED( \
	"WARNING: find() on atomic lists cannot be atomic by principle; " \
	"check code to make sure usage pattern is OK and if it is, use " \
	"#define WNO_ATOMLIST_UNSAFE_FIND")))
#endif
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 *
 * all operations are lock-free, but not necessarily wait-free.  this means
 * that there is no state where the system as a whole stops making progress,
 * but it *is* possible that a *particular* thread is delayed by some time.
 *
 * the only way for this to happen is for other threads to continuously make
 * updates.  an inactive / blocked / deadlocked other thread cannot cause such
 * delays, and to cause such delays a thread must be heavily hitting the list -
 * it's a rather theoretical concern.
 */
113 /* don't use these structs directly */
114 struct atomlist_item
{
115 atomic_uintptr_t next
;
117 #define atomlist_itemp(val) ((struct atomlist_item *)atomptr_p(val))
119 struct atomlist_head
{
120 atomic_uintptr_t first
, last
;
/* use as:
 *
 * PREDECL_ATOMLIST(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * };
 * DECLARE_ATOMLIST(namelist, struct name, nlitem);
 */
/* forward-declare the per-list wrapper structs so they can be embedded in
 * user structs before DECLARE_ATOMLIST generates the accessors */
#define PREDECL_ATOMLIST(prefix) \
struct prefix ## _head { struct atomlist_head ah; }; \
struct prefix ## _item { struct atomlist_item ai; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer; all-zero == empty list */
#define INIT_ATOMLIST(var) { }
/* generate the type-safe accessor wrappers for an atomic list.
 * prefix: list name, type: user struct, field: prefix##_item member in type.
 * (restored missing closing braces and the `return item;` in _del.)
 */
#define DECLARE_ATOMLIST(prefix, type, field) \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{	atomlist_add_head(&h->ah, &item->field.ai); } \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{	atomlist_add_tail(&h->ah, &item->field.ai); } \
macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
		atomic_atomptr_t *hint) \
{	atomlist_del_hint(&h->ah, &item->field.ai, hint); } \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{	atomlist_del_hint(&h->ah, &item->field.ai, NULL); \
	/* TODO: Return NULL if not found */ \
	return item; } \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{	char *p = (char *)atomlist_pop(&h->ah); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _first(struct prefix##_head *h) \
{	char *p = atomptr_p(atomic_load_explicit(&h->ah.first, \
				memory_order_acquire)); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
{	char *p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
				memory_order_acquire)); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{	return item ? prefix##_next(h, item) : NULL; } \
macro_inline size_t prefix ## _count(struct prefix##_head *h) \
{	return atomic_load_explicit(&h->ah.count, memory_order_relaxed); } \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(prefix ## _count(h) == 0); \
	memset(h, 0, sizeof(*h)); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* add_head:
 * - contention on ->first pointer
 * - return implies completion
 */
void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item);
/* add_tail:
 * - concurrent add_tail can cause wait but has progress guarantee
 * - return does NOT imply completion.  completion is only guaranteed after
 *   all other add_tail operations that started before this add_tail have
 *   completed as well.
 */
void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item);
/* del_hint:
 *
 * OWNER MUST HOLD REFERENCE ON ITEM TO BE DELETED, ENSURING NO OTHER THREAD
 * WILL TRY TO DELETE THE SAME ITEM.  DELETING INCLUDES pop().
 *
 * as with all deletions, threads that started reading earlier may still hold
 * pointers to the deleted item.  completion is however guaranteed for all
 * reads starting later.
 */
void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
		       atomic_atomptr_t *hint);
/* pop:
 *
 * as with all deletions, threads that started reading earlier may still hold
 * pointers to the deleted item.  completion is however guaranteed for all
 * reads starting later.
 */
struct atomlist_item *atomlist_pop(struct atomlist_head *h);
213 struct atomsort_item
{
214 atomic_atomptr_t next
;
216 #define atomsort_itemp(val) ((struct atomsort_item *)atomptr_p(val))
218 struct atomsort_head
{
219 atomic_atomptr_t first
;
/* shared forward-declaration for both the uniq and non-uniq sorted variants;
 * use via PREDECL_ATOMSORT_UNIQ / PREDECL_ATOMSORT_NONUNIQ */
#define _PREDECL_ATOMSORT(prefix) \
struct prefix ## _head { struct atomsort_head ah; }; \
struct prefix ## _item { struct atomsort_item ai; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializers; all-zero == empty list */
#define INIT_ATOMSORT_UNIQ(var) { }
#define INIT_ATOMSORT_NONUNIQ(var) { }
/* generate the type-safe accessor wrappers shared by uniq/non-uniq sorted
 * lists.  cmpfn_nuq compares by key only; cmpfn_uq additionally tiebreaks
 * (identical for uniq lists).
 * (restored missing closing braces, the `const type *item` parameters on
 * _find_gteq/_find_lt, and the trailing return statements.)
 */
#define _DECLARE_ATOMSORT(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->ah.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct atomsort_item *p; \
	p = atomsort_add(&h->ah, &item->field.ai, cmpfn_uq); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct atomsort_item *p; \
	p = atomptr_p(atomic_load_explicit(&h->ah.first, \
				memory_order_acquire)); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct atomsort_item *p; \
	p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
				memory_order_acquire)); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	return item ? prefix##_next(h, item) : NULL; \
} \
atomic_find_warn \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	type *p = prefix ## _first(h); \
	while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
		p = prefix ## _next(h, p); \
	return p; \
} \
atomic_find_warn \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	type *p = prefix ## _first(h), *prev = NULL; \
	while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
		p = prefix ## _next(h, (prev = p)); \
	return prev; \
} \
macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
		atomic_atomptr_t *hint) \
{ \
	atomsort_del_hint(&h->ah, &item->field.ai, hint); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	atomsort_del_hint(&h->ah, &item->field.ai, NULL); \
	/* TODO: Return NULL if not found */ \
	return item; \
} \
macro_inline size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return atomic_load_explicit(&h->ah.count, memory_order_relaxed); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct atomsort_item *p = atomsort_pop(&h->ah); \
	return p ? container_of(p, type, field.ai) : NULL; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
#define PREDECL_ATOMSORT_UNIQ(prefix) \
	_PREDECL_ATOMSORT(prefix)

/* sorted list with unique keys: cmpfn must never consider distinct live
 * items equal.  find() returns NULL unless an exact match exists.
 * (restored missing braces and the `int cmpval` declaration/returns.)
 */
#define DECLARE_ATOMSORT_UNIQ(prefix, type, field, cmpfn) \
macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	return cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
} \
 \
_DECLARE_ATOMSORT(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp); \
 \
atomic_find_warn \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	type *p = prefix ## _first(h); \
	int cmpval = 0; \
	while (p && (cmpval = cmpfn(p, item)) < 0) \
		p = prefix ## _next(h, p); \
	if (!p || cmpval > 0) \
		return NULL; \
	return p; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
#define PREDECL_ATOMSORT_NONUNIQ(prefix) \
	_PREDECL_ATOMSORT(prefix)

/* sorted list allowing duplicate keys: __cmp_uq tiebreaks equal keys by the
 * items' addresses so the internal ordering is still total.
 * (restored missing braces and the truncated tiebreaker tail of __cmp_uq.)
 */
#define DECLARE_ATOMSORT_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	return cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
} \
macro_inline int prefix ## __cmp_uq(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
 \
_DECLARE_ATOMSORT(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq); \
MACRO_REQUIRE_SEMICOLON() /* end */
/* insert item into sorted position per cmpfn.  NOTE(review): presumably
 * returns NULL on success and the pre-existing duplicate when cmpfn reports
 * equality (the _add wrapper passes the result through container_of_null) —
 * confirm against the atomsort_add implementation.
 */
struct atomsort_item *atomsort_add(struct atomsort_head *h,
		struct atomsort_item *item, int (*cmpfn)(
				const struct atomsort_item *,
				const struct atomsort_item *));

/* owner must hold a reference on item, as with atomlist_del_hint */
void atomsort_del_hint(struct atomsort_head *h,
		struct atomsort_item *item, atomic_atomptr_t *hint);

/* remove and return the first (lowest-sorting) item, NULL if empty */
struct atomsort_item *atomsort_pop(struct atomsort_head *h);
374 #endif /* _FRR_ATOMLIST_H */