/*
 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _FRR_ATOMLIST_H
#define _FRR_ATOMLIST_H

#include "typesafe.h"
#include "frratomic.h"

/* pointer with lock/deleted/invalid bit in lowest bit
 *
 * for atomlist/atomsort, "locked" means "this pointer can't be updated, the
 * item is being deleted". it is permissible to assume the item will indeed
 * be deleted (as there are no replace/etc. ops in this).
 *
 * in general, the lowest 2 bits (on 32-bit architectures) or 3 bits (on
 * 64-bit architectures) are available for uses like this; the only thing
 * that will really break this is putting an atomlist_item in a struct with
 * the "packed" attribute. (it'll break immediately and consistently.)
 * -- don't do that.
 *
 * ATOMPTR_USER is currently unused (and available for atomic hash or skiplist
 * implementations).
 */
typedef uintptr_t atomptr_t;
#define ATOMPTR_MASK (UINTPTR_MAX - 3)
#define ATOMPTR_LOCK (1)
#define ATOMPTR_USER (2)
#define ATOMPTR_NULL (0)

static inline atomptr_t atomptr_i(void *val)
{
	atomptr_t atomval = (atomptr_t)val;

	assert(!(atomval & ATOMPTR_LOCK));
	return atomval;
}
static inline void *atomptr_p(atomptr_t val)
{
	return (void *)(val & ATOMPTR_MASK);
}
static inline bool atomptr_l(atomptr_t val)
{
	return (bool)(val & ATOMPTR_LOCK);
}
static inline bool atomptr_u(atomptr_t val)
{
	return (bool)(val & ATOMPTR_USER);
}
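
/* illustrative sketch only (not part of the API above): loading a tagged
 * pointer and splitting it back into its parts. "nextptr" stands for a
 * hypothetical _Atomic atomptr_t field, e.g. the ->next of a list item.
 *
 *	atomptr_t val = atomic_load_explicit(&nextptr, memory_order_acquire);
 *	struct atomlist_item *next = atomptr_p(val);
 *
 *	if (atomptr_l(val))
 *		;	// item is being deleted, this pointer must not be updated
 *
 * atomptr_i() goes the opposite way, wrapping a plain (unlocked) pointer
 * into an atomptr_t.
 */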

/* the problem with find(), find_gteq() and find_lt() on atomic lists is that
 * they're neither an "acquire" nor a "release" operation; the element that
 * was found is still on the list and doesn't change ownership. Therefore,
 * an atomic transition in ownership state can't be implemented.
 *
 * Contrast this with add() or pop(): both function calls atomically transfer
 * ownership of an item to or from the list, which makes them "acquire" /
 * "release" operations.
 *
 * What can be implemented atomically is a "find_pop()", i.e. try to locate an
 * item and atomically try to remove it if found. It's not currently
 * implemented but can be added when needed.
 *
 * Either way: generally speaking, if you need to use find() on a list, the
 * whole thing probably isn't well-suited to an atomic implementation, and
 * you'll need extra locks around it to make it work correctly.
 */
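/* hedged sketch of the "extra locks" pattern mentioned above, in case find()
 * is needed anyway. "itemsort" / "struct item" are hypothetical names as they
 * would be generated by DECLARE_ATOMSORT_UNIQ further below, and "list_mutex"
 * is a caller-owned lock that every writer of the list must also take:
 *
 *	pthread_mutex_lock(&list_mutex);
 *	struct item *it = itemsort_find(&head, &ref);
 *	if (it)
 *		use_item(it);	// only safe while list_mutex is held
 *	pthread_mutex_unlock(&list_mutex);
 */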
#ifdef WNO_ATOMLIST_UNSAFE_FIND
# define atomic_find_warn
#else
# define atomic_find_warn __attribute__((_DEPRECATED( \
	"WARNING: find() on atomic lists cannot be atomic by principle; " \
	"check code to make sure usage pattern is OK and if it is, use " \
	"#define WNO_ATOMLIST_UNSAFE_FIND")))
#endif

/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 *
 * all operations are lock-free, but not necessarily wait-free. this means
 * that there is no state in which the system as a whole stops making
 * progress, but it *is* possible that a *particular* thread is delayed by
 * some amount of time.
 *
 * the only way for this to happen is for other threads to continuously make
 * updates. an inactive / blocked / deadlocked other thread cannot cause such
 * delays, and to cause such delays a thread must be heavily hitting the list -
 * it's a rather theoretical concern.
 */

/* don't use these structs directly */
struct atomlist_item {
	_Atomic atomptr_t next;
};
#define atomlist_itemp(val) ((struct atomlist_item *)atomptr_p(val))

struct atomlist_head {
	_Atomic atomptr_t first, last;
	_Atomic size_t count;
};

/* use as:
 *
 * PREDECL_ATOMLIST(namelist)
 * struct name {
 *	struct namelist_item nlitem;
 * };
 * DECLARE_ATOMLIST(namelist, struct name, nlitem)
 */
#define PREDECL_ATOMLIST(prefix) \
struct prefix ## _head { struct atomlist_head ah; }; \
struct prefix ## _item { struct atomlist_item ai; };

#define INIT_ATOMLIST(var) { }

#define DECLARE_ATOMLIST(prefix, type, field) \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{	atomlist_add_head(&h->ah, &item->field.ai); } \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{	atomlist_add_tail(&h->ah, &item->field.ai); } \
macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
		_Atomic atomptr_t *hint) \
{	atomlist_del_hint(&h->ah, &item->field.ai, hint); } \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{	atomlist_del_hint(&h->ah, &item->field.ai, NULL); \
	/* TODO: Return NULL if not found */ \
	return item; } \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{	char *p = (char *)atomlist_pop(&h->ah); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _first(struct prefix##_head *h) \
{	char *p = atomptr_p(atomic_load_explicit(&h->ah.first, \
			memory_order_acquire)); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
{	char *p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
			memory_order_acquire)); \
	return p ? (type *)(p - offsetof(type, field)) : NULL; } \
macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{	return item ? prefix##_next(h, item) : NULL; } \
macro_inline size_t prefix ## _count(struct prefix##_head *h) \
{	return atomic_load_explicit(&h->ah.count, memory_order_relaxed); } \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(prefix ## _count(h) == 0); \
	memset(h, 0, sizeof(*h)); \
} \
/* ... */

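/* usage sketch for the generated functions, continuing the hypothetical
 * "namelist" / struct name declaration from the "use as:" comment above
 * ("process()" is a placeholder, not part of this header):
 *
 *	struct namelist_head head;
 *	struct name *n;
 *
 *	namelist_init(&head);
 *	namelist_add_tail(&head, n);	// n: caller-allocated item; MT-safe
 *	while ((n = namelist_pop(&head)))
 *		process(n);		// MT-safe dequeue
 *	namelist_fini(&head);		// list must be empty at this point
 */
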
/* add_head:
 * - contention on ->first pointer
 * - return implies completion
 */
void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item);

/* add_tail:
 * - concurrent add_tail can cause wait but has progress guarantee
 * - return does NOT imply completion. completion is only guaranteed after
 *   all other add_tail operations that started before this add_tail have
 *   completed as well.
 */
void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item);

/* del/del_hint:
 *
 * OWNER MUST HOLD REFERENCE ON ITEM TO BE DELETED, ENSURING NO OTHER THREAD
 * WILL TRY TO DELETE THE SAME ITEM. DELETING INCLUDES pop().
 *
 * as with all deletions, threads that started reading earlier may still hold
 * pointers to the deleted item. completion is however guaranteed for all
 * reads starting later.
 */
void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
		_Atomic atomptr_t *hint);

/* pop:
 *
 * as with all deletions, threads that started reading earlier may still hold
 * pointers to the deleted item. completion is however guaranteed for all
 * reads starting later.
 */
struct atomlist_item *atomlist_pop(struct atomlist_head *h);


struct atomsort_item {
	_Atomic atomptr_t next;
};
#define atomsort_itemp(val) ((struct atomsort_item *)atomptr_p(val))

struct atomsort_head {
	_Atomic atomptr_t first;
	_Atomic size_t count;
};

#define _PREDECL_ATOMSORT(prefix) \
struct prefix ## _head { struct atomsort_head ah; }; \
struct prefix ## _item { struct atomsort_item ai; };

#define INIT_ATOMSORT_UNIQ(var) { }
#define INIT_ATOMSORT_NONUNIQ(var) { }

#define _DECLARE_ATOMSORT(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->ah.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct atomsort_item *p; \
	p = atomsort_add(&h->ah, &item->field.ai, cmpfn_uq); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct atomsort_item *p; \
	p = atomptr_p(atomic_load_explicit(&h->ah.first, \
			memory_order_acquire)); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct atomsort_item *p; \
	p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
			memory_order_acquire)); \
	return container_of_null(p, type, field.ai); \
} \
macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	return item ? prefix##_next(h, item) : NULL; \
} \
atomic_find_warn \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	type *p = prefix ## _first(h); \
	while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
		p = prefix ## _next(h, p); \
	return p; \
} \
atomic_find_warn \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	type *p = prefix ## _first(h), *prev = NULL; \
	while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
		p = prefix ## _next(h, (prev = p)); \
	return prev; \
} \
macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
		_Atomic atomptr_t *hint) \
{ \
	atomsort_del_hint(&h->ah, &item->field.ai, hint); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	atomsort_del_hint(&h->ah, &item->field.ai, NULL); \
	/* TODO: Return NULL if not found */ \
	return item; \
} \
macro_inline size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return atomic_load_explicit(&h->ah.count, memory_order_relaxed); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct atomsort_item *p = atomsort_pop(&h->ah); \
	return p ? container_of(p, type, field.ai) : NULL; \
} \
/* ... */

#define PREDECL_ATOMSORT_UNIQ(prefix) \
	_PREDECL_ATOMSORT(prefix)
#define DECLARE_ATOMSORT_UNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	return cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
} \
\
_DECLARE_ATOMSORT(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp) \
\
atomic_find_warn \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	type *p = prefix ## _first(h); \
	int cmpval = 0; \
	while (p && (cmpval = cmpfn(p, item)) < 0) \
		p = prefix ## _next(h, p); \
	if (!p || cmpval > 0) \
		return NULL; \
	return p; \
} \
/* ... */

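/* use as (mirroring the atomlist "use as:" comment above; "item_cmp" is a
 * hypothetical comparison function on the user's struct):
 *
 * PREDECL_ATOMSORT_UNIQ(itemsort)
 * struct item {
 *	int key;
 *	struct itemsort_item sitem;
 * };
 * static int item_cmp(const struct item *a, const struct item *b)
 * {
 *	return (a->key > b->key) - (a->key < b->key);
 * }
 * DECLARE_ATOMSORT_UNIQ(itemsort, struct item, sitem, item_cmp)
 */
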
#define PREDECL_ATOMSORT_NONUNIQ(prefix) \
	_PREDECL_ATOMSORT(prefix)
#define DECLARE_ATOMSORT_NONUNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	return cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
} \
macro_inline int prefix ## __cmp_uq(const struct atomsort_item *a, \
		const struct atomsort_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.ai), \
			container_of(b, type, field.ai)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
\
_DECLARE_ATOMSORT(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq) \
/* ... */

struct atomsort_item *atomsort_add(struct atomsort_head *h,
		struct atomsort_item *item, int (*cmpfn)(
				const struct atomsort_item *,
				const struct atomsort_item *));

void atomsort_del_hint(struct atomsort_head *h,
		struct atomsort_item *item, _Atomic atomptr_t *hint);

struct atomsort_item *atomsort_pop(struct atomsort_head *h);

#endif /* _FRR_ATOMLIST_H */