--- /dev/null
+++ b/lib/atomlist.c
+/*
+ * Copyright (c) 2016-2018 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "atomlist.h"
+
+void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item)
+{
+ atomptr_t prevval;
+ atomptr_t i = atomptr_i(item);
+
+ atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* updating ->last is possible here, but makes the code considerably
+ * more complicated... let's not.
+ */
+ prevval = ATOMPTR_NULL;
+ item->next = ATOMPTR_NULL;
+
+ /* head-insert atomically
+ * release barrier: item + item->next writes must be completed
+ */
+ while (!atomic_compare_exchange_weak_explicit(&h->first, &prevval, i,
+ memory_order_release, memory_order_relaxed))
+ atomic_store_explicit(&item->next, prevval,
+ memory_order_relaxed);
+}
+
+void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item)
+{
+ atomptr_t prevval = ATOMPTR_NULL;
+ atomptr_t i = atomptr_i(item);
+ atomptr_t hint;
+ struct atomlist_item *prevptr;
+ _Atomic atomptr_t *prev;
+
+ item->next = ATOMPTR_NULL;
+
+ atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* place new item into ->last
+	 * release: item writes completed; acquire: data dependency barrier on hint
+ */
+ hint = atomic_exchange_explicit(&h->last, i, memory_order_acq_rel);
+
+ while (1) {
+ if (atomptr_p(hint) == NULL)
+ prev = &h->first;
+ else
+ prev = &atomlist_itemp(hint)->next;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_consume);
+ prevptr = atomlist_itemp(prevval);
+ if (prevptr == NULL)
+ break;
+
+ prev = &prevptr->next;
+ } while (prevptr);
+
+ /* last item is being deleted - start over */
+ if (atomptr_l(prevval)) {
+ hint = ATOMPTR_NULL;
+ continue;
+ }
+
+ /* no barrier - item->next is NULL and was so in xchg above */
+ if (!atomic_compare_exchange_strong_explicit(prev, &prevval, i,
+ memory_order_consume,
+ memory_order_consume)) {
+ hint = prevval;
+ continue;
+ }
+ break;
+ }
+}
+
+static void atomlist_del_core(struct atomlist_head *h,
+ struct atomlist_item *item,
+ _Atomic atomptr_t *hint,
+ atomptr_t next)
+{
+ _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
+ atomptr_t prevval, updval;
+ struct atomlist_item *prevptr;
+
+	/* drop us off "last" if needed.  no r/w access that needs a barrier. */
+ prevval = atomptr_i(item);
+ atomic_compare_exchange_strong_explicit(&h->last, &prevval,
+ ATOMPTR_NULL,
+ memory_order_relaxed, memory_order_relaxed);
+
+ atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* the following code should be identical (except sort<>list) to
+ * atomsort_del_hint()
+ */
+ while (1) {
+ upd = NULL;
+ updval = ATOMPTR_LOCK;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_consume);
+
+ /* track the beginning of a chain of deleted items
+		 * this is necessary to make this lock-free; we can
+ * complete deletions started by other threads.
+ */
+ if (!atomptr_l(prevval)) {
+ updval = prevval;
+ upd = prev;
+ }
+
+ prevptr = atomlist_itemp(prevval);
+ if (prevptr == item)
+ break;
+
+ prev = &prevptr->next;
+ } while (prevptr);
+
+ if (prevptr != item)
+ /* another thread completed our deletion */
+ return;
+
+ if (!upd || atomptr_l(updval)) {
+ /* failed to find non-deleted predecessor...
+ * have to try again
+ */
+ prev = &h->first;
+ continue;
+ }
+
+ if (!atomic_compare_exchange_strong_explicit(upd, &updval,
+ next, memory_order_consume,
+ memory_order_consume)) {
+ /* prev doesn't point to item anymore, something
+ * was inserted. continue at same position forward.
+ */
+ continue;
+ }
+ break;
+ }
+}
+
+void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
+ _Atomic atomptr_t *hint)
+{
+ atomptr_t next;
+
+ /* mark ourselves in-delete - full barrier */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_acquire);
+ assert(!atomptr_l(next)); /* delete race on same item */
+
+ atomlist_del_core(h, item, hint, next);
+}
+
+struct atomlist_item *atomlist_pop(struct atomlist_head *h)
+{
+ struct atomlist_item *item;
+ atomptr_t next;
+
+	/* grab head of the list - and remember it in "next" for the
+ * actual delete below. No matter what, the head of the list is
+ * where we start deleting because either it's our item, or it's
+ * some delete-marked items and then our item.
+ */
+ next = atomic_load_explicit(&h->first, memory_order_consume);
+
+ do {
+ item = atomlist_itemp(next);
+ if (!item)
+ return NULL;
+
+ /* try to mark deletion */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_acquire);
+
+ } while (atomptr_l(next));
+ /* if loop is taken: delete race on same item (another pop or del)
+ * => proceed to next item
+ * if loop exited here: we have our item selected and marked
+ */
+ atomlist_del_core(h, item, &h->first, next);
+ return item;
+}
+
+struct atomsort_item *atomsort_add(struct atomsort_head *h,
+ struct atomsort_item *item, int (*cmpfn)(
+ const struct atomsort_item *,
+ const struct atomsort_item *))
+{
+ _Atomic atomptr_t *prev;
+ atomptr_t prevval;
+ atomptr_t i = atomptr_i(item);
+ struct atomsort_item *previtem;
+ int cmpval;
+
+ do {
+ prev = &h->first;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_acquire);
+ previtem = atomptr_p(prevval);
+
+ if (!previtem || (cmpval = cmpfn(previtem, item)) > 0)
+ break;
+ if (cmpval == 0)
+ return previtem;
+
+ prev = &previtem->next;
+ } while (1);
+
+ if (atomptr_l(prevval))
+ continue;
+
+ item->next = prevval;
+ if (atomic_compare_exchange_strong_explicit(prev, &prevval, i,
+ memory_order_release, memory_order_relaxed))
+ break;
+ } while (1);
+
+ atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
+ return NULL;
+}
+
+static void atomsort_del_core(struct atomsort_head *h,
+ struct atomsort_item *item, _Atomic atomptr_t *hint,
+ atomptr_t next)
+{
+ _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
+ atomptr_t prevval, updval;
+ struct atomsort_item *prevptr;
+
+ atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* the following code should be identical (except sort<>list) to
+ * atomlist_del_core()
+ */
+ while (1) {
+ upd = NULL;
+ updval = ATOMPTR_LOCK;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_consume);
+
+ /* track the beginning of a chain of deleted items
+		 * this is necessary to make this lock-free; we can
+ * complete deletions started by other threads.
+ */
+ if (!atomptr_l(prevval)) {
+ updval = prevval;
+ upd = prev;
+ }
+
+ prevptr = atomsort_itemp(prevval);
+ if (prevptr == item)
+ break;
+
+ prev = &prevptr->next;
+ } while (prevptr);
+
+ if (prevptr != item)
+ /* another thread completed our deletion */
+ return;
+
+ if (!upd || atomptr_l(updval)) {
+ /* failed to find non-deleted predecessor...
+ * have to try again
+ */
+ prev = &h->first;
+ continue;
+ }
+
+ if (!atomic_compare_exchange_strong_explicit(upd, &updval,
+ next, memory_order_relaxed,
+ memory_order_relaxed)) {
+ /* prev doesn't point to item anymore, something
+ * was inserted. continue at same position forward.
+ */
+ continue;
+ }
+ break;
+ }
+}
+
+void atomsort_del_hint(struct atomsort_head *h, struct atomsort_item *item,
+ _Atomic atomptr_t *hint)
+{
+ atomptr_t next;
+
+ /* mark ourselves in-delete - full barrier */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_seq_cst);
+ assert(!atomptr_l(next)); /* delete race on same item */
+
+ atomsort_del_core(h, item, hint, next);
+}
+
+struct atomsort_item *atomsort_pop(struct atomsort_head *h)
+{
+ struct atomsort_item *item;
+ atomptr_t next;
+
+	/* grab head of the list - and remember it in "next" for the
+ * actual delete below. No matter what, the head of the list is
+ * where we start deleting because either it's our item, or it's
+ * some delete-marked items and then our item.
+ */
+ next = atomic_load_explicit(&h->first, memory_order_consume);
+
+ do {
+ item = atomsort_itemp(next);
+ if (!item)
+ return NULL;
+
+ /* try to mark deletion */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_acquire);
+
+ } while (atomptr_l(next));
+ /* if loop is taken: delete race on same item (another pop or del)
+ * => proceed to next item
+ * if loop exited here: we have our item selected and marked
+ */
+ atomsort_del_core(h, item, &h->first, next);
+ return item;
+}
--- /dev/null
+++ b/lib/atomlist.h
+/*
+ * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _FRR_ATOMLIST_H
+#define _FRR_ATOMLIST_H
+
+#include "typesafe.h"
+#include "frratomic.h"
+
+/* pointer with lock/deleted/invalid bit in lowest bit
+ *
+ * for atomlist/atomsort, "locked" means "this pointer can't be updated, the
+ * item is being deleted". it is permissible to assume the item will indeed
+ * be deleted (as there are no replace/etc. ops in this).
+ *
+ * in general, the lowest 2 or 3 bits (on 32-bit / 64-bit architectures) are
+ * available for uses like this; the only thing that will really break it is
+ * putting an atomlist_item in a struct with "packed" attribute. (it'll break
+ * immediately and consistently.) -- don't do that.
+ *
+ * ATOMPTR_USER is currently unused (and available for atomic hash or skiplist
+ * implementations.)
+ */
+typedef uintptr_t atomptr_t;
+#define ATOMPTR_MASK (UINTPTR_MAX - 3)
+#define ATOMPTR_LOCK (1)
+#define ATOMPTR_USER (2)
+#define ATOMPTR_NULL (0)
+
+static inline atomptr_t atomptr_i(void *val)
+{
+ atomptr_t atomval = (atomptr_t)val;
+
+ assert(!(atomval & ATOMPTR_LOCK));
+ return atomval;
+}
+static inline void *atomptr_p(atomptr_t val)
+{
+ return (void *)(val & ATOMPTR_MASK);
+}
+static inline bool atomptr_l(atomptr_t val)
+{
+ return (bool)(val & ATOMPTR_LOCK);
+}
+static inline bool atomptr_u(atomptr_t val)
+{
+ return (bool)(val & ATOMPTR_USER);
+}
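+
+/* illustrative sketch of how the tag bits behave (it = any correctly
+ * aligned item pointer; purely an example, not additional API):
+ *
+ *   atomptr_t a = atomptr_i(it);          // plain pointer, lock bit clear
+ *   atomptr_p(a | ATOMPTR_LOCK) == it;    // pointer survives the tag bits
+ *   atomptr_l(a | ATOMPTR_LOCK) == true;  // "locked" = item being deleted
+ *   atomptr_u(a | ATOMPTR_USER) == true;  // user bit, unused here
+ */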
+
+
+/* the problem with find(), find_gteq() and find_lt() on atomic lists is that
+ * they're neither an "acquire" nor a "release" operation; the element that
+ * was found is still on the list and doesn't change ownership. Therefore,
+ * an atomic transition in ownership state can't be implemented.
+ *
+ * Contrast this with add() or pop(): both function calls atomically transfer
+ * ownership of an item to or from the list, which makes them "acquire" /
+ * "release" operations.
+ *
+ * What can be implemented atomically is a "find_pop()", i.e. try to locate an
+ * item and atomically try to remove it if found. It's not currently
+ * implemented but can be added when needed.
+ *
+ * Either way: generally speaking, if you need to use find() on a list, the
+ * whole thing probably isn't well-suited to an atomic implementation, and
+ * you'll need extra locks around it to make it work correctly.
+ */
+#ifdef WNO_ATOMLIST_UNSAFE_FIND
+# define atomic_find_warn
+#else
+# define atomic_find_warn __attribute__((_DEPRECATED( \
+ "WARNING: find() on atomic lists cannot be atomic by principle; " \
+ "check code to make sure usage pattern is OK and if it is, use " \
+ "#define WNO_ATOMLIST_UNSAFE_FIND")))
+#endif
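+
+/* illustrative sketch of suppressing the warning: if (and only if) all
+ * find() users are protected by an external lock, the including file can
+ * opt out before pulling in this header:
+ *
+ *   #define WNO_ATOMLIST_UNSAFE_FIND
+ *   #include "atomlist.h"
+ */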
+
+
+/* single-linked list, unsorted/arbitrary.
+ * can be used as queue with add_tail / pop
+ *
+ * all operations are lock-free, but not necessarily wait-free.  this means
+ * that there is no state where the system as a whole stops making progress,
+ * but it *is* possible that a *particular* thread is delayed by some time.
+ *
+ * the only way for this to happen is for other threads to continuously make
+ * updates.  an inactive / blocked / deadlocked thread cannot cause such
+ * delays; causing them requires a thread to be heavily hitting the list, so
+ * this is a rather theoretical concern.
+ */
+
+/* don't use these structs directly */
+struct atomlist_item {
+ _Atomic atomptr_t next;
+};
+#define atomlist_itemp(val) ((struct atomlist_item *)atomptr_p(val))
+
+struct atomlist_head {
+ _Atomic atomptr_t first, last;
+ _Atomic size_t count;
+};
+
+/* use as:
+ *
+ * PREDECL_ATOMLIST(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_ATOMLIST(namelist, struct name, nlitem)
+ */
+#define PREDECL_ATOMLIST(prefix) \
+struct prefix ## _head { struct atomlist_head ah; }; \
+struct prefix ## _item { struct atomlist_item ai; };
+
+#define INIT_ATOMLIST(var) { }
+
+#define DECLARE_ATOMLIST(prefix, type, field) \
+macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
+{ atomlist_add_head(&h->ah, &item->field.ai); } \
+macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
+{ atomlist_add_tail(&h->ah, &item->field.ai); } \
+macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
+ _Atomic atomptr_t *hint) \
+{ atomlist_del_hint(&h->ah, &item->field.ai, hint); } \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ atomlist_del_hint(&h->ah, &item->field.ai, NULL); } \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ char *p = (char *)atomlist_pop(&h->ah); \
+ return p ? (type *)(p - offsetof(type, field)) : NULL; } \
+macro_inline type *prefix ## _first(struct prefix##_head *h) \
+{ char *p = atomptr_p(atomic_load_explicit(&h->ah.first, \
+ memory_order_acquire)); \
+ return p ? (type *)(p - offsetof(type, field)) : NULL; } \
+macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ char *p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
+ memory_order_acquire)); \
+ return p ? (type *)(p - offsetof(type, field)) : NULL; } \
+macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ return item ? prefix##_next(h, item) : NULL; } \
+macro_inline size_t prefix ## _count(struct prefix##_head *h) \
+{ return atomic_load_explicit(&h->ah.count, memory_order_relaxed); } \
+/* ... */
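+
+/* illustrative sketch of the generated calls, assuming the "namelist"
+ * declaration from the comment above (handle() is just a placeholder):
+ *
+ *   static struct namelist_head nhead;    // zero-initialized head is valid
+ *   struct name *n;
+ *
+ *   namelist_add_tail(&nhead, n);         // producers, any number of threads
+ *
+ *   while ((n = namelist_pop(&nhead)))    // pop transfers ownership to caller
+ *           handle(n);
+ */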
+
+/* add_head:
+ * - contention on ->first pointer
+ * - return implies completion
+ */
+void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item);
+
+/* add_tail:
+ * - concurrent add_tail can cause wait but has progress guarantee
+ * - return does NOT imply completion. completion is only guaranteed after
+ * all other add_tail operations that started before this add_tail have
+ * completed as well.
+ */
+void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item);
+
+/* del/del_hint:
+ *
+ * OWNER MUST HOLD REFERENCE ON ITEM TO BE DELETED, ENSURING NO OTHER THREAD
+ * WILL TRY TO DELETE THE SAME ITEM. DELETING INCLUDES pop().
+ *
+ * as with all deletions, threads that started reading earlier may still hold
+ * pointers to the deleted item. completion is however guaranteed for all
+ * reads starting later.
+ */
+void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
+ _Atomic atomptr_t *hint);
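+
+/* the hint, if given, must point to an _Atomic atomptr_t that sits at or
+ * before the item in the chain - typically the predecessor's ->next field
+ * or &head->first.  it is only used as a starting point for the unlink
+ * scan.  illustrative sketch (assumes prev is known to precede n):
+ *
+ *   namelist_del_hint(&nhead, n, &prev->nlitem.ai.next);
+ */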
+
+/* pop:
+ *
+ * as with all deletions, threads that started reading earlier may still hold
+ * pointers to the deleted item. completion is however guaranteed for all
+ * reads starting later.
+ */
+struct atomlist_item *atomlist_pop(struct atomlist_head *h);
+
+
+
+struct atomsort_item {
+ _Atomic atomptr_t next;
+};
+#define atomsort_itemp(val) ((struct atomsort_item *)atomptr_p(val))
+
+struct atomsort_head {
+ _Atomic atomptr_t first;
+ _Atomic size_t count;
+};
+
+#define _PREDECL_ATOMSORT(prefix) \
+struct prefix ## _head { struct atomsort_head ah; }; \
+struct prefix ## _item { struct atomsort_item ai; };
+
+#define INIT_ATOMSORT_UNIQ(var) { }
+#define INIT_ATOMSORT_NONUNIQ(var) { }
+
+#define _DECLARE_ATOMSORT(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ assert(h->ah.count == 0); \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ struct atomsort_item *p; \
+ p = atomsort_add(&h->ah, &item->field.ai, cmpfn_uq); \
+ return container_of_null(p, type, field.ai); \
+} \
+macro_inline type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ struct atomsort_item *p; \
+ p = atomptr_p(atomic_load_explicit(&h->ah.first, \
+ memory_order_acquire)); \
+ return container_of_null(p, type, field.ai); \
+} \
+macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct atomsort_item *p; \
+ p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
+ memory_order_acquire)); \
+ return container_of_null(p, type, field.ai); \
+} \
+macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ return item ? prefix##_next(h, item) : NULL; \
+} \
+atomic_find_warn \
+macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ type *p = prefix ## _first(h); \
+ while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
+ p = prefix ## _next(h, p); \
+ return p; \
+} \
+atomic_find_warn \
+macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ type *p = prefix ## _first(h), *prev = NULL; \
+ while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
+ p = prefix ## _next(h, (prev = p)); \
+ return prev; \
+} \
+macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
+ _Atomic atomptr_t *hint) \
+{ \
+ atomsort_del_hint(&h->ah, &item->field.ai, hint); \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ atomsort_del_hint(&h->ah, &item->field.ai, NULL); \
+} \
+macro_inline size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return atomic_load_explicit(&h->ah.count, memory_order_relaxed); \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct atomsort_item *p = atomsort_pop(&h->ah); \
+ return p ? container_of(p, type, field.ai) : NULL; \
+} \
+/* ... */
+
+#define PREDECL_ATOMSORT_UNIQ(prefix) \
+ _PREDECL_ATOMSORT(prefix)
+#define DECLARE_ATOMSORT_UNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
+ const struct atomsort_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.ai), \
+ container_of(b, type, field.ai)); \
+} \
+ \
+_DECLARE_ATOMSORT(prefix, type, field, \
+ prefix ## __cmp, prefix ## __cmp) \
+ \
+atomic_find_warn \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ type *p = prefix ## _first(h); \
+ int cmpval = 0; \
+ while (p && (cmpval = cmpfn(p, item)) < 0) \
+ p = prefix ## _next(h, p); \
+ if (!p || cmpval > 0) \
+ return NULL; \
+ return p; \
+} \
+/* ... */
+
+#define PREDECL_ATOMSORT_NONUNIQ(prefix) \
+ _PREDECL_ATOMSORT(prefix)
+#define DECLARE_ATOMSORT_NONUNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
+ const struct atomsort_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.ai), \
+ container_of(b, type, field.ai)); \
+} \
+macro_inline int prefix ## __cmp_uq(const struct atomsort_item *a, \
+ const struct atomsort_item *b) \
+{ \
+ int cmpval = cmpfn(container_of(a, type, field.ai), \
+ container_of(b, type, field.ai)); \
+ if (cmpval) \
+ return cmpval; \
+ if (a < b) \
+ return -1; \
+ if (a > b) \
+ return 1; \
+ return 0; \
+} \
+ \
+_DECLARE_ATOMSORT(prefix, type, field, \
+ prefix ## __cmp, prefix ## __cmp_uq) \
+/* ... */
+
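+/* illustrative sketch, mirroring tests/lib/test_atomlist.c:
+ *
+ *   PREDECL_ATOMSORT_UNIQ(asort)
+ *   struct item {
+ *           uint64_t val1;
+ *           struct asort_item sortc;
+ *   };
+ *   static int icmp(const struct item *a, const struct item *b);
+ *   DECLARE_ATOMSORT_UNIQ(asort, struct item, sortc, icmp)
+ *
+ *   // add returns NULL on success, or the already-present equal item
+ *   struct item *existing = asort_add(&shead, newitem);
+ */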
+struct atomsort_item *atomsort_add(struct atomsort_head *h,
+ struct atomsort_item *item, int (*cmpfn)(
+ const struct atomsort_item *,
+ const struct atomsort_item *));
+
+void atomsort_del_hint(struct atomsort_head *h,
+ struct atomsort_item *item, _Atomic atomptr_t *hint);
+
+struct atomsort_item *atomsort_pop(struct atomsort_head *h);
+
+#endif /* _FRR_ATOMLIST_H */
# define _FALLTHROUGH __attribute__((fallthrough));
#endif
# define _CONSTRUCTOR(x) constructor(x)
+# define _DEPRECATED(x) deprecated(x)
#elif defined(__GNUC__)
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
# define _RET_NONNULL , returns_nonnull
# define _DESTRUCTOR(x) destructor(x)
# define _ALLOC_SIZE(x) alloc_size(x)
#endif
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+# define _DEPRECATED(x) deprecated(x)
+#endif
#if __GNUC__ >= 7
# define _FALLTHROUGH __attribute__((fallthrough));
#endif
#ifndef _FALLTHROUGH
#define _FALLTHROUGH
#endif
+#ifndef _DEPRECATED
+#define _DEPRECATED(x) deprecated
+#endif
/* for helper functions defined inside macros */
#define macro_inline static inline __attribute__((unused))
lib_libfrr_la_SOURCES = \
lib/agg_table.c \
+ lib/atomlist.c \
lib/bfd.c \
lib/buffer.c \
lib/checksum.c \
pkginclude_HEADERS += \
lib/agg_table.h \
+ lib/atomlist.h \
lib/bfd.h \
lib/bitfield.h \
lib/buffer.h \
/lib/cli/test_commands_defun.c
/lib/northbound/test_oper_data
/lib/cxxcompat
+/lib/test_atomlist
/lib/test_buffer
/lib/test_checksum
/lib/test_graph
--- /dev/null
+++ b/tests/lib/test_atomlist.c
+/*
+ * Copyright (c) 2016-2018 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <pthread.h>
+
+#include "atomlist.h"
+#include "seqlock.h"
+#include "monotime.h"
+
+/*
+ * maybe test:
+ * - alist_del_hint
+ * - alist_next_safe
+ * - asort_del_hint
+ * - asort_next_safe
+ */
+
+static struct seqlock sqlo;
+
+PREDECL_ATOMLIST(alist)
+PREDECL_ATOMSORT_UNIQ(asort)
+struct item {
+ uint64_t val1;
+ struct alist_item chain;
+ struct asort_item sortc;
+ uint64_t val2;
+};
+DECLARE_ATOMLIST(alist, struct item, chain)
+
+static int icmp(const struct item *a, const struct item *b);
+DECLARE_ATOMSORT_UNIQ(asort, struct item, sortc, icmp)
+
+static int icmp(const struct item *a, const struct item *b)
+{
+ if (a->val1 > b->val1)
+ return 1;
+ if (a->val1 < b->val1)
+ return -1;
+ return 0;
+}
+
+#define NITEM 10000
+struct item itm[NITEM];
+
+static struct alist_head ahead;
+static struct asort_head shead;
+
+#define NTHREADS 4
+static struct testthread {
+ pthread_t pt;
+ struct seqlock sqlo;
+ size_t counter, nullops;
+} thr[NTHREADS];
+
+struct testrun {
+ struct testrun *next;
+ int lineno;
+ const char *desc;
+ ssize_t prefill;
+ bool sorted;
+ void (*func)(unsigned int offset);
+};
+struct testrun *runs = NULL;
+
+#define NOCLEAR -1
+
+#define deftestrun(name, _desc, _prefill, _sorted) \
+static void trfunc_##name(unsigned int offset); \
+struct testrun tr_##name = { \
+ .desc = _desc, \
+ .lineno = __LINE__, \
+ .prefill = _prefill, \
+ .func = &trfunc_##name, \
+ .sorted = _sorted }; \
+static void __attribute__((constructor)) trsetup_##name(void) \
+{ \
+ struct testrun **inspos = &runs; \
+ while (*inspos && (*inspos)->lineno < tr_##name.lineno) \
+ inspos = &(*inspos)->next; \
+ tr_##name.next = *inspos; \
+ *inspos = &tr_##name; \
+} \
+static void trfunc_##name(unsigned int offset) \
+{ \
+ size_t i = 0, n = 0;
+
+#define endtestrun \
+ thr[offset].counter = i; \
+ thr[offset].nullops = n; \
+}
+
+deftestrun(add, "add vs. add", 0, false)
+ for (; i < NITEM / NTHREADS; i++)
+ alist_add_head(&ahead, &itm[i * NTHREADS + offset]);
+endtestrun
+
+deftestrun(del, "del vs. del", NOCLEAR, false)
+ for (; i < NITEM / NTHREADS / 10; i++)
+ alist_del(&ahead, &itm[i * NTHREADS + offset]);
+endtestrun
+
+deftestrun(addtail, "add_tail vs. add_tail", 0, false)
+ for (; i < NITEM / NTHREADS; i++)
+ alist_add_tail(&ahead, &itm[i * NTHREADS + offset]);
+endtestrun
+
+deftestrun(pop, "pop vs. pop", NOCLEAR, false)
+ for (; i < NITEM / NTHREADS; )
+ if (alist_pop(&ahead))
+ i++;
+ else
+ n++;
+endtestrun
+
+deftestrun(headN_vs_pop1, "add_head(N) vs. pop(1)", 1, false);
+ if (offset == 0) {
+ struct item *dr = NULL;
+
+ for (i = n = 0; i < NITEM; ) {
+ dr = alist_pop(&ahead);
+ if (dr)
+ i++;
+ else
+ n++;
+ }
+ } else {
+ for (i = offset; i < NITEM; i += NTHREADS)
+ alist_add_head(&ahead, &itm[i]);
+ i = 0;
+ }
+endtestrun
+
+deftestrun(head1_vs_popN, "add_head(1) vs. pop(N)", 0, false);
+ if (offset < NTHREADS - 1) {
+ struct item *dr = NULL;
+
+ for (i = n = 0; i < NITEM / NTHREADS; ) {
+ dr = alist_pop(&ahead);
+ if (dr)
+ i++;
+ else
+ n++;
+ }
+ } else {
+ for (i = 0; i < NITEM; i++)
+ alist_add_head(&ahead, &itm[i]);
+ i = 0;
+ }
+endtestrun
+
+deftestrun(headN_vs_popN, "add_head(N) vs. pop(N)", NTHREADS / 2, false)
+ if (offset < NTHREADS / 2) {
+ struct item *dr = NULL;
+
+ for (i = n = 0; i < NITEM * 2 / NTHREADS; ) {
+ dr = alist_pop(&ahead);
+ if (dr)
+ i++;
+ else
+ n++;
+ }
+ } else {
+ for (i = offset; i < NITEM; i += NTHREADS)
+ alist_add_head(&ahead, &itm[i]);
+ i = 0;
+ }
+endtestrun
+
+deftestrun(tailN_vs_pop1, "add_tail(N) vs. pop(1)", 1, false)
+ if (offset == 0) {
+ struct item *dr = NULL;
+
+ for (i = n = 0; i < NITEM - (NITEM / NTHREADS); ) {
+ dr = alist_pop(&ahead);
+ if (dr)
+ i++;
+ else
+ n++;
+ }
+ } else {
+ for (i = offset; i < NITEM; i += NTHREADS)
+ alist_add_tail(&ahead, &itm[i]);
+ i = 0;
+ }
+endtestrun
+
+deftestrun(tail1_vs_popN, "add_tail(1) vs. pop(N)", 0, false)
+ if (offset < NTHREADS - 1) {
+ struct item *dr = NULL;
+
+ for (i = n = 0; i < NITEM / NTHREADS; ) {
+ dr = alist_pop(&ahead);
+ if (dr)
+ i++;
+ else
+ n++;
+ }
+ } else {
+ for (i = 0; i < NITEM; i++)
+ alist_add_tail(&ahead, &itm[i]);
+ i = 0;
+ }
+endtestrun
+
+deftestrun(sort_add, "add_sort vs. add_sort", 0, true)
+ for (; i < NITEM / NTHREADS / 10; i++)
+ asort_add(&shead, &itm[i * NTHREADS + offset]);
+endtestrun
+
+deftestrun(sort_del, "del_sort vs. del_sort", NOCLEAR, true)
+ for (; i < NITEM / NTHREADS / 10; i++)
+ asort_del(&shead, &itm[i * NTHREADS + offset]);
+endtestrun
+
+deftestrun(sort_add_del, "add_sort vs. del_sort", NTHREADS / 2, true)
+ if (offset < NTHREADS / 2) {
+ for (; i < NITEM / NTHREADS / 10; i++)
+ asort_del(&shead, &itm[i * NTHREADS + offset]);
+ } else {
+ for (; i < NITEM / NTHREADS / 10; i++)
+ asort_add(&shead, &itm[i * NTHREADS + offset]);
+ }
+endtestrun
+
+static void *thr1func(void *arg)
+{
+ struct testthread *p = arg;
+ unsigned int offset = (unsigned int)(p - &thr[0]);
+ seqlock_val_t sv;
+ struct testrun *tr;
+
+ for (tr = runs; tr; tr = tr->next) {
+ sv = seqlock_bump(&p->sqlo);
+ seqlock_wait(&sqlo, sv);
+
+ tr->func(offset);
+ }
+ seqlock_bump(&p->sqlo);
+
+ return NULL;
+}
+
+static void clear_list(size_t prefill)
+{
+ size_t i;
+
+ memset(&ahead, 0, sizeof(ahead));
+ memset(&shead, 0, sizeof(shead));
+ memset(itm, 0, sizeof(itm));
+ for (i = 0; i < NITEM; i++) {
+ itm[i].val1 = itm[i].val2 = i;
+ if ((i % NTHREADS) < prefill) {
+ alist_add_tail(&ahead, &itm[i]);
+ asort_add(&shead, &itm[i]);
+ }
+ }
+}
+
+static void run_tr(struct testrun *tr)
+{
+ const char *desc = tr->desc;
+ struct timeval tv;
+ int64_t delta;
+ seqlock_val_t sv;
+ size_t c = 0, s = 0, n = 0;
+ struct item *item, *prev, dummy;
+
+ printf("[%02u] %35s %s\n", seqlock_cur(&sqlo) >> 1, "", desc);
+ fflush(stdout);
+
+ if (tr->prefill != NOCLEAR)
+ clear_list(tr->prefill);
+
+ monotime(&tv);
+ sv = seqlock_bump(&sqlo);
+ for (size_t i = 0; i < NTHREADS; i++) {
+ seqlock_wait(&thr[i].sqlo, seqlock_cur(&sqlo));
+ s += thr[i].counter;
+ n += thr[i].nullops;
+ thr[i].counter = 0;
+ thr[i].nullops = 0;
+ }
+
+ delta = monotime_since(&tv, NULL);
+ if (tr->sorted) {
+ uint64_t prevval = 0;
+
+ for_each(asort, &shead, item) {
+ assert(item->val1 >= prevval);
+ prevval = item->val1;
+ c++;
+ }
+ assert(c == asort_count(&shead));
+ } else {
+ prev = &dummy;
+ for_each(alist, &ahead, item) {
+ assert(item != prev);
+ prev = item;
+ c++;
+ assert(c <= NITEM);
+ }
+ assert(c == alist_count(&ahead));
+ }
+ printf("\033[1A[%02u] %9"PRId64"us c=%5zu s=%5zu n=%5zu %s\n",
+ sv >> 1, delta, c, s, n, desc);
+}
+
+#ifdef BASIC_TESTS
+static void dump(const char *lbl)
+{
+ struct item *item, *safe;
+ size_t ctr = 0;
+
+ printf("dumping %s:\n", lbl);
+ for_each_safe(alist, &ahead, item) {
+ printf("%s %3zu %p %3"PRIu64" %3"PRIu64"\n", lbl, ctr++,
+ (void *)item, item->val1, item->val2);
+ }
+}
+
+static void basic_tests(void)
+{
+ size_t i;
+
+ memset(&ahead, 0, sizeof(ahead));
+ memset(itm, 0, sizeof(itm));
+ for (i = 0; i < NITEM; i++)
+ itm[i].val1 = itm[i].val2 = i;
+
+ assert(alist_first(&ahead) == NULL);
+ dump("");
+ alist_add_head(&ahead, &itm[0]);
+ dump("");
+ alist_add_head(&ahead, &itm[1]);
+ dump("");
+ alist_add_tail(&ahead, &itm[2]);
+ dump("");
+ alist_add_tail(&ahead, &itm[3]);
+ dump("");
+ alist_del(&ahead, &itm[1]);
+ dump("");
+ printf("POP: %p\n", alist_pop(&ahead));
+ dump("");
+ printf("POP: %p\n", alist_pop(&ahead));
+ printf("POP: %p\n", alist_pop(&ahead));
+ printf("POP: %p\n", alist_pop(&ahead));
+ printf("POP: %p\n", alist_pop(&ahead));
+ dump("");
+}
+#else
+#define basic_tests() do { } while (0)
+#endif
+
+int main(int argc, char **argv)
+{
+ size_t i;
+
+ basic_tests();
+
+ seqlock_init(&sqlo);
+ seqlock_acquire_val(&sqlo, 1);
+
+ for (i = 0; i < NTHREADS; i++) {
+ seqlock_init(&thr[i].sqlo);
+ seqlock_acquire(&thr[i].sqlo, &sqlo);
+ thr[i].counter = 0;
+ thr[i].nullops = 0;
+
+ pthread_create(&thr[i].pt, NULL, thr1func, &thr[i]);
+ }
+
+ struct testrun *tr;
+
+ for (tr = runs; tr; tr = tr->next)
+ run_tr(tr);
+
+ for (i = 0; i < NTHREADS; i++)
+ pthread_join(thr[i].pt, NULL);
+
+ return 0;
+}
--- /dev/null
+++ b/tests/lib/test_atomlist.py
+import frrtest
+
+class TestAtomlist(frrtest.TestMultiOut):
+ program = './test_atomlist'
+
+TestAtomlist.exit_cleanly()
check_PROGRAMS = \
tests/lib/cxxcompat \
+ tests/lib/test_atomlist \
tests/lib/test_buffer \
tests/lib/test_checksum \
tests/lib/test_heavy_thread \
tests_lib_northbound_test_oper_data_LDADD = $(ALL_TESTS_LDADD)
tests_lib_northbound_test_oper_data_SOURCES = tests/lib/northbound/test_oper_data.c
nodist_tests_lib_northbound_test_oper_data_SOURCES = yang/frr-test-module.yang.c
+tests_lib_test_atomlist_CFLAGS = $(TESTS_CFLAGS)
+tests_lib_test_atomlist_CPPFLAGS = $(TESTS_CPPFLAGS)
+tests_lib_test_atomlist_LDADD = $(ALL_TESTS_LDADD)
+tests_lib_test_atomlist_SOURCES = tests/lib/test_atomlist.c
tests_lib_test_buffer_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_buffer_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_buffer_LDADD = $(ALL_TESTS_LDADD)
tests/lib/northbound/test_oper_data.in \
tests/lib/northbound/test_oper_data.py \
tests/lib/northbound/test_oper_data.refout \
+ tests/lib/test_atomlist.py \
tests/lib/test_nexthop_iter.py \
tests/lib/test_ringbuf.py \
tests/lib/test_srcdest_table.py \