// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
 */

#ifndef _FRR_TYPESAFE_H
#define _FRR_TYPESAFE_H
/* generic macros for all list-like types */

/* iterate forward over the whole container. */
#define frr_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
			item = prefix##_next(head, item))
/* deletion-safe variant: "item" may be removed/freed inside the loop body.
 * A hidden second variable (prefix##_safe) holds the successor.
 */
#define frr_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
					(item = prefix##_first(head))); \
			item; \
			item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
/* resume iterating at "from"; deletion-safe ("from" holds the successor). */
#define frr_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
			item; \
			item = from, from = prefix##_next_safe(head, from))
/* reverse direction, only supported by a few containers */

/* iterate backward over the whole container. */
#define frr_rev_each(prefix, head, item) \
	for (item = prefix##_last(head); item; \
			item = prefix##_prev(head, item))
/* deletion-safe reverse iteration; see frr_each_safe. */
#define frr_rev_each_safe(prefix, head, item) \
	for (typeof(prefix##_prev_safe(head, NULL)) prefix##_safe = \
			prefix##_prev_safe(head, \
					(item = prefix##_last(head))); \
			item; \
			item = prefix##_safe, \
			prefix##_safe = prefix##_prev_safe(head, prefix##_safe))
/* resume reverse iteration at "from"; deletion-safe. */
#define frr_rev_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_prev_safe(head, item); \
			item; \
			item = from, from = prefix##_prev_safe(head, from))
/* non-const variants. these wrappers are the same for all the types, so
 * bundle them together here.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_first(h); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_next(h, item); \
} \
/* end */
/* non-const wrappers for reverse-capable containers. */
#define TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _last(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_last(h); \
} \
macro_pure type *prefix ## _prev(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_prev(h, item); \
} \
/* end */
/* non-const wrapper for containers providing _const_find(). */
#define TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _find(struct prefix##_head *h, \
				   const type *item) \
{ \
	return (type *)prefix ## _const_find(h, item); \
} \
/* end */
/* non-const wrappers for ordered containers (_find_lt / _find_gteq). */
#define TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
				      const type *item) \
{ \
	return (type *)prefix ## _const_find_lt(h, item); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
					const type *item) \
{ \
	return (type *)prefix ## _const_find_gteq(h, item); \
} \
/* end */
/* *_member via find - when there is no better membership check than find().
 * Only valid for unique containers: find() must return the identical item.
 */
#define TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
				    const type *item) \
{ \
	return item == prefix ## _const_find(h, item); \
} \
/* end */
/* *_member via find_gteq - same for non-unique containers.  Walks all items
 * comparing equal to "item" and checks pointer identity.
 */
#define TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
				    const type *item) \
{ \
	const type *iter; \
	for (iter = prefix ## _const_find_gteq(h, item); iter; \
	     iter = prefix ## _const_next(h, iter)) { \
		if (iter == item) \
			return true; \
		if (cmpfn(iter, item) > 0) \
			break; \
	} \
	return false; \
} \
/* end */
/* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
 * head *AND* the head doesn't point to itself (= everything except LIST,
 * DLIST and SKIPLIST), just switch out the entire head
 */
#define TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
} \
/* end */
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;
};

struct slist_head {
	struct slist_item *first, **last_next;
	size_t count;
};

/* this replaces NULL as the value for ->next on the last item. */
extern struct slist_item typesafe_slist_sentinel;
#define _SLIST_LAST &typesafe_slist_sentinel
153 static inline void typesafe_list_add(struct slist_head
*head
,
154 struct slist_item
**pos
, struct slist_item
*item
)
158 if (pos
== head
->last_next
)
159 head
->last_next
= &item
->next
;
163 extern bool typesafe_list_member(const struct slist_head
*head
,
164 const struct slist_item
*item
);
/* use as:
 *
 * PREDECL_LIST(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * };
 * DECLARE_LIST(namelist, struct name, nlitem);
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
/* generates the API for one single-linked list type:
 * _init/_fini, _add_head/_add_tail/_add_after, _del, _pop,
 * _swap_all, _first/_next/_next_safe, _count, _anywhere, _member.
 */
#define DECLARE_LIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.first = _SLIST_LAST; \
	h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
				       type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	/* O(n): must walk the list to find the predecessor pointer */ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter != _SLIST_LAST && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (*iter == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (item->field.si.next == _SLIST_LAST) \
		h->sh.last_next = iter; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (sitem == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == _SLIST_LAST) \
		h->sh.last_next = &h->sh.first; \
	sitem->next = NULL; \
	return container_of(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	/* fix up tail pointers of previously-empty heads */ \
	if (a->sh.last_next == &b->sh.first) \
		a->sh.last_next = &a->sh.first; \
	if (b->sh.last_next == &a->sh.first) \
		b->sh.last_next = &b->sh.first; \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->sh.first != _SLIST_LAST) \
		return container_of(h->sh.first, type, field.si); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct slist_item *sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	/* works because the last item's ->next is the sentinel, not NULL */ \
	return item->field.si.next != NULL; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	return typesafe_list_member(&h->sh, &item->field.si); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	/* closed ring: hitem is part of the cycle, never NULL-terminated */
	struct dlist_item hitem;
	size_t count;
};
297 static inline void typesafe_dlist_add(struct dlist_head
*head
,
298 struct dlist_item
*prev
, struct dlist_item
*item
)
300 /* SA on clang-11 thinks this can happen, but in reality -assuming no
301 * memory corruption- it can't. DLIST uses a "closed" ring, i.e. the
302 * termination at the end of the list is not NULL but rather a pointer
303 * back to the head. (This eliminates special-casing the first or last
306 * Sadly, can't use assert() here since the libfrr assert / xref code
307 * uses typesafe lists itself... that said, if an assert tripped here
308 * we'd already be way past some memory corruption, so we might as
309 * well just take the SEGV. (In the presence of corruption, we'd see
310 * random SEGVs from places that make no sense at all anyway, an
311 * assert might actually be a red herring.)
313 * ("assume()" tells the compiler to produce code as if the condition
314 * will always hold; it doesn't have any actual effect here, it'll
315 * just SEGV out on "item->next->prev = item".)
317 assume(prev
->next
!= NULL
);
319 item
->next
= prev
->next
;
320 item
->next
->prev
= item
;
326 static inline void typesafe_dlist_swap_all(struct dlist_head
*a
,
327 struct dlist_head
*b
)
329 struct dlist_head tmp
= *a
;
333 a
->hitem
.next
= b
->hitem
.next
;
334 a
->hitem
.prev
= b
->hitem
.prev
;
335 a
->hitem
.next
->prev
= &a
->hitem
;
336 a
->hitem
.prev
->next
= &a
->hitem
;
338 a
->hitem
.next
= &a
->hitem
;
339 a
->hitem
.prev
= &a
->hitem
;
342 b
->count
= tmp
.count
;
344 b
->hitem
.next
= tmp
.hitem
.next
;
345 b
->hitem
.prev
= tmp
.hitem
.prev
;
346 b
->hitem
.next
->prev
= &b
->hitem
;
347 b
->hitem
.prev
->next
= &b
->hitem
;
349 b
->hitem
.next
= &b
->hitem
;
350 b
->hitem
.prev
= &b
->hitem
;
/* O(n) membership scan, implemented in typesafe.c */
extern bool typesafe_dlist_member(const struct dlist_head *head,
				  const struct dlist_item *item);
/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
/* generates the API for one double-linked list type; supports reverse
 * iteration (_last/_prev) and O(1) deletion.
 */
#define DECLARE_DLIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
				       type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	typesafe_dlist_swap_all(&a->dh, &b->dh); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure const type *prefix ## _const_last(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.prev; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_prev(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->prev == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->prev, type, field.di); \
} \
TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure type *prefix ## _prev_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _prev(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	return ditem->next && ditem->prev; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	return typesafe_dlist_member(&h->dh, &item->field.di); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* note: heap currently caps out at 4G items */

typedef uint32_t heap_index_i;

struct heap_item {
	/* position of this item in heap_head->array */
	uint32_t index;
};

struct heap_head {
	struct heap_item **array;
	uint32_t arraysz, count;
};

#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
	 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)
#define PREDECL_HEAP(prefix) \
struct prefix ## _head { struct heap_head hh; }; \
struct prefix ## _item { struct heap_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HEAP(var) { }
/* generates the API for one binary-heap type; iteration order is heap
 * (array) order, NOT sorted order - only the first item is the minimum.
 */
#define DECLARE_HEAP(prefix, type, field, cmpfn) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline int prefix ## __cmp(const struct heap_item *a, \
				 const struct heap_item *b) \
{ \
	return cmpfn(container_of(a, type, field.hi), \
		     container_of(b, type, field.hi)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	if (HEAP_RESIZE_TRESH_UP(h)) \
		typesafe_heap_resize(&h->hh, true); \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
			     prefix ## __cmp); \
	h->hh.count++; \
	return NULL; \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct heap_item *other; \
	uint32_t index = item->field.hi.index; \
	assert(h->hh.array[index] == &item->field.hi); \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	/* move the former last item into the hole, restoring heap order */ \
	if (cmpfn(container_of(other, type, field.hi), item) < 0) \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
	else \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct heap_item *hitem, *other; \
	if (h->hh.count == 0) \
		return NULL; \
	hitem = h->hh.array[0]; \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return container_of(hitem, type, field.hi); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->hh.count == 0) \
		return NULL; \
	return container_of(h->hh.array[0], type, field.hi); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	uint32_t idx = item->field.hi.index + 1; \
	if (idx >= h->hh.count) \
		return NULL; \
	return container_of(h->hh.array[idx], type, field.hi); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	uint32_t idx = item->field.hi.index; \
	if (idx >= h->hh.count) \
		return false; \
	return h->hh.array[idx] == &item->field.hi; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* heap helpers, implemented in typesafe.c */
extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
				   struct heap_item *item,
				   int (*cmpfn)(const struct heap_item *a,
						const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
				 struct heap_item *item,
				 int (*cmpfn)(const struct heap_item *a,
					      const struct heap_item *b));
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};
/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
/* shared implementation for unique & non-unique sorted single-linked lists.
 * cmpfn_uq breaks ties (e.g. by pointer) for the non-unique variant.
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) \
		/* duplicate: return existing item, don't insert */ \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* unique sorted list: items comparing equal are rejected by _add(). */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn); \
 \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
					       const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
MACRO_REQUIRE_SEMICOLON() /* end */
/* non-unique sorted list: ties in cmpfn are broken by item address so that
 * _add() never reports a duplicate.
 */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;
	uint32_t hashval;
};

struct thash_head {
	struct thash_item **entries;
	uint32_t count;

	/* tabshift == 0 means the table is empty (entries == NULL);
	 * otherwise table size is (1 << (tabshift - 1)), see _HASH_SIZE.
	 */
	uint8_t tabshift;
	uint8_t minshift, maxshift;
};
/* tabshift stores the shift value *plus one*, so size = 1 << (tabshift-1) */
#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* bucket index: take the top (tabshift-1) bits of the 32-bit hash value */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)
/* table resize helpers, implemented in typesafe.c */
extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HASH(var) { }
/* generates the API for one hash table type.  Buckets are sorted by hash
 * value, so iteration order is stable for a given table size.
 */
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	h->hh.minshift = 0; \
	typesafe_hash_shrink(&h->hh); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	h->hh.count++; \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
		typesafe_hash_grow(&h->hh); \
 \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	item->field.hi.hashval = hval; \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && (*np)->hashval == hval) { \
		if (cmpfn(container_of(*np, type, field.hi), item) == 0) { \
			h->hh.count--; \
			return container_of(*np, type, field.hi); \
		} \
		np = &(*np)->next; \
	} \
	item->field.hi.next = *np; \
	*np = &item->field.hi; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
					       const type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	while (hitem && hitem->hashval == hval) { \
		if (!cmpfn(container_of(hitem, type, field.hi), item)) \
			return container_of(hitem, type, field.hi); \
		hitem = hitem->next; \
	} \
	return NULL; \
} \
TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
		np = &(*np)->next; \
	if (*np != &item->field.hi) \
		return NULL; \
	*np = item->field.hi.next; \
	item->field.hi.next = NULL; \
	h->hh.count--; \
	if (HASH_SHRINK_THRESHOLD(h->hh)) \
		typesafe_hash_shrink(&h->hh); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) { \
			struct thash_item *hitem = h->hh.entries[i]; \
			h->hh.entries[i] = hitem->next; \
			h->hh.count--; \
			hitem->next = NULL; \
			if (HASH_SHRINK_THRESHOLD(h->hh)) \
				typesafe_hash_shrink(&h->hh); \
			return container_of(hitem, type, field.hi); \
		} \
	return NULL; \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	/* NOTE(review): no !tabshift guard here, unlike _find/_del - \
	 * presumably callers never ask about an empty table; verify. \
	 */ \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	for (hitem = h->hh.entries[hbits]; hitem && hitem->hashval <= hval; \
	     hitem = hitem->next) \
		if (hitem == &item->field.hi) \
			return true; \
	return false; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH	16
#define SKIPLIST_EMBED		4
#define SKIPLIST_OVERFLOW	(SKIPLIST_EMBED - 1)

struct sskip_item {
	/* levels beyond SKIPLIST_OVERFLOW live in an out-of-band
	 * sskip_overflow block; the last embedded slot is tagged (low bit
	 * set) when it holds an overflow pointer instead of a next pointer.
	 */
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
	uint32_t seed;
};
/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }
/* shared implementation for unique & non-unique skiplists. */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	/* tag (low bit) marks the embedded overflow pointer slot */ \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)h->sh.overflow | 1); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *si; \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
	return container_of_null(si, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh, \
			&item->field.si, cmpfn_uq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	/* overflow pointers are self-referential, re-point them */ \
	a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)a->sh.overflow | 1); \
	b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)b->sh.overflow | 1); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct sskip_item *first = h->sh.hitem.next[0]; \
	return container_of_null(first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct sskip_item *next = item->field.si.next[0]; \
	return container_of_null(next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next; \
	next = item ? item->field.si.next[0] : NULL; \
	return container_of_null(next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
/* unique skiplist: items comparing equal are rejected by _add(). */
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
				 const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
		     container_of(b, type, field.si)); \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
					       const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
			&item->field.si, &prefix ## __cmp); \
	return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
		  prefix ## __cmp, prefix ## __cmp); \
MACRO_REQUIRE_SEMICOLON() /* end */
#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
/* non-unique skiplist: ties in cmpfn are broken by item address so that
 * _add() never reports a duplicate.
 */
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
				 const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
		     container_of(b, type, field.si)); \
} \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
				    const struct sskip_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.si), \
			   container_of(b, type, field.si)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
		  prefix ## __cmp, prefix ## __cmp_uq); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
/* skiplist core operations, implemented in typesafe.c */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
			     const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
			     const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
			     const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
			     const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
			     const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
/* this needs to stay at the end because both files include each other.
 * the resolved order is typesafe.h before typerb.h
 */
#include "typerb.h"

#endif /* _FRR_TYPESAFE_H */