/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #ifndef _FRR_TYPESAFE_H
18 #define _FRR_TYPESAFE_H
/* generic macros for all list-like types */

/* forward iteration; "item" is the loop cursor.  Not safe against deleting
 * the current item from inside the loop body - use frr_each_safe for that.
 */
#define frr_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
			item = prefix##_next(head, item))
/* deletion-safe variant: the successor is fetched before the body runs, so
 * the body may free/remove "item".  The lookahead lives in a shadowed local
 * named prefix##_safe.
 */
#define frr_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
					(item = prefix##_first(head))); \
		item; \
		item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
/* resume iteration at "from"; also deletion-safe ("from" holds the
 * lookahead).  On loop exit, "from" is the item after the last visited one.
 */
#define frr_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
		item; \
		item = from, from = prefix##_next_safe(head, from))
/* reverse direction, only supported by a few containers */

/* backward iteration; requires the container to provide _last / _prev
 * (e.g. DLIST).  Mirror images of the frr_each* macros above.
 */
#define frr_rev_each(prefix, head, item) \
	for (item = prefix##_last(head); item; \
			item = prefix##_prev(head, item))
/* deletion-safe backward iteration; predecessor fetched before body runs */
#define frr_rev_each_safe(prefix, head, item) \
	for (typeof(prefix##_prev_safe(head, NULL)) prefix##_safe = \
			prefix##_prev_safe(head, \
					(item = prefix##_last(head))); \
		item; \
		item = prefix##_safe, \
			prefix##_safe = prefix##_prev_safe(head, prefix##_safe))
/* resume backward iteration at "from"; deletion-safe */
#define frr_rev_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_prev_safe(head, item); \
		item; \
		item = from, from = prefix##_prev_safe(head, from))
/* non-const variants. these wrappers are the same for all the types, so
 * bundle them together here.
 *
 * The cast from the _const_* return is safe: the head passed in is
 * non-const, so the items reachable from it are mutable.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_first(h); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_next(h, item); \
} \
/* ... */
/* non-const wrappers for reverse-capable containers (_last / _prev) */
#define TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _last(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_last(h); \
} \
macro_pure type *prefix ## _prev(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_prev(h, item); \
} \
/* ... */
/* non-const wrapper for exact-match lookup */
#define TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _find(struct prefix##_head *h, \
				   const type *item) \
{ \
	return (type *)prefix ## _const_find(h, item); \
} \
/* ... */
/* non-const wrappers for ordered lookup (strictly-less / greater-or-equal) */
#define TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
				      const type *item) \
{ \
	return (type *)prefix ## _const_find_lt(h, item); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
					const type *item) \
{ \
	return (type *)prefix ## _const_find_gteq(h, item); \
} \
/* ... */
/* *_member via find - when there is no better membership check than find().
 * Pointer identity with the find() result is what makes this valid for
 * unique containers only.
 */
#define TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
				    const type *item) \
{ \
	return item == prefix ## _const_find(h, item); \
} \
/* ... */
/* *_member via find_gteq - same for non-unique containers.
 * Walks the run of compare-equal items starting at the first >= position,
 * looking for pointer identity; stops as soon as the compare says we have
 * passed the equal range.
 */
#define TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
				    const type *item) \
{ \
	const type *iter; \
	for (iter = prefix ## _const_find_gteq(h, item); iter; \
	     iter = prefix ## _const_next(h, iter)) { \
		if (iter == item) \
			return true; \
		if (cmpfn(iter, item) > 0) \
			break; \
	} \
	return false; \
} \
/* ... */
/* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
 * head *AND* the head doesn't point to itself (= everything except LIST,
 * DLIST and SKIPLIST), just switch out the entire head
 */
#define TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
} \
/* ... */
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;
};

struct slist_head {
	/* "first" is the list head; "last_next" points at the ->next slot of
	 * the last item (or at "first" when empty), enabling O(1) add_tail.
	 */
	struct slist_item *first, **last_next;
	size_t count;
};

/* this replaces NULL as the value for ->next on the last item. */
extern struct slist_item typesafe_slist_sentinel;
#define _SLIST_LAST &typesafe_slist_sentinel
164 static inline void typesafe_list_add(struct slist_head
*head
,
165 struct slist_item
**pos
, struct slist_item
*item
)
169 if (pos
== head
->last_next
)
170 head
->last_next
= &item
->next
;
174 extern bool typesafe_list_member(const struct slist_head
*head
,
175 const struct slist_item
*item
);
/* use as:
 *
 * PREDECL_LIST(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem);
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
/* generates the typed API for an unsorted single-linked list.
 * Note: the sentinel (_SLIST_LAST), not NULL, terminates the chain; a
 * NULL ->next means "not on any list" (see _anywhere).
 */
#define DECLARE_LIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.first = _SLIST_LAST; \
	h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	/* O(n): must walk to find the predecessor's ->next slot */ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter != _SLIST_LAST && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (*iter == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (item->field.si.next == _SLIST_LAST) \
		h->sh.last_next = iter; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (sitem == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == _SLIST_LAST) \
		h->sh.last_next = &h->sh.first; \
	sitem->next = NULL; \
	return container_of(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	/* fix up tail shortcuts that pointed into the other head */ \
	if (a->sh.last_next == &b->sh.first) \
		a->sh.last_next = &a->sh.first; \
	if (b->sh.last_next == &a->sh.first) \
		b->sh.last_next = &b->sh.first; \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->sh.first != _SLIST_LAST) \
		return container_of(h->sh.first, type, field.si); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct slist_item *sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	/* on-list items end in the sentinel, never NULL */ \
	return item->field.si.next != NULL; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	return typesafe_list_member(&h->sh, &item->field.si); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	/* closed ring: hitem links back to itself when the list is empty */
	struct dlist_item hitem;
	size_t count;
};
308 static inline void typesafe_dlist_add(struct dlist_head
*head
,
309 struct dlist_item
*prev
, struct dlist_item
*item
)
311 /* SA on clang-11 thinks this can happen, but in reality -assuming no
312 * memory corruption- it can't. DLIST uses a "closed" ring, i.e. the
313 * termination at the end of the list is not NULL but rather a pointer
314 * back to the head. (This eliminates special-casing the first or last
317 * Sadly, can't use assert() here since the libfrr assert / xref code
318 * uses typesafe lists itself... that said, if an assert tripped here
319 * we'd already be way past some memory corruption, so we might as
320 * well just take the SEGV. (In the presence of corruption, we'd see
321 * random SEGVs from places that make no sense at all anyway, an
322 * assert might actually be a red herring.)
324 * ("assume()" tells the compiler to produce code as if the condition
325 * will always hold; it doesn't have any actual effect here, it'll
326 * just SEGV out on "item->next->prev = item".)
328 assume(prev
->next
!= NULL
);
330 item
->next
= prev
->next
;
331 item
->next
->prev
= item
;
337 static inline void typesafe_dlist_swap_all(struct dlist_head
*a
,
338 struct dlist_head
*b
)
340 struct dlist_head tmp
= *a
;
344 a
->hitem
.next
= b
->hitem
.next
;
345 a
->hitem
.prev
= b
->hitem
.prev
;
346 a
->hitem
.next
->prev
= &a
->hitem
;
347 a
->hitem
.prev
->next
= &a
->hitem
;
349 a
->hitem
.next
= &a
->hitem
;
350 a
->hitem
.prev
= &a
->hitem
;
353 b
->count
= tmp
.count
;
355 b
->hitem
.next
= tmp
.hitem
.next
;
356 b
->hitem
.prev
= tmp
.hitem
.prev
;
357 b
->hitem
.next
->prev
= &b
->hitem
;
358 b
->hitem
.prev
->next
= &b
->hitem
;
360 b
->hitem
.next
= &b
->hitem
;
361 b
->hitem
.prev
= &b
->hitem
;
/* O(n) membership scan; implemented in typesafe.c */
extern bool typesafe_dlist_member(const struct dlist_head *head,
				  const struct dlist_item *item);
/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer: ring closed onto the head's own hitem */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
/* generates the typed API for a double-linked list.  The ring is "closed":
 * the head's hitem terminates both directions, so del/pop are O(1) with no
 * special cases.  NULL next/prev marks an off-list item (see _anywhere).
 */
#define DECLARE_DLIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	typesafe_dlist_swap_all(&a->dh, &b->dh); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure const type *prefix ## _const_last(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.prev; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_prev(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->prev == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->prev, type, field.di); \
} \
TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure type *prefix ## _prev_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _prev(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	return ditem->next && ditem->prev; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	return typesafe_dlist_member(&h->dh, &item->field.di); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* note: heap currently caps out at 4G items */

typedef uint32_t heap_index_i;

struct heap_item {
	/* position of this item in heap_head.array; kept up to date by the
	 * pullup/pushdown helpers so del/next are O(1) lookups.
	 */
	uint32_t index;
};

struct heap_head {
	struct heap_item **array;
	uint32_t arraysz, count;
};

/* grow when the array is full */
#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
/* shrink when well under half full (with 1024-slot slack to avoid
 * thrashing on small heaps)
 */
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
	 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix) \
struct prefix ## _head { struct heap_head hh; }; \
struct prefix ## _item { struct heap_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HEAP(var) { }
/* generates the typed API for a binary heap (array-backed priority queue).
 * Iteration (_first/_next) is in array order, NOT sorted order; only the
 * top element (_pop / _const_first) is guaranteed smallest per cmpfn.
 */
#define DECLARE_HEAP(prefix, type, field, cmpfn) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline int prefix ## __cmp(const struct heap_item *a, \
		const struct heap_item *b) \
{ \
	return cmpfn(container_of(a, type, field.hi), \
			container_of(b, type, field.hi)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	if (HEAP_RESIZE_TRESH_UP(h)) \
		typesafe_heap_resize(&h->hh, true); \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
			     prefix ## __cmp); \
	h->hh.count++; \
	return NULL; \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct heap_item *other; \
	uint32_t index = item->field.hi.index; \
	assert(h->hh.array[index] == &item->field.hi); \
	h->hh.count--; \
	/* refill the hole with the last element, sifting it into place */ \
	other = h->hh.array[h->hh.count]; \
	if (cmpfn(container_of(other, type, field.hi), item) < 0) \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
	else \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct heap_item *hitem, *other; \
	if (h->hh.count == 0) \
		return NULL; \
	hitem = h->hh.array[0]; \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return container_of(hitem, type, field.hi); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->hh.count == 0) \
		return NULL; \
	return container_of(h->hh.array[0], type, field.hi); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	uint32_t idx = item->field.hi.index + 1; \
	if (idx >= h->hh.count) \
		return NULL; \
	return container_of(h->hh.array[idx], type, field.hi); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	uint32_t idx = item->field.hi.index; \
	if (idx >= h->hh.count) \
		return false; \
	return h->hh.array[idx] == &item->field.hi; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* out-of-line heap helpers; implemented in typesafe.c.
 * pullup sifts "item" towards the root starting at "index"; pushdown sifts
 * it towards the leaves.  Both update heap_item.index as they go.
 */
extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
		struct heap_item *item,
		int (*cmpfn)(const struct heap_item *a,
			     const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
		struct heap_item *item,
		int (*cmpfn)(const struct heap_item *a,
			     const struct heap_item *b));
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
/* shared implementation for sorted single-linked lists.
 * cmpfn_uq is the insertion comparator (it never returns 0 for distinct
 * items in the NONUNIQ variant); cmpfn_nuq is the user comparator used for
 * the ordered find operations.
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	/* duplicate: return the existing item, don't insert */ \
	if (c == 0) \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* unique sorted list: exact find is possible, membership via find */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn); \
 \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
					       const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	/* ran past the sort position without a compare-equal hit */ \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
 \
MACRO_REQUIRE_SEMICOLON() /* end */
/* non-unique sorted list: compare-equal items are kept in a stable order by
 * breaking ties on item address, so _add never sees a 0 comparison for
 * distinct items.  (NOTE(review): tie-break body reconstructed to match the
 * skiplist __cmp_uq pattern below - confirm against upstream.)
 */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp); \
	TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;
	uint32_t hashval;
};

struct thash_head {
	struct thash_item **entries;
	uint32_t count;

	/* tabshift encodes the table size as (1 << tabshift) >> 1 entries;
	 * 0 means "no table allocated yet".  min/maxshift bound resizing.
	 */
	uint8_t tabshift;
	uint8_t minshift, maxshift;
};

#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* top bits of the 32-bit hash select the bucket */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HASH(var) { }
/* generates the typed API for a hash table.  Buckets are chained lists kept
 * sorted by hashval, so lookups can stop early.  _member below drops a dead
 * pre-scan loop whose result was immediately discarded by the following for.
 */
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	h->hh.minshift = 0; \
	typesafe_hash_shrink(&h->hh); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	h->hh.count++; \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
		typesafe_hash_grow(&h->hh); \
 \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	item->field.hi.hashval = hval; \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && (*np)->hashval == hval) { \
		if (cmpfn(container_of(*np, type, field.hi), item) == 0) { \
			/* duplicate: undo count, return existing item */ \
			h->hh.count--; \
			return container_of(*np, type, field.hi); \
		} \
		np = &(*np)->next; \
	} \
	item->field.hi.next = *np; \
	*np = &item->field.hi; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
					       const type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	while (hitem && hitem->hashval == hval) { \
		if (!cmpfn(container_of(hitem, type, field.hi), item)) \
			return container_of(hitem, type, field.hi); \
		hitem = hitem->next; \
	} \
	return NULL; \
} \
TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
		np = &(*np)->next; \
	if (*np != &item->field.hi) \
		return NULL; \
	*np = item->field.hi.next; \
	item->field.hi.next = NULL; \
	h->hh.count--; \
	if (HASH_SHRINK_THRESHOLD(h->hh)) \
		typesafe_hash_shrink(&h->hh); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) { \
			struct thash_item *hitem = h->hh.entries[i]; \
			h->hh.entries[i] = hitem->next; \
			h->hh.count--; \
			hitem->next = NULL; \
			if (HASH_SHRINK_THRESHOLD(h->hh)) \
				typesafe_hash_shrink(&h->hh); \
			return container_of(hitem, type, field.hi); \
		} \
	return NULL; \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem; \
	for (hitem = h->hh.entries[hbits]; hitem && hitem->hashval <= hval; \
	     hitem = hitem->next) \
		if (hitem == &item->field.hi) \
			return true; \
	return false; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH	16
#define SKIPLIST_EMBED		4
#define SKIPLIST_OVERFLOW	(SKIPLIST_EMBED - 1)

struct sskip_item {
	/* levels 0..SKIPLIST_EMBED-1 are embedded; taller items store the
	 * rest in a separately allocated sskip_overflow, reachable through
	 * next[SKIPLIST_OVERFLOW] (tagged with its low bit set).
	 */
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};

/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }
/* shared implementation for skiplists; the real work is in the out-of-line
 * typesafe_skiplist_* functions.  The head's hitem embeds the per-head
 * overflow array pointer (low bit tagged) at next[SKIPLIST_OVERFLOW], which
 * must be re-established after init and after swapping heads.
 */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)h->sh.overflow | 1); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *si; \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
	return container_of_null(si, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh, \
			&item->field.si, cmpfn_uq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	/* re-point each head's tagged overflow pointer at its own array */ \
	a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)a->sh.overflow | 1); \
	b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)b->sh.overflow | 1); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct sskip_item *first = h->sh.hitem.next[0]; \
	return container_of_null(first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct sskip_item *next = item->field.si.next[0]; \
	return container_of_null(next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next; \
	next = item ? item->field.si.next[0] : NULL; \
	return container_of_null(next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
/* unique skiplist: one comparator for both insert and lookup */
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
					       const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
			&item->field.si, &prefix ## __cmp); \
	return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp); \
MACRO_REQUIRE_SEMICOLON() /* end */
#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
/* non-unique skiplist: insertion comparator breaks ties on item address so
 * distinct compare-equal items never collide inside the skiplist core
 */
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
/* out-of-line skiplist core; implemented in typesafe.c */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
			const struct sskip_item *a,
			const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
			const struct sskip_item *a,
			const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
			const struct sskip_item *a,
			const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
			const struct sskip_item *a,
			const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
			const struct sskip_item *a,
			const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
1163 /* this needs to stay at the end because both files include each other.
1164 * the resolved order is typesafe.h before typerb.h
1168 #endif /* _FRR_TYPESAFE_H */