/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #ifndef _FRR_TYPESAFE_H
18 #define _FRR_TYPESAFE_H
/* generic iteration macros for all list-like types.
 *
 * frr_each       - plain iteration; the current item must not be deleted.
 * frr_each_safe  - pre-fetches the next item, so the current item may be
 *                  deleted during the iteration.
 * frr_each_from  - resumable safe iteration starting at "from"; "from" is
 *                  updated as the loop advances.
 *
 * NOTE(review): the "item;" loop conditions were missing in the garbled
 * original text; without them the for-loops have no termination test.
 */
#define frr_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
			item = prefix##_next(head, item))
#define frr_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
					(item = prefix##_first(head))); \
			item; \
			item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
#define frr_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
			item; \
			item = from, from = prefix##_next_safe(head, from))
/* non-const variants. these wrappers are the same for all the types, so
 * bundle them together here.
 *
 * Each container's DECLARE_* macro defines the _const_first/_const_next
 * accessors; this emits the mutable-pointer wrappers on top of them.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_first(h); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_next(h, item); \
} \
/* end */
/* non-const wrapper over the container's _const_find() */
#define TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _find(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find(h, item); \
} \
/* end */
/* non-const wrappers over _const_find_lt() / _const_find_gteq(), for the
 * ordered containers (sortlist, skiplist).
 */
#define TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find_lt(h, item); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find_gteq(h, item); \
} \
/* end */
/* *_member via find - when there is no better membership check than find()
 * (only valid for unique containers: find() must return the identical item)
 */
#define TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
		const type *item) \
{ \
	return item == prefix ## _const_find(h, item); \
} \
/* end */
/* *_member via find_gteq - same for non-unique containers: walk all items
 * that compare equal and test for pointer identity.
 *
 * NOTE(review): the loop body tail (identity check / return) was missing in
 * the garbled original; reconstructed from the visible scan structure.
 */
#define TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
		const type *item) \
{ \
	const type *iter; \
	for (iter = prefix ## _const_find_gteq(h, item); iter; \
	     iter = prefix ## _const_next(h, iter)) { \
		if (iter == item) \
			return true; \
		if (cmpfn(iter, item) > 0) \
			break; \
	} \
	return false; \
} \
/* end */
/* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
 * head *AND* the head doesn't point to itself (= everything except LIST,
 * DLIST and SKIPLIST), just switch out the entire head
 */
#define TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
} \
/* end */
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;
};

struct slist_head {
	struct slist_item *first, **last_next;
	size_t count;	/* maintained by typesafe_list_add / _del / _pop */
};

/* this replaces NULL as the value for ->next on the last item. */
extern struct slist_item typesafe_slist_sentinel;
#define _SLIST_LAST &typesafe_slist_sentinel

/* insert "item" at *pos, which is either &head->first or &prev_item->next;
 * fixes up last_next if the insertion point was the tail.
 */
static inline void typesafe_list_add(struct slist_head *head,
		struct slist_item **pos, struct slist_item *item)
{
	item->next = *pos;
	*pos = item;
	if (pos == head->last_next)
		head->last_next = &item->next;
	head->count++;
}

/* O(n) scan; returns true if "item" is on "head"'s list */
extern bool typesafe_list_member(const struct slist_head *head,
		const struct slist_item *item);
/* use as:
 *
 * PREDECL_LIST(namelist);
 *
 * struct name { struct namelist_item nlitem; };
 *
 * DECLARE_LIST(namelist, struct name, nlitem);
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer.  ->first must start as _SLIST_LAST, not NULL: all the
 * LIST accessors compare against the sentinel, and container_of() on a NULL
 * first pointer would yield a garbage non-NULL item pointer.
 */
#define INIT_LIST(var) \
	{ .sh = { .first = _SLIST_LAST, .last_next = &var.sh.first, }, }
/* generates the typesafe API functions for an unsorted single-linked list.
 * add_* are O(1); del is O(n) (must find the predecessor).
 */
#define DECLARE_LIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.first = _SLIST_LAST; \
	h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter != _SLIST_LAST && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (*iter == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (item->field.si.next == _SLIST_LAST) \
		h->sh.last_next = iter; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (sitem == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == _SLIST_LAST) \
		h->sh.last_next = &h->sh.first; \
	sitem->next = NULL; \
	return container_of(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	/* last_next may point into the other head after the swap */ \
	if (a->sh.last_next == &b->sh.first) \
		a->sh.last_next = &a->sh.first; \
	if (b->sh.last_next == &a->sh.first) \
		b->sh.last_next = &b->sh.first; \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->sh.first != _SLIST_LAST) \
		return container_of(h->sh.first, type, field.si); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct slist_item *sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	/* ->next is NULL only while the item is off-list (sentinel on-list) */ \
	return item->field.si.next != NULL; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	return typesafe_list_member(&h->sh, &item->field.si); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

/* circular double-linked list; the head participates as hitem, so an empty
 * list has hitem.next == hitem.prev == &hitem.
 */
struct dlist_head {
	struct dlist_item hitem;
	size_t count;
};

/* link "item" immediately after "prev" (which may be &head->hitem) */
static inline void typesafe_dlist_add(struct dlist_head *head,
		struct dlist_item *prev, struct dlist_item *item)
{
	item->next = prev->next;
	item->next->prev = item;
	item->prev = prev;
	prev->next = item;
	head->count++;
}

/* swap the contents of two dlist heads.  Since the lists are circular
 * through the head's hitem, the neighbour pointers of the first and last
 * items must be rewritten to point at the new head.
 */
static inline void typesafe_dlist_swap_all(struct dlist_head *a,
		struct dlist_head *b)
{
	struct dlist_head tmp = *a;

	a->count = b->count;
	if (a->count) {
		a->hitem.next = b->hitem.next;
		a->hitem.prev = b->hitem.prev;
		a->hitem.next->prev = &a->hitem;
		a->hitem.prev->next = &a->hitem;
	} else {
		a->hitem.next = &a->hitem;
		a->hitem.prev = &a->hitem;
	}

	b->count = tmp.count;
	if (tmp.count) {
		b->hitem.next = tmp.hitem.next;
		b->hitem.prev = tmp.hitem.prev;
		b->hitem.next->prev = &b->hitem;
		b->hitem.prev->next = &b->hitem;
	} else {
		b->hitem.next = &b->hitem;
		b->hitem.prev = &b->hitem;
	}
}

/* O(n) scan; returns true if "item" is on "head"'s list */
extern bool typesafe_dlist_member(const struct dlist_head *head,
		const struct dlist_item *item);
/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer: empty list = head's hitem linked to itself */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }

/* generates the typesafe API for a double-linked list; del and pop are O(1) */
#define DECLARE_DLIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	typesafe_dlist_swap_all(&a->dh, &b->dh); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	/* both pointers are NULLed on del/pop */ \
	return ditem->next && ditem->prev; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	return typesafe_dlist_member(&h->dh, &item->field.di); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* note: heap currently caps out at 4G items */

typedef uint32_t heap_index_i;

/* don't use these structs directly.
 * NOTE(review): the struct heap_item definition was dropped from the garbled
 * original; reconstructed as a single back-index into heap_head.array (the
 * DECLARE_HEAP code reads/writes item->field.hi.index) — confirm field type.
 */
struct heap_item {
	heap_index_i index;
};

struct heap_head {
	struct heap_item **array;
	uint32_t arraysz, count;
};
/* grow when the array is full; shrink when well under half-used (the +1024
 * slack avoids resize thrashing on small heaps)
 */
#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
	 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix) \
struct prefix ## _head { struct heap_head hh; }; \
struct prefix ## _item { struct heap_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HEAP(var) { }
/* binary heap ordered by cmpfn; _first/_pop yield the smallest item.
 * _const_next iterates in array (not sorted) order.
 *
 * NOTE(review): the count++/count-- lines were dropped from the garbled
 * original; placement reconstructed from the visible array[h->hh.count]
 * accesses — confirm against upstream.
 */
#define DECLARE_HEAP(prefix, type, field, cmpfn) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline int prefix ## __cmp(const struct heap_item *a, \
		const struct heap_item *b) \
{ \
	return cmpfn(container_of(a, type, field.hi), \
			container_of(b, type, field.hi)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	if (HEAP_RESIZE_TRESH_UP(h)) \
		typesafe_heap_resize(&h->hh, true); \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
			prefix ## __cmp); \
	h->hh.count++; \
	return NULL; \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct heap_item *other; \
	uint32_t index = item->field.hi.index; \
	assert(h->hh.array[index] == &item->field.hi); \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	if (cmpfn(container_of(other, type, field.hi), item) < 0) \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
	else \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct heap_item *hitem, *other; \
	if (h->hh.count == 0) \
		return NULL; \
	hitem = h->hh.array[0]; \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return container_of(hitem, type, field.hi); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->hh.count == 0) \
		return NULL; \
	return container_of(h->hh.array[0], type, field.hi); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t idx = item->field.hi.index + 1; \
	if (idx >= h->hh.count) \
		return NULL; \
	return container_of(h->hh.array[idx], type, field.hi); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t idx = item->field.hi.index; \
	if (idx >= h->hh.count) \
		return false; \
	return h->hh.array[idx] == &item->field.hi; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* heap internals, implemented out-of-line:
 * resize   - grow/shrink the backing array
 * pushdown - sift "item" down from "index" towards the leaves
 * pullup   - sift "item" up from "index" towards the root
 */
extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
		struct heap_item *item,
		int (*cmpfn)(const struct heap_item *a,
			     const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
		struct heap_item *item,
		int (*cmpfn)(const struct heap_item *a,
			     const struct heap_item *b));
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 *
 * struct name { struct namelist_item nlitem; };
 *
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
/* shared implementation for unique and non-unique sorted lists.
 * cmpfn_uq must be a total order (ties broken somehow) so _add can detect
 * duplicates; cmpfn_nuq is the user's comparison for find_lt/find_gteq.
 * add is O(n) insertion scan.
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) /* duplicate under cmpfn_uq: reject, return old item */ \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* unique sorted list: cmpfn doubles as the uniqueness check, and a linear
 * scan implements find().
 */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn); \
 \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
MACRO_REQUIRE_SEMICOLON() /* end */
/* non-unique sorted list: wrap cmpfn with a pointer-identity tie-break so
 * _add still has a total order and only rejects re-adding the same item.
 *
 * NOTE(review): the tie-break body was dropped from the garbled original;
 * reconstructed as compare-by-address — confirm against upstream.
 */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp); \
	TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;
	uint32_t hashval;
};

struct thash_head {
	struct thash_item **entries;
	uint32_t count;

	/* tabshift=0 means "no table allocated"; table size is
	 * 2^(tabshift-1), see _HASH_SIZE below.
	 */
	uint8_t tabshift;
	uint8_t minshift, maxshift;
};

#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* top (tabshift-1) bits of the 32-bit hash select the bucket */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
/* use as:
 *
 * PREDECL_HASH(namelist)
 *
 * struct name { struct namelist_item nlitem; };
 *
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HASH(var) { }

/* hash table; buckets are chained lists kept sorted by hashval */
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	h->hh.minshift = 0; \
	typesafe_hash_shrink(&h->hh); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	h->hh.count++; \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
		typesafe_hash_grow(&h->hh); \
 \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	item->field.hi.hashval = hval; \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) { \
		h->hh.count--; \
		return container_of(*np, type, field.hi); \
	} \
	item->field.hi.next = *np; \
	*np = &item->field.hi; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
		const type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	while (hitem && hitem->hashval == hval) { \
		if (!cmpfn(container_of(hitem, type, field.hi), item)) \
			return container_of(hitem, type, field.hi); \
		hitem = hitem->next; \
	} \
	return NULL; \
} \
TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
		np = &(*np)->next; \
	if (*np != &item->field.hi) \
		return NULL; \
	h->hh.count--; \
	*np = item->field.hi.next; \
	item->field.hi.next = NULL; \
	if (HASH_SHRINK_THRESHOLD(h->hh)) \
		typesafe_hash_shrink(&h->hh); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) { \
			struct thash_item *hitem = h->hh.entries[i]; \
			h->hh.entries[i] = hitem->next; \
			h->hh.count--; \
			hitem->next = NULL; \
			if (HASH_SHRINK_THRESHOLD(h->hh)) \
				typesafe_hash_shrink(&h->hh); \
			return container_of(hitem, type, field.hi); \
		} \
	return NULL; \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	/* guard added: HASH_KEY shifts by (33 - tabshift), UB at tabshift 0 */ \
	if (!h->hh.tabshift) \
		return false; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem; \
	/* original also had a dead pre-scan loop here; removed */ \
	for (hitem = h->hh.entries[hbits]; hitem && hitem->hashval <= hval; \
	     hitem = hitem->next) \
		if (hitem == &item->field.hi) \
			return true; \
	return false; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH	16
#define SKIPLIST_EMBED		4
#define SKIPLIST_OVERFLOW	(SKIPLIST_EMBED - 1)

/* only the first SKIPLIST_EMBED levels are embedded in the item; deeper
 * levels live in an overflow array, tagged into next[SKIPLIST_OVERFLOW]
 * via the pointer's low bit.
 */
struct sskip_item {
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};
/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 *
 * struct name { struct namelist_item nlitem; };
 *
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }

/* shared implementation; the heavy lifting is out-of-line in
 * typesafe_skiplist_*.  cmpfn_uq must be a total order for add/del.
 */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	/* low bit set = "overflow levels live in h->sh.overflow" tag */ \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)h->sh.overflow | 1); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *si; \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
	return container_of_null(si, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh, \
			&item->field.si, cmpfn_uq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	/* re-tag the overflow pointers to each head's own array */ \
	a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)a->sh.overflow | 1); \
	b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)b->sh.overflow | 1); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct sskip_item *first = h->sh.hitem.next[0]; \
	return container_of_null(first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct sskip_item *next = item->field.si.next[0]; \
	return container_of_null(next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next; \
	next = item ? item->field.si.next[0] : NULL; \
	return container_of_null(next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)

/* unique skiplist: user cmpfn is the total order; find() is supported */
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
			&item->field.si, &prefix ## __cmp); \
	return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp); \
MACRO_REQUIRE_SEMICOLON() /* end */
#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)

/* non-unique skiplist: __cmp_uq adds a pointer-identity tie-break so equal
 * items keep a stable total order for add/del.
 *
 * NOTE(review): the tie-break body was dropped from the garbled original;
 * reconstructed as compare-by-address — confirm against upstream.
 */
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
/* skiplist internals, implemented out-of-line.  add/del return the item on
 * success (NULL on duplicate / not-found per the implementation's contract);
 * find* return NULL when no matching item exists.
 */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
			     const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
			     const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
			     const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
			     const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item,
		int (*cmpfn)(const struct sskip_item *a,
			     const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
/* this needs to stay at the end because both files include each other.
 * the resolved order is typesafe.h before typerb.h
 * (NOTE: the #include "typerb.h" line itself appears to have been lost in
 * this copy — verify against the original file.)
 */
1099 #endif /* _FRR_TYPESAFE_H */