]>
git.proxmox.com Git - mirror_frr.git/blob - lib/typesafe.h
/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _FRR_TYPESAFE_H
#define _FRR_TYPESAFE_H
30 /* generic macros for all list-like types */
32 #define frr_each(prefix, head, item) \
33 for (item = prefix##_first(head); item; \
34 item = prefix##_next(head, item))
35 #define frr_each_safe(prefix, head, item) \
36 for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
37 prefix##_next_safe(head, \
38 (item = prefix##_first(head))); \
40 item = prefix##_safe, \
41 prefix##_safe = prefix##_next_safe(head, prefix##_safe))
42 #define frr_each_from(prefix, head, item, from) \
43 for (item = from, from = prefix##_next_safe(head, item); \
45 item = from, from = prefix##_next_safe(head, from))
48 /* non-const variants. these wrappers are the same for all the types, so
49 * bundle them together here.
51 #define TYPESAFE_FIRST_NEXT(prefix, type) \
52 macro_pure type *prefix ## _first(struct prefix##_head *h) \
54 return (type *)prefix ## _const_first(h); \
56 macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
58 return (type *)prefix ## _const_next(h, item); \
61 #define TYPESAFE_FIND(prefix, type) \
62 macro_inline type *prefix ## _find(struct prefix##_head *h, \
65 return (type *)prefix ## _const_find(h, item); \
68 #define TYPESAFE_FIND_CMP(prefix, type) \
69 macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
72 return (type *)prefix ## _const_find_lt(h, item); \
74 macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
77 return (type *)prefix ## _const_find_gteq(h, item); \
81 /* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
82 * head *AND* the head doesn'T points to itself (= everything except LIST,
83 * DLIST and SKIPLIST), just switch out the entire head
85 #define TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
86 macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
87 struct prefix##_head *b) \
89 struct prefix##_head tmp = *a; \
95 /* single-linked list, unsorted/arbitrary.
96 * can be used as queue with add_tail / pop
99 /* don't use these structs directly */
101 struct slist_item
*next
;
105 struct slist_item
*first
, **last_next
;
109 static inline void typesafe_list_add(struct slist_head
*head
,
110 struct slist_item
**pos
, struct slist_item
*item
)
114 if (pos
== head
->last_next
)
115 head
->last_next
= &item
->next
;
121 * PREDECL_LIST(namelist);
123 * struct namelist_item nlitem;
125 * DECLARE_LIST(namelist, struct name, nlitem);
127 #define PREDECL_LIST(prefix) \
128 struct prefix ## _head { struct slist_head sh; }; \
129 struct prefix ## _item { struct slist_item si; }; \
130 MACRO_REQUIRE_SEMICOLON() /* end */
132 #define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
134 #define DECLARE_LIST(prefix, type, field) \
136 macro_inline void prefix ## _init(struct prefix##_head *h) \
138 memset(h, 0, sizeof(*h)); \
139 h->sh.last_next = &h->sh.first; \
141 macro_inline void prefix ## _fini(struct prefix##_head *h) \
143 memset(h, 0, sizeof(*h)); \
145 macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
147 typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
149 macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
151 typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
153 macro_inline void prefix ## _add_after(struct prefix##_head *h, \
154 type *after, type *item) \
156 struct slist_item **nextp; \
157 nextp = after ? &after->field.si.next : &h->sh.first; \
158 typesafe_list_add(&h->sh, nextp, &item->field.si); \
160 /* TODO: del_hint */ \
161 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
163 struct slist_item **iter = &h->sh.first; \
164 while (*iter && *iter != &item->field.si) \
165 iter = &(*iter)->next; \
169 *iter = item->field.si.next; \
170 if (!item->field.si.next) \
171 h->sh.last_next = iter; \
174 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
176 struct slist_item *sitem = h->sh.first; \
180 h->sh.first = sitem->next; \
181 if (h->sh.first == NULL) \
182 h->sh.last_next = &h->sh.first; \
183 return container_of(sitem, type, field.si); \
185 macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
186 struct prefix##_head *b) \
188 struct prefix##_head tmp = *a; \
191 if (a->sh.last_next == &b->sh.first) \
192 a->sh.last_next = &a->sh.first; \
193 if (b->sh.last_next == &a->sh.first) \
194 b->sh.last_next = &b->sh.first; \
196 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
198 return container_of_null(h->sh.first, type, field.si); \
200 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
203 const struct slist_item *sitem = &item->field.si; \
204 return container_of_null(sitem->next, type, field.si); \
206 TYPESAFE_FIRST_NEXT(prefix, type) \
207 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
209 struct slist_item *sitem; \
212 sitem = &item->field.si; \
213 return container_of_null(sitem->next, type, field.si); \
215 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
217 return h->sh.count; \
219 MACRO_REQUIRE_SEMICOLON() /* end */
221 /* don't use these structs directly */
223 struct dlist_item
*next
;
224 struct dlist_item
*prev
;
228 struct dlist_item hitem
;
232 static inline void typesafe_dlist_add(struct dlist_head
*head
,
233 struct dlist_item
*prev
, struct dlist_item
*item
)
235 item
->next
= prev
->next
;
236 item
->next
->prev
= item
;
242 static inline void typesafe_dlist_swap_all(struct dlist_head
*a
,
243 struct dlist_head
*b
)
245 struct dlist_head tmp
= *a
;
249 a
->hitem
.next
= b
->hitem
.next
;
250 a
->hitem
.prev
= b
->hitem
.prev
;
251 a
->hitem
.next
->prev
= &a
->hitem
;
252 a
->hitem
.prev
->next
= &a
->hitem
;
254 a
->hitem
.next
= &a
->hitem
;
255 a
->hitem
.prev
= &a
->hitem
;
258 b
->count
= tmp
.count
;
260 b
->hitem
.next
= tmp
.hitem
.next
;
261 b
->hitem
.prev
= tmp
.hitem
.prev
;
262 b
->hitem
.next
->prev
= &b
->hitem
;
263 b
->hitem
.prev
->next
= &b
->hitem
;
265 b
->hitem
.next
= &b
->hitem
;
266 b
->hitem
.prev
= &b
->hitem
;
270 /* double-linked list, for fast item deletion
272 #define PREDECL_DLIST(prefix) \
273 struct prefix ## _head { struct dlist_head dh; }; \
274 struct prefix ## _item { struct dlist_item di; }; \
275 MACRO_REQUIRE_SEMICOLON() /* end */
277 #define INIT_DLIST(var) { .dh = { \
278 .hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
280 #define DECLARE_DLIST(prefix, type, field) \
282 macro_inline void prefix ## _init(struct prefix##_head *h) \
284 memset(h, 0, sizeof(*h)); \
285 h->dh.hitem.prev = &h->dh.hitem; \
286 h->dh.hitem.next = &h->dh.hitem; \
288 macro_inline void prefix ## _fini(struct prefix##_head *h) \
290 memset(h, 0, sizeof(*h)); \
292 macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
294 typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
296 macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
298 typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
300 macro_inline void prefix ## _add_after(struct prefix##_head *h, \
301 type *after, type *item) \
303 struct dlist_item *prev; \
304 prev = after ? &after->field.di : &h->dh.hitem; \
305 typesafe_dlist_add(&h->dh, prev, &item->field.di); \
307 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
309 struct dlist_item *ditem = &item->field.di; \
310 ditem->prev->next = ditem->next; \
311 ditem->next->prev = ditem->prev; \
313 ditem->prev = ditem->next = NULL; \
316 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
318 struct dlist_item *ditem = h->dh.hitem.next; \
319 if (ditem == &h->dh.hitem) \
321 ditem->prev->next = ditem->next; \
322 ditem->next->prev = ditem->prev; \
324 return container_of(ditem, type, field.di); \
326 macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
327 struct prefix##_head *b) \
329 typesafe_dlist_swap_all(&a->dh, &b->dh); \
331 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
333 const struct dlist_item *ditem = h->dh.hitem.next; \
334 if (ditem == &h->dh.hitem) \
336 return container_of(ditem, type, field.di); \
338 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
341 const struct dlist_item *ditem = &item->field.di; \
342 if (ditem->next == &h->dh.hitem) \
344 return container_of(ditem->next, type, field.di); \
346 TYPESAFE_FIRST_NEXT(prefix, type) \
347 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
351 return prefix ## _next(h, item); \
353 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
355 return h->dh.count; \
357 MACRO_REQUIRE_SEMICOLON() /* end */
359 /* note: heap currently caps out at 4G items */
362 typedef uint32_t heap_index_i
;
369 struct heap_item
**array
;
370 uint32_t arraysz
, count
;
373 #define HEAP_RESIZE_TRESH_UP(h) \
374 (h->hh.count + 1 >= h->hh.arraysz)
375 #define HEAP_RESIZE_TRESH_DN(h) \
376 (h->hh.count == 0 || \
377 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)
379 #define PREDECL_HEAP(prefix) \
380 struct prefix ## _head { struct heap_head hh; }; \
381 struct prefix ## _item { struct heap_item hi; }; \
382 MACRO_REQUIRE_SEMICOLON() /* end */
384 #define INIT_HEAP(var) { }
386 #define DECLARE_HEAP(prefix, type, field, cmpfn) \
388 macro_inline void prefix ## _init(struct prefix##_head *h) \
390 memset(h, 0, sizeof(*h)); \
392 macro_inline void prefix ## _fini(struct prefix##_head *h) \
394 assert(h->hh.count == 0); \
395 memset(h, 0, sizeof(*h)); \
397 macro_inline int prefix ## __cmp(const struct heap_item *a, \
398 const struct heap_item *b) \
400 return cmpfn(container_of(a, type, field.hi), \
401 container_of(b, type, field.hi)); \
403 macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
405 if (HEAP_RESIZE_TRESH_UP(h)) \
406 typesafe_heap_resize(&h->hh, true); \
407 typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
412 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
414 struct heap_item *other; \
415 uint32_t index = item->field.hi.index; \
416 assert(h->hh.array[index] == &item->field.hi); \
418 other = h->hh.array[h->hh.count]; \
419 if (cmpfn(container_of(other, type, field.hi), item) < 0) \
420 typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
422 typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
423 if (HEAP_RESIZE_TRESH_DN(h)) \
424 typesafe_heap_resize(&h->hh, false); \
427 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
429 struct heap_item *hitem, *other; \
430 if (h->hh.count == 0) \
432 hitem = h->hh.array[0]; \
434 other = h->hh.array[h->hh.count]; \
435 typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
436 if (HEAP_RESIZE_TRESH_DN(h)) \
437 typesafe_heap_resize(&h->hh, false); \
438 return container_of(hitem, type, field.hi); \
440 TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
441 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
443 if (h->hh.count == 0) \
445 return container_of(h->hh.array[0], type, field.hi); \
447 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
450 uint32_t idx = item->field.hi.index + 1; \
451 if (idx >= h->hh.count) \
453 return container_of(h->hh.array[idx], type, field.hi); \
455 TYPESAFE_FIRST_NEXT(prefix, type) \
456 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
460 return prefix ## _next(h, item); \
462 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
464 return h->hh.count; \
466 MACRO_REQUIRE_SEMICOLON() /* end */
468 extern void typesafe_heap_resize(struct heap_head
*head
, bool grow
);
469 extern void typesafe_heap_pushdown(struct heap_head
*head
, uint32_t index
,
470 struct heap_item
*item
,
471 int (*cmpfn
)(const struct heap_item
*a
,
472 const struct heap_item
*b
));
473 extern void typesafe_heap_pullup(struct heap_head
*head
, uint32_t index
,
474 struct heap_item
*item
,
475 int (*cmpfn
)(const struct heap_item
*a
,
476 const struct heap_item
*b
));
478 /* single-linked list, sorted.
479 * can be used as priority queue with add / pop
482 /* don't use these structs directly */
484 struct ssort_item
*next
;
488 struct ssort_item
*first
;
494 * PREDECL_SORTLIST(namelist)
496 * struct namelist_item nlitem;
498 * DECLARE_SORTLIST(namelist, struct name, nlitem)
500 #define _PREDECL_SORTLIST(prefix) \
501 struct prefix ## _head { struct ssort_head sh; }; \
502 struct prefix ## _item { struct ssort_item si; }; \
503 MACRO_REQUIRE_SEMICOLON() /* end */
505 #define INIT_SORTLIST_UNIQ(var) { }
506 #define INIT_SORTLIST_NONUNIQ(var) { }
508 #define PREDECL_SORTLIST_UNIQ(prefix) \
509 _PREDECL_SORTLIST(prefix)
510 #define PREDECL_SORTLIST_NONUNIQ(prefix) \
511 _PREDECL_SORTLIST(prefix)
513 #define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
515 macro_inline void prefix ## _init(struct prefix##_head *h) \
517 memset(h, 0, sizeof(*h)); \
519 macro_inline void prefix ## _fini(struct prefix##_head *h) \
521 memset(h, 0, sizeof(*h)); \
523 macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
525 struct ssort_item **np = &h->sh.first; \
527 while (*np && (c = cmpfn_uq( \
528 container_of(*np, type, field.si), item)) < 0) \
531 return container_of(*np, type, field.si); \
532 item->field.si.next = *np; \
533 *np = &item->field.si; \
537 macro_inline const type *prefix ## _const_find_gteq( \
538 const struct prefix##_head *h, const type *item) \
540 const struct ssort_item *sitem = h->sh.first; \
542 while (sitem && (cmpval = cmpfn_nuq( \
543 container_of(sitem, type, field.si), item)) < 0) \
544 sitem = sitem->next; \
545 return container_of_null(sitem, type, field.si); \
547 macro_inline const type *prefix ## _const_find_lt( \
548 const struct prefix##_head *h, const type *item) \
550 const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
552 while (sitem && (cmpval = cmpfn_nuq( \
553 container_of(sitem, type, field.si), item)) < 0) \
554 sitem = (prev = sitem)->next; \
555 return container_of_null(prev, type, field.si); \
557 TYPESAFE_FIND_CMP(prefix, type) \
558 /* TODO: del_hint */ \
559 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
561 struct ssort_item **iter = &h->sh.first; \
562 while (*iter && *iter != &item->field.si) \
563 iter = &(*iter)->next; \
567 *iter = item->field.si.next; \
570 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
572 struct ssort_item *sitem = h->sh.first; \
576 h->sh.first = sitem->next; \
577 return container_of(sitem, type, field.si); \
579 TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
580 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
582 return container_of_null(h->sh.first, type, field.si); \
584 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
587 const struct ssort_item *sitem = &item->field.si; \
588 return container_of_null(sitem->next, type, field.si); \
590 TYPESAFE_FIRST_NEXT(prefix, type) \
591 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
593 struct ssort_item *sitem; \
596 sitem = &item->field.si; \
597 return container_of_null(sitem->next, type, field.si); \
599 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
601 return h->sh.count; \
603 MACRO_REQUIRE_SEMICOLON() /* end */
605 #define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
606 _DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn); \
608 macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
611 const struct ssort_item *sitem = h->sh.first; \
613 while (sitem && (cmpval = cmpfn( \
614 container_of(sitem, type, field.si), item)) < 0) \
615 sitem = sitem->next; \
616 if (!sitem || cmpval > 0) \
618 return container_of(sitem, type, field.si); \
620 TYPESAFE_FIND(prefix, type) \
621 MACRO_REQUIRE_SEMICOLON() /* end */
623 #define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
624 macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
626 int cmpval = cmpfn(a, b); \
635 _DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp); \
636 MACRO_REQUIRE_SEMICOLON() /* end */
639 /* hash, "sorted" by hash value
642 /* don't use these structs directly */
644 struct thash_item
*next
;
649 struct thash_item
**entries
;
653 uint8_t minshift
, maxshift
;
656 #define _HASH_SIZE(tabshift) \
657 ((1U << (tabshift)) >> 1)
658 #define HASH_SIZE(head) \
659 _HASH_SIZE((head).tabshift)
660 #define _HASH_KEY(tabshift, val) \
661 ((val) >> (33 - (tabshift)))
662 #define HASH_KEY(head, val) \
663 _HASH_KEY((head).tabshift, val)
664 #define HASH_GROW_THRESHOLD(head) \
665 ((head).count >= HASH_SIZE(head))
666 #define HASH_SHRINK_THRESHOLD(head) \
667 ((head).count <= (HASH_SIZE(head) - 1) / 2)
669 extern void typesafe_hash_grow(struct thash_head
*head
);
670 extern void typesafe_hash_shrink(struct thash_head
*head
);
674 * PREDECL_HASH(namelist)
676 * struct namelist_item nlitem;
678 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
680 #define PREDECL_HASH(prefix) \
681 struct prefix ## _head { struct thash_head hh; }; \
682 struct prefix ## _item { struct thash_item hi; }; \
683 MACRO_REQUIRE_SEMICOLON() /* end */
685 #define INIT_HASH(var) { }
687 #define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
689 macro_inline void prefix ## _init(struct prefix##_head *h) \
691 memset(h, 0, sizeof(*h)); \
693 macro_inline void prefix ## _fini(struct prefix##_head *h) \
695 assert(h->hh.count == 0); \
696 h->hh.minshift = 0; \
697 typesafe_hash_shrink(&h->hh); \
698 memset(h, 0, sizeof(*h)); \
700 macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
703 if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
704 typesafe_hash_grow(&h->hh); \
706 uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
707 item->field.hi.hashval = hval; \
708 struct thash_item **np = &h->hh.entries[hbits]; \
709 while (*np && (*np)->hashval < hval) \
711 if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) { \
713 return container_of(*np, type, field.hi); \
715 item->field.hi.next = *np; \
716 *np = &item->field.hi; \
719 macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
722 if (!h->hh.tabshift) \
724 uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
725 const struct thash_item *hitem = h->hh.entries[hbits]; \
726 while (hitem && hitem->hashval < hval) \
727 hitem = hitem->next; \
728 while (hitem && hitem->hashval == hval) { \
729 if (!cmpfn(container_of(hitem, type, field.hi), item)) \
730 return container_of(hitem, type, field.hi); \
731 hitem = hitem->next; \
735 TYPESAFE_FIND(prefix, type) \
736 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
738 if (!h->hh.tabshift) \
740 uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
741 struct thash_item **np = &h->hh.entries[hbits]; \
742 while (*np && (*np)->hashval < hval) \
744 while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
746 if (*np != &item->field.hi) \
748 *np = item->field.hi.next; \
749 item->field.hi.next = NULL; \
751 if (HASH_SHRINK_THRESHOLD(h->hh)) \
752 typesafe_hash_shrink(&h->hh); \
755 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
758 for (i = 0; i < HASH_SIZE(h->hh); i++) \
759 if (h->hh.entries[i]) { \
760 struct thash_item *hitem = h->hh.entries[i]; \
761 h->hh.entries[i] = hitem->next; \
763 hitem->next = NULL; \
764 if (HASH_SHRINK_THRESHOLD(h->hh)) \
765 typesafe_hash_shrink(&h->hh); \
766 return container_of(hitem, type, field.hi); \
770 TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
771 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
774 for (i = 0; i < HASH_SIZE(h->hh); i++) \
775 if (h->hh.entries[i]) \
776 return container_of(h->hh.entries[i], type, field.hi); \
779 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
782 const struct thash_item *hitem = &item->field.hi; \
784 return container_of(hitem->next, type, field.hi); \
785 uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
786 for (; i < HASH_SIZE(h->hh); i++) \
787 if (h->hh.entries[i]) \
788 return container_of(h->hh.entries[i], type, field.hi); \
791 TYPESAFE_FIRST_NEXT(prefix, type) \
792 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
796 return prefix ## _next(h, item); \
798 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
800 return h->hh.count; \
802 MACRO_REQUIRE_SEMICOLON() /* end */
805 * can be used as priority queue with add / pop
808 /* don't use these structs directly */
809 #define SKIPLIST_MAXDEPTH 16
810 #define SKIPLIST_EMBED 4
811 #define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)
814 struct sskip_item
*next
[SKIPLIST_EMBED
];
817 struct sskip_overflow
{
818 struct sskip_item
*next
[SKIPLIST_MAXDEPTH
- SKIPLIST_OVERFLOW
];
822 struct sskip_item hitem
;
823 struct sskip_item
*overflow
[SKIPLIST_MAXDEPTH
- SKIPLIST_OVERFLOW
];
829 * PREDECL_SKIPLIST(namelist)
831 * struct namelist_item nlitem;
833 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
835 #define _PREDECL_SKIPLIST(prefix) \
836 struct prefix ## _head { struct sskip_head sh; }; \
837 struct prefix ## _item { struct sskip_item si; }; \
838 MACRO_REQUIRE_SEMICOLON() /* end */
840 #define INIT_SKIPLIST_UNIQ(var) { }
841 #define INIT_SKIPLIST_NONUNIQ(var) { }
843 #define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
845 macro_inline void prefix ## _init(struct prefix##_head *h) \
847 memset(h, 0, sizeof(*h)); \
848 h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
849 ((uintptr_t)h->sh.overflow | 1); \
851 macro_inline void prefix ## _fini(struct prefix##_head *h) \
853 memset(h, 0, sizeof(*h)); \
855 macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
857 struct sskip_item *si; \
858 si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
859 return container_of_null(si, type, field.si); \
861 macro_inline const type *prefix ## _const_find_gteq( \
862 const struct prefix##_head *h, const type *item) \
864 const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
865 &item->field.si, cmpfn_nuq); \
866 return container_of_null(sitem, type, field.si); \
868 macro_inline const type *prefix ## _const_find_lt( \
869 const struct prefix##_head *h, const type *item) \
871 const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
872 &item->field.si, cmpfn_nuq); \
873 return container_of_null(sitem, type, field.si); \
875 TYPESAFE_FIND_CMP(prefix, type) \
876 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
878 struct sskip_item *sitem = typesafe_skiplist_del(&h->sh, \
879 &item->field.si, cmpfn_uq); \
880 return container_of_null(sitem, type, field.si); \
882 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
884 struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
885 return container_of_null(sitem, type, field.si); \
887 macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
888 struct prefix##_head *b) \
890 struct prefix##_head tmp = *a; \
893 a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
894 ((uintptr_t)a->sh.overflow | 1); \
895 b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
896 ((uintptr_t)b->sh.overflow | 1); \
898 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
900 const struct sskip_item *first = h->sh.hitem.next[0]; \
901 return container_of_null(first, type, field.si); \
903 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
906 const struct sskip_item *next = item->field.si.next[0]; \
907 return container_of_null(next, type, field.si); \
909 TYPESAFE_FIRST_NEXT(prefix, type) \
910 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
912 struct sskip_item *next; \
913 next = item ? item->field.si.next[0] : NULL; \
914 return container_of_null(next, type, field.si); \
916 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
918 return h->sh.count; \
920 MACRO_REQUIRE_SEMICOLON() /* end */
922 #define PREDECL_SKIPLIST_UNIQ(prefix) \
923 _PREDECL_SKIPLIST(prefix)
924 #define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
926 macro_inline int prefix ## __cmp(const struct sskip_item *a, \
927 const struct sskip_item *b) \
929 return cmpfn(container_of(a, type, field.si), \
930 container_of(b, type, field.si)); \
932 macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
935 const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
936 &item->field.si, &prefix ## __cmp); \
937 return container_of_null(sitem, type, field.si); \
939 TYPESAFE_FIND(prefix, type) \
941 _DECLARE_SKIPLIST(prefix, type, field, \
942 prefix ## __cmp, prefix ## __cmp); \
943 MACRO_REQUIRE_SEMICOLON() /* end */
945 #define PREDECL_SKIPLIST_NONUNIQ(prefix) \
946 _PREDECL_SKIPLIST(prefix)
947 #define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
949 macro_inline int prefix ## __cmp(const struct sskip_item *a, \
950 const struct sskip_item *b) \
952 return cmpfn(container_of(a, type, field.si), \
953 container_of(b, type, field.si)); \
955 macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
956 const struct sskip_item *b) \
958 int cmpval = cmpfn(container_of(a, type, field.si), \
959 container_of(b, type, field.si)); \
969 _DECLARE_SKIPLIST(prefix, type, field, \
970 prefix ## __cmp, prefix ## __cmp_uq); \
971 MACRO_REQUIRE_SEMICOLON() /* end */
974 extern struct sskip_item
*typesafe_skiplist_add(struct sskip_head
*head
,
975 struct sskip_item
*item
, int (*cmpfn
)(
976 const struct sskip_item
*a
,
977 const struct sskip_item
*b
));
978 extern const struct sskip_item
*typesafe_skiplist_find(
979 const struct sskip_head
*head
,
980 const struct sskip_item
*item
, int (*cmpfn
)(
981 const struct sskip_item
*a
,
982 const struct sskip_item
*b
));
983 extern const struct sskip_item
*typesafe_skiplist_find_gteq(
984 const struct sskip_head
*head
,
985 const struct sskip_item
*item
, int (*cmpfn
)(
986 const struct sskip_item
*a
,
987 const struct sskip_item
*b
));
988 extern const struct sskip_item
*typesafe_skiplist_find_lt(
989 const struct sskip_head
*head
,
990 const struct sskip_item
*item
, int (*cmpfn
)(
991 const struct sskip_item
*a
,
992 const struct sskip_item
*b
));
993 extern struct sskip_item
*typesafe_skiplist_del(
994 struct sskip_head
*head
, struct sskip_item
*item
, int (*cmpfn
)(
995 const struct sskip_item
*a
,
996 const struct sskip_item
*b
));
997 extern struct sskip_item
*typesafe_skiplist_pop(struct sskip_head
*head
);
/* this needs to stay at the end because both files include each other.
 * the resolved order is typesafe.h before typerb.h
 */

#endif /* _FRR_TYPESAFE_H */