/* lib/typesafe.h — FRRouting type-safe container templates
 * (recovered from gitweb export, commit 28d18e09f71324075408ad59972490779a60a9fe)
 */
/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _FRR_TYPESAFE_H
#define _FRR_TYPESAFE_H

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
/* generic iteration macros for all list-like types.
 * "prefix" is the PREDECL/DECLARE prefix; "item" iterates the entries.
 */
#define for_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
			item = prefix##_next(head, item))
/* deletion-safe variant: caches the next pointer before the body runs, so
 * the body may delete (or free) "item".  Uses GCC/Clang typeof().
 */
#define for_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
					(item = prefix##_first(head))); \
		item; \
		item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
/* resume iteration from "from"; also deletion-safe ("from" holds the
 * lookahead).
 */
#define for_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
		item; \
		item = from, from = prefix##_next_safe(head, from))
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;
};

struct slist_head {
	/* last_next always points at the terminating NULL "next" slot, so
	 * add_tail is O(1); it points at "first" while the list is empty.
	 */
	struct slist_item *first, **last_next;
	size_t count;
};

/* insert "item" into the slot pointed at by "pos" (either &head->first or
 * some item's &->next), keeping last_next and count up to date.
 */
static inline void typesafe_list_add(struct slist_head *head,
		struct slist_item **pos, struct slist_item *item)
{
	item->next = *pos;
	*pos = item;
	head->count++;
	if (pos == head->last_next)
		head->last_next = &item->next;
}
/* use as:
 *
 * PREDECL_LIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem)
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; };

#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
/* generate the type-safe wrapper functions for an unsorted list */
#define DECLARE_LIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	/* O(n): must walk to find the predecessor's next pointer */ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (!item->field.si.next) \
		h->sh.last_next = iter; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == NULL) \
		h->sh.last_next = &h->sh.first; \
	return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
/* NULL-tolerant variant, used by for_each_safe */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* end */
/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	/* sentinel item; next/prev point back at it when the list is empty */
	struct dlist_item hitem;
	size_t count;
};

/* insert "item" directly after "prev" (which may be the head sentinel) */
static inline void typesafe_dlist_add(struct dlist_head *head,
		struct dlist_item *prev, struct dlist_item *item)
{
	item->next = prev->next;
	item->next->prev = item;
	item->prev = prev;
	prev->next = item;
	head->count++;
}
/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; };

/* static initializer: sentinel points at itself (empty list) */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
/* generate the type-safe wrapper functions for a double-linked list */
#define DECLARE_DLIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
/* O(1) deletion — the whole point of the double-linked variant */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
/* end */
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; };

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
/* generate type-safe wrappers for a sorted single-linked list.
 * cmpfn_nuq is used for lookups (may treat equal keys as equal);
 * cmpfn_uq must produce a total order (ties broken, see NONUNIQ variant).
 *
 * NOTE(review): the scraped original had "(cmpval = cmpfn(...) < 0)" which
 * assigns the *boolean* of the comparison; fixed below to assign the
 * comparison result itself and compare that against 0.
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* returns NULL on success, or the pre-existing equal item (UNIQ lists) */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
/* TODO: del_hint */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return; \
	h->sh.count--; \
	*iter = item->field.si.next; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* end */
/* unique-key sorted list: cmpfn is used directly for both orderings,
 * and an exact-match _find() is provided.
 * (paren fix as in _DECLARE_SORTLIST: assign cmpfn's result, then < 0;
 * otherwise cmpval is 0/1 and "cmpval > 0" can never detect overshoot.)
 */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn) \
 \
macro_inline type *prefix ## _find(const struct prefix##_head *h, const type *item) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
/* end */
/* non-unique sorted list: wrap cmpfn with a pointer-identity tie-break so
 * the internal order is still total (same pattern as the skiplist
 * __cmp_uq wrapper below).  The extraction dropped this body; reconstructed —
 * TODO confirm against upstream.
 */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp) \
/* end */
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;
	uint32_t hashval;
};

struct thash_head {
	struct thash_item **entries;
	uint32_t count;

	/* tabshift: encodes table size; 0 = table not allocated.
	 * minshift/maxshift bound automatic grow/shrink.
	 */
	uint8_t tabshift;
	uint8_t minshift, maxshift;
};

/* table size is 2^(tabshift-1); tabshift==1 means size 1 */
#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* top bits of the 32-bit hash select the bucket */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);

/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; };

#define INIT_HASH(var) { }
/* generate type-safe wrappers for the hash table.
 * entries within one bucket chain are kept sorted by hashval so lookups can
 * stop early.
 */
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	h->hh.minshift = 0; \
	typesafe_hash_shrink(&h->hh); \
	memset(h, 0, sizeof(*h)); \
} \
/* returns NULL on success, or the pre-existing equal item */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	h->hh.count++; \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
		typesafe_hash_grow(&h->hh); \
 \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	item->field.hi.hashval = hval; \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) { \
		h->hh.count--; \
		return container_of(*np, type, field.hi); \
	} \
	item->field.hi.next = *np; \
	*np = &item->field.hi; \
	return NULL; \
} \
macro_inline type *prefix ## _find(const struct prefix##_head *h, const type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	while (hitem && hitem->hashval == hval) { \
		if (!cmpfn(container_of(hitem, type, field.hi), item)) \
			return container_of(hitem, type, field.hi); \
		hitem = hitem->next; \
	} \
	return NULL; \
} \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	if (!h->hh.tabshift) \
		return; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
		np = &(*np)->next; \
	if (*np != &item->field.hi) \
		return; \
	*np = item->field.hi.next; \
	item->field.hi.next = NULL; \
	h->hh.count--; \
	if (HASH_SHRINK_THRESHOLD(h->hh)) \
		typesafe_hash_shrink(&h->hh); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) { \
			struct thash_item *hitem = h->hh.entries[i]; \
			h->hh.entries[i] = hitem->next; \
			h->hh.count--; \
			hitem->next = NULL; \
			if (HASH_SHRINK_THRESHOLD(h->hh)) \
				typesafe_hash_shrink(&h->hh); \
			return container_of(hitem, type, field.hi); \
		} \
	return NULL; \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
/* end */
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH	16
#define SKIPLIST_EMBED		4
#define SKIPLIST_OVERFLOW	(SKIPLIST_EMBED - 1)

/* each item embeds only the first few levels; deeper levels live in a
 * separately allocated sskip_overflow, reachable via next[SKIPLIST_OVERFLOW]
 */
struct sskip_item {
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;
	/* head's overflow levels are embedded here, not heap-allocated */
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};
/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; };

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }
/* generate type-safe wrappers around the shared skiplist implementation
 * (typesafe_skiplist_* in typesafe.c)
 */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	/* tag pointer (low bit set) links the head to its embedded \
	 * overflow array \
	 */ \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)h->sh.overflow | 1); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *si; \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
	return container_of_null(si, type, field.si); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	typesafe_skiplist_del(&h->sh, &item->field.si, cmpfn_uq); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
	return container_of_null(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct sskip_item *first = h->sh.hitem.next[0]; \
	return container_of_null(first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next = item->field.si.next[0]; \
	return container_of_null(next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next; \
	next = item ? item->field.si.next[0] : NULL; \
	return container_of_null(next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* end */
#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
/* unique-key skiplist: cmpfn is adapted to sskip_item pointers, and an
 * exact-match _find() is provided.
 */
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline type *prefix ## _find(const struct prefix##_head *h, const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
			&item->field.si, &prefix ## __cmp); \
	return container_of_null(sitem, type, field.si); \
} \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp) \
/* end */
#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
/* non-unique skiplist: internal comparisons tie-break equal keys by item
 * address so the internal order stays total.
 */
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq) \
/* end */
/* shared skiplist implementation, defined in typesafe.c; the DECLARE_
 * macros above wrap these with type-safe inline functions.
 */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
			const struct sskip_item *a,
			const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find(struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
			const struct sskip_item *a,
			const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find_gteq(struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
			const struct sskip_item *a,
			const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find_lt(struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
			const struct sskip_item *a,
			const struct sskip_item *b));
extern void typesafe_skiplist_del(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
			const struct sskip_item *a,
			const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
/* this needs to stay at the end because both files include each other.
 * the resolved order is typesafe.h before typerb.h
 */
#include "typerb.h"

#endif /* _FRR_TYPESAFE_H */