/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _FRR_TYPESAFE_H
#define _FRR_TYPESAFE_H
/* generic macros for all list-like types.
 *
 * frr_each: plain iteration; the current item must NOT be removed.
 * frr_each_safe: tolerates removal of the current item, by prefetching
 *   the next pointer into a hidden loop-scope variable (prefix##_safe).
 * frr_each_from: resumes iteration at "from"; "from" is kept one step
 *   ahead so the current item may be removed.
 */
#define frr_each(prefix, head, item)                                           \
	for (item = prefix##_first(head); item;                                \
			item = prefix##_next(head, item))
#define frr_each_safe(prefix, head, item)                                      \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe =            \
			prefix##_next_safe(head,                               \
					(item = prefix##_first(head)));        \
			item;                                                  \
			item = prefix##_safe,                                  \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
#define frr_each_from(prefix, head, item, from)                                \
	for (item = from, from = prefix##_next_safe(head, item);               \
			item;                                                  \
			item = from, from = prefix##_next_safe(head, from))
/* non-const variants. these wrappers are the same for all the types, so
 * bundle them together here.
 * They simply cast away const from the _const_* accessors; callers that
 * hold a non-const head get non-const items back.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type)                                      \
macro_pure type *prefix ## _first(struct prefix##_head *h)                     \
{                                                                              \
	return (type *)prefix ## _const_first(h);                              \
}                                                                              \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item)          \
{                                                                              \
	return (type *)prefix ## _const_next(h, item);                         \
}                                                                              \
/* ... */
#define TYPESAFE_FIND(prefix, type)                                            \
macro_inline type *prefix ## _find(struct prefix##_head *h,                    \
				   const type *item)                           \
{                                                                              \
	return (type *)prefix ## _const_find(h, item);                         \
}                                                                              \
/* ... */
#define TYPESAFE_FIND_CMP(prefix, type)                                        \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h,                 \
				      const type *item)                        \
{                                                                              \
	return (type *)prefix ## _const_find_lt(h, item);                      \
}                                                                              \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h,               \
					const type *item)                      \
{                                                                              \
	return (type *)prefix ## _const_find_gteq(h, item);                    \
}                                                                              \
/* ... */
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;
};

struct slist_head {
	struct slist_item *first, **last_next;
	/* number of items currently on the list (returned by _count) */
	size_t count;
};
96 static inline void typesafe_list_add(struct slist_head
*head
,
97 struct slist_item
**pos
, struct slist_item
*item
)
101 if (pos
== head
->last_next
)
102 head
->last_next
= &item
->next
;
/* use as:
 *
 * PREDECL_LIST(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem);
 */
#define PREDECL_LIST(prefix)                                                   \
struct prefix ## _head { struct slist_head sh; };                              \
struct prefix ## _item { struct slist_item si; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
/* generate the type-safe wrapper functions for an unsorted single-linked
 * list.  prefix##_del is O(n) since the list is single-linked.
 */
#define DECLARE_LIST(prefix, type, field)                                      \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	h->sh.last_next = &h->sh.first;                                        \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si);              \
}                                                                              \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si);           \
}                                                                              \
macro_inline void prefix ## _add_after(struct prefix##_head *h,                \
		type *after, type *item)                                       \
{                                                                              \
	struct slist_item **nextp;                                             \
	/* after == NULL means "insert at head" */                             \
	nextp = after ? &after->field.si.next : &h->sh.first;                  \
	typesafe_list_add(&h->sh, nextp, &item->field.si);                     \
}                                                                              \
/* TODO: del_hint */                                                           \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	/* O(n): walk the chain to find the pointer referencing item */        \
	struct slist_item **iter = &h->sh.first;                               \
	while (*iter && *iter != &item->field.si)                              \
		iter = &(*iter)->next;                                         \
	if (!*iter)                                                            \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	*iter = item->field.si.next;                                           \
	/* removing the tail item: pull last_next back */                      \
	if (!item->field.si.next)                                              \
		h->sh.last_next = iter;                                        \
	item->field.si.next = NULL;                                            \
	return item;                                                           \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct slist_item *sitem = h->sh.first;                                \
	if (!sitem)                                                            \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	h->sh.first = sitem->next;                                             \
	if (h->sh.first == NULL)                                               \
		h->sh.last_next = &h->sh.first;                                \
	sitem->next = NULL;                                                    \
	return container_of(sitem, type, field.si);                            \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	return container_of_null(h->sh.first, type, field.si);                 \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct slist_item *sitem = &item->field.si;                      \
	return container_of_null(sitem->next, type, field.si);                 \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct slist_item *sitem;                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	sitem = &item->field.si;                                               \
	return container_of_null(sitem->next, type, field.si);                 \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	/* sentinel node; an empty list has hitem.next == hitem.prev ==
	 * &hitem, so no NULL checks are needed on insert/delete.
	 */
	struct dlist_item hitem;
	size_t count;
};

/* insert item immediately after prev (which may be the sentinel). */
static inline void typesafe_dlist_add(struct dlist_head *head,
		struct dlist_item *prev, struct dlist_item *item)
{
	item->next = prev->next;
	item->next->prev = item;
	item->prev = prev;
	prev->next = item;
	head->count++;
}
/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix)                                                  \
struct prefix ## _head { struct dlist_head dh; };                              \
struct prefix ## _item { struct dlist_item di; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer: point the sentinel at itself (empty ring) */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
/* generate the type-safe wrapper functions for a double-linked list.
 * _del is O(1) thanks to the prev pointer.
 */
#define DECLARE_DLIST(prefix, type, field)                                     \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	h->dh.hitem.prev = &h->dh.hitem;                                       \
	h->dh.hitem.next = &h->dh.hitem;                                       \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di);             \
}                                                                              \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di);         \
}                                                                              \
macro_inline void prefix ## _add_after(struct prefix##_head *h,                \
		type *after, type *item)                                       \
{                                                                              \
	struct dlist_item *prev;                                               \
	/* after == NULL means "insert at head" (after the sentinel) */        \
	prev = after ? &after->field.di : &h->dh.hitem;                        \
	typesafe_dlist_add(&h->dh, prev, &item->field.di);                     \
}                                                                              \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct dlist_item *ditem = &item->field.di;                            \
	ditem->prev->next = ditem->next;                                       \
	ditem->next->prev = ditem->prev;                                       \
	h->dh.count--;                                                         \
	ditem->prev = ditem->next = NULL;                                      \
	return item;                                                           \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct dlist_item *ditem = h->dh.hitem.next;                           \
	if (ditem == &h->dh.hitem)                                             \
		return NULL;                                                   \
	ditem->prev->next = ditem->next;                                       \
	ditem->next->prev = ditem->prev;                                       \
	h->dh.count--;                                                         \
	return container_of(ditem, type, field.di);                            \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	const struct dlist_item *ditem = h->dh.hitem.next;                     \
	if (ditem == &h->dh.hitem)                                             \
		return NULL;                                                   \
	return container_of(ditem, type, field.di);                            \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct dlist_item *ditem = &item->field.di;                      \
	if (ditem->next == &h->dh.hitem)                                       \
		return NULL;                                                   \
	return container_of(ditem->next, type, field.di);                      \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	return prefix ## _next(h, item);                                       \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->dh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
/* note: heap currently caps out at 4G items */

typedef uint32_t heap_index_i;

struct heap_item {
	/* position of this item in heap_head.array; maintained by the
	 * pullup/pushdown helpers so _del can locate items in O(1).
	 */
	uint32_t index;
};

struct heap_head {
	struct heap_item **array;
	uint32_t arraysz, count;
};

#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
	 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix)                                                   \
struct prefix ## _head { struct heap_head hh; };                               \
struct prefix ## _item { struct heap_item hi; };                               \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HEAP(var) { }
/* generate the type-safe wrapper functions for a binary heap.
 * _pop returns the smallest item per cmpfn; iteration order (_first/_next)
 * is array order, NOT sorted order.
 */
#define DECLARE_HEAP(prefix, type, field, cmpfn)                               \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	assert(h->hh.count == 0);                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline int prefix ## __cmp(const struct heap_item *a,                    \
		const struct heap_item *b)                                     \
{                                                                              \
	return cmpfn(container_of(a, type, field.hi),                          \
			container_of(b, type, field.hi));                      \
}                                                                              \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	if (HEAP_RESIZE_TRESH_UP(h))                                           \
		typesafe_heap_resize(&h->hh, true);                            \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi,             \
			prefix ## __cmp);                                      \
	h->hh.count++;                                                         \
	return NULL;                                                           \
}                                                                              \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct heap_item *other;                                               \
	uint32_t index = item->field.hi.index;                                 \
	assert(h->hh.array[index] == &item->field.hi);                         \
	h->hh.count--;                                                         \
	/* move the last item into the hole; restore heap property in          \
	 * whichever direction it violates it.                                 \
	 */                                                                    \
	other = h->hh.array[h->hh.count];                                      \
	if (cmpfn(container_of(other, type, field.hi), item) < 0)              \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp);   \
	else                                                                   \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h))                                           \
		typesafe_heap_resize(&h->hh, false);                           \
	return item;                                                           \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct heap_item *hitem, *other;                                       \
	if (h->hh.count == 0)                                                  \
		return NULL;                                                   \
	hitem = h->hh.array[0];                                                \
	h->hh.count--;                                                         \
	other = h->hh.array[h->hh.count];                                      \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp);             \
	if (HEAP_RESIZE_TRESH_DN(h))                                           \
		typesafe_heap_resize(&h->hh, false);                           \
	return container_of(hitem, type, field.hi);                            \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	if (h->hh.count == 0)                                                  \
		return NULL;                                                   \
	return container_of(h->hh.array[0], type, field.hi);                   \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	uint32_t idx = item->field.hi.index + 1;                               \
	if (idx >= h->hh.count)                                                \
		return NULL;                                                   \
	return container_of(h->hh.array[idx], type, field.hi);                 \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	return prefix ## _next(h, item);                                       \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->hh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
/* out-of-line heap helpers (implemented in typesafe.c):
 * resize grows/shrinks the backing array; pullup/pushdown sift an item
 * towards the root / towards the leaves respectively.
 */
extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
		struct heap_item *item,
		int (*cmpfn)(const struct heap_item *a,
			     const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
		struct heap_item *item,
		int (*cmpfn)(const struct heap_item *a,
			     const struct heap_item *b));
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix)                                              \
struct prefix ## _head { struct ssort_head sh; };                              \
struct prefix ## _item { struct ssort_item si; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
/* shared implementation for both unique and non-unique sorted lists.
 * cmpfn_uq is used for insertion/deletion (must be a total order);
 * cmpfn_nuq is the user's comparison, used for find_lt / find_gteq.
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq)            \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct ssort_item **np = &h->sh.first;                                 \
	int c = 1;                                                             \
	while (*np && (c = cmpfn_uq(                                           \
			container_of(*np, type, field.si), item)) < 0)         \
		np = &(*np)->next;                                             \
	/* duplicate: return the existing item, don't insert */                \
	if (c == 0)                                                            \
		return container_of(*np, type, field.si);                      \
	item->field.si.next = *np;                                             \
	*np = &item->field.si;                                                 \
	h->sh.count++;                                                         \
	return NULL;                                                           \
}                                                                              \
macro_inline const type *prefix ## _const_find_gteq(                           \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct ssort_item *sitem = h->sh.first;                          \
	int cmpval = 0;                                                        \
	while (sitem && (cmpval = cmpfn_nuq(                                   \
			container_of(sitem, type, field.si), item)) < 0)       \
		sitem = sitem->next;                                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
macro_inline const type *prefix ## _const_find_lt(                             \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first;            \
	int cmpval = 0;                                                        \
	while (sitem && (cmpval = cmpfn_nuq(                                   \
			container_of(sitem, type, field.si), item)) < 0)       \
		sitem = (prev = sitem)->next;                                  \
	return container_of_null(prev, type, field.si);                        \
}                                                                              \
TYPESAFE_FIND_CMP(prefix, type)                                                \
/* TODO: del_hint */                                                           \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct ssort_item **iter = &h->sh.first;                               \
	while (*iter && *iter != &item->field.si)                              \
		iter = &(*iter)->next;                                         \
	if (!*iter)                                                            \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	*iter = item->field.si.next;                                           \
	item->field.si.next = NULL;                                            \
	return item;                                                           \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct ssort_item *sitem = h->sh.first;                                \
	if (!sitem)                                                            \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	h->sh.first = sitem->next;                                             \
	return container_of(sitem, type, field.si);                            \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	return container_of_null(h->sh.first, type, field.si);                 \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct ssort_item *sitem = &item->field.si;                      \
	return container_of_null(sitem->next, type, field.si);                 \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct ssort_item *sitem;                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	sitem = &item->field.si;                                               \
	return container_of_null(sitem->next, type, field.si);                 \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn)                      \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn);                  \
                                                                               \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
					       const type *item)               \
{                                                                              \
	const struct ssort_item *sitem = h->sh.first;                          \
	int cmpval = 0;                                                        \
	while (sitem && (cmpval = cmpfn(                                       \
			container_of(sitem, type, field.si), item)) < 0)       \
		sitem = sitem->next;                                           \
	/* walked past the slot without an exact match */                      \
	if (!sitem || cmpval > 0)                                              \
		return NULL;                                                   \
	return container_of(sitem, type, field.si);                            \
}                                                                              \
TYPESAFE_FIND(prefix, type)                                                    \
MACRO_REQUIRE_SEMICOLON() /* end */

#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn)                   \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b)             \
{                                                                              \
	int cmpval = cmpfn(a, b);                                              \
	if (cmpval)                                                            \
		return cmpval;                                                 \
	/* equal-keyed items: break the tie by address so the internal         \
	 * order (used for insert/delete) stays a total order.                 \
	 */                                                                    \
	if (a < b)                                                             \
		return -1;                                                     \
	if (a > b)                                                             \
		return 1;                                                      \
	return 0;                                                              \
}                                                                              \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp);    \
MACRO_REQUIRE_SEMICOLON() /* end */
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;
	uint32_t hashval;
};

struct thash_head {
	struct thash_item **entries;
	uint32_t count;

	/* tabshift encodes the table size as _HASH_SIZE(tabshift);
	 * 0 means "no table allocated yet".
	 */
	uint8_t tabshift;
	uint8_t minshift, maxshift;
};

#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* top tabshift-1 bits of the hash value select the bucket */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix)                                                   \
struct prefix ## _head { struct thash_head hh; };                              \
struct prefix ## _item { struct thash_item hi; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HASH(var) { }
/* generate the type-safe wrapper functions for a hash table.  Buckets are
 * chained lists kept sorted by hash value; _add rejects duplicates per
 * cmpfn and returns the existing item.
 */
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn)                       \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	assert(h->hh.count == 0);                                              \
	/* drop minshift so shrink can free the table entirely */              \
	h->hh.minshift = 0;                                                    \
	typesafe_hash_shrink(&h->hh);                                          \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	h->hh.count++;                                                         \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh))                     \
		typesafe_hash_grow(&h->hh);                                    \
                                                                               \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval);           \
	item->field.hi.hashval = hval;                                         \
	struct thash_item **np = &h->hh.entries[hbits];                        \
	while (*np && (*np)->hashval < hval)                                   \
		np = &(*np)->next;                                             \
	if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) {      \
		h->hh.count--;                                                 \
		return container_of(*np, type, field.hi);                      \
	}                                                                      \
	item->field.hi.next = *np;                                             \
	*np = &item->field.hi;                                                 \
	return NULL;                                                           \
}                                                                              \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
					       const type *item)               \
{                                                                              \
	if (!h->hh.tabshift)                                                   \
		return NULL;                                                   \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval);           \
	const struct thash_item *hitem = h->hh.entries[hbits];                 \
	while (hitem && hitem->hashval < hval)                                 \
		hitem = hitem->next;                                           \
	while (hitem && hitem->hashval == hval) {                              \
		if (!cmpfn(container_of(hitem, type, field.hi), item))         \
			return container_of(hitem, type, field.hi);            \
		hitem = hitem->next;                                           \
	}                                                                      \
	return NULL;                                                           \
}                                                                              \
TYPESAFE_FIND(prefix, type)                                                    \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	if (!h->hh.tabshift)                                                   \
		return NULL;                                                   \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits];                        \
	while (*np && (*np)->hashval < hval)                                   \
		np = &(*np)->next;                                             \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval)        \
		np = &(*np)->next;                                             \
	if (*np != &item->field.hi)                                            \
		return NULL;                                                   \
	*np = item->field.hi.next;                                             \
	item->field.hi.next = NULL;                                            \
	h->hh.count--;                                                         \
	if (HASH_SHRINK_THRESHOLD(h->hh))                                      \
		typesafe_hash_shrink(&h->hh);                                  \
	return item;                                                           \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	uint32_t i;                                                            \
	for (i = 0; i < HASH_SIZE(h->hh); i++)                                 \
		if (h->hh.entries[i]) {                                        \
			struct thash_item *hitem = h->hh.entries[i];           \
			h->hh.entries[i] = hitem->next;                        \
			h->hh.count--;                                         \
			hitem->next = NULL;                                    \
			if (HASH_SHRINK_THRESHOLD(h->hh))                      \
				typesafe_hash_shrink(&h->hh);                  \
			return container_of(hitem, type, field.hi);            \
		}                                                              \
	return NULL;                                                           \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	uint32_t i;                                                            \
	for (i = 0; i < HASH_SIZE(h->hh); i++)                                 \
		if (h->hh.entries[i])                                          \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL;                                                           \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct thash_item *hitem = &item->field.hi;                      \
	if (hitem->next)                                                       \
		return container_of(hitem->next, type, field.hi);              \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1;                      \
	for (; i < HASH_SIZE(h->hh); i++)                                      \
		if (h->hh.entries[i])                                          \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL;                                                           \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	return prefix ## _next(h, item);                                       \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->hh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH	16
#define SKIPLIST_EMBED		4
#define SKIPLIST_OVERFLOW	(SKIPLIST_EMBED - 1)

struct sskip_item {
	/* levels 0..SKIPLIST_OVERFLOW-1 are embedded; deeper levels live
	 * in a separately allocated sskip_overflow, reached through
	 * next[SKIPLIST_OVERFLOW].
	 */
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};
/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix)                                              \
struct prefix ## _head { struct sskip_head sh; };                              \
struct prefix ## _item { struct sskip_item si; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }

#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq)            \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	/* tag the head's overflow pointer (low bit set) so the out-of-line    \
	 * code can distinguish it from a regular next pointer.                \
	 */                                                                    \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
			((uintptr_t)h->sh.overflow | 1);                       \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct sskip_item *si;                                                 \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq);         \
	return container_of_null(si, type, field.si);                          \
}                                                                              \
macro_inline const type *prefix ## _const_find_gteq(                           \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh,   \
			&item->field.si, cmpfn_nuq);                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
macro_inline const type *prefix ## _const_find_lt(                             \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh,     \
			&item->field.si, cmpfn_nuq);                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
TYPESAFE_FIND_CMP(prefix, type)                                                \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh,               \
			&item->field.si, cmpfn_uq);                            \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh);              \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	const struct sskip_item *first = h->sh.hitem.next[0];                  \
	return container_of_null(first, type, field.si);                       \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct sskip_item *next = item->field.si.next[0];                \
	return container_of_null(next, type, field.si);                        \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct sskip_item *next;                                               \
	next = item ? item->field.si.next[0] : NULL;                           \
	return container_of_null(next, type, field.si);                        \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn)                      \
                                                                               \
macro_inline int prefix ## __cmp(const struct sskip_item *a,                   \
		const struct sskip_item *b)                                    \
{                                                                              \
	return cmpfn(container_of(a, type, field.si),                          \
			container_of(b, type, field.si));                      \
}                                                                              \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
					       const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh,        \
			&item->field.si, &prefix ## __cmp);                    \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
TYPESAFE_FIND(prefix, type)                                                    \
                                                                               \
_DECLARE_SKIPLIST(prefix, type, field,                                         \
		prefix ## __cmp, prefix ## __cmp);                             \
MACRO_REQUIRE_SEMICOLON() /* end */

#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn)                   \
                                                                               \
macro_inline int prefix ## __cmp(const struct sskip_item *a,                   \
		const struct sskip_item *b)                                    \
{                                                                              \
	return cmpfn(container_of(a, type, field.si),                          \
			container_of(b, type, field.si));                      \
}                                                                              \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a,                \
		const struct sskip_item *b)                                    \
{                                                                              \
	int cmpval = cmpfn(container_of(a, type, field.si),                    \
			container_of(b, type, field.si));                      \
	if (cmpval)                                                            \
		return cmpval;                                                 \
	/* equal-keyed items: break the tie by address so insert/delete        \
	 * operate on a total order.                                           \
	 */                                                                    \
	if (a < b)                                                             \
		return -1;                                                     \
	if (a > b)                                                             \
		return 1;                                                      \
	return 0;                                                              \
}                                                                              \
                                                                               \
_DECLARE_SKIPLIST(prefix, type, field,                                         \
		prefix ## __cmp, prefix ## __cmp_uq);                          \
MACRO_REQUIRE_SEMICOLON() /* end */
/* out-of-line skiplist implementation (in typesafe.c); the DECLARE_*
 * wrappers above only translate between user types and sskip_item.
 */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
/* this needs to stay at the end because both files include each other.
 * the resolved order is typesafe.h before typerb.h
 */

#endif /* _FRR_TYPESAFE_H */