/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _FRR_TYPESAFE_H
#define _FRR_TYPESAFE_H

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include "compiler.h"

/* generic macros for all list-like types */

#define for_each(prefix, head, item) \
        for (item = prefix##_first(head); item; \
             item = prefix##_next(head, item))
#define for_each_safe(prefix, head, item) \
        for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
                        prefix##_next_safe(head, \
                                (item = prefix##_first(head))); \
             item; \
             item = prefix##_safe, \
                prefix##_safe = prefix##_next_safe(head, prefix##_safe))
#define for_each_from(prefix, head, item, from) \
        for (item = from, from = prefix##_next_safe(head, item); \
             item; \
             item = from, from = prefix##_next_safe(head, from))

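/* illustrative sketch only -- how the iteration macros are meant to be used
 * with a hypothetical "namelist" list of "struct name", as declared by the
 * DECLARE_* macros further down.  for_each_safe pre-fetches the next pointer,
 * so the current item may be deleted/freed while iterating; plain for_each
 * must not modify the list during the walk.
 *
 *   struct name *item;
 *
 *   for_each(namelist, &head, item)
 *           use(item);
 *
 *   for_each_safe(namelist, &head, item) {
 *           namelist_del(&head, item);
 *           free(item);
 *   }
 */
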
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
        struct slist_item *next;
};

struct slist_head {
        struct slist_item *first, **last_next;
        size_t count;
};

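/* internal helper: insert "item" at the position referenced by "pos", which
 * must point at a next-pointer inside this list (or at head->first); if the
 * insert happens at the tail, last_next is updated to keep add_tail O(1).
 */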
static inline void typesafe_list_add(struct slist_head *head,
                struct slist_item **pos, struct slist_item *item)
{
        item->next = *pos;
        *pos = item;
        if (pos == head->last_next)
                head->last_next = &item->next;
        head->count++;
}

/* use as:
 *
 * PREDECL_LIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem)
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; };

#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }

#define DECLARE_LIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
        h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
        typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
        typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
                type *after, type *item) \
{ \
        struct slist_item **nextp; \
        nextp = after ? &after->field.si.next : &h->sh.first; \
        typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
        struct slist_item **iter = &h->sh.first; \
        while (*iter && *iter != &item->field.si) \
                iter = &(*iter)->next; \
        if (!*iter) \
                return; \
        h->sh.count--; \
        *iter = item->field.si.next; \
        if (!item->field.si.next) \
                h->sh.last_next = iter; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
        struct slist_item *sitem = h->sh.first; \
        if (!sitem) \
                return NULL; \
        h->sh.count--; \
        h->sh.first = sitem->next; \
        if (h->sh.first == NULL) \
                h->sh.last_next = &h->sh.first; \
        return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
        return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
        struct slist_item *sitem = &item->field.si; \
        return container_of_null(sitem->next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
        struct slist_item *sitem; \
        if (!item) \
                return NULL; \
        sitem = &item->field.si; \
        return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
        return h->sh.count; \
} \
/* ... */
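
/* illustrative sketch only (continues the hypothetical "namelist"/"struct
 * name" example from the comment above): using the unsorted list as a FIFO
 * queue with add_tail / pop.
 *
 *   struct namelist_head head;
 *   struct name *n;
 *
 *   namelist_init(&head);
 *   namelist_add_tail(&head, n1);           // n1, n2: previously allocated
 *   namelist_add_tail(&head, n2);
 *   while ((n = namelist_pop(&head)))       // pop removes from the head
 *           handle(n);
 *   namelist_fini(&head);
 */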

/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
        struct ssort_item *next;
};

struct ssort_head {
        struct ssort_item *first;
        size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; };

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
        _PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
        _PREDECL_SORTLIST(prefix)

#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
        struct ssort_item **np = &h->sh.first; \
        int c = 1; \
        while (*np && (c = cmpfn_uq( \
                        container_of(*np, type, field.si), item)) < 0) \
                np = &(*np)->next; \
        if (c == 0) \
                return container_of(*np, type, field.si); \
        item->field.si.next = *np; \
        *np = &item->field.si; \
        h->sh.count++; \
        return NULL; \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
                const type *item) \
{ \
        struct ssort_item *sitem = h->sh.first; \
        int cmpval = 0; \
        while (sitem && (cmpval = cmpfn_nuq( \
                        container_of(sitem, type, field.si), item)) < 0) \
                sitem = sitem->next; \
        return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
                const type *item) \
{ \
        struct ssort_item *prev = NULL, *sitem = h->sh.first; \
        int cmpval = 0; \
        while (sitem && (cmpval = cmpfn_nuq( \
                        container_of(sitem, type, field.si), item)) < 0) \
                sitem = (prev = sitem)->next; \
        return container_of_null(prev, type, field.si); \
} \
/* TODO: del_hint */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
        struct ssort_item **iter = &h->sh.first; \
        while (*iter && *iter != &item->field.si) \
                iter = &(*iter)->next; \
        if (!*iter) \
                return; \
        h->sh.count--; \
        *iter = item->field.si.next; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
        struct ssort_item *sitem = h->sh.first; \
        if (!sitem) \
                return NULL; \
        h->sh.count--; \
        h->sh.first = sitem->next; \
        return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
        return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
        struct ssort_item *sitem = &item->field.si; \
        return container_of_null(sitem->next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
        struct ssort_item *sitem; \
        if (!item) \
                return NULL; \
        sitem = &item->field.si; \
        return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
        return h->sh.count; \
} \
/* ... */

#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
        _DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn) \
 \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
        struct ssort_item *sitem = h->sh.first; \
        int cmpval = 0; \
        while (sitem && (cmpval = cmpfn( \
                        container_of(sitem, type, field.si), item)) < 0) \
                sitem = sitem->next; \
        if (!sitem || cmpval > 0) \
                return NULL; \
        return container_of(sitem, type, field.si); \
} \
/* ... */

#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
        int cmpval = cmpfn(a, b); \
        if (cmpval) \
                return cmpval; \
        if (a < b) \
                return -1; \
        if (a > b) \
                return 1; \
        return 0; \
} \
        _DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp) \
/* ... */
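
/* illustrative sketch only (hypothetical names): a unique sorted list keyed
 * by an integer field.  For the _UNIQ variant, _add returns NULL on success
 * and the already-present element if an equal item exists; _NONUNIQ breaks
 * ties by pointer value so equal keys can coexist.
 *
 *   PREDECL_SORTLIST_UNIQ(namelist)
 *   struct name {
 *           int key;
 *           struct namelist_item nlitem;
 *   };
 *   static int name_cmp(const struct name *a, const struct name *b)
 *   {
 *           return (a->key > b->key) - (a->key < b->key);
 *   }
 *   DECLARE_SORTLIST_UNIQ(namelist, struct name, nlitem, name_cmp)
 *
 *   struct name *dup = namelist_add(&head, n);   // dup != NULL => collision
 *   struct name ref = { .key = 5 };
 *   struct name *ge = namelist_find_gteq(&head, &ref);
 */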


/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
        struct thash_item *next;
        uint32_t hashval;
};

struct thash_head {
        struct thash_item **entries;
        uint32_t count;

        uint8_t tabshift;
        uint8_t minshift, maxshift;
};

#define _HASH_SIZE(tabshift) \
        ((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
        _HASH_SIZE((head).tabshift)
#define _HASH_KEY(tabshift, val) \
        ((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
        _HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
        ((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
        ((head).count <= (HASH_SIZE(head) - 1) / 2)
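/* worked example for the tabshift encoding above: tabshift == 0 means no
 * table is allocated (the _add/_find/_del functions check for this);
 * tabshift == 5 gives _HASH_SIZE == (1U << 5) >> 1 == 16 buckets, and
 * _HASH_KEY() then takes the top 4 bits of the 32-bit hash value
 * (val >> 28), i.e. bucket indices 0..15.
 */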

extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);

/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; };

#define INIT_HASH(var) { }

#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
        assert(h->hh.count == 0); \
        h->hh.minshift = 0; \
        typesafe_hash_shrink(&h->hh); \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
        h->hh.count++; \
        if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
                typesafe_hash_grow(&h->hh); \
 \
        uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
        item->field.hi.hashval = hval; \
        struct thash_item **np = &h->hh.entries[hbits]; \
        while (*np && (*np)->hashval < hval) \
                np = &(*np)->next; \
        if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) { \
                h->hh.count--; \
                return container_of(*np, type, field.hi); \
        } \
        item->field.hi.next = *np; \
        *np = &item->field.hi; \
        return NULL; \
} \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
        if (!h->hh.tabshift) \
                return NULL; \
        uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
        struct thash_item *hitem = h->hh.entries[hbits]; \
        while (hitem && hitem->hashval < hval) \
                hitem = hitem->next; \
        while (hitem && hitem->hashval == hval) { \
                if (!cmpfn(container_of(hitem, type, field.hi), item)) \
                        return container_of(hitem, type, field.hi); \
                hitem = hitem->next; \
        } \
        return NULL; \
} \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
        if (!h->hh.tabshift) \
                return; \
        uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
        struct thash_item **np = &h->hh.entries[hbits]; \
        while (*np && (*np)->hashval < hval) \
                np = &(*np)->next; \
        while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
                np = &(*np)->next; \
        if (*np != &item->field.hi) \
                return; \
        *np = item->field.hi.next; \
        item->field.hi.next = NULL; \
        h->hh.count--; \
        if (HASH_SHRINK_THRESHOLD(h->hh)) \
                typesafe_hash_shrink(&h->hh); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
        uint32_t i; \
        for (i = 0; i < HASH_SIZE(h->hh); i++) \
                if (h->hh.entries[i]) { \
                        struct thash_item *hitem = h->hh.entries[i]; \
                        h->hh.entries[i] = hitem->next; \
                        h->hh.count--; \
                        hitem->next = NULL; \
                        if (HASH_SHRINK_THRESHOLD(h->hh)) \
                                typesafe_hash_shrink(&h->hh); \
                        return container_of(hitem, type, field.hi); \
                } \
        return NULL; \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
        uint32_t i; \
        for (i = 0; i < HASH_SIZE(h->hh); i++) \
                if (h->hh.entries[i]) \
                        return container_of(h->hh.entries[i], type, field.hi); \
        return NULL; \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
        struct thash_item *hitem = &item->field.hi; \
        if (hitem->next) \
                return container_of(hitem->next, type, field.hi); \
        uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
        for (; i < HASH_SIZE(h->hh); i++) \
                if (h->hh.entries[i]) \
                        return container_of(h->hh.entries[i], type, field.hi); \
        return NULL; \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
        if (!item) \
                return NULL; \
        return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
        return h->hh.count; \
} \
/* ... */
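
/* illustrative sketch only (hypothetical names): a hash table needs both a
 * compare and a hash function; the hash function must return a uint32_t and
 * equal items must hash to the same value.
 *
 *   PREDECL_HASH(namelist)
 *   struct name {
 *           uint32_t id;
 *           struct namelist_item nlitem;
 *   };
 *   static int name_cmp(const struct name *a, const struct name *b)
 *   {
 *           return (a->id > b->id) - (a->id < b->id);
 *   }
 *   static uint32_t name_hash(const struct name *a)
 *   {
 *           return a->id * 2654435761u;    // any reasonable 32-bit mix
 *   }
 *   DECLARE_HASH(namelist, struct name, nlitem, name_cmp, name_hash)
 *
 *   struct name ref = { .id = 23 };
 *   struct name *found = namelist_find(&head, &ref);
 */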

/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH	16
#define SKIPLIST_EMBED		4
#define SKIPLIST_OVERFLOW	(SKIPLIST_EMBED - 1)

struct sskip_item {
        struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
        struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
        struct sskip_item hitem;
        struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
        size_t count;
};
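/* layout note: each item embeds only SKIPLIST_EMBED level pointers; the head
 * keeps its remaining levels in sh.overflow, and _init() below stores that
 * pointer in hitem.next[SKIPLIST_OVERFLOW] with its low bit set -- presumably
 * the tag by which the implementation in typesafe.c distinguishes an overflow
 * block from a plain next pointer.
 */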

/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; };

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }

#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
        h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
                ((uintptr_t)h->sh.overflow | 1); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
        struct sskip_item *si; \
        si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
        return container_of_null(si, type, field.si); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
                const type *item) \
{ \
        struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
                        &item->field.si, cmpfn_nuq); \
        return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
                const type *item) \
{ \
        struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
                        &item->field.si, cmpfn_nuq); \
        return container_of_null(sitem, type, field.si); \
} \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
        typesafe_skiplist_del(&h->sh, &item->field.si, cmpfn_uq); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
        struct sskip_item *sitem = h->sh.hitem.next[0]; \
        if (!sitem) \
                return NULL; \
        typesafe_skiplist_del(&h->sh, sitem, cmpfn_uq); \
        return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
        struct sskip_item *first = h->sh.hitem.next[0]; \
        return container_of_null(first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
        struct sskip_item *next = item->field.si.next[0]; \
        return container_of_null(next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
        struct sskip_item *next; \
        next = item ? item->field.si.next[0] : NULL; \
        return container_of_null(next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
        return h->sh.count; \
} \
/* ... */

#define PREDECL_SKIPLIST_UNIQ(prefix) \
        _PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
                const struct sskip_item *b) \
{ \
        return cmpfn(container_of(a, type, field.si), \
                        container_of(b, type, field.si)); \
} \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
        struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
                        &item->field.si, &prefix ## __cmp); \
        return container_of_null(sitem, type, field.si); \
} \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
                prefix ## __cmp, prefix ## __cmp) \
/* ... */

#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
        _PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
 \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
                const struct sskip_item *b) \
{ \
        return cmpfn(container_of(a, type, field.si), \
                        container_of(b, type, field.si)); \
} \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
                const struct sskip_item *b) \
{ \
        int cmpval = cmpfn(container_of(a, type, field.si), \
                        container_of(b, type, field.si)); \
        if (cmpval) \
                return cmpval; \
        if (a < b) \
                return -1; \
        if (a > b) \
                return 1; \
        return 0; \
} \
 \
_DECLARE_SKIPLIST(prefix, type, field, \
                prefix ## __cmp, prefix ## __cmp_uq) \
/* ... */
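
/* illustrative sketch only (hypothetical names): the skiplist offers the same
 * sorted-container API as the sorted list, with expected O(log n) add/find/
 * del, and doubles as a priority queue via pop (which removes the first item
 * in cmp order).
 *
 *   PREDECL_SKIPLIST_UNIQ(namelist)
 *   struct name {
 *           int key;
 *           struct namelist_item nlitem;
 *   };
 *   static int name_cmp(const struct name *a, const struct name *b)
 *   {
 *           return (a->key > b->key) - (a->key < b->key);
 *   }
 *   DECLARE_SKIPLIST_UNIQ(namelist, struct name, nlitem, name_cmp)
 *
 *   while ((n = namelist_pop(&head)))       // items come out in cmp order
 *           process(n);
 */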


extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
                struct sskip_item *item, int (*cmpfn)(
                                const struct sskip_item *a,
                                const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find(struct sskip_head *head,
                const struct sskip_item *item, int (*cmpfn)(
                                const struct sskip_item *a,
                                const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find_gteq(struct sskip_head *head,
                const struct sskip_item *item, int (*cmpfn)(
                                const struct sskip_item *a,
                                const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find_lt(struct sskip_head *head,
                const struct sskip_item *item, int (*cmpfn)(
                                const struct sskip_item *a,
                                const struct sskip_item *b));
extern void typesafe_skiplist_del(struct sskip_head *head,
                struct sskip_item *item, int (*cmpfn)(
                                const struct sskip_item *a,
                                const struct sskip_item *b));

/* this needs to stay at the end because both files include each other.
 * the resolved order is typesafe.h before typerb.h
 */
#include "typerb.h"

#endif /* _FRR_TYPESAFE_H */